1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "output.h"
34 #include "insn-codes.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "except.h"
38 #include "function.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "toplev.h"
43 #include "basic-block.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "langhooks.h"
48 #include "cgraph.h"
49 #include "gimple.h"
50 #include "dwarf2.h"
51 #include "df.h"
52 #include "tm-constrs.h"
53 #include "params.h"
54 #include "cselib.h"
55 #include "debug.h"
56 #include "dwarf2out.h"
58 static rtx legitimize_dllimport_symbol (rtx, bool);
60 #ifndef CHECK_STACK_LIMIT
61 #define CHECK_STACK_LIMIT (-1)
62 #endif
64 /* Return index of given mode in mult and division cost tables. */
65 #define MODE_INDEX(mode) \
66 ((mode) == QImode ? 0 \
67 : (mode) == HImode ? 1 \
68 : (mode) == SImode ? 2 \
69 : (mode) == DImode ? 3 \
70 : 4)
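/* Illustrative sketch of how this index is consumed (an assumption drawn
   from the mult_init/mult_bit and divide members of struct processor_costs
   in i386.h; the real lookups live in the rtx-cost code later in this file):

     multiply_cost = ix86_cost->mult_init[MODE_INDEX (mode)]
                     + nbits * ix86_cost->mult_bit;
     divide_cost   = ix86_cost->divide[MODE_INDEX (mode)];

   QImode..DImode select slots 0..3 of each per-mode table, and any other
   mode falls through to the "other" slot 4.  */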
72 /* Processor costs (relative to an add) */
73 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
74 #define COSTS_N_BYTES(N) ((N) * 2)
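/* Worked example under the assumption stated above (COSTS_N_INSNS (N) being
   (N) * 4): COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1), so a two-byte add
   weighs the same as one average instruction and the size table below stays
   roughly comparable with the speed tables that follow.  */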
76 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
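/* How to read the memcpy/memset descriptors in the tables below (a sketch
   assuming the struct stringop_algs layout from i386.h): each descriptor is

     {alg_for_unknown_size, {{max_size_1, alg_1}, ..., {-1, last_alg}}}

   and each cost table carries a pair of them, slot [0] used when generating
   32-bit code and slot [1] for 64-bit code.  DUMMY_STRINGOP_ALGS simply
   fills the slot a given tuning never consults with a libcall-only
   strategy.  */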
78 const
79 struct processor_costs ix86_size_cost = {/* costs for tuning for size */
80 COSTS_N_BYTES (2), /* cost of an add instruction */
81 COSTS_N_BYTES (3), /* cost of a lea instruction */
82 COSTS_N_BYTES (2), /* variable shift costs */
83 COSTS_N_BYTES (3), /* constant shift costs */
84 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
85 COSTS_N_BYTES (3), /* HI */
86 COSTS_N_BYTES (3), /* SI */
87 COSTS_N_BYTES (3), /* DI */
88 COSTS_N_BYTES (5)}, /* other */
89 0, /* cost of multiply per each bit set */
90 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
91 COSTS_N_BYTES (3), /* HI */
92 COSTS_N_BYTES (3), /* SI */
93 COSTS_N_BYTES (3), /* DI */
94 COSTS_N_BYTES (5)}, /* other */
95 COSTS_N_BYTES (3), /* cost of movsx */
96 COSTS_N_BYTES (3), /* cost of movzx */
97 0, /* "large" insn */
98 2, /* MOVE_RATIO */
99 2, /* cost for loading QImode using movzbl */
100 {2, 2, 2}, /* cost of loading integer registers
101 in QImode, HImode and SImode.
102 Relative to reg-reg move (2). */
103 {2, 2, 2}, /* cost of storing integer registers */
104 2, /* cost of reg,reg fld/fst */
105 {2, 2, 2}, /* cost of loading fp registers
106 in SFmode, DFmode and XFmode */
107 {2, 2, 2}, /* cost of storing fp registers
108 in SFmode, DFmode and XFmode */
109 3, /* cost of moving MMX register */
110 {3, 3}, /* cost of loading MMX registers
111 in SImode and DImode */
112 {3, 3}, /* cost of storing MMX registers
113 in SImode and DImode */
114 3, /* cost of moving SSE register */
115 {3, 3, 3}, /* cost of loading SSE registers
116 in SImode, DImode and TImode */
117 {3, 3, 3}, /* cost of storing SSE registers
118 in SImode, DImode and TImode */
119 3, /* MMX or SSE register to integer */
120 0, /* size of l1 cache */
121 0, /* size of l2 cache */
122 0, /* size of prefetch block */
123 0, /* number of parallel prefetches */
124 2, /* Branch cost */
125 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
126 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
127 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
128 COSTS_N_BYTES (2), /* cost of FABS instruction. */
129 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
130 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
131 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
132 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
133 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
134 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
135 1, /* scalar_stmt_cost. */
136 1, /* scalar load_cost. */
137 1, /* scalar_store_cost. */
138 1, /* vec_stmt_cost. */
139 1, /* vec_to_scalar_cost. */
140 1, /* scalar_to_vec_cost. */
141 1, /* vec_align_load_cost. */
142 1, /* vec_unalign_load_cost. */
143 1, /* vec_store_cost. */
144 1, /* cond_taken_branch_cost. */
145 1, /* cond_not_taken_branch_cost. */
146 };
148 /* Processor costs (relative to an add) */
149 static const
150 struct processor_costs i386_cost = { /* 386 specific costs */
151 COSTS_N_INSNS (1), /* cost of an add instruction */
152 COSTS_N_INSNS (1), /* cost of a lea instruction */
153 COSTS_N_INSNS (3), /* variable shift costs */
154 COSTS_N_INSNS (2), /* constant shift costs */
155 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
156 COSTS_N_INSNS (6), /* HI */
157 COSTS_N_INSNS (6), /* SI */
158 COSTS_N_INSNS (6), /* DI */
159 COSTS_N_INSNS (6)}, /* other */
160 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
161 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
162 COSTS_N_INSNS (23), /* HI */
163 COSTS_N_INSNS (23), /* SI */
164 COSTS_N_INSNS (23), /* DI */
165 COSTS_N_INSNS (23)}, /* other */
166 COSTS_N_INSNS (3), /* cost of movsx */
167 COSTS_N_INSNS (2), /* cost of movzx */
168 15, /* "large" insn */
169 3, /* MOVE_RATIO */
170 4, /* cost for loading QImode using movzbl */
171 {2, 4, 2}, /* cost of loading integer registers
172 in QImode, HImode and SImode.
173 Relative to reg-reg move (2). */
174 {2, 4, 2}, /* cost of storing integer registers */
175 2, /* cost of reg,reg fld/fst */
176 {8, 8, 8}, /* cost of loading fp registers
177 in SFmode, DFmode and XFmode */
178 {8, 8, 8}, /* cost of storing fp registers
179 in SFmode, DFmode and XFmode */
180 2, /* cost of moving MMX register */
181 {4, 8}, /* cost of loading MMX registers
182 in SImode and DImode */
183 {4, 8}, /* cost of storing MMX registers
184 in SImode and DImode */
185 2, /* cost of moving SSE register */
186 {4, 8, 16}, /* cost of loading SSE registers
187 in SImode, DImode and TImode */
188 {4, 8, 16}, /* cost of storing SSE registers
189 in SImode, DImode and TImode */
190 3, /* MMX or SSE register to integer */
191 0, /* size of l1 cache */
192 0, /* size of l2 cache */
193 0, /* size of prefetch block */
194 0, /* number of parallel prefetches */
195 1, /* Branch cost */
196 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
197 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
198 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
199 COSTS_N_INSNS (22), /* cost of FABS instruction. */
200 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
201 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
202 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
203 DUMMY_STRINGOP_ALGS},
204 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
205 DUMMY_STRINGOP_ALGS},
206 1, /* scalar_stmt_cost. */
207 1, /* scalar load_cost. */
208 1, /* scalar_store_cost. */
209 1, /* vec_stmt_cost. */
210 1, /* vec_to_scalar_cost. */
211 1, /* scalar_to_vec_cost. */
212 1, /* vec_align_load_cost. */
213 2, /* vec_unalign_load_cost. */
214 1, /* vec_store_cost. */
215 3, /* cond_taken_branch_cost. */
216 1, /* cond_not_taken_branch_cost. */
217 };
219 static const
220 struct processor_costs i486_cost = { /* 486 specific costs */
221 COSTS_N_INSNS (1), /* cost of an add instruction */
222 COSTS_N_INSNS (1), /* cost of a lea instruction */
223 COSTS_N_INSNS (3), /* variable shift costs */
224 COSTS_N_INSNS (2), /* constant shift costs */
225 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
226 COSTS_N_INSNS (12), /* HI */
227 COSTS_N_INSNS (12), /* SI */
228 COSTS_N_INSNS (12), /* DI */
229 COSTS_N_INSNS (12)}, /* other */
230 1, /* cost of multiply per each bit set */
231 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
232 COSTS_N_INSNS (40), /* HI */
233 COSTS_N_INSNS (40), /* SI */
234 COSTS_N_INSNS (40), /* DI */
235 COSTS_N_INSNS (40)}, /* other */
236 COSTS_N_INSNS (3), /* cost of movsx */
237 COSTS_N_INSNS (2), /* cost of movzx */
238 15, /* "large" insn */
239 3, /* MOVE_RATIO */
240 4, /* cost for loading QImode using movzbl */
241 {2, 4, 2}, /* cost of loading integer registers
242 in QImode, HImode and SImode.
243 Relative to reg-reg move (2). */
244 {2, 4, 2}, /* cost of storing integer registers */
245 2, /* cost of reg,reg fld/fst */
246 {8, 8, 8}, /* cost of loading fp registers
247 in SFmode, DFmode and XFmode */
248 {8, 8, 8}, /* cost of storing fp registers
249 in SFmode, DFmode and XFmode */
250 2, /* cost of moving MMX register */
251 {4, 8}, /* cost of loading MMX registers
252 in SImode and DImode */
253 {4, 8}, /* cost of storing MMX registers
254 in SImode and DImode */
255 2, /* cost of moving SSE register */
256 {4, 8, 16}, /* cost of loading SSE registers
257 in SImode, DImode and TImode */
258 {4, 8, 16}, /* cost of storing SSE registers
259 in SImode, DImode and TImode */
260 3, /* MMX or SSE register to integer */
261 4, /* size of l1 cache. 486 has 8kB cache
262 shared for code and data, so 4kB is
263 not really precise. */
264 4, /* size of l2 cache */
265 0, /* size of prefetch block */
266 0, /* number of parallel prefetches */
267 1, /* Branch cost */
268 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
269 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
270 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
271 COSTS_N_INSNS (3), /* cost of FABS instruction. */
272 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
273 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
274 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
275 DUMMY_STRINGOP_ALGS},
276 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
277 DUMMY_STRINGOP_ALGS},
278 1, /* scalar_stmt_cost. */
279 1, /* scalar load_cost. */
280 1, /* scalar_store_cost. */
281 1, /* vec_stmt_cost. */
282 1, /* vec_to_scalar_cost. */
283 1, /* scalar_to_vec_cost. */
284 1, /* vec_align_load_cost. */
285 2, /* vec_unalign_load_cost. */
286 1, /* vec_store_cost. */
287 3, /* cond_taken_branch_cost. */
288 1, /* cond_not_taken_branch_cost. */
289 };
291 static const
292 struct processor_costs pentium_cost = {
293 COSTS_N_INSNS (1), /* cost of an add instruction */
294 COSTS_N_INSNS (1), /* cost of a lea instruction */
295 COSTS_N_INSNS (4), /* variable shift costs */
296 COSTS_N_INSNS (1), /* constant shift costs */
297 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
298 COSTS_N_INSNS (11), /* HI */
299 COSTS_N_INSNS (11), /* SI */
300 COSTS_N_INSNS (11), /* DI */
301 COSTS_N_INSNS (11)}, /* other */
302 0, /* cost of multiply per each bit set */
303 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
304 COSTS_N_INSNS (25), /* HI */
305 COSTS_N_INSNS (25), /* SI */
306 COSTS_N_INSNS (25), /* DI */
307 COSTS_N_INSNS (25)}, /* other */
308 COSTS_N_INSNS (3), /* cost of movsx */
309 COSTS_N_INSNS (2), /* cost of movzx */
310 8, /* "large" insn */
311 6, /* MOVE_RATIO */
312 6, /* cost for loading QImode using movzbl */
313 {2, 4, 2}, /* cost of loading integer registers
314 in QImode, HImode and SImode.
315 Relative to reg-reg move (2). */
316 {2, 4, 2}, /* cost of storing integer registers */
317 2, /* cost of reg,reg fld/fst */
318 {2, 2, 6}, /* cost of loading fp registers
319 in SFmode, DFmode and XFmode */
320 {4, 4, 6}, /* cost of storing fp registers
321 in SFmode, DFmode and XFmode */
322 8, /* cost of moving MMX register */
323 {8, 8}, /* cost of loading MMX registers
324 in SImode and DImode */
325 {8, 8}, /* cost of storing MMX registers
326 in SImode and DImode */
327 2, /* cost of moving SSE register */
328 {4, 8, 16}, /* cost of loading SSE registers
329 in SImode, DImode and TImode */
330 {4, 8, 16}, /* cost of storing SSE registers
331 in SImode, DImode and TImode */
332 3, /* MMX or SSE register to integer */
333 8, /* size of l1 cache. */
334 8, /* size of l2 cache */
335 0, /* size of prefetch block */
336 0, /* number of parallel prefetches */
337 2, /* Branch cost */
338 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
339 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
340 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
341 COSTS_N_INSNS (1), /* cost of FABS instruction. */
342 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
343 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
344 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
345 DUMMY_STRINGOP_ALGS},
346 {{libcall, {{-1, rep_prefix_4_byte}}},
347 DUMMY_STRINGOP_ALGS},
348 1, /* scalar_stmt_cost. */
349 1, /* scalar load_cost. */
350 1, /* scalar_store_cost. */
351 1, /* vec_stmt_cost. */
352 1, /* vec_to_scalar_cost. */
353 1, /* scalar_to_vec_cost. */
354 1, /* vec_align_load_cost. */
355 2, /* vec_unalign_load_cost. */
356 1, /* vec_store_cost. */
357 3, /* cond_taken_branch_cost. */
358 1, /* cond_not_taken_branch_cost. */
359 };
361 static const
362 struct processor_costs pentiumpro_cost = {
363 COSTS_N_INSNS (1), /* cost of an add instruction */
364 COSTS_N_INSNS (1), /* cost of a lea instruction */
365 COSTS_N_INSNS (1), /* variable shift costs */
366 COSTS_N_INSNS (1), /* constant shift costs */
367 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
368 COSTS_N_INSNS (4), /* HI */
369 COSTS_N_INSNS (4), /* SI */
370 COSTS_N_INSNS (4), /* DI */
371 COSTS_N_INSNS (4)}, /* other */
372 0, /* cost of multiply per each bit set */
373 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
374 COSTS_N_INSNS (17), /* HI */
375 COSTS_N_INSNS (17), /* SI */
376 COSTS_N_INSNS (17), /* DI */
377 COSTS_N_INSNS (17)}, /* other */
378 COSTS_N_INSNS (1), /* cost of movsx */
379 COSTS_N_INSNS (1), /* cost of movzx */
380 8, /* "large" insn */
381 6, /* MOVE_RATIO */
382 2, /* cost for loading QImode using movzbl */
383 {4, 4, 4}, /* cost of loading integer registers
384 in QImode, HImode and SImode.
385 Relative to reg-reg move (2). */
386 {2, 2, 2}, /* cost of storing integer registers */
387 2, /* cost of reg,reg fld/fst */
388 {2, 2, 6}, /* cost of loading fp registers
389 in SFmode, DFmode and XFmode */
390 {4, 4, 6}, /* cost of storing fp registers
391 in SFmode, DFmode and XFmode */
392 2, /* cost of moving MMX register */
393 {2, 2}, /* cost of loading MMX registers
394 in SImode and DImode */
395 {2, 2}, /* cost of storing MMX registers
396 in SImode and DImode */
397 2, /* cost of moving SSE register */
398 {2, 2, 8}, /* cost of loading SSE registers
399 in SImode, DImode and TImode */
400 {2, 2, 8}, /* cost of storing SSE registers
401 in SImode, DImode and TImode */
402 3, /* MMX or SSE register to integer */
403 8, /* size of l1 cache. */
404 256, /* size of l2 cache */
405 32, /* size of prefetch block */
406 6, /* number of parallel prefetches */
407 2, /* Branch cost */
408 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
409 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
410 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
411 COSTS_N_INSNS (2), /* cost of FABS instruction. */
412 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
413 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
414 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we ensure
415 the alignment). For small blocks an inline loop is still a noticeable win; for bigger
416 blocks either rep movsl or rep movsb is the way to go. Rep movsb apparently has a
417 more expensive startup time in the CPU, but after 4K the difference is down in the noise.
418 */
419 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
420 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
421 DUMMY_STRINGOP_ALGS},
422 {{rep_prefix_4_byte, {{1024, unrolled_loop},
423 {8192, rep_prefix_4_byte}, {-1, libcall}}},
424 DUMMY_STRINGOP_ALGS},
425 1, /* scalar_stmt_cost. */
426 1, /* scalar load_cost. */
427 1, /* scalar_store_cost. */
428 1, /* vec_stmt_cost. */
429 1, /* vec_to_scalar_cost. */
430 1, /* scalar_to_vec_cost. */
431 1, /* vec_align_load_cost. */
432 2, /* vec_unalign_load_cost. */
433 1, /* vec_store_cost. */
434 3, /* cond_taken_branch_cost. */
435 1, /* cond_not_taken_branch_cost. */
436 };
438 static const
439 struct processor_costs geode_cost = {
440 COSTS_N_INSNS (1), /* cost of an add instruction */
441 COSTS_N_INSNS (1), /* cost of a lea instruction */
442 COSTS_N_INSNS (2), /* variable shift costs */
443 COSTS_N_INSNS (1), /* constant shift costs */
444 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
445 COSTS_N_INSNS (4), /* HI */
446 COSTS_N_INSNS (7), /* SI */
447 COSTS_N_INSNS (7), /* DI */
448 COSTS_N_INSNS (7)}, /* other */
449 0, /* cost of multiply per each bit set */
450 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
451 COSTS_N_INSNS (23), /* HI */
452 COSTS_N_INSNS (39), /* SI */
453 COSTS_N_INSNS (39), /* DI */
454 COSTS_N_INSNS (39)}, /* other */
455 COSTS_N_INSNS (1), /* cost of movsx */
456 COSTS_N_INSNS (1), /* cost of movzx */
457 8, /* "large" insn */
458 4, /* MOVE_RATIO */
459 1, /* cost for loading QImode using movzbl */
460 {1, 1, 1}, /* cost of loading integer registers
461 in QImode, HImode and SImode.
462 Relative to reg-reg move (2). */
463 {1, 1, 1}, /* cost of storing integer registers */
464 1, /* cost of reg,reg fld/fst */
465 {1, 1, 1}, /* cost of loading fp registers
466 in SFmode, DFmode and XFmode */
467 {4, 6, 6}, /* cost of storing fp registers
468 in SFmode, DFmode and XFmode */
470 1, /* cost of moving MMX register */
471 {1, 1}, /* cost of loading MMX registers
472 in SImode and DImode */
473 {1, 1}, /* cost of storing MMX registers
474 in SImode and DImode */
475 1, /* cost of moving SSE register */
476 {1, 1, 1}, /* cost of loading SSE registers
477 in SImode, DImode and TImode */
478 {1, 1, 1}, /* cost of storing SSE registers
479 in SImode, DImode and TImode */
480 1, /* MMX or SSE register to integer */
481 64, /* size of l1 cache. */
482 128, /* size of l2 cache. */
483 32, /* size of prefetch block */
484 1, /* number of parallel prefetches */
485 1, /* Branch cost */
486 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
487 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
488 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
489 COSTS_N_INSNS (1), /* cost of FABS instruction. */
490 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
491 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
492 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
493 DUMMY_STRINGOP_ALGS},
494 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
495 DUMMY_STRINGOP_ALGS},
496 1, /* scalar_stmt_cost. */
497 1, /* scalar load_cost. */
498 1, /* scalar_store_cost. */
499 1, /* vec_stmt_cost. */
500 1, /* vec_to_scalar_cost. */
501 1, /* scalar_to_vec_cost. */
502 1, /* vec_align_load_cost. */
503 2, /* vec_unalign_load_cost. */
504 1, /* vec_store_cost. */
505 3, /* cond_taken_branch_cost. */
506 1, /* cond_not_taken_branch_cost. */
507 };
509 static const
510 struct processor_costs k6_cost = {
511 COSTS_N_INSNS (1), /* cost of an add instruction */
512 COSTS_N_INSNS (2), /* cost of a lea instruction */
513 COSTS_N_INSNS (1), /* variable shift costs */
514 COSTS_N_INSNS (1), /* constant shift costs */
515 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
516 COSTS_N_INSNS (3), /* HI */
517 COSTS_N_INSNS (3), /* SI */
518 COSTS_N_INSNS (3), /* DI */
519 COSTS_N_INSNS (3)}, /* other */
520 0, /* cost of multiply per each bit set */
521 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
522 COSTS_N_INSNS (18), /* HI */
523 COSTS_N_INSNS (18), /* SI */
524 COSTS_N_INSNS (18), /* DI */
525 COSTS_N_INSNS (18)}, /* other */
526 COSTS_N_INSNS (2), /* cost of movsx */
527 COSTS_N_INSNS (2), /* cost of movzx */
528 8, /* "large" insn */
529 4, /* MOVE_RATIO */
530 3, /* cost for loading QImode using movzbl */
531 {4, 5, 4}, /* cost of loading integer registers
532 in QImode, HImode and SImode.
533 Relative to reg-reg move (2). */
534 {2, 3, 2}, /* cost of storing integer registers */
535 4, /* cost of reg,reg fld/fst */
536 {6, 6, 6}, /* cost of loading fp registers
537 in SFmode, DFmode and XFmode */
538 {4, 4, 4}, /* cost of storing fp registers
539 in SFmode, DFmode and XFmode */
540 2, /* cost of moving MMX register */
541 {2, 2}, /* cost of loading MMX registers
542 in SImode and DImode */
543 {2, 2}, /* cost of storing MMX registers
544 in SImode and DImode */
545 2, /* cost of moving SSE register */
546 {2, 2, 8}, /* cost of loading SSE registers
547 in SImode, DImode and TImode */
548 {2, 2, 8}, /* cost of storing SSE registers
549 in SImode, DImode and TImode */
550 6, /* MMX or SSE register to integer */
551 32, /* size of l1 cache. */
552 32, /* size of l2 cache. Some models
553 have integrated l2 cache, but
554 optimizing for k6 is not important
555 enough to worry about that. */
556 32, /* size of prefetch block */
557 1, /* number of parallel prefetches */
558 1, /* Branch cost */
559 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
560 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
561 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
562 COSTS_N_INSNS (2), /* cost of FABS instruction. */
563 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
564 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
565 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
566 DUMMY_STRINGOP_ALGS},
567 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
568 DUMMY_STRINGOP_ALGS},
569 1, /* scalar_stmt_cost. */
570 1, /* scalar load_cost. */
571 1, /* scalar_store_cost. */
572 1, /* vec_stmt_cost. */
573 1, /* vec_to_scalar_cost. */
574 1, /* scalar_to_vec_cost. */
575 1, /* vec_align_load_cost. */
576 2, /* vec_unalign_load_cost. */
577 1, /* vec_store_cost. */
578 3, /* cond_taken_branch_cost. */
579 1, /* cond_not_taken_branch_cost. */
580 };
582 static const
583 struct processor_costs athlon_cost = {
584 COSTS_N_INSNS (1), /* cost of an add instruction */
585 COSTS_N_INSNS (2), /* cost of a lea instruction */
586 COSTS_N_INSNS (1), /* variable shift costs */
587 COSTS_N_INSNS (1), /* constant shift costs */
588 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
589 COSTS_N_INSNS (5), /* HI */
590 COSTS_N_INSNS (5), /* SI */
591 COSTS_N_INSNS (5), /* DI */
592 COSTS_N_INSNS (5)}, /* other */
593 0, /* cost of multiply per each bit set */
594 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
595 COSTS_N_INSNS (26), /* HI */
596 COSTS_N_INSNS (42), /* SI */
597 COSTS_N_INSNS (74), /* DI */
598 COSTS_N_INSNS (74)}, /* other */
599 COSTS_N_INSNS (1), /* cost of movsx */
600 COSTS_N_INSNS (1), /* cost of movzx */
601 8, /* "large" insn */
602 9, /* MOVE_RATIO */
603 4, /* cost for loading QImode using movzbl */
604 {3, 4, 3}, /* cost of loading integer registers
605 in QImode, HImode and SImode.
606 Relative to reg-reg move (2). */
607 {3, 4, 3}, /* cost of storing integer registers */
608 4, /* cost of reg,reg fld/fst */
609 {4, 4, 12}, /* cost of loading fp registers
610 in SFmode, DFmode and XFmode */
611 {6, 6, 8}, /* cost of storing fp registers
612 in SFmode, DFmode and XFmode */
613 2, /* cost of moving MMX register */
614 {4, 4}, /* cost of loading MMX registers
615 in SImode and DImode */
616 {4, 4}, /* cost of storing MMX registers
617 in SImode and DImode */
618 2, /* cost of moving SSE register */
619 {4, 4, 6}, /* cost of loading SSE registers
620 in SImode, DImode and TImode */
621 {4, 4, 5}, /* cost of storing SSE registers
622 in SImode, DImode and TImode */
623 5, /* MMX or SSE register to integer */
624 64, /* size of l1 cache. */
625 256, /* size of l2 cache. */
626 64, /* size of prefetch block */
627 6, /* number of parallel prefetches */
628 5, /* Branch cost */
629 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
630 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
631 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
632 COSTS_N_INSNS (2), /* cost of FABS instruction. */
633 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
634 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
635 /* For some reason, Athlon deals better with the REP prefix (relative to loops)
636 than K8 does. Alignment becomes important after 8 bytes for memcpy and
637 128 bytes for memset. */
638 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
639 DUMMY_STRINGOP_ALGS},
640 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
641 DUMMY_STRINGOP_ALGS},
642 1, /* scalar_stmt_cost. */
643 1, /* scalar load_cost. */
644 1, /* scalar_store_cost. */
645 1, /* vec_stmt_cost. */
646 1, /* vec_to_scalar_cost. */
647 1, /* scalar_to_vec_cost. */
648 1, /* vec_align_load_cost. */
649 2, /* vec_unalign_load_cost. */
650 1, /* vec_store_cost. */
651 3, /* cond_taken_branch_cost. */
652 1, /* cond_not_taken_branch_cost. */
653 };
655 static const
656 struct processor_costs k8_cost = {
657 COSTS_N_INSNS (1), /* cost of an add instruction */
658 COSTS_N_INSNS (2), /* cost of a lea instruction */
659 COSTS_N_INSNS (1), /* variable shift costs */
660 COSTS_N_INSNS (1), /* constant shift costs */
661 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
662 COSTS_N_INSNS (4), /* HI */
663 COSTS_N_INSNS (3), /* SI */
664 COSTS_N_INSNS (4), /* DI */
665 COSTS_N_INSNS (5)}, /* other */
666 0, /* cost of multiply per each bit set */
667 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
668 COSTS_N_INSNS (26), /* HI */
669 COSTS_N_INSNS (42), /* SI */
670 COSTS_N_INSNS (74), /* DI */
671 COSTS_N_INSNS (74)}, /* other */
672 COSTS_N_INSNS (1), /* cost of movsx */
673 COSTS_N_INSNS (1), /* cost of movzx */
674 8, /* "large" insn */
675 9, /* MOVE_RATIO */
676 4, /* cost for loading QImode using movzbl */
677 {3, 4, 3}, /* cost of loading integer registers
678 in QImode, HImode and SImode.
679 Relative to reg-reg move (2). */
680 {3, 4, 3}, /* cost of storing integer registers */
681 4, /* cost of reg,reg fld/fst */
682 {4, 4, 12}, /* cost of loading fp registers
683 in SFmode, DFmode and XFmode */
684 {6, 6, 8}, /* cost of storing fp registers
685 in SFmode, DFmode and XFmode */
686 2, /* cost of moving MMX register */
687 {3, 3}, /* cost of loading MMX registers
688 in SImode and DImode */
689 {4, 4}, /* cost of storing MMX registers
690 in SImode and DImode */
691 2, /* cost of moving SSE register */
692 {4, 3, 6}, /* cost of loading SSE registers
693 in SImode, DImode and TImode */
694 {4, 4, 5}, /* cost of storing SSE registers
695 in SImode, DImode and TImode */
696 5, /* MMX or SSE register to integer */
697 64, /* size of l1 cache. */
698 512, /* size of l2 cache. */
699 64, /* size of prefetch block */
700 /* New AMD processors never drop prefetches; if they cannot be performed
701 immediately, they are queued. We set the number of simultaneous prefetches
702 to a large constant to reflect this (it is probably not a good idea not
703 to limit the number of prefetches at all, as their execution also takes some
704 time). */
705 100, /* number of parallel prefetches */
706 3, /* Branch cost */
707 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
708 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
709 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
710 COSTS_N_INSNS (2), /* cost of FABS instruction. */
711 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
712 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
713 /* K8 has an optimized REP instruction for medium-sized blocks, but for very small
714 blocks it is better to use a loop. For large blocks, a libcall can do
715 non-temporal accesses and beat the inline expansion considerably. */
716 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
717 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
718 {{libcall, {{8, loop}, {24, unrolled_loop},
719 {2048, rep_prefix_4_byte}, {-1, libcall}}},
720 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
721 4, /* scalar_stmt_cost. */
722 2, /* scalar load_cost. */
723 2, /* scalar_store_cost. */
724 5, /* vec_stmt_cost. */
725 0, /* vec_to_scalar_cost. */
726 2, /* scalar_to_vec_cost. */
727 2, /* vec_align_load_cost. */
728 3, /* vec_unalign_load_cost. */
729 3, /* vec_store_cost. */
730 3, /* cond_taken_branch_cost. */
731 2, /* cond_not_taken_branch_cost. */
732 };
734 struct processor_costs amdfam10_cost = {
735 COSTS_N_INSNS (1), /* cost of an add instruction */
736 COSTS_N_INSNS (2), /* cost of a lea instruction */
737 COSTS_N_INSNS (1), /* variable shift costs */
738 COSTS_N_INSNS (1), /* constant shift costs */
739 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
740 COSTS_N_INSNS (4), /* HI */
741 COSTS_N_INSNS (3), /* SI */
742 COSTS_N_INSNS (4), /* DI */
743 COSTS_N_INSNS (5)}, /* other */
744 0, /* cost of multiply per each bit set */
745 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
746 COSTS_N_INSNS (35), /* HI */
747 COSTS_N_INSNS (51), /* SI */
748 COSTS_N_INSNS (83), /* DI */
749 COSTS_N_INSNS (83)}, /* other */
750 COSTS_N_INSNS (1), /* cost of movsx */
751 COSTS_N_INSNS (1), /* cost of movzx */
752 8, /* "large" insn */
753 9, /* MOVE_RATIO */
754 4, /* cost for loading QImode using movzbl */
755 {3, 4, 3}, /* cost of loading integer registers
756 in QImode, HImode and SImode.
757 Relative to reg-reg move (2). */
758 {3, 4, 3}, /* cost of storing integer registers */
759 4, /* cost of reg,reg fld/fst */
760 {4, 4, 12}, /* cost of loading fp registers
761 in SFmode, DFmode and XFmode */
762 {6, 6, 8}, /* cost of storing fp registers
763 in SFmode, DFmode and XFmode */
764 2, /* cost of moving MMX register */
765 {3, 3}, /* cost of loading MMX registers
766 in SImode and DImode */
767 {4, 4}, /* cost of storing MMX registers
768 in SImode and DImode */
769 2, /* cost of moving SSE register */
770 {4, 4, 3}, /* cost of loading SSE registers
771 in SImode, DImode and TImode */
772 {4, 4, 5}, /* cost of storing SSE registers
773 in SImode, DImode and TImode */
774 3, /* MMX or SSE register to integer */
775 /* On K8
776 MOVD reg64, xmmreg Double FSTORE 4
777 MOVD reg32, xmmreg Double FSTORE 4
778 On AMDFAM10
779 MOVD reg64, xmmreg Double FADD 3
780 1/1 1/1
781 MOVD reg32, xmmreg Double FADD 3
782 1/1 1/1 */
783 64, /* size of l1 cache. */
784 512, /* size of l2 cache. */
785 64, /* size of prefetch block */
786 /* New AMD processors never drop prefetches; if they cannot be performed
787 immediately, they are queued. We set the number of simultaneous prefetches
788 to a large constant to reflect this (it is probably not a good idea not
789 to limit the number of prefetches at all, as their execution also takes some
790 time). */
791 100, /* number of parallel prefetches */
792 2, /* Branch cost */
793 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
794 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
795 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
796 COSTS_N_INSNS (2), /* cost of FABS instruction. */
797 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
798 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
800 /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but for
801 very small blocks it is better to use a loop. For large blocks, a libcall can
802 do non-temporal accesses and beat the inline expansion considerably. */
803 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
804 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
805 {{libcall, {{8, loop}, {24, unrolled_loop},
806 {2048, rep_prefix_4_byte}, {-1, libcall}}},
807 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
808 4, /* scalar_stmt_cost. */
809 2, /* scalar load_cost. */
810 2, /* scalar_store_cost. */
811 6, /* vec_stmt_cost. */
812 0, /* vec_to_scalar_cost. */
813 2, /* scalar_to_vec_cost. */
814 2, /* vec_align_load_cost. */
815 2, /* vec_unalign_load_cost. */
816 2, /* vec_store_cost. */
817 2, /* cond_taken_branch_cost. */
818 1, /* cond_not_taken_branch_cost. */
819 };
821 struct processor_costs bdver1_cost = {
822 COSTS_N_INSNS (1), /* cost of an add instruction */
823 COSTS_N_INSNS (2), /* cost of a lea instruction */
824 COSTS_N_INSNS (1), /* variable shift costs */
825 COSTS_N_INSNS (1), /* constant shift costs */
826 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
827 COSTS_N_INSNS (4), /* HI */
828 COSTS_N_INSNS (3), /* SI */
829 COSTS_N_INSNS (4), /* DI */
830 COSTS_N_INSNS (5)}, /* other */
831 0, /* cost of multiply per each bit set */
832 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
833 COSTS_N_INSNS (35), /* HI */
834 COSTS_N_INSNS (51), /* SI */
835 COSTS_N_INSNS (83), /* DI */
836 COSTS_N_INSNS (83)}, /* other */
837 COSTS_N_INSNS (1), /* cost of movsx */
838 COSTS_N_INSNS (1), /* cost of movzx */
839 8, /* "large" insn */
840 9, /* MOVE_RATIO */
841 4, /* cost for loading QImode using movzbl */
842 {3, 4, 3}, /* cost of loading integer registers
843 in QImode, HImode and SImode.
844 Relative to reg-reg move (2). */
845 {3, 4, 3}, /* cost of storing integer registers */
846 4, /* cost of reg,reg fld/fst */
847 {4, 4, 12}, /* cost of loading fp registers
848 in SFmode, DFmode and XFmode */
849 {6, 6, 8}, /* cost of storing fp registers
850 in SFmode, DFmode and XFmode */
851 2, /* cost of moving MMX register */
852 {3, 3}, /* cost of loading MMX registers
853 in SImode and DImode */
854 {4, 4}, /* cost of storing MMX registers
855 in SImode and DImode */
856 2, /* cost of moving SSE register */
857 {4, 4, 3}, /* cost of loading SSE registers
858 in SImode, DImode and TImode */
859 {4, 4, 5}, /* cost of storing SSE registers
860 in SImode, DImode and TImode */
861 3, /* MMX or SSE register to integer */
862 /* On K8
863 MOVD reg64, xmmreg Double FSTORE 4
864 MOVD reg32, xmmreg Double FSTORE 4
865 On AMDFAM10
866 MOVD reg64, xmmreg Double FADD 3
867 1/1 1/1
868 MOVD reg32, xmmreg Double FADD 3
869 1/1 1/1 */
870 64, /* size of l1 cache. */
871 1024, /* size of l2 cache. */
872 64, /* size of prefetch block */
873 /* New AMD processors never drop prefetches; if they cannot be performed
874 immediately, they are queued. We set the number of simultaneous prefetches
875 to a large constant to reflect this (it is probably not a good idea not
876 to limit the number of prefetches at all, as their execution also takes some
877 time). */
878 100, /* number of parallel prefetches */
879 2, /* Branch cost */
880 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
881 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
882 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
883 COSTS_N_INSNS (2), /* cost of FABS instruction. */
884 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
885 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
887 /* BDVER1 has an optimized REP instruction for medium-sized blocks, but for
888 very small blocks it is better to use a loop. For large blocks, a libcall can
889 do non-temporal accesses and beat the inline expansion considerably. */
890 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
891 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
892 {{libcall, {{8, loop}, {24, unrolled_loop},
893 {2048, rep_prefix_4_byte}, {-1, libcall}}},
894 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
895 4, /* scalar_stmt_cost. */
896 2, /* scalar load_cost. */
897 2, /* scalar_store_cost. */
898 6, /* vec_stmt_cost. */
899 0, /* vec_to_scalar_cost. */
900 2, /* scalar_to_vec_cost. */
901 2, /* vec_align_load_cost. */
902 2, /* vec_unalign_load_cost. */
903 2, /* vec_store_cost. */
904 2, /* cond_taken_branch_cost. */
905 1, /* cond_not_taken_branch_cost. */
906 };
908 static const
909 struct processor_costs pentium4_cost = {
910 COSTS_N_INSNS (1), /* cost of an add instruction */
911 COSTS_N_INSNS (3), /* cost of a lea instruction */
912 COSTS_N_INSNS (4), /* variable shift costs */
913 COSTS_N_INSNS (4), /* constant shift costs */
914 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
915 COSTS_N_INSNS (15), /* HI */
916 COSTS_N_INSNS (15), /* SI */
917 COSTS_N_INSNS (15), /* DI */
918 COSTS_N_INSNS (15)}, /* other */
919 0, /* cost of multiply per each bit set */
920 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
921 COSTS_N_INSNS (56), /* HI */
922 COSTS_N_INSNS (56), /* SI */
923 COSTS_N_INSNS (56), /* DI */
924 COSTS_N_INSNS (56)}, /* other */
925 COSTS_N_INSNS (1), /* cost of movsx */
926 COSTS_N_INSNS (1), /* cost of movzx */
927 16, /* "large" insn */
928 6, /* MOVE_RATIO */
929 2, /* cost for loading QImode using movzbl */
930 {4, 5, 4}, /* cost of loading integer registers
931 in QImode, HImode and SImode.
932 Relative to reg-reg move (2). */
933 {2, 3, 2}, /* cost of storing integer registers */
934 2, /* cost of reg,reg fld/fst */
935 {2, 2, 6}, /* cost of loading fp registers
936 in SFmode, DFmode and XFmode */
937 {4, 4, 6}, /* cost of storing fp registers
938 in SFmode, DFmode and XFmode */
939 2, /* cost of moving MMX register */
940 {2, 2}, /* cost of loading MMX registers
941 in SImode and DImode */
942 {2, 2}, /* cost of storing MMX registers
943 in SImode and DImode */
944 12, /* cost of moving SSE register */
945 {12, 12, 12}, /* cost of loading SSE registers
946 in SImode, DImode and TImode */
947 {2, 2, 8}, /* cost of storing SSE registers
948 in SImode, DImode and TImode */
949 10, /* MMX or SSE register to integer */
950 8, /* size of l1 cache. */
951 256, /* size of l2 cache. */
952 64, /* size of prefetch block */
953 6, /* number of parallel prefetches */
954 2, /* Branch cost */
955 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
956 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
957 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
958 COSTS_N_INSNS (2), /* cost of FABS instruction. */
959 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
960 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
961 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
962 DUMMY_STRINGOP_ALGS},
963 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
964 {-1, libcall}}},
965 DUMMY_STRINGOP_ALGS},
966 1, /* scalar_stmt_cost. */
967 1, /* scalar load_cost. */
968 1, /* scalar_store_cost. */
969 1, /* vec_stmt_cost. */
970 1, /* vec_to_scalar_cost. */
971 1, /* scalar_to_vec_cost. */
972 1, /* vec_align_load_cost. */
973 2, /* vec_unalign_load_cost. */
974 1, /* vec_store_cost. */
975 3, /* cond_taken_branch_cost. */
976 1, /* cond_not_taken_branch_cost. */
977 };
979 static const
980 struct processor_costs nocona_cost = {
981 COSTS_N_INSNS (1), /* cost of an add instruction */
982 COSTS_N_INSNS (1), /* cost of a lea instruction */
983 COSTS_N_INSNS (1), /* variable shift costs */
984 COSTS_N_INSNS (1), /* constant shift costs */
985 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
986 COSTS_N_INSNS (10), /* HI */
987 COSTS_N_INSNS (10), /* SI */
988 COSTS_N_INSNS (10), /* DI */
989 COSTS_N_INSNS (10)}, /* other */
990 0, /* cost of multiply per each bit set */
991 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
992 COSTS_N_INSNS (66), /* HI */
993 COSTS_N_INSNS (66), /* SI */
994 COSTS_N_INSNS (66), /* DI */
995 COSTS_N_INSNS (66)}, /* other */
996 COSTS_N_INSNS (1), /* cost of movsx */
997 COSTS_N_INSNS (1), /* cost of movzx */
998 16, /* "large" insn */
999 17, /* MOVE_RATIO */
1000 4, /* cost for loading QImode using movzbl */
1001 {4, 4, 4}, /* cost of loading integer registers
1002 in QImode, HImode and SImode.
1003 Relative to reg-reg move (2). */
1004 {4, 4, 4}, /* cost of storing integer registers */
1005 3, /* cost of reg,reg fld/fst */
1006 {12, 12, 12}, /* cost of loading fp registers
1007 in SFmode, DFmode and XFmode */
1008 {4, 4, 4}, /* cost of storing fp registers
1009 in SFmode, DFmode and XFmode */
1010 6, /* cost of moving MMX register */
1011 {12, 12}, /* cost of loading MMX registers
1012 in SImode and DImode */
1013 {12, 12}, /* cost of storing MMX registers
1014 in SImode and DImode */
1015 6, /* cost of moving SSE register */
1016 {12, 12, 12}, /* cost of loading SSE registers
1017 in SImode, DImode and TImode */
1018 {12, 12, 12}, /* cost of storing SSE registers
1019 in SImode, DImode and TImode */
1020 8, /* MMX or SSE register to integer */
1021 8, /* size of l1 cache. */
1022 1024, /* size of l2 cache. */
1023 128, /* size of prefetch block */
1024 8, /* number of parallel prefetches */
1025 1, /* Branch cost */
1026 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
1027 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1028 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
1029 COSTS_N_INSNS (3), /* cost of FABS instruction. */
1030 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
1031 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
1032 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
1033 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
1034 {100000, unrolled_loop}, {-1, libcall}}}},
1035 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
1036 {-1, libcall}}},
1037 {libcall, {{24, loop}, {64, unrolled_loop},
1038 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1039 1, /* scalar_stmt_cost. */
1040 1, /* scalar load_cost. */
1041 1, /* scalar_store_cost. */
1042 1, /* vec_stmt_cost. */
1043 1, /* vec_to_scalar_cost. */
1044 1, /* scalar_to_vec_cost. */
1045 1, /* vec_align_load_cost. */
1046 2, /* vec_unalign_load_cost. */
1047 1, /* vec_store_cost. */
1048 3, /* cond_taken_branch_cost. */
1049 1, /* cond_not_taken_branch_cost. */
1050 };
1052 static const
1053 struct processor_costs core2_cost = {
1054 COSTS_N_INSNS (1), /* cost of an add instruction */
1055 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1056 COSTS_N_INSNS (1), /* variable shift costs */
1057 COSTS_N_INSNS (1), /* constant shift costs */
1058 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1059 COSTS_N_INSNS (3), /* HI */
1060 COSTS_N_INSNS (3), /* SI */
1061 COSTS_N_INSNS (3), /* DI */
1062 COSTS_N_INSNS (3)}, /* other */
1063 0, /* cost of multiply per each bit set */
1064 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
1065 COSTS_N_INSNS (22), /* HI */
1066 COSTS_N_INSNS (22), /* SI */
1067 COSTS_N_INSNS (22), /* DI */
1068 COSTS_N_INSNS (22)}, /* other */
1069 COSTS_N_INSNS (1), /* cost of movsx */
1070 COSTS_N_INSNS (1), /* cost of movzx */
1071 8, /* "large" insn */
1072 16, /* MOVE_RATIO */
1073 2, /* cost for loading QImode using movzbl */
1074 {6, 6, 6}, /* cost of loading integer registers
1075 in QImode, HImode and SImode.
1076 Relative to reg-reg move (2). */
1077 {4, 4, 4}, /* cost of storing integer registers */
1078 2, /* cost of reg,reg fld/fst */
1079 {6, 6, 6}, /* cost of loading fp registers
1080 in SFmode, DFmode and XFmode */
1081 {4, 4, 4}, /* cost of storing fp registers
1082 in SFmode, DFmode and XFmode */
1083 2, /* cost of moving MMX register */
1084 {6, 6}, /* cost of loading MMX registers
1085 in SImode and DImode */
1086 {4, 4}, /* cost of storing MMX registers
1087 in SImode and DImode */
1088 2, /* cost of moving SSE register */
1089 {6, 6, 6}, /* cost of loading SSE registers
1090 in SImode, DImode and TImode */
1091 {4, 4, 4}, /* cost of storing SSE registers
1092 in SImode, DImode and TImode */
1093 2, /* MMX or SSE register to integer */
1094 32, /* size of l1 cache. */
1095 2048, /* size of l2 cache. */
1096 128, /* size of prefetch block */
1097 8, /* number of parallel prefetches */
1098 3, /* Branch cost */
1099 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
1100 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
1101 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
1102 COSTS_N_INSNS (1), /* cost of FABS instruction. */
1103 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
1104 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
1105 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1106 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1107 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1108 {{libcall, {{8, loop}, {15, unrolled_loop},
1109 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1110 {libcall, {{24, loop}, {32, unrolled_loop},
1111 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1112 1, /* scalar_stmt_cost. */
1113 1, /* scalar load_cost. */
1114 1, /* scalar_store_cost. */
1115 1, /* vec_stmt_cost. */
1116 1, /* vec_to_scalar_cost. */
1117 1, /* scalar_to_vec_cost. */
1118 1, /* vec_align_load_cost. */
1119 2, /* vec_unalign_load_cost. */
1120 1, /* vec_store_cost. */
1121 3, /* cond_taken_branch_cost. */
1122 1, /* cond_not_taken_branch_cost. */
1123 };
1125 static const
1126 struct processor_costs atom_cost = {
1127 COSTS_N_INSNS (1), /* cost of an add instruction */
1128 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1129 COSTS_N_INSNS (1), /* variable shift costs */
1130 COSTS_N_INSNS (1), /* constant shift costs */
1131 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1132 COSTS_N_INSNS (4), /* HI */
1133 COSTS_N_INSNS (3), /* SI */
1134 COSTS_N_INSNS (4), /* DI */
1135 COSTS_N_INSNS (2)}, /* other */
1136 0, /* cost of multiply per each bit set */
1137 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1138 COSTS_N_INSNS (26), /* HI */
1139 COSTS_N_INSNS (42), /* SI */
1140 COSTS_N_INSNS (74), /* DI */
1141 COSTS_N_INSNS (74)}, /* other */
1142 COSTS_N_INSNS (1), /* cost of movsx */
1143 COSTS_N_INSNS (1), /* cost of movzx */
1144 8, /* "large" insn */
1145 17, /* MOVE_RATIO */
1146 2, /* cost for loading QImode using movzbl */
1147 {4, 4, 4}, /* cost of loading integer registers
1148 in QImode, HImode and SImode.
1149 Relative to reg-reg move (2). */
1150 {4, 4, 4}, /* cost of storing integer registers */
1151 4, /* cost of reg,reg fld/fst */
1152 {12, 12, 12}, /* cost of loading fp registers
1153 in SFmode, DFmode and XFmode */
1154 {6, 6, 8}, /* cost of storing fp registers
1155 in SFmode, DFmode and XFmode */
1156 2, /* cost of moving MMX register */
1157 {8, 8}, /* cost of loading MMX registers
1158 in SImode and DImode */
1159 {8, 8}, /* cost of storing MMX registers
1160 in SImode and DImode */
1161 2, /* cost of moving SSE register */
1162 {8, 8, 8}, /* cost of loading SSE registers
1163 in SImode, DImode and TImode */
1164 {8, 8, 8}, /* cost of storing SSE registers
1165 in SImode, DImode and TImode */
1166 5, /* MMX or SSE register to integer */
1167 32, /* size of l1 cache. */
1168 256, /* size of l2 cache. */
1169 64, /* size of prefetch block */
1170 6, /* number of parallel prefetches */
1171 3, /* Branch cost */
1172 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1173 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1174 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1175 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1176 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1177 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1178 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1179 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1180 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1181 {{libcall, {{8, loop}, {15, unrolled_loop},
1182 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1183 {libcall, {{24, loop}, {32, unrolled_loop},
1184 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1185 1, /* scalar_stmt_cost. */
1186 1, /* scalar load_cost. */
1187 1, /* scalar_store_cost. */
1188 1, /* vec_stmt_cost. */
1189 1, /* vec_to_scalar_cost. */
1190 1, /* scalar_to_vec_cost. */
1191 1, /* vec_align_load_cost. */
1192 2, /* vec_unalign_load_cost. */
1193 1, /* vec_store_cost. */
1194 3, /* cond_taken_branch_cost. */
1195 1, /* cond_not_taken_branch_cost. */
1196 };
1198 /* Generic64 should produce code tuned for Nocona and K8. */
1199 static const
1200 struct processor_costs generic64_cost = {
1201 COSTS_N_INSNS (1), /* cost of an add instruction */
1202 /* On all chips taken into consideration lea is 2 cycles or more. With
1203 this cost, however, our current implementation of synth_mult results in
1204 the use of unnecessary temporary registers, causing regressions on several
1205 SPECfp benchmarks. */
1206 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1207 COSTS_N_INSNS (1), /* variable shift costs */
1208 COSTS_N_INSNS (1), /* constant shift costs */
1209 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1210 COSTS_N_INSNS (4), /* HI */
1211 COSTS_N_INSNS (3), /* SI */
1212 COSTS_N_INSNS (4), /* DI */
1213 COSTS_N_INSNS (2)}, /* other */
1214 0, /* cost of multiply per each bit set */
1215 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1216 COSTS_N_INSNS (26), /* HI */
1217 COSTS_N_INSNS (42), /* SI */
1218 COSTS_N_INSNS (74), /* DI */
1219 COSTS_N_INSNS (74)}, /* other */
1220 COSTS_N_INSNS (1), /* cost of movsx */
1221 COSTS_N_INSNS (1), /* cost of movzx */
1222 8, /* "large" insn */
1223 17, /* MOVE_RATIO */
1224 4, /* cost for loading QImode using movzbl */
1225 {4, 4, 4}, /* cost of loading integer registers
1226 in QImode, HImode and SImode.
1227 Relative to reg-reg move (2). */
1228 {4, 4, 4}, /* cost of storing integer registers */
1229 4, /* cost of reg,reg fld/fst */
1230 {12, 12, 12}, /* cost of loading fp registers
1231 in SFmode, DFmode and XFmode */
1232 {6, 6, 8}, /* cost of storing fp registers
1233 in SFmode, DFmode and XFmode */
1234 2, /* cost of moving MMX register */
1235 {8, 8}, /* cost of loading MMX registers
1236 in SImode and DImode */
1237 {8, 8}, /* cost of storing MMX registers
1238 in SImode and DImode */
1239 2, /* cost of moving SSE register */
1240 {8, 8, 8}, /* cost of loading SSE registers
1241 in SImode, DImode and TImode */
1242 {8, 8, 8}, /* cost of storing SSE registers
1243 in SImode, DImode and TImode */
1244 5, /* MMX or SSE register to integer */
1245 32, /* size of l1 cache. */
1246 512, /* size of l2 cache. */
1247 64, /* size of prefetch block */
1248 6, /* number of parallel prefetches */
1249 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this value
1250 is increased to the perhaps more appropriate value of 5. */
1251 3, /* Branch cost */
1252 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1253 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1254 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1255 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1256 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1257 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1258 {DUMMY_STRINGOP_ALGS,
1259 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1260 {DUMMY_STRINGOP_ALGS,
1261 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1262 1, /* scalar_stmt_cost. */
1263 1, /* scalar load_cost. */
1264 1, /* scalar_store_cost. */
1265 1, /* vec_stmt_cost. */
1266 1, /* vec_to_scalar_cost. */
1267 1, /* scalar_to_vec_cost. */
1268 1, /* vec_align_load_cost. */
1269 2, /* vec_unalign_load_cost. */
1270 1, /* vec_store_cost. */
1271 3, /* cond_taken_branch_cost. */
1272 1, /* cond_not_taken_branch_cost. */
1273 };
1275 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
1276 static const
1277 struct processor_costs generic32_cost = {
1278 COSTS_N_INSNS (1), /* cost of an add instruction */
1279 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1280 COSTS_N_INSNS (1), /* variable shift costs */
1281 COSTS_N_INSNS (1), /* constant shift costs */
1282 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1283 COSTS_N_INSNS (4), /* HI */
1284 COSTS_N_INSNS (3), /* SI */
1285 COSTS_N_INSNS (4), /* DI */
1286 COSTS_N_INSNS (2)}, /* other */
1287 0, /* cost of multiply per each bit set */
1288 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1289 COSTS_N_INSNS (26), /* HI */
1290 COSTS_N_INSNS (42), /* SI */
1291 COSTS_N_INSNS (74), /* DI */
1292 COSTS_N_INSNS (74)}, /* other */
1293 COSTS_N_INSNS (1), /* cost of movsx */
1294 COSTS_N_INSNS (1), /* cost of movzx */
1295 8, /* "large" insn */
1296 17, /* MOVE_RATIO */
1297 4, /* cost for loading QImode using movzbl */
1298 {4, 4, 4}, /* cost of loading integer registers
1299 in QImode, HImode and SImode.
1300 Relative to reg-reg move (2). */
1301 {4, 4, 4}, /* cost of storing integer registers */
1302 4, /* cost of reg,reg fld/fst */
1303 {12, 12, 12}, /* cost of loading fp registers
1304 in SFmode, DFmode and XFmode */
1305 {6, 6, 8}, /* cost of storing fp registers
1306 in SFmode, DFmode and XFmode */
1307 2, /* cost of moving MMX register */
1308 {8, 8}, /* cost of loading MMX registers
1309 in SImode and DImode */
1310 {8, 8}, /* cost of storing MMX registers
1311 in SImode and DImode */
1312 2, /* cost of moving SSE register */
1313 {8, 8, 8}, /* cost of loading SSE registers
1314 in SImode, DImode and TImode */
1315 {8, 8, 8}, /* cost of storing SSE registers
1316 in SImode, DImode and TImode */
1317 5, /* MMX or SSE register to integer */
1318 32, /* size of l1 cache. */
1319 256, /* size of l2 cache. */
1320 64, /* size of prefetch block */
1321 6, /* number of parallel prefetches */
1322 3, /* Branch cost */
1323 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1324 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1325 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1326 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1327 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1328 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1329 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1330 DUMMY_STRINGOP_ALGS},
1331 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1332 DUMMY_STRINGOP_ALGS},
1333 1, /* scalar_stmt_cost. */
1334 1, /* scalar load_cost. */
1335 1, /* scalar_store_cost. */
1336 1, /* vec_stmt_cost. */
1337 1, /* vec_to_scalar_cost. */
1338 1, /* scalar_to_vec_cost. */
1339 1, /* vec_align_load_cost. */
1340 2, /* vec_unalign_load_cost. */
1341 1, /* vec_store_cost. */
1342 3, /* cond_taken_branch_cost. */
1343 1, /* cond_not_taken_branch_cost. */
1344 };
1346 const struct processor_costs *ix86_cost = &pentium_cost;
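/* Simplified sketch of how this default is replaced (assuming the usual
   -mtune/-Os handling later in this file and the processor_target_table
   used there; the exact flow may differ):

     if (optimize_size)
       ix86_cost = &ix86_size_cost;
     else
       ix86_cost = processor_target_table[ix86_tune].cost;

   so any code that reads ix86_cost->... automatically follows the selected
   tuning.  */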
1348 /* Processor feature/optimization bitmasks. */
1349 #define m_386 (1<<PROCESSOR_I386)
1350 #define m_486 (1<<PROCESSOR_I486)
1351 #define m_PENT (1<<PROCESSOR_PENTIUM)
1352 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1353 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1354 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1355 #define m_CORE2 (1<<PROCESSOR_CORE2)
1356 #define m_ATOM (1<<PROCESSOR_ATOM)
1358 #define m_GEODE (1<<PROCESSOR_GEODE)
1359 #define m_K6 (1<<PROCESSOR_K6)
1360 #define m_K6_GEODE (m_K6 | m_GEODE)
1361 #define m_K8 (1<<PROCESSOR_K8)
1362 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1363 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1364 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1365 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1366 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1)
1368 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1369 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1371 /* Generic instruction choice should be a common subset of the supported CPUs
1372 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1373 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
1375 /* Feature tests against the various tunings. */
1376 unsigned char ix86_tune_features[X86_TUNE_LAST];
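/* Sketch of how this array is filled from initial_ix86_tune_features below
   (assuming the usual option-handling loop, with ix86_tune_mask being
   1u << ix86_tune for the selected -mtune processor):

     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
         = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   so each entry of the table below is simply a bitmask over the m_*
   processor bits defined above.  */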
1378 /* Feature tests against the various tunings used to create ix86_tune_features
1379 based on the processor mask. */
1380 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1381 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1382 negatively, so enabling it for Generic64 seems like a good code-size
1383 tradeoff. We can't enable it for 32bit generic because it does not
1384 work well with PPro-based chips. */
1385 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1387 /* X86_TUNE_PUSH_MEMORY */
1388 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1389 | m_NOCONA | m_CORE2 | m_GENERIC,
1391 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1392 m_486 | m_PENT,
1394 /* X86_TUNE_UNROLL_STRLEN */
1395 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1396 | m_CORE2 | m_GENERIC,
1398 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1399 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1401 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1402 on simulation result. But after P4 was made, no performance benefit
1403 was observed with branch hints. It also increases the code size.
1404 As a result, icc never generates branch hints. */
1405 0,
1407 /* X86_TUNE_DOUBLE_WITH_ADD */
1408 ~m_386,
1410 /* X86_TUNE_USE_SAHF */
1411 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_PENT4
1412 | m_NOCONA | m_CORE2 | m_GENERIC,
1414 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1415 partial dependencies. */
1416 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1417 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1419 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1420 register stalls on the Generic32 compilation setting as well.  However,
1421 in the current implementation the partial register stalls are not eliminated
1422 very well - they can be introduced via subregs synthesized by combine
1423 and can happen in caller/callee saving sequences.  Because this option
1424 pays back little on PPro-based chips and is in conflict with the partial-register
1425 dependencies used by Athlon/P4-based chips, it is better to leave it off
1426 for generic32 for now. */
1427 m_PPRO,
1429 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1430 m_CORE2 | m_GENERIC,
1432 /* X86_TUNE_USE_HIMODE_FIOP */
1433 m_386 | m_486 | m_K6_GEODE,
1435 /* X86_TUNE_USE_SIMODE_FIOP */
1436 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1438 /* X86_TUNE_USE_MOV0 */
1439 m_K6,
1441 /* X86_TUNE_USE_CLTD */
1442 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1444 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1445 m_PENT4,
1447 /* X86_TUNE_SPLIT_LONG_MOVES */
1448 m_PPRO,
1450 /* X86_TUNE_READ_MODIFY_WRITE */
1451 ~m_PENT,
1453 /* X86_TUNE_READ_MODIFY */
1454 ~(m_PENT | m_PPRO),
1456 /* X86_TUNE_PROMOTE_QIMODE */
1457 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1458 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1460 /* X86_TUNE_FAST_PREFIX */
1461 ~(m_PENT | m_486 | m_386),
1463 /* X86_TUNE_SINGLE_STRINGOP */
1464 m_386 | m_PENT4 | m_NOCONA,
1466 /* X86_TUNE_QIMODE_MATH */
1467 ~0,
1469 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1470 register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL, this option
1471 might be considered for Generic32 if our scheme for avoiding partial
1472 stalls were more effective. */
1473 ~m_PPRO,
1475 /* X86_TUNE_PROMOTE_QI_REGS */
1476 0,
1478 /* X86_TUNE_PROMOTE_HI_REGS */
1479 m_PPRO,
1481 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1482 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1483 | m_CORE2 | m_GENERIC,
1485 /* X86_TUNE_ADD_ESP_8 */
1486 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1487 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1489 /* X86_TUNE_SUB_ESP_4 */
1490 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1491 | m_GENERIC,
1493 /* X86_TUNE_SUB_ESP_8 */
1494 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1495 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1497 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1498 for DFmode copies */
1499 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1500 | m_GENERIC | m_GEODE),
1502 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1503 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1505 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1506 conflict here between PPro/Pentium4-based chips that treat 128-bit
1507 SSE registers as single units and K8-based chips that divide SSE
1508 registers into two 64-bit halves.  This knob promotes all store destinations
1509 to be 128-bit to allow register renaming on 128-bit SSE units, but usually
1510 results in one extra micro-op on 64-bit SSE units.  Experimental results
1511 show that disabling this option on P4 brings an over 20% SPECfp regression,
1512 while enabling it on K8 brings roughly a 2.4% regression that can be partly
1513 masked by careful scheduling of moves. */
1514 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1515 | m_AMDFAM10 | m_BDVER1,
1517 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1518 m_AMDFAM10 | m_BDVER1,
1520 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1521 m_BDVER1,
1523 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1524 m_BDVER1,
1526 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1527 are resolved on SSE register parts instead of whole registers, so we may
1528 maintain just the lower part of scalar values in the proper format, leaving the
1529 upper part undefined. */
1530 m_ATHLON_K8,
1532 /* X86_TUNE_SSE_TYPELESS_STORES */
1533 m_AMD_MULTIPLE,
1535 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1536 m_PPRO | m_PENT4 | m_NOCONA,
1538 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1539 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1541 /* X86_TUNE_PROLOGUE_USING_MOVE */
1542 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1544 /* X86_TUNE_EPILOGUE_USING_MOVE */
1545 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1547 /* X86_TUNE_SHIFT1 */
1548 ~m_486,
1550 /* X86_TUNE_USE_FFREEP */
1551 m_AMD_MULTIPLE,
1553 /* X86_TUNE_INTER_UNIT_MOVES */
1554 ~(m_AMD_MULTIPLE | m_GENERIC),
1556 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1557 ~(m_AMDFAM10 | m_BDVER1),
1559 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1560 than 4 branch instructions in the 16 byte window. */
1561 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1562 | m_GENERIC,
1564 /* X86_TUNE_SCHEDULE */
1565 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1566 | m_GENERIC,
1568 /* X86_TUNE_USE_BT */
1569 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1571 /* X86_TUNE_USE_INCDEC */
1572 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1574 /* X86_TUNE_PAD_RETURNS */
1575 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1577 /* X86_TUNE_EXT_80387_CONSTANTS */
1578 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1579 | m_CORE2 | m_GENERIC,
1581 /* X86_TUNE_SHORTEN_X87_SSE */
1582 ~m_K8,
1584 /* X86_TUNE_AVOID_VECTOR_DECODE */
1585 m_K8 | m_GENERIC64,
1587 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
1588 and SImode multiply, but the 386 and 486 do HImode multiply faster. */
1589 ~(m_386 | m_486),
1591 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of a 32-bit constant and memory is
1592 a vector path on AMD machines. */
1593 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1595 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of an 8-bit constant is a vector path on AMD
1596 machines. */
1597 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1599 /* X86_TUNE_MOVE_M1_VIA_OR: On Pentiums, it is faster to load -1 via OR
1600 than via a MOV. */
1601 m_PENT,
1603 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1604 but one byte longer. */
1605 m_PENT,
1607 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
1608 operand that cannot be represented using a modRM byte.  The XOR
1609 replacement is long-decoded, so this split helps here as well. */
1610 m_K6,
1612 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
1613 from FP to FP. */
1614 m_AMDFAM10 | m_GENERIC,
1616 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1617 from integer to FP. */
1618 m_AMDFAM10,
1620 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1621 with a subsequent conditional jump instruction into a single
1622 compare-and-branch uop. */
1623 m_CORE2 | m_BDVER1,
1625 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1626 will impact LEA instruction selection. */
1627 m_ATOM,
1628 };
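/* Illustrative sketch (not in the original file; the helper name is
   hypothetical): once override_options has narrowed the table above down to
   the selected CPU, consulting a tuning is a plain array lookup.  */
#if 0
static int
example_tune_wants_agu_p (void)
{
  return ix86_tune_features[X86_TUNE_OPT_AGU] != 0;
}
#endif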
1630 /* Feature tests against the various architecture variations. */
1631 unsigned char ix86_arch_features[X86_ARCH_LAST];
1633 /* Feature tests against the various architecture variations, used to create
1634 ix86_arch_features based on the processor mask. */
1635 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1636 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1637 ~(m_386 | m_486 | m_PENT | m_K6),
1639 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1640 ~m_386,
1642 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1643 ~(m_386 | m_486),
1645 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1646 ~m_386,
1648 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1649 ~m_386,
1650 };
1652 static const unsigned int x86_accumulate_outgoing_args
1653 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1654 | m_GENERIC;
1656 static const unsigned int x86_arch_always_fancy_math_387
1657 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1658 | m_NOCONA | m_CORE2 | m_GENERIC;
1660 static enum stringop_alg stringop_alg = no_stringop;
1662 /* In case the average insn count for single function invocation is
1663 lower than this constant, emit fast (but longer) prologue and
1664 epilogue code. */
1665 #define FAST_PROLOGUE_INSN_COUNT 20
1667 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1668 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1669 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1670 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1672 /* Array of the smallest class containing reg number REGNO, indexed by
1673 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1675 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1677 /* ax, dx, cx, bx */
1678 AREG, DREG, CREG, BREG,
1679 /* si, di, bp, sp */
1680 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1681 /* FP registers */
1682 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1683 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1684 /* arg pointer */
1685 NON_Q_REGS,
1686 /* flags, fpsr, fpcr, frame */
1687 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1688 /* SSE registers */
1689 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1690 SSE_REGS, SSE_REGS,
1691 /* MMX registers */
1692 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1693 MMX_REGS, MMX_REGS,
1694 /* REX registers */
1695 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1696 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1697 /* SSE REX registers */
1698 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1699 SSE_REGS, SSE_REGS,
1702 /* The "default" register map used in 32bit mode. */
1704 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1706 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1707 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1708 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1709 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1710 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1711 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1712 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1715 /* The "default" register map used in 64bit mode. */
1717 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1719 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1720 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1721 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1722 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1723 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1724 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
1725 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1728 /* Define the register numbers to be used in Dwarf debugging information.
1729 The SVR4 reference port C compiler uses the following register numbers
1730 in its Dwarf output code:
1731 0 for %eax (gcc regno = 0)
1732 1 for %ecx (gcc regno = 2)
1733 2 for %edx (gcc regno = 1)
1734 3 for %ebx (gcc regno = 3)
1735 4 for %esp (gcc regno = 7)
1736 5 for %ebp (gcc regno = 6)
1737 6 for %esi (gcc regno = 4)
1738 7 for %edi (gcc regno = 5)
1739 The following three DWARF register numbers are never generated by
1740 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1741 believes these numbers have these meanings.
1742 8 for %eip (no gcc equivalent)
1743 9 for %eflags (gcc regno = 17)
1744 10 for %trapno (no gcc equivalent)
1745 It is not at all clear how we should number the FP stack registers
1746 for the x86 architecture. If the version of SDB on x86/svr4 were
1747 a bit less brain dead with respect to floating-point then we would
1748 have a precedent to follow with respect to DWARF register numbers
1749 for x86 FP registers, but the SDB on x86/svr4 is so completely
1750 broken with respect to FP registers that it is hardly worth thinking
1751 of it as something to strive for compatibility with.
1752 The version of x86/svr4 SDB I have at the moment does (partially)
1753 seem to believe that DWARF register number 11 is associated with
1754 the x86 register %st(0), but that's about all. Higher DWARF
1755 register numbers don't seem to be associated with anything in
1756 particular, and even for DWARF regno 11, SDB only seems to under-
1757 stand that it should say that a variable lives in %st(0) (when
1758 asked via an `=' command) if we said it was in DWARF regno 11,
1759 but SDB still prints garbage when asked for the value of the
1760 variable in question (via a `/' command).
1761 (Also note that the labels SDB prints for various FP stack regs
1762 when doing an `x' command are all wrong.)
1763 Note that these problems generally don't affect the native SVR4
1764 C compiler because it doesn't allow the use of -O with -g and
1765 because when it is *not* optimizing, it allocates a memory
1766 location for each floating-point variable, and the memory
1767 location is what gets described in the DWARF AT_location
1768 attribute for the variable in question.
1769 Regardless of the severe mental illness of the x86/svr4 SDB, we
1770 do something sensible here and we use the following DWARF
1771 register numbers. Note that these are all stack-top-relative
1772 numbers.
1773 11 for %st(0) (gcc regno = 8)
1774 12 for %st(1) (gcc regno = 9)
1775 13 for %st(2) (gcc regno = 10)
1776 14 for %st(3) (gcc regno = 11)
1777 15 for %st(4) (gcc regno = 12)
1778 16 for %st(5) (gcc regno = 13)
1779 17 for %st(6) (gcc regno = 14)
1780 18 for %st(7) (gcc regno = 15)
1782 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1784 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1785 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1786 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1787 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1788 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1789 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1790 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
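/* Worked example (added for illustration; the helper is hypothetical): with
   the SVR4 numbering documented above, the DWARF number for a GCC hard
   register is a plain table lookup, e.g. svr4_dbx_register_map[7] is 4
   (%esp) and svr4_dbx_register_map[8] is 11 (%st(0)).  */
#if 0
static int
example_svr4_dwarf_regno (unsigned int gcc_regno)
{
  return svr4_dbx_register_map[gcc_regno];
}
#endif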
1793 /* Test and compare insns in i386.md store the information needed to
1794 generate branch and scc insns here. */
1796 rtx ix86_compare_op0 = NULL_RTX;
1797 rtx ix86_compare_op1 = NULL_RTX;
1799 /* Define parameter passing and return registers. */
1801 static int const x86_64_int_parameter_registers[6] =
1803 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1806 static int const x86_64_ms_abi_int_parameter_registers[4] =
1808 CX_REG, DX_REG, R8_REG, R9_REG
1811 static int const x86_64_int_return_registers[4] =
1813 AX_REG, DX_REG, DI_REG, SI_REG
1816 /* Define the structure for the machine field in struct function. */
1818 struct GTY(()) stack_local_entry {
1819 unsigned short mode;
1820 unsigned short n;
1821 rtx rtl;
1822 struct stack_local_entry *next;
1825 /* Structure describing stack frame layout.
1826 Stack grows downward:
1828 [arguments]
1829 <- ARG_POINTER
1830 saved pc
1832 saved frame pointer if frame_pointer_needed
1833 <- HARD_FRAME_POINTER
1834 [saved regs]
1836 [padding0]
1838 [saved SSE regs]
1840 [padding1] \
1842 [va_arg registers] (
1843 > to_allocate <- FRAME_POINTER
1844 [frame] (
1846 [padding2] /
1848 struct ix86_frame
1850 int padding0;
1851 int nsseregs;
1852 int nregs;
1853 int padding1;
1854 int va_arg_size;
1855 int red_zone_size;
1856 HOST_WIDE_INT frame;
1857 int padding2;
1858 int outgoing_arguments_size;
1860 HOST_WIDE_INT to_allocate;
1861 /* The offsets relative to ARG_POINTER. */
1862 HOST_WIDE_INT frame_pointer_offset;
1863 HOST_WIDE_INT hard_frame_pointer_offset;
1864 HOST_WIDE_INT stack_pointer_offset;
1866 /* When save_regs_using_mov is set, emit prologue using
1867 move instead of push instructions. */
1868 bool save_regs_using_mov;
1871 /* Code model option. */
1872 enum cmodel ix86_cmodel;
1873 /* Asm dialect. */
1874 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1875 /* TLS dialects. */
1876 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1878 /* Which unit we are generating floating point math for. */
1879 enum fpmath_unit ix86_fpmath;
1881 /* Which cpu are we scheduling for. */
1882 enum attr_cpu ix86_schedule;
1884 /* Which cpu are we optimizing for. */
1885 enum processor_type ix86_tune;
1887 /* Which instruction set architecture to use. */
1888 enum processor_type ix86_arch;
1890 /* True if the SSE prefetch instruction is not a NOP. */
1891 int x86_prefetch_sse;
1893 /* ix86_regparm_string as a number */
1894 static int ix86_regparm;
1896 /* -mstackrealign option */
1897 extern int ix86_force_align_arg_pointer;
1898 static const char ix86_force_align_arg_pointer_string[]
1899 = "force_align_arg_pointer";
1901 static rtx (*ix86_gen_leave) (void);
1902 static rtx (*ix86_gen_pop1) (rtx);
1903 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1904 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1905 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1906 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1907 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1908 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1910 /* Preferred alignment for stack boundary in bits. */
1911 unsigned int ix86_preferred_stack_boundary;
1913 /* Alignment for incoming stack boundary in bits specified at
1914 command line. */
1915 static unsigned int ix86_user_incoming_stack_boundary;
1917 /* Default alignment for incoming stack boundary in bits. */
1918 static unsigned int ix86_default_incoming_stack_boundary;
1920 /* Alignment for incoming stack boundary in bits. */
1921 unsigned int ix86_incoming_stack_boundary;
1923 /* The abi used by target. */
1924 enum calling_abi ix86_abi;
1926 /* Values 1-5: see jump.c */
1927 int ix86_branch_cost;
1929 /* Calling abi specific va_list type nodes. */
1930 static GTY(()) tree sysv_va_list_type_node;
1931 static GTY(()) tree ms_va_list_type_node;
1933 /* Variables which are this size or smaller are put in the data/bss
1934 or ldata/lbss sections. */
1936 int ix86_section_threshold = 65536;
1938 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1939 char internal_label_prefix[16];
1940 int internal_label_prefix_len;
1942 /* Fence to use after loop using movnt. */
1943 tree x86_mfence;
1945 /* Register class used for passing a given 64-bit part of the argument.
1946 These represent classes as documented by the psABI, with the exception
1947 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
1948 uses an SFmode or DFmode move instead of DImode to avoid reformatting penalties.
1950 Similarly, we play games with INTEGERSI_CLASS to use cheaper SImode moves
1951 whenever possible (the upper half does contain padding). */
1952 enum x86_64_reg_class
1954 X86_64_NO_CLASS,
1955 X86_64_INTEGER_CLASS,
1956 X86_64_INTEGERSI_CLASS,
1957 X86_64_SSE_CLASS,
1958 X86_64_SSESF_CLASS,
1959 X86_64_SSEDF_CLASS,
1960 X86_64_SSEUP_CLASS,
1961 X86_64_X87_CLASS,
1962 X86_64_X87UP_CLASS,
1963 X86_64_COMPLEX_X87_CLASS,
1964 X86_64_MEMORY_CLASS
1967 #define MAX_CLASSES 4
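/* Worked example (added for illustration; not in the original source): under
   the classification above, an argument of type
       struct { double d; int i; }
   spans two eightbytes: the first is passed in an SSE register and refined
   to X86_64_SSEDF_CLASS, the second goes in a GPR and is refined to
   X86_64_INTEGERSI_CLASS because only its low 32 bits carry data.  */
#if 0
static const enum x86_64_reg_class example_struct_classes[MAX_CLASSES] =
{
  X86_64_SSEDF_CLASS,		/* first eightbyte: the double */
  X86_64_INTEGERSI_CLASS	/* second eightbyte: the int plus padding */
};
#endif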
1969 /* Table of constants used by fldpi, fldln2, etc.... */
1970 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1971 static bool ext_80387_constants_init = 0;
1974 static struct machine_function * ix86_init_machine_status (void);
1975 static rtx ix86_function_value (const_tree, const_tree, bool);
1976 static bool ix86_function_value_regno_p (const unsigned int);
1977 static rtx ix86_static_chain (const_tree, bool);
1978 static int ix86_function_regparm (const_tree, const_tree);
1979 static void ix86_compute_frame_layout (struct ix86_frame *);
1980 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1981 rtx, rtx, int);
1982 static void ix86_add_new_builtins (int);
1983 static rtx ix86_expand_vec_perm_builtin (tree);
1984 static tree ix86_canonical_va_list_type (tree);
1986 enum ix86_function_specific_strings
1988 IX86_FUNCTION_SPECIFIC_ARCH,
1989 IX86_FUNCTION_SPECIFIC_TUNE,
1990 IX86_FUNCTION_SPECIFIC_FPMATH,
1991 IX86_FUNCTION_SPECIFIC_MAX
1994 static char *ix86_target_string (int, int, const char *, const char *,
1995 const char *, bool);
1996 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1997 static void ix86_function_specific_save (struct cl_target_option *);
1998 static void ix86_function_specific_restore (struct cl_target_option *);
1999 static void ix86_function_specific_print (FILE *, int,
2000 struct cl_target_option *);
2001 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2002 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
2003 static bool ix86_can_inline_p (tree, tree);
2004 static void ix86_set_current_function (tree);
2005 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2007 static enum calling_abi ix86_function_abi (const_tree);
2010 #ifndef SUBTARGET32_DEFAULT_CPU
2011 #define SUBTARGET32_DEFAULT_CPU "i386"
2012 #endif
2014 /* The svr4 ABI for the i386 says that records and unions are returned
2015 in memory. */
2016 #ifndef DEFAULT_PCC_STRUCT_RETURN
2017 #define DEFAULT_PCC_STRUCT_RETURN 1
2018 #endif
2020 /* Whether -mtune= or -march= were specified */
2021 static int ix86_tune_defaulted;
2022 static int ix86_arch_specified;
2024 /* Bit flags that specify the ISA we are compiling for. */
2025 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
2027 /* A mask of ix86_isa_flags that includes bit X if X
2028 was set or cleared on the command line. */
2029 static int ix86_isa_flags_explicit;
2031 /* Define a set of ISAs which are available when a given ISA is
2032 enabled. MMX and SSE ISAs are handled separately. */
2034 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
2035 #define OPTION_MASK_ISA_3DNOW_SET \
2036 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
2038 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
2039 #define OPTION_MASK_ISA_SSE2_SET \
2040 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
2041 #define OPTION_MASK_ISA_SSE3_SET \
2042 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
2043 #define OPTION_MASK_ISA_SSSE3_SET \
2044 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
2045 #define OPTION_MASK_ISA_SSE4_1_SET \
2046 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
2047 #define OPTION_MASK_ISA_SSE4_2_SET \
2048 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
2049 #define OPTION_MASK_ISA_AVX_SET \
2050 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
2051 #define OPTION_MASK_ISA_FMA_SET \
2052 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
2054 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
2055 as -msse4.2. */
2056 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
2058 #define OPTION_MASK_ISA_SSE4A_SET \
2059 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
2060 #define OPTION_MASK_ISA_FMA4_SET \
2061 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
2062 | OPTION_MASK_ISA_AVX_SET)
2063 #define OPTION_MASK_ISA_XOP_SET \
2064 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
2065 #define OPTION_MASK_ISA_LWP_SET \
2066 OPTION_MASK_ISA_LWP
2068 /* AES and PCLMUL need SSE2 because they use XMM registers. */
2069 #define OPTION_MASK_ISA_AES_SET \
2070 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
2071 #define OPTION_MASK_ISA_PCLMUL_SET \
2072 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
2074 #define OPTION_MASK_ISA_ABM_SET \
2075 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
2077 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
2078 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
2079 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
2080 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
2081 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
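/* Illustrative sketch (not in the original file; the helper name is made up):
   the *_SET macros chain, so a single OR turns on an ISA together with
   everything it depends on.  This mirrors what ix86_handle_option does below
   for -msse4.2, which transitively enables SSE4.1, SSSE3, SSE3, SSE2 and SSE.  */
#if 0
static void
example_enable_sse4_2 (void)
{
  ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
  ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
}
#endif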
2083 /* Define a set of ISAs which aren't available when a given ISA is
2084 disabled. MMX and SSE ISAs are handled separately. */
2086 #define OPTION_MASK_ISA_MMX_UNSET \
2087 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
2088 #define OPTION_MASK_ISA_3DNOW_UNSET \
2089 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
2090 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
2092 #define OPTION_MASK_ISA_SSE_UNSET \
2093 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2094 #define OPTION_MASK_ISA_SSE2_UNSET \
2095 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2096 #define OPTION_MASK_ISA_SSE3_UNSET \
2097 (OPTION_MASK_ISA_SSE3 \
2098 | OPTION_MASK_ISA_SSSE3_UNSET \
2099 | OPTION_MASK_ISA_SSE4A_UNSET )
2100 #define OPTION_MASK_ISA_SSSE3_UNSET \
2101 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2102 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2103 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2104 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2105 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2106 #define OPTION_MASK_ISA_AVX_UNSET \
2107 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2108 | OPTION_MASK_ISA_FMA4_UNSET)
2109 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2111 /* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should be the same
2112 as -mno-sse4.1. */
2113 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2115 #define OPTION_MASK_ISA_SSE4A_UNSET \
2116 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2118 #define OPTION_MASK_ISA_FMA4_UNSET \
2119 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2120 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2121 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2123 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2124 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2125 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2126 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2127 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2128 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2129 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2130 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
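/* Illustrative sketch (not in the original file; the helper name is made up):
   the *_UNSET macros chain in the opposite direction, so disabling an ISA
   also clears whatever depends on it.  For instance -mno-sse2, handled in
   ix86_handle_option below, ANDs out OPTION_MASK_ISA_SSE2_UNSET, which
   transitively covers SSE3, SSSE3, SSE4A, SSE4.1, SSE4.2, AVX, FMA, FMA4
   and XOP.  */
#if 0
static void
example_disable_sse2 (void)
{
  ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
  ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
}
#endif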
2132 /* Vectorization library interface and handlers. */
2133 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2134 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2135 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2137 /* Processor target table, indexed by processor number */
2138 struct ptt
2140 const struct processor_costs *cost; /* Processor costs */
2141 const int align_loop; /* Default alignments. */
2142 const int align_loop_max_skip;
2143 const int align_jump;
2144 const int align_jump_max_skip;
2145 const int align_func;
2148 static const struct ptt processor_target_table[PROCESSOR_max] =
2150 {&i386_cost, 4, 3, 4, 3, 4},
2151 {&i486_cost, 16, 15, 16, 15, 16},
2152 {&pentium_cost, 16, 7, 16, 7, 16},
2153 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2154 {&geode_cost, 0, 0, 0, 0, 0},
2155 {&k6_cost, 32, 7, 32, 7, 32},
2156 {&athlon_cost, 16, 7, 16, 7, 16},
2157 {&pentium4_cost, 0, 0, 0, 0, 0},
2158 {&k8_cost, 16, 7, 16, 7, 16},
2159 {&nocona_cost, 0, 0, 0, 0, 0},
2160 {&core2_cost, 16, 10, 16, 10, 16},
2161 {&generic32_cost, 16, 7, 16, 7, 16},
2162 {&generic64_cost, 16, 10, 16, 10, 16},
2163 {&amdfam10_cost, 32, 24, 32, 7, 32},
2164 {&bdver1_cost, 32, 24, 32, 7, 32},
2165 {&atom_cost, 16, 7, 16, 7, 16}
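/* Reading the table above (note added for illustration): each row follows
   the struct ptt layout, so e.g. the core2 entry
   {&core2_cost, 16, 10, 16, 10, 16} means: use core2_cost, align loops and
   jumps to 16 bytes skipping at most 10 bytes of padding, and align
   functions to 16 bytes.  */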
2168 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2170 "generic",
2171 "i386",
2172 "i486",
2173 "pentium",
2174 "pentium-mmx",
2175 "pentiumpro",
2176 "pentium2",
2177 "pentium3",
2178 "pentium4",
2179 "pentium-m",
2180 "prescott",
2181 "nocona",
2182 "core2",
2183 "atom",
2184 "geode",
2185 "k6",
2186 "k6-2",
2187 "k6-3",
2188 "athlon",
2189 "athlon-4",
2190 "k8",
2191 "amdfam10",
2192 "bdver1"
2195 /* Implement TARGET_HANDLE_OPTION. */
2197 static bool
2198 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2200 switch (code)
2202 case OPT_mmmx:
2203 if (value)
2205 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2206 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2208 else
2210 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2211 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2213 return true;
2215 case OPT_m3dnow:
2216 if (value)
2218 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2219 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2221 else
2223 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2224 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2226 return true;
2228 case OPT_m3dnowa:
2229 return false;
2231 case OPT_msse:
2232 if (value)
2234 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2235 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2237 else
2239 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2240 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2242 return true;
2244 case OPT_msse2:
2245 if (value)
2247 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2248 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2250 else
2252 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2253 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2255 return true;
2257 case OPT_msse3:
2258 if (value)
2260 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2261 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2263 else
2265 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2266 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2268 return true;
2270 case OPT_mssse3:
2271 if (value)
2273 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2274 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2276 else
2278 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2279 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2281 return true;
2283 case OPT_msse4_1:
2284 if (value)
2286 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2287 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2289 else
2291 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2292 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2294 return true;
2296 case OPT_msse4_2:
2297 if (value)
2299 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2300 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2302 else
2304 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2305 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2307 return true;
2309 case OPT_mavx:
2310 if (value)
2312 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2313 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2315 else
2317 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2318 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2320 return true;
2322 case OPT_mfma:
2323 if (value)
2325 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2326 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2328 else
2330 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2331 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2333 return true;
2335 case OPT_msse4:
2336 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2337 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2338 return true;
2340 case OPT_mno_sse4:
2341 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2342 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2343 return true;
2345 case OPT_msse4a:
2346 if (value)
2348 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2349 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2351 else
2353 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2354 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2356 return true;
2358 case OPT_mfma4:
2359 if (value)
2361 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2362 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2364 else
2366 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2367 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2369 return true;
2371 case OPT_mxop:
2372 if (value)
2374 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2375 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2377 else
2379 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2380 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2382 return true;
2384 case OPT_mlwp:
2385 if (value)
2387 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2388 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2390 else
2392 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2393 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2395 return true;
2397 case OPT_mabm:
2398 if (value)
2400 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2401 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2403 else
2405 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2406 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2408 return true;
2410 case OPT_mpopcnt:
2411 if (value)
2413 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2414 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2416 else
2418 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2419 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2421 return true;
2423 case OPT_msahf:
2424 if (value)
2426 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2427 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2429 else
2431 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2432 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2434 return true;
2436 case OPT_mcx16:
2437 if (value)
2439 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2440 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2442 else
2444 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2445 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2447 return true;
2449 case OPT_mmovbe:
2450 if (value)
2452 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2453 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2455 else
2457 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2458 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2460 return true;
2462 case OPT_mcrc32:
2463 if (value)
2465 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2466 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2468 else
2470 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2471 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2473 return true;
2475 case OPT_maes:
2476 if (value)
2478 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2479 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2481 else
2483 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2484 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2486 return true;
2488 case OPT_mpclmul:
2489 if (value)
2491 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2492 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2494 else
2496 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2497 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2499 return true;
2501 default:
2502 return true;
2506 /* Return a string that documents the current -m options. The caller is
2507 responsible for freeing the string. */
2509 static char *
2510 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2511 const char *fpmath, bool add_nl_p)
2513 struct ix86_target_opts
2515 const char *option; /* option string */
2516 int mask; /* isa mask options */
2519 /* This table is ordered so that options like -msse4.2 that imply
2520 preceding options are matched first. */
2521 static struct ix86_target_opts isa_opts[] =
2523 { "-m64", OPTION_MASK_ISA_64BIT },
2524 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2525 { "-mfma", OPTION_MASK_ISA_FMA },
2526 { "-mxop", OPTION_MASK_ISA_XOP },
2527 { "-mlwp", OPTION_MASK_ISA_LWP },
2528 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2529 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2530 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2531 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2532 { "-msse3", OPTION_MASK_ISA_SSE3 },
2533 { "-msse2", OPTION_MASK_ISA_SSE2 },
2534 { "-msse", OPTION_MASK_ISA_SSE },
2535 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2536 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2537 { "-mmmx", OPTION_MASK_ISA_MMX },
2538 { "-mabm", OPTION_MASK_ISA_ABM },
2539 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2540 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2541 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2542 { "-maes", OPTION_MASK_ISA_AES },
2543 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2546 /* Flag options. */
2547 static struct ix86_target_opts flag_opts[] =
2549 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2550 { "-m80387", MASK_80387 },
2551 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2552 { "-malign-double", MASK_ALIGN_DOUBLE },
2553 { "-mcld", MASK_CLD },
2554 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2555 { "-mieee-fp", MASK_IEEE_FP },
2556 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2557 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2558 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2559 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2560 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2561 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2562 { "-mno-red-zone", MASK_NO_RED_ZONE },
2563 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2564 { "-mrecip", MASK_RECIP },
2565 { "-mrtd", MASK_RTD },
2566 { "-msseregparm", MASK_SSEREGPARM },
2567 { "-mstack-arg-probe", MASK_STACK_PROBE },
2568 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2571 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2573 char isa_other[40];
2574 char target_other[40];
2575 unsigned num = 0;
2576 unsigned i, j;
2577 char *ret;
2578 char *ptr;
2579 size_t len;
2580 size_t line_len;
2581 size_t sep_len;
2583 memset (opts, '\0', sizeof (opts));
2585 /* Add -march= option. */
2586 if (arch)
2588 opts[num][0] = "-march=";
2589 opts[num++][1] = arch;
2592 /* Add -mtune= option. */
2593 if (tune)
2595 opts[num][0] = "-mtune=";
2596 opts[num++][1] = tune;
2599 /* Pick out the options in isa options. */
2600 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2602 if ((isa & isa_opts[i].mask) != 0)
2604 opts[num++][0] = isa_opts[i].option;
2605 isa &= ~ isa_opts[i].mask;
2609 if (isa && add_nl_p)
2611 opts[num++][0] = isa_other;
2612 sprintf (isa_other, "(other isa: %#x)", isa);
2615 /* Add flag options. */
2616 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2618 if ((flags & flag_opts[i].mask) != 0)
2620 opts[num++][0] = flag_opts[i].option;
2621 flags &= ~ flag_opts[i].mask;
2625 if (flags && add_nl_p)
2627 opts[num++][0] = target_other;
2628 sprintf (target_other, "(other flags: %#x)", flags);
2631 /* Add -fpmath= option. */
2632 if (fpmath)
2634 opts[num][0] = "-mfpmath=";
2635 opts[num++][1] = fpmath;
2638 /* Any options? */
2639 if (num == 0)
2640 return NULL;
2642 gcc_assert (num < ARRAY_SIZE (opts));
2644 /* Size the string. */
2645 len = 0;
2646 sep_len = (add_nl_p) ? 3 : 1;
2647 for (i = 0; i < num; i++)
2649 len += sep_len;
2650 for (j = 0; j < 2; j++)
2651 if (opts[i][j])
2652 len += strlen (opts[i][j]);
2655 /* Build the string. */
2656 ret = ptr = (char *) xmalloc (len);
2657 line_len = 0;
2659 for (i = 0; i < num; i++)
2661 size_t len2[2];
2663 for (j = 0; j < 2; j++)
2664 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2666 if (i != 0)
2668 *ptr++ = ' ';
2669 line_len++;
2671 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2673 *ptr++ = '\\';
2674 *ptr++ = '\n';
2675 line_len = 0;
2679 for (j = 0; j < 2; j++)
2680 if (opts[i][j])
2682 memcpy (ptr, opts[i][j], len2[j]);
2683 ptr += len2[j];
2684 line_len += len2[j];
2688 *ptr = '\0';
2689 gcc_assert (ret + len >= ptr);
2691 return ret;
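/* Usage sketch (added for illustration; not part of the original source, and
   the exact output depends on the options in effect): ix86_debug_options
   below is the in-tree caller, but the idea is simply this.  */
#if 0
static void
example_print_target_string (void)
{
  char *s = ix86_target_string (ix86_isa_flags, target_flags,
				"core2", "generic", "sse", true);
  if (s)
    {
      /* Typically something along the lines of
	 "-march=core2 -mtune=generic -m64 -mssse3 -msse3 -msse2 -msse -mmmx
	  -mfpmath=sse", wrapped with backslash-newlines once a line passes
	 roughly 70 columns.  */
      fprintf (stderr, "%s\n", s);
      free (s);
    }
}
#endif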
2694 /* Return TRUE if software prefetching is beneficial for the
2695 given CPU. */
2697 static bool
2698 software_prefetching_beneficial_p (void)
2700 switch (ix86_tune)
2702 case PROCESSOR_GEODE:
2703 case PROCESSOR_K6:
2704 case PROCESSOR_ATHLON:
2705 case PROCESSOR_K8:
2706 case PROCESSOR_AMDFAM10:
2707 return true;
2709 default:
2710 return false;
2714 /* Function that is callable from the debugger to print the current
2715 options. */
2716 void
2717 ix86_debug_options (void)
2719 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2720 ix86_arch_string, ix86_tune_string,
2721 ix86_fpmath_string, true);
2723 if (opts)
2725 fprintf (stderr, "%s\n\n", opts);
2726 free (opts);
2728 else
2729 fputs ("<no options>\n\n", stderr);
2731 return;
2734 /* Sometimes certain combinations of command options do not make
2735 sense on a particular target machine. You can define a macro
2736 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2737 defined, is executed once just after all the command options have
2738 been parsed.
2740 Don't use this macro to turn on various extra optimizations for
2741 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2743 void
2744 override_options (bool main_args_p)
2746 int i;
2747 unsigned int ix86_arch_mask, ix86_tune_mask;
2748 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2749 const char *prefix;
2750 const char *suffix;
2751 const char *sw;
2753 /* Comes from final.c -- no real reason to change it. */
2754 #define MAX_CODE_ALIGN 16
2756 enum pta_flags
2758 PTA_SSE = 1 << 0,
2759 PTA_SSE2 = 1 << 1,
2760 PTA_SSE3 = 1 << 2,
2761 PTA_MMX = 1 << 3,
2762 PTA_PREFETCH_SSE = 1 << 4,
2763 PTA_3DNOW = 1 << 5,
2764 PTA_3DNOW_A = 1 << 6,
2765 PTA_64BIT = 1 << 7,
2766 PTA_SSSE3 = 1 << 8,
2767 PTA_CX16 = 1 << 9,
2768 PTA_POPCNT = 1 << 10,
2769 PTA_ABM = 1 << 11,
2770 PTA_SSE4A = 1 << 12,
2771 PTA_NO_SAHF = 1 << 13,
2772 PTA_SSE4_1 = 1 << 14,
2773 PTA_SSE4_2 = 1 << 15,
2774 PTA_AES = 1 << 16,
2775 PTA_PCLMUL = 1 << 17,
2776 PTA_AVX = 1 << 18,
2777 PTA_FMA = 1 << 19,
2778 PTA_MOVBE = 1 << 20,
2779 PTA_FMA4 = 1 << 21,
2780 PTA_XOP = 1 << 22,
2781 PTA_LWP = 1 << 23
2784 static struct pta
2786 const char *const name; /* processor name or nickname. */
2787 const enum processor_type processor;
2788 const enum attr_cpu schedule;
2789 const unsigned /*enum pta_flags*/ flags;
2791 const processor_alias_table[] =
2793 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2794 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2795 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2796 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2797 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2798 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2799 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2800 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2801 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2802 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2803 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2804 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2805 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2806 PTA_MMX | PTA_SSE},
2807 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2808 PTA_MMX | PTA_SSE},
2809 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2810 PTA_MMX | PTA_SSE | PTA_SSE2},
2811 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2812 PTA_MMX | PTA_SSE | PTA_SSE2},
2813 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2814 PTA_MMX | PTA_SSE | PTA_SSE2},
2815 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2816 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2817 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2818 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2819 | PTA_CX16 | PTA_NO_SAHF},
2820 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2821 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2822 | PTA_SSSE3 | PTA_CX16},
2823 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2824 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2825 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2826 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2827 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2828 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2829 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2830 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2831 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2832 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2833 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2834 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2835 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2836 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2837 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2838 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2839 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2840 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2841 {"x86-64", PROCESSOR_K8, CPU_K8,
2842 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2843 {"k8", PROCESSOR_K8, CPU_K8,
2844 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2845 | PTA_SSE2 | PTA_NO_SAHF},
2846 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2847 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2848 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2849 {"opteron", PROCESSOR_K8, CPU_K8,
2850 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2851 | PTA_SSE2 | PTA_NO_SAHF},
2852 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2853 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2854 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2855 {"athlon64", PROCESSOR_K8, CPU_K8,
2856 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2857 | PTA_SSE2 | PTA_NO_SAHF},
2858 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2859 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2860 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2861 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2862 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2863 | PTA_SSE2 | PTA_NO_SAHF},
2864 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2865 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2866 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2867 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2868 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2869 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2870 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
2871 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2872 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM
2873 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AES
2874 | PTA_PCLMUL | PTA_AVX | PTA_FMA4 | PTA_XOP | PTA_LWP},
2875 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2876 0 /* flags are only used for -march switch. */ },
2877 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2878 PTA_64BIT /* flags are only used for -march switch. */ },
2881 int const pta_size = ARRAY_SIZE (processor_alias_table);
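/* Sketch (added for illustration; not in the original code): the -march= and
   -mtune= handling later in this function is essentially a linear lookup by
   name in processor_alias_table, after which the matching entry's flags
   decide which OPTION_MASK_ISA_* bits are enabled by default, roughly:  */
#if 0
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
      break;	/* processor_alias_table[i] now drives the ISA defaults.  */
#endif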
2883 /* Set up prefix/suffix so the error messages refer to either the command
2884 line argument, or the attribute(target). */
2885 if (main_args_p)
2887 prefix = "-m";
2888 suffix = "";
2889 sw = "switch";
2891 else
2893 prefix = "option(\"";
2894 suffix = "\")";
2895 sw = "attribute";
2898 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2899 SUBTARGET_OVERRIDE_OPTIONS;
2900 #endif
2902 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2903 SUBSUBTARGET_OVERRIDE_OPTIONS;
2904 #endif
2906 /* -fPIC is the default for x86_64. */
2907 if (TARGET_MACHO && TARGET_64BIT)
2908 flag_pic = 2;
2910 /* Set the default values for switches whose default depends on TARGET_64BIT
2911 in case they weren't overwritten by command line options. */
2912 if (TARGET_64BIT)
2914 if (flag_zee == 2)
2915 flag_zee = 1;
2916 /* Mach-O doesn't support omitting the frame pointer for now. */
2917 if (flag_omit_frame_pointer == 2)
2918 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2919 if (flag_asynchronous_unwind_tables == 2)
2920 flag_asynchronous_unwind_tables = 1;
2921 if (flag_pcc_struct_return == 2)
2922 flag_pcc_struct_return = 0;
2924 else
2926 if (flag_zee == 2)
2927 flag_zee = 0;
2928 if (flag_omit_frame_pointer == 2)
2929 flag_omit_frame_pointer = 0;
2930 if (flag_asynchronous_unwind_tables == 2)
2931 flag_asynchronous_unwind_tables = 0;
2932 if (flag_pcc_struct_return == 2)
2933 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2936 /* Need to check -mtune=generic first. */
2937 if (ix86_tune_string)
2939 if (!strcmp (ix86_tune_string, "generic")
2940 || !strcmp (ix86_tune_string, "i686")
2941 /* As special support for cross compilers, we read -mtune=native
2942 as -mtune=generic.  With native compilers we won't see
2943 -mtune=native, as it was changed by the driver. */
2944 || !strcmp (ix86_tune_string, "native"))
2946 if (TARGET_64BIT)
2947 ix86_tune_string = "generic64";
2948 else
2949 ix86_tune_string = "generic32";
2951 /* If this call is for setting the option attribute, allow the
2952 generic32/generic64 that was previously set. */
2953 else if (!main_args_p
2954 && (!strcmp (ix86_tune_string, "generic32")
2955 || !strcmp (ix86_tune_string, "generic64")))
2957 else if (!strncmp (ix86_tune_string, "generic", 7))
2958 error ("bad value (%s) for %stune=%s %s",
2959 ix86_tune_string, prefix, suffix, sw);
2960 else if (!strcmp (ix86_tune_string, "x86-64"))
2961 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2962 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2963 prefix, suffix, prefix, suffix, prefix, suffix);
2965 else
2967 if (ix86_arch_string)
2968 ix86_tune_string = ix86_arch_string;
2969 if (!ix86_tune_string)
2971 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2972 ix86_tune_defaulted = 1;
2975 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2976 need to use a sensible tune option. */
2977 if (!strcmp (ix86_tune_string, "generic")
2978 || !strcmp (ix86_tune_string, "x86-64")
2979 || !strcmp (ix86_tune_string, "i686"))
2981 if (TARGET_64BIT)
2982 ix86_tune_string = "generic64";
2983 else
2984 ix86_tune_string = "generic32";
2988 if (ix86_stringop_string)
2990 if (!strcmp (ix86_stringop_string, "rep_byte"))
2991 stringop_alg = rep_prefix_1_byte;
2992 else if (!strcmp (ix86_stringop_string, "libcall"))
2993 stringop_alg = libcall;
2994 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2995 stringop_alg = rep_prefix_4_byte;
2996 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2997 && TARGET_64BIT)
2998 /* rep; movq isn't available in 32-bit code. */
2999 stringop_alg = rep_prefix_8_byte;
3000 else if (!strcmp (ix86_stringop_string, "byte_loop"))
3001 stringop_alg = loop_1_byte;
3002 else if (!strcmp (ix86_stringop_string, "loop"))
3003 stringop_alg = loop;
3004 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
3005 stringop_alg = unrolled_loop;
3006 else
3007 error ("bad value (%s) for %sstringop-strategy=%s %s",
3008 ix86_stringop_string, prefix, suffix, sw);
3011 if (!ix86_arch_string)
3012 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
3013 else
3014 ix86_arch_specified = 1;
3016 /* Validate -mabi= value. */
3017 if (ix86_abi_string)
3019 if (strcmp (ix86_abi_string, "sysv") == 0)
3020 ix86_abi = SYSV_ABI;
3021 else if (strcmp (ix86_abi_string, "ms") == 0)
3022 ix86_abi = MS_ABI;
3023 else
3024 error ("unknown ABI (%s) for %sabi=%s %s",
3025 ix86_abi_string, prefix, suffix, sw);
3027 else
3028 ix86_abi = DEFAULT_ABI;
3030 if (ix86_cmodel_string != 0)
3032 if (!strcmp (ix86_cmodel_string, "small"))
3033 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3034 else if (!strcmp (ix86_cmodel_string, "medium"))
3035 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
3036 else if (!strcmp (ix86_cmodel_string, "large"))
3037 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
3038 else if (flag_pic)
3039 error ("code model %s does not support PIC mode", ix86_cmodel_string);
3040 else if (!strcmp (ix86_cmodel_string, "32"))
3041 ix86_cmodel = CM_32;
3042 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
3043 ix86_cmodel = CM_KERNEL;
3044 else
3045 error ("bad value (%s) for %scmodel=%s %s",
3046 ix86_cmodel_string, prefix, suffix, sw);
3048 else
3050 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3051 use of rip-relative addressing. This eliminates fixups that
3052 would otherwise be needed if this object is to be placed in a
3053 DLL, and is essentially just as efficient as direct addressing. */
3054 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3055 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3056 else if (TARGET_64BIT)
3057 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3058 else
3059 ix86_cmodel = CM_32;
3061 if (ix86_asm_string != 0)
3063 if (! TARGET_MACHO
3064 && !strcmp (ix86_asm_string, "intel"))
3065 ix86_asm_dialect = ASM_INTEL;
3066 else if (!strcmp (ix86_asm_string, "att"))
3067 ix86_asm_dialect = ASM_ATT;
3068 else
3069 error ("bad value (%s) for %sasm=%s %s",
3070 ix86_asm_string, prefix, suffix, sw);
3072 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
3073 error ("code model %qs not supported in the %s bit mode",
3074 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
3075 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3076 sorry ("%i-bit mode not compiled in",
3077 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3079 for (i = 0; i < pta_size; i++)
3080 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3082 ix86_schedule = processor_alias_table[i].schedule;
3083 ix86_arch = processor_alias_table[i].processor;
3084 /* Default cpu tuning to the architecture. */
3085 ix86_tune = ix86_arch;
3087 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3088 error ("CPU you selected does not support x86-64 "
3089 "instruction set");
3091 if (processor_alias_table[i].flags & PTA_MMX
3092 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3093 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3094 if (processor_alias_table[i].flags & PTA_3DNOW
3095 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3096 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3097 if (processor_alias_table[i].flags & PTA_3DNOW_A
3098 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3099 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3100 if (processor_alias_table[i].flags & PTA_SSE
3101 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3102 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3103 if (processor_alias_table[i].flags & PTA_SSE2
3104 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3105 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3106 if (processor_alias_table[i].flags & PTA_SSE3
3107 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3108 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3109 if (processor_alias_table[i].flags & PTA_SSSE3
3110 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3111 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3112 if (processor_alias_table[i].flags & PTA_SSE4_1
3113 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3114 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3115 if (processor_alias_table[i].flags & PTA_SSE4_2
3116 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3117 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3118 if (processor_alias_table[i].flags & PTA_AVX
3119 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3120 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3121 if (processor_alias_table[i].flags & PTA_FMA
3122 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3123 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3124 if (processor_alias_table[i].flags & PTA_SSE4A
3125 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3126 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3127 if (processor_alias_table[i].flags & PTA_FMA4
3128 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3129 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3130 if (processor_alias_table[i].flags & PTA_XOP
3131 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3132 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3133 if (processor_alias_table[i].flags & PTA_LWP
3134 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3135 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3136 if (processor_alias_table[i].flags & PTA_ABM
3137 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3138 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3139 if (processor_alias_table[i].flags & PTA_CX16
3140 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3141 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3142 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3143 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3144 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3145 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3146 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3147 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3148 if (processor_alias_table[i].flags & PTA_MOVBE
3149 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3150 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3151 if (processor_alias_table[i].flags & PTA_AES
3152 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3153 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3154 if (processor_alias_table[i].flags & PTA_PCLMUL
3155 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3156 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3157 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3158 x86_prefetch_sse = true;
3160 break;
3163 if (!strcmp (ix86_arch_string, "generic"))
3164 error ("generic CPU can be used only for %stune=%s %s",
3165 prefix, suffix, sw);
3166 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3167 error ("bad value (%s) for %sarch=%s %s",
3168 ix86_arch_string, prefix, suffix, sw);
3170 ix86_arch_mask = 1u << ix86_arch;
3171 for (i = 0; i < X86_ARCH_LAST; ++i)
3172 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3174 for (i = 0; i < pta_size; i++)
3175 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3177 ix86_schedule = processor_alias_table[i].schedule;
3178 ix86_tune = processor_alias_table[i].processor;
3179 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3181 if (ix86_tune_defaulted)
3183 ix86_tune_string = "x86-64";
3184 for (i = 0; i < pta_size; i++)
3185 if (! strcmp (ix86_tune_string,
3186 processor_alias_table[i].name))
3187 break;
3188 ix86_schedule = processor_alias_table[i].schedule;
3189 ix86_tune = processor_alias_table[i].processor;
3191 else
3192 error ("CPU you selected does not support x86-64 "
3193 "instruction set");
3195 /* Intel CPUs have always interpreted SSE prefetch instructions as
3196 NOPs; so, we can enable SSE prefetch instructions even when
3197 -mtune (rather than -march) points us to a processor that has them.
3198 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3199 higher processors. */
3200 if (TARGET_CMOVE
3201 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3202 x86_prefetch_sse = true;
3203 break;
3206 if (ix86_tune_specified && i == pta_size)
3207 error ("bad value (%s) for %stune=%s %s",
3208 ix86_tune_string, prefix, suffix, sw);
3210 ix86_tune_mask = 1u << ix86_tune;
3211 for (i = 0; i < X86_TUNE_LAST; ++i)
3212 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3214 if (optimize_size)
3215 ix86_cost = &ix86_size_cost;
3216 else
3217 ix86_cost = processor_target_table[ix86_tune].cost;
3219 /* Arrange to set up i386_stack_locals for all functions. */
3220 init_machine_status = ix86_init_machine_status;
3222 /* Validate -mregparm= value. */
3223 if (ix86_regparm_string)
3225 if (TARGET_64BIT)
3226 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3227 i = atoi (ix86_regparm_string);
3228 if (i < 0 || i > REGPARM_MAX)
3229 error ("%sregparm=%d%s is not between 0 and %d",
3230 prefix, i, suffix, REGPARM_MAX);
3231 else
3232 ix86_regparm = i;
3234 if (TARGET_64BIT)
3235 ix86_regparm = REGPARM_MAX;
3237 /* If the user has provided any of the -malign-* options,
3238 warn and use that value only if -falign-* is not set.
3239 Remove this code in GCC 3.2 or later. */
3240 if (ix86_align_loops_string)
3242 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3243 prefix, suffix, suffix);
3244 if (align_loops == 0)
3246 i = atoi (ix86_align_loops_string);
3247 if (i < 0 || i > MAX_CODE_ALIGN)
3248 error ("%salign-loops=%d%s is not between 0 and %d",
3249 prefix, i, suffix, MAX_CODE_ALIGN);
3250 else
3251 align_loops = 1 << i;
3255 if (ix86_align_jumps_string)
3257 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3258 prefix, suffix, suffix);
3259 if (align_jumps == 0)
3261 i = atoi (ix86_align_jumps_string);
3262 if (i < 0 || i > MAX_CODE_ALIGN)
3263 error ("%salign-jumps=%d%s is not between 0 and %d",
3264 prefix, i, suffix, MAX_CODE_ALIGN);
3265 else
3266 align_jumps = 1 << i;
3270 if (ix86_align_funcs_string)
3272 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3273 prefix, suffix, suffix);
3274 if (align_functions == 0)
3276 i = atoi (ix86_align_funcs_string);
3277 if (i < 0 || i > MAX_CODE_ALIGN)
3278 error ("%salign-functions=%d%s is not between 0 and %d",
3279 prefix, i, suffix, MAX_CODE_ALIGN);
3280 else
3281 align_functions = 1 << i;
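 /* A worked example: with -falign-loops left unset, -malign-loops=4 is
 accepted (after the obsolescence warning) and yields
 align_loops = 1 << 4 = 16 bytes; the -falign-* flags always win.  */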
3285 /* Default align_* from the processor table. */
3286 if (align_loops == 0)
3288 align_loops = processor_target_table[ix86_tune].align_loop;
3289 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3291 if (align_jumps == 0)
3293 align_jumps = processor_target_table[ix86_tune].align_jump;
3294 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3296 if (align_functions == 0)
3298 align_functions = processor_target_table[ix86_tune].align_func;
3301 /* Validate -mbranch-cost= value, or provide default. */
3302 ix86_branch_cost = ix86_cost->branch_cost;
3303 if (ix86_branch_cost_string)
3305 i = atoi (ix86_branch_cost_string);
3306 if (i < 0 || i > 5)
3307 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3308 else
3309 ix86_branch_cost = i;
3311 if (ix86_section_threshold_string)
3313 i = atoi (ix86_section_threshold_string);
3314 if (i < 0)
3315 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3316 else
3317 ix86_section_threshold = i;
3320 if (ix86_tls_dialect_string)
3322 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3323 ix86_tls_dialect = TLS_DIALECT_GNU;
3324 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3325 ix86_tls_dialect = TLS_DIALECT_GNU2;
3326 else
3327 error ("bad value (%s) for %stls-dialect=%s %s",
3328 ix86_tls_dialect_string, prefix, suffix, sw);
3331 if (ix87_precision_string)
3333 i = atoi (ix87_precision_string);
3334 if (i != 32 && i != 64 && i != 80)
3335 error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
3338 if (TARGET_64BIT)
3340 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3342 /* Enable by default the SSE and MMX builtins. Do allow the user to
3343 explicitly disable any of these. In particular, disabling SSE and
3344 MMX for kernel code is extremely useful. */
3345 if (!ix86_arch_specified)
3346 ix86_isa_flags
3347 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3348 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3350 if (TARGET_RTD)
3351 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3353 else
3355 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3357 if (!ix86_arch_specified)
3358 ix86_isa_flags
3359 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3361 /* The i386 ABI does not specify a red zone. It still makes sense to use it
3362 when the programmer takes care to keep the stack from being destroyed. */
3363 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3364 target_flags |= MASK_NO_RED_ZONE;
3367 /* Keep nonleaf frame pointers. */
3368 if (flag_omit_frame_pointer)
3369 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3370 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3371 flag_omit_frame_pointer = 1;
3373 /* If we're doing fast math, we don't care about comparison order
3374 wrt NaNs. This lets us use a shorter comparison sequence. */
3375 if (flag_finite_math_only)
3376 target_flags &= ~MASK_IEEE_FP;
3378 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3379 since the insns won't need emulation. */
3380 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3381 target_flags &= ~MASK_NO_FANCY_MATH_387;
3383 /* Likewise, if the target doesn't have a 387, or we've specified
3384 software floating point, don't use 387 inline intrinsics. */
3385 if (!TARGET_80387)
3386 target_flags |= MASK_NO_FANCY_MATH_387;
3388 /* Turn on MMX builtins for -msse. */
3389 if (TARGET_SSE)
3391 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3392 x86_prefetch_sse = true;
3395 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3396 if (TARGET_SSE4_2 || TARGET_ABM)
3397 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3399 /* Validate -mpreferred-stack-boundary= value or default it to
3400 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3401 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3402 if (ix86_preferred_stack_boundary_string)
3404 i = atoi (ix86_preferred_stack_boundary_string);
3405 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3406 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3407 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3408 else
3409 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
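 /* A worked example: -mpreferred-stack-boundary=4 gives
 (1 << 4) * BITS_PER_UNIT = 128 bits, i.e. a 16-byte stack alignment;
 values below 2 (4 for 64-bit) or above 12 are rejected above.  */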
3412 /* Set the default value for -mstackrealign. */
3413 if (ix86_force_align_arg_pointer == -1)
3414 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3416 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3418 /* Validate -mincoming-stack-boundary= value or default it to
3419 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3420 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3421 if (ix86_incoming_stack_boundary_string)
3423 i = atoi (ix86_incoming_stack_boundary_string);
3424 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3425 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3426 i, TARGET_64BIT ? 4 : 2);
3427 else
3429 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3430 ix86_incoming_stack_boundary
3431 = ix86_user_incoming_stack_boundary;
3435 /* Accept -msseregparm only if at least SSE support is enabled. */
3436 if (TARGET_SSEREGPARM
3437 && ! TARGET_SSE)
3438 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3440 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3441 if (ix86_fpmath_string != 0)
3443 if (! strcmp (ix86_fpmath_string, "387"))
3444 ix86_fpmath = FPMATH_387;
3445 else if (! strcmp (ix86_fpmath_string, "sse"))
3447 if (!TARGET_SSE)
3449 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3450 ix86_fpmath = FPMATH_387;
3452 else
3453 ix86_fpmath = FPMATH_SSE;
3455 else if (! strcmp (ix86_fpmath_string, "387,sse")
3456 || ! strcmp (ix86_fpmath_string, "387+sse")
3457 || ! strcmp (ix86_fpmath_string, "sse,387")
3458 || ! strcmp (ix86_fpmath_string, "sse+387")
3459 || ! strcmp (ix86_fpmath_string, "both"))
3461 if (!TARGET_SSE)
3463 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3464 ix86_fpmath = FPMATH_387;
3466 else if (!TARGET_80387)
3468 warning (0, "387 instruction set disabled, using SSE arithmetics");
3469 ix86_fpmath = FPMATH_SSE;
3471 else
3472 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3474 else
3475 error ("bad value (%s) for %sfpmath=%s %s",
3476 ix86_fpmath_string, prefix, suffix, sw);
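 /* For instance, -mfpmath=sse,387 (or "both") selects FPMATH_SSE | FPMATH_387
 when both units are enabled; if SSE is disabled it falls back to 387 with
 a warning, and if the 387 is disabled it falls back to SSE.  */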
3479 /* If the i387 is disabled, then do not return values in it. */
3480 if (!TARGET_80387)
3481 target_flags &= ~MASK_FLOAT_RETURNS;
3483 /* Use external vectorized library in vectorizing intrinsics. */
3484 if (ix86_veclibabi_string)
3486 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3487 ix86_veclib_handler = ix86_veclibabi_svml;
3488 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3489 ix86_veclib_handler = ix86_veclibabi_acml;
3490 else
3491 error ("unknown vectorization library ABI type (%s) for "
3492 "%sveclibabi=%s %s", ix86_veclibabi_string,
3493 prefix, suffix, sw);
3496 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3497 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3498 && !optimize_size)
3499 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3501 /* ??? Unwind info is not correct around the CFG unless either a frame
3502 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3503 unwind info generation to be aware of the CFG and propagating states
3504 around edges. */
3505 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3506 || flag_exceptions || flag_non_call_exceptions)
3507 && flag_omit_frame_pointer
3508 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3510 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3511 warning (0, "unwind tables currently require either a frame pointer "
3512 "or %saccumulate-outgoing-args%s for correctness",
3513 prefix, suffix);
3514 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3517 /* If stack probes are required, the space used for large function
3518 arguments on the stack must also be probed, so enable
3519 -maccumulate-outgoing-args so this happens in the prologue. */
3520 if (TARGET_STACK_PROBE
3521 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3523 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3524 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3525 "for correctness", prefix, suffix);
3526 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3529 /* For sane SSE instruction set generation we need the fcomi instruction.
3530 It is safe to enable all CMOVE instructions. */
3531 if (TARGET_SSE)
3532 TARGET_CMOVE = 1;
3534 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3536 char *p;
3537 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3538 p = strchr (internal_label_prefix, 'X');
3539 internal_label_prefix_len = p - internal_label_prefix;
3540 *p = '\0';
3543 /* When the scheduling description is not available, disable the scheduler
3544 pass so that it won't slow down compilation and make x87 code slower. */
3545 if (!TARGET_SCHEDULE)
3546 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3548 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3549 set_param_value ("simultaneous-prefetches",
3550 ix86_cost->simultaneous_prefetches);
3551 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3552 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3553 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3554 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3555 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3556 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3558 /* Enable software prefetching at -O3 for CPUs where prefetching is helpful. */
3559 if (flag_prefetch_loop_arrays < 0
3560 && HAVE_prefetch
3561 && optimize >= 3
3562 && software_prefetching_beneficial_p ())
3563 flag_prefetch_loop_arrays = 1;
3565 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3566 can be optimized to ap = __builtin_next_arg (0). */
3567 if (!TARGET_64BIT)
3568 targetm.expand_builtin_va_start = NULL;
3570 if (TARGET_64BIT)
3572 ix86_gen_leave = gen_leave_rex64;
3573 ix86_gen_pop1 = gen_popdi1;
3574 ix86_gen_add3 = gen_adddi3;
3575 ix86_gen_sub3 = gen_subdi3;
3576 ix86_gen_sub3_carry = gen_subdi3_carry;
3577 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3578 ix86_gen_monitor = gen_sse3_monitor64;
3579 ix86_gen_andsp = gen_anddi3;
3581 else
3583 ix86_gen_leave = gen_leave;
3584 ix86_gen_pop1 = gen_popsi1;
3585 ix86_gen_add3 = gen_addsi3;
3586 ix86_gen_sub3 = gen_subsi3;
3587 ix86_gen_sub3_carry = gen_subsi3_carry;
3588 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3589 ix86_gen_monitor = gen_sse3_monitor;
3590 ix86_gen_andsp = gen_andsi3;
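 /* These indirections let word-size-independent code emit the right pattern.
 As a sketch (an illustrative call, not taken from this function):
   emit_insn (ix86_gen_add3 (stack_pointer_rtx, stack_pointer_rtx, off));
 expands through adddi3 under TARGET_64BIT and addsi3 otherwise.  */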
3593 #ifdef USE_IX86_CLD
3594 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3595 if (!TARGET_64BIT)
3596 target_flags |= MASK_CLD & ~target_flags_explicit;
3597 #endif
3599 /* Save the initial options in case the user uses function-specific options. */
3600 if (main_args_p)
3601 target_option_default_node = target_option_current_node
3602 = build_target_option_node ();
3605 /* Update register usage after having seen the compiler flags. */
3607 void
3608 ix86_conditional_register_usage (void)
3610 int i;
3611 unsigned int j;
3613 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3615 if (fixed_regs[i] > 1)
3616 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3617 if (call_used_regs[i] > 1)
3618 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3621 /* The PIC register, if it exists, is fixed. */
3622 j = PIC_OFFSET_TABLE_REGNUM;
3623 if (j != INVALID_REGNUM)
3624 fixed_regs[j] = call_used_regs[j] = 1;
3626 /* The MS_ABI changes the set of call-used registers. */
3627 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3629 call_used_regs[SI_REG] = 0;
3630 call_used_regs[DI_REG] = 0;
3631 call_used_regs[XMM6_REG] = 0;
3632 call_used_regs[XMM7_REG] = 0;
3633 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3634 call_used_regs[i] = 0;
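 /* In other words, under the 64-bit MS ABI RSI, RDI and XMM6-XMM15 are
 callee-saved; clearing their call_used_regs entries above expresses
 exactly that.  */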
3637 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3638 other call-clobbered regs for 64-bit. */
3639 if (TARGET_64BIT)
3641 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3643 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3644 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3645 && call_used_regs[i])
3646 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3649 /* If MMX is disabled, squash the registers. */
3650 if (! TARGET_MMX)
3651 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3652 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3653 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3655 /* If SSE is disabled, squash the registers. */
3656 if (! TARGET_SSE)
3657 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3658 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3659 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3661 /* If the FPU is disabled, squash the registers. */
3662 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3663 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3664 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3665 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3667 /* If 32-bit, squash the 64-bit registers. */
3668 if (! TARGET_64BIT)
3670 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3671 reg_names[i] = "";
3672 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3673 reg_names[i] = "";
3678 /* Save the current options */
3680 static void
3681 ix86_function_specific_save (struct cl_target_option *ptr)
3683 ptr->arch = ix86_arch;
3684 ptr->schedule = ix86_schedule;
3685 ptr->tune = ix86_tune;
3686 ptr->fpmath = ix86_fpmath;
3687 ptr->branch_cost = ix86_branch_cost;
3688 ptr->tune_defaulted = ix86_tune_defaulted;
3689 ptr->arch_specified = ix86_arch_specified;
3690 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3691 ptr->target_flags_explicit = target_flags_explicit;
3693 /* The fields are char but the variables are not; make sure the
3694 values fit in the fields. */
3695 gcc_assert (ptr->arch == ix86_arch);
3696 gcc_assert (ptr->schedule == ix86_schedule);
3697 gcc_assert (ptr->tune == ix86_tune);
3698 gcc_assert (ptr->fpmath == ix86_fpmath);
3699 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3702 /* Restore the current options */
3704 static void
3705 ix86_function_specific_restore (struct cl_target_option *ptr)
3707 enum processor_type old_tune = ix86_tune;
3708 enum processor_type old_arch = ix86_arch;
3709 unsigned int ix86_arch_mask, ix86_tune_mask;
3710 int i;
3712 ix86_arch = (enum processor_type) ptr->arch;
3713 ix86_schedule = (enum attr_cpu) ptr->schedule;
3714 ix86_tune = (enum processor_type) ptr->tune;
3715 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3716 ix86_branch_cost = ptr->branch_cost;
3717 ix86_tune_defaulted = ptr->tune_defaulted;
3718 ix86_arch_specified = ptr->arch_specified;
3719 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3720 target_flags_explicit = ptr->target_flags_explicit;
3722 /* Recreate the arch feature tests if the arch changed */
3723 if (old_arch != ix86_arch)
3725 ix86_arch_mask = 1u << ix86_arch;
3726 for (i = 0; i < X86_ARCH_LAST; ++i)
3727 ix86_arch_features[i]
3728 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3731 /* Recreate the tune optimization tests */
3732 if (old_tune != ix86_tune)
3734 ix86_tune_mask = 1u << ix86_tune;
3735 for (i = 0; i < X86_TUNE_LAST; ++i)
3736 ix86_tune_features[i]
3737 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3741 /* Print the current options */
3743 static void
3744 ix86_function_specific_print (FILE *file, int indent,
3745 struct cl_target_option *ptr)
3747 char *target_string
3748 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3749 NULL, NULL, NULL, false);
3751 fprintf (file, "%*sarch = %d (%s)\n",
3752 indent, "",
3753 ptr->arch,
3754 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3755 ? cpu_names[ptr->arch]
3756 : "<unknown>"));
3758 fprintf (file, "%*stune = %d (%s)\n",
3759 indent, "",
3760 ptr->tune,
3761 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3762 ? cpu_names[ptr->tune]
3763 : "<unknown>"));
3765 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3766 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3767 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3768 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3770 if (target_string)
3772 fprintf (file, "%*s%s\n", indent, "", target_string);
3773 free (target_string);
3778 /* Inner function to process attribute((target(...))): take an argument and
3779 set the current options from that argument. If we have a list, recursively go
3780 over the list. */
3782 static bool
3783 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3785 char *next_optstr;
3786 bool ret = true;
3788 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3789 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3790 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3791 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3793 enum ix86_opt_type
3795 ix86_opt_unknown,
3796 ix86_opt_yes,
3797 ix86_opt_no,
3798 ix86_opt_str,
3799 ix86_opt_isa
3802 static const struct
3804 const char *string;
3805 size_t len;
3806 enum ix86_opt_type type;
3807 int opt;
3808 int mask;
3809 } attrs[] = {
3810 /* isa options */
3811 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3812 IX86_ATTR_ISA ("abm", OPT_mabm),
3813 IX86_ATTR_ISA ("aes", OPT_maes),
3814 IX86_ATTR_ISA ("avx", OPT_mavx),
3815 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3816 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3817 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3818 IX86_ATTR_ISA ("sse", OPT_msse),
3819 IX86_ATTR_ISA ("sse2", OPT_msse2),
3820 IX86_ATTR_ISA ("sse3", OPT_msse3),
3821 IX86_ATTR_ISA ("sse4", OPT_msse4),
3822 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3823 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3824 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3825 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3826 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3827 IX86_ATTR_ISA ("xop", OPT_mxop),
3828 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3830 /* string options */
3831 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3832 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3833 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3835 /* flag options */
3836 IX86_ATTR_YES ("cld",
3837 OPT_mcld,
3838 MASK_CLD),
3840 IX86_ATTR_NO ("fancy-math-387",
3841 OPT_mfancy_math_387,
3842 MASK_NO_FANCY_MATH_387),
3844 IX86_ATTR_YES ("ieee-fp",
3845 OPT_mieee_fp,
3846 MASK_IEEE_FP),
3848 IX86_ATTR_YES ("inline-all-stringops",
3849 OPT_minline_all_stringops,
3850 MASK_INLINE_ALL_STRINGOPS),
3852 IX86_ATTR_YES ("inline-stringops-dynamically",
3853 OPT_minline_stringops_dynamically,
3854 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3856 IX86_ATTR_NO ("align-stringops",
3857 OPT_mno_align_stringops,
3858 MASK_NO_ALIGN_STRINGOPS),
3860 IX86_ATTR_YES ("recip",
3861 OPT_mrecip,
3862 MASK_RECIP),
3866 /* If this is a list, recurse to get the options. */
3867 if (TREE_CODE (args) == TREE_LIST)
3869 bool ret = true;
3871 for (; args; args = TREE_CHAIN (args))
3872 if (TREE_VALUE (args)
3873 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3874 ret = false;
3876 return ret;
3879 else if (TREE_CODE (args) != STRING_CST)
3880 gcc_unreachable ();
3882 /* Handle multiple arguments separated by commas. */
3883 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3885 while (next_optstr && *next_optstr != '\0')
3887 char *p = next_optstr;
3888 char *orig_p = p;
3889 char *comma = strchr (next_optstr, ',');
3890 const char *opt_string;
3891 size_t len, opt_len;
3892 int opt;
3893 bool opt_set_p;
3894 char ch;
3895 unsigned i;
3896 enum ix86_opt_type type = ix86_opt_unknown;
3897 int mask = 0;
3899 if (comma)
3901 *comma = '\0';
3902 len = comma - next_optstr;
3903 next_optstr = comma + 1;
3905 else
3907 len = strlen (p);
3908 next_optstr = NULL;
3911 /* Recognize no-xxx. */
3912 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3914 opt_set_p = false;
3915 p += 3;
3916 len -= 3;
3918 else
3919 opt_set_p = true;
3921 /* Find the option. */
3922 ch = *p;
3923 opt = N_OPTS;
3924 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3926 type = attrs[i].type;
3927 opt_len = attrs[i].len;
3928 if (ch == attrs[i].string[0]
3929 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3930 && memcmp (p, attrs[i].string, opt_len) == 0)
3932 opt = attrs[i].opt;
3933 mask = attrs[i].mask;
3934 opt_string = attrs[i].string;
3935 break;
3939 /* Process the option. */
3940 if (opt == N_OPTS)
3942 error ("attribute(target(\"%s\")) is unknown", orig_p);
3943 ret = false;
3946 else if (type == ix86_opt_isa)
3947 ix86_handle_option (opt, p, opt_set_p);
3949 else if (type == ix86_opt_yes || type == ix86_opt_no)
3951 if (type == ix86_opt_no)
3952 opt_set_p = !opt_set_p;
3954 if (opt_set_p)
3955 target_flags |= mask;
3956 else
3957 target_flags &= ~mask;
3960 else if (type == ix86_opt_str)
3962 if (p_strings[opt])
3964 error ("option(\"%s\") was already specified", opt_string);
3965 ret = false;
3967 else
3968 p_strings[opt] = xstrdup (p + opt_len);
3971 else
3972 gcc_unreachable ();
3975 return ret;
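/* An illustrative use (the ISA and arch values are only examples):
     __attribute__((target("sse4.2,arch=core2")))
     int crc32_loop (const char *buf, int len);
   reaches this parser as the string "sse4.2,arch=core2"; "sse4.2" is handled
   as an isa option through ix86_handle_option, while "arch=core2" is stored
   in p_strings[IX86_FUNCTION_SPECIFIC_ARCH] for the caller to process.  */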
3978 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3980 tree
3981 ix86_valid_target_attribute_tree (tree args)
3983 const char *orig_arch_string = ix86_arch_string;
3984 const char *orig_tune_string = ix86_tune_string;
3985 const char *orig_fpmath_string = ix86_fpmath_string;
3986 int orig_tune_defaulted = ix86_tune_defaulted;
3987 int orig_arch_specified = ix86_arch_specified;
3988 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3989 tree t = NULL_TREE;
3990 int i;
3991 struct cl_target_option *def
3992 = TREE_TARGET_OPTION (target_option_default_node);
3994 /* Process each of the options on the chain. */
3995 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3996 return NULL_TREE;
3998 /* If the changed options are different from the default, rerun override_options,
3999 and then save the options away. The string options are attribute options,
4000 and will be undone when we copy the save structure. */
4001 if (ix86_isa_flags != def->ix86_isa_flags
4002 || target_flags != def->target_flags
4003 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
4004 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
4005 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4007 /* If we are using the default tune= or arch=, undo the string assigned,
4008 and use the default. */
4009 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
4010 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
4011 else if (!orig_arch_specified)
4012 ix86_arch_string = NULL;
4014 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
4015 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
4016 else if (orig_tune_defaulted)
4017 ix86_tune_string = NULL;
4019 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
4020 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
4021 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
4022 else if (!TARGET_64BIT && TARGET_SSE)
4023 ix86_fpmath_string = "sse,387";
4025 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
4026 override_options (false);
4028 /* Add any builtin functions with the new isa if any. */
4029 ix86_add_new_builtins (ix86_isa_flags);
4031 /* Save the current options unless we are validating options for
4032 #pragma. */
4033 t = build_target_option_node ();
4035 ix86_arch_string = orig_arch_string;
4036 ix86_tune_string = orig_tune_string;
4037 ix86_fpmath_string = orig_fpmath_string;
4039 /* Free up memory allocated to hold the strings */
4040 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4041 if (option_strings[i])
4042 free (option_strings[i]);
4045 return t;
4048 /* Hook to validate attribute((target("string"))). */
4050 static bool
4051 ix86_valid_target_attribute_p (tree fndecl,
4052 tree ARG_UNUSED (name),
4053 tree args,
4054 int ARG_UNUSED (flags))
4056 struct cl_target_option cur_target;
4057 bool ret = true;
4058 tree old_optimize = build_optimization_node ();
4059 tree new_target, new_optimize;
4060 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4062 /* If the function changed the optimization levels as well as setting target
4063 options, start with the optimizations specified. */
4064 if (func_optimize && func_optimize != old_optimize)
4065 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
4067 /* The target attributes may also change some optimization flags, so update
4068 the optimization options if necessary. */
4069 cl_target_option_save (&cur_target);
4070 new_target = ix86_valid_target_attribute_tree (args);
4071 new_optimize = build_optimization_node ();
4073 if (!new_target)
4074 ret = false;
4076 else if (fndecl)
4078 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4080 if (old_optimize != new_optimize)
4081 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4084 cl_target_option_restore (&cur_target);
4086 if (old_optimize != new_optimize)
4087 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
4089 return ret;
4093 /* Hook to determine if one function can safely inline another. */
4095 static bool
4096 ix86_can_inline_p (tree caller, tree callee)
4098 bool ret = false;
4099 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4100 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4102 /* If callee has no option attributes, then it is ok to inline. */
4103 if (!callee_tree)
4104 ret = true;
4106 /* If caller has no option attributes, but callee does, then it is not ok to
4107 inline. */
4108 else if (!caller_tree)
4109 ret = false;
4111 else
4113 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4114 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4116 /* Callee's isa options should be a subset of the caller's, i.e. an SSE4 function
4117 can inline an SSE2 function but an SSE2 function can't inline an SSE4
4118 function. */
4119 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
4120 != callee_opts->ix86_isa_flags)
4121 ret = false;
4123 /* See if we have the same non-isa options. */
4124 else if (caller_opts->target_flags != callee_opts->target_flags)
4125 ret = false;
4127 /* See if arch, tune, etc. are the same. */
4128 else if (caller_opts->arch != callee_opts->arch)
4129 ret = false;
4131 else if (caller_opts->tune != callee_opts->tune)
4132 ret = false;
4134 else if (caller_opts->fpmath != callee_opts->fpmath)
4135 ret = false;
4137 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4138 ret = false;
4140 else
4141 ret = true;
4144 return ret;
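/* For example, a callee declared __attribute__((target("sse4.2"))) is not
   inlined into a caller compiled with only -msse2: either the caller carries
   no target attribute at all (second case above) or its isa flags fail the
   subset test.  */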
4148 /* Remember the last target of ix86_set_current_function. */
4149 static GTY(()) tree ix86_previous_fndecl;
4151 /* Establish appropriate back-end context for processing the function
4152 FNDECL. The argument might be NULL to indicate processing at top
4153 level, outside of any function scope. */
4154 static void
4155 ix86_set_current_function (tree fndecl)
4157 /* Only change the context if the function changes. This hook is called
4158 several times in the course of compiling a function, and we don't want to
4159 slow things down too much or call target_reinit when it isn't safe. */
4160 if (fndecl && fndecl != ix86_previous_fndecl)
4162 tree old_tree = (ix86_previous_fndecl
4163 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4164 : NULL_TREE);
4166 tree new_tree = (fndecl
4167 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4168 : NULL_TREE);
4170 ix86_previous_fndecl = fndecl;
4171 if (old_tree == new_tree)
4174 else if (new_tree)
4176 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4177 target_reinit ();
4180 else if (old_tree)
4182 struct cl_target_option *def
4183 = TREE_TARGET_OPTION (target_option_current_node);
4185 cl_target_option_restore (def);
4186 target_reinit ();
4192 /* Return true if this goes in large data/bss. */
4194 static bool
4195 ix86_in_large_data_p (tree exp)
4197 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4198 return false;
4200 /* Functions are never large data. */
4201 if (TREE_CODE (exp) == FUNCTION_DECL)
4202 return false;
4204 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4206 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4207 if (strcmp (section, ".ldata") == 0
4208 || strcmp (section, ".lbss") == 0)
4209 return true;
4210 return false;
4212 else
4214 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4216 /* If this is an incomplete type with size 0, then we can't put it
4217 in data because it might be too big when completed. */
4218 if (!size || size > ix86_section_threshold)
4219 return true;
4222 return false;
4225 /* Switch to the appropriate section for output of DECL.
4226 DECL is either a `VAR_DECL' node or a constant of some sort.
4227 RELOC indicates whether forming the initial value of DECL requires
4228 link-time relocations. */
4230 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4231 ATTRIBUTE_UNUSED;
4233 static section *
4234 x86_64_elf_select_section (tree decl, int reloc,
4235 unsigned HOST_WIDE_INT align)
4237 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4238 && ix86_in_large_data_p (decl))
4240 const char *sname = NULL;
4241 unsigned int flags = SECTION_WRITE;
4242 switch (categorize_decl_for_section (decl, reloc))
4244 case SECCAT_DATA:
4245 sname = ".ldata";
4246 break;
4247 case SECCAT_DATA_REL:
4248 sname = ".ldata.rel";
4249 break;
4250 case SECCAT_DATA_REL_LOCAL:
4251 sname = ".ldata.rel.local";
4252 break;
4253 case SECCAT_DATA_REL_RO:
4254 sname = ".ldata.rel.ro";
4255 break;
4256 case SECCAT_DATA_REL_RO_LOCAL:
4257 sname = ".ldata.rel.ro.local";
4258 break;
4259 case SECCAT_BSS:
4260 sname = ".lbss";
4261 flags |= SECTION_BSS;
4262 break;
4263 case SECCAT_RODATA:
4264 case SECCAT_RODATA_MERGE_STR:
4265 case SECCAT_RODATA_MERGE_STR_INIT:
4266 case SECCAT_RODATA_MERGE_CONST:
4267 sname = ".lrodata";
4268 flags = 0;
4269 break;
4270 case SECCAT_SRODATA:
4271 case SECCAT_SDATA:
4272 case SECCAT_SBSS:
4273 gcc_unreachable ();
4274 case SECCAT_TEXT:
4275 case SECCAT_TDATA:
4276 case SECCAT_TBSS:
4277 /* We don't split these for the medium model. Place them into
4278 default sections and hope for the best. */
4279 break;
4280 case SECCAT_EMUTLS_VAR:
4281 case SECCAT_EMUTLS_TMPL:
4282 gcc_unreachable ();
4284 if (sname)
4286 /* We might get called with string constants, but get_named_section
4287 doesn't like them as they are not DECLs. Also, we need to set
4288 flags in that case. */
4289 if (!DECL_P (decl))
4290 return get_section (sname, flags, NULL);
4291 return get_named_section (decl, sname, reloc);
4294 return default_elf_select_section (decl, reloc, align);
4297 /* Build up a unique section name, expressed as a
4298 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4299 RELOC indicates whether the initial value of EXP requires
4300 link-time relocations. */
4302 static void ATTRIBUTE_UNUSED
4303 x86_64_elf_unique_section (tree decl, int reloc)
4305 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4306 && ix86_in_large_data_p (decl))
4308 const char *prefix = NULL;
4309 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4310 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4312 switch (categorize_decl_for_section (decl, reloc))
4314 case SECCAT_DATA:
4315 case SECCAT_DATA_REL:
4316 case SECCAT_DATA_REL_LOCAL:
4317 case SECCAT_DATA_REL_RO:
4318 case SECCAT_DATA_REL_RO_LOCAL:
4319 prefix = one_only ? ".ld" : ".ldata";
4320 break;
4321 case SECCAT_BSS:
4322 prefix = one_only ? ".lb" : ".lbss";
4323 break;
4324 case SECCAT_RODATA:
4325 case SECCAT_RODATA_MERGE_STR:
4326 case SECCAT_RODATA_MERGE_STR_INIT:
4327 case SECCAT_RODATA_MERGE_CONST:
4328 prefix = one_only ? ".lr" : ".lrodata";
4329 break;
4330 case SECCAT_SRODATA:
4331 case SECCAT_SDATA:
4332 case SECCAT_SBSS:
4333 gcc_unreachable ();
4334 case SECCAT_TEXT:
4335 case SECCAT_TDATA:
4336 case SECCAT_TBSS:
4337 /* We don't split these for the medium model. Place them into
4338 default sections and hope for the best. */
4339 break;
4340 case SECCAT_EMUTLS_VAR:
4341 prefix = targetm.emutls.var_section;
4342 break;
4343 case SECCAT_EMUTLS_TMPL:
4344 prefix = targetm.emutls.tmpl_section;
4345 break;
4347 if (prefix)
4349 const char *name, *linkonce;
4350 char *string;
4352 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4353 name = targetm.strip_name_encoding (name);
4355 /* If we're using one_only, then there needs to be a .gnu.linkonce
4356 prefix to the section name. */
4357 linkonce = one_only ? ".gnu.linkonce" : "";
4359 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4361 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4362 return;
4365 default_unique_section (decl, reloc);
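/* As an illustration, under -mcmodel=medium a large SECCAT_DATA variable
   named "foo" (a hypothetical name) ends up in section ".ldata.foo", or in
   ".gnu.linkonce.ld.foo" when one_only is in effect.  */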
4368 #ifdef COMMON_ASM_OP
4369 /* This says how to output assembler code to declare an
4370 uninitialized external linkage data object.
4372 For medium model x86-64 we need to use the .largecomm directive for
4373 large objects. */
4374 void
4375 x86_elf_aligned_common (FILE *file,
4376 const char *name, unsigned HOST_WIDE_INT size,
4377 int align)
4379 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4380 && size > (unsigned int)ix86_section_threshold)
4381 fputs (".largecomm\t", file);
4382 else
4383 fputs (COMMON_ASM_OP, file);
4384 assemble_name (file, name);
4385 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4386 size, align / BITS_PER_UNIT);
4388 #endif
4390 /* Utility function for targets to use in implementing
4391 ASM_OUTPUT_ALIGNED_BSS. */
4393 void
4394 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4395 const char *name, unsigned HOST_WIDE_INT size,
4396 int align)
4398 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4399 && size > (unsigned int)ix86_section_threshold)
4400 switch_to_section (get_named_section (decl, ".lbss", 0));
4401 else
4402 switch_to_section (bss_section);
4403 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4404 #ifdef ASM_DECLARE_OBJECT_NAME
4405 last_assemble_variable_decl = decl;
4406 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4407 #else
4408 /* The standard thing is to just output a label for the object. */
4409 ASM_OUTPUT_LABEL (file, name);
4410 #endif /* ASM_DECLARE_OBJECT_NAME */
4411 ASM_OUTPUT_SKIP (file, size ? size : 1);
4414 void
4415 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4417 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4418 make the problem with not enough registers even worse. */
4419 #ifdef INSN_SCHEDULING
4420 if (level > 1)
4421 flag_schedule_insns = 0;
4422 #endif
4424 if (TARGET_MACHO)
4425 /* The Darwin libraries never set errno, so we might as well
4426 avoid calling them when that's the only reason we would. */
4427 flag_errno_math = 0;
4429 /* The default values of these switches depend on TARGET_64BIT,
4430 which is not known at this point. Mark these values with 2 and
4431 let the user override them. If there is no command line option
4432 specifying them, we will set the defaults in override_options. */
4433 if (optimize >= 1)
4434 flag_omit_frame_pointer = 2;
4436 /* For -O2 and beyond, turn on -fzee for x86_64 target. */
4437 if (level > 1)
4438 flag_zee = 2;
4440 flag_pcc_struct_return = 2;
4441 flag_asynchronous_unwind_tables = 2;
4442 flag_vect_cost_model = 1;
4443 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4444 SUBTARGET_OPTIMIZATION_OPTIONS;
4445 #endif
4448 /* Decide whether we can make a sibling call to a function. DECL is the
4449 declaration of the function being targeted by the call and EXP is the
4450 CALL_EXPR representing the call. */
4452 static bool
4453 ix86_function_ok_for_sibcall (tree decl, tree exp)
4455 tree type, decl_or_type;
4456 rtx a, b;
4458 /* If we are generating position-independent code, we cannot sibcall
4459 optimize any indirect call, or a direct call to a global function,
4460 as the PLT requires %ebx be live. */
4461 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4462 return false;
4464 /* If we need to align the outgoing stack, then sibcalling would
4465 unalign the stack, which may break the called function. */
4466 if (ix86_minimum_incoming_stack_boundary (true)
4467 < PREFERRED_STACK_BOUNDARY)
4468 return false;
4470 if (decl)
4472 decl_or_type = decl;
4473 type = TREE_TYPE (decl);
4475 else
4477 /* We're looking at the CALL_EXPR, we need the type of the function. */
4478 type = CALL_EXPR_FN (exp); /* pointer expression */
4479 type = TREE_TYPE (type); /* pointer type */
4480 type = TREE_TYPE (type); /* function type */
4481 decl_or_type = type;
4484 /* Check that the return value locations are the same. Like
4485 if we are returning floats on the 80387 register stack, we cannot
4486 make a sibcall from a function that doesn't return a float to a
4487 function that does or, conversely, from a function that does return
4488 a float to a function that doesn't; the necessary stack adjustment
4489 would not be executed. This is also the place we notice
4490 differences in the return value ABI. Note that it is ok for one
4491 of the functions to have void return type as long as the return
4492 value of the other is passed in a register. */
4493 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4494 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4495 cfun->decl, false);
4496 if (STACK_REG_P (a) || STACK_REG_P (b))
4498 if (!rtx_equal_p (a, b))
4499 return false;
4501 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4503 else if (!rtx_equal_p (a, b))
4504 return false;
4506 if (TARGET_64BIT)
4508 /* The SYSV ABI has more call-clobbered registers;
4509 disallow sibcalls from MS to SYSV. */
4510 if (cfun->machine->call_abi == MS_ABI
4511 && ix86_function_type_abi (type) == SYSV_ABI)
4512 return false;
4514 else
4516 /* If this call is indirect, we'll need to be able to use a
4517 call-clobbered register for the address of the target function.
4518 Make sure that all such registers are not used for passing
4519 parameters. Note that DLLIMPORT functions are indirect. */
4520 if (!decl
4521 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4523 if (ix86_function_regparm (type, NULL) >= 3)
4525 /* ??? Need to count the actual number of registers to be used,
4526 not the possible number of registers. Fix later. */
4527 return false;
4532 /* Otherwise okay. That also includes certain types of indirect calls. */
4533 return true;
4536 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4537 and "sseregparm" calling convention attributes;
4538 arguments as in struct attribute_spec.handler. */
4540 static tree
4541 ix86_handle_cconv_attribute (tree *node, tree name,
4542 tree args,
4543 int flags ATTRIBUTE_UNUSED,
4544 bool *no_add_attrs)
4546 if (TREE_CODE (*node) != FUNCTION_TYPE
4547 && TREE_CODE (*node) != METHOD_TYPE
4548 && TREE_CODE (*node) != FIELD_DECL
4549 && TREE_CODE (*node) != TYPE_DECL)
4551 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4552 name);
4553 *no_add_attrs = true;
4554 return NULL_TREE;
4557 /* Can combine regparm with all attributes but fastcall. */
4558 if (is_attribute_p ("regparm", name))
4560 tree cst;
4562 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4564 error ("fastcall and regparm attributes are not compatible");
4567 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4569 error ("regparm and thiscall attributes are not compatible");
4572 cst = TREE_VALUE (args);
4573 if (TREE_CODE (cst) != INTEGER_CST)
4575 warning (OPT_Wattributes,
4576 "%qE attribute requires an integer constant argument",
4577 name);
4578 *no_add_attrs = true;
4580 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4582 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4583 name, REGPARM_MAX);
4584 *no_add_attrs = true;
4587 return NULL_TREE;
4590 if (TARGET_64BIT)
4592 /* Do not warn when emulating the MS ABI. */
4593 if ((TREE_CODE (*node) != FUNCTION_TYPE
4594 && TREE_CODE (*node) != METHOD_TYPE)
4595 || ix86_function_type_abi (*node) != MS_ABI)
4596 warning (OPT_Wattributes, "%qE attribute ignored",
4597 name);
4598 *no_add_attrs = true;
4599 return NULL_TREE;
4602 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4603 if (is_attribute_p ("fastcall", name))
4605 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4607 error ("fastcall and cdecl attributes are not compatible");
4609 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4611 error ("fastcall and stdcall attributes are not compatible");
4613 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4615 error ("fastcall and regparm attributes are not compatible");
4617 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4619 error ("fastcall and thiscall attributes are not compatible");
4623 /* Can combine stdcall with fastcall (redundant), regparm and
4624 sseregparm. */
4625 else if (is_attribute_p ("stdcall", name))
4627 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4629 error ("stdcall and cdecl attributes are not compatible");
4631 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4633 error ("stdcall and fastcall attributes are not compatible");
4635 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4637 error ("stdcall and thiscall attributes are not compatible");
4641 /* Can combine cdecl with regparm and sseregparm. */
4642 else if (is_attribute_p ("cdecl", name))
4644 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4646 error ("stdcall and cdecl attributes are not compatible");
4648 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4650 error ("fastcall and cdecl attributes are not compatible");
4652 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4654 error ("cdecl and thiscall attributes are not compatible");
4657 else if (is_attribute_p ("thiscall", name))
4659 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4660 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
4661 name);
4662 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4664 error ("stdcall and thiscall attributes are not compatible");
4666 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4668 error ("fastcall and thiscall attributes are not compatible");
4670 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4672 error ("cdecl and thiscall attributes are not compatible");
4676 /* Can combine sseregparm with all attributes. */
4678 return NULL_TREE;
4681 /* Return 0 if the attributes for two types are incompatible, 1 if they
4682 are compatible, and 2 if they are nearly compatible (which causes a
4683 warning to be generated). */
4685 static int
4686 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4688 /* Check for mismatch of non-default calling convention. */
4689 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4691 if (TREE_CODE (type1) != FUNCTION_TYPE
4692 && TREE_CODE (type1) != METHOD_TYPE)
4693 return 1;
4695 /* Check for mismatched fastcall/regparm types. */
4696 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4697 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4698 || (ix86_function_regparm (type1, NULL)
4699 != ix86_function_regparm (type2, NULL)))
4700 return 0;
4702 /* Check for mismatched sseregparm types. */
4703 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4704 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4705 return 0;
4707 /* Check for mismatched thiscall types. */
4708 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4709 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4710 return 0;
4712 /* Check for mismatched return types (cdecl vs stdcall). */
4713 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4714 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4715 return 0;
4717 return 1;
4720 /* Return the regparm value for a function with the indicated TYPE and DECL.
4721 DECL may be NULL when calling function indirectly
4722 or considering a libcall. */
4724 static int
4725 ix86_function_regparm (const_tree type, const_tree decl)
4727 tree attr;
4728 int regparm;
4730 if (TARGET_64BIT)
4731 return (ix86_function_type_abi (type) == SYSV_ABI
4732 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4734 regparm = ix86_regparm;
4735 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4736 if (attr)
4738 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4739 return regparm;
4742 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4743 return 2;
4745 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4746 return 1;
4748 /* Use register calling convention for local functions when possible. */
4749 if (decl
4750 && TREE_CODE (decl) == FUNCTION_DECL
4751 && optimize
4752 && !profile_flag)
4754 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4755 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4756 if (i && i->local)
4758 int local_regparm, globals = 0, regno;
4760 /* Make sure no regparm register is taken by a
4761 fixed register variable. */
4762 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4763 if (fixed_regs[local_regparm])
4764 break;
4766 /* We don't want to use regparm(3) for nested functions as
4767 these use a static chain pointer in the third argument. */
4768 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4769 local_regparm = 2;
4771 /* Each fixed register usage increases register pressure,
4772 so fewer registers should be used for argument passing.
4773 This functionality can be overridden by an explicit
4774 regparm value. */
4775 for (regno = 0; regno <= DI_REG; regno++)
4776 if (fixed_regs[regno])
4777 globals++;
4779 local_regparm
4780 = globals < local_regparm ? local_regparm - globals : 0;
4782 if (local_regparm > regparm)
4783 regparm = local_regparm;
4787 return regparm;
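/* For example, on ia32
     __attribute__((regparm(3))) int f (int a, int b, int c);
   passes all three arguments in registers (EAX, EDX and ECX, in the usual
   regparm order) instead of on the stack, while fastcall yields regparm 2
   and thiscall regparm 1, matching the early returns above.  */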
4790 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4791 DFmode (2) arguments in SSE registers for a function with the
4792 indicated TYPE and DECL. DECL may be NULL when calling function
4793 indirectly or considering a libcall. Otherwise return 0. */
4795 static int
4796 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4798 gcc_assert (!TARGET_64BIT);
4800 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4801 by the sseregparm attribute. */
4802 if (TARGET_SSEREGPARM
4803 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4805 if (!TARGET_SSE)
4807 if (warn)
4809 if (decl)
4810 error ("Calling %qD with attribute sseregparm without "
4811 "SSE/SSE2 enabled", decl);
4812 else
4813 error ("Calling %qT with attribute sseregparm without "
4814 "SSE/SSE2 enabled", type);
4816 return 0;
4819 return 2;
4822 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4823 (and DFmode for SSE2) arguments in SSE registers. */
4824 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4826 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4827 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4828 if (i && i->local)
4829 return TARGET_SSE2 ? 2 : 1;
4832 return 0;
4835 /* Return true if EAX is live at the start of the function. Used by
4836 ix86_expand_prologue to determine if we need special help before
4837 calling allocate_stack_worker. */
4839 static bool
4840 ix86_eax_live_at_start_p (void)
4842 /* Cheat. Don't bother working forward from ix86_function_regparm
4843 to the function type to whether an actual argument is located in
4844 eax. Instead just look at cfg info, which is still close enough
4845 to correct at this point. This gives false positives for broken
4846 functions that might use uninitialized data that happens to be
4847 allocated in eax, but who cares? */
4848 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4851 /* Value is the number of bytes of arguments automatically
4852 popped when returning from a subroutine call.
4853 FUNDECL is the declaration node of the function (as a tree),
4854 FUNTYPE is the data type of the function (as a tree),
4855 or for a library call it is an identifier node for the subroutine name.
4856 SIZE is the number of bytes of arguments passed on the stack.
4858 On the 80386, the RTD insn may be used to pop them if the number
4859 of args is fixed, but if the number is variable then the caller
4860 must pop them all. RTD can't be used for library calls now
4861 because the library is compiled with the Unix compiler.
4862 Use of RTD is a selectable option, since it is incompatible with
4863 standard Unix calling sequences. If the option is not selected,
4864 the caller must always pop the args.
4866 The attribute stdcall is equivalent to RTD on a per module basis. */
4868 static int
4869 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4871 int rtd;
4873 /* None of the 64-bit ABIs pop arguments. */
4874 if (TARGET_64BIT)
4875 return 0;
4877 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4879 /* Cdecl functions override -mrtd, and never pop the stack. */
4880 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4882 /* Stdcall and fastcall functions will pop the stack if not
4883 variable args. */
4884 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4885 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
4886 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
4887 rtd = 1;
4889 if (rtd && ! stdarg_p (funtype))
4890 return size;
4893 /* Lose any fake structure return argument if it is passed on the stack. */
4894 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4895 && !KEEP_AGGREGATE_RETURN_POINTER)
4897 int nregs = ix86_function_regparm (funtype, fundecl);
4898 if (nregs == 0)
4899 return GET_MODE_SIZE (Pmode);
4902 return 0;
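/* For example, a non-variadic
     __attribute__((stdcall)) void f (int a, int b);
   returns with "ret 8", popping its 8 bytes of stack arguments (SIZE above),
   whereas a cdecl function returns with a plain "ret" and leaves the cleanup
   to the caller.  */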
4905 /* Argument support functions. */
4907 /* Return true when register may be used to pass function parameters. */
4908 bool
4909 ix86_function_arg_regno_p (int regno)
4911 int i;
4912 const int *parm_regs;
4914 if (!TARGET_64BIT)
4916 if (TARGET_MACHO)
4917 return (regno < REGPARM_MAX
4918 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4919 else
4920 return (regno < REGPARM_MAX
4921 || (TARGET_MMX && MMX_REGNO_P (regno)
4922 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4923 || (TARGET_SSE && SSE_REGNO_P (regno)
4924 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4927 if (TARGET_MACHO)
4929 if (SSE_REGNO_P (regno) && TARGET_SSE)
4930 return true;
4932 else
4934 if (TARGET_SSE && SSE_REGNO_P (regno)
4935 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4936 return true;
4939 /* TODO: The function should depend on the current function ABI but
4940 builtins.c would need updating then. Therefore we use the
4941 default ABI. */
4943 /* RAX is used as hidden argument to va_arg functions. */
4944 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4945 return true;
4947 if (ix86_abi == MS_ABI)
4948 parm_regs = x86_64_ms_abi_int_parameter_registers;
4949 else
4950 parm_regs = x86_64_int_parameter_registers;
4951 for (i = 0; i < (ix86_abi == MS_ABI
4952 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4953 if (regno == parm_regs[i])
4954 return true;
4955 return false;
4958 /* Return true if we do not know how to pass TYPE solely in registers. */
4960 static bool
4961 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4963 if (must_pass_in_stack_var_size_or_pad (mode, type))
4964 return true;
4966 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4967 The layout_type routine is crafty and tries to trick us into passing
4968 currently unsupported vector types on the stack by using TImode. */
4969 return (!TARGET_64BIT && mode == TImode
4970 && type && TREE_CODE (type) != VECTOR_TYPE);
4973 /* Return the size, in bytes, of the area reserved for arguments passed
4974 in registers for the function represented by FNDECL, depending on the
4975 ABI used. */
4977 ix86_reg_parm_stack_space (const_tree fndecl)
4979 enum calling_abi call_abi = SYSV_ABI;
4980 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4981 call_abi = ix86_function_abi (fndecl);
4982 else
4983 call_abi = ix86_function_type_abi (fndecl);
4984 if (call_abi == MS_ABI)
4985 return 32;
4986 return 0;
4989 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
4990 call abi used. */
4991 enum calling_abi
4992 ix86_function_type_abi (const_tree fntype)
4994 if (TARGET_64BIT && fntype != NULL)
4996 enum calling_abi abi = ix86_abi;
4997 if (abi == SYSV_ABI)
4999 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
5000 abi = MS_ABI;
5002 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
5003 abi = SYSV_ABI;
5004 return abi;
5006 return ix86_abi;
5009 static bool
5010 ix86_function_ms_hook_prologue (const_tree fntype)
5012 if (!TARGET_64BIT)
5014 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
5016 if (decl_function_context (fntype) != NULL_TREE)
5018 error_at (DECL_SOURCE_LOCATION (fntype),
5019 "ms_hook_prologue is not compatible with nested function");
5022 return true;
5025 return false;
5028 static enum calling_abi
5029 ix86_function_abi (const_tree fndecl)
5031 if (! fndecl)
5032 return ix86_abi;
5033 return ix86_function_type_abi (TREE_TYPE (fndecl));
5036 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
5037 call abi used. */
5038 enum calling_abi
5039 ix86_cfun_abi (void)
5041 if (! cfun || ! TARGET_64BIT)
5042 return ix86_abi;
5043 return cfun->machine->call_abi;
5046 /* regclass.c */
5047 extern void init_regs (void);
5049 /* Implementation of the call ABI switching target hook. The call register
5050 sets specific to FNDECL are set up. See also CONDITIONAL_REGISTER_USAGE
5051 for more details. */
5052 void
5053 ix86_call_abi_override (const_tree fndecl)
5055 if (fndecl == NULL_TREE)
5056 cfun->machine->call_abi = ix86_abi;
5057 else
5058 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5061 /* The MS and SYSV ABIs have different sets of call-used registers. Avoid
5062 expensive re-initialization of init_regs each time we switch function
5063 context, since this is needed only during RTL expansion. */
5064 static void
5065 ix86_maybe_switch_abi (void)
5067 if (TARGET_64BIT &&
5068 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5069 reinit_regs ();
5072 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5073 for a call to a function whose data type is FNTYPE.
5074 For a library call, FNTYPE is 0. */
5076 void
5077 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5078 tree fntype, /* tree ptr for function decl */
5079 rtx libname, /* SYMBOL_REF of library name or 0 */
5080 tree fndecl)
5082 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
5083 memset (cum, 0, sizeof (*cum));
5085 if (fndecl)
5086 cum->call_abi = ix86_function_abi (fndecl);
5087 else
5088 cum->call_abi = ix86_function_type_abi (fntype);
5089 /* Set up the number of registers to use for passing arguments. */
5091 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5092 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5093 "or subtarget optimization implying it");
5094 cum->nregs = ix86_regparm;
5095 if (TARGET_64BIT)
5097 cum->nregs = (cum->call_abi == SYSV_ABI
5098 ? X86_64_REGPARM_MAX
5099 : X86_64_MS_REGPARM_MAX);
5101 if (TARGET_SSE)
5103 cum->sse_nregs = SSE_REGPARM_MAX;
5104 if (TARGET_64BIT)
5106 cum->sse_nregs = (cum->call_abi == SYSV_ABI
5107 ? X86_64_SSE_REGPARM_MAX
5108 : X86_64_MS_SSE_REGPARM_MAX);
5111 if (TARGET_MMX)
5112 cum->mmx_nregs = MMX_REGPARM_MAX;
5113 cum->warn_avx = true;
5114 cum->warn_sse = true;
5115 cum->warn_mmx = true;
5117 /* Because types might mismatch between caller and callee, we need to
5118 use the actual type of the function for local calls.
5119 FIXME: cgraph_analyze can be told to actually record if function uses
5120 va_start so for local functions maybe_vaarg can be made aggressive
5121 helping K&R code.
5122 FIXME: once the type system is fixed, we won't need this code anymore. */
5123 if (i && i->local)
5124 fntype = TREE_TYPE (fndecl);
5125 cum->maybe_vaarg = (fntype
5126 ? (!prototype_p (fntype) || stdarg_p (fntype))
5127 : !libname);
5129 if (!TARGET_64BIT)
5131 /* If there are variable arguments, then we won't pass anything
5132 in registers in 32-bit mode. */
5133 if (stdarg_p (fntype))
5135 cum->nregs = 0;
5136 cum->sse_nregs = 0;
5137 cum->mmx_nregs = 0;
5138 cum->warn_avx = 0;
5139 cum->warn_sse = 0;
5140 cum->warn_mmx = 0;
5141 return;
5144 /* Use ecx and edx registers if function has fastcall attribute,
5145 else look for regparm information. */
5146 if (fntype)
5148 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5150 cum->nregs = 1;
5151 cum->fastcall = 1; /* Same first register as in fastcall. */
5153 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5155 cum->nregs = 2;
5156 cum->fastcall = 1;
5158 else
5159 cum->nregs = ix86_function_regparm (fntype, fndecl);
5162 /* Set up the number of SSE registers used for passing SFmode
5163 and DFmode arguments. Warn for mismatching ABI. */
5164 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5168 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5169 But in the case of vector types, it is some vector mode.
5171 When we have only some of our vector isa extensions enabled, then there
5172 are some modes for which vector_mode_supported_p is false. For these
5173 modes, the generic vector support in gcc will choose some non-vector mode
5174 in order to implement the type. By computing the natural mode, we'll
5175 select the proper ABI location for the operand and not depend on whatever
5176 the middle-end decides to do with these vector types.
5178 The middle-end can't deal with vector types larger than 16 bytes. In this
5179 case, we return the original mode and warn about the ABI change if CUM isn't
5180 NULL. */
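/* For example, a vector type declared with __attribute__ ((vector_size (16)))
   on float has the natural mode V4SFmode here even when SSE is disabled and
   TYPE_MODE therefore picked some non-vector mode; the ABI slot is chosen from
   that natural mode, while 32-byte vectors without AVX fall back to TYPE_MODE
   and trigger the warning below.  */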
5182 static enum machine_mode
5183 type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum)
5185 enum machine_mode mode = TYPE_MODE (type);
5187 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5189 HOST_WIDE_INT size = int_size_in_bytes (type);
5190 if ((size == 8 || size == 16 || size == 32)
5191 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5192 && TYPE_VECTOR_SUBPARTS (type) > 1)
5194 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5196 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5197 mode = MIN_MODE_VECTOR_FLOAT;
5198 else
5199 mode = MIN_MODE_VECTOR_INT;
5201 /* Get the mode which has this inner mode and number of units. */
5202 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5203 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5204 && GET_MODE_INNER (mode) == innermode)
5206 if (size == 32 && !TARGET_AVX)
5208 static bool warnedavx;
5210 if (cum
5211 && !warnedavx
5212 && cum->warn_avx)
5214 warnedavx = true;
5215 warning (0, "AVX vector argument without AVX "
5216 "enabled changes the ABI");
5218 return TYPE_MODE (type);
5220 else
5221 return mode;
5224 gcc_unreachable ();
5228 return mode;
5231 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5232 this may not agree with the mode that the type system has chosen for the
5233 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5234 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5236 static rtx
5237 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5238 unsigned int regno)
5240 rtx tmp;
5242 if (orig_mode != BLKmode)
5243 tmp = gen_rtx_REG (orig_mode, regno);
5244 else
5246 tmp = gen_rtx_REG (mode, regno);
5247 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5248 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5251 return tmp;
5254 /* x86-64 register passing implementation. See the x86-64 ABI for details.
5255 The goal of this code is to classify each eightbyte of the incoming argument
5256 by register class and assign registers accordingly. */
5258 /* Return the union class of CLASS1 and CLASS2.
5259 See the x86-64 PS ABI for details. */
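/* For example, when a struct packs an int and a float into the same eightbyte,
   the integer and SSE classes merge to an integer class under rule #4 below,
   so that eightbyte is passed in a general-purpose register.  */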
5261 static enum x86_64_reg_class
5262 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5264 /* Rule #1: If both classes are equal, this is the resulting class. */
5265 if (class1 == class2)
5266 return class1;
5268 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5269 the other class. */
5270 if (class1 == X86_64_NO_CLASS)
5271 return class2;
5272 if (class2 == X86_64_NO_CLASS)
5273 return class1;
5275 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5276 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5277 return X86_64_MEMORY_CLASS;
5279 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5280 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5281 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5282 return X86_64_INTEGERSI_CLASS;
5283 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5284 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5285 return X86_64_INTEGER_CLASS;
5287 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5288 MEMORY is used. */
5289 if (class1 == X86_64_X87_CLASS
5290 || class1 == X86_64_X87UP_CLASS
5291 || class1 == X86_64_COMPLEX_X87_CLASS
5292 || class2 == X86_64_X87_CLASS
5293 || class2 == X86_64_X87UP_CLASS
5294 || class2 == X86_64_COMPLEX_X87_CLASS)
5295 return X86_64_MEMORY_CLASS;
5297 /* Rule #6: Otherwise class SSE is used. */
5298 return X86_64_SSE_CLASS;
5301 /* Classify the argument of type TYPE and mode MODE.
5302 CLASSES will be filled by the register class used to pass each word
5303 of the operand. The number of words is returned. In case the parameter
5304 should be passed in memory, 0 is returned. As a special case for zero
5305 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5307 BIT_OFFSET is used internally for handling records and specifies the offset,
5308 in bits modulo 256, of the value within the containing object, to avoid overflow cases.
5310 See the x86-64 PS ABI for details. */
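/* For example, struct { double d; int i; } occupies two eightbytes: the first
   is classified X86_64_SSEDF_CLASS and the second X86_64_INTEGERSI_CLASS, so
   the struct is passed in one SSE register and one integer register.  */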
5313 static int
5314 classify_argument (enum machine_mode mode, const_tree type,
5315 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5317 HOST_WIDE_INT bytes =
5318 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5319 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5321 /* Variable sized entities are always passed/returned in memory. */
5322 if (bytes < 0)
5323 return 0;
5325 if (mode != VOIDmode
5326 && targetm.calls.must_pass_in_stack (mode, type))
5327 return 0;
5329 if (type && AGGREGATE_TYPE_P (type))
5331 int i;
5332 tree field;
5333 enum x86_64_reg_class subclasses[MAX_CLASSES];
5335 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5336 if (bytes > 32)
5337 return 0;
5339 for (i = 0; i < words; i++)
5340 classes[i] = X86_64_NO_CLASS;
5342 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
5343 signal the memory class, so handle it as a special case. */
5344 if (!words)
5346 classes[0] = X86_64_NO_CLASS;
5347 return 1;
5350 /* Classify each field of record and merge classes. */
5351 switch (TREE_CODE (type))
5353 case RECORD_TYPE:
5354 /* And now merge the fields of structure. */
5355 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5357 if (TREE_CODE (field) == FIELD_DECL)
5359 int num;
5361 if (TREE_TYPE (field) == error_mark_node)
5362 continue;
5364 /* Bitfields are always classified as integer. Handle them
5365 early, since later code would consider them to be
5366 misaligned integers. */
5367 if (DECL_BIT_FIELD (field))
5369 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5370 i < ((int_bit_position (field) + (bit_offset % 64))
5371 + tree_low_cst (DECL_SIZE (field), 0)
5372 + 63) / 8 / 8; i++)
5373 classes[i] =
5374 merge_classes (X86_64_INTEGER_CLASS,
5375 classes[i]);
5377 else
5379 int pos;
5381 type = TREE_TYPE (field);
5383 /* Flexible array member is ignored. */
5384 if (TYPE_MODE (type) == BLKmode
5385 && TREE_CODE (type) == ARRAY_TYPE
5386 && TYPE_SIZE (type) == NULL_TREE
5387 && TYPE_DOMAIN (type) != NULL_TREE
5388 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5389 == NULL_TREE))
5391 static bool warned;
5393 if (!warned && warn_psabi)
5395 warned = true;
5396 inform (input_location,
5397 "The ABI of passing struct with"
5398 " a flexible array member has"
5399 " changed in GCC 4.4");
5401 continue;
5403 num = classify_argument (TYPE_MODE (type), type,
5404 subclasses,
5405 (int_bit_position (field)
5406 + bit_offset) % 256);
5407 if (!num)
5408 return 0;
5409 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5410 for (i = 0; i < num && (i + pos) < words; i++)
5411 classes[i + pos] =
5412 merge_classes (subclasses[i], classes[i + pos]);
5416 break;
5418 case ARRAY_TYPE:
5419 /* Arrays are handled as small records. */
5421 int num;
5422 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5423 TREE_TYPE (type), subclasses, bit_offset);
5424 if (!num)
5425 return 0;
5427 /* The partial classes are now full classes. */
5428 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5429 subclasses[0] = X86_64_SSE_CLASS;
5430 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5431 && !((bit_offset % 64) == 0 && bytes == 4))
5432 subclasses[0] = X86_64_INTEGER_CLASS;
5434 for (i = 0; i < words; i++)
5435 classes[i] = subclasses[i % num];
5437 break;
5439 case UNION_TYPE:
5440 case QUAL_UNION_TYPE:
5441 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
5443 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5445 if (TREE_CODE (field) == FIELD_DECL)
5447 int num;
5449 if (TREE_TYPE (field) == error_mark_node)
5450 continue;
5452 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5453 TREE_TYPE (field), subclasses,
5454 bit_offset);
5455 if (!num)
5456 return 0;
5457 for (i = 0; i < num; i++)
5458 classes[i] = merge_classes (subclasses[i], classes[i]);
5461 break;
5463 default:
5464 gcc_unreachable ();
5467 if (words > 2)
5469 /* When size > 16 bytes, if the first one isn't
5470 X86_64_SSE_CLASS or any other ones aren't
5471 X86_64_SSEUP_CLASS, everything should be passed in
5472 memory. */
5473 if (classes[0] != X86_64_SSE_CLASS)
5474 return 0;
5476 for (i = 1; i < words; i++)
5477 if (classes[i] != X86_64_SSEUP_CLASS)
5478 return 0;
5481 /* Final merger cleanup. */
5482 for (i = 0; i < words; i++)
5484 /* If one class is MEMORY, everything should be passed in
5485 memory. */
5486 if (classes[i] == X86_64_MEMORY_CLASS)
5487 return 0;
5489 /* The X86_64_SSEUP_CLASS should be always preceded by
5490 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5491 if (classes[i] == X86_64_SSEUP_CLASS
5492 && classes[i - 1] != X86_64_SSE_CLASS
5493 && classes[i - 1] != X86_64_SSEUP_CLASS)
5495 /* The first one should never be X86_64_SSEUP_CLASS. */
5496 gcc_assert (i != 0);
5497 classes[i] = X86_64_SSE_CLASS;
5500 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5501 everything should be passed in memory. */
5502 if (classes[i] == X86_64_X87UP_CLASS
5503 && (classes[i - 1] != X86_64_X87_CLASS))
5505 static bool warned;
5507 /* The first one should never be X86_64_X87UP_CLASS. */
5508 gcc_assert (i != 0);
5509 if (!warned && warn_psabi)
5511 warned = true;
5512 inform (input_location,
5513 "The ABI of passing union with long double"
5514 " has changed in GCC 4.4");
5516 return 0;
5519 return words;
5522 /* Compute the alignment needed. We align all types to their natural
5523 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
5524 if (mode != VOIDmode && mode != BLKmode)
5526 int mode_alignment = GET_MODE_BITSIZE (mode);
5528 if (mode == XFmode)
5529 mode_alignment = 128;
5530 else if (mode == XCmode)
5531 mode_alignment = 256;
5532 if (COMPLEX_MODE_P (mode))
5533 mode_alignment /= 2;
5534 /* Misaligned fields are always returned in memory. */
5535 if (bit_offset % mode_alignment)
5536 return 0;
5539 /* For V1xx modes, just use the base mode. */
5540 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5541 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5542 mode = GET_MODE_INNER (mode);
5544 /* Classification of atomic types. */
5545 switch (mode)
5547 case SDmode:
5548 case DDmode:
5549 classes[0] = X86_64_SSE_CLASS;
5550 return 1;
5551 case TDmode:
5552 classes[0] = X86_64_SSE_CLASS;
5553 classes[1] = X86_64_SSEUP_CLASS;
5554 return 2;
5555 case DImode:
5556 case SImode:
5557 case HImode:
5558 case QImode:
5559 case CSImode:
5560 case CHImode:
5561 case CQImode:
5563 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5565 if (size <= 32)
5567 classes[0] = X86_64_INTEGERSI_CLASS;
5568 return 1;
5570 else if (size <= 64)
5572 classes[0] = X86_64_INTEGER_CLASS;
5573 return 1;
5575 else if (size <= 64+32)
5577 classes[0] = X86_64_INTEGER_CLASS;
5578 classes[1] = X86_64_INTEGERSI_CLASS;
5579 return 2;
5581 else if (size <= 64+64)
5583 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5584 return 2;
5586 else
5587 gcc_unreachable ();
5589 case CDImode:
5590 case TImode:
5591 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5592 return 2;
5593 case COImode:
5594 case OImode:
5595 /* OImode shouldn't be used directly. */
5596 gcc_unreachable ();
5597 case CTImode:
5598 return 0;
5599 case SFmode:
5600 if (!(bit_offset % 64))
5601 classes[0] = X86_64_SSESF_CLASS;
5602 else
5603 classes[0] = X86_64_SSE_CLASS;
5604 return 1;
5605 case DFmode:
5606 classes[0] = X86_64_SSEDF_CLASS;
5607 return 1;
5608 case XFmode:
5609 classes[0] = X86_64_X87_CLASS;
5610 classes[1] = X86_64_X87UP_CLASS;
5611 return 2;
5612 case TFmode:
5613 classes[0] = X86_64_SSE_CLASS;
5614 classes[1] = X86_64_SSEUP_CLASS;
5615 return 2;
5616 case SCmode:
5617 classes[0] = X86_64_SSE_CLASS;
5618 if (!(bit_offset % 64))
5619 return 1;
5620 else
5622 static bool warned;
5624 if (!warned && warn_psabi)
5626 warned = true;
5627 inform (input_location,
5628 "The ABI of passing structure with complex float"
5629 " member has changed in GCC 4.4");
5631 classes[1] = X86_64_SSESF_CLASS;
5632 return 2;
5634 case DCmode:
5635 classes[0] = X86_64_SSEDF_CLASS;
5636 classes[1] = X86_64_SSEDF_CLASS;
5637 return 2;
5638 case XCmode:
5639 classes[0] = X86_64_COMPLEX_X87_CLASS;
5640 return 1;
5641 case TCmode:
5642 /* This mode is larger than 16 bytes. */
5643 return 0;
5644 case V8SFmode:
5645 case V8SImode:
5646 case V32QImode:
5647 case V16HImode:
5648 case V4DFmode:
5649 case V4DImode:
5650 classes[0] = X86_64_SSE_CLASS;
5651 classes[1] = X86_64_SSEUP_CLASS;
5652 classes[2] = X86_64_SSEUP_CLASS;
5653 classes[3] = X86_64_SSEUP_CLASS;
5654 return 4;
5655 case V4SFmode:
5656 case V4SImode:
5657 case V16QImode:
5658 case V8HImode:
5659 case V2DFmode:
5660 case V2DImode:
5661 classes[0] = X86_64_SSE_CLASS;
5662 classes[1] = X86_64_SSEUP_CLASS;
5663 return 2;
5664 case V1TImode:
5665 case V1DImode:
5666 case V2SFmode:
5667 case V2SImode:
5668 case V4HImode:
5669 case V8QImode:
5670 classes[0] = X86_64_SSE_CLASS;
5671 return 1;
5672 case BLKmode:
5673 case VOIDmode:
5674 return 0;
5675 default:
5676 gcc_assert (VECTOR_MODE_P (mode));
5678 if (bytes > 16)
5679 return 0;
5681 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5683 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5684 classes[0] = X86_64_INTEGERSI_CLASS;
5685 else
5686 classes[0] = X86_64_INTEGER_CLASS;
5687 classes[1] = X86_64_INTEGER_CLASS;
5688 return 1 + (bytes > 8);
5692 /* Examine the argument and set the number of registers required in each
5693 class. Return 0 iff the parameter should be passed in memory. */
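/* For example, for struct { double d; int i; } this sets *SSE_NREGS and
   *INT_NREGS to 1 each and returns 1; when classify_argument decides the
   argument must live in memory, 0 is returned and the argument goes on the
   stack.  */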
5694 static int
5695 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5696 int *int_nregs, int *sse_nregs)
5698 enum x86_64_reg_class regclass[MAX_CLASSES];
5699 int n = classify_argument (mode, type, regclass, 0);
5701 *int_nregs = 0;
5702 *sse_nregs = 0;
5703 if (!n)
5704 return 0;
5705 for (n--; n >= 0; n--)
5706 switch (regclass[n])
5708 case X86_64_INTEGER_CLASS:
5709 case X86_64_INTEGERSI_CLASS:
5710 (*int_nregs)++;
5711 break;
5712 case X86_64_SSE_CLASS:
5713 case X86_64_SSESF_CLASS:
5714 case X86_64_SSEDF_CLASS:
5715 (*sse_nregs)++;
5716 break;
5717 case X86_64_NO_CLASS:
5718 case X86_64_SSEUP_CLASS:
5719 break;
5720 case X86_64_X87_CLASS:
5721 case X86_64_X87UP_CLASS:
5722 if (!in_return)
5723 return 0;
5724 break;
5725 case X86_64_COMPLEX_X87_CLASS:
5726 return in_return ? 2 : 0;
5727 case X86_64_MEMORY_CLASS:
5728 gcc_unreachable ();
5730 return 1;
5733 /* Construct container for the argument used by GCC interface. See
5734 FUNCTION_ARG for the detailed description. */
5736 static rtx
5737 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5738 const_tree type, int in_return, int nintregs, int nsseregs,
5739 const int *intreg, int sse_regno)
5741 /* The following variables hold the static issued_error state. */
5742 static bool issued_sse_arg_error;
5743 static bool issued_sse_ret_error;
5744 static bool issued_x87_ret_error;
5746 enum machine_mode tmpmode;
5747 int bytes =
5748 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5749 enum x86_64_reg_class regclass[MAX_CLASSES];
5750 int n;
5751 int i;
5752 int nexps = 0;
5753 int needed_sseregs, needed_intregs;
5754 rtx exp[MAX_CLASSES];
5755 rtx ret;
5757 n = classify_argument (mode, type, regclass, 0);
5758 if (!n)
5759 return NULL;
5760 if (!examine_argument (mode, type, in_return, &needed_intregs,
5761 &needed_sseregs))
5762 return NULL;
5763 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5764 return NULL;
5766 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5767 some less clueful developer tries to use floating-point anyway. */
5768 if (needed_sseregs && !TARGET_SSE)
5770 if (in_return)
5772 if (!issued_sse_ret_error)
5774 error ("SSE register return with SSE disabled");
5775 issued_sse_ret_error = true;
5778 else if (!issued_sse_arg_error)
5780 error ("SSE register argument with SSE disabled");
5781 issued_sse_arg_error = true;
5783 return NULL;
5786 /* Likewise, error if the ABI requires us to return values in the
5787 x87 registers and the user specified -mno-80387. */
5788 if (!TARGET_80387 && in_return)
5789 for (i = 0; i < n; i++)
5790 if (regclass[i] == X86_64_X87_CLASS
5791 || regclass[i] == X86_64_X87UP_CLASS
5792 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5794 if (!issued_x87_ret_error)
5796 error ("x87 register return with x87 disabled");
5797 issued_x87_ret_error = true;
5799 return NULL;
5802 /* First construct the simple cases. Avoid SCmode, since we want to use a
5803 single register to pass this type. */
5804 if (n == 1 && mode != SCmode)
5805 switch (regclass[0])
5807 case X86_64_INTEGER_CLASS:
5808 case X86_64_INTEGERSI_CLASS:
5809 return gen_rtx_REG (mode, intreg[0]);
5810 case X86_64_SSE_CLASS:
5811 case X86_64_SSESF_CLASS:
5812 case X86_64_SSEDF_CLASS:
5813 if (mode != BLKmode)
5814 return gen_reg_or_parallel (mode, orig_mode,
5815 SSE_REGNO (sse_regno));
5816 break;
5817 case X86_64_X87_CLASS:
5818 case X86_64_COMPLEX_X87_CLASS:
5819 return gen_rtx_REG (mode, FIRST_STACK_REG);
5820 case X86_64_NO_CLASS:
5821 /* Zero sized array, struct or class. */
5822 return NULL;
5823 default:
5824 gcc_unreachable ();
5826 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5827 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5828 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5829 if (n == 4
5830 && regclass[0] == X86_64_SSE_CLASS
5831 && regclass[1] == X86_64_SSEUP_CLASS
5832 && regclass[2] == X86_64_SSEUP_CLASS
5833 && regclass[3] == X86_64_SSEUP_CLASS
5834 && mode != BLKmode)
5835 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5837 if (n == 2
5838 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5839 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5840 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5841 && regclass[1] == X86_64_INTEGER_CLASS
5842 && (mode == CDImode || mode == TImode || mode == TFmode)
5843 && intreg[0] + 1 == intreg[1])
5844 return gen_rtx_REG (mode, intreg[0]);
5846 /* Otherwise figure out the entries of the PARALLEL. */
5847 for (i = 0; i < n; i++)
5849 int pos;
5851 switch (regclass[i])
5853 case X86_64_NO_CLASS:
5854 break;
5855 case X86_64_INTEGER_CLASS:
5856 case X86_64_INTEGERSI_CLASS:
5857 /* Merge TImodes on aligned occasions here too. */
5858 if (i * 8 + 8 > bytes)
5859 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5860 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5861 tmpmode = SImode;
5862 else
5863 tmpmode = DImode;
5864 /* We've requested 24 bytes we don't have mode for. Use DImode. */
5865 if (tmpmode == BLKmode)
5866 tmpmode = DImode;
5867 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5868 gen_rtx_REG (tmpmode, *intreg),
5869 GEN_INT (i*8));
5870 intreg++;
5871 break;
5872 case X86_64_SSESF_CLASS:
5873 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5874 gen_rtx_REG (SFmode,
5875 SSE_REGNO (sse_regno)),
5876 GEN_INT (i*8));
5877 sse_regno++;
5878 break;
5879 case X86_64_SSEDF_CLASS:
5880 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5881 gen_rtx_REG (DFmode,
5882 SSE_REGNO (sse_regno)),
5883 GEN_INT (i*8));
5884 sse_regno++;
5885 break;
5886 case X86_64_SSE_CLASS:
5887 pos = i;
5888 switch (n)
5890 case 1:
5891 tmpmode = DImode;
5892 break;
5893 case 2:
5894 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5896 tmpmode = TImode;
5897 i++;
5899 else
5900 tmpmode = DImode;
5901 break;
5902 case 4:
5903 gcc_assert (i == 0
5904 && regclass[1] == X86_64_SSEUP_CLASS
5905 && regclass[2] == X86_64_SSEUP_CLASS
5906 && regclass[3] == X86_64_SSEUP_CLASS);
5907 tmpmode = OImode;
5908 i += 3;
5909 break;
5910 default:
5911 gcc_unreachable ();
5913 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5914 gen_rtx_REG (tmpmode,
5915 SSE_REGNO (sse_regno)),
5916 GEN_INT (pos*8));
5917 sse_regno++;
5918 break;
5919 default:
5920 gcc_unreachable ();
5924 /* Empty aligned struct, union or class. */
5925 if (nexps == 0)
5926 return NULL;
5928 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5929 for (i = 0; i < nexps; i++)
5930 XVECEXP (ret, 0, i) = exp [i];
5931 return ret;
5934 /* Update the data in CUM to advance over an argument of mode MODE
5935 and data type TYPE. (TYPE is null for libcalls where that information
5936 may not be available.) */
5938 static void
5939 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5940 const_tree type, HOST_WIDE_INT bytes,
5941 HOST_WIDE_INT words)
5943 switch (mode)
5945 default:
5946 break;
5948 case BLKmode:
5949 if (bytes < 0)
5950 break;
5951 /* FALLTHRU */
5953 case DImode:
5954 case SImode:
5955 case HImode:
5956 case QImode:
5957 cum->words += words;
5958 cum->nregs -= words;
5959 cum->regno += words;
5961 if (cum->nregs <= 0)
5963 cum->nregs = 0;
5964 cum->regno = 0;
5966 break;
5968 case OImode:
5969 /* OImode shouldn't be used directly. */
5970 gcc_unreachable ();
5972 case DFmode:
5973 if (cum->float_in_sse < 2)
5974 break;
5975 case SFmode:
5976 if (cum->float_in_sse < 1)
5977 break;
5978 /* FALLTHRU */
5980 case V8SFmode:
5981 case V8SImode:
5982 case V32QImode:
5983 case V16HImode:
5984 case V4DFmode:
5985 case V4DImode:
5986 case TImode:
5987 case V16QImode:
5988 case V8HImode:
5989 case V4SImode:
5990 case V2DImode:
5991 case V4SFmode:
5992 case V2DFmode:
5993 if (!type || !AGGREGATE_TYPE_P (type))
5995 cum->sse_words += words;
5996 cum->sse_nregs -= 1;
5997 cum->sse_regno += 1;
5998 if (cum->sse_nregs <= 0)
6000 cum->sse_nregs = 0;
6001 cum->sse_regno = 0;
6004 break;
6006 case V8QImode:
6007 case V4HImode:
6008 case V2SImode:
6009 case V2SFmode:
6010 case V1TImode:
6011 case V1DImode:
6012 if (!type || !AGGREGATE_TYPE_P (type))
6014 cum->mmx_words += words;
6015 cum->mmx_nregs -= 1;
6016 cum->mmx_regno += 1;
6017 if (cum->mmx_nregs <= 0)
6019 cum->mmx_nregs = 0;
6020 cum->mmx_regno = 0;
6023 break;
6027 static void
6028 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6029 const_tree type, HOST_WIDE_INT words, bool named)
6031 int int_nregs, sse_nregs;
6033 /* Unnamed 256bit vector mode parameters are passed on stack. */
6034 if (!named && VALID_AVX256_REG_MODE (mode))
6035 return;
6037 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
6038 cum->words += words;
6039 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6041 cum->nregs -= int_nregs;
6042 cum->sse_nregs -= sse_nregs;
6043 cum->regno += int_nregs;
6044 cum->sse_regno += sse_nregs;
6046 else
6047 cum->words += words;
6050 static void
6051 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6052 HOST_WIDE_INT words)
6054 /* Otherwise, this should be passed indirect. */
6055 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6057 cum->words += words;
6058 if (cum->nregs > 0)
6060 cum->nregs -= 1;
6061 cum->regno += 1;
6065 /* Update the data in CUM to advance over an argument of mode MODE and
6066 data type TYPE. (TYPE is null for libcalls where that information
6067 may not be available.) */
6069 static void
6070 ix86_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6071 const_tree type, bool named)
6073 HOST_WIDE_INT bytes, words;
6075 if (mode == BLKmode)
6076 bytes = int_size_in_bytes (type);
6077 else
6078 bytes = GET_MODE_SIZE (mode);
6079 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6081 if (type)
6082 mode = type_natural_mode (type, NULL);
6084 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6085 function_arg_advance_ms_64 (cum, bytes, words);
6086 else if (TARGET_64BIT)
6087 function_arg_advance_64 (cum, mode, type, words, named);
6088 else
6089 function_arg_advance_32 (cum, mode, type, bytes, words);
6092 /* Define where to put the arguments to a function.
6093 Value is zero to push the argument on the stack,
6094 or a hard register in which to store the argument.
6096 MODE is the argument's machine mode.
6097 TYPE is the data type of the argument (as a tree).
6098 This is null for libcalls where that information may
6099 not be available.
6100 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6101 the preceding args and about the function being called.
6102 NAMED is nonzero if this argument is a named parameter
6103 (otherwise it is an extra parameter matching an ellipsis). */
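/* For example, with -mregparm=3 the first three integer arguments are passed
   in %eax, %edx and %ecx, while the fastcall attribute puts the first two in
   %ecx and %edx; SSE and MMX vector arguments use %xmm and %mm registers when
   the corresponding ISA is enabled, as handled below.  */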
6105 static rtx
6106 function_arg_32 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6107 enum machine_mode orig_mode, const_tree type,
6108 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
6110 static bool warnedsse, warnedmmx;
6112 /* Avoid the AL settings for the Unix64 ABI. */
6113 if (mode == VOIDmode)
6114 return constm1_rtx;
6116 switch (mode)
6118 default:
6119 break;
6121 case BLKmode:
6122 if (bytes < 0)
6123 break;
6124 /* FALLTHRU */
6125 case DImode:
6126 case SImode:
6127 case HImode:
6128 case QImode:
6129 if (words <= cum->nregs)
6131 int regno = cum->regno;
6133 /* Fastcall allocates the first two DWORD (SImode) or
6134 smaller arguments to ECX and EDX if the argument isn't an
6135 aggregate type. */
6136 if (cum->fastcall)
6138 if (mode == BLKmode
6139 || mode == DImode
6140 || (type && AGGREGATE_TYPE_P (type)))
6141 break;
6143 /* ECX not EAX is the first allocated register. */
6144 if (regno == AX_REG)
6145 regno = CX_REG;
6147 return gen_rtx_REG (mode, regno);
6149 break;
6151 case DFmode:
6152 if (cum->float_in_sse < 2)
6153 break;
6154 case SFmode:
6155 if (cum->float_in_sse < 1)
6156 break;
6157 /* FALLTHRU */
6158 case TImode:
6159 /* In 32bit, we pass TImode in xmm registers. */
6160 case V16QImode:
6161 case V8HImode:
6162 case V4SImode:
6163 case V2DImode:
6164 case V4SFmode:
6165 case V2DFmode:
6166 if (!type || !AGGREGATE_TYPE_P (type))
6168 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6170 warnedsse = true;
6171 warning (0, "SSE vector argument without SSE enabled "
6172 "changes the ABI");
6174 if (cum->sse_nregs)
6175 return gen_reg_or_parallel (mode, orig_mode,
6176 cum->sse_regno + FIRST_SSE_REG);
6178 break;
6180 case OImode:
6181 /* OImode shouldn't be used directly. */
6182 gcc_unreachable ();
6184 case V8SFmode:
6185 case V8SImode:
6186 case V32QImode:
6187 case V16HImode:
6188 case V4DFmode:
6189 case V4DImode:
6190 if (!type || !AGGREGATE_TYPE_P (type))
6192 if (cum->sse_nregs)
6193 return gen_reg_or_parallel (mode, orig_mode,
6194 cum->sse_regno + FIRST_SSE_REG);
6196 break;
6198 case V8QImode:
6199 case V4HImode:
6200 case V2SImode:
6201 case V2SFmode:
6202 case V1TImode:
6203 case V1DImode:
6204 if (!type || !AGGREGATE_TYPE_P (type))
6206 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6208 warnedmmx = true;
6209 warning (0, "MMX vector argument without MMX enabled "
6210 "changes the ABI");
6212 if (cum->mmx_nregs)
6213 return gen_reg_or_parallel (mode, orig_mode,
6214 cum->mmx_regno + FIRST_MMX_REG);
6216 break;
6219 return NULL_RTX;
6222 static rtx
6223 function_arg_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6224 enum machine_mode orig_mode, const_tree type, bool named)
6226 /* Handle a hidden AL argument containing number of registers
6227 for varargs x86-64 functions. */
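/* The SysV x86-64 ABI requires the caller of a varargs function to pass the
   number of SSE registers actually used for the call in %al, so a VOIDmode
   query returns that count (or the maximum when cum->sse_nregs has gone
   negative) instead of an argument slot.  */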
6228 if (mode == VOIDmode)
6229 return GEN_INT (cum->maybe_vaarg
6230 ? (cum->sse_nregs < 0
6231 ? X86_64_SSE_REGPARM_MAX
6232 : cum->sse_regno)
6233 : -1);
6235 switch (mode)
6237 default:
6238 break;
6240 case V8SFmode:
6241 case V8SImode:
6242 case V32QImode:
6243 case V16HImode:
6244 case V4DFmode:
6245 case V4DImode:
6246 /* Unnamed 256bit vector mode parameters are passed on stack. */
6247 if (!named)
6248 return NULL;
6249 break;
6252 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6253 cum->sse_nregs,
6254 &x86_64_int_parameter_registers [cum->regno],
6255 cum->sse_regno);
6258 static rtx
6259 function_arg_ms_64 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
6260 enum machine_mode orig_mode, bool named,
6261 HOST_WIDE_INT bytes)
6263 unsigned int regno;
6265 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6266 We use a value of -2 to specify that the current function call is MS ABI. */
6267 if (mode == VOIDmode)
6268 return GEN_INT (-2);
6270 /* If we've run out of registers, it goes on the stack. */
6271 if (cum->nregs == 0)
6272 return NULL_RTX;
6274 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6276 /* Only floating point modes are passed in anything but integer regs. */
6277 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6279 if (named)
6280 regno = cum->regno + FIRST_SSE_REG;
6281 else
6283 rtx t1, t2;
6285 /* Unnamed floating parameters are passed in both the
6286 SSE and integer registers. */
6287 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6288 t2 = gen_rtx_REG (mode, regno);
6289 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6290 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6291 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6294 /* Handle aggregate types passed in registers. */
6295 if (orig_mode == BLKmode)
6297 if (bytes > 0 && bytes <= 8)
6298 mode = (bytes > 4 ? DImode : SImode);
6299 if (mode == BLKmode)
6300 mode = DImode;
6303 return gen_reg_or_parallel (mode, orig_mode, regno);
6306 /* Return where to put the arguments to a function.
6307 Return zero to push the argument on the stack, or a hard register in which to store the argument.
6309 MODE is the argument's machine mode. TYPE is the data type of the
6310 argument. It is null for libcalls where that information may not be
6311 available. CUM gives information about the preceding args and about
6312 the function being called. NAMED is nonzero if this argument is a
6313 named parameter (otherwise it is an extra parameter matching an
6314 ellipsis). */
6316 static rtx
6317 ix86_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode omode,
6318 const_tree type, bool named)
6320 enum machine_mode mode = omode;
6321 HOST_WIDE_INT bytes, words;
6323 if (mode == BLKmode)
6324 bytes = int_size_in_bytes (type);
6325 else
6326 bytes = GET_MODE_SIZE (mode);
6327 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6329 /* To simplify the code below, represent vector types with a vector mode
6330 even if MMX/SSE are not active. */
6331 if (type && TREE_CODE (type) == VECTOR_TYPE)
6332 mode = type_natural_mode (type, cum);
6334 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6335 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6336 else if (TARGET_64BIT)
6337 return function_arg_64 (cum, mode, omode, type, named);
6338 else
6339 return function_arg_32 (cum, mode, omode, type, bytes, words);
6342 /* A C expression that indicates when an argument must be passed by
6343 reference. If nonzero for an argument, a copy of that argument is
6344 made in memory and a pointer to the argument is passed instead of
6345 the argument itself. The pointer is passed in whatever way is
6346 appropriate for passing a pointer to that type. */
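/* For example, under the Microsoft x64 convention an __m128 argument or a
   12-byte struct is passed by reference, while 1, 2, 4 and 8 byte values are
   passed directly in a register, which is what the size switch below
   implements.  */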
6348 static bool
6349 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6350 enum machine_mode mode ATTRIBUTE_UNUSED,
6351 const_tree type, bool named ATTRIBUTE_UNUSED)
6353 /* See Windows x64 Software Convention. */
6354 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6356 int msize = (int) GET_MODE_SIZE (mode);
6357 if (type)
6359 /* Arrays are passed by reference. */
6360 if (TREE_CODE (type) == ARRAY_TYPE)
6361 return true;
6363 if (AGGREGATE_TYPE_P (type))
6365 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6366 are passed by reference. */
6367 msize = int_size_in_bytes (type);
6371 /* __m128 is passed by reference. */
6372 switch (msize) {
6373 case 1: case 2: case 4: case 8:
6374 break;
6375 default:
6376 return true;
6379 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6380 return 1;
6382 return 0;
6385 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
6386 ABI. */
6387 static bool
6388 contains_aligned_value_p (tree type)
6390 enum machine_mode mode = TYPE_MODE (type);
6391 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6392 || mode == TDmode
6393 || mode == TFmode
6394 || mode == TCmode)
6395 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6396 return true;
6397 if (TYPE_ALIGN (type) < 128)
6398 return false;
6400 if (AGGREGATE_TYPE_P (type))
6402 /* Walk the aggregates recursively. */
6403 switch (TREE_CODE (type))
6405 case RECORD_TYPE:
6406 case UNION_TYPE:
6407 case QUAL_UNION_TYPE:
6409 tree field;
6411 /* Walk all the structure fields. */
6412 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6414 if (TREE_CODE (field) == FIELD_DECL
6415 && contains_aligned_value_p (TREE_TYPE (field)))
6416 return true;
6418 break;
6421 case ARRAY_TYPE:
6422 /* Just for use if some languages pass arrays by value. */
6423 if (contains_aligned_value_p (TREE_TYPE (type)))
6424 return true;
6425 break;
6427 default:
6428 gcc_unreachable ();
6431 return false;
6434 /* Gives the alignment boundary, in bits, of an argument with the
6435 specified mode and type. */
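/* For example, on ia32 a plain int or double argument is aligned to
   PARM_BOUNDARY (32 bits), whereas an __m128 argument keeps its natural
   128-bit alignment when SSE is enabled, as do _Decimal128 and __float128.  */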
6437 int
6438 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6440 int align;
6441 if (type)
6443 /* Since the main variant type is used for the call, convert the type to
6444 its main variant. */
6445 type = TYPE_MAIN_VARIANT (type);
6446 align = TYPE_ALIGN (type);
6448 else
6449 align = GET_MODE_ALIGNMENT (mode);
6450 if (align < PARM_BOUNDARY)
6451 align = PARM_BOUNDARY;
6452 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6453 natural boundaries. */
6454 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6456 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6457 make an exception for SSE modes since these require 128bit
6458 alignment.
6460 The handling here differs from field_alignment. ICC aligns MMX
6461 arguments to 4 byte boundaries, while structure fields are aligned
6462 to 8 byte boundaries. */
6463 if (!type)
6465 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6466 align = PARM_BOUNDARY;
6468 else
6470 if (!contains_aligned_value_p (type))
6471 align = PARM_BOUNDARY;
6474 if (align > BIGGEST_ALIGNMENT)
6475 align = BIGGEST_ALIGNMENT;
6476 return align;
6479 /* Return true if N is a possible register number of function value. */
6481 static bool
6482 ix86_function_value_regno_p (const unsigned int regno)
6484 switch (regno)
6486 case 0:
6487 return true;
6489 case FIRST_FLOAT_REG:
6490 /* TODO: The function should depend on current function ABI but
6491 builtins.c would need updating then. Therefore we use the
6492 default ABI. */
6493 if (TARGET_64BIT && ix86_abi == MS_ABI)
6494 return false;
6495 return TARGET_FLOAT_RETURNS_IN_80387;
6497 case FIRST_SSE_REG:
6498 return TARGET_SSE;
6500 case FIRST_MMX_REG:
6501 if (TARGET_MACHO || TARGET_64BIT)
6502 return false;
6503 return TARGET_MMX;
6506 return false;
6509 /* Define how to find the value returned by a function.
6510 VALTYPE is the data type of the value (as a tree).
6511 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6512 otherwise, FUNC is 0. */
6514 static rtx
6515 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6516 const_tree fntype, const_tree fn)
6518 unsigned int regno;
6520 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6521 we normally prevent this case when mmx is not available. However
6522 some ABIs may require the result to be returned like DImode. */
6523 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6524 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6526 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6527 we prevent this case when sse is not available. However some ABIs
6528 may require the result to be returned like integer TImode. */
6529 else if (mode == TImode
6530 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6531 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6533 /* 32-byte vector modes in %ymm0. */
6534 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6535 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6537 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6538 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6539 regno = FIRST_FLOAT_REG;
6540 else
6541 /* Most things go in %eax. */
6542 regno = AX_REG;
6544 /* Override FP return register with %xmm0 for local functions when
6545 SSE math is enabled or for functions with sseregparm attribute. */
6546 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6548 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6549 if ((sse_level >= 1 && mode == SFmode)
6550 || (sse_level == 2 && mode == DFmode))
6551 regno = FIRST_SSE_REG;
6554 /* OImode shouldn't be used directly. */
6555 gcc_assert (mode != OImode);
6557 return gen_rtx_REG (orig_mode, regno);
6560 static rtx
6561 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6562 const_tree valtype)
6564 rtx ret;
6566 /* Handle libcalls, which don't provide a type node. */
6567 if (valtype == NULL)
6569 switch (mode)
6571 case SFmode:
6572 case SCmode:
6573 case DFmode:
6574 case DCmode:
6575 case TFmode:
6576 case SDmode:
6577 case DDmode:
6578 case TDmode:
6579 return gen_rtx_REG (mode, FIRST_SSE_REG);
6580 case XFmode:
6581 case XCmode:
6582 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6583 case TCmode:
6584 return NULL;
6585 default:
6586 return gen_rtx_REG (mode, AX_REG);
6590 ret = construct_container (mode, orig_mode, valtype, 1,
6591 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6592 x86_64_int_return_registers, 0);
6594 /* For zero sized structures, construct_container returns NULL, but we
6595 need to keep rest of compiler happy by returning meaningful value. */
6596 if (!ret)
6597 ret = gen_rtx_REG (orig_mode, AX_REG);
6599 return ret;
6602 static rtx
6603 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6605 unsigned int regno = AX_REG;
6607 if (TARGET_SSE)
6609 switch (GET_MODE_SIZE (mode))
6611 case 16:
6612 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6613 && !COMPLEX_MODE_P (mode))
6614 regno = FIRST_SSE_REG;
6615 break;
6616 case 8:
6617 case 4:
6618 if (mode == SFmode || mode == DFmode)
6619 regno = FIRST_SSE_REG;
6620 break;
6621 default:
6622 break;
6625 return gen_rtx_REG (orig_mode, regno);
6628 static rtx
6629 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6630 enum machine_mode orig_mode, enum machine_mode mode)
6632 const_tree fn, fntype;
6634 fn = NULL_TREE;
6635 if (fntype_or_decl && DECL_P (fntype_or_decl))
6636 fn = fntype_or_decl;
6637 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6639 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6640 return function_value_ms_64 (orig_mode, mode);
6641 else if (TARGET_64BIT)
6642 return function_value_64 (orig_mode, mode, valtype);
6643 else
6644 return function_value_32 (orig_mode, mode, fntype, fn);
6647 static rtx
6648 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6649 bool outgoing ATTRIBUTE_UNUSED)
6651 enum machine_mode mode, orig_mode;
6653 orig_mode = TYPE_MODE (valtype);
6654 mode = type_natural_mode (valtype, NULL);
6655 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6658 rtx
6659 ix86_libcall_value (enum machine_mode mode)
6661 return ix86_function_value_1 (NULL, NULL, mode, mode);
6664 /* Return true iff type is returned in memory. */
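/* For example, on ia32 an 8-byte vector is returned in %mm0 only when MMX is
   enabled and a 16-byte vector in %xmm0 only when SSE is enabled; otherwise,
   and for aggregates larger than 12 bytes other than XFmode, the value is
   returned in memory.  */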
6666 static int ATTRIBUTE_UNUSED
6667 return_in_memory_32 (const_tree type, enum machine_mode mode)
6669 HOST_WIDE_INT size;
6671 if (mode == BLKmode)
6672 return 1;
6674 size = int_size_in_bytes (type);
6676 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6677 return 0;
6679 if (VECTOR_MODE_P (mode) || mode == TImode)
6681 /* User-created vectors small enough to fit in EAX. */
6682 if (size < 8)
6683 return 0;
6685 /* MMX/3dNow values are returned in MM0,
6686 except when it doesn't exist. */
6687 if (size == 8)
6688 return (TARGET_MMX ? 0 : 1);
6690 /* SSE values are returned in XMM0, except when it doesn't exist. */
6691 if (size == 16)
6692 return (TARGET_SSE ? 0 : 1);
6694 /* AVX values are returned in YMM0, except when it doesn't exist. */
6695 if (size == 32)
6696 return TARGET_AVX ? 0 : 1;
6699 if (mode == XFmode)
6700 return 0;
6702 if (size > 12)
6703 return 1;
6705 /* OImode shouldn't be used directly. */
6706 gcc_assert (mode != OImode);
6708 return 0;
6711 static int ATTRIBUTE_UNUSED
6712 return_in_memory_64 (const_tree type, enum machine_mode mode)
6714 int needed_intregs, needed_sseregs;
6715 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6718 static int ATTRIBUTE_UNUSED
6719 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6721 HOST_WIDE_INT size = int_size_in_bytes (type);
6723 /* __m128 is returned in xmm0. */
6724 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6725 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6726 return 0;
6728 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6729 return (size != 1 && size != 2 && size != 4 && size != 8);
6732 static bool
6733 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6735 #ifdef SUBTARGET_RETURN_IN_MEMORY
6736 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6737 #else
6738 const enum machine_mode mode = type_natural_mode (type, NULL);
6740 if (TARGET_64BIT)
6742 if (ix86_function_type_abi (fntype) == MS_ABI)
6743 return return_in_memory_ms_64 (type, mode);
6744 else
6745 return return_in_memory_64 (type, mode);
6747 else
6748 return return_in_memory_32 (type, mode);
6749 #endif
6752 /* Return false iff TYPE is returned in memory. This version is used
6753 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6754 but differs notably in that when MMX is available, 8-byte vectors
6755 are returned in memory, rather than in MMX registers. */
6757 bool
6758 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6760 int size;
6761 enum machine_mode mode = type_natural_mode (type, NULL);
6763 if (TARGET_64BIT)
6764 return return_in_memory_64 (type, mode);
6766 if (mode == BLKmode)
6767 return 1;
6769 size = int_size_in_bytes (type);
6771 if (VECTOR_MODE_P (mode))
6773 /* Return in memory only if MMX registers *are* available. This
6774 seems backwards, but it is consistent with the existing
6775 Solaris x86 ABI. */
6776 if (size == 8)
6777 return TARGET_MMX;
6778 if (size == 16)
6779 return !TARGET_SSE;
6781 else if (mode == TImode)
6782 return !TARGET_SSE;
6783 else if (mode == XFmode)
6784 return 0;
6786 return size > 12;
6789 /* When returning SSE vector types, we have a choice of either
6790 (1) being abi incompatible with a -march switch, or
6791 (2) generating an error.
6792 Given no good solution, I think the safest thing is one warning.
6793 The user won't be able to use -Werror, but....
6795 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6796 called in response to actually generating a caller or callee that
6797 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6798 via aggregate_value_p for general type probing from tree-ssa. */
6800 static rtx
6801 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6803 static bool warnedsse, warnedmmx;
6805 if (!TARGET_64BIT && type)
6807 /* Look at the return type of the function, not the function type. */
6808 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6810 if (!TARGET_SSE && !warnedsse)
6812 if (mode == TImode
6813 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6815 warnedsse = true;
6816 warning (0, "SSE vector return without SSE enabled "
6817 "changes the ABI");
6821 if (!TARGET_MMX && !warnedmmx)
6823 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6825 warnedmmx = true;
6826 warning (0, "MMX vector return without MMX enabled "
6827 "changes the ABI");
6832 return NULL;
6836 /* Create the va_list data type. */
6838 /* Returns the calling convention specific va_list data type.
6839 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
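/* In C terms, the SYSV_ABI record built below is the familiar

     struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     };

   exposed to the user as an array type of one element.  */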
6841 static tree
6842 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6844 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6846 /* For i386 we use a plain pointer to the argument area. */
6847 if (!TARGET_64BIT || abi == MS_ABI)
6848 return build_pointer_type (char_type_node);
6850 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6851 type_decl = build_decl (BUILTINS_LOCATION,
6852 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6854 f_gpr = build_decl (BUILTINS_LOCATION,
6855 FIELD_DECL, get_identifier ("gp_offset"),
6856 unsigned_type_node);
6857 f_fpr = build_decl (BUILTINS_LOCATION,
6858 FIELD_DECL, get_identifier ("fp_offset"),
6859 unsigned_type_node);
6860 f_ovf = build_decl (BUILTINS_LOCATION,
6861 FIELD_DECL, get_identifier ("overflow_arg_area"),
6862 ptr_type_node);
6863 f_sav = build_decl (BUILTINS_LOCATION,
6864 FIELD_DECL, get_identifier ("reg_save_area"),
6865 ptr_type_node);
6867 va_list_gpr_counter_field = f_gpr;
6868 va_list_fpr_counter_field = f_fpr;
6870 DECL_FIELD_CONTEXT (f_gpr) = record;
6871 DECL_FIELD_CONTEXT (f_fpr) = record;
6872 DECL_FIELD_CONTEXT (f_ovf) = record;
6873 DECL_FIELD_CONTEXT (f_sav) = record;
6875 TREE_CHAIN (record) = type_decl;
6876 TYPE_NAME (record) = type_decl;
6877 TYPE_FIELDS (record) = f_gpr;
6878 TREE_CHAIN (f_gpr) = f_fpr;
6879 TREE_CHAIN (f_fpr) = f_ovf;
6880 TREE_CHAIN (f_ovf) = f_sav;
6882 layout_type (record);
6884 /* The correct type is an array type of one element. */
6885 return build_array_type (record, build_index_type (size_zero_node));
6888 /* Setup the builtin va_list data type and for 64-bit the additional
6889 calling convention specific va_list data types. */
6891 static tree
6892 ix86_build_builtin_va_list (void)
6894 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6896 /* Initialize abi specific va_list builtin types. */
6897 if (TARGET_64BIT)
6899 tree t;
6900 if (ix86_abi == MS_ABI)
6902 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6903 if (TREE_CODE (t) != RECORD_TYPE)
6904 t = build_variant_type_copy (t);
6905 sysv_va_list_type_node = t;
6907 else
6909 t = ret;
6910 if (TREE_CODE (t) != RECORD_TYPE)
6911 t = build_variant_type_copy (t);
6912 sysv_va_list_type_node = t;
6914 if (ix86_abi != MS_ABI)
6916 t = ix86_build_builtin_va_list_abi (MS_ABI);
6917 if (TREE_CODE (t) != RECORD_TYPE)
6918 t = build_variant_type_copy (t);
6919 ms_va_list_type_node = t;
6921 else
6923 t = ret;
6924 if (TREE_CODE (t) != RECORD_TYPE)
6925 t = build_variant_type_copy (t);
6926 ms_va_list_type_node = t;
6930 return ret;
6933 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6935 static void
6936 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6938 rtx save_area, mem;
6939 rtx label;
6940 rtx tmp_reg;
6941 rtx nsse_reg;
6942 alias_set_type set;
6943 int i;
6945 /* GPR size of varargs save area. */
6946 if (cfun->va_list_gpr_size)
6947 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6948 else
6949 ix86_varargs_gpr_size = 0;
6951 /* FPR size of varargs save area. We don't need it if we don't pass
6952 anything in SSE registers. */
6953 if (cum->sse_nregs && cfun->va_list_fpr_size)
6954 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6955 else
6956 ix86_varargs_fpr_size = 0;
6958 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6959 return;
6961 save_area = frame_pointer_rtx;
6962 set = get_varargs_alias_set ();
6964 for (i = cum->regno;
6965 i < X86_64_REGPARM_MAX
6966 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6967 i++)
6969 mem = gen_rtx_MEM (Pmode,
6970 plus_constant (save_area, i * UNITS_PER_WORD));
6971 MEM_NOTRAP_P (mem) = 1;
6972 set_mem_alias_set (mem, set);
6973 emit_move_insn (mem, gen_rtx_REG (Pmode,
6974 x86_64_int_parameter_registers[i]));
6977 if (ix86_varargs_fpr_size)
6979 /* Now emit code to save SSE registers. The AX parameter contains number
6980 of SSE parameter registers used to call this function. We use
6981 sse_prologue_save insn template that produces computed jump across
6982 SSE saves. We need some preparation work to get this working. */
6984 label = gen_label_rtx ();
6986 nsse_reg = gen_reg_rtx (Pmode);
6987 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6989 /* Compute the address of the memory block we save into. We always use a
6990 pointer pointing 127 bytes past the first byte to store - this is needed
6991 to keep the instruction size limited to 4 bytes (5 bytes for AVX) with a
6992 one byte displacement. */
6993 tmp_reg = gen_reg_rtx (Pmode);
6994 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6995 plus_constant (save_area,
6996 ix86_varargs_gpr_size + 127)));
6997 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6998 MEM_NOTRAP_P (mem) = 1;
6999 set_mem_alias_set (mem, set);
7000 set_mem_align (mem, 64);
7002 /* And finally do the dirty job! */
7003 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
7004 GEN_INT (cum->sse_regno), label,
7005 gen_reg_rtx (Pmode)));
7009 static void
7010 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
7012 alias_set_type set = get_varargs_alias_set ();
7013 int i;
7015 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
7017 rtx reg, mem;
7019 mem = gen_rtx_MEM (Pmode,
7020 plus_constant (virtual_incoming_args_rtx,
7021 i * UNITS_PER_WORD));
7022 MEM_NOTRAP_P (mem) = 1;
7023 set_mem_alias_set (mem, set);
7025 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
7026 emit_move_insn (mem, reg);
7030 static void
7031 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7032 tree type, int *pretend_size ATTRIBUTE_UNUSED,
7033 int no_rtl)
7035 CUMULATIVE_ARGS next_cum;
7036 tree fntype;
7038 /* This argument doesn't appear to be used anymore. Which is good,
7039 because the old code here didn't suppress rtl generation. */
7040 gcc_assert (!no_rtl);
7042 if (!TARGET_64BIT)
7043 return;
7045 fntype = TREE_TYPE (current_function_decl);
7047 /* For varargs, we do not want to skip the dummy va_dcl argument.
7048 For stdargs, we do want to skip the last named argument. */
7049 next_cum = *cum;
7050 if (stdarg_p (fntype))
7051 ix86_function_arg_advance (&next_cum, mode, type, true);
7053 if (cum->call_abi == MS_ABI)
7054 setup_incoming_varargs_ms_64 (&next_cum);
7055 else
7056 setup_incoming_varargs_64 (&next_cum);
7059 /* Check whether TYPE is a va_list of the plain char * kind. */
7061 static bool
7062 is_va_list_char_pointer (tree type)
7064 tree canonic;
7066 /* For 32-bit it is always true. */
7067 if (!TARGET_64BIT)
7068 return true;
7069 canonic = ix86_canonical_va_list_type (type);
7070 return (canonic == ms_va_list_type_node
7071 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
7074 /* Implement va_start. */
7076 static void
7077 ix86_va_start (tree valist, rtx nextarg)
7079 HOST_WIDE_INT words, n_gpr, n_fpr;
7080 tree f_gpr, f_fpr, f_ovf, f_sav;
7081 tree gpr, fpr, ovf, sav, t;
7082 tree type;
7084 /* Only 64bit target needs something special. */
7085 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7087 std_expand_builtin_va_start (valist, nextarg);
7088 return;
7091 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7092 f_fpr = TREE_CHAIN (f_gpr);
7093 f_ovf = TREE_CHAIN (f_fpr);
7094 f_sav = TREE_CHAIN (f_ovf);
7096 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
7097 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
7098 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7099 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7100 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7102 /* Count number of gp and fp argument registers used. */
7103 words = crtl->args.info.words;
7104 n_gpr = crtl->args.info.regno;
7105 n_fpr = crtl->args.info.sse_regno;
7107 if (cfun->va_list_gpr_size)
7109 type = TREE_TYPE (gpr);
7110 t = build2 (MODIFY_EXPR, type,
7111 gpr, build_int_cst (type, n_gpr * 8));
7112 TREE_SIDE_EFFECTS (t) = 1;
7113 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7116 if (TARGET_SSE && cfun->va_list_fpr_size)
7118 type = TREE_TYPE (fpr);
7119 t = build2 (MODIFY_EXPR, type, fpr,
7120 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
7121 TREE_SIDE_EFFECTS (t) = 1;
7122 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7125 /* Find the overflow area. */
7126 type = TREE_TYPE (ovf);
7127 t = make_tree (type, crtl->args.internal_arg_pointer);
7128 if (words != 0)
7129 t = build2 (POINTER_PLUS_EXPR, type, t,
7130 size_int (words * UNITS_PER_WORD));
7131 t = build2 (MODIFY_EXPR, type, ovf, t);
7132 TREE_SIDE_EFFECTS (t) = 1;
7133 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7135 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
7137 /* Find the register save area.
7138 The function prologue saves it right above the stack frame. */
7139 type = TREE_TYPE (sav);
7140 t = make_tree (type, frame_pointer_rtx);
7141 if (!ix86_varargs_gpr_size)
7142 t = build2 (POINTER_PLUS_EXPR, type, t,
7143 size_int (-8 * X86_64_REGPARM_MAX));
7144 t = build2 (MODIFY_EXPR, type, sav, t);
7145 TREE_SIDE_EFFECTS (t) = 1;
7146 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
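/* For reference, the four fields initialized above index into the SysV
   x86-64 va_list record, which per the psABI looks roughly like this
   (an illustrative sketch only, not a declaration used here):

       typedef struct {
         unsigned int gp_offset;      [0 .. 48, advanced in steps of 8]
         unsigned int fp_offset;      [starts at 8*X86_64_REGPARM_MAX,
                                       advanced in steps of 16]
         void *overflow_arg_area;     [next stack-passed argument]
         void *reg_save_area;         [base of the prologue's spill area]
       } va_list[1];

   The code above fills gp_offset/fp_offset from the number of named
   register arguments already consumed, points overflow_arg_area past
   the named stack arguments, and points reg_save_area at the area the
   prologue saved.  */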
7150 /* Implement va_arg. */
7152 static tree
7153 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7154 gimple_seq *post_p)
7156 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7157 tree f_gpr, f_fpr, f_ovf, f_sav;
7158 tree gpr, fpr, ovf, sav, t;
7159 int size, rsize;
7160 tree lab_false, lab_over = NULL_TREE;
7161 tree addr, t2;
7162 rtx container;
7163 int indirect_p = 0;
7164 tree ptrtype;
7165 enum machine_mode nat_mode;
7166 unsigned int arg_boundary;
7168 /* Only the 64-bit target needs something special. */
7169 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7170 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7172 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7173 f_fpr = TREE_CHAIN (f_gpr);
7174 f_ovf = TREE_CHAIN (f_fpr);
7175 f_sav = TREE_CHAIN (f_ovf);
7177 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7178 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7179 valist = build_va_arg_indirect_ref (valist);
7180 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7181 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7182 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7184 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7185 if (indirect_p)
7186 type = build_pointer_type (type);
7187 size = int_size_in_bytes (type);
7188 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7190 nat_mode = type_natural_mode (type, NULL);
7191 switch (nat_mode)
7193 case V8SFmode:
7194 case V8SImode:
7195 case V32QImode:
7196 case V16HImode:
7197 case V4DFmode:
7198 case V4DImode:
7199 /* Unnamed 256-bit vector mode parameters are passed on the stack. */
7200 if (ix86_cfun_abi () == SYSV_ABI)
7202 container = NULL;
7203 break;
7206 default:
7207 container = construct_container (nat_mode, TYPE_MODE (type),
7208 type, 0, X86_64_REGPARM_MAX,
7209 X86_64_SSE_REGPARM_MAX, intreg,
7211 break;
7214 /* Pull the value out of the saved registers. */
7216 addr = create_tmp_var (ptr_type_node, "addr");
7218 if (container)
7220 int needed_intregs, needed_sseregs;
7221 bool need_temp;
7222 tree int_addr, sse_addr;
7224 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7225 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7227 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7229 need_temp = (!REG_P (container)
7230 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7231 || TYPE_ALIGN (type) > 128));
7233 /* In case we are passing a structure, verify that it is a consecutive block
7234 in the register save area. If not, we need to do moves. */
7235 if (!need_temp && !REG_P (container))
7238 /* Verify that all registers are strictly consecutive. */
7238 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7240 int i;
7242 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7244 rtx slot = XVECEXP (container, 0, i);
7245 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7246 || INTVAL (XEXP (slot, 1)) != i * 16)
7247 need_temp = 1;
7250 else
7252 int i;
7254 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7256 rtx slot = XVECEXP (container, 0, i);
7257 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7258 || INTVAL (XEXP (slot, 1)) != i * 8)
7259 need_temp = 1;
7263 if (!need_temp)
7265 int_addr = addr;
7266 sse_addr = addr;
7268 else
7270 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7271 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7274 /* First ensure that we fit completely in registers. */
7275 if (needed_intregs)
7277 t = build_int_cst (TREE_TYPE (gpr),
7278 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7279 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7280 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7281 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7282 gimplify_and_add (t, pre_p);
7284 if (needed_sseregs)
7286 t = build_int_cst (TREE_TYPE (fpr),
7287 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7288 + X86_64_REGPARM_MAX * 8);
7289 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7290 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7291 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7292 gimplify_and_add (t, pre_p);
7295 /* Compute index to start of area used for integer regs. */
7296 if (needed_intregs)
7298 /* int_addr = gpr + sav; */
7299 t = fold_convert (sizetype, gpr);
7300 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7301 gimplify_assign (int_addr, t, pre_p);
7303 if (needed_sseregs)
7305 /* sse_addr = fpr + sav; */
7306 t = fold_convert (sizetype, fpr);
7307 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7308 gimplify_assign (sse_addr, t, pre_p);
7310 if (need_temp)
7312 int i, prev_size = 0;
7313 tree temp = create_tmp_var (type, "va_arg_tmp");
7315 /* addr = &temp; */
7316 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7317 gimplify_assign (addr, t, pre_p);
7319 for (i = 0; i < XVECLEN (container, 0); i++)
7321 rtx slot = XVECEXP (container, 0, i);
7322 rtx reg = XEXP (slot, 0);
7323 enum machine_mode mode = GET_MODE (reg);
7324 tree piece_type;
7325 tree addr_type;
7326 tree daddr_type;
7327 tree src_addr, src;
7328 int src_offset;
7329 tree dest_addr, dest;
7330 int cur_size = GET_MODE_SIZE (mode);
7332 if (prev_size + cur_size > size)
7334 cur_size = size - prev_size;
7335 mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
7336 if (mode == BLKmode)
7337 mode = QImode;
7339 piece_type = lang_hooks.types.type_for_mode (mode, 1);
7340 if (mode == GET_MODE (reg))
7341 addr_type = build_pointer_type (piece_type);
7342 else
7343 addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
7344 true);
7345 daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
7346 true);
7348 if (SSE_REGNO_P (REGNO (reg)))
7350 src_addr = sse_addr;
7351 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7353 else
7355 src_addr = int_addr;
7356 src_offset = REGNO (reg) * 8;
7358 src_addr = fold_convert (addr_type, src_addr);
7359 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7360 size_int (src_offset));
7362 dest_addr = fold_convert (daddr_type, addr);
7363 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7364 size_int (INTVAL (XEXP (slot, 1))));
7365 if (cur_size == GET_MODE_SIZE (mode))
7367 src = build_va_arg_indirect_ref (src_addr);
7368 dest = build_va_arg_indirect_ref (dest_addr);
7370 gimplify_assign (dest, src, pre_p);
7372 else
7374 tree copy
7375 = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
7376 3, dest_addr, src_addr,
7377 size_int (cur_size));
7378 gimplify_and_add (copy, pre_p);
7380 prev_size += cur_size;
7384 if (needed_intregs)
7386 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7387 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7388 gimplify_assign (gpr, t, pre_p);
7391 if (needed_sseregs)
7393 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7394 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7395 gimplify_assign (fpr, t, pre_p);
7398 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7400 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7403 /* ... otherwise out of the overflow area. */
7405 /* When the caller aligns a parameter on the stack, a parameter whose
7406 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT will only be
7407 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. Match the callee here
7408 with the caller. */
7409 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7410 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7411 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7413 /* Care for on-stack alignment if needed. */
7414 if (arg_boundary <= 64
7415 || integer_zerop (TYPE_SIZE (type)))
7416 t = ovf;
7417 else
7419 HOST_WIDE_INT align = arg_boundary / 8;
7420 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7421 size_int (align - 1));
7422 t = fold_convert (sizetype, t);
7423 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7424 size_int (-align));
7425 t = fold_convert (TREE_TYPE (ovf), t);
7426 if (crtl->stack_alignment_needed < arg_boundary)
7427 crtl->stack_alignment_needed = arg_boundary;
7429 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7430 gimplify_assign (addr, t, pre_p);
7432 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7433 size_int (rsize * UNITS_PER_WORD));
7434 gimplify_assign (unshare_expr (ovf), t, pre_p);
7436 if (container)
7437 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7439 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7440 addr = fold_convert (ptrtype, addr);
7442 if (indirect_p)
7443 addr = build_va_arg_indirect_ref (addr);
7444 return build_va_arg_indirect_ref (addr);
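/* As an illustration (not the exact gimple emitted), for a plain int
   argument the sequence built above behaves roughly like:

       if (gpr < 6 * 8)                 [a GP register slot is left]
         {
           addr = sav + gpr;
           gpr += 8;
         }
       else                             [fall back to the overflow area]
         {
           addr = ovf;                  [after any on-stack alignment]
           ovf += rsize * UNITS_PER_WORD;
         }
       result = *(int *) addr;
*/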
7447 /* Return nonzero if OPNUM's MEM should be matched
7448 in movabs* patterns. */
7451 ix86_check_movabs (rtx insn, int opnum)
7453 rtx set, mem;
7455 set = PATTERN (insn);
7456 if (GET_CODE (set) == PARALLEL)
7457 set = XVECEXP (set, 0, 0);
7458 gcc_assert (GET_CODE (set) == SET);
7459 mem = XEXP (set, opnum);
7460 while (GET_CODE (mem) == SUBREG)
7461 mem = SUBREG_REG (mem);
7462 gcc_assert (MEM_P (mem));
7463 return (volatile_ok || !MEM_VOLATILE_P (mem));
7466 /* Initialize the table of extra 80387 mathematical constants. */
7468 static void
7469 init_ext_80387_constants (void)
7471 static const char * cst[5] =
7473 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7474 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7475 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7476 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7477 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7479 int i;
7481 for (i = 0; i < 5; i++)
7483 real_from_string (&ext_80387_constants_table[i], cst[i]);
7484 /* Ensure each constant is rounded to XFmode precision. */
7485 real_convert (&ext_80387_constants_table[i],
7486 XFmode, &ext_80387_constants_table[i]);
7489 ext_80387_constants_init = 1;
7492 /* Return true if the constant is something that can be loaded with
7493 a special instruction. */
7496 standard_80387_constant_p (rtx x)
7498 enum machine_mode mode = GET_MODE (x);
7500 REAL_VALUE_TYPE r;
7502 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7503 return -1;
7505 if (x == CONST0_RTX (mode))
7506 return 1;
7507 if (x == CONST1_RTX (mode))
7508 return 2;
7510 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7512 /* For XFmode constants, try to find a special 80387 instruction when
7513 optimizing for size or on those CPUs that benefit from them. */
7514 if (mode == XFmode
7515 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7517 int i;
7519 if (! ext_80387_constants_init)
7520 init_ext_80387_constants ();
7522 for (i = 0; i < 5; i++)
7523 if (real_identical (&r, &ext_80387_constants_table[i]))
7524 return i + 3;
7527 /* A load of the constant -0.0 or -1.0 will be split into an
7528 fldz;fchs or fld1;fchs sequence. */
7529 if (real_isnegzero (&r))
7530 return 8;
7531 if (real_identical (&r, &dconstm1))
7532 return 9;
7534 return 0;
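/* Summary of the return values used above and decoded by
   standard_80387_constant_opcode below: -1 means not an 80387 constant
   at all, 0 means no special instruction applies, 1 is 0.0 (fldz),
   2 is 1.0 (fld1), 3..7 index the extended constant table (fldlg2,
   fldln2, fldl2e, fldl2t, fldpi), and 8/9 stand for -0.0 and -1.0,
   which are split into fldz;fchs and fld1;fchs respectively.  */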
7537 /* Return the opcode of the special instruction to be used to load
7538 the constant X. */
7540 const char *
7541 standard_80387_constant_opcode (rtx x)
7543 switch (standard_80387_constant_p (x))
7545 case 1:
7546 return "fldz";
7547 case 2:
7548 return "fld1";
7549 case 3:
7550 return "fldlg2";
7551 case 4:
7552 return "fldln2";
7553 case 5:
7554 return "fldl2e";
7555 case 6:
7556 return "fldl2t";
7557 case 7:
7558 return "fldpi";
7559 case 8:
7560 case 9:
7561 return "#";
7562 default:
7563 gcc_unreachable ();
7567 /* Return the CONST_DOUBLE representing the 80387 constant that is
7568 loaded by the specified special instruction. The argument IDX
7569 matches the return value from standard_80387_constant_p. */
7572 standard_80387_constant_rtx (int idx)
7574 int i;
7576 if (! ext_80387_constants_init)
7577 init_ext_80387_constants ();
7579 switch (idx)
7581 case 3:
7582 case 4:
7583 case 5:
7584 case 6:
7585 case 7:
7586 i = idx - 3;
7587 break;
7589 default:
7590 gcc_unreachable ();
7593 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7594 XFmode);
7597 /* Return 1 if X is all 0s and 2 if X is all 1s
7598 in a supported SSE vector mode. */
7601 standard_sse_constant_p (rtx x)
7603 enum machine_mode mode = GET_MODE (x);
7605 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7606 return 1;
7607 if (vector_all_ones_operand (x, mode))
7608 switch (mode)
7610 case V16QImode:
7611 case V8HImode:
7612 case V4SImode:
7613 case V2DImode:
7614 if (TARGET_SSE2)
7615 return 2;
7616 default:
7617 break;
7620 return 0;
7623 /* Return the opcode of the special instruction to be used to load
7624 the constant X. */
7626 const char *
7627 standard_sse_constant_opcode (rtx insn, rtx x)
7629 switch (standard_sse_constant_p (x))
7631 case 1:
7632 switch (get_attr_mode (insn))
7634 case MODE_V4SF:
7635 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7636 case MODE_V2DF:
7637 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7638 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7639 else
7640 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7641 case MODE_TI:
7642 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7643 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7644 else
7645 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7646 case MODE_V8SF:
7647 return "vxorps\t%x0, %x0, %x0";
7648 case MODE_V4DF:
7649 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7650 return "vxorps\t%x0, %x0, %x0";
7651 else
7652 return "vxorpd\t%x0, %x0, %x0";
7653 case MODE_OI:
7654 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7655 return "vxorps\t%x0, %x0, %x0";
7656 else
7657 return "vpxor\t%x0, %x0, %x0";
7658 default:
7659 break;
7661 case 2:
7662 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7663 default:
7664 break;
7666 gcc_unreachable ();
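/* For example, an all-zero V4SFmode constant is materialized as
   "xorps %xmm0, %xmm0" ("vxorps" under AVX) and an all-ones V4SImode
   constant as "pcmpeqd %xmm0, %xmm0" ("vpcmpeqd" under AVX); the exact
   mnemonic is chosen from the insn's mode attribute as shown above.  */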
7669 /* Return 1 if OP contains a symbol reference. */
7672 symbolic_reference_mentioned_p (rtx op)
7674 const char *fmt;
7675 int i;
7677 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7678 return 1;
7680 fmt = GET_RTX_FORMAT (GET_CODE (op));
7681 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7683 if (fmt[i] == 'E')
7685 int j;
7687 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7688 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7689 return 1;
7692 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7693 return 1;
7696 return 0;
7699 /* Return 1 if it is appropriate to emit `ret' instructions in the
7700 body of a function. Do this only if the epilogue is simple, needing a
7701 couple of insns. Prior to reloading, we can't tell how many registers
7702 must be saved, so return 0 then. Return 0 if there is no frame
7703 marker to de-allocate. */
7706 ix86_can_use_return_insn_p (void)
7708 struct ix86_frame frame;
7710 if (! reload_completed || frame_pointer_needed)
7711 return 0;
7713 /* Don't allow more than 32k pop, since that's all we can do
7714 with one instruction. */
7715 if (crtl->args.pops_args
7716 && crtl->args.size >= 32768)
7717 return 0;
7719 ix86_compute_frame_layout (&frame);
7720 return frame.to_allocate == 0 && frame.padding0 == 0
7721 && (frame.nregs + frame.nsseregs) == 0;
7724 /* Value should be nonzero if functions must have frame pointers.
7725 Zero means the frame pointer need not be set up (and parms may
7726 be accessed via the stack pointer) in functions that seem suitable. */
7728 static bool
7729 ix86_frame_pointer_required (void)
7731 /* If we accessed previous frames, then the generated code expects
7732 to be able to access the saved ebp value in our frame. */
7733 if (cfun->machine->accesses_prev_frame)
7734 return true;
7736 /* Several x86 OSes need a frame pointer for other reasons,
7737 usually pertaining to setjmp. */
7738 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7739 return true;
7741 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7742 the frame pointer by default. Turn it back on now if we've not
7743 got a leaf function. */
7744 if (TARGET_OMIT_LEAF_FRAME_POINTER
7745 && (!current_function_is_leaf
7746 || ix86_current_function_calls_tls_descriptor))
7747 return true;
7749 if (crtl->profile)
7750 return true;
7752 return false;
7755 /* Record that the current function accesses previous call frames. */
7757 void
7758 ix86_setup_frame_addresses (void)
7760 cfun->machine->accesses_prev_frame = 1;
7763 #ifndef USE_HIDDEN_LINKONCE
7764 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7765 # define USE_HIDDEN_LINKONCE 1
7766 # else
7767 # define USE_HIDDEN_LINKONCE 0
7768 # endif
7769 #endif
7771 static int pic_labels_used;
7773 /* Fills in the label name that should be used for a pc thunk for
7774 the given register. */
7776 static void
7777 get_pc_thunk_name (char name[32], unsigned int regno)
7779 gcc_assert (!TARGET_64BIT);
7781 if (USE_HIDDEN_LINKONCE)
7782 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7783 else
7784 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7788 /* This function generates code for -fpic that loads %ebx with
7789 the return address of the caller and then returns. */
7791 static void
7792 ix86_code_end (void)
7794 rtx xops[2];
7795 int regno;
7797 for (regno = 0; regno < 8; ++regno)
7799 char name[32];
7800 tree decl;
7802 if (! ((pic_labels_used >> regno) & 1))
7803 continue;
7805 get_pc_thunk_name (name, regno);
7807 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7808 get_identifier (name),
7809 build_function_type (void_type_node, void_list_node));
7810 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7811 NULL_TREE, void_type_node);
7812 TREE_PUBLIC (decl) = 1;
7813 TREE_STATIC (decl) = 1;
7815 #if TARGET_MACHO
7816 if (TARGET_MACHO)
7818 switch_to_section (darwin_sections[text_coal_section]);
7819 fputs ("\t.weak_definition\t", asm_out_file);
7820 assemble_name (asm_out_file, name);
7821 fputs ("\n\t.private_extern\t", asm_out_file);
7822 assemble_name (asm_out_file, name);
7823 fputs ("\n", asm_out_file);
7824 ASM_OUTPUT_LABEL (asm_out_file, name);
7825 DECL_WEAK (decl) = 1;
7827 else
7828 #endif
7829 if (USE_HIDDEN_LINKONCE)
7831 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7833 (*targetm.asm_out.unique_section) (decl, 0);
7834 switch_to_section (get_named_section (decl, NULL, 0));
7836 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7837 fputs ("\t.hidden\t", asm_out_file);
7838 assemble_name (asm_out_file, name);
7839 putc ('\n', asm_out_file);
7840 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7842 else
7844 switch_to_section (text_section);
7845 ASM_OUTPUT_LABEL (asm_out_file, name);
7848 DECL_INITIAL (decl) = make_node (BLOCK);
7849 current_function_decl = decl;
7850 init_function_start (decl);
7851 first_function_block_is_cold = false;
7852 /* Make sure unwind info is emitted for the thunk if needed. */
7853 final_start_function (emit_barrier (), asm_out_file, 1);
7855 xops[0] = gen_rtx_REG (Pmode, regno);
7856 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7857 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7858 output_asm_insn ("ret", xops);
7859 final_end_function ();
7860 init_insn_lengths ();
7861 free_after_compilation (cfun);
7862 set_cfun (NULL);
7863 current_function_decl = NULL;
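/* The thunk emitted above looks like this for, e.g., the %ebx flavor
   (illustrative AT&T assembly; the register depends on REGNO):

       __i686.get_pc_thunk.bx:
               movl    (%esp), %ebx
               ret

   i.e. it copies its own return address into the PIC register.  */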
7867 /* Emit code for the SET_GOT patterns. */
7869 const char *
7870 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7872 rtx xops[3];
7874 xops[0] = dest;
7876 if (TARGET_VXWORKS_RTP && flag_pic)
7878 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7879 xops[2] = gen_rtx_MEM (Pmode,
7880 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7881 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7883 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7884 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7885 an unadorned address. */
7886 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7887 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7888 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7889 return "";
7892 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7894 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7896 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7898 if (!flag_pic)
7899 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7900 else
7902 output_asm_insn ("call\t%a2", xops);
7903 #ifdef DWARF2_UNWIND_INFO
7904 /* The call to the next label acts as a push. */
7905 if (dwarf2out_do_frame ())
7907 rtx insn;
7908 start_sequence ();
7909 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7910 gen_rtx_PLUS (Pmode,
7911 stack_pointer_rtx,
7912 GEN_INT (-4))));
7913 RTX_FRAME_RELATED_P (insn) = 1;
7914 dwarf2out_frame_debug (insn, true);
7915 end_sequence ();
7917 #endif
7920 #if TARGET_MACHO
7921 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7922 is what will be referenced by the Mach-O PIC subsystem. */
7923 if (!label)
7924 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7925 #endif
7927 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7928 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7930 if (flag_pic)
7932 output_asm_insn ("pop%z0\t%0", xops);
7933 #ifdef DWARF2_UNWIND_INFO
7934 /* The pop is a pop and clobbers dest, but doesn't restore it
7935 for unwind info purposes. */
7936 if (dwarf2out_do_frame ())
7938 rtx insn;
7939 start_sequence ();
7940 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7941 dwarf2out_frame_debug (insn, true);
7942 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7943 gen_rtx_PLUS (Pmode,
7944 stack_pointer_rtx,
7945 GEN_INT (4))));
7946 RTX_FRAME_RELATED_P (insn) = 1;
7947 dwarf2out_frame_debug (insn, true);
7948 end_sequence ();
7950 #endif
7953 else
7955 char name[32];
7956 get_pc_thunk_name (name, REGNO (dest));
7957 pic_labels_used |= 1 << REGNO (dest);
7959 #ifdef DWARF2_UNWIND_INFO
7960 /* Ensure all queued register saves are flushed before the
7961 call. */
7962 if (dwarf2out_do_frame ())
7964 rtx insn;
7965 start_sequence ();
7966 insn = emit_barrier ();
7967 end_sequence ();
7968 dwarf2out_frame_debug (insn, false);
7970 #endif
7971 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7972 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7973 output_asm_insn ("call\t%X2", xops);
7974 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7975 is what will be referenced by the Mach-O PIC subsystem. */
7976 #if TARGET_MACHO
7977 if (!label)
7978 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7979 else
7980 targetm.asm_out.internal_label (asm_out_file, "L",
7981 CODE_LABEL_NUMBER (label));
7982 #endif
7985 if (TARGET_MACHO)
7986 return "";
7988 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7989 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7990 else
7991 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7993 return "";
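/* As an illustration, the usual -fpic sequence produced here for %ebx
   on targets with deep branch prediction is:

       call    __i686.get_pc_thunk.bx
       addl    $_GLOBAL_OFFSET_TABLE_, %ebx

   while the fallback path emits a call to the next label followed by a
   pop and the same add with a label-relative adjustment.  */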
7996 /* Generate a "push" pattern for input ARG. */
7998 static rtx
7999 gen_push (rtx arg)
8001 if (ix86_cfa_state->reg == stack_pointer_rtx)
8002 ix86_cfa_state->offset += UNITS_PER_WORD;
8004 return gen_rtx_SET (VOIDmode,
8005 gen_rtx_MEM (Pmode,
8006 gen_rtx_PRE_DEC (Pmode,
8007 stack_pointer_rtx)),
8008 arg);
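/* The RTL built here is (set (mem:Pmode (pre_dec:Pmode sp)) ARG), which
   the backend's push patterns match; bumping the tracked CFA offset
   first keeps the unwind bookkeeping in sync while the CFA is still the
   stack pointer. */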
8011 /* Return >= 0 if there is an unused call-clobbered register available
8012 for the entire function. */
8014 static unsigned int
8015 ix86_select_alt_pic_regnum (void)
8017 if (current_function_is_leaf && !crtl->profile
8018 && !ix86_current_function_calls_tls_descriptor)
8020 int i, drap;
8021 /* Can't use the same register for both PIC and DRAP. */
8022 if (crtl->drap_reg)
8023 drap = REGNO (crtl->drap_reg);
8024 else
8025 drap = -1;
8026 for (i = 2; i >= 0; --i)
8027 if (i != drap && !df_regs_ever_live_p (i))
8028 return i;
8031 return INVALID_REGNUM;
8034 /* Return 1 if we need to save REGNO. */
8035 static int
8036 ix86_save_reg (unsigned int regno, int maybe_eh_return)
8038 if (pic_offset_table_rtx
8039 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
8040 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8041 || crtl->profile
8042 || crtl->calls_eh_return
8043 || crtl->uses_const_pool))
8045 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
8046 return 0;
8047 return 1;
8050 if (crtl->calls_eh_return && maybe_eh_return)
8052 unsigned i;
8053 for (i = 0; ; i++)
8055 unsigned test = EH_RETURN_DATA_REGNO (i);
8056 if (test == INVALID_REGNUM)
8057 break;
8058 if (test == regno)
8059 return 1;
8063 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
8064 return 1;
8066 return (df_regs_ever_live_p (regno)
8067 && !call_used_regs[regno]
8068 && !fixed_regs[regno]
8069 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
8072 /* Return the number of saved general purpose registers. */
8074 static int
8075 ix86_nsaved_regs (void)
8077 int nregs = 0;
8078 int regno;
8080 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8081 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8082 nregs ++;
8083 return nregs;
8086 /* Return the number of saved SSE registers. */
8088 static int
8089 ix86_nsaved_sseregs (void)
8091 int nregs = 0;
8092 int regno;
8094 if (ix86_cfun_abi () != MS_ABI)
8095 return 0;
8096 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8097 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8098 nregs ++;
8099 return nregs;
8102 /* Given FROM and TO register numbers, say whether this elimination is
8103 allowed. If stack alignment is needed, we can only replace argument
8104 pointer with hard frame pointer, or replace frame pointer with stack
8105 pointer. Otherwise, frame pointer elimination is automatically
8106 handled and all other eliminations are valid. */
8108 static bool
8109 ix86_can_eliminate (const int from, const int to)
8111 if (stack_realign_fp)
8112 return ((from == ARG_POINTER_REGNUM
8113 && to == HARD_FRAME_POINTER_REGNUM)
8114 || (from == FRAME_POINTER_REGNUM
8115 && to == STACK_POINTER_REGNUM));
8116 else
8117 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
8120 /* Return the offset between two registers, one to be eliminated, and the other
8121 its replacement, at the start of a routine. */
8123 HOST_WIDE_INT
8124 ix86_initial_elimination_offset (int from, int to)
8126 struct ix86_frame frame;
8127 ix86_compute_frame_layout (&frame);
8129 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8130 return frame.hard_frame_pointer_offset;
8131 else if (from == FRAME_POINTER_REGNUM
8132 && to == HARD_FRAME_POINTER_REGNUM)
8133 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
8134 else
8136 gcc_assert (to == STACK_POINTER_REGNUM);
8138 if (from == ARG_POINTER_REGNUM)
8139 return frame.stack_pointer_offset;
8141 gcc_assert (from == FRAME_POINTER_REGNUM);
8142 return frame.stack_pointer_offset - frame.frame_pointer_offset;
8146 /* In a dynamically-aligned function, we can't know the offset from
8147 stack pointer to frame pointer, so we must ensure that setjmp
8148 eliminates fp against the hard fp (%ebp) rather than trying to
8149 index from %esp up to the top of the frame across a gap that is
8150 of unknown (at compile-time) size. */
8151 static rtx
8152 ix86_builtin_setjmp_frame_value (void)
8154 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
8157 /* Fill the ix86_frame structure describing the frame of the function currently being compiled. */
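/* A rough picture of the layout computed below, from the incoming
   (high) addresses down to the final stack pointer (a sketch of the
   offsets being filled in, not an additional data structure):

       [ return address ]
       [ pushed static chain ]        if ix86_static_chain_on_stack
       [ saved frame pointer ]        if frame_pointer_needed
                                      <- hard_frame_pointer_offset
       [ GP register save area ]      nregs * UNITS_PER_WORD
       [ padding0 ]                   aligns the SSE save area to 16
       [ SSE register save area ]     nsseregs * 16
       [ va_arg register save area ]
       [ padding1 ]                   aligns the local frame
                                      <- frame_pointer_offset
       [ local variables ]            get_frame_size ()
       [ outgoing arguments ]
       [ padding2 ]                   preferred boundary alignment
                                      <- stack_pointer_offset  */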
8159 static void
8160 ix86_compute_frame_layout (struct ix86_frame *frame)
8162 unsigned int stack_alignment_needed;
8163 HOST_WIDE_INT offset;
8164 unsigned int preferred_alignment;
8165 HOST_WIDE_INT size = get_frame_size ();
8167 frame->nregs = ix86_nsaved_regs ();
8168 frame->nsseregs = ix86_nsaved_sseregs ();
8170 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8171 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
8173 /* The MS ABI seems to require the stack alignment to always be 16, except for
8174 function prologues and leaf functions. */
8175 if ((ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
8176 && (!current_function_is_leaf || cfun->calls_alloca != 0
8177 || ix86_current_function_calls_tls_descriptor))
8179 preferred_alignment = 16;
8180 stack_alignment_needed = 16;
8181 crtl->preferred_stack_boundary = 128;
8182 crtl->stack_alignment_needed = 128;
8185 gcc_assert (!size || stack_alignment_needed);
8186 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8187 gcc_assert (preferred_alignment <= stack_alignment_needed);
8189 /* During reload iterations the number of registers saved can change.
8190 Recompute the value as needed. Do not recompute when the number of registers
8191 didn't change, as reload makes multiple calls to this function and does not
8192 expect the decision to change within a single iteration. */
8193 if (!optimize_function_for_size_p (cfun)
8194 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8196 int count = frame->nregs;
8197 struct cgraph_node *node = cgraph_node (current_function_decl);
8199 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8200 /* The fast prologue uses move instead of push to save registers. This
8201 is significantly longer, but also executes faster as modern hardware
8202 can execute the moves in parallel, but can't do that for push/pop.
8204 Be careful about choosing which prologue to emit: when the function takes
8205 many instructions to execute we may use the slow version, as well as
8206 when the function is known to be outside a hot spot (this is known only
8207 with feedback). Weight the size of the function by the number of registers
8208 to save, as it is cheap to use one or two push instructions but very
8209 slow to use many of them. */
8210 if (count)
8211 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8212 if (node->frequency < NODE_FREQUENCY_NORMAL
8213 || (flag_branch_probabilities
8214 && node->frequency < NODE_FREQUENCY_HOT))
8215 cfun->machine->use_fast_prologue_epilogue = false;
8216 else
8217 cfun->machine->use_fast_prologue_epilogue
8218 = !expensive_function_p (count);
8220 if (TARGET_PROLOGUE_USING_MOVE
8221 && cfun->machine->use_fast_prologue_epilogue)
8222 frame->save_regs_using_mov = true;
8223 else
8224 frame->save_regs_using_mov = false;
8226 /* Skip return address. */
8227 offset = UNITS_PER_WORD;
8229 /* Skip pushed static chain. */
8230 if (ix86_static_chain_on_stack)
8231 offset += UNITS_PER_WORD;
8233 /* Skip saved base pointer. */
8234 if (frame_pointer_needed)
8235 offset += UNITS_PER_WORD;
8237 frame->hard_frame_pointer_offset = offset;
8239 /* Set offset to aligned because the realigned frame starts from
8240 here. */
8241 if (stack_realign_fp)
8242 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
8244 /* Register save area */
8245 offset += frame->nregs * UNITS_PER_WORD;
8247 /* Align SSE reg save area. */
8248 if (frame->nsseregs)
8249 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8250 else
8251 frame->padding0 = 0;
8253 /* SSE register save area. */
8254 offset += frame->padding0 + frame->nsseregs * 16;
8256 /* Va-arg area */
8257 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8258 offset += frame->va_arg_size;
8260 /* Align start of frame for local function. */
8261 frame->padding1 = ((offset + stack_alignment_needed - 1)
8262 & -stack_alignment_needed) - offset;
8264 offset += frame->padding1;
8266 /* Frame pointer points here. */
8267 frame->frame_pointer_offset = offset;
8269 offset += size;
8271 /* Add the outgoing arguments area. It can be skipped if we eliminated
8272 all the function calls as dead code.
8273 Skipping is however impossible when the function calls alloca. The alloca
8274 expander assumes that the last crtl->outgoing_args_size bytes
8275 of the stack frame are unused. */
8276 if (ACCUMULATE_OUTGOING_ARGS
8277 && (!current_function_is_leaf || cfun->calls_alloca
8278 || ix86_current_function_calls_tls_descriptor))
8280 offset += crtl->outgoing_args_size;
8281 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8283 else
8284 frame->outgoing_arguments_size = 0;
8286 /* Align stack boundary. Only needed if we're calling another function
8287 or using alloca. */
8288 if (!current_function_is_leaf || cfun->calls_alloca
8289 || ix86_current_function_calls_tls_descriptor)
8290 frame->padding2 = ((offset + preferred_alignment - 1)
8291 & -preferred_alignment) - offset;
8292 else
8293 frame->padding2 = 0;
8295 offset += frame->padding2;
8297 /* We've reached the end of the stack frame. */
8298 frame->stack_pointer_offset = offset;
8300 /* Size the prologue needs to allocate. */
8301 frame->to_allocate =
8302 (size + frame->padding1 + frame->padding2
8303 + frame->outgoing_arguments_size + frame->va_arg_size);
8305 if ((!frame->to_allocate && frame->nregs <= 1)
8306 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8307 frame->save_regs_using_mov = false;
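/* The x86-64 red zone is the 128 bytes (RED_ZONE_SIZE) below the stack
   pointer that the SysV ABI lets leaf functions use without adjusting
   %rsp; the MS ABI has no red zone, hence the !TARGET_64BIT_MS_ABI
   check below. When the frame (less RED_ZONE_RESERVE) fits in it, the
   allocation is folded into the red zone and the prologue can skip the
   explicit stack adjustment. */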
8309 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8310 && current_function_sp_is_unchanging
8311 && current_function_is_leaf
8312 && !ix86_current_function_calls_tls_descriptor)
8314 frame->red_zone_size = frame->to_allocate;
8315 if (frame->save_regs_using_mov)
8316 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8317 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8318 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8320 else
8321 frame->red_zone_size = 0;
8322 frame->to_allocate -= frame->red_zone_size;
8323 frame->stack_pointer_offset -= frame->red_zone_size;
8326 /* Emit code to save registers in the prologue. */
8328 static void
8329 ix86_emit_save_regs (void)
8331 unsigned int regno;
8332 rtx insn;
8334 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8335 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8337 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8338 RTX_FRAME_RELATED_P (insn) = 1;
8342 /* Emit code to save registers using MOV insns. The first register
8343 is stored at POINTER + OFFSET. */
8344 static void
8345 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8347 unsigned int regno;
8348 rtx insn;
8350 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8351 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8353 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8354 Pmode, offset),
8355 gen_rtx_REG (Pmode, regno));
8356 RTX_FRAME_RELATED_P (insn) = 1;
8357 offset += UNITS_PER_WORD;
8361 /* Emit code to save SSE registers using MOV insns. The first register
8362 is stored at POINTER + OFFSET. */
8363 static void
8364 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8366 unsigned int regno;
8367 rtx insn;
8368 rtx mem;
8370 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8371 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8373 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8374 set_mem_align (mem, 128);
8375 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8376 RTX_FRAME_RELATED_P (insn) = 1;
8377 offset += 16;
8381 static GTY(()) rtx queued_cfa_restores;
8383 /* Add a REG_CFA_RESTORE note for REG to INSN, or queue it until the next
8384 stack manipulation insn. Don't add it if the previously
8385 saved value will be left untouched within the stack red zone until return,
8386 as unwinders can find the same value in the register and
8387 on the stack. */
8389 static void
8390 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8392 if (TARGET_RED_ZONE
8393 && !TARGET_64BIT_MS_ABI
8394 && red_offset + RED_ZONE_SIZE >= 0
8395 && crtl->args.pops_args < 65536)
8396 return;
8398 if (insn)
8400 add_reg_note (insn, REG_CFA_RESTORE, reg);
8401 RTX_FRAME_RELATED_P (insn) = 1;
8403 else
8404 queued_cfa_restores
8405 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8408 /* Add any queued REG_CFA_RESTORE notes to INSN. */
8410 static void
8411 ix86_add_queued_cfa_restore_notes (rtx insn)
8413 rtx last;
8414 if (!queued_cfa_restores)
8415 return;
8416 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8418 XEXP (last, 1) = REG_NOTES (insn);
8419 REG_NOTES (insn) = queued_cfa_restores;
8420 queued_cfa_restores = NULL_RTX;
8421 RTX_FRAME_RELATED_P (insn) = 1;
8424 /* Expand a prologue or epilogue stack adjustment.
8425 The pattern exists to put a dependency on all ebp-based memory accesses.
8426 STYLE should be negative if instructions should be marked as frame related,
8427 zero if the %r11 register is live and cannot be freely used, and positive
8428 otherwise. */
8430 static void
8431 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8432 int style, bool set_cfa)
8434 rtx insn;
8436 if (! TARGET_64BIT)
8437 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8438 else if (x86_64_immediate_operand (offset, DImode))
8439 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8440 else
8442 rtx tmp;
8443 /* r11 is used by indirect sibcall return as well, set before the
8444 epilogue and used after the epilogue. */
8445 if (style)
8446 tmp = gen_rtx_REG (DImode, R11_REG);
8447 else
8449 gcc_assert (src != hard_frame_pointer_rtx
8450 && dest != hard_frame_pointer_rtx);
8451 tmp = hard_frame_pointer_rtx;
8453 insn = emit_insn (gen_rtx_SET (DImode, tmp, offset));
8454 if (style < 0)
8455 RTX_FRAME_RELATED_P (insn) = 1;
8456 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, tmp,
8457 offset));
8460 if (style >= 0)
8461 ix86_add_queued_cfa_restore_notes (insn);
8463 if (set_cfa)
8465 rtx r;
8467 gcc_assert (ix86_cfa_state->reg == src);
8468 ix86_cfa_state->offset += INTVAL (offset);
8469 ix86_cfa_state->reg = dest;
8471 r = gen_rtx_PLUS (Pmode, src, offset);
8472 r = gen_rtx_SET (VOIDmode, dest, r);
8473 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8474 RTX_FRAME_RELATED_P (insn) = 1;
8476 else if (style < 0)
8477 RTX_FRAME_RELATED_P (insn) = 1;
8480 /* Find an available register to be used as the dynamic realign argument
8481 pointer register. Such a register will be written in the prologue and
8482 used at the beginning of the body, so it must not be
8483 1. a parameter passing register.
8484 2. the GOT pointer.
8485 We reuse the static-chain register if it is available. Otherwise, we
8486 use DI for i386 and R13 for x86-64. We chose R13 since it has a
8487 shorter encoding.
8489 Return: the regno of the chosen register. */
8491 static unsigned int
8492 find_drap_reg (void)
8494 tree decl = cfun->decl;
8496 if (TARGET_64BIT)
8498 /* Use R13 for a nested function or a function that needs a static chain.
8499 Since a function with a tail call may use any caller-saved
8500 registers in the epilogue, DRAP must not use a caller-saved
8501 register in such a case. */
8502 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8503 return R13_REG;
8505 return R10_REG;
8507 else
8509 /* Use DI for a nested function or a function that needs a static chain.
8510 Since a function with a tail call may use any caller-saved
8511 registers in the epilogue, DRAP must not use a caller-saved
8512 register in such a case. */
8513 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8514 return DI_REG;
8516 /* Reuse static chain register if it isn't used for parameter
8517 passing. */
8518 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8519 && !lookup_attribute ("fastcall",
8520 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
8521 && !lookup_attribute ("thiscall",
8522 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8523 return CX_REG;
8524 else
8525 return DI_REG;
8529 /* Return minimum incoming stack alignment. */
8531 static unsigned int
8532 ix86_minimum_incoming_stack_boundary (bool sibcall)
8534 unsigned int incoming_stack_boundary;
8536 /* Prefer the one specified at command line. */
8537 if (ix86_user_incoming_stack_boundary)
8538 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8539 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
8540 if -mstackrealign is used, this isn't a sibcall check, and the
8541 estimated stack alignment is 128 bits. */
8542 else if (!sibcall
8543 && !TARGET_64BIT
8544 && ix86_force_align_arg_pointer
8545 && crtl->stack_alignment_estimated == 128)
8546 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8547 else
8548 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8550 /* Incoming stack alignment can be changed on individual functions
8551 via force_align_arg_pointer attribute. We use the smallest
8552 incoming stack boundary. */
8553 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8554 && lookup_attribute (ix86_force_align_arg_pointer_string,
8555 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8556 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8558 /* The incoming stack frame has to be aligned at least at
8559 parm_stack_boundary. */
8560 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8561 incoming_stack_boundary = crtl->parm_stack_boundary;
8563 /* The stack at the entry of main is aligned by the runtime. We use the
8564 smallest incoming stack boundary. */
8565 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8566 && DECL_NAME (current_function_decl)
8567 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8568 && DECL_FILE_SCOPE_P (current_function_decl))
8569 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8571 return incoming_stack_boundary;
8574 /* Update incoming stack boundary and estimated stack alignment. */
8576 static void
8577 ix86_update_stack_boundary (void)
8579 ix86_incoming_stack_boundary
8580 = ix86_minimum_incoming_stack_boundary (false);
8582 /* x86_64 varargs need 16-byte stack alignment for the register save
8583 area. */
8584 if (TARGET_64BIT
8585 && cfun->stdarg
8586 && crtl->stack_alignment_estimated < 128)
8587 crtl->stack_alignment_estimated = 128;
8590 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8591 needed or an rtx for DRAP otherwise. */
8593 static rtx
8594 ix86_get_drap_rtx (void)
8596 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8597 crtl->need_drap = true;
8599 if (stack_realign_drap)
8601 /* Assign DRAP to vDRAP and return vDRAP. */
8602 unsigned int regno = find_drap_reg ();
8603 rtx drap_vreg;
8604 rtx arg_ptr;
8605 rtx seq, insn;
8607 arg_ptr = gen_rtx_REG (Pmode, regno);
8608 crtl->drap_reg = arg_ptr;
8610 start_sequence ();
8611 drap_vreg = copy_to_reg (arg_ptr);
8612 seq = get_insns ();
8613 end_sequence ();
8615 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8616 if (!optimize)
8618 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8619 RTX_FRAME_RELATED_P (insn) = 1;
8621 return drap_vreg;
8623 else
8624 return NULL;
8627 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8629 static rtx
8630 ix86_internal_arg_pointer (void)
8632 return virtual_incoming_args_rtx;
8635 /* Finalize the stack_realign_needed flag, which guides the prologue/epilogue
8636 to be generated in the correct form. */
8637 static void
8638 ix86_finalize_stack_realign_flags (void)
8640 /* Check whether stack realignment is really needed after reload, and
8641 store the result in cfun. */
8642 unsigned int incoming_stack_boundary
8643 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8644 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8645 unsigned int stack_realign = (incoming_stack_boundary
8646 < (current_function_is_leaf
8647 ? crtl->max_used_stack_slot_alignment
8648 : crtl->stack_alignment_needed));
8650 if (crtl->stack_realign_finalized)
8652 /* After stack_realign_needed is finalized, we can no longer
8653 change it. */
8654 gcc_assert (crtl->stack_realign_needed == stack_realign);
8656 else
8658 crtl->stack_realign_needed = stack_realign;
8659 crtl->stack_realign_finalized = true;
8663 /* Expand the prologue into a bunch of separate insns. */
8665 void
8666 ix86_expand_prologue (void)
8668 rtx insn;
8669 bool pic_reg_used;
8670 struct ix86_frame frame;
8671 HOST_WIDE_INT allocate;
8672 int gen_frame_pointer = frame_pointer_needed;
8674 ix86_finalize_stack_realign_flags ();
8676 /* DRAP should not coexist with stack_realign_fp. */
8677 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8679 /* Initialize CFA state for before the prologue. */
8680 ix86_cfa_state->reg = stack_pointer_rtx;
8681 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8683 ix86_compute_frame_layout (&frame);
8685 if (ix86_function_ms_hook_prologue (current_function_decl))
8687 rtx push, mov;
8689 /* Make sure the function starts with
8690 8b ff movl.s %edi,%edi
8691 55 push %ebp
8692 8b ec movl.s %esp,%ebp
8694 This matches the hookable function prologue in Win32 API
8695 functions in Microsoft Windows XP Service Pack 2 and newer.
8696 Wine uses this to enable Windows apps to hook the Win32 API
8697 functions provided by Wine. */
8698 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8699 gen_rtx_REG (SImode, DI_REG)));
8700 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8701 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8702 stack_pointer_rtx));
8704 if (frame_pointer_needed && !(crtl->drap_reg
8705 && crtl->stack_realign_needed))
8707 /* The push %ebp and movl.s %esp, %ebp already set up
8708 the frame pointer. No need to do this again. */
8709 gen_frame_pointer = 0;
8710 RTX_FRAME_RELATED_P (push) = 1;
8711 RTX_FRAME_RELATED_P (mov) = 1;
8712 if (ix86_cfa_state->reg == stack_pointer_rtx)
8713 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8715 else
8716 /* If the frame pointer is not needed, pop %ebp again. This
8717 could be optimized for cases where ebp needs to be backed up
8718 for some other reason. If stack realignment is needed, pop
8719 the base pointer again, align the stack, and later regenerate
8720 the frame pointer setup. The frame pointer generated by the
8721 hook prologue is not aligned, so it can't be used. */
8722 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8725 /* The first insn of a function that accepts its static chain on the
8726 stack is to push the register that would be filled in by a direct
8727 call. This insn will be skipped by the trampoline. */
8728 if (ix86_static_chain_on_stack)
8730 rtx t;
8732 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8733 emit_insn (gen_blockage ());
8735 /* We don't want to interpret this push insn as a register save,
8736 only as a stack adjustment. The real copy of the register as
8737 a save will be done later, if needed. */
8738 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8739 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8740 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8741 RTX_FRAME_RELATED_P (insn) = 1;
8744 /* Emit prologue code to adjust the stack alignment and set up DRAP, in case
8745 DRAP is needed and stack realignment is really needed after reload. */
8746 if (crtl->drap_reg && crtl->stack_realign_needed)
8748 rtx x, y;
8749 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8750 int param_ptr_offset = UNITS_PER_WORD;
8752 if (ix86_static_chain_on_stack)
8753 param_ptr_offset += UNITS_PER_WORD;
8754 if (!call_used_regs[REGNO (crtl->drap_reg)])
8755 param_ptr_offset += UNITS_PER_WORD;
8757 gcc_assert (stack_realign_drap);
8759 /* Grab the argument pointer. */
8760 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8761 y = crtl->drap_reg;
8763 /* Only need to push the parameter pointer reg if it is a
8764 caller-saved reg. */
8765 if (!call_used_regs[REGNO (crtl->drap_reg)])
8767 /* Push the arg pointer reg. */
8768 insn = emit_insn (gen_push (y));
8769 RTX_FRAME_RELATED_P (insn) = 1;
8772 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8773 RTX_FRAME_RELATED_P (insn) = 1;
8774 ix86_cfa_state->reg = crtl->drap_reg;
8776 /* Align the stack. */
8777 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8778 stack_pointer_rtx,
8779 GEN_INT (-align_bytes)));
8780 RTX_FRAME_RELATED_P (insn) = 1;
8782 /* Replicate the return address on the stack so that the return
8783 address can be reached via the (argp - 1) slot. This is needed
8784 to implement the macro RETURN_ADDR_RTX and the intrinsic function
8785 expand_builtin_return_addr, etc. */
8786 x = crtl->drap_reg;
8787 x = gen_frame_mem (Pmode,
8788 plus_constant (x, -UNITS_PER_WORD));
8789 insn = emit_insn (gen_push (x));
8790 RTX_FRAME_RELATED_P (insn) = 1;
8793 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8794 slower on all targets. Also sdb doesn't like it. */
8796 if (gen_frame_pointer)
8798 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8799 RTX_FRAME_RELATED_P (insn) = 1;
8801 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8802 RTX_FRAME_RELATED_P (insn) = 1;
8804 if (ix86_cfa_state->reg == stack_pointer_rtx)
8805 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8808 if (stack_realign_fp)
8810 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8811 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8813 /* Align the stack. */
8814 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8815 stack_pointer_rtx,
8816 GEN_INT (-align_bytes)));
8817 RTX_FRAME_RELATED_P (insn) = 1;
8820 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8822 if (!frame.save_regs_using_mov)
8823 ix86_emit_save_regs ();
8824 else
8825 allocate += frame.nregs * UNITS_PER_WORD;
8827 /* When using the red zone we may start register saving before allocating
8828 the stack frame, saving one cycle of the prologue. However, avoid
8829 doing this if we are going to have to probe the stack, since
8830 at least on x86_64 the stack probe can turn into a call that clobbers
8831 a red zone location. */
8832 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8833 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8834 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8835 && !crtl->stack_realign_needed)
8836 ? hard_frame_pointer_rtx
8837 : stack_pointer_rtx,
8838 -frame.nregs * UNITS_PER_WORD);
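/* Three cases follow for the actual frame allocation: nothing to do, a
   single stack-pointer adjustment when no probing is required, or, when
   TARGET_STACK_PROBE is set and the allocation reaches CHECK_STACK_LIMIT,
   passing the size in %eax to a stack-probing worker insn (which writes
   %eax, hence the push and reload of %eax around it when it is live at
   function start). */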
8840 if (allocate == 0)
8842 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8843 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8844 GEN_INT (-allocate), -1,
8845 ix86_cfa_state->reg == stack_pointer_rtx);
8846 else
8848 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8849 bool eax_live;
8850 rtx t;
8852 if (cfun->machine->call_abi == MS_ABI)
8853 eax_live = false;
8854 else
8855 eax_live = ix86_eax_live_at_start_p ();
8857 if (eax_live)
8859 emit_insn (gen_push (eax));
8860 allocate -= UNITS_PER_WORD;
8863 emit_move_insn (eax, GEN_INT (allocate));
8865 if (TARGET_64BIT)
8866 insn = gen_allocate_stack_worker_64 (eax, eax);
8867 else
8868 insn = gen_allocate_stack_worker_32 (eax, eax);
8869 insn = emit_insn (insn);
8871 if (ix86_cfa_state->reg == stack_pointer_rtx)
8873 ix86_cfa_state->offset += allocate;
8874 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8875 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8876 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8877 RTX_FRAME_RELATED_P (insn) = 1;
8880 if (eax_live)
8882 if (frame_pointer_needed)
8883 t = plus_constant (hard_frame_pointer_rtx,
8884 allocate
8885 - frame.to_allocate
8886 - frame.nregs * UNITS_PER_WORD);
8887 else
8888 t = plus_constant (stack_pointer_rtx, allocate);
8889 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8893 if (frame.save_regs_using_mov
8894 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8895 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8897 if (!frame_pointer_needed
8898 || !(frame.to_allocate + frame.padding0)
8899 || crtl->stack_realign_needed)
8900 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8901 frame.to_allocate
8902 + frame.nsseregs * 16 + frame.padding0);
8903 else
8904 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8905 -frame.nregs * UNITS_PER_WORD);
8907 if (!frame_pointer_needed
8908 || !(frame.to_allocate + frame.padding0)
8909 || crtl->stack_realign_needed)
8910 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8911 frame.to_allocate);
8912 else
8913 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8914 - frame.nregs * UNITS_PER_WORD
8915 - frame.nsseregs * 16
8916 - frame.padding0);
8918 pic_reg_used = false;
8919 if (pic_offset_table_rtx
8920 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8921 || crtl->profile))
8923 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8925 if (alt_pic_reg_used != INVALID_REGNUM)
8926 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8928 pic_reg_used = true;
8931 if (pic_reg_used)
8933 if (TARGET_64BIT)
8935 if (ix86_cmodel == CM_LARGE_PIC)
8937 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8938 rtx label = gen_label_rtx ();
8939 emit_label (label);
8940 LABEL_PRESERVE_P (label) = 1;
8941 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8942 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8943 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8944 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8945 pic_offset_table_rtx, tmp_reg));
8947 else
8948 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8950 else
8951 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8954 /* In the pic_reg_used case, make sure that the got load isn't deleted
8955 when mcount needs it. Blockage to avoid call movement across mcount
8956 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8957 note. */
8958 if (crtl->profile && pic_reg_used)
8959 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8961 if (crtl->drap_reg && !crtl->stack_realign_needed)
8963 /* vDRAP is set up, but after reload it turns out stack realignment
8964 isn't necessary; here we emit prologue code to set up DRAP
8965 without the stack realignment adjustment. */
8966 rtx x;
8967 int drap_bp_offset = UNITS_PER_WORD * 2;
8969 if (ix86_static_chain_on_stack)
8970 drap_bp_offset += UNITS_PER_WORD;
8971 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8972 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8975 /* Prevent instructions from being scheduled into the register save push
8976 sequence when access to the red zone area is done through the frame pointer.
8977 The offset between the frame pointer and the stack pointer is calculated
8978 relative to the value of the stack pointer at the end of the function
8979 prologue, and moving instructions that access redzone area via frame
8980 pointer inside push sequence violates this assumption. */
8981 if (frame_pointer_needed && frame.red_zone_size)
8982 emit_insn (gen_memory_blockage ());
8984 /* Emit cld instruction if stringops are used in the function. */
8985 if (TARGET_CLD && ix86_current_function_needs_cld)
8986 emit_insn (gen_cld ());
8989 /* Emit code to restore REG using a POP insn. */
8991 static void
8992 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8994 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8996 if (ix86_cfa_state->reg == crtl->drap_reg
8997 && REGNO (reg) == REGNO (crtl->drap_reg))
8999 /* Previously we'd represented the CFA as an expression
9000 like *(%ebp - 8). We've just popped that value from
9001 the stack, which means we need to reset the CFA to
9002 the drap register. This will remain until we restore
9003 the stack pointer. */
9004 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
9005 RTX_FRAME_RELATED_P (insn) = 1;
9006 return;
9009 if (ix86_cfa_state->reg == stack_pointer_rtx)
9011 ix86_cfa_state->offset -= UNITS_PER_WORD;
9012 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9013 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9014 RTX_FRAME_RELATED_P (insn) = 1;
9017 /* When the frame pointer is the CFA, and we pop it, we are
9018 swapping back to the stack pointer as the CFA. This happens
9019 for stack frames that don't allocate other data, so we assume
9020 the stack pointer is now pointing at the return address, i.e.
9021 the function entry state, which makes the offset be 1 word. */
9022 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
9023 && reg == hard_frame_pointer_rtx)
9025 ix86_cfa_state->reg = stack_pointer_rtx;
9026 ix86_cfa_state->offset -= UNITS_PER_WORD;
9028 add_reg_note (insn, REG_CFA_DEF_CFA,
9029 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
9030 GEN_INT (ix86_cfa_state->offset)));
9031 RTX_FRAME_RELATED_P (insn) = 1;
9034 ix86_add_cfa_restore_note (insn, reg, red_offset);
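/* Illustrative note (added by the editor): the CFA notes attached above
   keep the unwind info consistent with each pop.  For example, if the
   CFA is currently %esp+8, a "popl %ebx" raises %esp by one word, so
   the rule must become %esp+4; popping %ebp while it is the CFA
   switches the CFA definition back to the stack pointer, i.e. the
   function-entry state.  */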
9037 /* Emit code to restore saved registers using POP insns. */
9039 static void
9040 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
9042 int regno;
9044 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9045 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
9047 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
9048 red_offset);
9049 red_offset += UNITS_PER_WORD;
9053 /* Emit code and notes for the LEAVE instruction. */
9055 static void
9056 ix86_emit_leave (HOST_WIDE_INT red_offset)
9058 rtx insn = emit_insn (ix86_gen_leave ());
9060 ix86_add_queued_cfa_restore_notes (insn);
9062 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
9064 ix86_cfa_state->reg = stack_pointer_rtx;
9065 ix86_cfa_state->offset -= UNITS_PER_WORD;
9067 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9068 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
9069 RTX_FRAME_RELATED_P (insn) = 1;
9070 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
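/* For illustration (added by the editor): "leave" behaves like
   "mov %ebp, %esp" followed by "pop %ebp" (movq/popq in 64-bit code),
   which is why the note above moves the CFA from the frame pointer
   back to the stack pointer, exactly as an explicit pop of the frame
   pointer would.  */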
9074 /* Emit code to restore saved registers using MOV insns. First register
9075 is restored from POINTER + OFFSET. */
9076 static void
9077 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
9078 HOST_WIDE_INT red_offset,
9079 int maybe_eh_return)
9081 unsigned int regno;
9082 rtx base_address = gen_rtx_MEM (Pmode, pointer);
9083 rtx insn;
9085 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9086 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9088 rtx reg = gen_rtx_REG (Pmode, regno);
9090 /* Ensure that adjust_address won't be forced to produce a pointer
9091 out of the range allowed by the x86-64 instruction set. */
9092 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
9094 rtx r11;
9096 r11 = gen_rtx_REG (DImode, R11_REG);
9097 emit_move_insn (r11, GEN_INT (offset));
9098 emit_insn (gen_adddi3 (r11, r11, pointer));
9099 base_address = gen_rtx_MEM (Pmode, r11);
9100 offset = 0;
9102 insn = emit_move_insn (reg,
9103 adjust_address (base_address, Pmode, offset));
9104 offset += UNITS_PER_WORD;
9106 if (ix86_cfa_state->reg == crtl->drap_reg
9107 && regno == REGNO (crtl->drap_reg))
9109 /* Previously we'd represented the CFA as an expression
9110 like *(%ebp - 8). We've just restored that value from
9111 the stack, which means we need to reset the CFA to
9112 the drap register. This will remain until we restore
9113 the stack pointer. */
9114 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
9115 RTX_FRAME_RELATED_P (insn) = 1;
9117 else
9118 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
9120 red_offset += UNITS_PER_WORD;
9124 /* Emit code to restore saved SSE registers using MOV insns. First register
9125 is restored from POINTER + OFFSET. */
9126 static void
9127 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
9128 HOST_WIDE_INT red_offset,
9129 int maybe_eh_return)
9131 int regno;
9132 rtx base_address = gen_rtx_MEM (TImode, pointer);
9133 rtx mem;
9135 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9136 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9138 rtx reg = gen_rtx_REG (TImode, regno);
9140 /* Ensure that adjust_address won't be forced to produce a pointer
9141 out of the range allowed by the x86-64 instruction set. */
9142 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
9144 rtx r11;
9146 r11 = gen_rtx_REG (DImode, R11_REG);
9147 emit_move_insn (r11, GEN_INT (offset));
9148 emit_insn (gen_adddi3 (r11, r11, pointer));
9149 base_address = gen_rtx_MEM (TImode, r11);
9150 offset = 0;
9152 mem = adjust_address (base_address, TImode, offset);
9153 set_mem_align (mem, 128);
9154 emit_move_insn (reg, mem);
9155 offset += 16;
9157 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
9159 red_offset += 16;
9163 /* Restore function stack, frame, and registers. */
9165 void
9166 ix86_expand_epilogue (int style)
9168 int sp_valid;
9169 struct ix86_frame frame;
9170 HOST_WIDE_INT offset, red_offset;
9171 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
9172 bool using_drap;
9174 ix86_finalize_stack_realign_flags ();
9176 /* When stack is realigned, SP must be valid. */
9177 sp_valid = (!frame_pointer_needed
9178 || current_function_sp_is_unchanging
9179 || stack_realign_fp);
9181 ix86_compute_frame_layout (&frame);
9183 /* See the comment about red zone and frame
9184 pointer usage in ix86_expand_prologue. */
9185 if (frame_pointer_needed && frame.red_zone_size)
9186 emit_insn (gen_memory_blockage ());
9188 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
9189 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
9191 /* Calculate start of saved registers relative to ebp. Special care
9192 must be taken for the normal return case of a function using
9193 eh_return: the eax and edx registers are marked as saved, but not
9194 restored along this path. */
9195 offset = frame.nregs;
9196 if (crtl->calls_eh_return && style != 2)
9197 offset -= 2;
9198 offset *= -UNITS_PER_WORD;
9199 offset -= frame.nsseregs * 16 + frame.padding0;
9201 /* Calculate start of saved registers relative to esp on entry to the
9202 function. When realigning the stack, this needs to be the most negative
9203 value possible at runtime. */
9204 red_offset = offset;
9205 if (using_drap)
9206 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9207 + UNITS_PER_WORD;
9208 else if (stack_realign_fp)
9209 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9210 - UNITS_PER_WORD;
9211 if (ix86_static_chain_on_stack)
9212 red_offset -= UNITS_PER_WORD;
9213 if (frame_pointer_needed)
9214 red_offset -= UNITS_PER_WORD;
9216 /* If we're only restoring one register and sp is not valid, then
9217 use a move instruction to restore the register, since it's
9218 less work than reloading sp and popping the register.
9220 The default code results in a stack adjustment using an add/lea instruction,
9221 while this code results in a LEAVE instruction (or discrete equivalent),
9222 so it is profitable in some other cases as well, especially when there
9223 are no registers to restore. We also use this code when TARGET_USE_LEAVE
9224 and there is exactly one register to pop. This heuristic may need some
9225 tuning in the future. */
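/* Rough sketch of the two strategies chosen between below (added by the
   editor; the real code depends on the frame layout).  Restoring via
   moves ends with leave:

	movl	-8(%ebp), %esi
	movl	-4(%ebp), %edi
	leave
	ret

   while the pop-based epilogue deallocates the frame first and then
   pops the saved registers:

	addl	$N, %esp
	popl	%esi
	popl	%edi
	popl	%ebp
	ret  */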
9226 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9227 || (TARGET_EPILOGUE_USING_MOVE
9228 && cfun->machine->use_fast_prologue_epilogue
9229 && ((frame.nregs + frame.nsseregs) > 1
9230 || (frame.to_allocate + frame.padding0) != 0))
9231 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9232 && (frame.to_allocate + frame.padding0) != 0)
9233 || (frame_pointer_needed && TARGET_USE_LEAVE
9234 && cfun->machine->use_fast_prologue_epilogue
9235 && (frame.nregs + frame.nsseregs) == 1)
9236 || crtl->calls_eh_return)
9238 /* Restore registers. We can use ebp or esp to address the memory
9239 locations. If both are available, default to ebp, since offsets
9240 are known to be small. The only exception is esp pointing directly
9241 to the end of the block of saved registers, where we may simplify
9242 the addressing mode.
9244 If we are realigning the stack with bp and sp, the register restores
9245 can't be addressed through bp; sp must be used instead. */
9247 if (!frame_pointer_needed
9248 || (sp_valid && !(frame.to_allocate + frame.padding0))
9249 || stack_realign_fp)
9251 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9252 frame.to_allocate, red_offset,
9253 style == 2);
9254 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9255 frame.to_allocate
9256 + frame.nsseregs * 16
9257 + frame.padding0,
9258 red_offset
9259 + frame.nsseregs * 16
9260 + frame.padding0, style == 2);
9262 else
9264 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9265 offset, red_offset,
9266 style == 2);
9267 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9268 offset
9269 + frame.nsseregs * 16
9270 + frame.padding0,
9271 red_offset
9272 + frame.nsseregs * 16
9273 + frame.padding0, style == 2);
9276 red_offset -= offset;
9278 /* eh_return epilogues need %ecx added to the stack pointer. */
9279 if (style == 2)
9281 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9283 /* Stack align doesn't work with eh_return. */
9284 gcc_assert (!crtl->stack_realign_needed);
9285 /* Neither do regparm nested functions. */
9286 gcc_assert (!ix86_static_chain_on_stack);
9288 if (frame_pointer_needed)
9290 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9291 tmp = plus_constant (tmp, UNITS_PER_WORD);
9292 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9294 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9295 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9297 /* Note that we use SA as a temporary CFA, as the return
9298 address is at the proper place relative to it. We
9299 pretend this happens at the FP restore insn because
9300 prior to this insn the FP would be stored at the wrong
9301 offset relative to SA, and after this insn we have no
9302 other reasonable register to use for the CFA. We don't
9303 bother resetting the CFA to the SP for the duration of
9304 the return insn. */
9305 add_reg_note (tmp, REG_CFA_DEF_CFA,
9306 plus_constant (sa, UNITS_PER_WORD));
9307 ix86_add_queued_cfa_restore_notes (tmp);
9308 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9309 RTX_FRAME_RELATED_P (tmp) = 1;
9310 ix86_cfa_state->reg = sa;
9311 ix86_cfa_state->offset = UNITS_PER_WORD;
9313 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9314 const0_rtx, style, false);
9316 else
9318 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9319 tmp = plus_constant (tmp, (frame.to_allocate
9320 + frame.nregs * UNITS_PER_WORD
9321 + frame.nsseregs * 16
9322 + frame.padding0));
9323 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9324 ix86_add_queued_cfa_restore_notes (tmp);
9326 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9327 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9329 ix86_cfa_state->offset = UNITS_PER_WORD;
9330 add_reg_note (tmp, REG_CFA_DEF_CFA,
9331 plus_constant (stack_pointer_rtx,
9332 UNITS_PER_WORD));
9333 RTX_FRAME_RELATED_P (tmp) = 1;
9337 else if (!frame_pointer_needed)
9338 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9339 GEN_INT (frame.to_allocate
9340 + frame.nregs * UNITS_PER_WORD
9341 + frame.nsseregs * 16
9342 + frame.padding0),
9343 style, !using_drap);
9344 /* If not an i386, mov & pop is faster than "leave". */
9345 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9346 || !cfun->machine->use_fast_prologue_epilogue)
9347 ix86_emit_leave (red_offset);
9348 else
9350 pro_epilogue_adjust_stack (stack_pointer_rtx,
9351 hard_frame_pointer_rtx,
9352 const0_rtx, style, !using_drap);
9354 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9357 else
9359 /* The first step is to deallocate the stack frame so that we can
9360 pop the registers.
9362 If we realign the stack with the frame pointer, then the stack pointer
9363 can't be recovered via lea $offset(%bp), %sp, because
9364 there is a padding area between bp and sp for the realignment.
9365 "add $to_allocate, %sp" must be used instead. */
9366 if (!sp_valid)
9368 gcc_assert (frame_pointer_needed);
9369 gcc_assert (!stack_realign_fp);
9370 pro_epilogue_adjust_stack (stack_pointer_rtx,
9371 hard_frame_pointer_rtx,
9372 GEN_INT (offset), style, false);
9373 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9374 0, red_offset,
9375 style == 2);
9376 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9377 GEN_INT (frame.nsseregs * 16
9378 + frame.padding0),
9379 style, false);
9381 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9383 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9384 frame.to_allocate, red_offset,
9385 style == 2);
9386 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9387 GEN_INT (frame.to_allocate
9388 + frame.nsseregs * 16
9389 + frame.padding0), style,
9390 !using_drap && !frame_pointer_needed);
9393 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9394 + frame.padding0);
9395 red_offset -= offset;
9397 if (frame_pointer_needed)
9399 /* Leave results in shorter dependency chains on CPUs that are
9400 able to grok it fast. */
9401 if (TARGET_USE_LEAVE)
9402 ix86_emit_leave (red_offset);
9403 else
9405 /* When stack realignment really happens, the stack
9406 pointer must be restored from the hard frame pointer
9407 if we are not using leave. */
9408 if (stack_realign_fp)
9409 pro_epilogue_adjust_stack (stack_pointer_rtx,
9410 hard_frame_pointer_rtx,
9411 const0_rtx, style, !using_drap);
9412 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9413 red_offset);
9418 if (using_drap)
9420 int param_ptr_offset = UNITS_PER_WORD;
9421 rtx insn;
9423 gcc_assert (stack_realign_drap);
9425 if (ix86_static_chain_on_stack)
9426 param_ptr_offset += UNITS_PER_WORD;
9427 if (!call_used_regs[REGNO (crtl->drap_reg)])
9428 param_ptr_offset += UNITS_PER_WORD;
9430 insn = emit_insn (gen_rtx_SET
9431 (VOIDmode, stack_pointer_rtx,
9432 gen_rtx_PLUS (Pmode,
9433 crtl->drap_reg,
9434 GEN_INT (-param_ptr_offset))));
9435 ix86_cfa_state->reg = stack_pointer_rtx;
9436 ix86_cfa_state->offset = param_ptr_offset;
9438 add_reg_note (insn, REG_CFA_DEF_CFA,
9439 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9440 GEN_INT (ix86_cfa_state->offset)));
9441 RTX_FRAME_RELATED_P (insn) = 1;
9443 if (!call_used_regs[REGNO (crtl->drap_reg)])
9444 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9447 /* Remove the saved static chain from the stack. The use of ECX is
9448 merely as a scratch register, not as the actual static chain. */
9449 if (ix86_static_chain_on_stack)
9451 rtx r, insn;
9453 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9454 ix86_cfa_state->offset += UNITS_PER_WORD;
9456 r = gen_rtx_REG (Pmode, CX_REG);
9457 insn = emit_insn (ix86_gen_pop1 (r));
9459 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9460 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9461 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9462 RTX_FRAME_RELATED_P (insn) = 1;
9465 /* Sibcall epilogues don't want a return instruction. */
9466 if (style == 0)
9468 *ix86_cfa_state = cfa_state_save;
9469 return;
9472 if (crtl->args.pops_args && crtl->args.size)
9474 rtx popc = GEN_INT (crtl->args.pops_args);
9476 /* i386 can only pop 64K bytes. If asked to pop more, pop return
9477 address, do explicit add, and jump indirectly to the caller. */
9479 if (crtl->args.pops_args >= 65536)
9481 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9482 rtx insn;
9484 /* There is no "pascal" calling convention in any 64bit ABI. */
9485 gcc_assert (!TARGET_64BIT);
9487 insn = emit_insn (gen_popsi1 (ecx));
9488 ix86_cfa_state->offset -= UNITS_PER_WORD;
9490 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9491 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9492 add_reg_note (insn, REG_CFA_REGISTER,
9493 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9494 RTX_FRAME_RELATED_P (insn) = 1;
9496 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9497 popc, -1, true);
9498 emit_jump_insn (gen_return_indirect_internal (ecx));
9500 else
9501 emit_jump_insn (gen_return_pop_internal (popc));
9503 else
9504 emit_jump_insn (gen_return_internal ());
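/* For illustration (added by the editor): "ret $N" encodes N as an
   unsigned 16-bit immediate, so a callee-pop amount of 64K or more
   cannot use it.  The indirect-return path emitted above is roughly:

	popl	%ecx		# return address into %ecx
	addl	$N, %esp	# pop the arguments explicitly
	jmp	*%ecx		# return to the caller  */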
9506 /* Restore the state back to the state from the prologue,
9507 so that it's correct for the next epilogue. */
9508 *ix86_cfa_state = cfa_state_save;
9511 /* Reset from the function's potential modifications. */
9513 static void
9514 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9515 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9517 if (pic_offset_table_rtx)
9518 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9519 #if TARGET_MACHO
9520 /* Mach-O doesn't support labels at the end of objects, so if
9521 it looks like we might want one, insert a NOP. */
9523 rtx insn = get_last_insn ();
9524 while (insn
9525 && NOTE_P (insn)
9526 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9527 insn = PREV_INSN (insn);
9528 if (insn
9529 && (LABEL_P (insn)
9530 || (NOTE_P (insn)
9531 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9532 fputs ("\tnop\n", file);
9534 #endif
9538 /* Extract the parts of an RTL expression that is a valid memory address
9539 for an instruction. Return 0 if the structure of the address is
9540 grossly off. Return -1 if the address contains ASHIFT, so it is not
9541 strictly valid, but is still used for computing the length of a lea instruction. */
9544 ix86_decompose_address (rtx addr, struct ix86_address *out)
9546 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9547 rtx base_reg, index_reg;
9548 HOST_WIDE_INT scale = 1;
9549 rtx scale_rtx = NULL_RTX;
9550 rtx tmp;
9551 int retval = 1;
9552 enum ix86_address_seg seg = SEG_DEFAULT;
9554 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9555 base = addr;
9556 else if (GET_CODE (addr) == PLUS)
9558 rtx addends[4], op;
9559 int n = 0, i;
9561 op = addr;
9564 if (n >= 4)
9565 return 0;
9566 addends[n++] = XEXP (op, 1);
9567 op = XEXP (op, 0);
9569 while (GET_CODE (op) == PLUS);
9570 if (n >= 4)
9571 return 0;
9572 addends[n] = op;
9574 for (i = n; i >= 0; --i)
9576 op = addends[i];
9577 switch (GET_CODE (op))
9579 case MULT:
9580 if (index)
9581 return 0;
9582 index = XEXP (op, 0);
9583 scale_rtx = XEXP (op, 1);
9584 break;
9586 case ASHIFT:
9587 if (index)
9588 return 0;
9589 index = XEXP (op, 0);
9590 tmp = XEXP (op, 1);
9591 if (!CONST_INT_P (tmp))
9592 return 0;
9593 scale = INTVAL (tmp);
9594 if ((unsigned HOST_WIDE_INT) scale > 3)
9595 return 0;
9596 scale = 1 << scale;
9597 break;
9599 case UNSPEC:
9600 if (XINT (op, 1) == UNSPEC_TP
9601 && TARGET_TLS_DIRECT_SEG_REFS
9602 && seg == SEG_DEFAULT)
9603 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9604 else
9605 return 0;
9606 break;
9608 case REG:
9609 case SUBREG:
9610 if (!base)
9611 base = op;
9612 else if (!index)
9613 index = op;
9614 else
9615 return 0;
9616 break;
9618 case CONST:
9619 case CONST_INT:
9620 case SYMBOL_REF:
9621 case LABEL_REF:
9622 if (disp)
9623 return 0;
9624 disp = op;
9625 break;
9627 default:
9628 return 0;
9632 else if (GET_CODE (addr) == MULT)
9634 index = XEXP (addr, 0); /* index*scale */
9635 scale_rtx = XEXP (addr, 1);
9637 else if (GET_CODE (addr) == ASHIFT)
9639 /* We're called for lea too, which implements ashift on occasion. */
9640 index = XEXP (addr, 0);
9641 tmp = XEXP (addr, 1);
9642 if (!CONST_INT_P (tmp))
9643 return 0;
9644 scale = INTVAL (tmp);
9645 if ((unsigned HOST_WIDE_INT) scale > 3)
9646 return 0;
9647 scale = 1 << scale;
9648 retval = -1;
9650 else
9651 disp = addr; /* displacement */
9653 /* Extract the integral value of scale. */
9654 if (scale_rtx)
9656 if (!CONST_INT_P (scale_rtx))
9657 return 0;
9658 scale = INTVAL (scale_rtx);
9661 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9662 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9664 /* Avoid useless 0 displacement. */
9665 if (disp == const0_rtx && (base || index))
9666 disp = NULL_RTX;
9668 /* Allow arg pointer and stack pointer as index if there is no scaling. */
9669 if (base_reg && index_reg && scale == 1
9670 && (index_reg == arg_pointer_rtx
9671 || index_reg == frame_pointer_rtx
9672 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9674 rtx tmp;
9675 tmp = base, base = index, index = tmp;
9676 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9679 /* Special case: %ebp cannot be encoded as a base without a displacement.
9680 Similarly %r13. */
9681 if (!disp
9682 && base_reg
9683 && (base_reg == hard_frame_pointer_rtx
9684 || base_reg == frame_pointer_rtx
9685 || base_reg == arg_pointer_rtx
9686 || (REG_P (base_reg)
9687 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9688 || REGNO (base_reg) == R13_REG))))
9689 disp = const0_rtx;
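/* Encoding background for the special case above (added by the editor):
   with mod=00 in the ModR/M byte, a base field of 101 (%ebp, or %r13
   with REX.B) does not mean "[reg]" but "disp32 only" (RIP-relative in
   64-bit code), so a plain [%ebp] or [%r13] must be emitted with an
   explicit zero displacement.  */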
9691 /* Special case: on K6, [%esi] makes the instruction vector decoded.
9692 Avoid this by transforming to [%esi+0].
9693 Reload calls address legitimization without cfun defined, so we need
9694 to test cfun for being non-NULL. */
9695 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9696 && base_reg && !index_reg && !disp
9697 && REG_P (base_reg)
9698 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9699 disp = const0_rtx;
9701 /* Special case: encode reg+reg instead of reg*2. */
9702 if (!base && index && scale == 2)
9703 base = index, base_reg = index_reg, scale = 1;
9705 /* Special case: scaling cannot be encoded without base or displacement. */
9706 if (!base && !disp && index && scale != 1)
9707 disp = const0_rtx;
9709 out->base = base;
9710 out->index = index;
9711 out->disp = disp;
9712 out->scale = scale;
9713 out->seg = seg;
9715 return retval;
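/* Worked example (added by the editor, not part of the upstream
   sources): the 32-bit address "16(%eax,%ebx,4)" arrives here in its
   canonical RTL form

	(plus (plus (mult (reg:SI bx) (const_int 4))
		    (reg:SI ax))
	      (const_int 16))

   and is decomposed into base = %eax, index = %ebx, scale = 4,
   disp = 16, seg = SEG_DEFAULT, returning 1.  A bare top-level ASHIFT
   address such as (ashift (reg) (const_int 2)), which lea can also
   express, is accepted as index*4 but returns -1.  */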
9718 /* Return the cost of the memory address x.
9719 For i386, it is better to use a complex address than let gcc copy
9720 the address into a reg and make a new pseudo. But not if the address
9721 requires two regs - that would mean more pseudos with longer
9722 lifetimes. */
9723 static int
9724 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9726 struct ix86_address parts;
9727 int cost = 1;
9728 int ok = ix86_decompose_address (x, &parts);
9730 gcc_assert (ok);
9732 if (parts.base && GET_CODE (parts.base) == SUBREG)
9733 parts.base = SUBREG_REG (parts.base);
9734 if (parts.index && GET_CODE (parts.index) == SUBREG)
9735 parts.index = SUBREG_REG (parts.index);
9737 /* Attempt to minimize number of registers in the address. */
9738 if ((parts.base
9739 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9740 || (parts.index
9741 && (!REG_P (parts.index)
9742 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9743 cost++;
9745 if (parts.base
9746 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9747 && parts.index
9748 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9749 && parts.base != parts.index)
9750 cost++;
9752 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
9753 since its predecode logic can't detect the length of such instructions
9754 and they degenerate to vector decoding. Increase the cost of such
9755 addresses here. The penalty is at least 2 cycles. It may be worthwhile
9756 to split such addresses or even refuse them altogether.
9758 The following addressing modes are affected:
9759 [base+scale*index]
9760 [scale*index+disp]
9761 [base+index]
9763 The first and last cases may be avoidable by explicitly coding the zero in
9764 the memory address, but I don't have an AMD-K6 machine handy to check this
9765 theory. */
9767 if (TARGET_K6
9768 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9769 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9770 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9771 cost += 10;
9773 return cost;
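/* Example of the cost model above (added by the editor): an address
   using only hard registers, e.g. "4(%esp)", keeps the base cost of 1;
   an address built from pseudo registers pays one extra unit, and two
   extra when base and index are distinct pseudos; and on the K6 the
   affected forms listed in the comment pay an additional 10.  */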
9776 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
9777 this is used to form addresses to local data when -fPIC is in
9778 use. */
9780 static bool
9781 darwin_local_data_pic (rtx disp)
9783 return (GET_CODE (disp) == UNSPEC
9784 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9787 /* Determine if a given RTX is a valid constant. We already know this
9788 satisfies CONSTANT_P. */
9790 bool
9791 legitimate_constant_p (rtx x)
9793 switch (GET_CODE (x))
9795 case CONST:
9796 x = XEXP (x, 0);
9798 if (GET_CODE (x) == PLUS)
9800 if (!CONST_INT_P (XEXP (x, 1)))
9801 return false;
9802 x = XEXP (x, 0);
9805 if (TARGET_MACHO && darwin_local_data_pic (x))
9806 return true;
9808 /* Only some unspecs are valid as "constants". */
9809 if (GET_CODE (x) == UNSPEC)
9810 switch (XINT (x, 1))
9812 case UNSPEC_GOT:
9813 case UNSPEC_GOTOFF:
9814 case UNSPEC_PLTOFF:
9815 return TARGET_64BIT;
9816 case UNSPEC_TPOFF:
9817 case UNSPEC_NTPOFF:
9818 x = XVECEXP (x, 0, 0);
9819 return (GET_CODE (x) == SYMBOL_REF
9820 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9821 case UNSPEC_DTPOFF:
9822 x = XVECEXP (x, 0, 0);
9823 return (GET_CODE (x) == SYMBOL_REF
9824 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9825 default:
9826 return false;
9829 /* We must have drilled down to a symbol. */
9830 if (GET_CODE (x) == LABEL_REF)
9831 return true;
9832 if (GET_CODE (x) != SYMBOL_REF)
9833 return false;
9834 /* FALLTHRU */
9836 case SYMBOL_REF:
9837 /* TLS symbols are never valid. */
9838 if (SYMBOL_REF_TLS_MODEL (x))
9839 return false;
9841 /* DLLIMPORT symbols are never valid. */
9842 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9843 && SYMBOL_REF_DLLIMPORT_P (x))
9844 return false;
9845 break;
9847 case CONST_DOUBLE:
9848 if (GET_MODE (x) == TImode
9849 && x != CONST0_RTX (TImode)
9850 && !TARGET_64BIT)
9851 return false;
9852 break;
9854 case CONST_VECTOR:
9855 if (!standard_sse_constant_p (x))
9856 return false;
9858 default:
9859 break;
9862 /* Otherwise we handle everything else in the move patterns. */
9863 return true;
9866 /* Determine if it's legal to put X into the constant pool. This
9867 is not possible for the address of thread-local symbols, which
9868 is checked above. */
9870 static bool
9871 ix86_cannot_force_const_mem (rtx x)
9873 /* We can always put integral constants and vectors in memory. */
9874 switch (GET_CODE (x))
9876 case CONST_INT:
9877 case CONST_DOUBLE:
9878 case CONST_VECTOR:
9879 return false;
9881 default:
9882 break;
9884 return !legitimate_constant_p (x);
9888 /* Nonzero if the constant value X is a legitimate general operand
9889 when generating PIC code. It is given that flag_pic is on and
9890 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9892 bool
9893 legitimate_pic_operand_p (rtx x)
9895 rtx inner;
9897 switch (GET_CODE (x))
9899 case CONST:
9900 inner = XEXP (x, 0);
9901 if (GET_CODE (inner) == PLUS
9902 && CONST_INT_P (XEXP (inner, 1)))
9903 inner = XEXP (inner, 0);
9905 /* Only some unspecs are valid as "constants". */
9906 if (GET_CODE (inner) == UNSPEC)
9907 switch (XINT (inner, 1))
9909 case UNSPEC_GOT:
9910 case UNSPEC_GOTOFF:
9911 case UNSPEC_PLTOFF:
9912 return TARGET_64BIT;
9913 case UNSPEC_TPOFF:
9914 x = XVECEXP (inner, 0, 0);
9915 return (GET_CODE (x) == SYMBOL_REF
9916 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9917 case UNSPEC_MACHOPIC_OFFSET:
9918 return legitimate_pic_address_disp_p (x);
9919 default:
9920 return false;
9922 /* FALLTHRU */
9924 case SYMBOL_REF:
9925 case LABEL_REF:
9926 return legitimate_pic_address_disp_p (x);
9928 default:
9929 return true;
9933 /* Determine if a given CONST RTX is a valid memory displacement
9934 in PIC mode. */
9937 legitimate_pic_address_disp_p (rtx disp)
9939 bool saw_plus;
9941 /* In 64bit mode we can allow direct addresses of symbols and labels
9942 when they are not dynamic symbols. */
9943 if (TARGET_64BIT)
9945 rtx op0 = disp, op1;
9947 switch (GET_CODE (disp))
9949 case LABEL_REF:
9950 return true;
9952 case CONST:
9953 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9954 break;
9955 op0 = XEXP (XEXP (disp, 0), 0);
9956 op1 = XEXP (XEXP (disp, 0), 1);
9957 if (!CONST_INT_P (op1)
9958 || INTVAL (op1) >= 16*1024*1024
9959 || INTVAL (op1) < -16*1024*1024)
9960 break;
9961 if (GET_CODE (op0) == LABEL_REF)
9962 return true;
9963 if (GET_CODE (op0) != SYMBOL_REF)
9964 break;
9965 /* FALLTHRU */
9967 case SYMBOL_REF:
9968 /* TLS references should always be enclosed in UNSPEC. */
9969 if (SYMBOL_REF_TLS_MODEL (op0))
9970 return false;
9971 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9972 && ix86_cmodel != CM_LARGE_PIC)
9973 return true;
9974 break;
9976 default:
9977 break;
9980 if (GET_CODE (disp) != CONST)
9981 return 0;
9982 disp = XEXP (disp, 0);
9984 if (TARGET_64BIT)
9986 /* It is unsafe to allow PLUS expressions here, as that would limit the
9987 allowed reach of GOT tables. We should not need these anyway. */
9988 if (GET_CODE (disp) != UNSPEC
9989 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9990 && XINT (disp, 1) != UNSPEC_GOTOFF
9991 && XINT (disp, 1) != UNSPEC_PLTOFF))
9992 return 0;
9994 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9995 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9996 return 0;
9997 return 1;
10000 saw_plus = false;
10001 if (GET_CODE (disp) == PLUS)
10003 if (!CONST_INT_P (XEXP (disp, 1)))
10004 return 0;
10005 disp = XEXP (disp, 0);
10006 saw_plus = true;
10009 if (TARGET_MACHO && darwin_local_data_pic (disp))
10010 return 1;
10012 if (GET_CODE (disp) != UNSPEC)
10013 return 0;
10015 switch (XINT (disp, 1))
10017 case UNSPEC_GOT:
10018 if (saw_plus)
10019 return false;
10020 /* We need to check for both symbols and labels because VxWorks loads
10021 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
10022 details. */
10023 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
10024 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
10025 case UNSPEC_GOTOFF:
10026 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
10027 While the ABI also specifies a 32bit relocation, we don't produce it in
10028 the small PIC model at all. */
10029 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
10030 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
10031 && !TARGET_64BIT)
10032 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
10033 return false;
10034 case UNSPEC_GOTTPOFF:
10035 case UNSPEC_GOTNTPOFF:
10036 case UNSPEC_INDNTPOFF:
10037 if (saw_plus)
10038 return false;
10039 disp = XVECEXP (disp, 0, 0);
10040 return (GET_CODE (disp) == SYMBOL_REF
10041 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
10042 case UNSPEC_NTPOFF:
10043 disp = XVECEXP (disp, 0, 0);
10044 return (GET_CODE (disp) == SYMBOL_REF
10045 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
10046 case UNSPEC_DTPOFF:
10047 disp = XVECEXP (disp, 0, 0);
10048 return (GET_CODE (disp) == SYMBOL_REF
10049 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
10052 return 0;
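/* Examples of displacements this predicate accepts (added by the
   editor; assembler spellings are approximate): "foo@GOTOFF" and
   "foo@GOT" in 32-bit PIC code, "foo@GOTPCREL" in 64-bit code, and the
   TLS forms "foo@gottpoff", "foo@ntpoff" and "foo@dtpoff", each wrapped
   in the corresponding UNSPEC.  A bare TLS SYMBOL_REF is rejected,
   since TLS references must always be wrapped in an UNSPEC.  */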
10055 /* Recognizes RTL expressions that are valid memory addresses for an
10056 instruction. The MODE argument is the machine mode for the MEM
10057 expression that wants to use this address.
10059 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
10060 convert common non-canonical forms to canonical form so that they will
10061 be recognized. */
10063 static bool
10064 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
10065 rtx addr, bool strict)
10067 struct ix86_address parts;
10068 rtx base, index, disp;
10069 HOST_WIDE_INT scale;
10071 if (ix86_decompose_address (addr, &parts) <= 0)
10072 /* Decomposition failed. */
10073 return false;
10075 base = parts.base;
10076 index = parts.index;
10077 disp = parts.disp;
10078 scale = parts.scale;
10080 /* Validate base register.
10082 Don't allow SUBREGs that span more than a word here. It can lead to spill
10083 failures when the base is one word out of a two-word structure, which is
10084 represented internally as a DImode int. */
10086 if (base)
10088 rtx reg;
10090 if (REG_P (base))
10091 reg = base;
10092 else if (GET_CODE (base) == SUBREG
10093 && REG_P (SUBREG_REG (base))
10094 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
10095 <= UNITS_PER_WORD)
10096 reg = SUBREG_REG (base);
10097 else
10098 /* Base is not a register. */
10099 return false;
10101 if (GET_MODE (base) != Pmode)
10102 /* Base is not in Pmode. */
10103 return false;
10105 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
10106 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
10107 /* Base is not valid. */
10108 return false;
10111 /* Validate index register.
10113 Don't allow SUBREGs that span more than a word here -- same as above. */
10115 if (index)
10117 rtx reg;
10119 if (REG_P (index))
10120 reg = index;
10121 else if (GET_CODE (index) == SUBREG
10122 && REG_P (SUBREG_REG (index))
10123 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
10124 <= UNITS_PER_WORD)
10125 reg = SUBREG_REG (index);
10126 else
10127 /* Index is not a register. */
10128 return false;
10130 if (GET_MODE (index) != Pmode)
10131 /* Index is not in Pmode. */
10132 return false;
10134 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
10135 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
10136 /* Index is not valid. */
10137 return false;
10140 /* Validate scale factor. */
10141 if (scale != 1)
10143 if (!index)
10144 /* Scale without index. */
10145 return false;
10147 if (scale != 2 && scale != 4 && scale != 8)
10148 /* Scale is not a valid multiplier. */
10149 return false;
10152 /* Validate displacement. */
10153 if (disp)
10155 if (GET_CODE (disp) == CONST
10156 && GET_CODE (XEXP (disp, 0)) == UNSPEC
10157 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
10158 switch (XINT (XEXP (disp, 0), 1))
10160 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
10161 used. While the ABI also specifies 32bit relocations, we don't produce
10162 them at all and use IP-relative addressing instead. */
10163 case UNSPEC_GOT:
10164 case UNSPEC_GOTOFF:
10165 gcc_assert (flag_pic);
10166 if (!TARGET_64BIT)
10167 goto is_legitimate_pic;
10169 /* 64bit address unspec. */
10170 return false;
10172 case UNSPEC_GOTPCREL:
10173 gcc_assert (flag_pic);
10174 goto is_legitimate_pic;
10176 case UNSPEC_GOTTPOFF:
10177 case UNSPEC_GOTNTPOFF:
10178 case UNSPEC_INDNTPOFF:
10179 case UNSPEC_NTPOFF:
10180 case UNSPEC_DTPOFF:
10181 break;
10183 default:
10184 /* Invalid address unspec. */
10185 return false;
10188 else if (SYMBOLIC_CONST (disp)
10189 && (flag_pic
10190 || (TARGET_MACHO
10191 #if TARGET_MACHO
10192 && MACHOPIC_INDIRECT
10193 && !machopic_operand_p (disp)
10194 #endif
10198 is_legitimate_pic:
10199 if (TARGET_64BIT && (index || base))
10201 /* foo@dtpoff(%rX) is ok. */
10202 if (GET_CODE (disp) != CONST
10203 || GET_CODE (XEXP (disp, 0)) != PLUS
10204 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
10205 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
10206 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
10207 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
10208 /* Non-constant pic memory reference. */
10209 return false;
10211 else if (! legitimate_pic_address_disp_p (disp))
10212 /* Displacement is an invalid pic construct. */
10213 return false;
10215 /* This code used to verify that a symbolic pic displacement
10216 includes the pic_offset_table_rtx register.
10218 While this is a good idea, unfortunately these constructs may
10219 be created by the "adds using lea" optimization for incorrect
10220 code like:
10222 int a;
10223 int foo(int i)
10224 {
10225 return *(&a+i);
10226 }
10228 This code is nonsensical, but results in addressing the
10229 GOT table with a pic_offset_table_rtx base. We can't
10230 just refuse it easily, since it gets matched by the
10231 "addsi3" pattern, which later gets split to lea in the
10232 case the output register differs from the input. While this
10233 could be handled by a separate addsi pattern for this case
10234 that never results in lea, disabling this test seems to be the
10235 easier and correct fix for the crash. */
10237 else if (GET_CODE (disp) != LABEL_REF
10238 && !CONST_INT_P (disp)
10239 && (GET_CODE (disp) != CONST
10240 || !legitimate_constant_p (disp))
10241 && (GET_CODE (disp) != SYMBOL_REF
10242 || !legitimate_constant_p (disp)))
10243 /* Displacement is not constant. */
10244 return false;
10245 else if (TARGET_64BIT
10246 && !x86_64_immediate_operand (disp, VOIDmode))
10247 /* Displacement is out of range. */
10248 return false;
10251 /* Everything looks valid. */
10252 return true;
10255 /* Determine if a given RTX is a valid constant address. */
10257 bool
10258 constant_address_p (rtx x)
10260 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
10263 /* Return a unique alias set for the GOT. */
10265 static alias_set_type
10266 ix86_GOT_alias_set (void)
10268 static alias_set_type set = -1;
10269 if (set == -1)
10270 set = new_alias_set ();
10271 return set;
10274 /* Return a legitimate reference for ORIG (an address) using the
10275 register REG. If REG is 0, a new pseudo is generated.
10277 There are two types of references that must be handled:
10279 1. Global data references must load the address from the GOT, via
10280 the PIC reg. An insn is emitted to do this load, and the reg is
10281 returned.
10283 2. Static data references, constant pool addresses, and code labels
10284 compute the address as an offset from the GOT, whose base is in
10285 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10286 differentiate them from global data objects. The returned
10287 address is the PIC reg + an unspec constant.
10289 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10290 reg also appears in the address. */
10292 static rtx
10293 legitimize_pic_address (rtx orig, rtx reg)
10295 rtx addr = orig;
10296 rtx new_rtx = orig;
10297 rtx base;
10299 #if TARGET_MACHO
10300 if (TARGET_MACHO && !TARGET_64BIT)
10302 if (reg == 0)
10303 reg = gen_reg_rtx (Pmode);
10304 /* Use the generic Mach-O PIC machinery. */
10305 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10307 #endif
10309 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10310 new_rtx = addr;
10311 else if (TARGET_64BIT
10312 && ix86_cmodel != CM_SMALL_PIC
10313 && gotoff_operand (addr, Pmode))
10315 rtx tmpreg;
10316 /* This symbol may be referenced via a displacement from the PIC
10317 base address (@GOTOFF). */
10319 if (reload_in_progress)
10320 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10321 if (GET_CODE (addr) == CONST)
10322 addr = XEXP (addr, 0);
10323 if (GET_CODE (addr) == PLUS)
10325 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10326 UNSPEC_GOTOFF);
10327 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10329 else
10330 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10331 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10332 if (!reg)
10333 tmpreg = gen_reg_rtx (Pmode);
10334 else
10335 tmpreg = reg;
10336 emit_move_insn (tmpreg, new_rtx);
10338 if (reg != 0)
10340 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10341 tmpreg, 1, OPTAB_DIRECT);
10342 new_rtx = reg;
10344 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10346 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10348 /* This symbol may be referenced via a displacement from the PIC
10349 base address (@GOTOFF). */
10351 if (reload_in_progress)
10352 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10353 if (GET_CODE (addr) == CONST)
10354 addr = XEXP (addr, 0);
10355 if (GET_CODE (addr) == PLUS)
10357 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10358 UNSPEC_GOTOFF);
10359 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10361 else
10362 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10363 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10364 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10366 if (reg != 0)
10368 emit_move_insn (reg, new_rtx);
10369 new_rtx = reg;
10372 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10373 /* We can't use @GOTOFF for text labels on VxWorks;
10374 see gotoff_operand. */
10375 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10377 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10379 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10380 return legitimize_dllimport_symbol (addr, true);
10381 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10382 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10383 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10385 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10386 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10390 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10392 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10393 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10394 new_rtx = gen_const_mem (Pmode, new_rtx);
10395 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10397 if (reg == 0)
10398 reg = gen_reg_rtx (Pmode);
10399 /* Use gen_movsi directly, otherwise the address is loaded
10400 into a register for CSE. We don't want to CSE these addresses;
10401 instead we CSE addresses from the GOT table, so skip this. */
10402 emit_insn (gen_movsi (reg, new_rtx));
10403 new_rtx = reg;
10405 else
10407 /* This symbol must be referenced via a load from the
10408 Global Offset Table (@GOT). */
10410 if (reload_in_progress)
10411 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10412 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10413 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10414 if (TARGET_64BIT)
10415 new_rtx = force_reg (Pmode, new_rtx);
10416 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10417 new_rtx = gen_const_mem (Pmode, new_rtx);
10418 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10420 if (reg == 0)
10421 reg = gen_reg_rtx (Pmode);
10422 emit_move_insn (reg, new_rtx);
10423 new_rtx = reg;
10426 else
10428 if (CONST_INT_P (addr)
10429 && !x86_64_immediate_operand (addr, VOIDmode))
10431 if (reg)
10433 emit_move_insn (reg, addr);
10434 new_rtx = reg;
10436 else
10437 new_rtx = force_reg (Pmode, addr);
10439 else if (GET_CODE (addr) == CONST)
10441 addr = XEXP (addr, 0);
10443 /* We must match stuff we generated before. Assume the only
10444 unspecs that can get here are ours. Not that we could do
10445 anything with them anyway.... */
10446 if (GET_CODE (addr) == UNSPEC
10447 || (GET_CODE (addr) == PLUS
10448 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10449 return orig;
10450 gcc_assert (GET_CODE (addr) == PLUS);
10452 if (GET_CODE (addr) == PLUS)
10454 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10456 /* Check first to see if this is a constant offset from a @GOTOFF
10457 symbol reference. */
10458 if (gotoff_operand (op0, Pmode)
10459 && CONST_INT_P (op1))
10461 if (!TARGET_64BIT)
10463 if (reload_in_progress)
10464 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10465 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10466 UNSPEC_GOTOFF);
10467 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10468 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10469 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10471 if (reg != 0)
10473 emit_move_insn (reg, new_rtx);
10474 new_rtx = reg;
10477 else
10479 if (INTVAL (op1) < -16*1024*1024
10480 || INTVAL (op1) >= 16*1024*1024)
10482 if (!x86_64_immediate_operand (op1, Pmode))
10483 op1 = force_reg (Pmode, op1);
10484 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10488 else
10490 base = legitimize_pic_address (XEXP (addr, 0), reg);
10491 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10492 base == reg ? NULL_RTX : reg);
10494 if (CONST_INT_P (new_rtx))
10495 new_rtx = plus_constant (base, INTVAL (new_rtx));
10496 else
10498 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10500 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10501 new_rtx = XEXP (new_rtx, 1);
10503 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10508 return new_rtx;
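/* Worked example (added by the editor, not part of the upstream
   sources): in 32-bit PIC code a reference to a global variable "bar"
   is rewritten as a load from its GOT slot,

	(mem (plus pic_offset_table_rtx
		   (const (unspec [bar] UNSPEC_GOT))))

   i.e. "movl bar@GOT(%ebx), %reg", while a local/static "foo" becomes
   the PIC register plus a @GOTOFF displacement,

	(plus pic_offset_table_rtx (const (unspec [foo] UNSPEC_GOTOFF)))

   i.e. "leal foo@GOTOFF(%ebx), %reg".  */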
10511 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10513 static rtx
10514 get_thread_pointer (int to_reg)
10516 rtx tp, reg, insn;
10518 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10519 if (!to_reg)
10520 return tp;
10522 reg = gen_reg_rtx (Pmode);
10523 insn = gen_rtx_SET (VOIDmode, reg, tp);
10524 insn = emit_insn (insn);
10526 return reg;
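/* For illustration (added by the editor): UNSPEC_TP represents the
   thread pointer, which on GNU/Linux lives behind a segment register:
   %fs in 64-bit code and %gs in 32-bit code.  Forcing it into a
   register therefore amounts to "movq %fs:0, %reg" or
   "movl %gs:0, %reg".  */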
10529 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10530 false if we expect this to be used for a memory address and true if
10531 we expect to load the address into a register. */
10533 static rtx
10534 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10536 rtx dest, base, off, pic, tp;
10537 int type;
10539 switch (model)
10541 case TLS_MODEL_GLOBAL_DYNAMIC:
10542 dest = gen_reg_rtx (Pmode);
10543 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10545 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10547 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10549 start_sequence ();
10550 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10551 insns = get_insns ();
10552 end_sequence ();
10554 RTL_CONST_CALL_P (insns) = 1;
10555 emit_libcall_block (insns, dest, rax, x);
10557 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10558 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10559 else
10560 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10562 if (TARGET_GNU2_TLS)
10564 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10566 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10568 break;
10570 case TLS_MODEL_LOCAL_DYNAMIC:
10571 base = gen_reg_rtx (Pmode);
10572 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10574 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10576 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10578 start_sequence ();
10579 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10580 insns = get_insns ();
10581 end_sequence ();
10583 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10584 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10585 RTL_CONST_CALL_P (insns) = 1;
10586 emit_libcall_block (insns, base, rax, note);
10588 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10589 emit_insn (gen_tls_local_dynamic_base_64 (base));
10590 else
10591 emit_insn (gen_tls_local_dynamic_base_32 (base));
10593 if (TARGET_GNU2_TLS)
10595 rtx x = ix86_tls_module_base ();
10597 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10598 gen_rtx_MINUS (Pmode, x, tp));
10601 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10602 off = gen_rtx_CONST (Pmode, off);
10604 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10606 if (TARGET_GNU2_TLS)
10608 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10610 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10613 break;
10615 case TLS_MODEL_INITIAL_EXEC:
10616 if (TARGET_64BIT)
10618 pic = NULL;
10619 type = UNSPEC_GOTNTPOFF;
10621 else if (flag_pic)
10623 if (reload_in_progress)
10624 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10625 pic = pic_offset_table_rtx;
10626 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10628 else if (!TARGET_ANY_GNU_TLS)
10630 pic = gen_reg_rtx (Pmode);
10631 emit_insn (gen_set_got (pic));
10632 type = UNSPEC_GOTTPOFF;
10634 else
10636 pic = NULL;
10637 type = UNSPEC_INDNTPOFF;
10640 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10641 off = gen_rtx_CONST (Pmode, off);
10642 if (pic)
10643 off = gen_rtx_PLUS (Pmode, pic, off);
10644 off = gen_const_mem (Pmode, off);
10645 set_mem_alias_set (off, ix86_GOT_alias_set ());
10647 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10649 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10650 off = force_reg (Pmode, off);
10651 return gen_rtx_PLUS (Pmode, base, off);
10653 else
10655 base = get_thread_pointer (true);
10656 dest = gen_reg_rtx (Pmode);
10657 emit_insn (gen_subsi3 (dest, base, off));
10659 break;
10661 case TLS_MODEL_LOCAL_EXEC:
10662 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10663 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10664 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10665 off = gen_rtx_CONST (Pmode, off);
10667 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10669 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10670 return gen_rtx_PLUS (Pmode, base, off);
10672 else
10674 base = get_thread_pointer (true);
10675 dest = gen_reg_rtx (Pmode);
10676 emit_insn (gen_subsi3 (dest, base, off));
10678 break;
10680 default:
10681 gcc_unreachable ();
10684 return dest;
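/* Rough examples of the access sequences generated per model (added by
   the editor; the exact code depends on the target and on
   TARGET_GNU2_TLS):

     global-dynamic, 64-bit:
	leaq	x@tlsgd(%rip), %rdi
	call	__tls_get_addr@PLT

     initial-exec, 64-bit:
	movq	x@gottpoff(%rip), %rax
	movq	%fs:(%rax), %reg

     local-exec, 64-bit:
	movq	%fs:0, %rax
	movq	x@tpoff(%rax), %reg  */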
10687 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10688 to symbol DECL. */
10690 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10691 htab_t dllimport_map;
10693 static tree
10694 get_dllimport_decl (tree decl)
10696 struct tree_map *h, in;
10697 void **loc;
10698 const char *name;
10699 const char *prefix;
10700 size_t namelen, prefixlen;
10701 char *imp_name;
10702 tree to;
10703 rtx rtl;
10705 if (!dllimport_map)
10706 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10708 in.hash = htab_hash_pointer (decl);
10709 in.base.from = decl;
10710 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10711 h = (struct tree_map *) *loc;
10712 if (h)
10713 return h->to;
10715 *loc = h = ggc_alloc_tree_map ();
10716 h->hash = in.hash;
10717 h->base.from = decl;
10718 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10719 VAR_DECL, NULL, ptr_type_node);
10720 DECL_ARTIFICIAL (to) = 1;
10721 DECL_IGNORED_P (to) = 1;
10722 DECL_EXTERNAL (to) = 1;
10723 TREE_READONLY (to) = 1;
10725 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10726 name = targetm.strip_name_encoding (name);
10727 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10728 ? "*__imp_" : "*__imp__";
10729 namelen = strlen (name);
10730 prefixlen = strlen (prefix);
10731 imp_name = (char *) alloca (namelen + prefixlen + 1);
10732 memcpy (imp_name, prefix, prefixlen);
10733 memcpy (imp_name + prefixlen, name, namelen + 1);
10735 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10736 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10737 SET_SYMBOL_REF_DECL (rtl, to);
10738 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10740 rtl = gen_const_mem (Pmode, rtl);
10741 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10743 SET_DECL_RTL (to, rtl);
10744 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10746 return to;
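/* Example (added by the editor): for a dllimport'ed symbol "foo", the
   decl built above resolves to a load from the import table slot,
   spelled "__imp__foo" on targets whose user symbols carry a leading
   underscore (32-bit mingw) and "__imp_foo" otherwise, so a call
   through it becomes "call *__imp__foo".  */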
10749 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10750 true if we require the result be a register. */
10752 static rtx
10753 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10755 tree imp_decl;
10756 rtx x;
10758 gcc_assert (SYMBOL_REF_DECL (symbol));
10759 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10761 x = DECL_RTL (imp_decl);
10762 if (want_reg)
10763 x = force_reg (Pmode, x);
10764 return x;
10767 /* Try machine-dependent ways of modifying an illegitimate address
10768 to be legitimate. If we find one, return the new, valid address.
10769 This macro is used in only one place: `memory_address' in explow.c.
10771 OLDX is the address as it was before break_out_memory_refs was called.
10772 In some cases it is useful to look at this to decide what needs to be done.
10774 It is always safe for this macro to do nothing. It exists to recognize
10775 opportunities to optimize the output.
10777 For the 80386, we handle X+REG by loading X into a register R and
10778 using R+REG. R will go in a general reg and indexing will be used.
10779 However, if REG is a broken-out memory address or multiplication,
10780 nothing needs to be done because REG can certainly go in a general reg.
10782 When -fpic is used, special handling is needed for symbolic references.
10783 See comments by legitimize_pic_address in i386.c for details. */
10785 static rtx
10786 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10787 enum machine_mode mode)
10789 int changed = 0;
10790 unsigned log;
10792 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10793 if (log)
10794 return legitimize_tls_address (x, (enum tls_model) log, false);
10795 if (GET_CODE (x) == CONST
10796 && GET_CODE (XEXP (x, 0)) == PLUS
10797 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10798 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10800 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10801 (enum tls_model) log, false);
10802 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10805 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10807 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10808 return legitimize_dllimport_symbol (x, true);
10809 if (GET_CODE (x) == CONST
10810 && GET_CODE (XEXP (x, 0)) == PLUS
10811 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10812 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10814 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10815 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10819 if (flag_pic && SYMBOLIC_CONST (x))
10820 return legitimize_pic_address (x, 0);
10822 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10823 if (GET_CODE (x) == ASHIFT
10824 && CONST_INT_P (XEXP (x, 1))
10825 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10827 changed = 1;
10828 log = INTVAL (XEXP (x, 1));
10829 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10830 GEN_INT (1 << log));
10833 if (GET_CODE (x) == PLUS)
10835 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10837 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10838 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10839 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10841 changed = 1;
10842 log = INTVAL (XEXP (XEXP (x, 0), 1));
10843 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10844 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10845 GEN_INT (1 << log));
10848 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10849 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10850 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10852 changed = 1;
10853 log = INTVAL (XEXP (XEXP (x, 1), 1));
10854 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10855 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10856 GEN_INT (1 << log));
10859 /* Put multiply first if it isn't already. */
10860 if (GET_CODE (XEXP (x, 1)) == MULT)
10862 rtx tmp = XEXP (x, 0);
10863 XEXP (x, 0) = XEXP (x, 1);
10864 XEXP (x, 1) = tmp;
10865 changed = 1;
10868 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10869 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10870 created by virtual register instantiation, register elimination, and
10871 similar optimizations. */
10872 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10874 changed = 1;
10875 x = gen_rtx_PLUS (Pmode,
10876 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10877 XEXP (XEXP (x, 1), 0)),
10878 XEXP (XEXP (x, 1), 1));
10881 /* Canonicalize
10882 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10883 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10884 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10885 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10886 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10887 && CONSTANT_P (XEXP (x, 1)))
10889 rtx constant;
10890 rtx other = NULL_RTX;
10892 if (CONST_INT_P (XEXP (x, 1)))
10894 constant = XEXP (x, 1);
10895 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10897 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10899 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10900 other = XEXP (x, 1);
10902 else
10903 constant = 0;
10905 if (constant)
10907 changed = 1;
10908 x = gen_rtx_PLUS (Pmode,
10909 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10910 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10911 plus_constant (other, INTVAL (constant)));
10915 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10916 return x;
10918 if (GET_CODE (XEXP (x, 0)) == MULT)
10920 changed = 1;
10921 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10924 if (GET_CODE (XEXP (x, 1)) == MULT)
10926 changed = 1;
10927 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10930 if (changed
10931 && REG_P (XEXP (x, 1))
10932 && REG_P (XEXP (x, 0)))
10933 return x;
10935 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10937 changed = 1;
10938 x = legitimize_pic_address (x, 0);
10941 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10942 return x;
10944 if (REG_P (XEXP (x, 0)))
10946 rtx temp = gen_reg_rtx (Pmode);
10947 rtx val = force_operand (XEXP (x, 1), temp);
10948 if (val != temp)
10949 emit_move_insn (temp, val);
10951 XEXP (x, 1) = temp;
10952 return x;
10955 else if (REG_P (XEXP (x, 1)))
10957 rtx temp = gen_reg_rtx (Pmode);
10958 rtx val = force_operand (XEXP (x, 0), temp);
10959 if (val != temp)
10960 emit_move_insn (temp, val);
10962 XEXP (x, 0) = temp;
10963 return x;
10967 return x;
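/* Worked example (added by the editor): given the non-canonical
   address (plus (ashift (reg) (const_int 2)) (reg)), the code above
   first rewrites the ASHIFT as (mult (reg) (const_int 4)); if the
   result is then a legitimate address it is returned as-is and can be
   matched as "(%base,%index,4)".  */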
10970 /* Print an integer constant expression in assembler syntax. Addition
10971 and subtraction are the only arithmetic that may appear in these
10972 expressions. FILE is the stdio stream to write to, X is the rtx, and
10973 CODE is the operand print code from the output string. */
10975 static void
10976 output_pic_addr_const (FILE *file, rtx x, int code)
10978 char buf[256];
10980 switch (GET_CODE (x))
10982 case PC:
10983 gcc_assert (flag_pic);
10984 putc ('.', file);
10985 break;
10987 case SYMBOL_REF:
10988 if (! TARGET_MACHO || TARGET_64BIT)
10989 output_addr_const (file, x);
10990 else
10992 const char *name = XSTR (x, 0);
10994 /* Mark the decl as referenced so that cgraph will
10995 output the function. */
10996 if (SYMBOL_REF_DECL (x))
10997 mark_decl_referenced (SYMBOL_REF_DECL (x));
10999 #if TARGET_MACHO
11000 if (MACHOPIC_INDIRECT
11001 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
11002 name = machopic_indirection_name (x, /*stub_p=*/true);
11003 #endif
11004 assemble_name (file, name);
11006 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
11007 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
11008 fputs ("@PLT", file);
11009 break;
11011 case LABEL_REF:
11012 x = XEXP (x, 0);
11013 /* FALLTHRU */
11014 case CODE_LABEL:
11015 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
11016 assemble_name (asm_out_file, buf);
11017 break;
11019 case CONST_INT:
11020 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
11021 break;
11023 case CONST:
11024 /* This used to output parentheses around the expression,
11025 but that does not work on the 386 (either ATT or BSD assembler). */
11026 output_pic_addr_const (file, XEXP (x, 0), code);
11027 break;
11029 case CONST_DOUBLE:
11030 if (GET_MODE (x) == VOIDmode)
11032 /* We can use %d if the number is <32 bits and positive. */
11033 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
11034 fprintf (file, "0x%lx%08lx",
11035 (unsigned long) CONST_DOUBLE_HIGH (x),
11036 (unsigned long) CONST_DOUBLE_LOW (x));
11037 else
11038 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
11040 else
11041 /* We can't handle floating point constants;
11042 TARGET_PRINT_OPERAND must handle them. */
11043 output_operand_lossage ("floating constant misused");
11044 break;
11046 case PLUS:
11047 /* Some assemblers need integer constants to appear first. */
11048 if (CONST_INT_P (XEXP (x, 0)))
11050 output_pic_addr_const (file, XEXP (x, 0), code);
11051 putc ('+', file);
11052 output_pic_addr_const (file, XEXP (x, 1), code);
11054 else
11056 gcc_assert (CONST_INT_P (XEXP (x, 1)));
11057 output_pic_addr_const (file, XEXP (x, 1), code);
11058 putc ('+', file);
11059 output_pic_addr_const (file, XEXP (x, 0), code);
11061 break;
11063 case MINUS:
11064 if (!TARGET_MACHO)
11065 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
11066 output_pic_addr_const (file, XEXP (x, 0), code);
11067 putc ('-', file);
11068 output_pic_addr_const (file, XEXP (x, 1), code);
11069 if (!TARGET_MACHO)
11070 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
11071 break;
11073 case UNSPEC:
11074 gcc_assert (XVECLEN (x, 0) == 1);
11075 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
11076 switch (XINT (x, 1))
11078 case UNSPEC_GOT:
11079 fputs ("@GOT", file);
11080 break;
11081 case UNSPEC_GOTOFF:
11082 fputs ("@GOTOFF", file);
11083 break;
11084 case UNSPEC_PLTOFF:
11085 fputs ("@PLTOFF", file);
11086 break;
11087 case UNSPEC_GOTPCREL:
11088 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11089 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
11090 break;
11091 case UNSPEC_GOTTPOFF:
11092 /* FIXME: This might be @TPOFF in Sun ld too. */
11093 fputs ("@gottpoff", file);
11094 break;
11095 case UNSPEC_TPOFF:
11096 fputs ("@tpoff", file);
11097 break;
11098 case UNSPEC_NTPOFF:
11099 if (TARGET_64BIT)
11100 fputs ("@tpoff", file);
11101 else
11102 fputs ("@ntpoff", file);
11103 break;
11104 case UNSPEC_DTPOFF:
11105 fputs ("@dtpoff", file);
11106 break;
11107 case UNSPEC_GOTNTPOFF:
11108 if (TARGET_64BIT)
11109 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11110 "@gottpoff(%rip)": "@gottpoff[rip]", file);
11111 else
11112 fputs ("@gotntpoff", file);
11113 break;
11114 case UNSPEC_INDNTPOFF:
11115 fputs ("@indntpoff", file);
11116 break;
11117 #if TARGET_MACHO
11118 case UNSPEC_MACHOPIC_OFFSET:
11119 putc ('-', file);
11120 machopic_output_function_base_name (file);
11121 break;
11122 #endif
11123 default:
11124 output_operand_lossage ("invalid UNSPEC as operand");
11125 break;
11127 break;
11129 default:
11130 output_operand_lossage ("invalid expression as operand");
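/* Worked example (illustration only, not exhaustive): given
   (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF) the routine above prints
   "foo@GOTOFF"; UNSPEC_GOT prints "foo@GOT"; and a (minus a b) prints
   as "a-b", wrapped in '[...]' (AT&T) or '(...)' (Intel) on non-Mach-O
   targets.  */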
11134 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
11135 We need to emit DTP-relative relocations. */
11137 static void ATTRIBUTE_UNUSED
11138 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
11140 fputs (ASM_LONG, file);
11141 output_addr_const (file, x);
11142 fputs ("@dtpoff", file);
11143 switch (size)
11145 case 4:
11146 break;
11147 case 8:
11148 fputs (", 0", file);
11149 break;
11150 default:
11151 gcc_unreachable ();
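/* Illustration only (assuming ASM_LONG expands to ".long" here): for
   SIZE == 4 the routine above emits a line such as
       .long foo@dtpoff
   and for SIZE == 8 it appends a zero upper half:
       .long foo@dtpoff, 0
 */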
11155 /* Return true if X is a representation of the PIC register. This copes
11156 with calls from ix86_find_base_term, where the register might have
11157 been replaced by a cselib value. */
11159 static bool
11160 ix86_pic_register_p (rtx x)
11162 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
11163 return (pic_offset_table_rtx
11164 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
11165 else
11166 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
11169 /* In the name of slightly smaller debug output, and to cater to
11170 general assembler lossage, recognize PIC+GOTOFF and turn it back
11171 into a direct symbol reference.
11173 On Darwin, this is necessary to avoid a crash, because Darwin
11174 has a different PIC label for each routine but the DWARF debugging
11175 information is not associated with any particular routine, so it's
11176 necessary to remove references to the PIC label from RTL stored by
11177 the DWARF output code. */
11179 static rtx
11180 ix86_delegitimize_address (rtx x)
11182 rtx orig_x = delegitimize_mem_from_attrs (x);
11183 /* addend is NULL or some rtx if x is something+GOTOFF where
11184 something doesn't include the PIC register. */
11185 rtx addend = NULL_RTX;
11186 /* reg_addend is NULL or a multiple of some register. */
11187 rtx reg_addend = NULL_RTX;
11188 /* const_addend is NULL or a const_int. */
11189 rtx const_addend = NULL_RTX;
11190 /* This is the result, or NULL. */
11191 rtx result = NULL_RTX;
11193 x = orig_x;
11195 if (MEM_P (x))
11196 x = XEXP (x, 0);
11198 if (TARGET_64BIT)
11200 if (GET_CODE (x) != CONST
11201 || GET_CODE (XEXP (x, 0)) != UNSPEC
11202 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
11203 || !MEM_P (orig_x))
11204 return orig_x;
11205 x = XVECEXP (XEXP (x, 0), 0, 0);
11206 if (GET_MODE (orig_x) != Pmode)
11207 return simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
11208 return x;
11211 if (GET_CODE (x) != PLUS
11212 || GET_CODE (XEXP (x, 1)) != CONST)
11213 return orig_x;
11215 if (ix86_pic_register_p (XEXP (x, 0)))
11216 /* %ebx + GOT/GOTOFF */
11218 else if (GET_CODE (XEXP (x, 0)) == PLUS)
11220 /* %ebx + %reg * scale + GOT/GOTOFF */
11221 reg_addend = XEXP (x, 0);
11222 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
11223 reg_addend = XEXP (reg_addend, 1);
11224 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
11225 reg_addend = XEXP (reg_addend, 0);
11226 else
11228 reg_addend = NULL_RTX;
11229 addend = XEXP (x, 0);
11232 else
11233 addend = XEXP (x, 0);
11235 x = XEXP (XEXP (x, 1), 0);
11236 if (GET_CODE (x) == PLUS
11237 && CONST_INT_P (XEXP (x, 1)))
11239 const_addend = XEXP (x, 1);
11240 x = XEXP (x, 0);
11243 if (GET_CODE (x) == UNSPEC
11244 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11245 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11246 result = XVECEXP (x, 0, 0);
11248 if (TARGET_MACHO && darwin_local_data_pic (x)
11249 && !MEM_P (orig_x))
11250 result = XVECEXP (x, 0, 0);
11252 if (! result)
11253 return orig_x;
11255 if (const_addend)
11256 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11257 if (reg_addend)
11258 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11259 if (addend)
11261 /* If the rest of the original X doesn't involve the PIC register, add
11262 the addend and subtract pic_offset_table_rtx. This can happen e.g.
11263 for code like:
11264 leal (%ebx, %ecx, 4), %ecx
11266 movl foo@GOTOFF(%ecx), %edx
11267 in which case we return (%ecx - %ebx) + foo. */
11268 if (pic_offset_table_rtx)
11269 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11270 pic_offset_table_rtx),
11271 result);
11272 else
11273 return orig_x;
11275 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
11276 return simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
11277 return result;
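/* Hedged sketch of the effect of ix86_delegitimize_address: a 32-bit PIC
   reference like
     (plus (reg:SI ebx) (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)))
   is rewritten back to (symbol_ref "foo"), with any constant offset or
   scaled-index addend re-applied around the recovered symbol.  */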
11280 /* If X is a machine specific address (i.e. a symbol or label being
11281 referenced as a displacement from the GOT implemented using an
11282 UNSPEC), then return the base term. Otherwise return X. */
11285 ix86_find_base_term (rtx x)
11287 rtx term;
11289 if (TARGET_64BIT)
11291 if (GET_CODE (x) != CONST)
11292 return x;
11293 term = XEXP (x, 0);
11294 if (GET_CODE (term) == PLUS
11295 && (CONST_INT_P (XEXP (term, 1))
11296 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11297 term = XEXP (term, 0);
11298 if (GET_CODE (term) != UNSPEC
11299 || XINT (term, 1) != UNSPEC_GOTPCREL)
11300 return x;
11302 return XVECEXP (term, 0, 0);
11305 return ix86_delegitimize_address (x);
11308 static void
11309 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11310 int fp, FILE *file)
11312 const char *suffix;
11314 if (mode == CCFPmode || mode == CCFPUmode)
11316 code = ix86_fp_compare_code_to_integer (code);
11317 mode = CCmode;
11319 if (reverse)
11320 code = reverse_condition (code);
11322 switch (code)
11324 case EQ:
11325 switch (mode)
11327 case CCAmode:
11328 suffix = "a";
11329 break;
11331 case CCCmode:
11332 suffix = "c";
11333 break;
11335 case CCOmode:
11336 suffix = "o";
11337 break;
11339 case CCSmode:
11340 suffix = "s";
11341 break;
11343 default:
11344 suffix = "e";
11346 break;
11347 case NE:
11348 switch (mode)
11350 case CCAmode:
11351 suffix = "na";
11352 break;
11354 case CCCmode:
11355 suffix = "nc";
11356 break;
11358 case CCOmode:
11359 suffix = "no";
11360 break;
11362 case CCSmode:
11363 suffix = "ns";
11364 break;
11366 default:
11367 suffix = "ne";
11369 break;
11370 case GT:
11371 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11372 suffix = "g";
11373 break;
11374 case GTU:
11375 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11376 Those same assemblers have the same but opposite lossage on cmov. */
11377 if (mode == CCmode)
11378 suffix = fp ? "nbe" : "a";
11379 else if (mode == CCCmode)
11380 suffix = "b";
11381 else
11382 gcc_unreachable ();
11383 break;
11384 case LT:
11385 switch (mode)
11387 case CCNOmode:
11388 case CCGOCmode:
11389 suffix = "s";
11390 break;
11392 case CCmode:
11393 case CCGCmode:
11394 suffix = "l";
11395 break;
11397 default:
11398 gcc_unreachable ();
11400 break;
11401 case LTU:
11402 gcc_assert (mode == CCmode || mode == CCCmode);
11403 suffix = "b";
11404 break;
11405 case GE:
11406 switch (mode)
11408 case CCNOmode:
11409 case CCGOCmode:
11410 suffix = "ns";
11411 break;
11413 case CCmode:
11414 case CCGCmode:
11415 suffix = "ge";
11416 break;
11418 default:
11419 gcc_unreachable ();
11421 break;
11422 case GEU:
11423 /* ??? As above. */
11424 gcc_assert (mode == CCmode || mode == CCCmode);
11425 suffix = fp ? "nb" : "ae";
11426 break;
11427 case LE:
11428 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11429 suffix = "le";
11430 break;
11431 case LEU:
11432 /* ??? As above. */
11433 if (mode == CCmode)
11434 suffix = "be";
11435 else if (mode == CCCmode)
11436 suffix = fp ? "nb" : "ae";
11437 else
11438 gcc_unreachable ();
11439 break;
11440 case UNORDERED:
11441 suffix = fp ? "u" : "p";
11442 break;
11443 case ORDERED:
11444 suffix = fp ? "nu" : "np";
11445 break;
11446 default:
11447 gcc_unreachable ();
11449 fputs (suffix, file);
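/* Worked example (illustration only): with CODE == GT and MODE == CCmode
   the routine prints "g"; GTU prints "a" (or "nbe" when FP is set, for the
   fcmov forms); and with REVERSE set, GT is first turned into LE and
   prints "le".  Callers then form mnemonics such as setg, seta or
   cmovle.  */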
11452 /* Print the name of register X to FILE based on its machine mode and number.
11453 If CODE is 'w', pretend the mode is HImode.
11454 If CODE is 'b', pretend the mode is QImode.
11455 If CODE is 'k', pretend the mode is SImode.
11456 If CODE is 'q', pretend the mode is DImode.
11457 If CODE is 'x', pretend the mode is V4SFmode.
11458 If CODE is 't', pretend the mode is V8SFmode.
11459 If CODE is 'h', pretend the reg is the 'high' byte register.
11460 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
11461 If CODE is 'd', duplicate the operand for AVX instruction.
11464 void
11465 print_reg (rtx x, int code, FILE *file)
11467 const char *reg;
11468 bool duplicated = code == 'd' && TARGET_AVX;
11470 gcc_assert (x == pc_rtx
11471 || (REGNO (x) != ARG_POINTER_REGNUM
11472 && REGNO (x) != FRAME_POINTER_REGNUM
11473 && REGNO (x) != FLAGS_REG
11474 && REGNO (x) != FPSR_REG
11475 && REGNO (x) != FPCR_REG));
11477 if (ASSEMBLER_DIALECT == ASM_ATT)
11478 putc ('%', file);
11480 if (x == pc_rtx)
11482 gcc_assert (TARGET_64BIT);
11483 fputs ("rip", file);
11484 return;
11487 if (code == 'w' || MMX_REG_P (x))
11488 code = 2;
11489 else if (code == 'b')
11490 code = 1;
11491 else if (code == 'k')
11492 code = 4;
11493 else if (code == 'q')
11494 code = 8;
11495 else if (code == 'y')
11496 code = 3;
11497 else if (code == 'h')
11498 code = 0;
11499 else if (code == 'x')
11500 code = 16;
11501 else if (code == 't')
11502 code = 32;
11503 else
11504 code = GET_MODE_SIZE (GET_MODE (x));
11506 /* Irritatingly, the AMD extended registers use a different naming
11507 convention from the normal registers. */
11508 if (REX_INT_REG_P (x))
11510 gcc_assert (TARGET_64BIT);
11511 switch (code)
11513 case 0:
11514 error ("extended registers have no high halves");
11515 break;
11516 case 1:
11517 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11518 break;
11519 case 2:
11520 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11521 break;
11522 case 4:
11523 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11524 break;
11525 case 8:
11526 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11527 break;
11528 default:
11529 error ("unsupported operand size for extended register");
11530 break;
11532 return;
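/* Worked example (illustration only): for the register following r8, the
   cases above print "r9b", "r9w", "r9d" or "r9" depending on the
   requested width, after the usual "%" prefix in AT&T syntax.  */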
11535 reg = NULL;
11536 switch (code)
11538 case 3:
11539 if (STACK_TOP_P (x))
11541 reg = "st(0)";
11542 break;
11544 /* FALLTHRU */
11545 case 8:
11546 case 4:
11547 case 12:
11548 if (! ANY_FP_REG_P (x))
11549 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11550 /* FALLTHRU */
11551 case 16:
11552 case 2:
11553 normal:
11554 reg = hi_reg_name[REGNO (x)];
11555 break;
11556 case 1:
11557 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11558 goto normal;
11559 reg = qi_reg_name[REGNO (x)];
11560 break;
11561 case 0:
11562 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11563 goto normal;
11564 reg = qi_high_reg_name[REGNO (x)];
11565 break;
11566 case 32:
11567 if (SSE_REG_P (x))
11569 gcc_assert (!duplicated);
11570 putc ('y', file);
11571 fputs (hi_reg_name[REGNO (x)] + 1, file);
11572 return;
11574 break;
11575 default:
11576 gcc_unreachable ();
11579 fputs (reg, file);
11580 if (duplicated)
11582 if (ASSEMBLER_DIALECT == ASM_ATT)
11583 fprintf (file, ", %%%s", reg);
11584 else
11585 fprintf (file, ", %s", reg);
11589 /* Locate some local-dynamic symbol still in use by this function
11590 so that we can print its name in some tls_local_dynamic_base
11591 pattern. */
11593 static int
11594 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11596 rtx x = *px;
11598 if (GET_CODE (x) == SYMBOL_REF
11599 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11601 cfun->machine->some_ld_name = XSTR (x, 0);
11602 return 1;
11605 return 0;
11608 static const char *
11609 get_some_local_dynamic_name (void)
11611 rtx insn;
11613 if (cfun->machine->some_ld_name)
11614 return cfun->machine->some_ld_name;
11616 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11617 if (NONDEBUG_INSN_P (insn)
11618 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11619 return cfun->machine->some_ld_name;
11621 return NULL;
11624 /* Meaning of CODE:
11625 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11626 C -- print opcode suffix for set/cmov insn.
11627 c -- like C, but print reversed condition
11628 F,f -- likewise, but for floating-point.
11629 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11630 otherwise nothing
11631 R -- print the prefix for register names.
11632 z -- print the opcode suffix for the size of the current operand.
11633 Z -- likewise, with special suffixes for x87 instructions.
11634 * -- print a star (in certain assembler syntax)
11635 A -- print an absolute memory reference.
11636 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11637 s -- print a shift double count, followed by the assembler's argument
11638 delimiter.
11639 b -- print the QImode name of the register for the indicated operand.
11640 %b0 would print %al if operands[0] is reg 0.
11641 w -- likewise, print the HImode name of the register.
11642 k -- likewise, print the SImode name of the register.
11643 q -- likewise, print the DImode name of the register.
11644 x -- likewise, print the V4SFmode name of the register.
11645 t -- likewise, print the V8SFmode name of the register.
11646 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11647 y -- print "st(0)" instead of "st" as a register.
11648 d -- print duplicated register operand for AVX instruction.
11649 D -- print condition for SSE cmp instruction.
11650 P -- if PIC, print an @PLT suffix.
11651 X -- don't print any sort of PIC '@' suffix for a symbol.
11652 & -- print some in-use local-dynamic symbol name.
11653 H -- print a memory address offset by 8; used for sse high-parts
11654 Y -- print condition for XOP pcom* instruction.
11655 + -- print a branch hint as 'cs' or 'ds' prefix
11656 ; -- print a semicolon (after prefixes, due to a bug in older gas).
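/* Worked example (illustration only, AT&T syntax assumed): in an insn
   template, "%k1" prints the SImode name of operand 1 (e.g. %ecx),
   "%b1" prints %cl, and "%z0" appends the width suffix of operand 0 to
   the mnemonic, so a SImode destination turns "mov%z0" into "movl".  */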
11659 void
11660 ix86_print_operand (FILE *file, rtx x, int code)
11662 if (code)
11664 switch (code)
11666 case '*':
11667 if (ASSEMBLER_DIALECT == ASM_ATT)
11668 putc ('*', file);
11669 return;
11671 case '&':
11673 const char *name = get_some_local_dynamic_name ();
11674 if (name == NULL)
11675 output_operand_lossage ("'%%&' used without any "
11676 "local dynamic TLS references");
11677 else
11678 assemble_name (file, name);
11679 return;
11682 case 'A':
11683 switch (ASSEMBLER_DIALECT)
11685 case ASM_ATT:
11686 putc ('*', file);
11687 break;
11689 case ASM_INTEL:
11690 /* Intel syntax. For absolute addresses, registers should not
11691 be surrounded by braces. */
11692 if (!REG_P (x))
11694 putc ('[', file);
11695 ix86_print_operand (file, x, 0);
11696 putc (']', file);
11697 return;
11699 break;
11701 default:
11702 gcc_unreachable ();
11705 ix86_print_operand (file, x, 0);
11706 return;
11709 case 'L':
11710 if (ASSEMBLER_DIALECT == ASM_ATT)
11711 putc ('l', file);
11712 return;
11714 case 'W':
11715 if (ASSEMBLER_DIALECT == ASM_ATT)
11716 putc ('w', file);
11717 return;
11719 case 'B':
11720 if (ASSEMBLER_DIALECT == ASM_ATT)
11721 putc ('b', file);
11722 return;
11724 case 'Q':
11725 if (ASSEMBLER_DIALECT == ASM_ATT)
11726 putc ('l', file);
11727 return;
11729 case 'S':
11730 if (ASSEMBLER_DIALECT == ASM_ATT)
11731 putc ('s', file);
11732 return;
11734 case 'T':
11735 if (ASSEMBLER_DIALECT == ASM_ATT)
11736 putc ('t', file);
11737 return;
11739 case 'z':
11740 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11742 /* Opcodes don't get size suffixes when using Intel syntax. */
11743 if (ASSEMBLER_DIALECT == ASM_INTEL)
11744 return;
11746 switch (GET_MODE_SIZE (GET_MODE (x)))
11748 case 1:
11749 putc ('b', file);
11750 return;
11752 case 2:
11753 putc ('w', file);
11754 return;
11756 case 4:
11757 putc ('l', file);
11758 return;
11760 case 8:
11761 putc ('q', file);
11762 return;
11764 default:
11765 output_operand_lossage
11766 ("invalid operand size for operand code '%c'", code);
11767 return;
11771 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11772 warning
11773 (0, "non-integer operand used with operand code '%c'", code);
11774 /* FALLTHRU */
11776 case 'Z':
11777 /* 387 opcodes don't get size suffixes when using Intel syntax. */
11778 if (ASSEMBLER_DIALECT == ASM_INTEL)
11779 return;
11781 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11783 switch (GET_MODE_SIZE (GET_MODE (x)))
11785 case 2:
11786 #ifdef HAVE_AS_IX86_FILDS
11787 putc ('s', file);
11788 #endif
11789 return;
11791 case 4:
11792 putc ('l', file);
11793 return;
11795 case 8:
11796 #ifdef HAVE_AS_IX86_FILDQ
11797 putc ('q', file);
11798 #else
11799 fputs ("ll", file);
11800 #endif
11801 return;
11803 default:
11804 break;
11807 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11809 /* 387 opcodes don't get size suffixes
11810 if the operands are registers. */
11811 if (STACK_REG_P (x))
11812 return;
11814 switch (GET_MODE_SIZE (GET_MODE (x)))
11816 case 4:
11817 putc ('s', file);
11818 return;
11820 case 8:
11821 putc ('l', file);
11822 return;
11824 case 12:
11825 case 16:
11826 putc ('t', file);
11827 return;
11829 default:
11830 break;
11833 else
11835 output_operand_lossage
11836 ("invalid operand type used with operand code '%c'", code);
11837 return;
11840 output_operand_lossage
11841 ("invalid operand size for operand code '%c'", code);
11842 return;
11844 case 'd':
11845 case 'b':
11846 case 'w':
11847 case 'k':
11848 case 'q':
11849 case 'h':
11850 case 't':
11851 case 'y':
11852 case 'x':
11853 case 'X':
11854 case 'P':
11855 break;
11857 case 's':
11858 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11860 ix86_print_operand (file, x, 0);
11861 fputs (", ", file);
11863 return;
11865 case 'D':
11866 /* A bit of braindamage here. The SSE compare instructions
11867 use completely different names for the comparisons than the
11868 fp conditional moves do. */
11869 if (TARGET_AVX)
11871 switch (GET_CODE (x))
11873 case EQ:
11874 fputs ("eq", file);
11875 break;
11876 case UNEQ:
11877 fputs ("eq_us", file);
11878 break;
11879 case LT:
11880 fputs ("lt", file);
11881 break;
11882 case UNLT:
11883 fputs ("nge", file);
11884 break;
11885 case LE:
11886 fputs ("le", file);
11887 break;
11888 case UNLE:
11889 fputs ("ngt", file);
11890 break;
11891 case UNORDERED:
11892 fputs ("unord", file);
11893 break;
11894 case NE:
11895 fputs ("neq", file);
11896 break;
11897 case LTGT:
11898 fputs ("neq_oq", file);
11899 break;
11900 case GE:
11901 fputs ("ge", file);
11902 break;
11903 case UNGE:
11904 fputs ("nlt", file);
11905 break;
11906 case GT:
11907 fputs ("gt", file);
11908 break;
11909 case UNGT:
11910 fputs ("nle", file);
11911 break;
11912 case ORDERED:
11913 fputs ("ord", file);
11914 break;
11915 default:
11916 output_operand_lossage ("operand is not a condition code, "
11917 "invalid operand code 'D'");
11918 return;
11921 else
11923 switch (GET_CODE (x))
11925 case EQ:
11926 case UNEQ:
11927 fputs ("eq", file);
11928 break;
11929 case LT:
11930 case UNLT:
11931 fputs ("lt", file);
11932 break;
11933 case LE:
11934 case UNLE:
11935 fputs ("le", file);
11936 break;
11937 case UNORDERED:
11938 fputs ("unord", file);
11939 break;
11940 case NE:
11941 case LTGT:
11942 fputs ("neq", file);
11943 break;
11944 case UNGE:
11945 case GE:
11946 fputs ("nlt", file);
11947 break;
11948 case UNGT:
11949 case GT:
11950 fputs ("nle", file);
11951 break;
11952 case ORDERED:
11953 fputs ("ord", file);
11954 break;
11955 default:
11956 output_operand_lossage ("operand is not a condition code, "
11957 "invalid operand code 'D'");
11958 return;
11961 return;
11962 case 'O':
11963 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11964 if (ASSEMBLER_DIALECT == ASM_ATT)
11966 switch (GET_MODE (x))
11968 case HImode: putc ('w', file); break;
11969 case SImode:
11970 case SFmode: putc ('l', file); break;
11971 case DImode:
11972 case DFmode: putc ('q', file); break;
11973 default: gcc_unreachable ();
11975 putc ('.', file);
11977 #endif
11978 return;
11979 case 'C':
11980 if (!COMPARISON_P (x))
11982 output_operand_lossage ("operand is neither a constant nor a "
11983 "condition code, invalid operand code "
11984 "'C'");
11985 return;
11987 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11988 return;
11989 case 'F':
11990 if (!COMPARISON_P (x))
11992 output_operand_lossage ("operand is neither a constant nor a "
11993 "condition code, invalid operand code "
11994 "'F'");
11995 return;
11997 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11998 if (ASSEMBLER_DIALECT == ASM_ATT)
11999 putc ('.', file);
12000 #endif
12001 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
12002 return;
12004 /* Like above, but reverse condition */
12005 case 'c':
12006 /* Check to see if argument to %c is really a constant
12007 and not a condition code which needs to be reversed. */
12008 if (!COMPARISON_P (x))
12010 output_operand_lossage ("operand is neither a constant nor a "
12011 "condition code, invalid operand "
12012 "code 'c'");
12013 return;
12015 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
12016 return;
12017 case 'f':
12018 if (!COMPARISON_P (x))
12020 output_operand_lossage ("operand is neither a constant nor a "
12021 "condition code, invalid operand "
12022 "code 'f'");
12023 return;
12025 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
12026 if (ASSEMBLER_DIALECT == ASM_ATT)
12027 putc ('.', file);
12028 #endif
12029 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
12030 return;
12032 case 'H':
12033 /* It doesn't actually matter what mode we use here, as we're
12034 only going to use this for printing. */
12035 x = adjust_address_nv (x, DImode, 8);
12036 break;
12038 case '+':
12040 rtx x;
12042 if (!optimize
12043 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
12044 return;
12046 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
12047 if (x)
12049 int pred_val = INTVAL (XEXP (x, 0));
12051 if (pred_val < REG_BR_PROB_BASE * 45 / 100
12052 || pred_val > REG_BR_PROB_BASE * 55 / 100)
12054 int taken = pred_val > REG_BR_PROB_BASE / 2;
12055 int cputaken = final_forward_branch_p (current_output_insn) == 0;
12057 /* Emit hints only in the case where the default branch prediction
12058 heuristics would fail. */
12059 if (taken != cputaken)
12061 /* We use 3e (DS) prefix for taken branches and
12062 2e (CS) prefix for not taken branches. */
12063 if (taken)
12064 fputs ("ds ; ", file);
12065 else
12066 fputs ("cs ; ", file);
12070 return;
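/* Illustration only: if the REG_BR_PROB note says "likely taken" but the
   branch is emitted as a forward (statically not-taken) branch, the code
   above prefixes the jump, giving e.g. "ds ; jne .L3"; the opposite
   mismatch yields a "cs ; " prefix.  */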
12073 case 'Y':
12074 switch (GET_CODE (x))
12076 case NE:
12077 fputs ("neq", file);
12078 break;
12079 case EQ:
12080 fputs ("eq", file);
12081 break;
12082 case GE:
12083 case GEU:
12084 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
12085 break;
12086 case GT:
12087 case GTU:
12088 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
12089 break;
12090 case LE:
12091 case LEU:
12092 fputs ("le", file);
12093 break;
12094 case LT:
12095 case LTU:
12096 fputs ("lt", file);
12097 break;
12098 case UNORDERED:
12099 fputs ("unord", file);
12100 break;
12101 case ORDERED:
12102 fputs ("ord", file);
12103 break;
12104 case UNEQ:
12105 fputs ("ueq", file);
12106 break;
12107 case UNGE:
12108 fputs ("nlt", file);
12109 break;
12110 case UNGT:
12111 fputs ("nle", file);
12112 break;
12113 case UNLE:
12114 fputs ("ule", file);
12115 break;
12116 case UNLT:
12117 fputs ("ult", file);
12118 break;
12119 case LTGT:
12120 fputs ("une", file);
12121 break;
12122 default:
12123 output_operand_lossage ("operand is not a condition code, "
12124 "invalid operand code 'Y'");
12125 return;
12127 return;
12129 case ';':
12130 #if TARGET_MACHO || !HAVE_AS_IX86_REP_LOCK_PREFIX
12131 fputs (";", file);
12132 #endif
12133 return;
12135 default:
12136 output_operand_lossage ("invalid operand code '%c'", code);
12140 if (REG_P (x))
12141 print_reg (x, code, file);
12143 else if (MEM_P (x))
12145 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
12146 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
12147 && GET_MODE (x) != BLKmode)
12149 const char * size;
12150 switch (GET_MODE_SIZE (GET_MODE (x)))
12152 case 1: size = "BYTE"; break;
12153 case 2: size = "WORD"; break;
12154 case 4: size = "DWORD"; break;
12155 case 8: size = "QWORD"; break;
12156 case 12: size = "TBYTE"; break;
12157 case 16:
12158 if (GET_MODE (x) == XFmode)
12159 size = "TBYTE";
12160 else
12161 size = "XMMWORD";
12162 break;
12163 case 32: size = "YMMWORD"; break;
12164 default:
12165 gcc_unreachable ();
12168 /* Check for explicit size override (codes 'b', 'w' and 'k') */
12169 if (code == 'b')
12170 size = "BYTE";
12171 else if (code == 'w')
12172 size = "WORD";
12173 else if (code == 'k')
12174 size = "DWORD";
12176 fputs (size, file);
12177 fputs (" PTR ", file);
12180 x = XEXP (x, 0);
12181 /* Avoid (%rip) for call operands. */
12182 if (CONSTANT_ADDRESS_P (x) && code == 'P'
12183 && !CONST_INT_P (x))
12184 output_addr_const (file, x);
12185 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
12186 output_operand_lossage ("invalid constraints for operand");
12187 else
12188 output_address (x);
12191 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
12193 REAL_VALUE_TYPE r;
12194 long l;
12196 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
12197 REAL_VALUE_TO_TARGET_SINGLE (r, l);
12199 if (ASSEMBLER_DIALECT == ASM_ATT)
12200 putc ('$', file);
12201 fprintf (file, "0x%08lx", (long unsigned int) l);
12204 /* These float cases don't actually occur as immediate operands. */
12205 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
12207 char dstr[30];
12209 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12210 fputs (dstr, file);
12213 else if (GET_CODE (x) == CONST_DOUBLE
12214 && GET_MODE (x) == XFmode)
12216 char dstr[30];
12218 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12219 fputs (dstr, file);
12222 else
12224 /* We have patterns that allow zero sets of memory, for instance.
12225 In 64-bit mode, we should probably support all 8-byte vectors,
12226 since we can in fact encode that into an immediate. */
12227 if (GET_CODE (x) == CONST_VECTOR)
12229 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12230 x = const0_rtx;
12233 if (code != 'P')
12235 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12237 if (ASSEMBLER_DIALECT == ASM_ATT)
12238 putc ('$', file);
12240 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12241 || GET_CODE (x) == LABEL_REF)
12243 if (ASSEMBLER_DIALECT == ASM_ATT)
12244 putc ('$', file);
12245 else
12246 fputs ("OFFSET FLAT:", file);
12249 if (CONST_INT_P (x))
12250 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12251 else if (flag_pic)
12252 output_pic_addr_const (file, x, code);
12253 else
12254 output_addr_const (file, x);
12258 static bool
12259 ix86_print_operand_punct_valid_p (unsigned char code)
12261 return (code == '*' || code == '+' || code == '&' || code == ';');
12264 /* Print a memory operand whose address is ADDR. */
12266 static void
12267 ix86_print_operand_address (FILE *file, rtx addr)
12269 struct ix86_address parts;
12270 rtx base, index, disp;
12271 int scale;
12272 int ok = ix86_decompose_address (addr, &parts);
12274 gcc_assert (ok);
12276 base = parts.base;
12277 index = parts.index;
12278 disp = parts.disp;
12279 scale = parts.scale;
12281 switch (parts.seg)
12283 case SEG_DEFAULT:
12284 break;
12285 case SEG_FS:
12286 case SEG_GS:
12287 if (ASSEMBLER_DIALECT == ASM_ATT)
12288 putc ('%', file);
12289 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12290 break;
12291 default:
12292 gcc_unreachable ();
12295 /* Use the one byte shorter RIP-relative addressing for 64-bit mode. */
12296 if (TARGET_64BIT && !base && !index)
12298 rtx symbol = disp;
12300 if (GET_CODE (disp) == CONST
12301 && GET_CODE (XEXP (disp, 0)) == PLUS
12302 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12303 symbol = XEXP (XEXP (disp, 0), 0);
12305 if (GET_CODE (symbol) == LABEL_REF
12306 || (GET_CODE (symbol) == SYMBOL_REF
12307 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12308 base = pc_rtx;
12310 if (!base && !index)
12312 /* A displacement-only address requires special attention. */
12314 if (CONST_INT_P (disp))
12316 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12317 fputs ("ds:", file);
12318 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12320 else if (flag_pic)
12321 output_pic_addr_const (file, disp, 0);
12322 else
12323 output_addr_const (file, disp);
12325 else
12327 if (ASSEMBLER_DIALECT == ASM_ATT)
12329 if (disp)
12331 if (flag_pic)
12332 output_pic_addr_const (file, disp, 0);
12333 else if (GET_CODE (disp) == LABEL_REF)
12334 output_asm_label (disp);
12335 else
12336 output_addr_const (file, disp);
12339 putc ('(', file);
12340 if (base)
12341 print_reg (base, 0, file);
12342 if (index)
12344 putc (',', file);
12345 print_reg (index, 0, file);
12346 if (scale != 1)
12347 fprintf (file, ",%d", scale);
12349 putc (')', file);
12351 else
12353 rtx offset = NULL_RTX;
12355 if (disp)
12357 /* Pull out the offset of a symbol; print any symbol itself. */
12358 if (GET_CODE (disp) == CONST
12359 && GET_CODE (XEXP (disp, 0)) == PLUS
12360 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12362 offset = XEXP (XEXP (disp, 0), 1);
12363 disp = gen_rtx_CONST (VOIDmode,
12364 XEXP (XEXP (disp, 0), 0));
12367 if (flag_pic)
12368 output_pic_addr_const (file, disp, 0);
12369 else if (GET_CODE (disp) == LABEL_REF)
12370 output_asm_label (disp);
12371 else if (CONST_INT_P (disp))
12372 offset = disp;
12373 else
12374 output_addr_const (file, disp);
12377 putc ('[', file);
12378 if (base)
12380 print_reg (base, 0, file);
12381 if (offset)
12383 if (INTVAL (offset) >= 0)
12384 putc ('+', file);
12385 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12388 else if (offset)
12389 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12390 else
12391 putc ('0', file);
12393 if (index)
12395 putc ('+', file);
12396 print_reg (index, 0, file);
12397 if (scale != 1)
12398 fprintf (file, "*%d", scale);
12400 putc (']', file);
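/* Worked example (illustration only): for base %ebx, index %ecx, scale 4
   and displacement 16 the routine above prints "16(%ebx,%ecx,4)" in AT&T
   syntax and "[ebx+16+ecx*4]" in Intel syntax.  */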
12405 bool
12406 output_addr_const_extra (FILE *file, rtx x)
12408 rtx op;
12410 if (GET_CODE (x) != UNSPEC)
12411 return false;
12413 op = XVECEXP (x, 0, 0);
12414 switch (XINT (x, 1))
12416 case UNSPEC_GOTTPOFF:
12417 output_addr_const (file, op);
12418 /* FIXME: This might be @TPOFF in Sun ld. */
12419 fputs ("@gottpoff", file);
12420 break;
12421 case UNSPEC_TPOFF:
12422 output_addr_const (file, op);
12423 fputs ("@tpoff", file);
12424 break;
12425 case UNSPEC_NTPOFF:
12426 output_addr_const (file, op);
12427 if (TARGET_64BIT)
12428 fputs ("@tpoff", file);
12429 else
12430 fputs ("@ntpoff", file);
12431 break;
12432 case UNSPEC_DTPOFF:
12433 output_addr_const (file, op);
12434 fputs ("@dtpoff", file);
12435 break;
12436 case UNSPEC_GOTNTPOFF:
12437 output_addr_const (file, op);
12438 if (TARGET_64BIT)
12439 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12440 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12441 else
12442 fputs ("@gotntpoff", file);
12443 break;
12444 case UNSPEC_INDNTPOFF:
12445 output_addr_const (file, op);
12446 fputs ("@indntpoff", file);
12447 break;
12448 #if TARGET_MACHO
12449 case UNSPEC_MACHOPIC_OFFSET:
12450 output_addr_const (file, op);
12451 putc ('-', file);
12452 machopic_output_function_base_name (file);
12453 break;
12454 #endif
12456 default:
12457 return false;
12460 return true;
12463 /* Split one or more DImode RTL references into pairs of SImode
12464 references. The RTL can be REG, offsettable MEM, integer constant, or
12465 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12466 split and "num" is its length. lo_half and hi_half are output arrays
12467 that parallel "operands". */
12469 void
12470 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12472 while (num--)
12474 rtx op = operands[num];
12476 /* simplify_subreg refuses to split volatile memory addresses,
12477 but we still have to handle them. */
12478 if (MEM_P (op))
12480 lo_half[num] = adjust_address (op, SImode, 0);
12481 hi_half[num] = adjust_address (op, SImode, 4);
12483 else
12485 lo_half[num] = simplify_gen_subreg (SImode, op,
12486 GET_MODE (op) == VOIDmode
12487 ? DImode : GET_MODE (op), 0);
12488 hi_half[num] = simplify_gen_subreg (SImode, op,
12489 GET_MODE (op) == VOIDmode
12490 ? DImode : GET_MODE (op), 4);
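/* Standalone sketch (illustration only; uses plain C types rather than
   RTL) of the split performed above for a constant DImode operand.  */
#if 0
#include <stdint.h>

static void
example_split_di (uint64_t value, uint32_t *lo, uint32_t *hi)
{
  *lo = (uint32_t) (value & 0xffffffffu);   /* low SImode half */
  *hi = (uint32_t) (value >> 32);           /* high SImode half */
}
#endif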
12494 /* Split one or more TImode RTL references into pairs of DImode
12495 references. The RTL can be REG, offsettable MEM, integer constant, or
12496 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12497 split and "num" is its length. lo_half and hi_half are output arrays
12498 that parallel "operands". */
12500 void
12501 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12503 while (num--)
12505 rtx op = operands[num];
12507 /* simplify_subreg refuses to split volatile memory addresses, but we
12508 still have to handle them. */
12509 if (MEM_P (op))
12511 lo_half[num] = adjust_address (op, DImode, 0);
12512 hi_half[num] = adjust_address (op, DImode, 8);
12514 else
12516 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12517 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12522 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12523 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12524 is the expression of the binary operation. The output may either be
12525 emitted here, or returned to the caller, like all output_* functions.
12527 There is no guarantee that the operands are the same mode, as they
12528 might be within FLOAT or FLOAT_EXTEND expressions. */
12530 #ifndef SYSV386_COMPAT
12531 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12532 wants to fix the assemblers because that causes incompatibility
12533 with gcc. No-one wants to fix gcc because that causes
12534 incompatibility with assemblers... You can use the option of
12535 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12536 #define SYSV386_COMPAT 1
12537 #endif
12539 const char *
12540 output_387_binary_op (rtx insn, rtx *operands)
12542 static char buf[40];
12543 const char *p;
12544 const char *ssep;
12545 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12547 #ifdef ENABLE_CHECKING
12548 /* Even if we do not want to check the inputs, this documents the input
12549 constraints, which helps in understanding the following code. */
12550 if (STACK_REG_P (operands[0])
12551 && ((REG_P (operands[1])
12552 && REGNO (operands[0]) == REGNO (operands[1])
12553 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12554 || (REG_P (operands[2])
12555 && REGNO (operands[0]) == REGNO (operands[2])
12556 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12557 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12558 ; /* ok */
12559 else
12560 gcc_assert (is_sse);
12561 #endif
12563 switch (GET_CODE (operands[3]))
12565 case PLUS:
12566 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12567 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12568 p = "fiadd";
12569 else
12570 p = "fadd";
12571 ssep = "vadd";
12572 break;
12574 case MINUS:
12575 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12576 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12577 p = "fisub";
12578 else
12579 p = "fsub";
12580 ssep = "vsub";
12581 break;
12583 case MULT:
12584 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12585 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12586 p = "fimul";
12587 else
12588 p = "fmul";
12589 ssep = "vmul";
12590 break;
12592 case DIV:
12593 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12594 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12595 p = "fidiv";
12596 else
12597 p = "fdiv";
12598 ssep = "vdiv";
12599 break;
12601 default:
12602 gcc_unreachable ();
12605 if (is_sse)
12607 if (TARGET_AVX)
12609 strcpy (buf, ssep);
12610 if (GET_MODE (operands[0]) == SFmode)
12611 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12612 else
12613 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12615 else
12617 strcpy (buf, ssep + 1);
12618 if (GET_MODE (operands[0]) == SFmode)
12619 strcat (buf, "ss\t{%2, %0|%0, %2}");
12620 else
12621 strcat (buf, "sd\t{%2, %0|%0, %2}");
12623 return buf;
12625 strcpy (buf, p);
12627 switch (GET_CODE (operands[3]))
12629 case MULT:
12630 case PLUS:
12631 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12633 rtx temp = operands[2];
12634 operands[2] = operands[1];
12635 operands[1] = temp;
12638 /* We know operands[0] == operands[1]. */
12640 if (MEM_P (operands[2]))
12642 p = "%Z2\t%2";
12643 break;
12646 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12648 if (STACK_TOP_P (operands[0]))
12649 /* How is it that we are storing to a dead operand[2]?
12650 Well, presumably operands[1] is dead too. We can't
12651 store the result to st(0) as st(0) gets popped on this
12652 instruction. Instead store to operands[2] (which I
12653 think has to be st(1)). st(1) will be popped later.
12654 gcc <= 2.8.1 didn't have this check and generated
12655 assembly code that the Unixware assembler rejected. */
12656 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12657 else
12658 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12659 break;
12662 if (STACK_TOP_P (operands[0]))
12663 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12664 else
12665 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12666 break;
12668 case MINUS:
12669 case DIV:
12670 if (MEM_P (operands[1]))
12672 p = "r%Z1\t%1";
12673 break;
12676 if (MEM_P (operands[2]))
12678 p = "%Z2\t%2";
12679 break;
12682 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12684 #if SYSV386_COMPAT
12685 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12686 derived assemblers, confusingly reverse the direction of
12687 the operation for fsub{r} and fdiv{r} when the
12688 destination register is not st(0). The Intel assembler
12689 doesn't have this brain damage. Read !SYSV386_COMPAT to
12690 figure out what the hardware really does. */
12691 if (STACK_TOP_P (operands[0]))
12692 p = "{p\t%0, %2|rp\t%2, %0}";
12693 else
12694 p = "{rp\t%2, %0|p\t%0, %2}";
12695 #else
12696 if (STACK_TOP_P (operands[0]))
12697 /* As above for fmul/fadd, we can't store to st(0). */
12698 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12699 else
12700 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12701 #endif
12702 break;
12705 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12707 #if SYSV386_COMPAT
12708 if (STACK_TOP_P (operands[0]))
12709 p = "{rp\t%0, %1|p\t%1, %0}";
12710 else
12711 p = "{p\t%1, %0|rp\t%0, %1}";
12712 #else
12713 if (STACK_TOP_P (operands[0]))
12714 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12715 else
12716 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12717 #endif
12718 break;
12721 if (STACK_TOP_P (operands[0]))
12723 if (STACK_TOP_P (operands[1]))
12724 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12725 else
12726 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12727 break;
12729 else if (STACK_TOP_P (operands[1]))
12731 #if SYSV386_COMPAT
12732 p = "{\t%1, %0|r\t%0, %1}";
12733 #else
12734 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12735 #endif
12737 else
12739 #if SYSV386_COMPAT
12740 p = "{r\t%2, %0|\t%0, %2}";
12741 #else
12742 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12743 #endif
12745 break;
12747 default:
12748 gcc_unreachable ();
12751 strcat (buf, p);
12752 return buf;
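/* Hedged example of the templates built above (AT&T forms shown): an
   SFmode SSE add yields "addss\t{%2, %0|%0, %2}", the AVX path yields the
   three-operand "vaddss\t{%2, %1, %0|%0, %1, %2}", and an x87 fadd whose
   second operand dies (destination not at the stack top) comes out as
   "faddp\t{%2, %0|%0, %2}".  */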
12755 /* Return the mode needed for ENTITY in the optimize_mode_switching pass. */
12758 ix86_mode_needed (int entity, rtx insn)
12760 enum attr_i387_cw mode;
12762 /* The mode UNINITIALIZED is used to store the control word after a
12763 function call or ASM pattern. The mode ANY specifies that the function
12764 has no requirements on the control word and makes no changes in the
12765 bits we are interested in. */
12767 if (CALL_P (insn)
12768 || (NONJUMP_INSN_P (insn)
12769 && (asm_noperands (PATTERN (insn)) >= 0
12770 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12771 return I387_CW_UNINITIALIZED;
12773 if (recog_memoized (insn) < 0)
12774 return I387_CW_ANY;
12776 mode = get_attr_i387_cw (insn);
12778 switch (entity)
12780 case I387_TRUNC:
12781 if (mode == I387_CW_TRUNC)
12782 return mode;
12783 break;
12785 case I387_FLOOR:
12786 if (mode == I387_CW_FLOOR)
12787 return mode;
12788 break;
12790 case I387_CEIL:
12791 if (mode == I387_CW_CEIL)
12792 return mode;
12793 break;
12795 case I387_MASK_PM:
12796 if (mode == I387_CW_MASK_PM)
12797 return mode;
12798 break;
12800 default:
12801 gcc_unreachable ();
12804 return I387_CW_ANY;
12807 /* Output code to initialize control word copies used by trunc?f?i and
12808 rounding patterns. CURRENT_MODE is set to the current control word,
12809 while NEW_MODE is set to the new control word. */
12811 void
12812 emit_i387_cw_initialization (int mode)
12814 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12815 rtx new_mode;
12817 enum ix86_stack_slot slot;
12819 rtx reg = gen_reg_rtx (HImode);
12821 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12822 emit_move_insn (reg, copy_rtx (stored_mode));
12824 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12825 || optimize_function_for_size_p (cfun))
12827 switch (mode)
12829 case I387_CW_TRUNC:
12830 /* round toward zero (truncate) */
12831 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12832 slot = SLOT_CW_TRUNC;
12833 break;
12835 case I387_CW_FLOOR:
12836 /* round down toward -oo */
12837 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12838 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12839 slot = SLOT_CW_FLOOR;
12840 break;
12842 case I387_CW_CEIL:
12843 /* round up toward +oo */
12844 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12845 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12846 slot = SLOT_CW_CEIL;
12847 break;
12849 case I387_CW_MASK_PM:
12850 /* mask precision exception for nearbyint() */
12851 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12852 slot = SLOT_CW_MASK_PM;
12853 break;
12855 default:
12856 gcc_unreachable ();
12859 else
12861 switch (mode)
12863 case I387_CW_TRUNC:
12864 /* round toward zero (truncate) */
12865 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12866 slot = SLOT_CW_TRUNC;
12867 break;
12869 case I387_CW_FLOOR:
12870 /* round down toward -oo */
12871 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12872 slot = SLOT_CW_FLOOR;
12873 break;
12875 case I387_CW_CEIL:
12876 /* round up toward +oo */
12877 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12878 slot = SLOT_CW_CEIL;
12879 break;
12881 case I387_CW_MASK_PM:
12882 /* mask precision exception for nearbyint() */
12883 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12884 slot = SLOT_CW_MASK_PM;
12885 break;
12887 default:
12888 gcc_unreachable ();
12892 gcc_assert (slot < MAX_386_STACK_LOCALS);
12894 new_mode = assign_386_stack_local (HImode, slot);
12895 emit_move_insn (new_mode, reg);
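/* Standalone sketch (illustration only, not the port's API) of the control
   word arithmetic above: bits 10-11 of the x87 control word select the
   rounding mode and bit 5 masks the precision exception.  */
#if 0
#include <stdint.h>

static uint16_t example_cw_trunc (uint16_t cw)   { return cw | 0x0c00; }              /* toward zero */
static uint16_t example_cw_floor (uint16_t cw)   { return (cw & ~0x0c00) | 0x0400; }  /* toward -oo */
static uint16_t example_cw_ceil (uint16_t cw)    { return (cw & ~0x0c00) | 0x0800; }  /* toward +oo */
static uint16_t example_cw_mask_pm (uint16_t cw) { return cw | 0x0020; }              /* mask precision exc. */
#endif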
12898 /* Output code for INSN to convert a float to a signed int. OPERANDS
12899 are the insn operands. The output may be [HSD]Imode and the input
12900 operand may be [SDX]Fmode. */
12902 const char *
12903 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12905 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12906 int dimode_p = GET_MODE (operands[0]) == DImode;
12907 int round_mode = get_attr_i387_cw (insn);
12909 /* Jump through a hoop or two for DImode, since the hardware has no
12910 non-popping instruction. We used to do this a different way, but
12911 that was somewhat fragile and broke with post-reload splitters. */
12912 if ((dimode_p || fisttp) && !stack_top_dies)
12913 output_asm_insn ("fld\t%y1", operands);
12915 gcc_assert (STACK_TOP_P (operands[1]));
12916 gcc_assert (MEM_P (operands[0]));
12917 gcc_assert (GET_MODE (operands[1]) != TFmode);
12919 if (fisttp)
12920 output_asm_insn ("fisttp%Z0\t%0", operands);
12921 else
12923 if (round_mode != I387_CW_ANY)
12924 output_asm_insn ("fldcw\t%3", operands);
12925 if (stack_top_dies || dimode_p)
12926 output_asm_insn ("fistp%Z0\t%0", operands);
12927 else
12928 output_asm_insn ("fist%Z0\t%0", operands);
12929 if (round_mode != I387_CW_ANY)
12930 output_asm_insn ("fldcw\t%2", operands);
12933 return "";
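/* Illustration only: with the rounding control word in operand 3 and the
   original control word in operand 2, a SImode conversion comes out
   roughly as
       fldcw   %3
       fistpl  %0
       fldcw   %2
   whereas the fisttp path needs no control-word switching at all.  */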
12936 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12937 have the values zero or one, indicates the ffreep insn's operand
12938 from the OPERANDS array. */
12940 static const char *
12941 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12943 if (TARGET_USE_FFREEP)
12944 #ifdef HAVE_AS_IX86_FFREEP
12945 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12946 #else
12948 static char retval[32];
12949 int regno = REGNO (operands[opno]);
12951 gcc_assert (FP_REGNO_P (regno));
12953 regno -= FIRST_STACK_REG;
12955 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12956 return retval;
12958 #endif
12960 return opno ? "fstp\t%y1" : "fstp\t%y0";
12964 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12965 should be used. UNORDERED_P is true when fucom should be used. */
12967 const char *
12968 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12970 int stack_top_dies;
12971 rtx cmp_op0, cmp_op1;
12972 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12974 if (eflags_p)
12976 cmp_op0 = operands[0];
12977 cmp_op1 = operands[1];
12979 else
12981 cmp_op0 = operands[1];
12982 cmp_op1 = operands[2];
12985 if (is_sse)
12987 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12988 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12989 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12990 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12992 if (GET_MODE (operands[0]) == SFmode)
12993 if (unordered_p)
12994 return &ucomiss[TARGET_AVX ? 0 : 1];
12995 else
12996 return &comiss[TARGET_AVX ? 0 : 1];
12997 else
12998 if (unordered_p)
12999 return &ucomisd[TARGET_AVX ? 0 : 1];
13000 else
13001 return &comisd[TARGET_AVX ? 0 : 1];
13004 gcc_assert (STACK_TOP_P (cmp_op0));
13006 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
13008 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
13010 if (stack_top_dies)
13012 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
13013 return output_387_ffreep (operands, 1);
13015 else
13016 return "ftst\n\tfnstsw\t%0";
13019 if (STACK_REG_P (cmp_op1)
13020 && stack_top_dies
13021 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
13022 && REGNO (cmp_op1) != FIRST_STACK_REG)
13024 /* If both the top of the 387 stack and the other operand (also a
13025 stack register) die, then this must be an
13026 `fcompp' float compare. */
13028 if (eflags_p)
13030 /* There is no double popping fcomi variant. Fortunately,
13031 eflags is immune from the fstp's cc clobbering. */
13032 if (unordered_p)
13033 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
13034 else
13035 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
13036 return output_387_ffreep (operands, 0);
13038 else
13040 if (unordered_p)
13041 return "fucompp\n\tfnstsw\t%0";
13042 else
13043 return "fcompp\n\tfnstsw\t%0";
13046 else
13048 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
13050 static const char * const alt[16] =
13052 "fcom%Z2\t%y2\n\tfnstsw\t%0",
13053 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
13054 "fucom%Z2\t%y2\n\tfnstsw\t%0",
13055 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
13057 "ficom%Z2\t%y2\n\tfnstsw\t%0",
13058 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
13059 NULL,
13060 NULL,
13062 "fcomi\t{%y1, %0|%0, %y1}",
13063 "fcomip\t{%y1, %0|%0, %y1}",
13064 "fucomi\t{%y1, %0|%0, %y1}",
13065 "fucomip\t{%y1, %0|%0, %y1}",
13067 NULL,
13068 NULL,
13069 NULL,
13070 NULL
13073 int mask;
13074 const char *ret;
13076 mask = eflags_p << 3;
13077 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
13078 mask |= unordered_p << 1;
13079 mask |= stack_top_dies;
13081 gcc_assert (mask < 16);
13082 ret = alt[mask];
13083 gcc_assert (ret);
13085 return ret;
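/* Standalone sketch (illustration only) of the 4-bit table index computed
   above: bit 3 selects the fcomi forms, bit 2 the integer-operand forms,
   bit 1 the unordered forms and bit 0 the popping forms.  */
#if 0
static int
example_fp_compare_index (int eflags_p, int int_operand_p,
                          int unordered_p, int stack_top_dies)
{
  return (eflags_p << 3) | (int_operand_p << 2)
         | (unordered_p << 1) | stack_top_dies;   /* 0 .. 15 */
}
#endif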
13089 void
13090 ix86_output_addr_vec_elt (FILE *file, int value)
13092 const char *directive = ASM_LONG;
13094 #ifdef ASM_QUAD
13095 if (TARGET_64BIT)
13096 directive = ASM_QUAD;
13097 #else
13098 gcc_assert (!TARGET_64BIT);
13099 #endif
13101 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
13104 void
13105 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
13107 const char *directive = ASM_LONG;
13109 #ifdef ASM_QUAD
13110 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
13111 directive = ASM_QUAD;
13112 #else
13113 gcc_assert (!TARGET_64BIT);
13114 #endif
13115 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
13116 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
13117 fprintf (file, "%s%s%d-%s%d\n",
13118 directive, LPREFIX, value, LPREFIX, rel);
13119 else if (HAVE_AS_GOTOFF_IN_DATA)
13120 fprintf (file, ASM_LONG "%s%d@GOTOFF\n", LPREFIX, value);
13121 #if TARGET_MACHO
13122 else if (TARGET_MACHO)
13124 fprintf (file, ASM_LONG "%s%d-", LPREFIX, value);
13125 machopic_output_function_base_name (file);
13126 putc ('\n', file);
13128 #endif
13129 else
13130 asm_fprintf (file, ASM_LONG "%U%s+[.-%s%d]\n",
13131 GOT_SYMBOL_NAME, LPREFIX, value);
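/* Illustration only (assuming LPREFIX is ".L"): a 32-bit PIC jump table
   entry emitted above looks like ".long .L5@GOTOFF" when the assembler
   accepts @GOTOFF in data, while the 64-bit / VxWorks RTP path emits a
   plain difference such as ".quad .L5-.L2" (or ".long" when
   CASE_VECTOR_MODE is SImode).  */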
13134 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
13135 for the target. */
13137 void
13138 ix86_expand_clear (rtx dest)
13140 rtx tmp;
13142 /* We play register width games, which are only valid after reload. */
13143 gcc_assert (reload_completed);
13145 /* Avoid HImode and its attendant prefix byte. */
13146 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
13147 dest = gen_rtx_REG (SImode, REGNO (dest));
13148 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
13150 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
13151 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
13153 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13154 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
13157 emit_insn (tmp);
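/* Illustration only: for %eax this expands to the RTL behind either
   "xorl %eax, %eax" (with a FLAGS_REG clobber) or "movl $0, %eax"; the
   xor form is chosen unless TARGET_USE_MOV0 is set and the insn is not
   being optimized for speed.  */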
13160 /* X is an unchanging MEM. If it is a constant pool reference, return
13161 the constant pool rtx, else NULL. */
13164 maybe_get_pool_constant (rtx x)
13166 x = ix86_delegitimize_address (XEXP (x, 0));
13168 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
13169 return get_pool_constant (x);
13171 return NULL_RTX;
13174 void
13175 ix86_expand_move (enum machine_mode mode, rtx operands[])
13177 rtx op0, op1;
13178 enum tls_model model;
13180 op0 = operands[0];
13181 op1 = operands[1];
13183 if (GET_CODE (op1) == SYMBOL_REF)
13185 model = SYMBOL_REF_TLS_MODEL (op1);
13186 if (model)
13188 op1 = legitimize_tls_address (op1, model, true);
13189 op1 = force_operand (op1, op0);
13190 if (op1 == op0)
13191 return;
13193 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13194 && SYMBOL_REF_DLLIMPORT_P (op1))
13195 op1 = legitimize_dllimport_symbol (op1, false);
13197 else if (GET_CODE (op1) == CONST
13198 && GET_CODE (XEXP (op1, 0)) == PLUS
13199 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
13201 rtx addend = XEXP (XEXP (op1, 0), 1);
13202 rtx symbol = XEXP (XEXP (op1, 0), 0);
13203 rtx tmp = NULL;
13205 model = SYMBOL_REF_TLS_MODEL (symbol);
13206 if (model)
13207 tmp = legitimize_tls_address (symbol, model, true);
13208 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13209 && SYMBOL_REF_DLLIMPORT_P (symbol))
13210 tmp = legitimize_dllimport_symbol (symbol, true);
13212 if (tmp)
13214 tmp = force_operand (tmp, NULL);
13215 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
13216 op0, 1, OPTAB_DIRECT);
13217 if (tmp == op0)
13218 return;
13222 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
13224 if (TARGET_MACHO && !TARGET_64BIT)
13226 #if TARGET_MACHO
13227 if (MACHOPIC_PURE)
13229 rtx temp = ((reload_in_progress
13230 || ((op0 && REG_P (op0))
13231 && mode == Pmode))
13232 ? op0 : gen_reg_rtx (Pmode));
13233 op1 = machopic_indirect_data_reference (op1, temp);
13234 op1 = machopic_legitimize_pic_address (op1, mode,
13235 temp == op1 ? 0 : temp);
13237 else if (MACHOPIC_INDIRECT)
13238 op1 = machopic_indirect_data_reference (op1, 0);
13239 if (op0 == op1)
13240 return;
13241 #endif
13243 else
13245 if (MEM_P (op0))
13246 op1 = force_reg (Pmode, op1);
13247 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13249 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13250 op1 = legitimize_pic_address (op1, reg);
13251 if (op0 == op1)
13252 return;
13256 else
13258 if (MEM_P (op0)
13259 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13260 || !push_operand (op0, mode))
13261 && MEM_P (op1))
13262 op1 = force_reg (mode, op1);
13264 if (push_operand (op0, mode)
13265 && ! general_no_elim_operand (op1, mode))
13266 op1 = copy_to_mode_reg (mode, op1);
13268 /* Force large constants in 64-bit compilation into a register
13269 to get them CSEed. */
13270 if (can_create_pseudo_p ()
13271 && (mode == DImode) && TARGET_64BIT
13272 && immediate_operand (op1, mode)
13273 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13274 && !register_operand (op0, mode)
13275 && optimize)
13276 op1 = copy_to_mode_reg (mode, op1);
13278 if (can_create_pseudo_p ()
13279 && FLOAT_MODE_P (mode)
13280 && GET_CODE (op1) == CONST_DOUBLE)
13282 /* If we are loading a floating point constant to a register,
13283 force the value to memory now, since we'll get better code
13284 out of the back end. */
13286 op1 = validize_mem (force_const_mem (mode, op1));
13287 if (!register_operand (op0, mode))
13289 rtx temp = gen_reg_rtx (mode);
13290 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13291 emit_move_insn (op0, temp);
13292 return;
13297 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13300 void
13301 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13303 rtx op0 = operands[0], op1 = operands[1];
13304 unsigned int align = GET_MODE_ALIGNMENT (mode);
13306 /* Force constants other than zero into memory. We do not know how
13307 the instructions used to build constants modify the upper 64 bits
13308 of the register; once we have that information we may be able
13309 to handle some of them more efficiently. */
13310 if (can_create_pseudo_p ()
13311 && register_operand (op0, mode)
13312 && (CONSTANT_P (op1)
13313 || (GET_CODE (op1) == SUBREG
13314 && CONSTANT_P (SUBREG_REG (op1))))
13315 && !standard_sse_constant_p (op1))
13316 op1 = validize_mem (force_const_mem (mode, op1));
13318 /* We need to check memory alignment for SSE mode since attributes
13319 can make operands unaligned. */
13320 if (can_create_pseudo_p ()
13321 && SSE_REG_MODE_P (mode)
13322 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13323 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13325 rtx tmp[2];
13327 /* ix86_expand_vector_move_misalign() does not like constants ... */
13328 if (CONSTANT_P (op1)
13329 || (GET_CODE (op1) == SUBREG
13330 && CONSTANT_P (SUBREG_REG (op1))))
13331 op1 = validize_mem (force_const_mem (mode, op1));
13333 /* ... nor both arguments in memory. */
13334 if (!register_operand (op0, mode)
13335 && !register_operand (op1, mode))
13336 op1 = force_reg (mode, op1);
13338 tmp[0] = op0; tmp[1] = op1;
13339 ix86_expand_vector_move_misalign (mode, tmp);
13340 return;
13343 /* Make operand1 a register if it isn't already. */
13344 if (can_create_pseudo_p ()
13345 && !register_operand (op0, mode)
13346 && !register_operand (op1, mode))
13348 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13349 return;
13352 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13355 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13356 straight to ix86_expand_vector_move. */
13357 /* Code generation for scalar reg-reg moves of single and double precision data:
13358 if (x86_sse_partial_reg_dependency == true || x86_sse_split_regs == true)
13359 movaps reg, reg
13360 else
13361 movss reg, reg
13362 if (x86_sse_partial_reg_dependency == true)
13363 movapd reg, reg
13364 else
13365 movsd reg, reg
13367 Code generation for scalar loads of double precision data:
13368 if (x86_sse_split_regs == true)
13369 movlpd mem, reg (gas syntax)
13370 else
13371 movsd mem, reg
13373 Code generation for unaligned packed loads of single precision data
13374 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13375 if (x86_sse_unaligned_move_optimal)
13376 movups mem, reg
13378 if (x86_sse_partial_reg_dependency == true)
13380 xorps reg, reg
13381 movlps mem, reg
13382 movhps mem+8, reg
13384 else
13386 movlps mem, reg
13387 movhps mem+8, reg
13390 Code generation for unaligned packed loads of double precision data
13391 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13392 if (x86_sse_unaligned_move_optimal)
13393 movupd mem, reg
13395 if (x86_sse_split_regs == true)
13397 movlpd mem, reg
13398 movhpd mem+8, reg
13400 else
13402 movsd mem, reg
13403 movhpd mem+8, reg
13407 void
13408 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13410 rtx op0, op1, m;
13412 op0 = operands[0];
13413 op1 = operands[1];
13415 if (TARGET_AVX)
13417 switch (GET_MODE_CLASS (mode))
13419 case MODE_VECTOR_INT:
13420 case MODE_INT:
13421 switch (GET_MODE_SIZE (mode))
13423 case 16:
13424 /* If we're optimizing for size, movups is the smallest. */
13425 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13427 op0 = gen_lowpart (V4SFmode, op0);
13428 op1 = gen_lowpart (V4SFmode, op1);
13429 emit_insn (gen_avx_movups (op0, op1));
13430 return;
13432 op0 = gen_lowpart (V16QImode, op0);
13433 op1 = gen_lowpart (V16QImode, op1);
13434 emit_insn (gen_avx_movdqu (op0, op1));
13435 break;
13436 case 32:
13437 op0 = gen_lowpart (V32QImode, op0);
13438 op1 = gen_lowpart (V32QImode, op1);
13439 emit_insn (gen_avx_movdqu256 (op0, op1));
13440 break;
13441 default:
13442 gcc_unreachable ();
13444 break;
13445 case MODE_VECTOR_FLOAT:
13446 op0 = gen_lowpart (mode, op0);
13447 op1 = gen_lowpart (mode, op1);
13449 switch (mode)
13451 case V4SFmode:
13452 emit_insn (gen_avx_movups (op0, op1));
13453 break;
13454 case V8SFmode:
13455 emit_insn (gen_avx_movups256 (op0, op1));
13456 break;
13457 case V2DFmode:
13458 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13460 op0 = gen_lowpart (V4SFmode, op0);
13461 op1 = gen_lowpart (V4SFmode, op1);
13462 emit_insn (gen_avx_movups (op0, op1));
13463 return;
13465 emit_insn (gen_avx_movupd (op0, op1));
13466 break;
13467 case V4DFmode:
13468 emit_insn (gen_avx_movupd256 (op0, op1));
13469 break;
13470 default:
13471 gcc_unreachable ();
13473 break;
13475 default:
13476 gcc_unreachable ();
13479 return;
13482 if (MEM_P (op1))
13484 /* If we're optimizing for size, movups is the smallest. */
13485 if (optimize_insn_for_size_p ()
13486 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13488 op0 = gen_lowpart (V4SFmode, op0);
13489 op1 = gen_lowpart (V4SFmode, op1);
13490 emit_insn (gen_sse_movups (op0, op1));
13491 return;
13494 /* ??? If we have typed data, then it would appear that using
13495 movdqu is the only way to get unaligned data loaded with
13496 integer type. */
13497 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13499 op0 = gen_lowpart (V16QImode, op0);
13500 op1 = gen_lowpart (V16QImode, op1);
13501 emit_insn (gen_sse2_movdqu (op0, op1));
13502 return;
13505 if (TARGET_SSE2 && mode == V2DFmode)
13507 rtx zero;
13509 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
13511 op0 = gen_lowpart (V2DFmode, op0);
13512 op1 = gen_lowpart (V2DFmode, op1);
13513 emit_insn (gen_sse2_movupd (op0, op1));
13514 return;
13517 /* When SSE registers are split into halves, we can avoid
13518 writing to the top half twice. */
13519 if (TARGET_SSE_SPLIT_REGS)
13521 emit_clobber (op0);
13522 zero = op0;
13524 else
13526 /* ??? Not sure about the best option for the Intel chips.
13527 The following would seem to satisfy; the register is
13528 entirely cleared, breaking the dependency chain. We
13529 then store to the upper half, with a dependency depth
13530 of one. A rumor has it that Intel recommends two movsd
13531 followed by an unpacklpd, but this is unconfirmed. And
13532 given that the dependency depth of the unpacklpd would
13533 still be one, I'm not sure why this would be better. */
13534 zero = CONST0_RTX (V2DFmode);
13537 m = adjust_address (op1, DFmode, 0);
13538 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13539 m = adjust_address (op1, DFmode, 8);
13540 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13542 else
13544 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
13546 op0 = gen_lowpart (V4SFmode, op0);
13547 op1 = gen_lowpart (V4SFmode, op1);
13548 emit_insn (gen_sse_movups (op0, op1));
13549 return;
13552 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13553 emit_move_insn (op0, CONST0_RTX (mode));
13554 else
13555 emit_clobber (op0);
13557 if (mode != V4SFmode)
13558 op0 = gen_lowpart (V4SFmode, op0);
13559 m = adjust_address (op1, V2SFmode, 0);
13560 emit_insn (gen_sse_loadlps (op0, op0, m));
13561 m = adjust_address (op1, V2SFmode, 8);
13562 emit_insn (gen_sse_loadhps (op0, op0, m));
13565 else if (MEM_P (op0))
13567 /* If we're optimizing for size, movups is the smallest. */
13568 if (optimize_insn_for_size_p ()
13569 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13571 op0 = gen_lowpart (V4SFmode, op0);
13572 op1 = gen_lowpart (V4SFmode, op1);
13573 emit_insn (gen_sse_movups (op0, op1));
13574 return;
13577 /* ??? Similar to above, only less clear because of quote
13578 typeless stores unquote. */
13579 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13580 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13582 op0 = gen_lowpart (V16QImode, op0);
13583 op1 = gen_lowpart (V16QImode, op1);
13584 emit_insn (gen_sse2_movdqu (op0, op1));
13585 return;
13588 if (TARGET_SSE2 && mode == V2DFmode)
13590 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
13592 op0 = gen_lowpart (V2DFmode, op0);
13593 op1 = gen_lowpart (V2DFmode, op1);
13594 emit_insn (gen_sse2_movupd (op0, op1));
13596 else
13598 m = adjust_address (op0, DFmode, 0);
13599 emit_insn (gen_sse2_storelpd (m, op1));
13600 m = adjust_address (op0, DFmode, 8);
13601 emit_insn (gen_sse2_storehpd (m, op1));
13604 else
13606 if (mode != V4SFmode)
13607 op1 = gen_lowpart (V4SFmode, op1);
13609 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
13611 op0 = gen_lowpart (V4SFmode, op0);
13612 emit_insn (gen_sse_movups (op0, op1));
13614 else
13616 m = adjust_address (op0, V2SFmode, 0);
13617 emit_insn (gen_sse_storelps (m, op1));
13618 m = adjust_address (op0, V2SFmode, 8);
13619 emit_insn (gen_sse_storehps (m, op1));
13623 else
13624 gcc_unreachable ();
13627 /* Expand a push in MODE. This is some mode for which we do not support
13628 proper push instructions, at least from the registers that we expect
13629 the value to live in. */
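/* The expansion below is just an explicit stack pointer adjustment
   followed by an ordinary store, roughly (for a 32-bit target)

	sub	$size, %esp
	mov	value, (%esp)

   rather than a real push instruction.  */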
13631 void
13632 ix86_expand_push (enum machine_mode mode, rtx x)
13634 rtx tmp;
13636 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13637 GEN_INT (-GET_MODE_SIZE (mode)),
13638 stack_pointer_rtx, 1, OPTAB_DIRECT);
13639 if (tmp != stack_pointer_rtx)
13640 emit_move_insn (stack_pointer_rtx, tmp);
13642 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13644 /* When we push an operand onto the stack, it has to be aligned at least
13645 at the function argument boundary. However, since we don't have
13646 the argument type, we can't determine the actual argument
13647 boundary. */
13648 emit_move_insn (tmp, x);
13651 /* Helper function of ix86_fixup_binary_operands to canonicalize
13652 operand order. Returns true if the operands should be swapped. */
13654 static bool
13655 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13656 rtx operands[])
13658 rtx dst = operands[0];
13659 rtx src1 = operands[1];
13660 rtx src2 = operands[2];
13662 /* If the operation is not commutative, we can't do anything. */
13663 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13664 return false;
13666 /* Highest priority is that src1 should match dst. */
13667 if (rtx_equal_p (dst, src1))
13668 return false;
13669 if (rtx_equal_p (dst, src2))
13670 return true;
13672 /* Next highest priority is that immediate constants come second. */
13673 if (immediate_operand (src2, mode))
13674 return false;
13675 if (immediate_operand (src1, mode))
13676 return true;
13678 /* Lowest priority is that memory references should come second. */
13679 if (MEM_P (src2))
13680 return false;
13681 if (MEM_P (src1))
13682 return true;
13684 return false;
13688 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13689 destination to use for the operation. If different from the true
13690 destination in operands[0], a copy operation will be required. */
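/* As an illustration of the canonicalization this performs: for a
   commutative operation such as "a = 5 + b" the sources are swapped so
   that the immediate comes second, and for "mem = mem2 + reg" the
   computation is redirected into a fresh register, which the caller
   then copies back into the memory destination.  */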
13693 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13694 rtx operands[])
13696 rtx dst = operands[0];
13697 rtx src1 = operands[1];
13698 rtx src2 = operands[2];
13700 /* Canonicalize operand order. */
13701 if (ix86_swap_binary_operands_p (code, mode, operands))
13703 rtx temp;
13705 /* It is invalid to swap operands of different modes. */
13706 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13708 temp = src1;
13709 src1 = src2;
13710 src2 = temp;
13713 /* Both source operands cannot be in memory. */
13714 if (MEM_P (src1) && MEM_P (src2))
13716 /* Optimization: Only read from memory once. */
13717 if (rtx_equal_p (src1, src2))
13719 src2 = force_reg (mode, src2);
13720 src1 = src2;
13722 else
13723 src2 = force_reg (mode, src2);
13726 /* If the destination is memory, and we do not have matching source
13727 operands, do things in registers. */
13728 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13729 dst = gen_reg_rtx (mode);
13731 /* Source 1 cannot be a constant. */
13732 if (CONSTANT_P (src1))
13733 src1 = force_reg (mode, src1);
13735 /* Source 1 cannot be a non-matching memory. */
13736 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13737 src1 = force_reg (mode, src1);
13739 operands[1] = src1;
13740 operands[2] = src2;
13741 return dst;
13744 /* Similarly, but assume that the destination has already been
13745 set up properly. */
13747 void
13748 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13749 enum machine_mode mode, rtx operands[])
13751 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13752 gcc_assert (dst == operands[0]);
13755 /* Attempt to expand a binary operator. Make the expansion closer to the
13756 actual machine than just general_operand, which will allow 3 separate
13757 memory references (one output, two inputs) in a single insn. */
13759 void
13760 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13761 rtx operands[])
13763 rtx src1, src2, dst, op, clob;
13765 dst = ix86_fixup_binary_operands (code, mode, operands);
13766 src1 = operands[1];
13767 src2 = operands[2];
13769 /* Emit the instruction. */
13771 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13772 if (reload_in_progress)
13774 /* Reload doesn't know about the flags register, and doesn't know that
13775 it doesn't want to clobber it. We can only do this with PLUS. */
13776 gcc_assert (code == PLUS);
13777 emit_insn (op);
13779 else
13781 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13782 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13785 /* Fix up the destination if needed. */
13786 if (dst != operands[0])
13787 emit_move_insn (operands[0], dst);
13790 /* Return TRUE or FALSE depending on whether the binary operator meets the
13791 appropriate constraints. */
13794 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13795 rtx operands[3])
13797 rtx dst = operands[0];
13798 rtx src1 = operands[1];
13799 rtx src2 = operands[2];
13801 /* Both source operands cannot be in memory. */
13802 if (MEM_P (src1) && MEM_P (src2))
13803 return 0;
13805 /* Canonicalize operand order for commutative operators. */
13806 if (ix86_swap_binary_operands_p (code, mode, operands))
13808 rtx temp = src1;
13809 src1 = src2;
13810 src2 = temp;
13813 /* If the destination is memory, we must have a matching source operand. */
13814 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13815 return 0;
13817 /* Source 1 cannot be a constant. */
13818 if (CONSTANT_P (src1))
13819 return 0;
13821 /* Source 1 cannot be a non-matching memory. */
13822 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13823 return 0;
13825 return 1;
13828 /* Attempt to expand a unary operator. Make the expansion closer to the
13829 actual machine than just general_operand, which will allow 2 separate
13830 memory references (one output, one input) in a single insn. */
13832 void
13833 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13834 rtx operands[])
13836 int matching_memory;
13837 rtx src, dst, op, clob;
13839 dst = operands[0];
13840 src = operands[1];
13842 /* If the destination is memory, and we do not have matching source
13843 operands, do things in registers. */
13844 matching_memory = 0;
13845 if (MEM_P (dst))
13847 if (rtx_equal_p (dst, src))
13848 matching_memory = 1;
13849 else
13850 dst = gen_reg_rtx (mode);
13853 /* When source operand is memory, destination must match. */
13854 if (MEM_P (src) && !matching_memory)
13855 src = force_reg (mode, src);
13857 /* Emit the instruction. */
13859 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13860 if (reload_in_progress || code == NOT)
13862 /* Reload doesn't know about the flags register, and doesn't know that
13863 it doesn't want to clobber it. */
13864 gcc_assert (code == NOT);
13865 emit_insn (op);
13867 else
13869 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13870 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13873 /* Fix up the destination if needed. */
13874 if (dst != operands[0])
13875 emit_move_insn (operands[0], dst);
13878 #define LEA_SEARCH_THRESHOLD 12
13880 /* Search backward for a non-AGU definition of register number REGNO1
13881 or register number REGNO2 in INSN's basic block until we
13882 1. pass LEA_SEARCH_THRESHOLD instructions, or
13883 2. reach the BB boundary, or
13884 3. reach an AGU definition.
13885 Return the distance between the non-AGU definition point and INSN.
13886 If there is no definition point, return -1. */
13888 static int
13889 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13890 rtx insn)
13892 basic_block bb = BLOCK_FOR_INSN (insn);
13893 int distance = 0;
13894 df_ref *def_rec;
13895 enum attr_type insn_type;
13897 if (insn != BB_HEAD (bb))
13899 rtx prev = PREV_INSN (insn);
13900 while (prev && distance < LEA_SEARCH_THRESHOLD)
13902 if (NONDEBUG_INSN_P (prev))
13904 distance++;
13905 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13906 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13907 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13908 && (regno1 == DF_REF_REGNO (*def_rec)
13909 || regno2 == DF_REF_REGNO (*def_rec)))
13911 insn_type = get_attr_type (prev);
13912 if (insn_type != TYPE_LEA)
13913 goto done;
13916 if (prev == BB_HEAD (bb))
13917 break;
13918 prev = PREV_INSN (prev);
13922 if (distance < LEA_SEARCH_THRESHOLD)
13924 edge e;
13925 edge_iterator ei;
13926 bool simple_loop = false;
13928 FOR_EACH_EDGE (e, ei, bb->preds)
13929 if (e->src == bb)
13931 simple_loop = true;
13932 break;
13935 if (simple_loop)
13937 rtx prev = BB_END (bb);
13938 while (prev
13939 && prev != insn
13940 && distance < LEA_SEARCH_THRESHOLD)
13942 if (NONDEBUG_INSN_P (prev))
13944 distance++;
13945 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13946 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13947 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13948 && (regno1 == DF_REF_REGNO (*def_rec)
13949 || regno2 == DF_REF_REGNO (*def_rec)))
13951 insn_type = get_attr_type (prev);
13952 if (insn_type != TYPE_LEA)
13953 goto done;
13956 prev = PREV_INSN (prev);
13961 distance = -1;
13963 done:
13964 /* get_attr_type may modify recog data. We want to make sure
13965 that recog data is valid for instruction INSN, on which
13966 distance_non_agu_define is called. INSN is unchanged here. */
13967 extract_insn_cached (insn);
13968 return distance;
13971 /* Return the distance between INSN and the next insn that uses
13972 register number REGNO0 in a memory address. Return -1 if no such
13973 use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set. */
13975 static int
13976 distance_agu_use (unsigned int regno0, rtx insn)
13978 basic_block bb = BLOCK_FOR_INSN (insn);
13979 int distance = 0;
13980 df_ref *def_rec;
13981 df_ref *use_rec;
13983 if (insn != BB_END (bb))
13985 rtx next = NEXT_INSN (insn);
13986 while (next && distance < LEA_SEARCH_THRESHOLD)
13988 if (NONDEBUG_INSN_P (next))
13990 distance++;
13992 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13993 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13994 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13995 && regno0 == DF_REF_REGNO (*use_rec))
13997 /* Return DISTANCE if OP0 is used in memory
13998 address in NEXT. */
13999 return distance;
14002 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
14003 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
14004 && !DF_REF_IS_ARTIFICIAL (*def_rec)
14005 && regno0 == DF_REF_REGNO (*def_rec))
14007 /* Return -1 if OP0 is set in NEXT. */
14008 return -1;
14011 if (next == BB_END (bb))
14012 break;
14013 next = NEXT_INSN (next);
14017 if (distance < LEA_SEARCH_THRESHOLD)
14019 edge e;
14020 edge_iterator ei;
14021 bool simple_loop = false;
14023 FOR_EACH_EDGE (e, ei, bb->succs)
14024 if (e->dest == bb)
14026 simple_loop = true;
14027 break;
14030 if (simple_loop)
14032 rtx next = BB_HEAD (bb);
14033 while (next
14034 && next != insn
14035 && distance < LEA_SEARCH_THRESHOLD)
14037 if (NONDEBUG_INSN_P (next))
14039 distance++;
14041 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
14042 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
14043 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
14044 && regno0 == DF_REF_REGNO (*use_rec))
14046 /* Return DISTANCE if OP0 is used in memory
14047 address in NEXT. */
14048 return distance;
14051 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
14052 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
14053 && !DF_REF_IS_ARTIFICIAL (*def_rec)
14054 && regno0 == DF_REF_REGNO (*def_rec))
14056 /* Return -1 if OP0 is set in NEXT. */
14057 return -1;
14061 next = NEXT_INSN (next);
14066 return -1;
14069 /* Define this macro to tune LEA priority vs. ADD; it takes effect when
14070 there is a dilemma of choosing LEA or ADD.
14071 Negative value: ADD is preferred over LEA.
14072 Zero: Neutral.
14073 Positive value: LEA is preferred over ADD. */
14074 #define IX86_LEA_PRIORITY 2
14076 /* Return true if it is ok to optimize an ADD operation to an LEA
14077 operation to avoid flag register consumption. For processors
14078 like ATOM, if the destination register of the LEA holds an actual
14079 address which will be used soon, LEA is better; otherwise ADD
14080 is better. */
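/* As a rough illustration of the heuristic (assuming the ATOM-style
   AGU/ALU split described above): in a sequence like

	lea	(%eax,%ebx), %eax	; instead of add %ebx, %eax
	mov	(%eax), %ecx		; result feeds an address soon

   the LEA form is preferred because its result is consumed by address
   generation shortly afterwards; if %eax were instead consumed only by
   further arithmetic, the plain ADD would be preferred.  */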
14082 bool
14083 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
14084 rtx insn, rtx operands[])
14086 unsigned int regno0 = true_regnum (operands[0]);
14087 unsigned int regno1 = true_regnum (operands[1]);
14088 unsigned int regno2;
14090 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
14091 return regno0 != regno1;
14093 regno2 = true_regnum (operands[2]);
14095 /* If a = b + c and a != b and a != c, we must use the lea form. */
14096 if (regno0 != regno1 && regno0 != regno2)
14097 return true;
14098 else
14100 int dist_define, dist_use;
14101 dist_define = distance_non_agu_define (regno1, regno2, insn);
14102 if (dist_define <= 0)
14103 return true;
14105 /* If this insn has both backward non-agu dependence and forward
14106 agu dependence, the one with the shorter distance takes effect. */
14107 dist_use = distance_agu_use (regno0, insn);
14108 if (dist_use <= 0
14109 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
14110 return false;
14112 return true;
14116 /* Return true if destination reg of SET_BODY is shift count of
14117 USE_BODY. */
14119 static bool
14120 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
14122 rtx set_dest;
14123 rtx shift_rtx;
14124 int i;
14126 /* Retrieve destination of SET_BODY. */
14127 switch (GET_CODE (set_body))
14129 case SET:
14130 set_dest = SET_DEST (set_body);
14131 if (!set_dest || !REG_P (set_dest))
14132 return false;
14133 break;
14134 case PARALLEL:
14135 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
14136 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
14137 use_body))
14138 return true;
14139 default:
14140 return false;
14141 break;
14144 /* Retrieve shift count of USE_BODY. */
14145 switch (GET_CODE (use_body))
14147 case SET:
14148 shift_rtx = XEXP (use_body, 1);
14149 break;
14150 case PARALLEL:
14151 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
14152 if (ix86_dep_by_shift_count_body (set_body,
14153 XVECEXP (use_body, 0, i)))
14154 return true;
14155 default:
14156 return false;
14157 break;
14160 if (shift_rtx
14161 && (GET_CODE (shift_rtx) == ASHIFT
14162 || GET_CODE (shift_rtx) == LSHIFTRT
14163 || GET_CODE (shift_rtx) == ASHIFTRT
14164 || GET_CODE (shift_rtx) == ROTATE
14165 || GET_CODE (shift_rtx) == ROTATERT))
14167 rtx shift_count = XEXP (shift_rtx, 1);
14169 /* Return true if shift count is dest of SET_BODY. */
14170 if (REG_P (shift_count)
14171 && true_regnum (set_dest) == true_regnum (shift_count))
14172 return true;
14175 return false;
14178 /* Return true if destination reg of SET_INSN is shift count of
14179 USE_INSN. */
14181 bool
14182 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
14184 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
14185 PATTERN (use_insn));
14188 /* Return TRUE or FALSE depending on whether the unary operator meets the
14189 appropriate constraints. */
14192 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
14193 enum machine_mode mode ATTRIBUTE_UNUSED,
14194 rtx operands[2] ATTRIBUTE_UNUSED)
14196 /* If one of the operands is memory, source and destination must match. */
14197 if ((MEM_P (operands[0])
14198 || MEM_P (operands[1]))
14199 && ! rtx_equal_p (operands[0], operands[1]))
14200 return FALSE;
14201 return TRUE;
14204 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
14205 are ok, keeping in mind the possible movddup alternative. */
14207 bool
14208 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
14210 if (MEM_P (operands[0]))
14211 return rtx_equal_p (operands[0], operands[1 + high]);
14212 if (MEM_P (operands[1]) && MEM_P (operands[2]))
14213 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
14214 return true;
14217 /* Post-reload splitter for converting an SF or DFmode value in an
14218 SSE register into an unsigned SImode. */
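/* A sketch of what the emitted sequence computes: values below 2**31
   are converted directly with the truncating cvtt instruction, while
   values of at least 2**31 first have 2**31 subtracted and then get
   bit 31 put back (via the xor at the end), so the result is the
   correct unsigned SImode bit pattern in both cases.  */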
14220 void
14221 ix86_split_convert_uns_si_sse (rtx operands[])
14223 enum machine_mode vecmode;
14224 rtx value, large, zero_or_two31, input, two31, x;
14226 large = operands[1];
14227 zero_or_two31 = operands[2];
14228 input = operands[3];
14229 two31 = operands[4];
14230 vecmode = GET_MODE (large);
14231 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
14233 /* Load up the value into the low element. We must ensure that the other
14234 elements are valid floats -- zero is the easiest such value. */
14235 if (MEM_P (input))
14237 if (vecmode == V4SFmode)
14238 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
14239 else
14240 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
14242 else
14244 input = gen_rtx_REG (vecmode, REGNO (input));
14245 emit_move_insn (value, CONST0_RTX (vecmode));
14246 if (vecmode == V4SFmode)
14247 emit_insn (gen_sse_movss (value, value, input));
14248 else
14249 emit_insn (gen_sse2_movsd (value, value, input));
14252 emit_move_insn (large, two31);
14253 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
14255 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
14256 emit_insn (gen_rtx_SET (VOIDmode, large, x));
14258 x = gen_rtx_AND (vecmode, zero_or_two31, large);
14259 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
14261 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14262 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14264 large = gen_rtx_REG (V4SImode, REGNO (large));
14265 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
14267 x = gen_rtx_REG (V4SImode, REGNO (value));
14268 if (vecmode == V4SFmode)
14269 emit_insn (gen_sse2_cvttps2dq (x, value));
14270 else
14271 emit_insn (gen_sse2_cvttpd2dq (x, value));
14272 value = x;
14274 emit_insn (gen_xorv4si3 (value, value, large));
14277 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14278 Expects the 64-bit DImode to be supplied in a pair of integral
14279 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14280 -mfpmath=sse, !optimize_size only. */
14282 void
14283 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14285 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14286 rtx int_xmm, fp_xmm;
14287 rtx biases, exponents;
14288 rtx x;
14290 int_xmm = gen_reg_rtx (V4SImode);
14291 if (TARGET_INTER_UNIT_MOVES)
14292 emit_insn (gen_movdi_to_sse (int_xmm, input));
14293 else if (TARGET_SSE_SPLIT_REGS)
14295 emit_clobber (int_xmm);
14296 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14298 else
14300 x = gen_reg_rtx (V2DImode);
14301 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14302 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14305 x = gen_rtx_CONST_VECTOR (V4SImode,
14306 gen_rtvec (4, GEN_INT (0x43300000UL),
14307 GEN_INT (0x45300000UL),
14308 const0_rtx, const0_rtx));
14309 exponents = validize_mem (force_const_mem (V4SImode, x));
14311 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14312 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14314 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14315 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14316 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14317 (0x1.0p84 + double(fp_value_hi_xmm)).
14318 Note these exponents differ by 32. */
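   /* As a concrete example of the arithmetic above: for the input
      0x00000001_00000003 (hi = 1, lo = 3) the low double works out to
      0x1.0p52 + 3.0 and the high double to 0x1.0p84 + 2**32; after the
      bias subtraction below they become 3.0 and 4294967296.0, which
      sum to 4294967299.0, the desired result.  */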
14320 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14322 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14323 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14324 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14325 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14326 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14327 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14328 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14329 biases = validize_mem (force_const_mem (V2DFmode, biases));
14330 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
14332 /* Add the upper and lower DFmode values together. */
14333 if (TARGET_SSE3)
14334 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14335 else
14337 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14338 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14339 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14342 ix86_expand_vector_extract (false, target, fp_xmm, 0);
14345 /* Not used, but eases macroization of patterns. */
14346 void
14347 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14348 rtx input ATTRIBUTE_UNUSED)
14350 gcc_unreachable ();
14353 /* Convert an unsigned SImode value into a DFmode. Only currently used
14354 for SSE, but applicable anywhere. */
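/* A sketch of the trick used below: the unsigned input is biased by
   -2**31 so that it fits in signed SImode, converted with the signed
   int-to-double instruction, and then 2**31 is added back as a double.
   For example, input 0xffffffff becomes 0x7fffffff (2147483647) after
   the bias, converts exactly, and adding 2147483648.0 yields
   4294967295.0.  */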
14356 void
14357 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14359 REAL_VALUE_TYPE TWO31r;
14360 rtx x, fp;
14362 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14363 NULL, 1, OPTAB_DIRECT);
14365 fp = gen_reg_rtx (DFmode);
14366 emit_insn (gen_floatsidf2 (fp, x));
14368 real_ldexp (&TWO31r, &dconst1, 31);
14369 x = const_double_from_real_value (TWO31r, DFmode);
14371 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14372 if (x != target)
14373 emit_move_insn (target, x);
14376 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14377 32-bit mode; otherwise we have a direct convert instruction. */
14379 void
14380 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14382 REAL_VALUE_TYPE TWO32r;
14383 rtx fp_lo, fp_hi, x;
14385 fp_lo = gen_reg_rtx (DFmode);
14386 fp_hi = gen_reg_rtx (DFmode);
14388 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14390 real_ldexp (&TWO32r, &dconst1, 32);
14391 x = const_double_from_real_value (TWO32r, DFmode);
14392 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14394 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14396 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14397 0, OPTAB_DIRECT);
14398 if (x != target)
14399 emit_move_insn (target, x);
14402 /* Convert an unsigned SImode value into an SFmode, using only SSE.
14403 For x86_32, -mfpmath=sse, !optimize_size only. */
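/* A rough outline of the expansion below: the input is split into its
   low and high 16-bit halves, each half is converted exactly by the
   signed int-to-float instruction, and the result is computed as
   fp_hi * 2**16 + fp_lo.  For example, 0x00012345 becomes
   1.0 * 65536.0 + 9029.0 = 74565.0.  */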
14404 void
14405 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14407 REAL_VALUE_TYPE ONE16r;
14408 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14410 real_ldexp (&ONE16r, &dconst1, 16);
14411 x = const_double_from_real_value (ONE16r, SFmode);
14412 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14413 NULL, 0, OPTAB_DIRECT);
14414 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14415 NULL, 0, OPTAB_DIRECT);
14416 fp_hi = gen_reg_rtx (SFmode);
14417 fp_lo = gen_reg_rtx (SFmode);
14418 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14419 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14420 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14421 0, OPTAB_DIRECT);
14422 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14423 0, OPTAB_DIRECT);
14424 if (!rtx_equal_p (target, fp_hi))
14425 emit_move_insn (target, fp_hi);
14428 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14429 then replicate the value for all elements of the vector
14430 register. */
14433 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14435 rtvec v;
14436 switch (mode)
14438 case SImode:
14439 gcc_assert (vect);
14440 v = gen_rtvec (4, value, value, value, value);
14441 return gen_rtx_CONST_VECTOR (V4SImode, v);
14443 case DImode:
14444 gcc_assert (vect);
14445 v = gen_rtvec (2, value, value);
14446 return gen_rtx_CONST_VECTOR (V2DImode, v);
14448 case SFmode:
14449 if (vect)
14450 v = gen_rtvec (4, value, value, value, value);
14451 else
14452 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14453 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14454 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14456 case DFmode:
14457 if (vect)
14458 v = gen_rtvec (2, value, value);
14459 else
14460 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14461 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14463 default:
14464 gcc_unreachable ();
14468 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14469 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14470 for an SSE register. If VECT is true, then replicate the mask for
14471 all elements of the vector register. If INVERT is true, then create
14472 a mask excluding the sign bit. */
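/* For instance, for DFmode the mask built here is the bit pattern
   0x8000000000000000 in each element (or its complement
   0x7fffffffffffffff when INVERT is true), and for SFmode it is
   0x80000000; callers combine it with AND, XOR or IOR to clear, flip
   or copy the sign bit.  */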
14475 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14477 enum machine_mode vec_mode, imode;
14478 HOST_WIDE_INT hi, lo;
14479 int shift = 63;
14480 rtx v;
14481 rtx mask;
14483 /* Find the sign bit, sign extended to 2*HWI. */
14484 switch (mode)
14486 case SImode:
14487 case SFmode:
14488 imode = SImode;
14489 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14490 lo = 0x80000000, hi = lo < 0;
14491 break;
14493 case DImode:
14494 case DFmode:
14495 imode = DImode;
14496 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14497 if (HOST_BITS_PER_WIDE_INT >= 64)
14498 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14499 else
14500 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14501 break;
14503 case TImode:
14504 case TFmode:
14505 vec_mode = VOIDmode;
14506 if (HOST_BITS_PER_WIDE_INT >= 64)
14508 imode = TImode;
14509 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14511 else
14513 rtvec vec;
14515 imode = DImode;
14516 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14518 if (invert)
14520 lo = ~lo, hi = ~hi;
14521 v = constm1_rtx;
14523 else
14524 v = const0_rtx;
14526 mask = immed_double_const (lo, hi, imode);
14528 vec = gen_rtvec (2, v, mask);
14529 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14530 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14532 return v;
14534 break;
14536 default:
14537 gcc_unreachable ();
14540 if (invert)
14541 lo = ~lo, hi = ~hi;
14543 /* Force this value into the low part of a fp vector constant. */
14544 mask = immed_double_const (lo, hi, imode);
14545 mask = gen_lowpart (mode, mask);
14547 if (vec_mode == VOIDmode)
14548 return force_reg (mode, mask);
14550 v = ix86_build_const_vector (mode, vect, mask);
14551 return force_reg (vec_mode, v);
14554 /* Generate code for floating point ABS or NEG. */
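/* With SSE these reduce to bitwise operations on the sign bit:
   roughly, NEG is computed as x ^ sign_bit_mask (xorps/xorpd) and ABS
   as x & ~sign_bit_mask (andps/andpd), using masks built by
   ix86_build_signbit_mask above.  */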
14556 void
14557 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14558 rtx operands[])
14560 rtx mask, set, use, clob, dst, src;
14561 bool use_sse = false;
14562 bool vector_mode = VECTOR_MODE_P (mode);
14563 enum machine_mode elt_mode = mode;
14565 if (vector_mode)
14567 elt_mode = GET_MODE_INNER (mode);
14568 use_sse = true;
14570 else if (mode == TFmode)
14571 use_sse = true;
14572 else if (TARGET_SSE_MATH)
14573 use_sse = SSE_FLOAT_MODE_P (mode);
14575 /* NEG and ABS performed with SSE use bitwise mask operations.
14576 Create the appropriate mask now. */
14577 if (use_sse)
14578 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14579 else
14580 mask = NULL_RTX;
14582 dst = operands[0];
14583 src = operands[1];
14585 if (vector_mode)
14587 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14588 set = gen_rtx_SET (VOIDmode, dst, set);
14589 emit_insn (set);
14591 else
14593 set = gen_rtx_fmt_e (code, mode, src);
14594 set = gen_rtx_SET (VOIDmode, dst, set);
14595 if (mask)
14597 use = gen_rtx_USE (VOIDmode, mask);
14598 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14599 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14600 gen_rtvec (3, set, use, clob)));
14602 else
14603 emit_insn (set);
14607 /* Expand a copysign operation. Special case operand 0 being a constant. */
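/* Conceptually the expansion computes copysign (a, b) as
   (a & ~sign_bit_mask) | (b & sign_bit_mask), i.e. the magnitude of
   the first source operand combined with the sign bit of the second,
   using masks from ix86_build_signbit_mask.  When the first operand is
   a constant, its absolute value is substituted up front so that only
   the plain sign-bit mask is needed.  */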
14609 void
14610 ix86_expand_copysign (rtx operands[])
14612 enum machine_mode mode;
14613 rtx dest, op0, op1, mask, nmask;
14615 dest = operands[0];
14616 op0 = operands[1];
14617 op1 = operands[2];
14619 mode = GET_MODE (dest);
14621 if (GET_CODE (op0) == CONST_DOUBLE)
14623 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14625 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14626 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14628 if (mode == SFmode || mode == DFmode)
14630 enum machine_mode vmode;
14632 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14634 if (op0 == CONST0_RTX (mode))
14635 op0 = CONST0_RTX (vmode);
14636 else
14638 rtx v = ix86_build_const_vector (mode, false, op0);
14640 op0 = force_reg (vmode, v);
14643 else if (op0 != CONST0_RTX (mode))
14644 op0 = force_reg (mode, op0);
14646 mask = ix86_build_signbit_mask (mode, 0, 0);
14648 if (mode == SFmode)
14649 copysign_insn = gen_copysignsf3_const;
14650 else if (mode == DFmode)
14651 copysign_insn = gen_copysigndf3_const;
14652 else
14653 copysign_insn = gen_copysigntf3_const;
14655 emit_insn (copysign_insn (dest, op0, op1, mask));
14657 else
14659 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14661 nmask = ix86_build_signbit_mask (mode, 0, 1);
14662 mask = ix86_build_signbit_mask (mode, 0, 0);
14664 if (mode == SFmode)
14665 copysign_insn = gen_copysignsf3_var;
14666 else if (mode == DFmode)
14667 copysign_insn = gen_copysigndf3_var;
14668 else
14669 copysign_insn = gen_copysigntf3_var;
14671 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14675 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14676 be a constant, and so has already been expanded into a vector constant. */
14678 void
14679 ix86_split_copysign_const (rtx operands[])
14681 enum machine_mode mode, vmode;
14682 rtx dest, op0, mask, x;
14684 dest = operands[0];
14685 op0 = operands[1];
14686 mask = operands[3];
14688 mode = GET_MODE (dest);
14689 vmode = GET_MODE (mask);
14691 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14692 x = gen_rtx_AND (vmode, dest, mask);
14693 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14695 if (op0 != CONST0_RTX (vmode))
14697 x = gen_rtx_IOR (vmode, dest, op0);
14698 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14702 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14703 so we have to do two masks. */
14705 void
14706 ix86_split_copysign_var (rtx operands[])
14708 enum machine_mode mode, vmode;
14709 rtx dest, scratch, op0, op1, mask, nmask, x;
14711 dest = operands[0];
14712 scratch = operands[1];
14713 op0 = operands[2];
14714 op1 = operands[3];
14715 nmask = operands[4];
14716 mask = operands[5];
14718 mode = GET_MODE (dest);
14719 vmode = GET_MODE (mask);
14721 if (rtx_equal_p (op0, op1))
14723 /* Shouldn't happen often (it's useless, obviously), but when it does
14724 we'd generate incorrect code if we continue below. */
14725 emit_move_insn (dest, op0);
14726 return;
14729 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14731 gcc_assert (REGNO (op1) == REGNO (scratch));
14733 x = gen_rtx_AND (vmode, scratch, mask);
14734 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14736 dest = mask;
14737 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14738 x = gen_rtx_NOT (vmode, dest);
14739 x = gen_rtx_AND (vmode, x, op0);
14740 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14742 else
14744 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14746 x = gen_rtx_AND (vmode, scratch, mask);
14748 else /* alternative 2,4 */
14750 gcc_assert (REGNO (mask) == REGNO (scratch));
14751 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14752 x = gen_rtx_AND (vmode, scratch, op1);
14754 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14756 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14758 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14759 x = gen_rtx_AND (vmode, dest, nmask);
14761 else /* alternative 3,4 */
14763 gcc_assert (REGNO (nmask) == REGNO (dest));
14764 dest = nmask;
14765 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14766 x = gen_rtx_AND (vmode, dest, op0);
14768 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14771 x = gen_rtx_IOR (vmode, dest, scratch);
14772 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14775 /* Return TRUE or FALSE depending on whether the first SET in INSN
14776 has source and destination with matching CC modes and whether the
14777 CC mode is at least as constrained as REQ_MODE. */
14780 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14782 rtx set;
14783 enum machine_mode set_mode;
14785 set = PATTERN (insn);
14786 if (GET_CODE (set) == PARALLEL)
14787 set = XVECEXP (set, 0, 0);
14788 gcc_assert (GET_CODE (set) == SET);
14789 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14791 set_mode = GET_MODE (SET_DEST (set));
14792 switch (set_mode)
14794 case CCNOmode:
14795 if (req_mode != CCNOmode
14796 && (req_mode != CCmode
14797 || XEXP (SET_SRC (set), 1) != const0_rtx))
14798 return 0;
14799 break;
14800 case CCmode:
14801 if (req_mode == CCGCmode)
14802 return 0;
14803 /* FALLTHRU */
14804 case CCGCmode:
14805 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14806 return 0;
14807 /* FALLTHRU */
14808 case CCGOCmode:
14809 if (req_mode == CCZmode)
14810 return 0;
14811 /* FALLTHRU */
14812 case CCAmode:
14813 case CCCmode:
14814 case CCOmode:
14815 case CCSmode:
14816 case CCZmode:
14817 break;
14819 default:
14820 gcc_unreachable ();
14823 return (GET_MODE (SET_SRC (set)) == set_mode);
14826 /* Generate insn patterns to do an integer compare of OPERANDS. */
14828 static rtx
14829 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14831 enum machine_mode cmpmode;
14832 rtx tmp, flags;
14834 cmpmode = SELECT_CC_MODE (code, op0, op1);
14835 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14837 /* This is very simple, but making the interface the same as in the
14838 FP case makes the rest of the code easier. */
14839 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14840 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14842 /* Return the test that should be put into the flags user, i.e.
14843 the bcc, scc, or cmov instruction. */
14844 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14847 /* Figure out whether to use ordered or unordered fp comparisons.
14848 Return the appropriate mode to use. */
14850 enum machine_mode
14851 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14853 /* ??? In order to make all comparisons reversible, we do all comparisons
14854 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14855 between all forms of trapping and nontrapping comparisons, we can make inequality
14856 comparisons trapping again, since it results in better code when using
14857 FCOM based compares. */
14858 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14861 enum machine_mode
14862 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14864 enum machine_mode mode = GET_MODE (op0);
14866 if (SCALAR_FLOAT_MODE_P (mode))
14868 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14869 return ix86_fp_compare_mode (code);
14872 switch (code)
14874 /* Only zero flag is needed. */
14875 case EQ: /* ZF=0 */
14876 case NE: /* ZF!=0 */
14877 return CCZmode;
14878 /* Codes needing carry flag. */
14879 case GEU: /* CF=0 */
14880 case LTU: /* CF=1 */
14881 /* Detect overflow checks. They need just the carry flag. */
14882 if (GET_CODE (op0) == PLUS
14883 && rtx_equal_p (op1, XEXP (op0, 0)))
14884 return CCCmode;
14885 else
14886 return CCmode;
14887 case GTU: /* CF=0 & ZF=0 */
14888 case LEU: /* CF=1 | ZF=1 */
14889 /* Detect overflow checks. They need just the carry flag. */
14890 if (GET_CODE (op0) == MINUS
14891 && rtx_equal_p (op1, XEXP (op0, 0)))
14892 return CCCmode;
14893 else
14894 return CCmode;
14895 /* Codes possibly doable only with sign flag when
14896 comparing against zero. */
14897 case GE: /* SF=OF or SF=0 */
14898 case LT: /* SF<>OF or SF=1 */
14899 if (op1 == const0_rtx)
14900 return CCGOCmode;
14901 else
14902 /* For other cases Carry flag is not required. */
14903 return CCGCmode;
14904 /* Codes doable only with sign flag when comparing
14905 against zero, but we miss the jump instruction for it,
14906 so we need to use relational tests against overflow,
14907 which thus needs to be zero. */
14908 case GT: /* ZF=0 & SF=OF */
14909 case LE: /* ZF=1 | SF<>OF */
14910 if (op1 == const0_rtx)
14911 return CCNOmode;
14912 else
14913 return CCGCmode;
14914 /* The strcmp pattern does (use flags) and combine may ask us for the proper
14915 mode. */
14916 case USE:
14917 return CCmode;
14918 default:
14919 gcc_unreachable ();
14923 /* Return the fixed registers used for condition codes. */
14925 static bool
14926 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14928 *p1 = FLAGS_REG;
14929 *p2 = FPSR_REG;
14930 return true;
14933 /* If two condition code modes are compatible, return a condition code
14934 mode which is compatible with both. Otherwise, return
14935 VOIDmode. */
14937 static enum machine_mode
14938 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14940 if (m1 == m2)
14941 return m1;
14943 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14944 return VOIDmode;
14946 if ((m1 == CCGCmode && m2 == CCGOCmode)
14947 || (m1 == CCGOCmode && m2 == CCGCmode))
14948 return CCGCmode;
14950 switch (m1)
14952 default:
14953 gcc_unreachable ();
14955 case CCmode:
14956 case CCGCmode:
14957 case CCGOCmode:
14958 case CCNOmode:
14959 case CCAmode:
14960 case CCCmode:
14961 case CCOmode:
14962 case CCSmode:
14963 case CCZmode:
14964 switch (m2)
14966 default:
14967 return VOIDmode;
14969 case CCmode:
14970 case CCGCmode:
14971 case CCGOCmode:
14972 case CCNOmode:
14973 case CCAmode:
14974 case CCCmode:
14975 case CCOmode:
14976 case CCSmode:
14977 case CCZmode:
14978 return CCmode;
14981 case CCFPmode:
14982 case CCFPUmode:
14983 /* These are only compatible with themselves, which we already
14984 checked above. */
14985 return VOIDmode;
14990 /* Return a comparison we can do that is equivalent to
14991 swap_condition (code), apart possibly from orderedness.
14992 But, never change orderedness if TARGET_IEEE_FP, returning
14993 UNKNOWN in that case if necessary. */
14995 static enum rtx_code
14996 ix86_fp_swap_condition (enum rtx_code code)
14998 switch (code)
15000 case GT: /* GTU - CF=0 & ZF=0 */
15001 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
15002 case GE: /* GEU - CF=0 */
15003 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
15004 case UNLT: /* LTU - CF=1 */
15005 return TARGET_IEEE_FP ? UNKNOWN : GT;
15006 case UNLE: /* LEU - CF=1 | ZF=1 */
15007 return TARGET_IEEE_FP ? UNKNOWN : GE;
15008 default:
15009 return swap_condition (code);
15013 /* Return the cost of comparison CODE using the best strategy for performance.
15014 All following functions use the number of instructions as a cost metric.
15015 In the future this should be tweaked to compute bytes for optimize_size and
15016 take into account performance of various instructions on various CPUs. */
15018 static int
15019 ix86_fp_comparison_cost (enum rtx_code code)
15021 int arith_cost;
15023 /* The cost of code using bit-twiddling on %ah. */
15024 switch (code)
15026 case UNLE:
15027 case UNLT:
15028 case LTGT:
15029 case GT:
15030 case GE:
15031 case UNORDERED:
15032 case ORDERED:
15033 case UNEQ:
15034 arith_cost = 4;
15035 break;
15036 case LT:
15037 case NE:
15038 case EQ:
15039 case UNGE:
15040 arith_cost = TARGET_IEEE_FP ? 5 : 4;
15041 break;
15042 case LE:
15043 case UNGT:
15044 arith_cost = TARGET_IEEE_FP ? 6 : 4;
15045 break;
15046 default:
15047 gcc_unreachable ();
15050 switch (ix86_fp_comparison_strategy (code))
15052 case IX86_FPCMP_COMI:
15053 return arith_cost > 4 ? 3 : 2;
15054 case IX86_FPCMP_SAHF:
15055 return arith_cost > 4 ? 4 : 3;
15056 default:
15057 return arith_cost;
15061 /* Return the strategy to use for floating-point comparisons. We assume that fcomi is always
15062 preferable where available, since that is also true when looking at size
15063 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
15065 enum ix86_fpcmp_strategy
15066 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
15068 /* Do fcomi/sahf based test when profitable. */
15070 if (TARGET_CMOVE)
15071 return IX86_FPCMP_COMI;
15073 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
15074 return IX86_FPCMP_SAHF;
15076 return IX86_FPCMP_ARITH;
15079 /* Swap, force into registers, or otherwise massage the two operands
15080 to a fp comparison. The operands are updated in place; the new
15081 comparison code is returned. */
15083 static enum rtx_code
15084 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
15086 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
15087 rtx op0 = *pop0, op1 = *pop1;
15088 enum machine_mode op_mode = GET_MODE (op0);
15089 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
15091 /* All of the unordered compare instructions only work on registers.
15092 The same is true of the fcomi compare instructions. The XFmode
15093 compare instructions require registers except when comparing
15094 against zero or when converting operand 1 from fixed point to
15095 floating point. */
15097 if (!is_sse
15098 && (fpcmp_mode == CCFPUmode
15099 || (op_mode == XFmode
15100 && ! (standard_80387_constant_p (op0) == 1
15101 || standard_80387_constant_p (op1) == 1)
15102 && GET_CODE (op1) != FLOAT)
15103 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
15105 op0 = force_reg (op_mode, op0);
15106 op1 = force_reg (op_mode, op1);
15108 else
15110 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
15111 things around if they appear profitable, otherwise force op0
15112 into a register. */
15114 if (standard_80387_constant_p (op0) == 0
15115 || (MEM_P (op0)
15116 && ! (standard_80387_constant_p (op1) == 0
15117 || MEM_P (op1))))
15119 enum rtx_code new_code = ix86_fp_swap_condition (code);
15120 if (new_code != UNKNOWN)
15122 rtx tmp;
15123 tmp = op0, op0 = op1, op1 = tmp;
15124 code = new_code;
15128 if (!REG_P (op0))
15129 op0 = force_reg (op_mode, op0);
15131 if (CONSTANT_P (op1))
15133 int tmp = standard_80387_constant_p (op1);
15134 if (tmp == 0)
15135 op1 = validize_mem (force_const_mem (op_mode, op1));
15136 else if (tmp == 1)
15138 if (TARGET_CMOVE)
15139 op1 = force_reg (op_mode, op1);
15141 else
15142 op1 = force_reg (op_mode, op1);
15146 /* Try to rearrange the comparison to make it cheaper. */
15147 if (ix86_fp_comparison_cost (code)
15148 > ix86_fp_comparison_cost (swap_condition (code))
15149 && (REG_P (op1) || can_create_pseudo_p ()))
15151 rtx tmp;
15152 tmp = op0, op0 = op1, op1 = tmp;
15153 code = swap_condition (code);
15154 if (!REG_P (op0))
15155 op0 = force_reg (op_mode, op0);
15158 *pop0 = op0;
15159 *pop1 = op1;
15160 return code;
15163 /* Convert the comparison codes we use to represent FP comparisons to the integer
15164 code that will result in a proper branch. Return UNKNOWN if no such code
15165 is available. */
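/* For reference: after fcomi (or fnstsw followed by sahf) the CF and
   ZF flags end up set as if an unsigned integer comparison had been
   performed, which is why GT maps to GTU, GE to GEU, UNLT to LTU and
   UNLE to LEU below.  */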
15167 enum rtx_code
15168 ix86_fp_compare_code_to_integer (enum rtx_code code)
15170 switch (code)
15172 case GT:
15173 return GTU;
15174 case GE:
15175 return GEU;
15176 case ORDERED:
15177 case UNORDERED:
15178 return code;
15179 break;
15180 case UNEQ:
15181 return EQ;
15182 break;
15183 case UNLT:
15184 return LTU;
15185 break;
15186 case UNLE:
15187 return LEU;
15188 break;
15189 case LTGT:
15190 return NE;
15191 break;
15192 default:
15193 return UNKNOWN;
15197 /* Generate insn patterns to do a floating point compare of OPERANDS. */
15199 static rtx
15200 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
15202 enum machine_mode fpcmp_mode, intcmp_mode;
15203 rtx tmp, tmp2;
15205 fpcmp_mode = ix86_fp_compare_mode (code);
15206 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
15208 /* Do fcomi/sahf based test when profitable. */
15209 switch (ix86_fp_comparison_strategy (code))
15211 case IX86_FPCMP_COMI:
15212 intcmp_mode = fpcmp_mode;
15213 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15214 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15215 tmp);
15216 emit_insn (tmp);
15217 break;
15219 case IX86_FPCMP_SAHF:
15220 intcmp_mode = fpcmp_mode;
15221 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15222 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15223 tmp);
15225 if (!scratch)
15226 scratch = gen_reg_rtx (HImode);
15227 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
15228 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
15229 break;
15231 case IX86_FPCMP_ARITH:
15232 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
15233 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15234 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
15235 if (!scratch)
15236 scratch = gen_reg_rtx (HImode);
15237 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
15239 /* In the unordered case, we have to check C2 for NaN's, which
15240 doesn't happen to work out to anything nice combination-wise.
15241 So do some bit twiddling on the value we've got in AH to come
15242 up with an appropriate set of condition codes. */
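      /* For reference: after fnstsw %ax the x87 condition bits land in
	 %ah as C0 = 0x01, C2 = 0x04 and C3 = 0x40, so the constants
	 0x45, 0x44, 0x40, 0x05, 0x04 and 0x01 used below select
	 combinations of those bits.  A compare sets C0 when st(0) is
	 less than the operand, C3 when they are equal, and all of
	 C0/C2/C3 for an unordered result.  */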
15244 intcmp_mode = CCNOmode;
15245 switch (code)
15247 case GT:
15248 case UNGT:
15249 if (code == GT || !TARGET_IEEE_FP)
15251 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15252 code = EQ;
15254 else
15256 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15257 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15258 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
15259 intcmp_mode = CCmode;
15260 code = GEU;
15262 break;
15263 case LT:
15264 case UNLT:
15265 if (code == LT && TARGET_IEEE_FP)
15267 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15268 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15269 intcmp_mode = CCmode;
15270 code = EQ;
15272 else
15274 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15275 code = NE;
15277 break;
15278 case GE:
15279 case UNGE:
15280 if (code == GE || !TARGET_IEEE_FP)
15282 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15283 code = EQ;
15285 else
15287 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15288 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15289 code = NE;
15291 break;
15292 case LE:
15293 case UNLE:
15294 if (code == LE && TARGET_IEEE_FP)
15296 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15297 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15298 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15299 intcmp_mode = CCmode;
15300 code = LTU;
15302 else
15304 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15305 code = NE;
15307 break;
15308 case EQ:
15309 case UNEQ:
15310 if (code == EQ && TARGET_IEEE_FP)
15312 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15313 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15314 intcmp_mode = CCmode;
15315 code = EQ;
15317 else
15319 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15320 code = NE;
15322 break;
15323 case NE:
15324 case LTGT:
15325 if (code == NE && TARGET_IEEE_FP)
15327 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15328 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15329 GEN_INT (0x40)));
15330 code = NE;
15332 else
15334 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15335 code = EQ;
15337 break;
15339 case UNORDERED:
15340 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15341 code = NE;
15342 break;
15343 case ORDERED:
15344 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15345 code = EQ;
15346 break;
15348 default:
15349 gcc_unreachable ();
15351 break;
15353 default:
15354 gcc_unreachable();
15357 /* Return the test that should be put into the flags user, i.e.
15358 the bcc, scc, or cmov instruction. */
15359 return gen_rtx_fmt_ee (code, VOIDmode,
15360 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15361 const0_rtx);
15365 ix86_expand_compare (enum rtx_code code)
15367 rtx op0, op1, ret;
15368 op0 = ix86_compare_op0;
15369 op1 = ix86_compare_op1;
15371 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15372 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15374 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15376 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15377 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15379 else
15380 ret = ix86_expand_int_compare (code, op0, op1);
15382 return ret;
15385 void
15386 ix86_expand_branch (enum rtx_code code, rtx label)
15388 rtx tmp;
15390 switch (GET_MODE (ix86_compare_op0))
15392 case SFmode:
15393 case DFmode:
15394 case XFmode:
15395 case QImode:
15396 case HImode:
15397 case SImode:
15398 simple:
15399 tmp = ix86_expand_compare (code);
15400 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15401 gen_rtx_LABEL_REF (VOIDmode, label),
15402 pc_rtx);
15403 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15404 return;
15406 case DImode:
15407 if (TARGET_64BIT)
15408 goto simple;
15409 case TImode:
15410 /* Expand DImode branch into multiple compare+branch. */
15412 rtx lo[2], hi[2], label2;
15413 enum rtx_code code1, code2, code3;
15414 enum machine_mode submode;
15416 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15418 tmp = ix86_compare_op0;
15419 ix86_compare_op0 = ix86_compare_op1;
15420 ix86_compare_op1 = tmp;
15421 code = swap_condition (code);
15423 if (GET_MODE (ix86_compare_op0) == DImode)
15425 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15426 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15427 submode = SImode;
15429 else
15431 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15432 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15433 submode = DImode;
15436 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15437 avoid two branches. This costs one extra insn, so disable when
15438 optimizing for size. */
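/* E.g. a DImode "a == b" on ia32 becomes two SImode xors whose results are
   ored together and compared against zero; the recursive call below then
   emits a single test-and-branch on the combined value.  */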
15440 if ((code == EQ || code == NE)
15441 && (!optimize_insn_for_size_p ()
15442 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15444 rtx xor0, xor1;
15446 xor1 = hi[0];
15447 if (hi[1] != const0_rtx)
15448 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15449 NULL_RTX, 0, OPTAB_WIDEN);
15451 xor0 = lo[0];
15452 if (lo[1] != const0_rtx)
15453 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15454 NULL_RTX, 0, OPTAB_WIDEN);
15456 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15457 NULL_RTX, 0, OPTAB_WIDEN);
15459 ix86_compare_op0 = tmp;
15460 ix86_compare_op1 = const0_rtx;
15461 ix86_expand_branch (code, label);
15462 return;
15465 /* Otherwise, if we are doing less-than or greater-or-equal-than,
15466 op1 is a constant and the low word is zero, then we can just
15467 examine the high word. Similarly for low word -1 and
15468 less-or-equal-than or greater-than. */
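/* For illustration: with a DImode "a <u 0x300000000" the low word of the
   constant is zero, so the test reduces to "hi(a) <u 3"; likewise
   "a <=u 0x2ffffffff" (low word all ones) reduces to "hi(a) <=u 2".  */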
15470 if (CONST_INT_P (hi[1]))
15471 switch (code)
15473 case LT: case LTU: case GE: case GEU:
15474 if (lo[1] == const0_rtx)
15476 ix86_compare_op0 = hi[0];
15477 ix86_compare_op1 = hi[1];
15478 ix86_expand_branch (code, label);
15479 return;
15481 break;
15482 case LE: case LEU: case GT: case GTU:
15483 if (lo[1] == constm1_rtx)
15485 ix86_compare_op0 = hi[0];
15486 ix86_compare_op1 = hi[1];
15487 ix86_expand_branch (code, label);
15488 return;
15490 break;
15491 default:
15492 break;
15495 /* Otherwise, we need two or three jumps. */
15497 label2 = gen_label_rtx ();
15499 code1 = code;
15500 code2 = swap_condition (code);
15501 code3 = unsigned_condition (code);
15503 switch (code)
15505 case LT: case GT: case LTU: case GTU:
15506 break;
15508 case LE: code1 = LT; code2 = GT; break;
15509 case GE: code1 = GT; code2 = LT; break;
15510 case LEU: code1 = LTU; code2 = GTU; break;
15511 case GEU: code1 = GTU; code2 = LTU; break;
15513 case EQ: code1 = UNKNOWN; code2 = NE; break;
15514 case NE: code2 = UNKNOWN; break;
15516 default:
15517 gcc_unreachable ();
15521 * a < b =>
15522 * if (hi(a) < hi(b)) goto true;
15523 * if (hi(a) > hi(b)) goto false;
15524 * if (lo(a) < lo(b)) goto true;
15525 * false:
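/* Note that only the high-word comparisons keep the original signedness;
   once the high words are known equal, the low words are compared with the
   unsigned variant (code3 = unsigned_condition (code)), e.g. a signed LE
   uses LT/GT on the high words and LEU on the low words.  */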
15528 ix86_compare_op0 = hi[0];
15529 ix86_compare_op1 = hi[1];
15531 if (code1 != UNKNOWN)
15532 ix86_expand_branch (code1, label);
15533 if (code2 != UNKNOWN)
15534 ix86_expand_branch (code2, label2);
15536 ix86_compare_op0 = lo[0];
15537 ix86_compare_op1 = lo[1];
15538 ix86_expand_branch (code3, label);
15540 if (code2 != UNKNOWN)
15541 emit_label (label2);
15542 return;
15545 default:
15546 /* If we have already emitted a compare insn, go straight to simple.
15547 ix86_expand_compare won't emit anything when ix86_compare_op0
15548 already has a MODE_CC class mode. */
15549 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15550 goto simple;
15554 /* Split branch based on floating point condition. */
15555 void
15556 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15557 rtx target1, rtx target2, rtx tmp, rtx pushed)
15559 rtx condition;
15560 rtx i;
15562 if (target2 != pc_rtx)
15564 rtx tmp = target2;
15565 code = reverse_condition_maybe_unordered (code);
15566 target2 = target1;
15567 target1 = tmp;
15570 condition = ix86_expand_fp_compare (code, op1, op2,
15571 tmp);
15573 /* Remove pushed operand from stack. */
15574 if (pushed)
15575 ix86_free_from_memory (GET_MODE (pushed));
15577 i = emit_jump_insn (gen_rtx_SET
15578 (VOIDmode, pc_rtx,
15579 gen_rtx_IF_THEN_ELSE (VOIDmode,
15580 condition, target1, target2)));
15581 if (split_branch_probability >= 0)
15582 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15585 void
15586 ix86_expand_setcc (enum rtx_code code, rtx dest)
15588 rtx ret;
15590 gcc_assert (GET_MODE (dest) == QImode);
15592 ret = ix86_expand_compare (code);
15593 PUT_MODE (ret, QImode);
15594 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15597 /* Expand comparison setting or clearing carry flag. Return true when
15598 successful and set pop for the operation. */
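/* Roughly: the goal is to reduce the comparison to an unsigned LTU/GEU so
   that its result lives entirely in the carry flag and can be consumed by
   sbb/adc.  E.g. "a == 0" is rewritten as "(unsigned) a < 1", which sets
   the carry flag exactly when a is zero.  */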
15599 static bool
15600 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15602 enum machine_mode mode =
15603 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15605 /* Do not handle DImode compares that go through special path. */
15606 if (mode == (TARGET_64BIT ? TImode : DImode))
15607 return false;
15609 if (SCALAR_FLOAT_MODE_P (mode))
15611 rtx compare_op, compare_seq;
15613 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15615 /* Shortcut: the following common codes never translate
15616 into carry flag compares. */
15617 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15618 || code == ORDERED || code == UNORDERED)
15619 return false;
15621 /* These comparisons require zero flag; swap operands so they won't. */
15622 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15623 && !TARGET_IEEE_FP)
15625 rtx tmp = op0;
15626 op0 = op1;
15627 op1 = tmp;
15628 code = swap_condition (code);
15631 /* Try to expand the comparison and verify that we end up with
15632 a carry flag based comparison. This fails to be true only when
15633 we decide to expand the comparison using arithmetic, which is
15634 not a very common scenario. */
15635 start_sequence ();
15636 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15637 compare_seq = get_insns ();
15638 end_sequence ();
15640 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15641 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15642 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15643 else
15644 code = GET_CODE (compare_op);
15646 if (code != LTU && code != GEU)
15647 return false;
15649 emit_insn (compare_seq);
15650 *pop = compare_op;
15651 return true;
15654 if (!INTEGRAL_MODE_P (mode))
15655 return false;
15657 switch (code)
15659 case LTU:
15660 case GEU:
15661 break;
15663 /* Convert a==0 into (unsigned)a<1. */
15664 case EQ:
15665 case NE:
15666 if (op1 != const0_rtx)
15667 return false;
15668 op1 = const1_rtx;
15669 code = (code == EQ ? LTU : GEU);
15670 break;
15672 /* Convert a>b into b<a or a>=b+1. */
15673 case GTU:
15674 case LEU:
15675 if (CONST_INT_P (op1))
15677 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15678 /* Bail out on overflow. We still can swap operands but that
15679 would force loading of the constant into register. */
15680 if (op1 == const0_rtx
15681 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15682 return false;
15683 code = (code == GTU ? GEU : LTU);
15685 else
15687 rtx tmp = op1;
15688 op1 = op0;
15689 op0 = tmp;
15690 code = (code == GTU ? LTU : GEU);
15692 break;
15694 /* Convert a>=0 into (unsigned)a<0x80000000. */
15695 case LT:
15696 case GE:
15697 if (mode == DImode || op1 != const0_rtx)
15698 return false;
15699 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15700 code = (code == LT ? GEU : LTU);
15701 break;
15702 case LE:
15703 case GT:
15704 if (mode == DImode || op1 != constm1_rtx)
15705 return false;
15706 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15707 code = (code == LE ? GEU : LTU);
15708 break;
15710 default:
15711 return false;
15713 /* Swapping operands may cause constant to appear as first operand. */
15714 if (!nonimmediate_operand (op0, VOIDmode))
15716 if (!can_create_pseudo_p ())
15717 return false;
15718 op0 = force_reg (mode, op0);
15720 ix86_compare_op0 = op0;
15721 ix86_compare_op1 = op1;
15722 *pop = ix86_expand_compare (code);
15723 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
15724 return true;
15728 ix86_expand_int_movcc (rtx operands[])
15730 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15731 rtx compare_seq, compare_op;
15732 enum machine_mode mode = GET_MODE (operands[0]);
15733 bool sign_bit_compare_p = false;
15735 start_sequence ();
15736 ix86_compare_op0 = XEXP (operands[1], 0);
15737 ix86_compare_op1 = XEXP (operands[1], 1);
15738 compare_op = ix86_expand_compare (code);
15739 compare_seq = get_insns ();
15740 end_sequence ();
15742 compare_code = GET_CODE (compare_op);
15744 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15745 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15746 sign_bit_compare_p = true;
15748 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15749 HImode insns, we'd be swallowed in word prefix ops. */
15751 if ((mode != HImode || TARGET_FAST_PREFIX)
15752 && (mode != (TARGET_64BIT ? TImode : DImode))
15753 && CONST_INT_P (operands[2])
15754 && CONST_INT_P (operands[3]))
15756 rtx out = operands[0];
15757 HOST_WIDE_INT ct = INTVAL (operands[2]);
15758 HOST_WIDE_INT cf = INTVAL (operands[3]);
15759 HOST_WIDE_INT diff;
15761 diff = ct - cf;
15762 /* Sign bit compares are better done using shifts than we do by using
15763 sbb. */
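/* The sbb idiom used below: after "cmp op0, op1" a "sbb dest, dest"
   subtracts dest and the carry from itself, leaving dest = -1 when the
   carry was set (op0 <u op1) and 0 otherwise.  The constant cases then
   turn that all-ones/zero mask into ct/cf with add, or, not and and.  */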
15764 if (sign_bit_compare_p
15765 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15766 ix86_compare_op1, &compare_op))
15768 /* Detect overlap between destination and compare sources. */
15769 rtx tmp = out;
15771 if (!sign_bit_compare_p)
15773 rtx flags;
15774 bool fpcmp = false;
15776 compare_code = GET_CODE (compare_op);
15778 flags = XEXP (compare_op, 0);
15780 if (GET_MODE (flags) == CCFPmode
15781 || GET_MODE (flags) == CCFPUmode)
15783 fpcmp = true;
15784 compare_code
15785 = ix86_fp_compare_code_to_integer (compare_code);
15788 /* To simplify rest of code, restrict to the GEU case. */
15789 if (compare_code == LTU)
15791 HOST_WIDE_INT tmp = ct;
15792 ct = cf;
15793 cf = tmp;
15794 compare_code = reverse_condition (compare_code);
15795 code = reverse_condition (code);
15797 else
15799 if (fpcmp)
15800 PUT_CODE (compare_op,
15801 reverse_condition_maybe_unordered
15802 (GET_CODE (compare_op)));
15803 else
15804 PUT_CODE (compare_op,
15805 reverse_condition (GET_CODE (compare_op)));
15807 diff = ct - cf;
15809 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15810 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15811 tmp = gen_reg_rtx (mode);
15813 if (mode == DImode)
15814 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15815 else
15816 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15817 flags, compare_op));
15819 else
15821 if (code == GT || code == GE)
15822 code = reverse_condition (code);
15823 else
15825 HOST_WIDE_INT tmp = ct;
15826 ct = cf;
15827 cf = tmp;
15828 diff = ct - cf;
15830 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15831 ix86_compare_op1, VOIDmode, 0, -1);
15834 if (diff == 1)
15837 * cmpl op0,op1
15838 * sbbl dest,dest
15839 * [addl dest, ct]
15841 * Size 5 - 8.
15843 if (ct)
15844 tmp = expand_simple_binop (mode, PLUS,
15845 tmp, GEN_INT (ct),
15846 copy_rtx (tmp), 1, OPTAB_DIRECT);
15848 else if (cf == -1)
15851 * cmpl op0,op1
15852 * sbbl dest,dest
15853 * orl $ct, dest
15855 * Size 8.
15857 tmp = expand_simple_binop (mode, IOR,
15858 tmp, GEN_INT (ct),
15859 copy_rtx (tmp), 1, OPTAB_DIRECT);
15861 else if (diff == -1 && ct)
15864 * cmpl op0,op1
15865 * sbbl dest,dest
15866 * notl dest
15867 * [addl dest, cf]
15869 * Size 8 - 11.
15871 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15872 if (cf)
15873 tmp = expand_simple_binop (mode, PLUS,
15874 copy_rtx (tmp), GEN_INT (cf),
15875 copy_rtx (tmp), 1, OPTAB_DIRECT);
15877 else
15880 * cmpl op0,op1
15881 * sbbl dest,dest
15882 * [notl dest]
15883 * andl cf - ct, dest
15884 * [addl dest, ct]
15886 * Size 8 - 11.
15889 if (cf == 0)
15891 cf = ct;
15892 ct = 0;
15893 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15896 tmp = expand_simple_binop (mode, AND,
15897 copy_rtx (tmp),
15898 gen_int_mode (cf - ct, mode),
15899 copy_rtx (tmp), 1, OPTAB_DIRECT);
15900 if (ct)
15901 tmp = expand_simple_binop (mode, PLUS,
15902 copy_rtx (tmp), GEN_INT (ct),
15903 copy_rtx (tmp), 1, OPTAB_DIRECT);
15906 if (!rtx_equal_p (tmp, out))
15907 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15909 return 1; /* DONE */
15912 if (diff < 0)
15914 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15916 HOST_WIDE_INT tmp;
15917 tmp = ct, ct = cf, cf = tmp;
15918 diff = -diff;
15920 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15922 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15924 /* We may be reversing an unordered compare to a normal compare, which
15925 is not valid in general (we may convert a non-trapping condition
15926 into a trapping one); however, on i386 we currently emit all
15927 comparisons unordered. */
15928 compare_code = reverse_condition_maybe_unordered (compare_code);
15929 code = reverse_condition_maybe_unordered (code);
15931 else
15933 compare_code = reverse_condition (compare_code);
15934 code = reverse_condition (code);
15938 compare_code = UNKNOWN;
15939 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15940 && CONST_INT_P (ix86_compare_op1))
15942 if (ix86_compare_op1 == const0_rtx
15943 && (code == LT || code == GE))
15944 compare_code = code;
15945 else if (ix86_compare_op1 == constm1_rtx)
15947 if (code == LE)
15948 compare_code = LT;
15949 else if (code == GT)
15950 compare_code = GE;
15954 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15955 if (compare_code != UNKNOWN
15956 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15957 && (cf == -1 || ct == -1))
15959 /* If lea code below could be used, only optimize
15960 if it results in a 2 insn sequence. */
15962 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15963 || diff == 3 || diff == 5 || diff == 9)
15964 || (compare_code == LT && ct == -1)
15965 || (compare_code == GE && cf == -1))
15968 * notl op1 (if necessary)
15969 * sarl $31, op1
15970 * orl cf, op1
15972 if (ct != -1)
15974 cf = ct;
15975 ct = -1;
15976 code = reverse_condition (code);
15979 out = emit_store_flag (out, code, ix86_compare_op0,
15980 ix86_compare_op1, VOIDmode, 0, -1);
15982 out = expand_simple_binop (mode, IOR,
15983 out, GEN_INT (cf),
15984 out, 1, OPTAB_DIRECT);
15985 if (out != operands[0])
15986 emit_move_insn (operands[0], out);
15988 return 1; /* DONE */
15993 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15994 || diff == 3 || diff == 5 || diff == 9)
15995 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15996 && (mode != DImode
15997 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
16000 * xorl dest,dest
16001 * cmpl op1,op2
16002 * setcc dest
16003 * lea cf(dest*(ct-cf)),dest
16005 * Size 14.
16007 * This also catches the degenerate setcc-only case.
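/* The arithmetic: out is the 0/1 store-flag result, and the final value is
   out * diff + cf, i.e. cf when the condition is false and ct when it is
   true.  diff is restricted to 1, 2, 3, 4, 5, 8 and 9 because those are the
   multipliers a single lea can encode (index scale 1/2/4/8, optionally plus
   the same register as base).  */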
16010 rtx tmp;
16011 int nops;
16013 out = emit_store_flag (out, code, ix86_compare_op0,
16014 ix86_compare_op1, VOIDmode, 0, 1);
16016 nops = 0;
16017 /* On x86_64 the lea instruction operates on Pmode, so we need
16018 to get the arithmetic done in the proper mode to match. */
16019 if (diff == 1)
16020 tmp = copy_rtx (out);
16021 else
16023 rtx out1;
16024 out1 = copy_rtx (out);
16025 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
16026 nops++;
16027 if (diff & 1)
16029 tmp = gen_rtx_PLUS (mode, tmp, out1);
16030 nops++;
16033 if (cf != 0)
16035 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
16036 nops++;
16038 if (!rtx_equal_p (tmp, out))
16040 if (nops == 1)
16041 out = force_operand (tmp, copy_rtx (out));
16042 else
16043 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
16045 if (!rtx_equal_p (out, operands[0]))
16046 emit_move_insn (operands[0], copy_rtx (out));
16048 return 1; /* DONE */
16052 * General case: Jumpful:
16053 * xorl dest,dest cmpl op1, op2
16054 * cmpl op1, op2 movl ct, dest
16055 * setcc dest jcc 1f
16056 * decl dest movl cf, dest
16057 * andl (cf-ct),dest 1:
16058 * addl ct,dest
16060 * Size 20. Size 14.
16062 * This is reasonably steep, but branch mispredict costs are
16063 * high on modern cpus, so consider failing only if optimizing
16064 * for space.
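/* With f being the 0/1 store-flag value, the jumpless sequence computes
   ((f - 1) & (cf - ct)) + ct, which evaluates to ct when the condition
   holds (f = 1) and to cf when it does not (f = 0).  */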
16067 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
16068 && BRANCH_COST (optimize_insn_for_speed_p (),
16069 false) >= 2)
16071 if (cf == 0)
16073 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
16075 cf = ct;
16076 ct = 0;
16078 if (SCALAR_FLOAT_MODE_P (cmp_mode))
16080 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
16082 /* We may be reversing an unordered compare to a normal compare,
16083 which is not valid in general (we may convert a non-trapping
16084 condition into a trapping one); however, on i386 we currently
16085 emit all comparisons unordered. */
16086 code = reverse_condition_maybe_unordered (code);
16088 else
16090 code = reverse_condition (code);
16091 if (compare_code != UNKNOWN)
16092 compare_code = reverse_condition (compare_code);
16096 if (compare_code != UNKNOWN)
16098 /* notl op1 (if needed)
16099 sarl $31, op1
16100 andl (cf-ct), op1
16101 addl ct, op1
16103 For x < 0 (resp. x <= -1) there will be no notl,
16104 so if possible swap the constants to get rid of the
16105 complement.
16106 True/false will be -1/0 while code below (store flag
16107 followed by decrement) is 0/-1, so the constants need
16108 to be exchanged once more. */
16110 if (compare_code == GE || !cf)
16112 code = reverse_condition (code);
16113 compare_code = LT;
16115 else
16117 HOST_WIDE_INT tmp = cf;
16118 cf = ct;
16119 ct = tmp;
16122 out = emit_store_flag (out, code, ix86_compare_op0,
16123 ix86_compare_op1, VOIDmode, 0, -1);
16125 else
16127 out = emit_store_flag (out, code, ix86_compare_op0,
16128 ix86_compare_op1, VOIDmode, 0, 1);
16130 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
16131 copy_rtx (out), 1, OPTAB_DIRECT);
16134 out = expand_simple_binop (mode, AND, copy_rtx (out),
16135 gen_int_mode (cf - ct, mode),
16136 copy_rtx (out), 1, OPTAB_DIRECT);
16137 if (ct)
16138 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
16139 copy_rtx (out), 1, OPTAB_DIRECT);
16140 if (!rtx_equal_p (out, operands[0]))
16141 emit_move_insn (operands[0], copy_rtx (out));
16143 return 1; /* DONE */
16147 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
16149 /* Try a few things more with specific constants and a variable. */
16151 optab op;
16152 rtx var, orig_out, out, tmp;
16154 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
16155 return 0; /* FAIL */
16157 /* If one of the two operands is an interesting constant, load a
16158 constant with the above and mask it in with a logical operation. */
16160 if (CONST_INT_P (operands[2]))
16162 var = operands[3];
16163 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
16164 operands[3] = constm1_rtx, op = and_optab;
16165 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
16166 operands[3] = const0_rtx, op = ior_optab;
16167 else
16168 return 0; /* FAIL */
16170 else if (CONST_INT_P (operands[3]))
16172 var = operands[2];
16173 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
16174 operands[2] = constm1_rtx, op = and_optab;
16175 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
16176 operands[2] = const0_rtx, op = ior_optab;
16177 else
16178 return 0; /* FAIL */
16180 else
16181 return 0; /* FAIL */
16183 orig_out = operands[0];
16184 tmp = gen_reg_rtx (mode);
16185 operands[0] = tmp;
16187 /* Recurse to get the constant loaded. */
16188 if (ix86_expand_int_movcc (operands) == 0)
16189 return 0; /* FAIL */
16191 /* Mask in the interesting variable. */
16192 out = expand_binop (mode, op, var, tmp, orig_out, 0,
16193 OPTAB_WIDEN);
16194 if (!rtx_equal_p (out, orig_out))
16195 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
16197 return 1; /* DONE */
16201 * For comparison with above,
16203 * movl cf,dest
16204 * movl ct,tmp
16205 * cmpl op1,op2
16206 * cmovcc tmp,dest
16208 * Size 15.
16211 if (! nonimmediate_operand (operands[2], mode))
16212 operands[2] = force_reg (mode, operands[2]);
16213 if (! nonimmediate_operand (operands[3], mode))
16214 operands[3] = force_reg (mode, operands[3]);
16216 if (! register_operand (operands[2], VOIDmode)
16217 && (mode == QImode
16218 || ! register_operand (operands[3], VOIDmode)))
16219 operands[2] = force_reg (mode, operands[2]);
16221 if (mode == QImode
16222 && ! register_operand (operands[3], VOIDmode))
16223 operands[3] = force_reg (mode, operands[3]);
16225 emit_insn (compare_seq);
16226 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16227 gen_rtx_IF_THEN_ELSE (mode,
16228 compare_op, operands[2],
16229 operands[3])));
16231 return 1; /* DONE */
16234 /* Swap, force into registers, or otherwise massage the two operands
16235 to an sse comparison with a mask result. Thus we differ a bit from
16236 ix86_prepare_fp_compare_args which expects to produce a flags result.
16238 The DEST operand exists to help determine whether to commute commutative
16239 operators. The POP0/POP1 operands are updated in place. The new
16240 comparison code is returned, or UNKNOWN if not implementable. */
16242 static enum rtx_code
16243 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
16244 rtx *pop0, rtx *pop1)
16246 rtx tmp;
16248 switch (code)
16250 case LTGT:
16251 case UNEQ:
16252 /* We have no LTGT as an operator. We could implement it with
16253 NE & ORDERED, but this requires an extra temporary. It's
16254 not clear that it's worth it. */
16255 return UNKNOWN;
16257 case LT:
16258 case LE:
16259 case UNGT:
16260 case UNGE:
16261 /* These are supported directly. */
16262 break;
16264 case EQ:
16265 case NE:
16266 case UNORDERED:
16267 case ORDERED:
16268 /* For commutative operators, try to canonicalize the destination
16269 operand to be first in the comparison - this helps reload to
16270 avoid extra moves. */
16271 if (!dest || !rtx_equal_p (dest, *pop1))
16272 break;
16273 /* FALLTHRU */
16275 case GE:
16276 case GT:
16277 case UNLE:
16278 case UNLT:
16279 /* These are not supported directly. Swap the comparison operands
16280 to transform into something that is supported. */
16281 tmp = *pop0;
16282 *pop0 = *pop1;
16283 *pop1 = tmp;
16284 code = swap_condition (code);
16285 break;
16287 default:
16288 gcc_unreachable ();
16291 return code;
16294 /* Detect conditional moves that exactly match min/max operational
16295 semantics. Note that this is IEEE safe, as long as we don't
16296 interchange the operands.
16298 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16299 and TRUE if the operation is successful and instructions are emitted. */
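/* Loosely: "x < y ? x : y" maps to the SSE min pattern and
   "x < y ? y : x" to max (UNGE is handled by swapping the arms).  When
   NaNs or signed zeros must be honoured, the operands are wrapped in
   UNSPEC_IEEE_MIN/MAX so that their order, and therefore which operand is
   returned for unordered or equal inputs, is preserved.  */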
16301 static bool
16302 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16303 rtx cmp_op1, rtx if_true, rtx if_false)
16305 enum machine_mode mode;
16306 bool is_min;
16307 rtx tmp;
16309 if (code == LT)
16311 else if (code == UNGE)
16313 tmp = if_true;
16314 if_true = if_false;
16315 if_false = tmp;
16317 else
16318 return false;
16320 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16321 is_min = true;
16322 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16323 is_min = false;
16324 else
16325 return false;
16327 mode = GET_MODE (dest);
16329 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16330 but MODE may be a vector mode and thus not appropriate. */
16331 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16333 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16334 rtvec v;
16336 if_true = force_reg (mode, if_true);
16337 v = gen_rtvec (2, if_true, if_false);
16338 tmp = gen_rtx_UNSPEC (mode, v, u);
16340 else
16342 code = is_min ? SMIN : SMAX;
16343 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16346 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
16347 return true;
16350 /* Expand an sse vector comparison. Return the register with the result. */
16352 static rtx
16353 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16354 rtx op_true, rtx op_false)
16356 enum machine_mode mode = GET_MODE (dest);
16357 rtx x;
16359 cmp_op0 = force_reg (mode, cmp_op0);
16360 if (!nonimmediate_operand (cmp_op1, mode))
16361 cmp_op1 = force_reg (mode, cmp_op1);
16363 if (optimize
16364 || reg_overlap_mentioned_p (dest, op_true)
16365 || reg_overlap_mentioned_p (dest, op_false))
16366 dest = gen_reg_rtx (mode);
16368 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16369 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16371 return dest;
16374 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16375 operations. This is used for both scalar and vector conditional moves. */
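/* In the general case below the result is built as
   (cmp & op_true) | (~cmp & op_false), which relies on CMP being an
   element-wise all-ones / all-zeros mask as produced by the SSE compare
   instructions.  */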
16377 static void
16378 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16380 enum machine_mode mode = GET_MODE (dest);
16381 rtx t2, t3, x;
16383 if (op_false == CONST0_RTX (mode))
16385 op_true = force_reg (mode, op_true);
16386 x = gen_rtx_AND (mode, cmp, op_true);
16387 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16389 else if (op_true == CONST0_RTX (mode))
16391 op_false = force_reg (mode, op_false);
16392 x = gen_rtx_NOT (mode, cmp);
16393 x = gen_rtx_AND (mode, x, op_false);
16394 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16396 else if (TARGET_XOP)
16398 rtx pcmov = gen_rtx_SET (mode, dest,
16399 gen_rtx_IF_THEN_ELSE (mode, cmp,
16400 op_true,
16401 op_false));
16402 emit_insn (pcmov);
16404 else
16406 op_true = force_reg (mode, op_true);
16407 op_false = force_reg (mode, op_false);
16409 t2 = gen_reg_rtx (mode);
16410 if (optimize)
16411 t3 = gen_reg_rtx (mode);
16412 else
16413 t3 = dest;
16415 x = gen_rtx_AND (mode, op_true, cmp);
16416 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16418 x = gen_rtx_NOT (mode, cmp);
16419 x = gen_rtx_AND (mode, x, op_false);
16420 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16422 x = gen_rtx_IOR (mode, t3, t2);
16423 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16427 /* Expand a floating-point conditional move. Return true if successful. */
16430 ix86_expand_fp_movcc (rtx operands[])
16432 enum machine_mode mode = GET_MODE (operands[0]);
16433 enum rtx_code code = GET_CODE (operands[1]);
16434 rtx tmp, compare_op;
16436 ix86_compare_op0 = XEXP (operands[1], 0);
16437 ix86_compare_op1 = XEXP (operands[1], 1);
16438 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16440 enum machine_mode cmode;
16442 /* Since we've no cmove for sse registers, don't force bad register
16443 allocation just to gain access to it. Deny movcc when the
16444 comparison mode doesn't match the move mode. */
16445 cmode = GET_MODE (ix86_compare_op0);
16446 if (cmode == VOIDmode)
16447 cmode = GET_MODE (ix86_compare_op1);
16448 if (cmode != mode)
16449 return 0;
16451 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16452 &ix86_compare_op0,
16453 &ix86_compare_op1);
16454 if (code == UNKNOWN)
16455 return 0;
16457 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16458 ix86_compare_op1, operands[2],
16459 operands[3]))
16460 return 1;
16462 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16463 ix86_compare_op1, operands[2], operands[3]);
16464 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16465 return 1;
16468 /* The floating point conditional move instructions don't directly
16469 support conditions resulting from a signed integer comparison. */
16471 compare_op = ix86_expand_compare (code);
16472 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16474 tmp = gen_reg_rtx (QImode);
16475 ix86_expand_setcc (code, tmp);
16476 code = NE;
16477 ix86_compare_op0 = tmp;
16478 ix86_compare_op1 = const0_rtx;
16479 compare_op = ix86_expand_compare (code);
16482 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16483 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16484 operands[2], operands[3])));
16486 return 1;
16489 /* Expand a floating-point vector conditional move; a vcond operation
16490 rather than a movcc operation. */
16492 bool
16493 ix86_expand_fp_vcond (rtx operands[])
16495 enum rtx_code code = GET_CODE (operands[3]);
16496 rtx cmp;
16498 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16499 &operands[4], &operands[5]);
16500 if (code == UNKNOWN)
16501 return false;
16503 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16504 operands[5], operands[1], operands[2]))
16505 return true;
16507 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16508 operands[1], operands[2]);
16509 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16510 return true;
16513 /* Expand a signed/unsigned integral vector conditional move. */
16515 bool
16516 ix86_expand_int_vcond (rtx operands[])
16518 enum machine_mode mode = GET_MODE (operands[0]);
16519 enum rtx_code code = GET_CODE (operands[3]);
16520 bool negate = false;
16521 rtx x, cop0, cop1;
16523 cop0 = operands[4];
16524 cop1 = operands[5];
16526 /* XOP supports all of the comparisons on all vector int types. */
16527 if (!TARGET_XOP)
16529 /* Canonicalize the comparison to EQ, GT, GTU. */
16530 switch (code)
16532 case EQ:
16533 case GT:
16534 case GTU:
16535 break;
16537 case NE:
16538 case LE:
16539 case LEU:
16540 code = reverse_condition (code);
16541 negate = true;
16542 break;
16544 case GE:
16545 case GEU:
16546 code = reverse_condition (code);
16547 negate = true;
16548 /* FALLTHRU */
16550 case LT:
16551 case LTU:
16552 code = swap_condition (code);
16553 x = cop0, cop0 = cop1, cop1 = x;
16554 break;
16556 default:
16557 gcc_unreachable ();
16560 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16561 if (mode == V2DImode)
16563 switch (code)
16565 case EQ:
16566 /* SSE4.1 supports EQ. */
16567 if (!TARGET_SSE4_1)
16568 return false;
16569 break;
16571 case GT:
16572 case GTU:
16573 /* SSE4.2 supports GT/GTU. */
16574 if (!TARGET_SSE4_2)
16575 return false;
16576 break;
16578 default:
16579 gcc_unreachable ();
16583 /* Unsigned parallel compare is not supported by the hardware.
16584 Play some tricks to turn this into a signed comparison
16585 against 0. */
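/* Two tricks are used below.  For 32/64-bit elements the sign bit is
   subtracted from both operands, turning the unsigned compare into a signed
   one: e.g. 0xffffffff >u 1 becomes 0x7fffffff >s 0x80000001.  For 8/16-bit
   elements an unsigned saturating subtraction is used instead, since
   a >u b exactly when (a -us b) is non-zero.  */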
16586 if (code == GTU)
16588 cop0 = force_reg (mode, cop0);
16590 switch (mode)
16592 case V4SImode:
16593 case V2DImode:
16595 rtx t1, t2, mask;
16596 rtx (*gen_sub3) (rtx, rtx, rtx);
16598 /* Subtract (-(INT MAX) - 1) from both operands to make
16599 them signed. */
16600 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16601 true, false);
16602 gen_sub3 = (mode == V4SImode
16603 ? gen_subv4si3 : gen_subv2di3);
16604 t1 = gen_reg_rtx (mode);
16605 emit_insn (gen_sub3 (t1, cop0, mask));
16607 t2 = gen_reg_rtx (mode);
16608 emit_insn (gen_sub3 (t2, cop1, mask));
16610 cop0 = t1;
16611 cop1 = t2;
16612 code = GT;
16614 break;
16616 case V16QImode:
16617 case V8HImode:
16618 /* Perform a parallel unsigned saturating subtraction. */
16619 x = gen_reg_rtx (mode);
16620 emit_insn (gen_rtx_SET (VOIDmode, x,
16621 gen_rtx_US_MINUS (mode, cop0, cop1)));
16623 cop0 = x;
16624 cop1 = CONST0_RTX (mode);
16625 code = EQ;
16626 negate = !negate;
16627 break;
16629 default:
16630 gcc_unreachable ();
16635 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16636 operands[1+negate], operands[2-negate]);
16638 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16639 operands[2-negate]);
16640 return true;
16643 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16644 true if we should do zero extension, else sign extension. HIGH_P is
16645 true if we want the N/2 high elements, else the low elements. */
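/* Without SSE4.1 the widening is done by interleaving: for zero extension
   the elements are interleaved with a zero vector, and for sign extension
   with a "sign mask" obtained by comparing zero against the operand
   (0 > x yields an all-ones element exactly for negative x).  */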
16647 void
16648 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16650 enum machine_mode imode = GET_MODE (operands[1]);
16651 rtx (*unpack)(rtx, rtx, rtx);
16652 rtx se, dest;
16654 switch (imode)
16656 case V16QImode:
16657 if (high_p)
16658 unpack = gen_vec_interleave_highv16qi;
16659 else
16660 unpack = gen_vec_interleave_lowv16qi;
16661 break;
16662 case V8HImode:
16663 if (high_p)
16664 unpack = gen_vec_interleave_highv8hi;
16665 else
16666 unpack = gen_vec_interleave_lowv8hi;
16667 break;
16668 case V4SImode:
16669 if (high_p)
16670 unpack = gen_vec_interleave_highv4si;
16671 else
16672 unpack = gen_vec_interleave_lowv4si;
16673 break;
16674 default:
16675 gcc_unreachable ();
16678 dest = gen_lowpart (imode, operands[0]);
16680 if (unsigned_p)
16681 se = force_reg (imode, CONST0_RTX (imode));
16682 else
16683 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16684 operands[1], pc_rtx, pc_rtx);
16686 emit_insn (unpack (dest, operands[1], se));
16689 /* This function performs the same task as ix86_expand_sse_unpack,
16690 but with SSE4.1 instructions. */
16692 void
16693 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16695 enum machine_mode imode = GET_MODE (operands[1]);
16696 rtx (*unpack)(rtx, rtx);
16697 rtx src, dest;
16699 switch (imode)
16701 case V16QImode:
16702 if (unsigned_p)
16703 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16704 else
16705 unpack = gen_sse4_1_extendv8qiv8hi2;
16706 break;
16707 case V8HImode:
16708 if (unsigned_p)
16709 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16710 else
16711 unpack = gen_sse4_1_extendv4hiv4si2;
16712 break;
16713 case V4SImode:
16714 if (unsigned_p)
16715 unpack = gen_sse4_1_zero_extendv2siv2di2;
16716 else
16717 unpack = gen_sse4_1_extendv2siv2di2;
16718 break;
16719 default:
16720 gcc_unreachable ();
16723 dest = operands[0];
16724 if (high_p)
16726 /* Shift higher 8 bytes to lower 8 bytes. */
16727 src = gen_reg_rtx (imode);
16728 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16729 gen_lowpart (V1TImode, operands[1]),
16730 GEN_INT (64)));
16732 else
16733 src = operands[1];
16735 emit_insn (unpack (dest, src));
16738 /* Expand conditional increment or decrement using adc/sbb instructions.
16739 The default case using setcc followed by the conditional move can be
16740 done by generic code. */
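/* E.g. "x = y + (a <u b)" becomes "cmp a, b; adc x, 0" once the comparison
   has been reduced to a carry-flag test; the other combinations of
   increment/decrement and carry sense come out as sbb against 0 or
   adc/sbb against -1.  */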
16742 ix86_expand_int_addcc (rtx operands[])
16744 enum rtx_code code = GET_CODE (operands[1]);
16745 rtx flags;
16746 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16747 rtx compare_op;
16748 rtx val = const0_rtx;
16749 bool fpcmp = false;
16750 enum machine_mode mode;
16752 ix86_compare_op0 = XEXP (operands[1], 0);
16753 ix86_compare_op1 = XEXP (operands[1], 1);
16754 if (operands[3] != const1_rtx
16755 && operands[3] != constm1_rtx)
16756 return 0;
16757 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16758 ix86_compare_op1, &compare_op))
16759 return 0;
16760 code = GET_CODE (compare_op);
16762 flags = XEXP (compare_op, 0);
16764 if (GET_MODE (flags) == CCFPmode
16765 || GET_MODE (flags) == CCFPUmode)
16767 fpcmp = true;
16768 code = ix86_fp_compare_code_to_integer (code);
16771 if (code != LTU)
16773 val = constm1_rtx;
16774 if (fpcmp)
16775 PUT_CODE (compare_op,
16776 reverse_condition_maybe_unordered
16777 (GET_CODE (compare_op)));
16778 else
16779 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16782 mode = GET_MODE (operands[0]);
16784 /* Construct either adc or sbb insn. */
16785 if ((code == LTU) == (operands[3] == constm1_rtx))
16787 switch (mode)
16789 case QImode:
16790 insn = gen_subqi3_carry;
16791 break;
16792 case HImode:
16793 insn = gen_subhi3_carry;
16794 break;
16795 case SImode:
16796 insn = gen_subsi3_carry;
16797 break;
16798 case DImode:
16799 insn = gen_subdi3_carry;
16800 break;
16801 default:
16802 gcc_unreachable ();
16805 else
16807 switch (mode)
16809 case QImode:
16810 insn = gen_addqi3_carry;
16811 break;
16812 case HImode:
16813 insn = gen_addhi3_carry;
16814 break;
16815 case SImode:
16816 insn = gen_addsi3_carry;
16817 break;
16818 case DImode:
16819 insn = gen_adddi3_carry;
16820 break;
16821 default:
16822 gcc_unreachable ();
16825 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16827 return 1; /* DONE */
16831 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16832 works for floating point parameters and non-offsettable memories.
16833 For pushes, it returns just stack offsets; the values will be saved
16834 in the right order. At most four parts are generated. */
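/* For illustration: on ia32 a DFmode operand splits into two SImode parts,
   XFmode into three and TFmode into four, while in 64-bit mode XFmode and
   TFmode split into a DImode part plus an SImode or DImode upper part
   respectively.  */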
16836 static int
16837 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16839 int size;
16841 if (!TARGET_64BIT)
16842 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16843 else
16844 size = (GET_MODE_SIZE (mode) + 4) / 8;
16846 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16847 gcc_assert (size >= 2 && size <= 4);
16849 /* Optimize constant pool reference to immediates. This is used by fp
16850 moves, that force all constants to memory to allow combining. */
16851 if (MEM_P (operand) && MEM_READONLY_P (operand))
16853 rtx tmp = maybe_get_pool_constant (operand);
16854 if (tmp)
16855 operand = tmp;
16858 if (MEM_P (operand) && !offsettable_memref_p (operand))
16860 /* The only non-offsettable memories we handle are pushes. */
16861 int ok = push_operand (operand, VOIDmode);
16863 gcc_assert (ok);
16865 operand = copy_rtx (operand);
16866 PUT_MODE (operand, Pmode);
16867 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16868 return size;
16871 if (GET_CODE (operand) == CONST_VECTOR)
16873 enum machine_mode imode = int_mode_for_mode (mode);
16874 /* Caution: if we looked through a constant pool memory above,
16875 the operand may actually have a different mode now. That's
16876 ok, since we want to pun this all the way back to an integer. */
16877 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16878 gcc_assert (operand != NULL);
16879 mode = imode;
16882 if (!TARGET_64BIT)
16884 if (mode == DImode)
16885 split_di (&operand, 1, &parts[0], &parts[1]);
16886 else
16888 int i;
16890 if (REG_P (operand))
16892 gcc_assert (reload_completed);
16893 for (i = 0; i < size; i++)
16894 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16896 else if (offsettable_memref_p (operand))
16898 operand = adjust_address (operand, SImode, 0);
16899 parts[0] = operand;
16900 for (i = 1; i < size; i++)
16901 parts[i] = adjust_address (operand, SImode, 4 * i);
16903 else if (GET_CODE (operand) == CONST_DOUBLE)
16905 REAL_VALUE_TYPE r;
16906 long l[4];
16908 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16909 switch (mode)
16911 case TFmode:
16912 real_to_target (l, &r, mode);
16913 parts[3] = gen_int_mode (l[3], SImode);
16914 parts[2] = gen_int_mode (l[2], SImode);
16915 break;
16916 case XFmode:
16917 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16918 parts[2] = gen_int_mode (l[2], SImode);
16919 break;
16920 case DFmode:
16921 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16922 break;
16923 default:
16924 gcc_unreachable ();
16926 parts[1] = gen_int_mode (l[1], SImode);
16927 parts[0] = gen_int_mode (l[0], SImode);
16929 else
16930 gcc_unreachable ();
16933 else
16935 if (mode == TImode)
16936 split_ti (&operand, 1, &parts[0], &parts[1]);
16937 if (mode == XFmode || mode == TFmode)
16939 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
16940 if (REG_P (operand))
16942 gcc_assert (reload_completed);
16943 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16944 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16946 else if (offsettable_memref_p (operand))
16948 operand = adjust_address (operand, DImode, 0);
16949 parts[0] = operand;
16950 parts[1] = adjust_address (operand, upper_mode, 8);
16952 else if (GET_CODE (operand) == CONST_DOUBLE)
16954 REAL_VALUE_TYPE r;
16955 long l[4];
16957 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16958 real_to_target (l, &r, mode);
16960 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16961 if (HOST_BITS_PER_WIDE_INT >= 64)
16962 parts[0]
16963 = gen_int_mode
16964 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16965 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16966 DImode);
16967 else
16968 parts[0] = immed_double_const (l[0], l[1], DImode);
16970 if (upper_mode == SImode)
16971 parts[1] = gen_int_mode (l[2], SImode);
16972 else if (HOST_BITS_PER_WIDE_INT >= 64)
16973 parts[1]
16974 = gen_int_mode
16975 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16976 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16977 DImode);
16978 else
16979 parts[1] = immed_double_const (l[2], l[3], DImode);
16981 else
16982 gcc_unreachable ();
16986 return size;
16989 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16990 All required insns are emitted directly; nothing is returned.
16991 Operands 2-5 receive the destination parts in the correct order;
16992 operands 6-9 receive the source parts. */
16994 void
16995 ix86_split_long_move (rtx operands[])
16997 rtx part[2][4];
16998 int nparts, i, j;
16999 int push = 0;
17000 int collisions = 0;
17001 enum machine_mode mode = GET_MODE (operands[0]);
17002 bool collisionparts[4];
17004 /* The DFmode expanders may ask us to move a double.
17005 For a 64-bit target this is a single move. By hiding the fact
17006 here we simplify the i386.md splitters. */
17007 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
17009 /* Optimize constant pool reference to immediates. This is used by
17010 fp moves, that force all constants to memory to allow combining. */
17012 if (MEM_P (operands[1])
17013 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
17014 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
17015 operands[1] = get_pool_constant (XEXP (operands[1], 0));
17016 if (push_operand (operands[0], VOIDmode))
17018 operands[0] = copy_rtx (operands[0]);
17019 PUT_MODE (operands[0], Pmode);
17021 else
17022 operands[0] = gen_lowpart (DImode, operands[0]);
17023 operands[1] = gen_lowpart (DImode, operands[1]);
17024 emit_move_insn (operands[0], operands[1]);
17025 return;
17028 /* The only non-offsettable memory we handle is push. */
17029 if (push_operand (operands[0], VOIDmode))
17030 push = 1;
17031 else
17032 gcc_assert (!MEM_P (operands[0])
17033 || offsettable_memref_p (operands[0]));
17035 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
17036 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
17038 /* When emitting push, take care for source operands on the stack. */
17039 if (push && MEM_P (operands[1])
17040 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
17042 rtx src_base = XEXP (part[1][nparts - 1], 0);
17044 /* Compensate for the stack decrement by 4. */
17045 if (!TARGET_64BIT && nparts == 3
17046 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
17047 src_base = plus_constant (src_base, 4);
17049 /* src_base refers to the stack pointer and is
17050 automatically decreased by emitted push. */
17051 for (i = 0; i < nparts; i++)
17052 part[1][i] = change_address (part[1][i],
17053 GET_MODE (part[1][i]), src_base);
17056 /* We need to do copy in the right order in case an address register
17057 of the source overlaps the destination. */
17058 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
17060 rtx tmp;
17062 for (i = 0; i < nparts; i++)
17064 collisionparts[i]
17065 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
17066 if (collisionparts[i])
17067 collisions++;
17070 /* Collision in the middle part can be handled by reordering. */
17071 if (collisions == 1 && nparts == 3 && collisionparts [1])
17073 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
17074 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
17076 else if (collisions == 1
17077 && nparts == 4
17078 && (collisionparts [1] || collisionparts [2]))
17080 if (collisionparts [1])
17082 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
17083 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
17085 else
17087 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
17088 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
17092 /* If there are more collisions, we can't handle it by reordering.
17093 Do an lea to the last part and use only one colliding move. */
17094 else if (collisions > 1)
17096 rtx base;
17098 collisions = 1;
17100 base = part[0][nparts - 1];
17102 /* Handle the case when the last part isn't valid for lea.
17103 Happens in 64-bit mode storing the 12-byte XFmode. */
17104 if (GET_MODE (base) != Pmode)
17105 base = gen_rtx_REG (Pmode, REGNO (base));
17107 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
17108 part[1][0] = replace_equiv_address (part[1][0], base);
17109 for (i = 1; i < nparts; i++)
17111 tmp = plus_constant (base, UNITS_PER_WORD * i);
17112 part[1][i] = replace_equiv_address (part[1][i], tmp);
17117 if (push)
17119 if (!TARGET_64BIT)
17121 if (nparts == 3)
17123 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
17124 emit_insn (gen_addsi3 (stack_pointer_rtx,
17125 stack_pointer_rtx, GEN_INT (-4)));
17126 emit_move_insn (part[0][2], part[1][2]);
17128 else if (nparts == 4)
17130 emit_move_insn (part[0][3], part[1][3]);
17131 emit_move_insn (part[0][2], part[1][2]);
17134 else
17136 /* In 64-bit mode we don't have a 32-bit push available. In case this is
17137 a register, it is OK - we will just use the larger counterpart. We also
17138 retype memory - this comes from an attempt to avoid the REX prefix on
17139 moving the second half of a TFmode value. */
17140 if (GET_MODE (part[1][1]) == SImode)
17142 switch (GET_CODE (part[1][1]))
17144 case MEM:
17145 part[1][1] = adjust_address (part[1][1], DImode, 0);
17146 break;
17148 case REG:
17149 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
17150 break;
17152 default:
17153 gcc_unreachable ();
17156 if (GET_MODE (part[1][0]) == SImode)
17157 part[1][0] = part[1][1];
17160 emit_move_insn (part[0][1], part[1][1]);
17161 emit_move_insn (part[0][0], part[1][0]);
17162 return;
17165 /* Choose correct order to not overwrite the source before it is copied. */
17166 if ((REG_P (part[0][0])
17167 && REG_P (part[1][1])
17168 && (REGNO (part[0][0]) == REGNO (part[1][1])
17169 || (nparts == 3
17170 && REGNO (part[0][0]) == REGNO (part[1][2]))
17171 || (nparts == 4
17172 && REGNO (part[0][0]) == REGNO (part[1][3]))))
17173 || (collisions > 0
17174 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
17176 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
17178 operands[2 + i] = part[0][j];
17179 operands[6 + i] = part[1][j];
17182 else
17184 for (i = 0; i < nparts; i++)
17186 operands[2 + i] = part[0][i];
17187 operands[6 + i] = part[1][i];
17191 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
17192 if (optimize_insn_for_size_p ())
17194 for (j = 0; j < nparts - 1; j++)
17195 if (CONST_INT_P (operands[6 + j])
17196 && operands[6 + j] != const0_rtx
17197 && REG_P (operands[2 + j]))
17198 for (i = j; i < nparts - 1; i++)
17199 if (CONST_INT_P (operands[7 + i])
17200 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
17201 operands[7 + i] = operands[2 + j];
17204 for (i = 0; i < nparts; i++)
17205 emit_move_insn (operands[2 + i], operands[6 + i]);
17207 return;
17210 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
17211 left shift by a constant, either using a single shift or
17212 a sequence of add instructions. */
17214 static void
17215 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
17217 if (count == 1)
17219 emit_insn ((mode == DImode
17220 ? gen_addsi3
17221 : gen_adddi3) (operand, operand, operand));
17223 else if (!optimize_insn_for_size_p ()
17224 && count * ix86_cost->add <= ix86_cost->shift_const)
17226 int i;
17227 for (i=0; i<count; i++)
17229 emit_insn ((mode == DImode
17230 ? gen_addsi3
17231 : gen_adddi3) (operand, operand, operand));
17234 else
17235 emit_insn ((mode == DImode
17236 ? gen_ashlsi3
17237 : gen_ashldi3) (operand, operand, GEN_INT (count)));
17240 void
17241 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
17243 rtx low[2], high[2];
17244 int count;
17245 const int single_width = mode == DImode ? 32 : 64;
17247 if (CONST_INT_P (operands[2]))
17249 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17250 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17252 if (count >= single_width)
17254 emit_move_insn (high[0], low[1]);
17255 emit_move_insn (low[0], const0_rtx);
17257 if (count > single_width)
17258 ix86_expand_ashl_const (high[0], count - single_width, mode);
17260 else
17262 if (!rtx_equal_p (operands[0], operands[1]))
17263 emit_move_insn (operands[0], operands[1]);
17264 emit_insn ((mode == DImode
17265 ? gen_x86_shld
17266 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17267 ix86_expand_ashl_const (low[0], count, mode);
17269 return;
17272 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17274 if (operands[1] == const1_rtx)
17276 /* Assuming we've chosen QImode-capable registers, 1 << N
17277 can be done with two 32/64-bit shifts, no branches, no cmoves. */
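/* The trick: with the shifted value known to be 1, only one half ever
   receives the bit, and bit 5 (or 6) of the shift count says which one.
   Both halves are cleared, the chosen half is set to 1 via setcc on that
   bit, and then each half is shifted by the count, which the hardware
   shift masks to 5/6 bits anyway.  */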
17278 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17280 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17282 ix86_expand_clear (low[0]);
17283 ix86_expand_clear (high[0]);
17284 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17286 d = gen_lowpart (QImode, low[0]);
17287 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17288 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17289 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17291 d = gen_lowpart (QImode, high[0]);
17292 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17293 s = gen_rtx_NE (QImode, flags, const0_rtx);
17294 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17297 /* Otherwise, we can get the same results by manually performing
17298 a bit extract operation on bit 5/6, and then performing the two
17299 shifts. The two methods of getting 0/1 into low/high are exactly
17300 the same size. Avoiding the shift in the bit extract case helps
17301 pentium4 a bit; no one else seems to care much either way. */
17302 else
17304 rtx x;
17306 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17307 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17308 else
17309 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17310 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17312 emit_insn ((mode == DImode
17313 ? gen_lshrsi3
17314 : gen_lshrdi3) (high[0], high[0],
17315 GEN_INT (mode == DImode ? 5 : 6)));
17316 emit_insn ((mode == DImode
17317 ? gen_andsi3
17318 : gen_anddi3) (high[0], high[0], const1_rtx));
17319 emit_move_insn (low[0], high[0]);
17320 emit_insn ((mode == DImode
17321 ? gen_xorsi3
17322 : gen_xordi3) (low[0], low[0], const1_rtx));
17325 emit_insn ((mode == DImode
17326 ? gen_ashlsi3
17327 : gen_ashldi3) (low[0], low[0], operands[2]));
17328 emit_insn ((mode == DImode
17329 ? gen_ashlsi3
17330 : gen_ashldi3) (high[0], high[0], operands[2]));
17331 return;
17334 if (operands[1] == constm1_rtx)
17336 /* For -1 << N, we can avoid the shld instruction, because we
17337 know that we're shifting 0...31/63 ones into a -1. */
17338 emit_move_insn (low[0], constm1_rtx);
17339 if (optimize_insn_for_size_p ())
17340 emit_move_insn (high[0], low[0]);
17341 else
17342 emit_move_insn (high[0], constm1_rtx);
17344 else
17346 if (!rtx_equal_p (operands[0], operands[1]))
17347 emit_move_insn (operands[0], operands[1]);
17349 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17350 emit_insn ((mode == DImode
17351 ? gen_x86_shld
17352 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17355 emit_insn ((mode == DImode
17356 ? gen_ashlsi3
17357 : gen_ashldi3) (low[0], low[0], operands[2]));
17359 if (TARGET_CMOVE && scratch)
17361 ix86_expand_clear (scratch);
17362 emit_insn ((mode == DImode
17363 ? gen_x86_shiftsi_adj_1
17364 : gen_x86_shiftdi_adj_1) (high[0], low[0], operands[2],
17365 scratch));
17367 else
17368 emit_insn ((mode == DImode
17369 ? gen_x86_shiftsi_adj_2
17370 : gen_x86_shiftdi_adj_2) (high[0], low[0], operands[2]));
17373 void
17374 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17376 rtx low[2], high[2];
17377 int count;
17378 const int single_width = mode == DImode ? 32 : 64;
17380 if (CONST_INT_P (operands[2]))
17382 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17383 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17385 if (count == single_width * 2 - 1)
17387 emit_move_insn (high[0], high[1]);
17388 emit_insn ((mode == DImode
17389 ? gen_ashrsi3
17390 : gen_ashrdi3) (high[0], high[0],
17391 GEN_INT (single_width - 1)));
17392 emit_move_insn (low[0], high[0]);
17395 else if (count >= single_width)
17397 emit_move_insn (low[0], high[1]);
17398 emit_move_insn (high[0], low[0]);
17399 emit_insn ((mode == DImode
17400 ? gen_ashrsi3
17401 : gen_ashrdi3) (high[0], high[0],
17402 GEN_INT (single_width - 1)));
17403 if (count > single_width)
17404 emit_insn ((mode == DImode
17405 ? gen_ashrsi3
17406 : gen_ashrdi3) (low[0], low[0],
17407 GEN_INT (count - single_width)));
17409 else
17411 if (!rtx_equal_p (operands[0], operands[1]))
17412 emit_move_insn (operands[0], operands[1]);
17413 emit_insn ((mode == DImode
17414 ? gen_x86_shrd
17415 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17416 emit_insn ((mode == DImode
17417 ? gen_ashrsi3
17418 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17421 else
17423 if (!rtx_equal_p (operands[0], operands[1]))
17424 emit_move_insn (operands[0], operands[1]);
17426 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17428 emit_insn ((mode == DImode
17429 ? gen_x86_shrd
17430 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17431 emit_insn ((mode == DImode
17432 ? gen_ashrsi3
17433 : gen_ashrdi3) (high[0], high[0], operands[2]));
17435 if (TARGET_CMOVE && scratch)
17437 emit_move_insn (scratch, high[0]);
17438 emit_insn ((mode == DImode
17439 ? gen_ashrsi3
17440 : gen_ashrdi3) (scratch, scratch,
17441 GEN_INT (single_width - 1)));
17442 emit_insn ((mode == DImode
17443 ? gen_x86_shiftsi_adj_1
17444 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17445 scratch));
17447 else
17448 emit_insn ((mode == DImode
17449 ? gen_x86_shiftsi_adj_3
17450 : gen_x86_shiftdi_adj_3) (low[0], high[0], operands[2]));
17454 void
17455 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17457 rtx low[2], high[2];
17458 int count;
17459 const int single_width = mode == DImode ? 32 : 64;
17461 if (CONST_INT_P (operands[2]))
17463 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17464 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17466 if (count >= single_width)
17468 emit_move_insn (low[0], high[1]);
17469 ix86_expand_clear (high[0]);
17471 if (count > single_width)
17472 emit_insn ((mode == DImode
17473 ? gen_lshrsi3
17474 : gen_lshrdi3) (low[0], low[0],
17475 GEN_INT (count - single_width)));
17477 else
17479 if (!rtx_equal_p (operands[0], operands[1]))
17480 emit_move_insn (operands[0], operands[1]);
17481 emit_insn ((mode == DImode
17482 ? gen_x86_shrd
17483 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17484 emit_insn ((mode == DImode
17485 ? gen_lshrsi3
17486 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17489 else
17491 if (!rtx_equal_p (operands[0], operands[1]))
17492 emit_move_insn (operands[0], operands[1]);
17494 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17496 emit_insn ((mode == DImode
17497 ? gen_x86_shrd
17498 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17499 emit_insn ((mode == DImode
17500 ? gen_lshrsi3
17501 : gen_lshrdi3) (high[0], high[0], operands[2]));
17503 /* Heh. By reversing the arguments, we can reuse this pattern. */
17504 if (TARGET_CMOVE && scratch)
17506 ix86_expand_clear (scratch);
17507 emit_insn ((mode == DImode
17508 ? gen_x86_shiftsi_adj_1
17509 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17510 scratch));
17512 else
17513 emit_insn ((mode == DImode
17514 ? gen_x86_shiftsi_adj_2
17515 : gen_x86_shiftdi_adj_2) (low[0], high[0], operands[2]));
17519 /* Predict the just-emitted jump instruction to be taken with probability PROB.  */
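/* For illustration: PROB is on the REG_BR_PROB_BASE scale (10000 in GCC), so
   a call such as

     predict_jump (REG_BR_PROB_BASE * 90 / 100);

   attaches a REG_BR_PROB note of 9000, i.e. a 90% chance that the
   just-emitted jump is taken.  */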
17520 static void
17521 predict_jump (int prob)
17523 rtx insn = get_last_insn ();
17524 gcc_assert (JUMP_P (insn));
17525 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17528 /* Helper function for the string operations below.  Test whether VARIABLE
17529    is aligned to VALUE bytes.  If it is, jump to the returned label.  */
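/* As a rough sketch, ix86_expand_aligntest (count, 4, true) emits

     tmp = count & 4;
     if (tmp == 0) goto label;

   and returns LABEL, so the code emitted between this call and
   emit_label (label) runs only when bit 2 of COUNT is set.  */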
17530 static rtx
17531 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17533 rtx label = gen_label_rtx ();
17534 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17535 if (GET_MODE (variable) == DImode)
17536 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17537 else
17538 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17539 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17540 1, label);
17541 if (epilogue)
17542 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17543 else
17544 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17545 return label;
17548 /* Adjust COUNTER by the VALUE. */
17549 static void
17550 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17552 if (GET_MODE (countreg) == DImode)
17553 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17554 else
17555 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17558 /* Zero-extend EXP, which may be in SImode, to a Pmode register.  */
17560 ix86_zero_extend_to_Pmode (rtx exp)
17562 rtx r;
17563 if (GET_MODE (exp) == VOIDmode)
17564 return force_reg (Pmode, exp);
17565 if (GET_MODE (exp) == Pmode)
17566 return copy_to_mode_reg (Pmode, exp);
17567 r = gen_reg_rtx (Pmode);
17568 emit_insn (gen_zero_extendsidi2 (r, exp));
17569 return r;
17572 /* Divide COUNTREG by SCALE. */
17573 static rtx
17574 scale_counter (rtx countreg, int scale)
17576 rtx sc;
17578 if (scale == 1)
17579 return countreg;
17580 if (CONST_INT_P (countreg))
17581 return GEN_INT (INTVAL (countreg) / scale);
17582 gcc_assert (REG_P (countreg));
17584 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17585 GEN_INT (exact_log2 (scale)),
17586 NULL, 1, OPTAB_DIRECT);
17587 return sc;
17590 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17591 DImode for constant loop counts. */
17593 static enum machine_mode
17594 counter_mode (rtx count_exp)
17596 if (GET_MODE (count_exp) != VOIDmode)
17597 return GET_MODE (count_exp);
17598 if (!CONST_INT_P (count_exp))
17599 return Pmode;
17600 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17601 return DImode;
17602 return SImode;
17605 /* When SRCPTR is non-NULL, output a simple loop that moves memory
17606    pointed to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
17607    the overall size is COUNT, specified in bytes.  When SRCPTR is NULL, output
17608    the equivalent loop that sets memory to VALUE (assumed to be in MODE).
17610    The size is rounded down to a whole number of chunks moved at once.
17611    SRCMEM and DESTMEM provide the MEM rtx to feed proper aliasing info.  */
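/* Roughly, the emitted loop has this shape, where PIECE stands for
   GET_MODE_SIZE (MODE) * UNROLL:

     size = count & ~(PIECE - 1);
     if (size == 0) goto out;        guard emitted only when PIECE == 1
     iter = 0;
   top:
     copy (or set) PIECE bytes at destptr + iter, as UNROLL accesses of MODE;
     iter += PIECE;
     if (iter < size) goto top;
     destptr += iter;  srcptr += iter;
   out:                                                                   */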
17614 static void
17615 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17616 rtx destptr, rtx srcptr, rtx value,
17617 rtx count, enum machine_mode mode, int unroll,
17618 int expected_size)
17620 rtx out_label, top_label, iter, tmp;
17621 enum machine_mode iter_mode = counter_mode (count);
17622 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17623 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17624 rtx size;
17625 rtx x_addr;
17626 rtx y_addr;
17627 int i;
17629 top_label = gen_label_rtx ();
17630 out_label = gen_label_rtx ();
17631 iter = gen_reg_rtx (iter_mode);
17633 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17634 NULL, 1, OPTAB_DIRECT);
17635 /* Those two should combine. */
17636 if (piece_size == const1_rtx)
17638 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17639 true, out_label);
17640 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17642 emit_move_insn (iter, const0_rtx);
17644 emit_label (top_label);
17646 tmp = convert_modes (Pmode, iter_mode, iter, true);
17647 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17648 destmem = change_address (destmem, mode, x_addr);
17650 if (srcmem)
17652 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17653 srcmem = change_address (srcmem, mode, y_addr);
17655 /* When unrolling for chips that reorder memory reads and writes,
17656    we can save registers by using a single temporary.
17657    Also, using 4 temporaries is overkill in 32-bit mode.  */
17658 if (!TARGET_64BIT && 0)
17660 for (i = 0; i < unroll; i++)
17662 if (i)
17664 destmem =
17665 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17666 srcmem =
17667 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17669 emit_move_insn (destmem, srcmem);
17672 else
17674 rtx tmpreg[4];
17675 gcc_assert (unroll <= 4);
17676 for (i = 0; i < unroll; i++)
17678 tmpreg[i] = gen_reg_rtx (mode);
17679 if (i)
17681 srcmem =
17682 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17684 emit_move_insn (tmpreg[i], srcmem);
17686 for (i = 0; i < unroll; i++)
17688 if (i)
17690 destmem =
17691 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17693 emit_move_insn (destmem, tmpreg[i]);
17697 else
17698 for (i = 0; i < unroll; i++)
17700 if (i)
17701 destmem =
17702 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17703 emit_move_insn (destmem, value);
17706 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17707 true, OPTAB_LIB_WIDEN);
17708 if (tmp != iter)
17709 emit_move_insn (iter, tmp);
17711 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17712 true, top_label);
17713 if (expected_size != -1)
17715 expected_size /= GET_MODE_SIZE (mode) * unroll;
17716 if (expected_size == 0)
17717 predict_jump (0);
17718 else if (expected_size > REG_BR_PROB_BASE)
17719 predict_jump (REG_BR_PROB_BASE - 1);
17720 else
17721 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17723 else
17724 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17725 iter = ix86_zero_extend_to_Pmode (iter);
17726 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17727 true, OPTAB_LIB_WIDEN);
17728 if (tmp != destptr)
17729 emit_move_insn (destptr, tmp);
17730 if (srcptr)
17732 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17733 true, OPTAB_LIB_WIDEN);
17734 if (tmp != srcptr)
17735 emit_move_insn (srcptr, tmp);
17737 emit_label (out_label);
17740 /* Output "rep; mov" instruction.
17741 Arguments have same meaning as for previous function */
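/* Roughly, for MODE == SImode this expands to

     countreg = count >> 2;          (scale_counter)
     rep movsl                       (countreg, destptr and srcptr end up
                                      in ecx, edi and esi)

   with DESTEXP = destptr + (countreg << 2) and SRCEXP built likewise to
   describe the final pointer values for the rep_mov pattern.  */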
17742 static void
17743 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17744 rtx destptr, rtx srcptr,
17745 rtx count,
17746 enum machine_mode mode)
17748 rtx destexp;
17749 rtx srcexp;
17750 rtx countreg;
17752 /* If the size is known, it is shorter to use rep movs. */
17753 if (mode == QImode && CONST_INT_P (count)
17754 && !(INTVAL (count) & 3))
17755 mode = SImode;
17757 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17758 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17759 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17760 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17761 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17762 if (mode != QImode)
17764 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17765 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17766 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17767 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17768 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17769 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17771 else
17773 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17774 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17776 if (CONST_INT_P (count))
17778 count = GEN_INT (INTVAL (count)
17779 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17780 destmem = shallow_copy_rtx (destmem);
17781 srcmem = shallow_copy_rtx (srcmem);
17782 set_mem_size (destmem, count);
17783 set_mem_size (srcmem, count);
17785 else
17787 if (MEM_SIZE (destmem))
17788 set_mem_size (destmem, NULL_RTX);
17789 if (MEM_SIZE (srcmem))
17790 set_mem_size (srcmem, NULL_RTX);
17792 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17793 destexp, srcexp));
17796 /* Output "rep; stos" instruction.
17797 Arguments have same meaning as for previous function */
17798 static void
17799 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17800 rtx count, enum machine_mode mode,
17801 rtx orig_value)
17803 rtx destexp;
17804 rtx countreg;
17806 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17807 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17808 value = force_reg (mode, gen_lowpart (mode, value));
17809 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17810 if (mode != QImode)
17812 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17813 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17814 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17816 else
17817 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17818 if (orig_value == const0_rtx && CONST_INT_P (count))
17820 count = GEN_INT (INTVAL (count)
17821 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17822 destmem = shallow_copy_rtx (destmem);
17823 set_mem_size (destmem, count);
17825 else if (MEM_SIZE (destmem))
17826 set_mem_size (destmem, NULL_RTX);
17827 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17830 static void
17831 emit_strmov (rtx destmem, rtx srcmem,
17832 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17834 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17835 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17836 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17839 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
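/* Worked example: on a 64-bit target with a constant COUNT = 13 (0b1101) and
   MAX_SIZE = 16, this emits one DImode move at offset 0, one SImode move at
   offset 8 and one QImode move at offset 12, i.e. 8 + 4 + 1 = 13 bytes,
   without any loop.  */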
17840 static void
17841 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17842 rtx destptr, rtx srcptr, rtx count, int max_size)
17844 rtx src, dest;
17845 if (CONST_INT_P (count))
17847 HOST_WIDE_INT countval = INTVAL (count);
17848 int offset = 0;
17850 if ((countval & 0x10) && max_size > 16)
17852 if (TARGET_64BIT)
17854 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17855 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17857 else
17858 gcc_unreachable ();
17859 offset += 16;
17861 if ((countval & 0x08) && max_size > 8)
17863 if (TARGET_64BIT)
17864 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17865 else
17867 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17868 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17870 offset += 8;
17872 if ((countval & 0x04) && max_size > 4)
17874 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17875 offset += 4;
17877 if ((countval & 0x02) && max_size > 2)
17879 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17880 offset += 2;
17882 if ((countval & 0x01) && max_size > 1)
17884 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17885 offset += 1;
17887 return;
17889 if (max_size > 8)
17891 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17892 count, 1, OPTAB_DIRECT);
17893 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17894 count, QImode, 1, 4);
17895 return;
17898 /* When there are stringops, we can cheaply increase the dest and src
17899    pointers.  Otherwise we save code size by maintaining an offset (zero is
17900    readily available from the preceding rep operation) and using x86 addressing modes.  */
17902 if (TARGET_SINGLE_STRINGOP)
17904 if (max_size > 4)
17906 rtx label = ix86_expand_aligntest (count, 4, true);
17907 src = change_address (srcmem, SImode, srcptr);
17908 dest = change_address (destmem, SImode, destptr);
17909 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17910 emit_label (label);
17911 LABEL_NUSES (label) = 1;
17913 if (max_size > 2)
17915 rtx label = ix86_expand_aligntest (count, 2, true);
17916 src = change_address (srcmem, HImode, srcptr);
17917 dest = change_address (destmem, HImode, destptr);
17918 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17919 emit_label (label);
17920 LABEL_NUSES (label) = 1;
17922 if (max_size > 1)
17924 rtx label = ix86_expand_aligntest (count, 1, true);
17925 src = change_address (srcmem, QImode, srcptr);
17926 dest = change_address (destmem, QImode, destptr);
17927 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17928 emit_label (label);
17929 LABEL_NUSES (label) = 1;
17932 else
17934 rtx offset = force_reg (Pmode, const0_rtx);
17935 rtx tmp;
17937 if (max_size > 4)
17939 rtx label = ix86_expand_aligntest (count, 4, true);
17940 src = change_address (srcmem, SImode, srcptr);
17941 dest = change_address (destmem, SImode, destptr);
17942 emit_move_insn (dest, src);
17943 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17944 true, OPTAB_LIB_WIDEN);
17945 if (tmp != offset)
17946 emit_move_insn (offset, tmp);
17947 emit_label (label);
17948 LABEL_NUSES (label) = 1;
17950 if (max_size > 2)
17952 rtx label = ix86_expand_aligntest (count, 2, true);
17953 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17954 src = change_address (srcmem, HImode, tmp);
17955 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17956 dest = change_address (destmem, HImode, tmp);
17957 emit_move_insn (dest, src);
17958 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17959 true, OPTAB_LIB_WIDEN);
17960 if (tmp != offset)
17961 emit_move_insn (offset, tmp);
17962 emit_label (label);
17963 LABEL_NUSES (label) = 1;
17965 if (max_size > 1)
17967 rtx label = ix86_expand_aligntest (count, 1, true);
17968 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17969 src = change_address (srcmem, QImode, tmp);
17970 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17971 dest = change_address (destmem, QImode, tmp);
17972 emit_move_insn (dest, src);
17973 emit_label (label);
17974 LABEL_NUSES (label) = 1;
17979 /* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
17980 static void
17981 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17982 rtx count, int max_size)
17984 count =
17985 expand_simple_binop (counter_mode (count), AND, count,
17986 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17987 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17988 gen_lowpart (QImode, value), count, QImode,
17989 1, max_size / 2);
17992 /* Output code to set at most count & (max_size - 1) bytes starting at DEST.  */
17993 static void
17994 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17996 rtx dest;
17998 if (CONST_INT_P (count))
18000 HOST_WIDE_INT countval = INTVAL (count);
18001 int offset = 0;
18003 if ((countval & 0x10) && max_size > 16)
18005 if (TARGET_64BIT)
18007 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
18008 emit_insn (gen_strset (destptr, dest, value));
18009 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
18010 emit_insn (gen_strset (destptr, dest, value));
18012 else
18013 gcc_unreachable ();
18014 offset += 16;
18016 if ((countval & 0x08) && max_size > 8)
18018 if (TARGET_64BIT)
18020 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
18021 emit_insn (gen_strset (destptr, dest, value));
18023 else
18025 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
18026 emit_insn (gen_strset (destptr, dest, value));
18027 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
18028 emit_insn (gen_strset (destptr, dest, value));
18030 offset += 8;
18032 if ((countval & 0x04) && max_size > 4)
18034 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
18035 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
18036 offset += 4;
18038 if ((countval & 0x02) && max_size > 2)
18040 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
18041 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
18042 offset += 2;
18044 if ((countval & 0x01) && max_size > 1)
18046 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
18047 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
18048 offset += 1;
18050 return;
18052 if (max_size > 32)
18054 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
18055 return;
18057 if (max_size > 16)
18059 rtx label = ix86_expand_aligntest (count, 16, true);
18060 if (TARGET_64BIT)
18062 dest = change_address (destmem, DImode, destptr);
18063 emit_insn (gen_strset (destptr, dest, value));
18064 emit_insn (gen_strset (destptr, dest, value));
18066 else
18068 dest = change_address (destmem, SImode, destptr);
18069 emit_insn (gen_strset (destptr, dest, value));
18070 emit_insn (gen_strset (destptr, dest, value));
18071 emit_insn (gen_strset (destptr, dest, value));
18072 emit_insn (gen_strset (destptr, dest, value));
18074 emit_label (label);
18075 LABEL_NUSES (label) = 1;
18077 if (max_size > 8)
18079 rtx label = ix86_expand_aligntest (count, 8, true);
18080 if (TARGET_64BIT)
18082 dest = change_address (destmem, DImode, destptr);
18083 emit_insn (gen_strset (destptr, dest, value));
18085 else
18087 dest = change_address (destmem, SImode, destptr);
18088 emit_insn (gen_strset (destptr, dest, value));
18089 emit_insn (gen_strset (destptr, dest, value));
18091 emit_label (label);
18092 LABEL_NUSES (label) = 1;
18094 if (max_size > 4)
18096 rtx label = ix86_expand_aligntest (count, 4, true);
18097 dest = change_address (destmem, SImode, destptr);
18098 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
18099 emit_label (label);
18100 LABEL_NUSES (label) = 1;
18102 if (max_size > 2)
18104 rtx label = ix86_expand_aligntest (count, 2, true);
18105 dest = change_address (destmem, HImode, destptr);
18106 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
18107 emit_label (label);
18108 LABEL_NUSES (label) = 1;
18110 if (max_size > 1)
18112 rtx label = ix86_expand_aligntest (count, 1, true);
18113 dest = change_address (destmem, QImode, destptr);
18114 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
18115 emit_label (label);
18116 LABEL_NUSES (label) = 1;
18120 /* Copy enough bytes from SRC to DEST to align DEST, known to be aligned
18121    by ALIGN, to DESIRED_ALIGNMENT.  */
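/* Rough sketch for ALIGN == 1 and DESIRED_ALIGNMENT == 8:

     if (!(destptr & 1)) goto L1;  copy 1 byte;   count -= 1;  L1:
     if (!(destptr & 2)) goto L2;  copy 2 bytes;  count -= 2;  L2:
     if (!(destptr & 4)) goto L4;  copy 4 bytes;  count -= 4;  L4:

   so at most DESIRED_ALIGNMENT - ALIGN = 7 bytes are copied before the
   destination becomes 8-byte aligned.  */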
18122 static void
18123 expand_movmem_prologue (rtx destmem, rtx srcmem,
18124 rtx destptr, rtx srcptr, rtx count,
18125 int align, int desired_alignment)
18127 if (align <= 1 && desired_alignment > 1)
18129 rtx label = ix86_expand_aligntest (destptr, 1, false);
18130 srcmem = change_address (srcmem, QImode, srcptr);
18131 destmem = change_address (destmem, QImode, destptr);
18132 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18133 ix86_adjust_counter (count, 1);
18134 emit_label (label);
18135 LABEL_NUSES (label) = 1;
18137 if (align <= 2 && desired_alignment > 2)
18139 rtx label = ix86_expand_aligntest (destptr, 2, false);
18140 srcmem = change_address (srcmem, HImode, srcptr);
18141 destmem = change_address (destmem, HImode, destptr);
18142 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18143 ix86_adjust_counter (count, 2);
18144 emit_label (label);
18145 LABEL_NUSES (label) = 1;
18147 if (align <= 4 && desired_alignment > 4)
18149 rtx label = ix86_expand_aligntest (destptr, 4, false);
18150 srcmem = change_address (srcmem, SImode, srcptr);
18151 destmem = change_address (destmem, SImode, destptr);
18152 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18153 ix86_adjust_counter (count, 4);
18154 emit_label (label);
18155 LABEL_NUSES (label) = 1;
18157 gcc_assert (desired_alignment <= 8);
18160 /* Copy enough bytes from SRC to DST to align DST to DESIRED_ALIGN.
18161    ALIGN_BYTES is how many bytes need to be copied.  */
18162 static rtx
18163 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
18164 int desired_align, int align_bytes)
18166 rtx src = *srcp;
18167 rtx src_size, dst_size;
18168 int off = 0;
18169 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
18170 if (src_align_bytes >= 0)
18171 src_align_bytes = desired_align - src_align_bytes;
18172 src_size = MEM_SIZE (src);
18173 dst_size = MEM_SIZE (dst);
18174 if (align_bytes & 1)
18176 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18177 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
18178 off = 1;
18179 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18181 if (align_bytes & 2)
18183 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18184 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
18185 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18186 set_mem_align (dst, 2 * BITS_PER_UNIT);
18187 if (src_align_bytes >= 0
18188 && (src_align_bytes & 1) == (align_bytes & 1)
18189 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
18190 set_mem_align (src, 2 * BITS_PER_UNIT);
18191 off = 2;
18192 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18194 if (align_bytes & 4)
18196 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18197 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
18198 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18199 set_mem_align (dst, 4 * BITS_PER_UNIT);
18200 if (src_align_bytes >= 0)
18202 unsigned int src_align = 0;
18203 if ((src_align_bytes & 3) == (align_bytes & 3))
18204 src_align = 4;
18205 else if ((src_align_bytes & 1) == (align_bytes & 1))
18206 src_align = 2;
18207 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18208 set_mem_align (src, src_align * BITS_PER_UNIT);
18210 off = 4;
18211 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18213 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18214 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
18215 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18216 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18217 if (src_align_bytes >= 0)
18219 unsigned int src_align = 0;
18220 if ((src_align_bytes & 7) == (align_bytes & 7))
18221 src_align = 8;
18222 else if ((src_align_bytes & 3) == (align_bytes & 3))
18223 src_align = 4;
18224 else if ((src_align_bytes & 1) == (align_bytes & 1))
18225 src_align = 2;
18226 if (src_align > (unsigned int) desired_align)
18227 src_align = desired_align;
18228 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18229 set_mem_align (src, src_align * BITS_PER_UNIT);
18231 if (dst_size)
18232 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18233 if (src_size)
18234 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
18235 *srcp = src;
18236 return dst;
18239 /* Store enough into DEST to align DEST, known to be aligned by ALIGN,
18240    to DESIRED_ALIGNMENT.  */
18241 static void
18242 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
18243 int align, int desired_alignment)
18245 if (align <= 1 && desired_alignment > 1)
18247 rtx label = ix86_expand_aligntest (destptr, 1, false);
18248 destmem = change_address (destmem, QImode, destptr);
18249 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
18250 ix86_adjust_counter (count, 1);
18251 emit_label (label);
18252 LABEL_NUSES (label) = 1;
18254 if (align <= 2 && desired_alignment > 2)
18256 rtx label = ix86_expand_aligntest (destptr, 2, false);
18257 destmem = change_address (destmem, HImode, destptr);
18258 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
18259 ix86_adjust_counter (count, 2);
18260 emit_label (label);
18261 LABEL_NUSES (label) = 1;
18263 if (align <= 4 && desired_alignment > 4)
18265 rtx label = ix86_expand_aligntest (destptr, 4, false);
18266 destmem = change_address (destmem, SImode, destptr);
18267 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
18268 ix86_adjust_counter (count, 4);
18269 emit_label (label);
18270 LABEL_NUSES (label) = 1;
18272 gcc_assert (desired_alignment <= 8);
18275 /* Store enough into DST to align DST, known to be aligned by ALIGN, to
18276    DESIRED_ALIGN.  ALIGN_BYTES is how many bytes need to be stored.  */
18277 static rtx
18278 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18279 int desired_align, int align_bytes)
18281 int off = 0;
18282 rtx dst_size = MEM_SIZE (dst);
18283 if (align_bytes & 1)
18285 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18286 off = 1;
18287 emit_insn (gen_strset (destreg, dst,
18288 gen_lowpart (QImode, value)));
18290 if (align_bytes & 2)
18292 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18293 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18294 set_mem_align (dst, 2 * BITS_PER_UNIT);
18295 off = 2;
18296 emit_insn (gen_strset (destreg, dst,
18297 gen_lowpart (HImode, value)));
18299 if (align_bytes & 4)
18301 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18302 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18303 set_mem_align (dst, 4 * BITS_PER_UNIT);
18304 off = 4;
18305 emit_insn (gen_strset (destreg, dst,
18306 gen_lowpart (SImode, value)));
18308 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18309 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18310 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18311 if (dst_size)
18312 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18313 return dst;
18316 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
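/* For illustration: when optimizing for size and the rep prefixes are usable,
   a known COUNT that is a multiple of 4 selects rep_prefix_4_byte and
   anything else selects rep_prefix_1_byte; otherwise the choice comes from
   the per-size memcpy/memset tables of the active processor_costs structure,
   falling back to a libcall when nothing better applies.  */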
18317 static enum stringop_alg
18318 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18319 int *dynamic_check)
18321 const struct stringop_algs * algs;
18322 bool optimize_for_speed;
18323 /* Algorithms using the rep prefix want at least edi and ecx;
18324 additionally, memset wants eax and memcpy wants esi. Don't
18325 consider such algorithms if the user has appropriated those
18326 registers for their own purposes. */
18327 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
18328 || (memset
18329 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18331 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18332 || (alg != rep_prefix_1_byte \
18333 && alg != rep_prefix_4_byte \
18334 && alg != rep_prefix_8_byte))
18335 const struct processor_costs *cost;
18337 /* Even if the string operation call is cold, we still might spend a lot
18338 of time processing large blocks. */
18339 if (optimize_function_for_size_p (cfun)
18340 || (optimize_insn_for_size_p ()
18341 && expected_size != -1 && expected_size < 256))
18342 optimize_for_speed = false;
18343 else
18344 optimize_for_speed = true;
18346 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18348 *dynamic_check = -1;
18349 if (memset)
18350 algs = &cost->memset[TARGET_64BIT != 0];
18351 else
18352 algs = &cost->memcpy[TARGET_64BIT != 0];
18353 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18354 return stringop_alg;
18355 /* rep; movq or rep; movl is the smallest variant. */
18356 else if (!optimize_for_speed)
18358 if (!count || (count & 3))
18359 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18360 else
18361 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18363 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.  */
18365 else if (expected_size != -1 && expected_size < 4)
18366 return loop_1_byte;
18367 else if (expected_size != -1)
18369 unsigned int i;
18370 enum stringop_alg alg = libcall;
18371 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18373 /* We get here if the algorithms that were not libcall-based
18374 were rep-prefix based and we are unable to use rep prefixes
18375 based on global register usage. Break out of the loop and
18376 use the heuristic below. */
18377 if (algs->size[i].max == 0)
18378 break;
18379 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18381 enum stringop_alg candidate = algs->size[i].alg;
18383 if (candidate != libcall && ALG_USABLE_P (candidate))
18384 alg = candidate;
18385 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18386 last non-libcall inline algorithm. */
18387 if (TARGET_INLINE_ALL_STRINGOPS)
18389 /* When the current size is best to be copied by a libcall,
18390 but we are still forced to inline, run the heuristic below
18391 that will pick code for medium sized blocks. */
18392 if (alg != libcall)
18393 return alg;
18394 break;
18396 else if (ALG_USABLE_P (candidate))
18397 return candidate;
18400 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18402 /* When asked to inline the call anyway, try to pick a meaningful choice.
18403    We look for the maximal size of block that is faster to copy by hand and
18404    take blocks of at most that size, guessing that the average size will
18405    be roughly half of the block.
18407    If this turns out to be bad, we might simply specify the preferred
18408    choice in ix86_costs.  */
18409 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18410 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18412 int max = -1;
18413 enum stringop_alg alg;
18414 int i;
18415 bool any_alg_usable_p = true;
18417 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18419 enum stringop_alg candidate = algs->size[i].alg;
18420 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18422 if (candidate != libcall && candidate
18423 && ALG_USABLE_P (candidate))
18424 max = algs->size[i].max;
18426 /* If there aren't any usable algorithms, then recursing on
18427 smaller sizes isn't going to find anything. Just return the
18428 simple byte-at-a-time copy loop. */
18429 if (!any_alg_usable_p)
18431 /* Pick something reasonable. */
18432 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18433 *dynamic_check = 128;
18434 return loop_1_byte;
18436 if (max == -1)
18437 max = 4096;
18438 alg = decide_alg (count, max / 2, memset, dynamic_check);
18439 gcc_assert (*dynamic_check == -1);
18440 gcc_assert (alg != libcall);
18441 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18442 *dynamic_check = max;
18443 return alg;
18445 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18446 #undef ALG_USABLE_P
18449 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18450 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18451 static int
18452 decide_alignment (int align,
18453 enum stringop_alg alg,
18454 int expected_size)
18456 int desired_align = 0;
18457 switch (alg)
18459 case no_stringop:
18460 gcc_unreachable ();
18461 case loop:
18462 case unrolled_loop:
18463 desired_align = GET_MODE_SIZE (Pmode);
18464 break;
18465 case rep_prefix_8_byte:
18466 desired_align = 8;
18467 break;
18468 case rep_prefix_4_byte:
18469 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18470    copying a whole cacheline at once.  */
18471 if (TARGET_PENTIUMPRO)
18472 desired_align = 8;
18473 else
18474 desired_align = 4;
18475 break;
18476 case rep_prefix_1_byte:
18477 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18478    copying a whole cacheline at once.  */
18479 if (TARGET_PENTIUMPRO)
18480 desired_align = 8;
18481 else
18482 desired_align = 1;
18483 break;
18484 case loop_1_byte:
18485 desired_align = 1;
18486 break;
18487 case libcall:
18488 return 0;
18491 if (optimize_size)
18492 desired_align = 1;
18493 if (desired_align < align)
18494 desired_align = align;
18495 if (expected_size != -1 && expected_size < 4)
18496 desired_align = align;
18497 return desired_align;
18500 /* Return the smallest power of 2 greater than VAL. */
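/* For illustration, this returns the smallest power of two strictly greater
   than VAL: 0 -> 1, 1 -> 2, 4 -> 8, 7 -> 8.  */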
18501 static int
18502 smallest_pow2_greater_than (int val)
18504 int ret = 1;
18505 while (ret <= val)
18506 ret <<= 1;
18507 return ret;
18510 /* Expand string move (memcpy) operation.  Use i386 string operations when
18511    profitable.  expand_setmem contains similar code.  The code depends upon
18512    architecture, block size and alignment, but always has the same
18513    overall structure:
18515    1) Prologue guard: Conditional that jumps up to epilogues for small
18516       blocks that can be handled by the epilogue alone.  This is faster, but
18517       also needed for correctness, since the prologue assumes the block is
18518       larger than the desired alignment.
18520       An optional dynamic check for size and libcall for large
18521       blocks is emitted here too, with -minline-stringops-dynamically.
18523    2) Prologue: copy the first few bytes in order to get the destination
18524       aligned to DESIRED_ALIGN.  It is emitted only when ALIGN is less than
18525       DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18526       We emit either a jump tree (on power-of-two sized blocks) or a byte loop.
18528    3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18529       with the specified algorithm.
18531    4) Epilogue: code copying the tail of the block that is too small to be
18532       handled by the main body (or up to size guarded by the prologue guard).  */
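/* Roughly, for a non-constant COUNT the produced code has this shape:

     if (count < epilogue_size_needed) goto epilogue;              (1)
     [-minline-stringops-dynamically only:
        if (count >= dynamic_check) { memcpy libcall; goto done; }]
     copy up to desired_align - align bytes to align dst;          (2)
     main loop / rep prefix copying size_needed-byte chunks;       (3)
   epilogue:
     copy the remaining count & (epilogue_size_needed - 1) bytes;  (4)
   done:                                                                  */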
18535 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18536 rtx expected_align_exp, rtx expected_size_exp)
18538 rtx destreg;
18539 rtx srcreg;
18540 rtx label = NULL;
18541 rtx tmp;
18542 rtx jump_around_label = NULL;
18543 HOST_WIDE_INT align = 1;
18544 unsigned HOST_WIDE_INT count = 0;
18545 HOST_WIDE_INT expected_size = -1;
18546 int size_needed = 0, epilogue_size_needed;
18547 int desired_align = 0, align_bytes = 0;
18548 enum stringop_alg alg;
18549 int dynamic_check;
18550 bool need_zero_guard = false;
18552 if (CONST_INT_P (align_exp))
18553 align = INTVAL (align_exp);
18554 /* i386 can do misaligned access at a reasonably increased cost.  */
18555 if (CONST_INT_P (expected_align_exp)
18556 && INTVAL (expected_align_exp) > align)
18557 align = INTVAL (expected_align_exp);
18558 /* ALIGN is the minimum of destination and source alignment, but we care here
18559 just about destination alignment. */
18560 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18561 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18563 if (CONST_INT_P (count_exp))
18564 count = expected_size = INTVAL (count_exp);
18565 if (CONST_INT_P (expected_size_exp) && count == 0)
18566 expected_size = INTVAL (expected_size_exp);
18568 /* Make sure we don't need to care about overflow later on. */
18569 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18570 return 0;
18572 /* Step 0: Decide on preferred algorithm, desired alignment and
18573 size of chunks to be copied by main loop. */
18575 alg = decide_alg (count, expected_size, false, &dynamic_check);
18576 desired_align = decide_alignment (align, alg, expected_size);
18578 if (!TARGET_ALIGN_STRINGOPS)
18579 align = desired_align;
18581 if (alg == libcall)
18582 return 0;
18583 gcc_assert (alg != no_stringop);
18584 if (!count)
18585 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18586 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18587 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18588 switch (alg)
18590 case libcall:
18591 case no_stringop:
18592 gcc_unreachable ();
18593 case loop:
18594 need_zero_guard = true;
18595 size_needed = GET_MODE_SIZE (Pmode);
18596 break;
18597 case unrolled_loop:
18598 need_zero_guard = true;
18599 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18600 break;
18601 case rep_prefix_8_byte:
18602 size_needed = 8;
18603 break;
18604 case rep_prefix_4_byte:
18605 size_needed = 4;
18606 break;
18607 case rep_prefix_1_byte:
18608 size_needed = 1;
18609 break;
18610 case loop_1_byte:
18611 need_zero_guard = true;
18612 size_needed = 1;
18613 break;
18616 epilogue_size_needed = size_needed;
18618 /* Step 1: Prologue guard. */
18620 /* Alignment code needs count to be in register. */
18621 if (CONST_INT_P (count_exp) && desired_align > align)
18623 if (INTVAL (count_exp) > desired_align
18624 && INTVAL (count_exp) > size_needed)
18626 align_bytes
18627 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18628 if (align_bytes <= 0)
18629 align_bytes = 0;
18630 else
18631 align_bytes = desired_align - align_bytes;
18633 if (align_bytes == 0)
18634 count_exp = force_reg (counter_mode (count_exp), count_exp);
18636 gcc_assert (desired_align >= 1 && align >= 1);
18638 /* Ensure that alignment prologue won't copy past end of block. */
18639 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18641 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18642 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18643    Make sure it is a power of 2.  */
18644 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18646 if (count)
18648 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18650 /* If main algorithm works on QImode, no epilogue is needed.
18651 For small sizes just don't align anything. */
18652 if (size_needed == 1)
18653 desired_align = align;
18654 else
18655 goto epilogue;
18658 else
18660 label = gen_label_rtx ();
18661 emit_cmp_and_jump_insns (count_exp,
18662 GEN_INT (epilogue_size_needed),
18663 LTU, 0, counter_mode (count_exp), 1, label);
18664 if (expected_size == -1 || expected_size < epilogue_size_needed)
18665 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18666 else
18667 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18671 /* Emit code to decide at runtime whether a library call or inline code
18672    should be used.  */
18673 if (dynamic_check != -1)
18675 if (CONST_INT_P (count_exp))
18677 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18679 emit_block_move_via_libcall (dst, src, count_exp, false);
18680 count_exp = const0_rtx;
18681 goto epilogue;
18684 else
18686 rtx hot_label = gen_label_rtx ();
18687 jump_around_label = gen_label_rtx ();
18688 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18689 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18690 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18691 emit_block_move_via_libcall (dst, src, count_exp, false);
18692 emit_jump (jump_around_label);
18693 emit_label (hot_label);
18697 /* Step 2: Alignment prologue. */
18699 if (desired_align > align)
18701 if (align_bytes == 0)
18703 /* Except for the first move in the epilogue, we no longer know the
18704    constant offset in aliasing info.  It doesn't seem worth the pain to
18705    maintain it for the first move, so throw away
18706    the info early.  */
18707 src = change_address (src, BLKmode, srcreg);
18708 dst = change_address (dst, BLKmode, destreg);
18709 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18710 desired_align);
18712 else
18714 /* If we know how many bytes need to be stored before dst is
18715 sufficiently aligned, maintain aliasing info accurately. */
18716 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18717 desired_align, align_bytes);
18718 count_exp = plus_constant (count_exp, -align_bytes);
18719 count -= align_bytes;
18721 if (need_zero_guard
18722 && (count < (unsigned HOST_WIDE_INT) size_needed
18723 || (align_bytes == 0
18724 && count < ((unsigned HOST_WIDE_INT) size_needed
18725 + desired_align - align))))
18727 /* It is possible that we copied enough so the main loop will not
18728 execute. */
18729 gcc_assert (size_needed > 1);
18730 if (label == NULL_RTX)
18731 label = gen_label_rtx ();
18732 emit_cmp_and_jump_insns (count_exp,
18733 GEN_INT (size_needed),
18734 LTU, 0, counter_mode (count_exp), 1, label);
18735 if (expected_size == -1
18736 || expected_size < (desired_align - align) / 2 + size_needed)
18737 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18738 else
18739 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18742 if (label && size_needed == 1)
18744 emit_label (label);
18745 LABEL_NUSES (label) = 1;
18746 label = NULL;
18747 epilogue_size_needed = 1;
18749 else if (label == NULL_RTX)
18750 epilogue_size_needed = size_needed;
18752 /* Step 3: Main loop. */
18754 switch (alg)
18756 case libcall:
18757 case no_stringop:
18758 gcc_unreachable ();
18759 case loop_1_byte:
18760 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18761 count_exp, QImode, 1, expected_size);
18762 break;
18763 case loop:
18764 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18765 count_exp, Pmode, 1, expected_size);
18766 break;
18767 case unrolled_loop:
18768 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
18769    registers for 4 temporaries anyway.  */
18770 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18771 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18772 expected_size);
18773 break;
18774 case rep_prefix_8_byte:
18775 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18776 DImode);
18777 break;
18778 case rep_prefix_4_byte:
18779 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18780 SImode);
18781 break;
18782 case rep_prefix_1_byte:
18783 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18784 QImode);
18785 break;
18787 /* Properly adjust the offset of the src and dest memory for aliasing.  */
18788 if (CONST_INT_P (count_exp))
18790 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18791 (count / size_needed) * size_needed);
18792 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18793 (count / size_needed) * size_needed);
18795 else
18797 src = change_address (src, BLKmode, srcreg);
18798 dst = change_address (dst, BLKmode, destreg);
18801 /* Step 4: Epilogue to copy the remaining bytes. */
18802 epilogue:
18803 if (label)
18805 /* When the main loop is done, COUNT_EXP might hold the original count,
18806    while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18807    The epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18808    bytes.  Compensate if needed.  */
18810 if (size_needed < epilogue_size_needed)
18812 tmp =
18813 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18814 GEN_INT (size_needed - 1), count_exp, 1,
18815 OPTAB_DIRECT);
18816 if (tmp != count_exp)
18817 emit_move_insn (count_exp, tmp);
18819 emit_label (label);
18820 LABEL_NUSES (label) = 1;
18823 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18824 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18825 epilogue_size_needed);
18826 if (jump_around_label)
18827 emit_label (jump_around_label);
18828 return 1;
18831 /* Helper function for memset.  For a QImode value 0xXY produce
18832    0xXYXYXYXY of the width specified by MODE.  This is essentially
18833    a * 0x01010101, but we can do slightly better than
18834    synth_mult by unwinding the sequence by hand on CPUs with
18835    slow multiply.  */
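/* Worked example: for a constant VAL = 0x5A and MODE == SImode the value is
   built at compile time as

     v = 0x5A;  v |= v << 8;   ->  0x5A5A
                v |= v << 16;  ->  0x5A5A5A5A

   For a non-constant VAL the same result comes either from a multiply by the
   promoted 0x01010101 constant or from the shift/or (or insv) sequence below,
   whichever the cost tables say is cheaper.  */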
18836 static rtx
18837 promote_duplicated_reg (enum machine_mode mode, rtx val)
18839 enum machine_mode valmode = GET_MODE (val);
18840 rtx tmp;
18841 int nops = mode == DImode ? 3 : 2;
18843 gcc_assert (mode == SImode || mode == DImode);
18844 if (val == const0_rtx)
18845 return copy_to_mode_reg (mode, const0_rtx);
18846 if (CONST_INT_P (val))
18848 HOST_WIDE_INT v = INTVAL (val) & 255;
18850 v |= v << 8;
18851 v |= v << 16;
18852 if (mode == DImode)
18853 v |= (v << 16) << 16;
18854 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18857 if (valmode == VOIDmode)
18858 valmode = QImode;
18859 if (valmode != QImode)
18860 val = gen_lowpart (QImode, val);
18861 if (mode == QImode)
18862 return val;
18863 if (!TARGET_PARTIAL_REG_STALL)
18864 nops--;
18865 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18866 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18867 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18868 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18870 rtx reg = convert_modes (mode, QImode, val, true);
18871 tmp = promote_duplicated_reg (mode, const1_rtx);
18872 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18873 OPTAB_DIRECT);
18875 else
18877 rtx reg = convert_modes (mode, QImode, val, true);
18879 if (!TARGET_PARTIAL_REG_STALL)
18880 if (mode == SImode)
18881 emit_insn (gen_movsi_insv_1 (reg, reg));
18882 else
18883 emit_insn (gen_movdi_insv_1 (reg, reg));
18884 else
18886 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18887 NULL, 1, OPTAB_DIRECT);
18888 reg =
18889 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18891 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18892 NULL, 1, OPTAB_DIRECT);
18893 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18894 if (mode == SImode)
18895 return reg;
18896 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18897 NULL, 1, OPTAB_DIRECT);
18898 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18899 return reg;
18903 /* Duplicate value VAL using promote_duplicated_reg into the maximal size that
18904    will be needed by the main loop copying SIZE_NEEDED chunks and by the
18905    prologue getting alignment from ALIGN to DESIRED_ALIGN.  */
18906 static rtx
18907 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18909 rtx promoted_val;
18911 if (TARGET_64BIT
18912 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18913 promoted_val = promote_duplicated_reg (DImode, val);
18914 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18915 promoted_val = promote_duplicated_reg (SImode, val);
18916 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18917 promoted_val = promote_duplicated_reg (HImode, val);
18918 else
18919 promoted_val = val;
18921 return promoted_val;
18924 /* Expand string clear operation (bzero).  Use i386 string operations when
18925    profitable.  See the expand_movmem comment for an explanation of the
18926    individual steps performed.  */
18928 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18929 rtx expected_align_exp, rtx expected_size_exp)
18931 rtx destreg;
18932 rtx label = NULL;
18933 rtx tmp;
18934 rtx jump_around_label = NULL;
18935 HOST_WIDE_INT align = 1;
18936 unsigned HOST_WIDE_INT count = 0;
18937 HOST_WIDE_INT expected_size = -1;
18938 int size_needed = 0, epilogue_size_needed;
18939 int desired_align = 0, align_bytes = 0;
18940 enum stringop_alg alg;
18941 rtx promoted_val = NULL;
18942 bool force_loopy_epilogue = false;
18943 int dynamic_check;
18944 bool need_zero_guard = false;
18946 if (CONST_INT_P (align_exp))
18947 align = INTVAL (align_exp);
18948 /* i386 can do misaligned access at a reasonably increased cost.  */
18949 if (CONST_INT_P (expected_align_exp)
18950 && INTVAL (expected_align_exp) > align)
18951 align = INTVAL (expected_align_exp);
18952 if (CONST_INT_P (count_exp))
18953 count = expected_size = INTVAL (count_exp);
18954 if (CONST_INT_P (expected_size_exp) && count == 0)
18955 expected_size = INTVAL (expected_size_exp);
18957 /* Make sure we don't need to care about overflow later on. */
18958 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18959 return 0;
18961 /* Step 0: Decide on preferred algorithm, desired alignment and
18962 size of chunks to be copied by main loop. */
18964 alg = decide_alg (count, expected_size, true, &dynamic_check);
18965 desired_align = decide_alignment (align, alg, expected_size);
18967 if (!TARGET_ALIGN_STRINGOPS)
18968 align = desired_align;
18970 if (alg == libcall)
18971 return 0;
18972 gcc_assert (alg != no_stringop);
18973 if (!count)
18974 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18975 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18976 switch (alg)
18978 case libcall:
18979 case no_stringop:
18980 gcc_unreachable ();
18981 case loop:
18982 need_zero_guard = true;
18983 size_needed = GET_MODE_SIZE (Pmode);
18984 break;
18985 case unrolled_loop:
18986 need_zero_guard = true;
18987 size_needed = GET_MODE_SIZE (Pmode) * 4;
18988 break;
18989 case rep_prefix_8_byte:
18990 size_needed = 8;
18991 break;
18992 case rep_prefix_4_byte:
18993 size_needed = 4;
18994 break;
18995 case rep_prefix_1_byte:
18996 size_needed = 1;
18997 break;
18998 case loop_1_byte:
18999 need_zero_guard = true;
19000 size_needed = 1;
19001 break;
19003 epilogue_size_needed = size_needed;
19005 /* Step 1: Prologue guard. */
19007 /* Alignment code needs count to be in register. */
19008 if (CONST_INT_P (count_exp) && desired_align > align)
19010 if (INTVAL (count_exp) > desired_align
19011 && INTVAL (count_exp) > size_needed)
19013 align_bytes
19014 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
19015 if (align_bytes <= 0)
19016 align_bytes = 0;
19017 else
19018 align_bytes = desired_align - align_bytes;
19020 if (align_bytes == 0)
19022 enum machine_mode mode = SImode;
19023 if (TARGET_64BIT && (count & ~0xffffffff))
19024 mode = DImode;
19025 count_exp = force_reg (mode, count_exp);
19028 /* Do the cheap promotion to allow better CSE across the
19029    main loop and epilogue (i.e. one load of the big constant in
19030    front of all the code).  */
19031 if (CONST_INT_P (val_exp))
19032 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
19033 desired_align, align);
19034 /* Ensure that alignment prologue won't copy past end of block. */
19035 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
19037 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
19038 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
19039 Make sure it is power of 2. */
19040 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
19042 /* To improve performance of small blocks, we jump around the VAL
19043    promoting code.  This means that if the promoted VAL is not a constant,
19044    we might not use it in the epilogue and have to use the byte
19045    loop variant.  */
19046 if (epilogue_size_needed > 2 && !promoted_val)
19047 force_loopy_epilogue = true;
19048 if (count)
19050 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
19052 /* If main algorithm works on QImode, no epilogue is needed.
19053 For small sizes just don't align anything. */
19054 if (size_needed == 1)
19055 desired_align = align;
19056 else
19057 goto epilogue;
19060 else
19062 label = gen_label_rtx ();
19063 emit_cmp_and_jump_insns (count_exp,
19064 GEN_INT (epilogue_size_needed),
19065 LTU, 0, counter_mode (count_exp), 1, label);
19066 if (expected_size == -1 || expected_size <= epilogue_size_needed)
19067 predict_jump (REG_BR_PROB_BASE * 60 / 100);
19068 else
19069 predict_jump (REG_BR_PROB_BASE * 20 / 100);
19072 if (dynamic_check != -1)
19074 rtx hot_label = gen_label_rtx ();
19075 jump_around_label = gen_label_rtx ();
19076 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
19077 LEU, 0, counter_mode (count_exp), 1, hot_label);
19078 predict_jump (REG_BR_PROB_BASE * 90 / 100);
19079 set_storage_via_libcall (dst, count_exp, val_exp, false);
19080 emit_jump (jump_around_label);
19081 emit_label (hot_label);
19084 /* Step 2: Alignment prologue. */
19086 /* Do the expensive promotion once we branched off the small blocks. */
19087 if (!promoted_val)
19088 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
19089 desired_align, align);
19090 gcc_assert (desired_align >= 1 && align >= 1);
19092 if (desired_align > align)
19094 if (align_bytes == 0)
19096 /* Except for the first move in the epilogue, we no longer know the
19097    constant offset in aliasing info.  It doesn't seem worth the pain to
19098    maintain it for the first move, so throw away
19099    the info early.  */
19100 dst = change_address (dst, BLKmode, destreg);
19101 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
19102 desired_align);
19104 else
19106 /* If we know how many bytes need to be stored before dst is
19107 sufficiently aligned, maintain aliasing info accurately. */
19108 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
19109 desired_align, align_bytes);
19110 count_exp = plus_constant (count_exp, -align_bytes);
19111 count -= align_bytes;
19113 if (need_zero_guard
19114 && (count < (unsigned HOST_WIDE_INT) size_needed
19115 || (align_bytes == 0
19116 && count < ((unsigned HOST_WIDE_INT) size_needed
19117 + desired_align - align))))
19119 /* It is possible that we copied enough so the main loop will not
19120 execute. */
19121 gcc_assert (size_needed > 1);
19122 if (label == NULL_RTX)
19123 label = gen_label_rtx ();
19124 emit_cmp_and_jump_insns (count_exp,
19125 GEN_INT (size_needed),
19126 LTU, 0, counter_mode (count_exp), 1, label);
19127 if (expected_size == -1
19128 || expected_size < (desired_align - align) / 2 + size_needed)
19129 predict_jump (REG_BR_PROB_BASE * 20 / 100);
19130 else
19131 predict_jump (REG_BR_PROB_BASE * 60 / 100);
19134 if (label && size_needed == 1)
19136 emit_label (label);
19137 LABEL_NUSES (label) = 1;
19138 label = NULL;
19139 promoted_val = val_exp;
19140 epilogue_size_needed = 1;
19142 else if (label == NULL_RTX)
19143 epilogue_size_needed = size_needed;
19145 /* Step 3: Main loop. */
19147 switch (alg)
19149 case libcall:
19150 case no_stringop:
19151 gcc_unreachable ();
19152 case loop_1_byte:
19153 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19154 count_exp, QImode, 1, expected_size);
19155 break;
19156 case loop:
19157 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19158 count_exp, Pmode, 1, expected_size);
19159 break;
19160 case unrolled_loop:
19161 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19162 count_exp, Pmode, 4, expected_size);
19163 break;
19164 case rep_prefix_8_byte:
19165 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19166 DImode, val_exp);
19167 break;
19168 case rep_prefix_4_byte:
19169 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19170 SImode, val_exp);
19171 break;
19172 case rep_prefix_1_byte:
19173 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19174 QImode, val_exp);
19175 break;
19177 /* Properly adjust the offset of the dest memory for aliasing.  */
19178 if (CONST_INT_P (count_exp))
19179 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
19180 (count / size_needed) * size_needed);
19181 else
19182 dst = change_address (dst, BLKmode, destreg);
19184 /* Step 4: Epilogue to copy the remaining bytes. */
19186 if (label)
19188 /* When the main loop is done, COUNT_EXP might hold the original count,
19189    while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
19190    The epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
19191    bytes.  Compensate if needed.  */
19193 if (size_needed < epilogue_size_needed)
19195 tmp =
19196 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
19197 GEN_INT (size_needed - 1), count_exp, 1,
19198 OPTAB_DIRECT);
19199 if (tmp != count_exp)
19200 emit_move_insn (count_exp, tmp);
19202 emit_label (label);
19203 LABEL_NUSES (label) = 1;
19205 epilogue:
19206 if (count_exp != const0_rtx && epilogue_size_needed > 1)
19208 if (force_loopy_epilogue)
19209 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
19210 epilogue_size_needed);
19211 else
19212 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
19213 epilogue_size_needed);
19215 if (jump_around_label)
19216 emit_label (jump_around_label);
19217 return 1;
19220 /* Expand the appropriate insns for doing strlen if not just doing
19221    repnz; scasb.
19223    out = result, initialized with the start address
19224    align_rtx = alignment of the address.
19225    scratch = scratch register, initialized with the start address when
19226    not aligned, otherwise undefined
19228    This is just the body.  It needs the initializations mentioned above and
19229    some address computing at the end.  These things are done in i386.md.  */
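/* Rough shape of the emitted code when the alignment of OUT is unknown:

     compare up to 3 single bytes against 0 until OUT is 4-byte aligned,
     jumping to the end as soon as a zero byte is found;
   aligned_loop:
     load 4 bytes;  out += 4;
     if no byte of the loaded word is zero (see the bit trick below)
       goto aligned_loop;
     locate the zero byte within the word, using cmov or two conditional
     jumps, and adjust OUT accordingly.  */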
19231 static void
19232 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
19234 int align;
19235 rtx tmp;
19236 rtx align_2_label = NULL_RTX;
19237 rtx align_3_label = NULL_RTX;
19238 rtx align_4_label = gen_label_rtx ();
19239 rtx end_0_label = gen_label_rtx ();
19240 rtx mem;
19241 rtx tmpreg = gen_reg_rtx (SImode);
19242 rtx scratch = gen_reg_rtx (SImode);
19243 rtx cmp;
19245 align = 0;
19246 if (CONST_INT_P (align_rtx))
19247 align = INTVAL (align_rtx);
19249 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
19251 /* Is there a known alignment and is it less than 4? */
19252 if (align < 4)
19254 rtx scratch1 = gen_reg_rtx (Pmode);
19255 emit_move_insn (scratch1, out);
19256 /* Is there a known alignment and is it not 2? */
19257 if (align != 2)
19259 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
19260 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
19262 /* Leave just the 3 lower bits. */
19263 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
19264 NULL_RTX, 0, OPTAB_WIDEN);
19266 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19267 Pmode, 1, align_4_label);
19268 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
19269 Pmode, 1, align_2_label);
19270 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
19271 Pmode, 1, align_3_label);
19273 else
19275 /* Since the alignment is 2, we have to check 2 or 0 bytes;
19276 check whether it is aligned to a 4-byte boundary. */
19278 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19279 NULL_RTX, 0, OPTAB_WIDEN);
19281 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19282 Pmode, 1, align_4_label);
19285 mem = change_address (src, QImode, out);
19287 /* Now compare the bytes. */
19289 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
19290 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19291 QImode, 1, end_0_label);
19293 /* Increment the address. */
19294 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19296 /* Not needed with an alignment of 2 */
19297 if (align != 2)
19299 emit_label (align_2_label);
19301 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19302 end_0_label);
19304 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19306 emit_label (align_3_label);
19309 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19310 end_0_label);
19312 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19315 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
19316 align this loop; that only bloats the code and does not help to
19317 speed it up. */
19318 emit_label (align_4_label);
19320 mem = change_address (src, SImode, out);
19321 emit_move_insn (scratch, mem);
19322 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
19324 /* This formula yields a nonzero result iff one of the bytes is zero.
19325 This saves three branches inside the loop and many cycles. */
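/* Illustration of the trick, i.e. (x - 0x01010101) & ~x & 0x80808080,
   for x = 0x11002233:
     x - 0x01010101       = 0x0fff2132
     ~x                   = 0xeeffddcc
     and of the two       = 0x0eff0100
     masked by 0x80808080 = 0x00800000
   The result is nonzero, so a zero byte exists; the lowest set 0x80 bit
   corresponds to the first zero byte.  */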
19327 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19328 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19329 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19330 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19331 gen_int_mode (0x80808080, SImode)));
19332 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
19333 align_4_label);
19335 if (TARGET_CMOVE)
19337 rtx reg = gen_reg_rtx (SImode);
19338 rtx reg2 = gen_reg_rtx (Pmode);
19339 emit_move_insn (reg, tmpreg);
19340 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19342 /* If zero is not in the first two bytes, move two bytes forward. */
19343 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19344 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19345 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19346 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19347 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19348 reg,
19349 tmpreg)));
19350 /* Emit lea manually to avoid clobbering of flags. */
19351 emit_insn (gen_rtx_SET (SImode, reg2,
19352 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19354 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19355 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19356 emit_insn (gen_rtx_SET (VOIDmode, out,
19357 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19358 reg2,
19359 out)));
19361 else
19363 rtx end_2_label = gen_label_rtx ();
19364 /* Is zero in the first two bytes? */
19366 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19367 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19368 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19369 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19370 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19371 pc_rtx);
19372 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19373 JUMP_LABEL (tmp) = end_2_label;
19375 /* Not in the first two. Move two bytes forward. */
19376 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19377 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19379 emit_label (end_2_label);
19383 /* Avoid branch in fixing the byte. */
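/* At this point the low byte of tmpreg has 0x80 set iff the zero byte was
   the earlier of the two bytes still under consideration.  Adding the low
   byte to itself moves that bit into the carry flag, and the sbb below
   then subtracts either 4 or 3 from OUT so that OUT ends up pointing at
   the zero byte itself.  */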
19384 tmpreg = gen_lowpart (QImode, tmpreg);
19385 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19386 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19387 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19388 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19390 emit_label (end_0_label);
19393 /* Expand strlen. */
19396 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19398 rtx addr, scratch1, scratch2, scratch3, scratch4;
19400 /* The generic case of the strlen expander is long. Avoid expanding
19401 it unless TARGET_INLINE_ALL_STRINGOPS. */
19403 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19404 && !TARGET_INLINE_ALL_STRINGOPS
19405 && !optimize_insn_for_size_p ()
19406 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19407 return 0;
19409 addr = force_reg (Pmode, XEXP (src, 0));
19410 scratch1 = gen_reg_rtx (Pmode);
19412 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19413 && !optimize_insn_for_size_p ())
19415 /* It seems that some optimizers do not combine a call like
19416 foo(strlen(bar), strlen(bar));
19417 when the move and the subtraction are done here. They do calculate
19418 the length just once when these instructions are done inside of
19419 output_strlen_unroll(). But since &bar[strlen(bar)] is
19420 often used and this uses one fewer register for the lifetime of
19421 output_strlen_unroll(), this is better. */
19423 emit_move_insn (out, addr);
19425 ix86_expand_strlensi_unroll_1 (out, src, align);
19427 /* strlensi_unroll_1 returns the address of the zero at the end of
19428 the string, like memchr(), so compute the length by subtracting
19429 the start address. */
19430 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19432 else
19434 rtx unspec;
19436 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19437 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19438 return false;
19440 scratch2 = gen_reg_rtx (Pmode);
19441 scratch3 = gen_reg_rtx (Pmode);
19442 scratch4 = force_reg (Pmode, constm1_rtx);
19444 emit_move_insn (scratch3, addr);
19445 eoschar = force_reg (QImode, eoschar);
19447 src = replace_equiv_address_nv (src, scratch3);
19449 /* If .md starts supporting :P, this can be done in .md. */
19450 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19451 scratch4), UNSPEC_SCAS);
19452 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19453 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19454 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
19456 return 1;
19459 /* For a given symbol (function), construct code to compute the address of its
19460 PLT entry in the large x86-64 PIC model. */
19462 construct_plt_address (rtx symbol)
19464 rtx tmp = gen_reg_rtx (Pmode);
19465 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19467 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19468 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19470 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19471 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19472 return tmp;
19475 void
19476 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19477 rtx callarg2,
19478 rtx pop, int sibcall)
19480 rtx use = NULL, call;
19482 if (pop == const0_rtx)
19483 pop = NULL;
19484 gcc_assert (!TARGET_64BIT || !pop);
19486 if (TARGET_MACHO && !TARGET_64BIT)
19488 #if TARGET_MACHO
19489 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19490 fnaddr = machopic_indirect_call_target (fnaddr);
19491 #endif
19493 else
19495 /* Static functions and indirect calls don't need the pic register. */
19496 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19497 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19498 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19499 use_reg (&use, pic_offset_table_rtx);
19502 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19504 rtx al = gen_rtx_REG (QImode, AX_REG);
19505 emit_move_insn (al, callarg2);
19506 use_reg (&use, al);
19509 if (ix86_cmodel == CM_LARGE_PIC
19510 && MEM_P (fnaddr)
19511 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19512 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19513 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19514 else if (sibcall
19515 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19516 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19518 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19519 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19522 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19523 if (retval)
19524 call = gen_rtx_SET (VOIDmode, retval, call);
19525 if (pop)
19527 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19528 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19529 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19531 if (TARGET_64BIT
19532 && ix86_cfun_abi () == MS_ABI
19533 && (!callarg2 || INTVAL (callarg2) != -2))
19535 /* We need to represent that SI and DI registers are clobbered
19536 by SYSV calls. */
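/* Under the MS ABI, xmm6-xmm15, rsi and rdi are callee-saved, but a SysV
   callee is free to clobber them, so the call has to carry explicit
   clobbers for these registers.  */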
19537 static int clobbered_registers[] = {
19538 XMM6_REG, XMM7_REG, XMM8_REG,
19539 XMM9_REG, XMM10_REG, XMM11_REG,
19540 XMM12_REG, XMM13_REG, XMM14_REG,
19541 XMM15_REG, SI_REG, DI_REG
19543 unsigned int i;
19544 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19545 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19546 UNSPEC_MS_TO_SYSV_CALL);
19548 vec[0] = call;
19549 vec[1] = unspec;
19550 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19551 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19552 ? TImode : DImode,
19553 gen_rtx_REG
19554 (SSE_REGNO_P (clobbered_registers[i])
19555 ? TImode : DImode,
19556 clobbered_registers[i]));
19558 call = gen_rtx_PARALLEL (VOIDmode,
19559 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19560 + 2, vec));
19563 call = emit_call_insn (call);
19564 if (use)
19565 CALL_INSN_FUNCTION_USAGE (call) = use;
19569 /* Clear stack slot assignments remembered from previous functions.
19570 This is called from INIT_EXPANDERS once before RTL is emitted for each
19571 function. */
19573 static struct machine_function *
19574 ix86_init_machine_status (void)
19576 struct machine_function *f;
19578 f = ggc_alloc_cleared_machine_function ();
19579 f->use_fast_prologue_epilogue_nregs = -1;
19580 f->tls_descriptor_call_expanded_p = 0;
19581 f->call_abi = ix86_abi;
19583 return f;
19586 /* Return a MEM corresponding to a stack slot with mode MODE.
19587 Allocate a new slot if necessary.
19589 The RTL for a function can have several slots available: N is
19590 which slot to use. */
19593 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19595 struct stack_local_entry *s;
19597 gcc_assert (n < MAX_386_STACK_LOCALS);
19599 /* Virtual slot is valid only before vregs are instantiated. */
19600 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19602 for (s = ix86_stack_locals; s; s = s->next)
19603 if (s->mode == mode && s->n == n)
19604 return copy_rtx (s->rtl);
19606 s = ggc_alloc_stack_local_entry ();
19607 s->n = n;
19608 s->mode = mode;
19609 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19611 s->next = ix86_stack_locals;
19612 ix86_stack_locals = s;
19613 return s->rtl;
19616 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19618 static GTY(()) rtx ix86_tls_symbol;
19620 ix86_tls_get_addr (void)
19623 if (!ix86_tls_symbol)
19625 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19626 (TARGET_ANY_GNU_TLS
19627 && !TARGET_64BIT)
19628 ? "___tls_get_addr"
19629 : "__tls_get_addr");
19632 return ix86_tls_symbol;
19635 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19637 static GTY(()) rtx ix86_tls_module_base_symbol;
19639 ix86_tls_module_base (void)
19642 if (!ix86_tls_module_base_symbol)
19644 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19645 "_TLS_MODULE_BASE_");
19646 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19647 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19650 return ix86_tls_module_base_symbol;
19653 /* Calculate the length of the memory address in the instruction
19654 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19657 memory_address_length (rtx addr)
19659 struct ix86_address parts;
19660 rtx base, index, disp;
19661 int len;
19662 int ok;
19664 if (GET_CODE (addr) == PRE_DEC
19665 || GET_CODE (addr) == POST_INC
19666 || GET_CODE (addr) == PRE_MODIFY
19667 || GET_CODE (addr) == POST_MODIFY)
19668 return 0;
19670 ok = ix86_decompose_address (addr, &parts);
19671 gcc_assert (ok);
19673 if (parts.base && GET_CODE (parts.base) == SUBREG)
19674 parts.base = SUBREG_REG (parts.base);
19675 if (parts.index && GET_CODE (parts.index) == SUBREG)
19676 parts.index = SUBREG_REG (parts.index);
19678 base = parts.base;
19679 index = parts.index;
19680 disp = parts.disp;
19681 len = 0;
19683 /* Rule of thumb:
19684 - esp as the base always wants an index,
19685 - ebp as the base always wants a displacement,
19686 - r12 as the base always wants an index,
19687 - r13 as the base always wants a displacement. */
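/* For example, (%esp) and (%r12) need a SIB byte even with no index, and
   (%ebp) and (%r13) are encoded as 0(%ebp) / 0(%r13) with a one-byte
   displacement.  */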
19689 /* Register Indirect. */
19690 if (base && !index && !disp)
19692 /* esp (for its index) and ebp (for its displacement) need
19693 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19694 code. */
19695 if (REG_P (addr)
19696 && (addr == arg_pointer_rtx
19697 || addr == frame_pointer_rtx
19698 || REGNO (addr) == SP_REG
19699 || REGNO (addr) == BP_REG
19700 || REGNO (addr) == R12_REG
19701 || REGNO (addr) == R13_REG))
19702 len = 1;
19705 /* Direct Addressing. In 64-bit mode, mod 00 r/m 5
19706 is not disp32 but disp32(%rip), so for plain disp32
19707 a SIB byte is needed, unless print_operand_address
19708 optimizes it into disp32(%rip) or (%rip) is implied
19709 by an UNSPEC. */
19710 else if (disp && !base && !index)
19712 len = 4;
19713 if (TARGET_64BIT)
19715 rtx symbol = disp;
19717 if (GET_CODE (disp) == CONST)
19718 symbol = XEXP (disp, 0);
19719 if (GET_CODE (symbol) == PLUS
19720 && CONST_INT_P (XEXP (symbol, 1)))
19721 symbol = XEXP (symbol, 0);
19723 if (GET_CODE (symbol) != LABEL_REF
19724 && (GET_CODE (symbol) != SYMBOL_REF
19725 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19726 && (GET_CODE (symbol) != UNSPEC
19727 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19728 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19729 len += 1;
19733 else
19735 /* Find the length of the displacement constant. */
19736 if (disp)
19738 if (base && satisfies_constraint_K (disp))
19739 len = 1;
19740 else
19741 len = 4;
19743 /* ebp always wants a displacement. Similarly r13. */
19744 else if (base && REG_P (base)
19745 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19746 len = 1;
19748 /* An index requires the two-byte modrm form.... */
19749 if (index
19750 /* ...like esp (or r12), which always wants an index. */
19751 || base == arg_pointer_rtx
19752 || base == frame_pointer_rtx
19753 || (base && REG_P (base)
19754 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
19755 len += 1;
19758 switch (parts.seg)
19760 case SEG_FS:
19761 case SEG_GS:
19762 len += 1;
19763 break;
19764 default:
19765 break;
19768 return len;
19771 /* Compute default value for "length_immediate" attribute. When SHORTFORM
19772 is set, expect that the insn has an 8-bit immediate alternative. */
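/* For example, "add $100, %eax" fits the sign-extended 8-bit immediate
   form (1 byte of immediate), while "add $1000, %eax" needs the full
   32-bit immediate (4 bytes).  */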
19774 ix86_attr_length_immediate_default (rtx insn, int shortform)
19776 int len = 0;
19777 int i;
19778 extract_insn_cached (insn);
19779 for (i = recog_data.n_operands - 1; i >= 0; --i)
19780 if (CONSTANT_P (recog_data.operand[i]))
19782 enum attr_mode mode = get_attr_mode (insn);
19784 gcc_assert (!len);
19785 if (shortform && CONST_INT_P (recog_data.operand[i]))
19787 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19788 switch (mode)
19790 case MODE_QI:
19791 len = 1;
19792 continue;
19793 case MODE_HI:
19794 ival = trunc_int_for_mode (ival, HImode);
19795 break;
19796 case MODE_SI:
19797 ival = trunc_int_for_mode (ival, SImode);
19798 break;
19799 default:
19800 break;
19802 if (IN_RANGE (ival, -128, 127))
19804 len = 1;
19805 continue;
19808 switch (mode)
19810 case MODE_QI:
19811 len = 1;
19812 break;
19813 case MODE_HI:
19814 len = 2;
19815 break;
19816 case MODE_SI:
19817 len = 4;
19818 break;
19819 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19820 case MODE_DI:
19821 len = 4;
19822 break;
19823 default:
19824 fatal_insn ("unknown insn mode", insn);
19827 return len;
19829 /* Compute default value for "length_address" attribute. */
19831 ix86_attr_length_address_default (rtx insn)
19833 int i;
19835 if (get_attr_type (insn) == TYPE_LEA)
19837 rtx set = PATTERN (insn), addr;
19839 if (GET_CODE (set) == PARALLEL)
19840 set = XVECEXP (set, 0, 0);
19842 gcc_assert (GET_CODE (set) == SET);
19844 addr = SET_SRC (set);
19845 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19847 if (GET_CODE (addr) == ZERO_EXTEND)
19848 addr = XEXP (addr, 0);
19849 if (GET_CODE (addr) == SUBREG)
19850 addr = SUBREG_REG (addr);
19853 return memory_address_length (addr);
19856 extract_insn_cached (insn);
19857 for (i = recog_data.n_operands - 1; i >= 0; --i)
19858 if (MEM_P (recog_data.operand[i]))
19860 constrain_operands_cached (reload_completed);
19861 if (which_alternative != -1)
19863 const char *constraints = recog_data.constraints[i];
19864 int alt = which_alternative;
19866 while (*constraints == '=' || *constraints == '+')
19867 constraints++;
19868 while (alt-- > 0)
19869 while (*constraints++ != ',')
19871 /* Skip ignored operands. */
19872 if (*constraints == 'X')
19873 continue;
19875 return memory_address_length (XEXP (recog_data.operand[i], 0));
19877 return 0;
19880 /* Compute default value for "length_vex" attribute. It includes
19881 the 2- or 3-byte VEX prefix and 1 opcode byte. */
19884 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19885 int has_vex_w)
19887 int i;
19889 /* Only the 0f opcode map can use the 2-byte VEX prefix, and the VEX W
19890 bit requires the 3-byte VEX prefix. */
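/* For example, "vaddps %xmm1, %xmm2, %xmm3" can use the 2-byte (C5) VEX
   prefix, while anything that needs REX.W, REX.X, REX.B or a non-0f
   opcode map must use the 3-byte (C4) form.  */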
19891 if (!has_0f_opcode || has_vex_w)
19892 return 3 + 1;
19894 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
19895 if (!TARGET_64BIT)
19896 return 2 + 1;
19898 extract_insn_cached (insn);
19900 for (i = recog_data.n_operands - 1; i >= 0; --i)
19901 if (REG_P (recog_data.operand[i]))
19903 /* REX.W bit uses 3 byte VEX prefix. */
19904 if (GET_MODE (recog_data.operand[i]) == DImode
19905 && GENERAL_REG_P (recog_data.operand[i]))
19906 return 3 + 1;
19908 else
19910 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19911 if (MEM_P (recog_data.operand[i])
19912 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19913 return 3 + 1;
19916 return 2 + 1;
19919 /* Return the maximum number of instructions a cpu can issue. */
19921 static int
19922 ix86_issue_rate (void)
19924 switch (ix86_tune)
19926 case PROCESSOR_PENTIUM:
19927 case PROCESSOR_ATOM:
19928 case PROCESSOR_K6:
19929 return 2;
19931 case PROCESSOR_PENTIUMPRO:
19932 case PROCESSOR_PENTIUM4:
19933 case PROCESSOR_ATHLON:
19934 case PROCESSOR_K8:
19935 case PROCESSOR_AMDFAM10:
19936 case PROCESSOR_NOCONA:
19937 case PROCESSOR_GENERIC32:
19938 case PROCESSOR_GENERIC64:
19939 case PROCESSOR_BDVER1:
19940 return 3;
19942 case PROCESSOR_CORE2:
19943 return 4;
19945 default:
19946 return 1;
19950 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
19951 by DEP_INSN and nothing else set by DEP_INSN. */
19953 static int
19954 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19956 rtx set, set2;
19958 /* Simplify the test for uninteresting insns. */
19959 if (insn_type != TYPE_SETCC
19960 && insn_type != TYPE_ICMOV
19961 && insn_type != TYPE_FCMOV
19962 && insn_type != TYPE_IBR)
19963 return 0;
19965 if ((set = single_set (dep_insn)) != 0)
19967 set = SET_DEST (set);
19968 set2 = NULL_RTX;
19970 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19971 && XVECLEN (PATTERN (dep_insn), 0) == 2
19972 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19973 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19975 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19976 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19978 else
19979 return 0;
19981 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19982 return 0;
19984 /* This test is true if the dependent insn reads the flags but
19985 not any other potentially set register. */
19986 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19987 return 0;
19989 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19990 return 0;
19992 return 1;
19995 /* Return true iff USE_INSN has a memory address with operands set by
19996 SET_INSN. */
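/* For example, "mov %eax, %ebx" followed by "mov (%ebx), %ecx" is such a
   dependency: the load's address register is written by the previous
   insn.  */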
19998 bool
19999 ix86_agi_dependent (rtx set_insn, rtx use_insn)
20001 int i;
20002 extract_insn_cached (use_insn);
20003 for (i = recog_data.n_operands - 1; i >= 0; --i)
20004 if (MEM_P (recog_data.operand[i]))
20006 rtx addr = XEXP (recog_data.operand[i], 0);
20007 return modified_in_p (addr, set_insn) != 0;
20009 return false;
20012 static int
20013 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
20015 enum attr_type insn_type, dep_insn_type;
20016 enum attr_memory memory;
20017 rtx set, set2;
20018 int dep_insn_code_number;
20020 /* Anti and output dependencies have zero cost on all CPUs. */
20021 if (REG_NOTE_KIND (link) != 0)
20022 return 0;
20024 dep_insn_code_number = recog_memoized (dep_insn);
20026 /* If we can't recognize the insns, we can't really do anything. */
20027 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
20028 return cost;
20030 insn_type = get_attr_type (insn);
20031 dep_insn_type = get_attr_type (dep_insn);
20033 switch (ix86_tune)
20035 case PROCESSOR_PENTIUM:
20036 /* Address Generation Interlock adds a cycle of latency. */
20037 if (insn_type == TYPE_LEA)
20039 rtx addr = PATTERN (insn);
20041 if (GET_CODE (addr) == PARALLEL)
20042 addr = XVECEXP (addr, 0, 0);
20044 gcc_assert (GET_CODE (addr) == SET);
20046 addr = SET_SRC (addr);
20047 if (modified_in_p (addr, dep_insn))
20048 cost += 1;
20050 else if (ix86_agi_dependent (dep_insn, insn))
20051 cost += 1;
20053 /* ??? Compares pair with jump/setcc. */
20054 if (ix86_flags_dependent (insn, dep_insn, insn_type))
20055 cost = 0;
20057 /* Floating point stores require value to be ready one cycle earlier. */
20058 if (insn_type == TYPE_FMOV
20059 && get_attr_memory (insn) == MEMORY_STORE
20060 && !ix86_agi_dependent (dep_insn, insn))
20061 cost += 1;
20062 break;
20064 case PROCESSOR_PENTIUMPRO:
20065 memory = get_attr_memory (insn);
20067 /* INT->FP conversion is expensive. */
20068 if (get_attr_fp_int_src (dep_insn))
20069 cost += 5;
20071 /* There is one cycle extra latency between an FP op and a store. */
20072 if (insn_type == TYPE_FMOV
20073 && (set = single_set (dep_insn)) != NULL_RTX
20074 && (set2 = single_set (insn)) != NULL_RTX
20075 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
20076 && MEM_P (SET_DEST (set2)))
20077 cost += 1;
20079 /* Show the ability of the reorder buffer to hide load latency by executing
20080 the load in parallel with the previous instruction when that
20081 instruction is not needed to compute the address. */
20082 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20083 && !ix86_agi_dependent (dep_insn, insn))
20085 /* Claim moves to take one cycle, as the core can issue one load
20086 at a time and the next load can start a cycle later. */
20087 if (dep_insn_type == TYPE_IMOV
20088 || dep_insn_type == TYPE_FMOV)
20089 cost = 1;
20090 else if (cost > 1)
20091 cost--;
20093 break;
20095 case PROCESSOR_K6:
20096 memory = get_attr_memory (insn);
20098 /* The esp dependency is resolved before the instruction is really
20099 finished. */
20100 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
20101 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
20102 return 1;
20104 /* INT->FP conversion is expensive. */
20105 if (get_attr_fp_int_src (dep_insn))
20106 cost += 5;
20108 /* Show the ability of the reorder buffer to hide load latency by executing
20109 the load in parallel with the previous instruction when that
20110 instruction is not needed to compute the address. */
20111 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20112 && !ix86_agi_dependent (dep_insn, insn))
20114 /* Claim moves to take one cycle, as the core can issue one load
20115 at a time and the next load can start a cycle later. */
20116 if (dep_insn_type == TYPE_IMOV
20117 || dep_insn_type == TYPE_FMOV)
20118 cost = 1;
20119 else if (cost > 2)
20120 cost -= 2;
20121 else
20122 cost = 1;
20124 break;
20126 case PROCESSOR_ATHLON:
20127 case PROCESSOR_K8:
20128 case PROCESSOR_AMDFAM10:
20129 case PROCESSOR_BDVER1:
20130 case PROCESSOR_ATOM:
20131 case PROCESSOR_GENERIC32:
20132 case PROCESSOR_GENERIC64:
20133 memory = get_attr_memory (insn);
20135 /* Show the ability of the reorder buffer to hide load latency by executing
20136 the load in parallel with the previous instruction when that
20137 instruction is not needed to compute the address. */
20138 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20139 && !ix86_agi_dependent (dep_insn, insn))
20141 enum attr_unit unit = get_attr_unit (insn);
20142 int loadcost = 3;
20144 /* Because of the difference between the length of integer and
20145 floating unit pipeline preparation stages, the memory operands
20146 for floating point are cheaper.
20148 ??? For Athlon the difference is most probably 2. */
20149 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
20150 loadcost = 3;
20151 else
20152 loadcost = TARGET_ATHLON ? 2 : 0;
20154 if (cost >= loadcost)
20155 cost -= loadcost;
20156 else
20157 cost = 0;
20160 default:
20161 break;
20164 return cost;
20167 /* How many alternative schedules to try. This should be as wide as the
20168 scheduling freedom in the DFA, but no wider. Making this value too
20169 large results in extra work for the scheduler. */
20171 static int
20172 ia32_multipass_dfa_lookahead (void)
20174 switch (ix86_tune)
20176 case PROCESSOR_PENTIUM:
20177 return 2;
20179 case PROCESSOR_PENTIUMPRO:
20180 case PROCESSOR_K6:
20181 return 1;
20183 default:
20184 return 0;
20189 /* Compute the alignment given to a constant that is being placed in memory.
20190 EXP is the constant and ALIGN is the alignment that the object would
20191 ordinarily have.
20192 The value of this function is used instead of that alignment to align
20193 the object. */
20196 ix86_constant_alignment (tree exp, int align)
20198 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
20199 || TREE_CODE (exp) == INTEGER_CST)
20201 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
20202 return 64;
20203 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
20204 return 128;
20206 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
20207 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
20208 return BITS_PER_WORD;
20210 return align;
20213 /* Compute the alignment for a static variable.
20214 TYPE is the data type, and ALIGN is the alignment that
20215 the object would ordinarily have. The value of this function is used
20216 instead of that alignment to align the object. */
20219 ix86_data_alignment (tree type, int align)
20221 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
20223 if (AGGREGATE_TYPE_P (type)
20224 && TYPE_SIZE (type)
20225 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20226 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
20227 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
20228 && align < max_align)
20229 align = max_align;
20231 /* The x86-64 ABI requires arrays of 16 bytes or more to be aligned
20232 to a 16-byte boundary. */
20233 if (TARGET_64BIT)
20235 if (AGGREGATE_TYPE_P (type)
20236 && TYPE_SIZE (type)
20237 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20238 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
20239 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20240 return 128;
20243 if (TREE_CODE (type) == ARRAY_TYPE)
20245 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20246 return 64;
20247 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20248 return 128;
20250 else if (TREE_CODE (type) == COMPLEX_TYPE)
20253 if (TYPE_MODE (type) == DCmode && align < 64)
20254 return 64;
20255 if ((TYPE_MODE (type) == XCmode
20256 || TYPE_MODE (type) == TCmode) && align < 128)
20257 return 128;
20259 else if ((TREE_CODE (type) == RECORD_TYPE
20260 || TREE_CODE (type) == UNION_TYPE
20261 || TREE_CODE (type) == QUAL_UNION_TYPE)
20262 && TYPE_FIELDS (type))
20264 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20265 return 64;
20266 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20267 return 128;
20269 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20270 || TREE_CODE (type) == INTEGER_TYPE)
20272 if (TYPE_MODE (type) == DFmode && align < 64)
20273 return 64;
20274 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20275 return 128;
20278 return align;
20281 /* Compute the alignment for a local variable or a stack slot. EXP is
20282 the data type or decl itself, MODE is the widest mode available and
20283 ALIGN is the alignment that the object would ordinarily have. The
20284 value of this macro is used instead of that alignment to align the
20285 object. */
20287 unsigned int
20288 ix86_local_alignment (tree exp, enum machine_mode mode,
20289 unsigned int align)
20291 tree type, decl;
20293 if (exp && DECL_P (exp))
20295 type = TREE_TYPE (exp);
20296 decl = exp;
20298 else
20300 type = exp;
20301 decl = NULL;
20304 /* Don't do dynamic stack realignment for long long objects with
20305 -mpreferred-stack-boundary=2. */
20306 if (!TARGET_64BIT
20307 && align == 64
20308 && ix86_preferred_stack_boundary < 64
20309 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20310 && (!type || !TYPE_USER_ALIGN (type))
20311 && (!decl || !DECL_USER_ALIGN (decl)))
20312 align = 32;
20314 /* If TYPE is NULL, we are allocating a stack slot for caller-save
20315 register in MODE. We will return the largest alignment of XF
20316 and DF. */
20317 if (!type)
20319 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20320 align = GET_MODE_ALIGNMENT (DFmode);
20321 return align;
20324 /* The x86-64 ABI requires arrays of 16 bytes or more to be aligned
20325 to a 16-byte boundary. The exact wording is:
20327 An array uses the same alignment as its elements, except that a local or
20328 global array variable of length at least 16 bytes or
20329 a C99 variable-length array variable always has alignment of at least 16 bytes.
20331 This was added to allow use of aligned SSE instructions on arrays. The
20332 rule is meant for static storage (where the compiler cannot do the analysis
20333 by itself). We follow it for automatic variables only when convenient:
20334 we fully control everything in the function being compiled, and functions
20335 from other units cannot rely on the alignment.
20337 Exclude the va_list type. It is the common case of a local array where
20338 we cannot benefit from the alignment. */
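/* For example, a local "double buf[4]" (32 bytes) is given 16-byte
   alignment here, when compiling for speed on x86-64 with SSE enabled,
   so that it can be accessed with aligned SSE loads and stores.  */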
20339 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
20340 && TARGET_SSE)
20342 if (AGGREGATE_TYPE_P (type)
20343 && (TYPE_MAIN_VARIANT (type)
20344 != TYPE_MAIN_VARIANT (va_list_type_node))
20345 && TYPE_SIZE (type)
20346 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20347 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20348 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20349 return 128;
20351 if (TREE_CODE (type) == ARRAY_TYPE)
20353 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20354 return 64;
20355 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20356 return 128;
20358 else if (TREE_CODE (type) == COMPLEX_TYPE)
20360 if (TYPE_MODE (type) == DCmode && align < 64)
20361 return 64;
20362 if ((TYPE_MODE (type) == XCmode
20363 || TYPE_MODE (type) == TCmode) && align < 128)
20364 return 128;
20366 else if ((TREE_CODE (type) == RECORD_TYPE
20367 || TREE_CODE (type) == UNION_TYPE
20368 || TREE_CODE (type) == QUAL_UNION_TYPE)
20369 && TYPE_FIELDS (type))
20371 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20372 return 64;
20373 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20374 return 128;
20376 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20377 || TREE_CODE (type) == INTEGER_TYPE)
20380 if (TYPE_MODE (type) == DFmode && align < 64)
20381 return 64;
20382 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20383 return 128;
20385 return align;
20388 /* Compute the minimum required alignment for dynamic stack realignment
20389 purposes for a local variable, parameter or a stack slot. EXP is
20390 the data type or decl itself, MODE is its mode and ALIGN is the
20391 alignment that the object would ordinarily have. */
20393 unsigned int
20394 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20395 unsigned int align)
20397 tree type, decl;
20399 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20400 return align;
20402 if (exp && DECL_P (exp))
20404 type = TREE_TYPE (exp);
20405 decl = exp;
20407 else
20409 type = exp;
20410 decl = NULL;
20413 /* Don't do dynamic stack realignment for long long objects with
20414 -mpreferred-stack-boundary=2. */
20415 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20416 && (!type || !TYPE_USER_ALIGN (type))
20417 && (!decl || !DECL_USER_ALIGN (decl)))
20418 return 32;
20420 return align;
20423 /* Find a location for the static chain incoming to a nested function.
20424 This is a register, unless all free registers are used by arguments. */
20426 static rtx
20427 ix86_static_chain (const_tree fndecl, bool incoming_p)
20429 unsigned regno;
20431 if (!DECL_STATIC_CHAIN (fndecl))
20432 return NULL;
20434 if (TARGET_64BIT)
20436 /* We always use R10 in 64-bit mode. */
20437 regno = R10_REG;
20439 else
20441 tree fntype;
20442 /* By default in 32-bit mode we use ECX to pass the static chain. */
20443 regno = CX_REG;
20445 fntype = TREE_TYPE (fndecl);
20446 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20448 /* Fastcall functions use ecx/edx for arguments, which leaves
20449 us with EAX for the static chain. */
20450 regno = AX_REG;
20452 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
20454 /* Thiscall functions use ecx for arguments, which leaves
20455 us with EAX for the static chain. */
20456 regno = AX_REG;
20458 else if (ix86_function_regparm (fntype, fndecl) == 3)
20460 /* For regparm 3, we have no free call-clobbered registers in
20461 which to store the static chain. In order to implement this,
20462 we have the trampoline push the static chain to the stack.
20463 However, we can't push a value below the return address when
20464 we call the nested function directly, so we have to use an
20465 alternate entry point. For this we use ESI, and have the
20466 alternate entry point push ESI, so that things appear the
20467 same once we're executing the nested function. */
20468 if (incoming_p)
20470 if (fndecl == current_function_decl)
20471 ix86_static_chain_on_stack = true;
20472 return gen_frame_mem (SImode,
20473 plus_constant (arg_pointer_rtx, -8));
20475 regno = SI_REG;
20479 return gen_rtx_REG (Pmode, regno);
20482 /* Emit RTL insns to initialize the variable parts of a trampoline.
20483 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20484 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20485 to be passed to the target function. */
20487 static void
20488 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20490 rtx mem, fnaddr;
20492 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20494 if (!TARGET_64BIT)
20496 rtx disp, chain;
20497 int opcode;
20499 /* Depending on the static chain location, either load a register
20500 with a constant, or push the constant to the stack. All of the
20501 instructions are the same size. */
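/* 0xb9 / 0xb8 encode "mov $imm32, %ecx" / "mov $imm32, %eax", and 0x68
   encodes "push $imm32"; each is 1 opcode byte plus a 4-byte immediate,
   so the trampoline layout is the same either way.  */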
20502 chain = ix86_static_chain (fndecl, true);
20503 if (REG_P (chain))
20505 if (REGNO (chain) == CX_REG)
20506 opcode = 0xb9;
20507 else if (REGNO (chain) == AX_REG)
20508 opcode = 0xb8;
20509 else
20510 gcc_unreachable ();
20512 else
20513 opcode = 0x68;
20515 mem = adjust_address (m_tramp, QImode, 0);
20516 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20518 mem = adjust_address (m_tramp, SImode, 1);
20519 emit_move_insn (mem, chain_value);
20521 /* Compute offset from the end of the jmp to the target function.
20522 In the case in which the trampoline stores the static chain on
20523 the stack, we need to skip the first insn which pushes the
20524 (call-saved) register static chain; this push is 1 byte. */
20525 disp = expand_binop (SImode, sub_optab, fnaddr,
20526 plus_constant (XEXP (m_tramp, 0),
20527 MEM_P (chain) ? 9 : 10),
20528 NULL_RTX, 1, OPTAB_DIRECT);
20530 mem = adjust_address (m_tramp, QImode, 5);
20531 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20533 mem = adjust_address (m_tramp, SImode, 6);
20534 emit_move_insn (mem, disp);
20536 else
20538 int offset = 0;
20540 /* Load the function address into r11. Try to load the address using
20541 the shorter movl instead of movabs. We may want to support
20542 movq for kernel mode, but the kernel does not use trampolines at
20543 the moment. */
20544 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20546 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20548 mem = adjust_address (m_tramp, HImode, offset);
20549 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20551 mem = adjust_address (m_tramp, SImode, offset + 2);
20552 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20553 offset += 6;
20555 else
20557 mem = adjust_address (m_tramp, HImode, offset);
20558 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20560 mem = adjust_address (m_tramp, DImode, offset + 2);
20561 emit_move_insn (mem, fnaddr);
20562 offset += 10;
20565 /* Load static chain using movabs to r10. */
20566 mem = adjust_address (m_tramp, HImode, offset);
20567 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20569 mem = adjust_address (m_tramp, DImode, offset + 2);
20570 emit_move_insn (mem, chain_value);
20571 offset += 10;
20573 /* Jump to r11; the last (unused) byte is a nop, only there to
20574 pad the write out to a single 32-bit store. */
20575 mem = adjust_address (m_tramp, SImode, offset);
20576 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
20577 offset += 4;
20579 gcc_assert (offset <= TRAMPOLINE_SIZE);
20582 #ifdef ENABLE_EXECUTE_STACK
20583 #ifdef CHECK_EXECUTE_STACK_ENABLED
20584 if (CHECK_EXECUTE_STACK_ENABLED)
20585 #endif
20586 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20587 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20588 #endif
20591 /* The following file contains several enumerations and data structures
20592 built from the definitions in i386-builtin-types.def. */
20594 #include "i386-builtin-types.inc"
20596 /* Table for the ix86 builtin non-function types. */
20597 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20599 /* Retrieve an element from the above table, building some of
20600 the types lazily. */
20602 static tree
20603 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20605 unsigned int index;
20606 tree type, itype;
20608 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
20610 type = ix86_builtin_type_tab[(int) tcode];
20611 if (type != NULL)
20612 return type;
20614 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20615 if (tcode <= IX86_BT_LAST_VECT)
20617 enum machine_mode mode;
20619 index = tcode - IX86_BT_LAST_PRIM - 1;
20620 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20621 mode = ix86_builtin_type_vect_mode[index];
20623 type = build_vector_type_for_mode (itype, mode);
20625 else
20627 int quals;
20629 index = tcode - IX86_BT_LAST_VECT - 1;
20630 if (tcode <= IX86_BT_LAST_PTR)
20631 quals = TYPE_UNQUALIFIED;
20632 else
20633 quals = TYPE_QUAL_CONST;
20635 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20636 if (quals != TYPE_UNQUALIFIED)
20637 itype = build_qualified_type (itype, quals);
20639 type = build_pointer_type (itype);
20642 ix86_builtin_type_tab[(int) tcode] = type;
20643 return type;
20646 /* Table for the ix86 builtin function types. */
20647 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20649 /* Retrieve an element from the above table, building some of
20650 the types lazily. */
20652 static tree
20653 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20655 tree type;
20657 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20659 type = ix86_builtin_func_type_tab[(int) tcode];
20660 if (type != NULL)
20661 return type;
20663 if (tcode <= IX86_BT_LAST_FUNC)
20665 unsigned start = ix86_builtin_func_start[(int) tcode];
20666 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20667 tree rtype, atype, args = void_list_node;
20668 unsigned i;
20670 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20671 for (i = after - 1; i > start; --i)
20673 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20674 args = tree_cons (NULL, atype, args);
20677 type = build_function_type (rtype, args);
20679 else
20681 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20682 enum ix86_builtin_func_type icode;
20684 icode = ix86_builtin_func_alias_base[index];
20685 type = ix86_get_builtin_func_type (icode);
20688 ix86_builtin_func_type_tab[(int) tcode] = type;
20689 return type;
20693 /* Codes for all the SSE/MMX builtins. */
20694 enum ix86_builtins
20696 IX86_BUILTIN_ADDPS,
20697 IX86_BUILTIN_ADDSS,
20698 IX86_BUILTIN_DIVPS,
20699 IX86_BUILTIN_DIVSS,
20700 IX86_BUILTIN_MULPS,
20701 IX86_BUILTIN_MULSS,
20702 IX86_BUILTIN_SUBPS,
20703 IX86_BUILTIN_SUBSS,
20705 IX86_BUILTIN_CMPEQPS,
20706 IX86_BUILTIN_CMPLTPS,
20707 IX86_BUILTIN_CMPLEPS,
20708 IX86_BUILTIN_CMPGTPS,
20709 IX86_BUILTIN_CMPGEPS,
20710 IX86_BUILTIN_CMPNEQPS,
20711 IX86_BUILTIN_CMPNLTPS,
20712 IX86_BUILTIN_CMPNLEPS,
20713 IX86_BUILTIN_CMPNGTPS,
20714 IX86_BUILTIN_CMPNGEPS,
20715 IX86_BUILTIN_CMPORDPS,
20716 IX86_BUILTIN_CMPUNORDPS,
20717 IX86_BUILTIN_CMPEQSS,
20718 IX86_BUILTIN_CMPLTSS,
20719 IX86_BUILTIN_CMPLESS,
20720 IX86_BUILTIN_CMPNEQSS,
20721 IX86_BUILTIN_CMPNLTSS,
20722 IX86_BUILTIN_CMPNLESS,
20723 IX86_BUILTIN_CMPNGTSS,
20724 IX86_BUILTIN_CMPNGESS,
20725 IX86_BUILTIN_CMPORDSS,
20726 IX86_BUILTIN_CMPUNORDSS,
20728 IX86_BUILTIN_COMIEQSS,
20729 IX86_BUILTIN_COMILTSS,
20730 IX86_BUILTIN_COMILESS,
20731 IX86_BUILTIN_COMIGTSS,
20732 IX86_BUILTIN_COMIGESS,
20733 IX86_BUILTIN_COMINEQSS,
20734 IX86_BUILTIN_UCOMIEQSS,
20735 IX86_BUILTIN_UCOMILTSS,
20736 IX86_BUILTIN_UCOMILESS,
20737 IX86_BUILTIN_UCOMIGTSS,
20738 IX86_BUILTIN_UCOMIGESS,
20739 IX86_BUILTIN_UCOMINEQSS,
20741 IX86_BUILTIN_CVTPI2PS,
20742 IX86_BUILTIN_CVTPS2PI,
20743 IX86_BUILTIN_CVTSI2SS,
20744 IX86_BUILTIN_CVTSI642SS,
20745 IX86_BUILTIN_CVTSS2SI,
20746 IX86_BUILTIN_CVTSS2SI64,
20747 IX86_BUILTIN_CVTTPS2PI,
20748 IX86_BUILTIN_CVTTSS2SI,
20749 IX86_BUILTIN_CVTTSS2SI64,
20751 IX86_BUILTIN_MAXPS,
20752 IX86_BUILTIN_MAXSS,
20753 IX86_BUILTIN_MINPS,
20754 IX86_BUILTIN_MINSS,
20756 IX86_BUILTIN_LOADUPS,
20757 IX86_BUILTIN_STOREUPS,
20758 IX86_BUILTIN_MOVSS,
20760 IX86_BUILTIN_MOVHLPS,
20761 IX86_BUILTIN_MOVLHPS,
20762 IX86_BUILTIN_LOADHPS,
20763 IX86_BUILTIN_LOADLPS,
20764 IX86_BUILTIN_STOREHPS,
20765 IX86_BUILTIN_STORELPS,
20767 IX86_BUILTIN_MASKMOVQ,
20768 IX86_BUILTIN_MOVMSKPS,
20769 IX86_BUILTIN_PMOVMSKB,
20771 IX86_BUILTIN_MOVNTPS,
20772 IX86_BUILTIN_MOVNTQ,
20774 IX86_BUILTIN_LOADDQU,
20775 IX86_BUILTIN_STOREDQU,
20777 IX86_BUILTIN_PACKSSWB,
20778 IX86_BUILTIN_PACKSSDW,
20779 IX86_BUILTIN_PACKUSWB,
20781 IX86_BUILTIN_PADDB,
20782 IX86_BUILTIN_PADDW,
20783 IX86_BUILTIN_PADDD,
20784 IX86_BUILTIN_PADDQ,
20785 IX86_BUILTIN_PADDSB,
20786 IX86_BUILTIN_PADDSW,
20787 IX86_BUILTIN_PADDUSB,
20788 IX86_BUILTIN_PADDUSW,
20789 IX86_BUILTIN_PSUBB,
20790 IX86_BUILTIN_PSUBW,
20791 IX86_BUILTIN_PSUBD,
20792 IX86_BUILTIN_PSUBQ,
20793 IX86_BUILTIN_PSUBSB,
20794 IX86_BUILTIN_PSUBSW,
20795 IX86_BUILTIN_PSUBUSB,
20796 IX86_BUILTIN_PSUBUSW,
20798 IX86_BUILTIN_PAND,
20799 IX86_BUILTIN_PANDN,
20800 IX86_BUILTIN_POR,
20801 IX86_BUILTIN_PXOR,
20803 IX86_BUILTIN_PAVGB,
20804 IX86_BUILTIN_PAVGW,
20806 IX86_BUILTIN_PCMPEQB,
20807 IX86_BUILTIN_PCMPEQW,
20808 IX86_BUILTIN_PCMPEQD,
20809 IX86_BUILTIN_PCMPGTB,
20810 IX86_BUILTIN_PCMPGTW,
20811 IX86_BUILTIN_PCMPGTD,
20813 IX86_BUILTIN_PMADDWD,
20815 IX86_BUILTIN_PMAXSW,
20816 IX86_BUILTIN_PMAXUB,
20817 IX86_BUILTIN_PMINSW,
20818 IX86_BUILTIN_PMINUB,
20820 IX86_BUILTIN_PMULHUW,
20821 IX86_BUILTIN_PMULHW,
20822 IX86_BUILTIN_PMULLW,
20824 IX86_BUILTIN_PSADBW,
20825 IX86_BUILTIN_PSHUFW,
20827 IX86_BUILTIN_PSLLW,
20828 IX86_BUILTIN_PSLLD,
20829 IX86_BUILTIN_PSLLQ,
20830 IX86_BUILTIN_PSRAW,
20831 IX86_BUILTIN_PSRAD,
20832 IX86_BUILTIN_PSRLW,
20833 IX86_BUILTIN_PSRLD,
20834 IX86_BUILTIN_PSRLQ,
20835 IX86_BUILTIN_PSLLWI,
20836 IX86_BUILTIN_PSLLDI,
20837 IX86_BUILTIN_PSLLQI,
20838 IX86_BUILTIN_PSRAWI,
20839 IX86_BUILTIN_PSRADI,
20840 IX86_BUILTIN_PSRLWI,
20841 IX86_BUILTIN_PSRLDI,
20842 IX86_BUILTIN_PSRLQI,
20844 IX86_BUILTIN_PUNPCKHBW,
20845 IX86_BUILTIN_PUNPCKHWD,
20846 IX86_BUILTIN_PUNPCKHDQ,
20847 IX86_BUILTIN_PUNPCKLBW,
20848 IX86_BUILTIN_PUNPCKLWD,
20849 IX86_BUILTIN_PUNPCKLDQ,
20851 IX86_BUILTIN_SHUFPS,
20853 IX86_BUILTIN_RCPPS,
20854 IX86_BUILTIN_RCPSS,
20855 IX86_BUILTIN_RSQRTPS,
20856 IX86_BUILTIN_RSQRTPS_NR,
20857 IX86_BUILTIN_RSQRTSS,
20858 IX86_BUILTIN_RSQRTF,
20859 IX86_BUILTIN_SQRTPS,
20860 IX86_BUILTIN_SQRTPS_NR,
20861 IX86_BUILTIN_SQRTSS,
20863 IX86_BUILTIN_UNPCKHPS,
20864 IX86_BUILTIN_UNPCKLPS,
20866 IX86_BUILTIN_ANDPS,
20867 IX86_BUILTIN_ANDNPS,
20868 IX86_BUILTIN_ORPS,
20869 IX86_BUILTIN_XORPS,
20871 IX86_BUILTIN_EMMS,
20872 IX86_BUILTIN_LDMXCSR,
20873 IX86_BUILTIN_STMXCSR,
20874 IX86_BUILTIN_SFENCE,
20876 /* 3DNow! Original */
20877 IX86_BUILTIN_FEMMS,
20878 IX86_BUILTIN_PAVGUSB,
20879 IX86_BUILTIN_PF2ID,
20880 IX86_BUILTIN_PFACC,
20881 IX86_BUILTIN_PFADD,
20882 IX86_BUILTIN_PFCMPEQ,
20883 IX86_BUILTIN_PFCMPGE,
20884 IX86_BUILTIN_PFCMPGT,
20885 IX86_BUILTIN_PFMAX,
20886 IX86_BUILTIN_PFMIN,
20887 IX86_BUILTIN_PFMUL,
20888 IX86_BUILTIN_PFRCP,
20889 IX86_BUILTIN_PFRCPIT1,
20890 IX86_BUILTIN_PFRCPIT2,
20891 IX86_BUILTIN_PFRSQIT1,
20892 IX86_BUILTIN_PFRSQRT,
20893 IX86_BUILTIN_PFSUB,
20894 IX86_BUILTIN_PFSUBR,
20895 IX86_BUILTIN_PI2FD,
20896 IX86_BUILTIN_PMULHRW,
20898 /* 3DNow! Athlon Extensions */
20899 IX86_BUILTIN_PF2IW,
20900 IX86_BUILTIN_PFNACC,
20901 IX86_BUILTIN_PFPNACC,
20902 IX86_BUILTIN_PI2FW,
20903 IX86_BUILTIN_PSWAPDSI,
20904 IX86_BUILTIN_PSWAPDSF,
20906 /* SSE2 */
20907 IX86_BUILTIN_ADDPD,
20908 IX86_BUILTIN_ADDSD,
20909 IX86_BUILTIN_DIVPD,
20910 IX86_BUILTIN_DIVSD,
20911 IX86_BUILTIN_MULPD,
20912 IX86_BUILTIN_MULSD,
20913 IX86_BUILTIN_SUBPD,
20914 IX86_BUILTIN_SUBSD,
20916 IX86_BUILTIN_CMPEQPD,
20917 IX86_BUILTIN_CMPLTPD,
20918 IX86_BUILTIN_CMPLEPD,
20919 IX86_BUILTIN_CMPGTPD,
20920 IX86_BUILTIN_CMPGEPD,
20921 IX86_BUILTIN_CMPNEQPD,
20922 IX86_BUILTIN_CMPNLTPD,
20923 IX86_BUILTIN_CMPNLEPD,
20924 IX86_BUILTIN_CMPNGTPD,
20925 IX86_BUILTIN_CMPNGEPD,
20926 IX86_BUILTIN_CMPORDPD,
20927 IX86_BUILTIN_CMPUNORDPD,
20928 IX86_BUILTIN_CMPEQSD,
20929 IX86_BUILTIN_CMPLTSD,
20930 IX86_BUILTIN_CMPLESD,
20931 IX86_BUILTIN_CMPNEQSD,
20932 IX86_BUILTIN_CMPNLTSD,
20933 IX86_BUILTIN_CMPNLESD,
20934 IX86_BUILTIN_CMPORDSD,
20935 IX86_BUILTIN_CMPUNORDSD,
20937 IX86_BUILTIN_COMIEQSD,
20938 IX86_BUILTIN_COMILTSD,
20939 IX86_BUILTIN_COMILESD,
20940 IX86_BUILTIN_COMIGTSD,
20941 IX86_BUILTIN_COMIGESD,
20942 IX86_BUILTIN_COMINEQSD,
20943 IX86_BUILTIN_UCOMIEQSD,
20944 IX86_BUILTIN_UCOMILTSD,
20945 IX86_BUILTIN_UCOMILESD,
20946 IX86_BUILTIN_UCOMIGTSD,
20947 IX86_BUILTIN_UCOMIGESD,
20948 IX86_BUILTIN_UCOMINEQSD,
20950 IX86_BUILTIN_MAXPD,
20951 IX86_BUILTIN_MAXSD,
20952 IX86_BUILTIN_MINPD,
20953 IX86_BUILTIN_MINSD,
20955 IX86_BUILTIN_ANDPD,
20956 IX86_BUILTIN_ANDNPD,
20957 IX86_BUILTIN_ORPD,
20958 IX86_BUILTIN_XORPD,
20960 IX86_BUILTIN_SQRTPD,
20961 IX86_BUILTIN_SQRTSD,
20963 IX86_BUILTIN_UNPCKHPD,
20964 IX86_BUILTIN_UNPCKLPD,
20966 IX86_BUILTIN_SHUFPD,
20968 IX86_BUILTIN_LOADUPD,
20969 IX86_BUILTIN_STOREUPD,
20970 IX86_BUILTIN_MOVSD,
20972 IX86_BUILTIN_LOADHPD,
20973 IX86_BUILTIN_LOADLPD,
20975 IX86_BUILTIN_CVTDQ2PD,
20976 IX86_BUILTIN_CVTDQ2PS,
20978 IX86_BUILTIN_CVTPD2DQ,
20979 IX86_BUILTIN_CVTPD2PI,
20980 IX86_BUILTIN_CVTPD2PS,
20981 IX86_BUILTIN_CVTTPD2DQ,
20982 IX86_BUILTIN_CVTTPD2PI,
20984 IX86_BUILTIN_CVTPI2PD,
20985 IX86_BUILTIN_CVTSI2SD,
20986 IX86_BUILTIN_CVTSI642SD,
20988 IX86_BUILTIN_CVTSD2SI,
20989 IX86_BUILTIN_CVTSD2SI64,
20990 IX86_BUILTIN_CVTSD2SS,
20991 IX86_BUILTIN_CVTSS2SD,
20992 IX86_BUILTIN_CVTTSD2SI,
20993 IX86_BUILTIN_CVTTSD2SI64,
20995 IX86_BUILTIN_CVTPS2DQ,
20996 IX86_BUILTIN_CVTPS2PD,
20997 IX86_BUILTIN_CVTTPS2DQ,
20999 IX86_BUILTIN_MOVNTI,
21000 IX86_BUILTIN_MOVNTPD,
21001 IX86_BUILTIN_MOVNTDQ,
21003 IX86_BUILTIN_MOVQ128,
21005 /* SSE2 MMX */
21006 IX86_BUILTIN_MASKMOVDQU,
21007 IX86_BUILTIN_MOVMSKPD,
21008 IX86_BUILTIN_PMOVMSKB128,
21010 IX86_BUILTIN_PACKSSWB128,
21011 IX86_BUILTIN_PACKSSDW128,
21012 IX86_BUILTIN_PACKUSWB128,
21014 IX86_BUILTIN_PADDB128,
21015 IX86_BUILTIN_PADDW128,
21016 IX86_BUILTIN_PADDD128,
21017 IX86_BUILTIN_PADDQ128,
21018 IX86_BUILTIN_PADDSB128,
21019 IX86_BUILTIN_PADDSW128,
21020 IX86_BUILTIN_PADDUSB128,
21021 IX86_BUILTIN_PADDUSW128,
21022 IX86_BUILTIN_PSUBB128,
21023 IX86_BUILTIN_PSUBW128,
21024 IX86_BUILTIN_PSUBD128,
21025 IX86_BUILTIN_PSUBQ128,
21026 IX86_BUILTIN_PSUBSB128,
21027 IX86_BUILTIN_PSUBSW128,
21028 IX86_BUILTIN_PSUBUSB128,
21029 IX86_BUILTIN_PSUBUSW128,
21031 IX86_BUILTIN_PAND128,
21032 IX86_BUILTIN_PANDN128,
21033 IX86_BUILTIN_POR128,
21034 IX86_BUILTIN_PXOR128,
21036 IX86_BUILTIN_PAVGB128,
21037 IX86_BUILTIN_PAVGW128,
21039 IX86_BUILTIN_PCMPEQB128,
21040 IX86_BUILTIN_PCMPEQW128,
21041 IX86_BUILTIN_PCMPEQD128,
21042 IX86_BUILTIN_PCMPGTB128,
21043 IX86_BUILTIN_PCMPGTW128,
21044 IX86_BUILTIN_PCMPGTD128,
21046 IX86_BUILTIN_PMADDWD128,
21048 IX86_BUILTIN_PMAXSW128,
21049 IX86_BUILTIN_PMAXUB128,
21050 IX86_BUILTIN_PMINSW128,
21051 IX86_BUILTIN_PMINUB128,
21053 IX86_BUILTIN_PMULUDQ,
21054 IX86_BUILTIN_PMULUDQ128,
21055 IX86_BUILTIN_PMULHUW128,
21056 IX86_BUILTIN_PMULHW128,
21057 IX86_BUILTIN_PMULLW128,
21059 IX86_BUILTIN_PSADBW128,
21060 IX86_BUILTIN_PSHUFHW,
21061 IX86_BUILTIN_PSHUFLW,
21062 IX86_BUILTIN_PSHUFD,
21064 IX86_BUILTIN_PSLLDQI128,
21065 IX86_BUILTIN_PSLLWI128,
21066 IX86_BUILTIN_PSLLDI128,
21067 IX86_BUILTIN_PSLLQI128,
21068 IX86_BUILTIN_PSRAWI128,
21069 IX86_BUILTIN_PSRADI128,
21070 IX86_BUILTIN_PSRLDQI128,
21071 IX86_BUILTIN_PSRLWI128,
21072 IX86_BUILTIN_PSRLDI128,
21073 IX86_BUILTIN_PSRLQI128,
21075 IX86_BUILTIN_PSLLDQ128,
21076 IX86_BUILTIN_PSLLW128,
21077 IX86_BUILTIN_PSLLD128,
21078 IX86_BUILTIN_PSLLQ128,
21079 IX86_BUILTIN_PSRAW128,
21080 IX86_BUILTIN_PSRAD128,
21081 IX86_BUILTIN_PSRLW128,
21082 IX86_BUILTIN_PSRLD128,
21083 IX86_BUILTIN_PSRLQ128,
21085 IX86_BUILTIN_PUNPCKHBW128,
21086 IX86_BUILTIN_PUNPCKHWD128,
21087 IX86_BUILTIN_PUNPCKHDQ128,
21088 IX86_BUILTIN_PUNPCKHQDQ128,
21089 IX86_BUILTIN_PUNPCKLBW128,
21090 IX86_BUILTIN_PUNPCKLWD128,
21091 IX86_BUILTIN_PUNPCKLDQ128,
21092 IX86_BUILTIN_PUNPCKLQDQ128,
21094 IX86_BUILTIN_CLFLUSH,
21095 IX86_BUILTIN_MFENCE,
21096 IX86_BUILTIN_LFENCE,
21098 IX86_BUILTIN_BSRSI,
21099 IX86_BUILTIN_BSRDI,
21100 IX86_BUILTIN_RDPMC,
21101 IX86_BUILTIN_RDTSC,
21102 IX86_BUILTIN_RDTSCP,
21103 IX86_BUILTIN_ROLQI,
21104 IX86_BUILTIN_ROLHI,
21105 IX86_BUILTIN_RORQI,
21106 IX86_BUILTIN_RORHI,
21108 /* SSE3. */
21109 IX86_BUILTIN_ADDSUBPS,
21110 IX86_BUILTIN_HADDPS,
21111 IX86_BUILTIN_HSUBPS,
21112 IX86_BUILTIN_MOVSHDUP,
21113 IX86_BUILTIN_MOVSLDUP,
21114 IX86_BUILTIN_ADDSUBPD,
21115 IX86_BUILTIN_HADDPD,
21116 IX86_BUILTIN_HSUBPD,
21117 IX86_BUILTIN_LDDQU,
21119 IX86_BUILTIN_MONITOR,
21120 IX86_BUILTIN_MWAIT,
21122 /* SSSE3. */
21123 IX86_BUILTIN_PHADDW,
21124 IX86_BUILTIN_PHADDD,
21125 IX86_BUILTIN_PHADDSW,
21126 IX86_BUILTIN_PHSUBW,
21127 IX86_BUILTIN_PHSUBD,
21128 IX86_BUILTIN_PHSUBSW,
21129 IX86_BUILTIN_PMADDUBSW,
21130 IX86_BUILTIN_PMULHRSW,
21131 IX86_BUILTIN_PSHUFB,
21132 IX86_BUILTIN_PSIGNB,
21133 IX86_BUILTIN_PSIGNW,
21134 IX86_BUILTIN_PSIGND,
21135 IX86_BUILTIN_PALIGNR,
21136 IX86_BUILTIN_PABSB,
21137 IX86_BUILTIN_PABSW,
21138 IX86_BUILTIN_PABSD,
21140 IX86_BUILTIN_PHADDW128,
21141 IX86_BUILTIN_PHADDD128,
21142 IX86_BUILTIN_PHADDSW128,
21143 IX86_BUILTIN_PHSUBW128,
21144 IX86_BUILTIN_PHSUBD128,
21145 IX86_BUILTIN_PHSUBSW128,
21146 IX86_BUILTIN_PMADDUBSW128,
21147 IX86_BUILTIN_PMULHRSW128,
21148 IX86_BUILTIN_PSHUFB128,
21149 IX86_BUILTIN_PSIGNB128,
21150 IX86_BUILTIN_PSIGNW128,
21151 IX86_BUILTIN_PSIGND128,
21152 IX86_BUILTIN_PALIGNR128,
21153 IX86_BUILTIN_PABSB128,
21154 IX86_BUILTIN_PABSW128,
21155 IX86_BUILTIN_PABSD128,
21157 /* AMDFAM10 - SSE4A New Instructions. */
21158 IX86_BUILTIN_MOVNTSD,
21159 IX86_BUILTIN_MOVNTSS,
21160 IX86_BUILTIN_EXTRQI,
21161 IX86_BUILTIN_EXTRQ,
21162 IX86_BUILTIN_INSERTQI,
21163 IX86_BUILTIN_INSERTQ,
21165 /* SSE4.1. */
21166 IX86_BUILTIN_BLENDPD,
21167 IX86_BUILTIN_BLENDPS,
21168 IX86_BUILTIN_BLENDVPD,
21169 IX86_BUILTIN_BLENDVPS,
21170 IX86_BUILTIN_PBLENDVB128,
21171 IX86_BUILTIN_PBLENDW128,
21173 IX86_BUILTIN_DPPD,
21174 IX86_BUILTIN_DPPS,
21176 IX86_BUILTIN_INSERTPS128,
21178 IX86_BUILTIN_MOVNTDQA,
21179 IX86_BUILTIN_MPSADBW128,
21180 IX86_BUILTIN_PACKUSDW128,
21181 IX86_BUILTIN_PCMPEQQ,
21182 IX86_BUILTIN_PHMINPOSUW128,
21184 IX86_BUILTIN_PMAXSB128,
21185 IX86_BUILTIN_PMAXSD128,
21186 IX86_BUILTIN_PMAXUD128,
21187 IX86_BUILTIN_PMAXUW128,
21189 IX86_BUILTIN_PMINSB128,
21190 IX86_BUILTIN_PMINSD128,
21191 IX86_BUILTIN_PMINUD128,
21192 IX86_BUILTIN_PMINUW128,
21194 IX86_BUILTIN_PMOVSXBW128,
21195 IX86_BUILTIN_PMOVSXBD128,
21196 IX86_BUILTIN_PMOVSXBQ128,
21197 IX86_BUILTIN_PMOVSXWD128,
21198 IX86_BUILTIN_PMOVSXWQ128,
21199 IX86_BUILTIN_PMOVSXDQ128,
21201 IX86_BUILTIN_PMOVZXBW128,
21202 IX86_BUILTIN_PMOVZXBD128,
21203 IX86_BUILTIN_PMOVZXBQ128,
21204 IX86_BUILTIN_PMOVZXWD128,
21205 IX86_BUILTIN_PMOVZXWQ128,
21206 IX86_BUILTIN_PMOVZXDQ128,
21208 IX86_BUILTIN_PMULDQ128,
21209 IX86_BUILTIN_PMULLD128,
21211 IX86_BUILTIN_ROUNDPD,
21212 IX86_BUILTIN_ROUNDPS,
21213 IX86_BUILTIN_ROUNDSD,
21214 IX86_BUILTIN_ROUNDSS,
21216 IX86_BUILTIN_PTESTZ,
21217 IX86_BUILTIN_PTESTC,
21218 IX86_BUILTIN_PTESTNZC,
21220 IX86_BUILTIN_VEC_INIT_V2SI,
21221 IX86_BUILTIN_VEC_INIT_V4HI,
21222 IX86_BUILTIN_VEC_INIT_V8QI,
21223 IX86_BUILTIN_VEC_EXT_V2DF,
21224 IX86_BUILTIN_VEC_EXT_V2DI,
21225 IX86_BUILTIN_VEC_EXT_V4SF,
21226 IX86_BUILTIN_VEC_EXT_V4SI,
21227 IX86_BUILTIN_VEC_EXT_V8HI,
21228 IX86_BUILTIN_VEC_EXT_V2SI,
21229 IX86_BUILTIN_VEC_EXT_V4HI,
21230 IX86_BUILTIN_VEC_EXT_V16QI,
21231 IX86_BUILTIN_VEC_SET_V2DI,
21232 IX86_BUILTIN_VEC_SET_V4SF,
21233 IX86_BUILTIN_VEC_SET_V4SI,
21234 IX86_BUILTIN_VEC_SET_V8HI,
21235 IX86_BUILTIN_VEC_SET_V4HI,
21236 IX86_BUILTIN_VEC_SET_V16QI,
21238 IX86_BUILTIN_VEC_PACK_SFIX,
21240 /* SSE4.2. */
21241 IX86_BUILTIN_CRC32QI,
21242 IX86_BUILTIN_CRC32HI,
21243 IX86_BUILTIN_CRC32SI,
21244 IX86_BUILTIN_CRC32DI,
21246 IX86_BUILTIN_PCMPESTRI128,
21247 IX86_BUILTIN_PCMPESTRM128,
21248 IX86_BUILTIN_PCMPESTRA128,
21249 IX86_BUILTIN_PCMPESTRC128,
21250 IX86_BUILTIN_PCMPESTRO128,
21251 IX86_BUILTIN_PCMPESTRS128,
21252 IX86_BUILTIN_PCMPESTRZ128,
21253 IX86_BUILTIN_PCMPISTRI128,
21254 IX86_BUILTIN_PCMPISTRM128,
21255 IX86_BUILTIN_PCMPISTRA128,
21256 IX86_BUILTIN_PCMPISTRC128,
21257 IX86_BUILTIN_PCMPISTRO128,
21258 IX86_BUILTIN_PCMPISTRS128,
21259 IX86_BUILTIN_PCMPISTRZ128,
21261 IX86_BUILTIN_PCMPGTQ,
21263 /* AES instructions */
21264 IX86_BUILTIN_AESENC128,
21265 IX86_BUILTIN_AESENCLAST128,
21266 IX86_BUILTIN_AESDEC128,
21267 IX86_BUILTIN_AESDECLAST128,
21268 IX86_BUILTIN_AESIMC128,
21269 IX86_BUILTIN_AESKEYGENASSIST128,
21271 /* PCLMUL instruction */
21272 IX86_BUILTIN_PCLMULQDQ128,
21274 /* AVX */
21275 IX86_BUILTIN_ADDPD256,
21276 IX86_BUILTIN_ADDPS256,
21277 IX86_BUILTIN_ADDSUBPD256,
21278 IX86_BUILTIN_ADDSUBPS256,
21279 IX86_BUILTIN_ANDPD256,
21280 IX86_BUILTIN_ANDPS256,
21281 IX86_BUILTIN_ANDNPD256,
21282 IX86_BUILTIN_ANDNPS256,
21283 IX86_BUILTIN_BLENDPD256,
21284 IX86_BUILTIN_BLENDPS256,
21285 IX86_BUILTIN_BLENDVPD256,
21286 IX86_BUILTIN_BLENDVPS256,
21287 IX86_BUILTIN_DIVPD256,
21288 IX86_BUILTIN_DIVPS256,
21289 IX86_BUILTIN_DPPS256,
21290 IX86_BUILTIN_HADDPD256,
21291 IX86_BUILTIN_HADDPS256,
21292 IX86_BUILTIN_HSUBPD256,
21293 IX86_BUILTIN_HSUBPS256,
21294 IX86_BUILTIN_MAXPD256,
21295 IX86_BUILTIN_MAXPS256,
21296 IX86_BUILTIN_MINPD256,
21297 IX86_BUILTIN_MINPS256,
21298 IX86_BUILTIN_MULPD256,
21299 IX86_BUILTIN_MULPS256,
21300 IX86_BUILTIN_ORPD256,
21301 IX86_BUILTIN_ORPS256,
21302 IX86_BUILTIN_SHUFPD256,
21303 IX86_BUILTIN_SHUFPS256,
21304 IX86_BUILTIN_SUBPD256,
21305 IX86_BUILTIN_SUBPS256,
21306 IX86_BUILTIN_XORPD256,
21307 IX86_BUILTIN_XORPS256,
21308 IX86_BUILTIN_CMPSD,
21309 IX86_BUILTIN_CMPSS,
21310 IX86_BUILTIN_CMPPD,
21311 IX86_BUILTIN_CMPPS,
21312 IX86_BUILTIN_CMPPD256,
21313 IX86_BUILTIN_CMPPS256,
21314 IX86_BUILTIN_CVTDQ2PD256,
21315 IX86_BUILTIN_CVTDQ2PS256,
21316 IX86_BUILTIN_CVTPD2PS256,
21317 IX86_BUILTIN_CVTPS2DQ256,
21318 IX86_BUILTIN_CVTPS2PD256,
21319 IX86_BUILTIN_CVTTPD2DQ256,
21320 IX86_BUILTIN_CVTPD2DQ256,
21321 IX86_BUILTIN_CVTTPS2DQ256,
21322 IX86_BUILTIN_EXTRACTF128PD256,
21323 IX86_BUILTIN_EXTRACTF128PS256,
21324 IX86_BUILTIN_EXTRACTF128SI256,
21325 IX86_BUILTIN_VZEROALL,
21326 IX86_BUILTIN_VZEROUPPER,
21327 IX86_BUILTIN_VPERMILVARPD,
21328 IX86_BUILTIN_VPERMILVARPS,
21329 IX86_BUILTIN_VPERMILVARPD256,
21330 IX86_BUILTIN_VPERMILVARPS256,
21331 IX86_BUILTIN_VPERMILPD,
21332 IX86_BUILTIN_VPERMILPS,
21333 IX86_BUILTIN_VPERMILPD256,
21334 IX86_BUILTIN_VPERMILPS256,
21335 IX86_BUILTIN_VPERMIL2PD,
21336 IX86_BUILTIN_VPERMIL2PS,
21337 IX86_BUILTIN_VPERMIL2PD256,
21338 IX86_BUILTIN_VPERMIL2PS256,
21339 IX86_BUILTIN_VPERM2F128PD256,
21340 IX86_BUILTIN_VPERM2F128PS256,
21341 IX86_BUILTIN_VPERM2F128SI256,
21342 IX86_BUILTIN_VBROADCASTSS,
21343 IX86_BUILTIN_VBROADCASTSD256,
21344 IX86_BUILTIN_VBROADCASTSS256,
21345 IX86_BUILTIN_VBROADCASTPD256,
21346 IX86_BUILTIN_VBROADCASTPS256,
21347 IX86_BUILTIN_VINSERTF128PD256,
21348 IX86_BUILTIN_VINSERTF128PS256,
21349 IX86_BUILTIN_VINSERTF128SI256,
21350 IX86_BUILTIN_LOADUPD256,
21351 IX86_BUILTIN_LOADUPS256,
21352 IX86_BUILTIN_STOREUPD256,
21353 IX86_BUILTIN_STOREUPS256,
21354 IX86_BUILTIN_LDDQU256,
21355 IX86_BUILTIN_MOVNTDQ256,
21356 IX86_BUILTIN_MOVNTPD256,
21357 IX86_BUILTIN_MOVNTPS256,
21358 IX86_BUILTIN_LOADDQU256,
21359 IX86_BUILTIN_STOREDQU256,
21360 IX86_BUILTIN_MASKLOADPD,
21361 IX86_BUILTIN_MASKLOADPS,
21362 IX86_BUILTIN_MASKSTOREPD,
21363 IX86_BUILTIN_MASKSTOREPS,
21364 IX86_BUILTIN_MASKLOADPD256,
21365 IX86_BUILTIN_MASKLOADPS256,
21366 IX86_BUILTIN_MASKSTOREPD256,
21367 IX86_BUILTIN_MASKSTOREPS256,
21368 IX86_BUILTIN_MOVSHDUP256,
21369 IX86_BUILTIN_MOVSLDUP256,
21370 IX86_BUILTIN_MOVDDUP256,
21372 IX86_BUILTIN_SQRTPD256,
21373 IX86_BUILTIN_SQRTPS256,
21374 IX86_BUILTIN_SQRTPS_NR256,
21375 IX86_BUILTIN_RSQRTPS256,
21376 IX86_BUILTIN_RSQRTPS_NR256,
21378 IX86_BUILTIN_RCPPS256,
21380 IX86_BUILTIN_ROUNDPD256,
21381 IX86_BUILTIN_ROUNDPS256,
21383 IX86_BUILTIN_UNPCKHPD256,
21384 IX86_BUILTIN_UNPCKLPD256,
21385 IX86_BUILTIN_UNPCKHPS256,
21386 IX86_BUILTIN_UNPCKLPS256,
21388 IX86_BUILTIN_SI256_SI,
21389 IX86_BUILTIN_PS256_PS,
21390 IX86_BUILTIN_PD256_PD,
21391 IX86_BUILTIN_SI_SI256,
21392 IX86_BUILTIN_PS_PS256,
21393 IX86_BUILTIN_PD_PD256,
21395 IX86_BUILTIN_VTESTZPD,
21396 IX86_BUILTIN_VTESTCPD,
21397 IX86_BUILTIN_VTESTNZCPD,
21398 IX86_BUILTIN_VTESTZPS,
21399 IX86_BUILTIN_VTESTCPS,
21400 IX86_BUILTIN_VTESTNZCPS,
21401 IX86_BUILTIN_VTESTZPD256,
21402 IX86_BUILTIN_VTESTCPD256,
21403 IX86_BUILTIN_VTESTNZCPD256,
21404 IX86_BUILTIN_VTESTZPS256,
21405 IX86_BUILTIN_VTESTCPS256,
21406 IX86_BUILTIN_VTESTNZCPS256,
21407 IX86_BUILTIN_PTESTZ256,
21408 IX86_BUILTIN_PTESTC256,
21409 IX86_BUILTIN_PTESTNZC256,
21411 IX86_BUILTIN_MOVMSKPD256,
21412 IX86_BUILTIN_MOVMSKPS256,
21414 /* TFmode support builtins. */
21415 IX86_BUILTIN_INFQ,
21416 IX86_BUILTIN_HUGE_VALQ,
21417 IX86_BUILTIN_FABSQ,
21418 IX86_BUILTIN_COPYSIGNQ,
21420 /* Vectorizer support builtins. */
21421 IX86_BUILTIN_CPYSGNPS,
21422 IX86_BUILTIN_CPYSGNPD,
21424 IX86_BUILTIN_CVTUDQ2PS,
21426 IX86_BUILTIN_VEC_PERM_V2DF,
21427 IX86_BUILTIN_VEC_PERM_V4SF,
21428 IX86_BUILTIN_VEC_PERM_V2DI,
21429 IX86_BUILTIN_VEC_PERM_V4SI,
21430 IX86_BUILTIN_VEC_PERM_V8HI,
21431 IX86_BUILTIN_VEC_PERM_V16QI,
21432 IX86_BUILTIN_VEC_PERM_V2DI_U,
21433 IX86_BUILTIN_VEC_PERM_V4SI_U,
21434 IX86_BUILTIN_VEC_PERM_V8HI_U,
21435 IX86_BUILTIN_VEC_PERM_V16QI_U,
21436 IX86_BUILTIN_VEC_PERM_V4DF,
21437 IX86_BUILTIN_VEC_PERM_V8SF,
21439 /* FMA4 and XOP instructions. */
21440 IX86_BUILTIN_VFMADDSS,
21441 IX86_BUILTIN_VFMADDSD,
21442 IX86_BUILTIN_VFMADDPS,
21443 IX86_BUILTIN_VFMADDPD,
21444 IX86_BUILTIN_VFMSUBSS,
21445 IX86_BUILTIN_VFMSUBSD,
21446 IX86_BUILTIN_VFMSUBPS,
21447 IX86_BUILTIN_VFMSUBPD,
21448 IX86_BUILTIN_VFMADDSUBPS,
21449 IX86_BUILTIN_VFMADDSUBPD,
21450 IX86_BUILTIN_VFMSUBADDPS,
21451 IX86_BUILTIN_VFMSUBADDPD,
21452 IX86_BUILTIN_VFNMADDSS,
21453 IX86_BUILTIN_VFNMADDSD,
21454 IX86_BUILTIN_VFNMADDPS,
21455 IX86_BUILTIN_VFNMADDPD,
21456 IX86_BUILTIN_VFNMSUBSS,
21457 IX86_BUILTIN_VFNMSUBSD,
21458 IX86_BUILTIN_VFNMSUBPS,
21459 IX86_BUILTIN_VFNMSUBPD,
21460 IX86_BUILTIN_VFMADDPS256,
21461 IX86_BUILTIN_VFMADDPD256,
21462 IX86_BUILTIN_VFMSUBPS256,
21463 IX86_BUILTIN_VFMSUBPD256,
21464 IX86_BUILTIN_VFMADDSUBPS256,
21465 IX86_BUILTIN_VFMADDSUBPD256,
21466 IX86_BUILTIN_VFMSUBADDPS256,
21467 IX86_BUILTIN_VFMSUBADDPD256,
21468 IX86_BUILTIN_VFNMADDPS256,
21469 IX86_BUILTIN_VFNMADDPD256,
21470 IX86_BUILTIN_VFNMSUBPS256,
21471 IX86_BUILTIN_VFNMSUBPD256,
21473 IX86_BUILTIN_VPCMOV,
21474 IX86_BUILTIN_VPCMOV_V2DI,
21475 IX86_BUILTIN_VPCMOV_V4SI,
21476 IX86_BUILTIN_VPCMOV_V8HI,
21477 IX86_BUILTIN_VPCMOV_V16QI,
21478 IX86_BUILTIN_VPCMOV_V4SF,
21479 IX86_BUILTIN_VPCMOV_V2DF,
21480 IX86_BUILTIN_VPCMOV256,
21481 IX86_BUILTIN_VPCMOV_V4DI256,
21482 IX86_BUILTIN_VPCMOV_V8SI256,
21483 IX86_BUILTIN_VPCMOV_V16HI256,
21484 IX86_BUILTIN_VPCMOV_V32QI256,
21485 IX86_BUILTIN_VPCMOV_V8SF256,
21486 IX86_BUILTIN_VPCMOV_V4DF256,
21488 IX86_BUILTIN_VPPERM,
21490 IX86_BUILTIN_VPMACSSWW,
21491 IX86_BUILTIN_VPMACSWW,
21492 IX86_BUILTIN_VPMACSSWD,
21493 IX86_BUILTIN_VPMACSWD,
21494 IX86_BUILTIN_VPMACSSDD,
21495 IX86_BUILTIN_VPMACSDD,
21496 IX86_BUILTIN_VPMACSSDQL,
21497 IX86_BUILTIN_VPMACSSDQH,
21498 IX86_BUILTIN_VPMACSDQL,
21499 IX86_BUILTIN_VPMACSDQH,
21500 IX86_BUILTIN_VPMADCSSWD,
21501 IX86_BUILTIN_VPMADCSWD,
21503 IX86_BUILTIN_VPHADDBW,
21504 IX86_BUILTIN_VPHADDBD,
21505 IX86_BUILTIN_VPHADDBQ,
21506 IX86_BUILTIN_VPHADDWD,
21507 IX86_BUILTIN_VPHADDWQ,
21508 IX86_BUILTIN_VPHADDDQ,
21509 IX86_BUILTIN_VPHADDUBW,
21510 IX86_BUILTIN_VPHADDUBD,
21511 IX86_BUILTIN_VPHADDUBQ,
21512 IX86_BUILTIN_VPHADDUWD,
21513 IX86_BUILTIN_VPHADDUWQ,
21514 IX86_BUILTIN_VPHADDUDQ,
21515 IX86_BUILTIN_VPHSUBBW,
21516 IX86_BUILTIN_VPHSUBWD,
21517 IX86_BUILTIN_VPHSUBDQ,
21519 IX86_BUILTIN_VPROTB,
21520 IX86_BUILTIN_VPROTW,
21521 IX86_BUILTIN_VPROTD,
21522 IX86_BUILTIN_VPROTQ,
21523 IX86_BUILTIN_VPROTB_IMM,
21524 IX86_BUILTIN_VPROTW_IMM,
21525 IX86_BUILTIN_VPROTD_IMM,
21526 IX86_BUILTIN_VPROTQ_IMM,
21528 IX86_BUILTIN_VPSHLB,
21529 IX86_BUILTIN_VPSHLW,
21530 IX86_BUILTIN_VPSHLD,
21531 IX86_BUILTIN_VPSHLQ,
21532 IX86_BUILTIN_VPSHAB,
21533 IX86_BUILTIN_VPSHAW,
21534 IX86_BUILTIN_VPSHAD,
21535 IX86_BUILTIN_VPSHAQ,
21537 IX86_BUILTIN_VFRCZSS,
21538 IX86_BUILTIN_VFRCZSD,
21539 IX86_BUILTIN_VFRCZPS,
21540 IX86_BUILTIN_VFRCZPD,
21541 IX86_BUILTIN_VFRCZPS256,
21542 IX86_BUILTIN_VFRCZPD256,
21544 IX86_BUILTIN_VPCOMEQUB,
21545 IX86_BUILTIN_VPCOMNEUB,
21546 IX86_BUILTIN_VPCOMLTUB,
21547 IX86_BUILTIN_VPCOMLEUB,
21548 IX86_BUILTIN_VPCOMGTUB,
21549 IX86_BUILTIN_VPCOMGEUB,
21550 IX86_BUILTIN_VPCOMFALSEUB,
21551 IX86_BUILTIN_VPCOMTRUEUB,
21553 IX86_BUILTIN_VPCOMEQUW,
21554 IX86_BUILTIN_VPCOMNEUW,
21555 IX86_BUILTIN_VPCOMLTUW,
21556 IX86_BUILTIN_VPCOMLEUW,
21557 IX86_BUILTIN_VPCOMGTUW,
21558 IX86_BUILTIN_VPCOMGEUW,
21559 IX86_BUILTIN_VPCOMFALSEUW,
21560 IX86_BUILTIN_VPCOMTRUEUW,
21562 IX86_BUILTIN_VPCOMEQUD,
21563 IX86_BUILTIN_VPCOMNEUD,
21564 IX86_BUILTIN_VPCOMLTUD,
21565 IX86_BUILTIN_VPCOMLEUD,
21566 IX86_BUILTIN_VPCOMGTUD,
21567 IX86_BUILTIN_VPCOMGEUD,
21568 IX86_BUILTIN_VPCOMFALSEUD,
21569 IX86_BUILTIN_VPCOMTRUEUD,
21571 IX86_BUILTIN_VPCOMEQUQ,
21572 IX86_BUILTIN_VPCOMNEUQ,
21573 IX86_BUILTIN_VPCOMLTUQ,
21574 IX86_BUILTIN_VPCOMLEUQ,
21575 IX86_BUILTIN_VPCOMGTUQ,
21576 IX86_BUILTIN_VPCOMGEUQ,
21577 IX86_BUILTIN_VPCOMFALSEUQ,
21578 IX86_BUILTIN_VPCOMTRUEUQ,
21580 IX86_BUILTIN_VPCOMEQB,
21581 IX86_BUILTIN_VPCOMNEB,
21582 IX86_BUILTIN_VPCOMLTB,
21583 IX86_BUILTIN_VPCOMLEB,
21584 IX86_BUILTIN_VPCOMGTB,
21585 IX86_BUILTIN_VPCOMGEB,
21586 IX86_BUILTIN_VPCOMFALSEB,
21587 IX86_BUILTIN_VPCOMTRUEB,
21589 IX86_BUILTIN_VPCOMEQW,
21590 IX86_BUILTIN_VPCOMNEW,
21591 IX86_BUILTIN_VPCOMLTW,
21592 IX86_BUILTIN_VPCOMLEW,
21593 IX86_BUILTIN_VPCOMGTW,
21594 IX86_BUILTIN_VPCOMGEW,
21595 IX86_BUILTIN_VPCOMFALSEW,
21596 IX86_BUILTIN_VPCOMTRUEW,
21598 IX86_BUILTIN_VPCOMEQD,
21599 IX86_BUILTIN_VPCOMNED,
21600 IX86_BUILTIN_VPCOMLTD,
21601 IX86_BUILTIN_VPCOMLED,
21602 IX86_BUILTIN_VPCOMGTD,
21603 IX86_BUILTIN_VPCOMGED,
21604 IX86_BUILTIN_VPCOMFALSED,
21605 IX86_BUILTIN_VPCOMTRUED,
21607 IX86_BUILTIN_VPCOMEQQ,
21608 IX86_BUILTIN_VPCOMNEQ,
21609 IX86_BUILTIN_VPCOMLTQ,
21610 IX86_BUILTIN_VPCOMLEQ,
21611 IX86_BUILTIN_VPCOMGTQ,
21612 IX86_BUILTIN_VPCOMGEQ,
21613 IX86_BUILTIN_VPCOMFALSEQ,
21614 IX86_BUILTIN_VPCOMTRUEQ,
21616 /* LWP instructions. */
21617 IX86_BUILTIN_LLWPCB,
21618 IX86_BUILTIN_SLWPCB,
21619 IX86_BUILTIN_LWPVAL32,
21620 IX86_BUILTIN_LWPVAL64,
21621 IX86_BUILTIN_LWPINS32,
21622 IX86_BUILTIN_LWPINS64,
21624 IX86_BUILTIN_CLZS,
21626 IX86_BUILTIN_MAX
21629 /* Table for the ix86 builtin decls. */
21630 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
21632 /* Table of all of the builtin functions that are possible with different ISAs
21633 but are waiting to be built until a function is declared to use that
21634 ISA. */
21635 struct builtin_isa {
21636 const char *name; /* function name */
21637 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21638 int isa; /* isa_flags this builtin is defined for */
21639 bool const_p; /* true if the declaration is constant */
21640 bool set_and_not_built_p;
21643 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
21646 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
21647 of which isa_flags to use in the ix86_builtins_isa array. Store the
21648 function decl in the ix86_builtins array. Return the function decl,
21649 or NULL_TREE if the builtin was not added.
21651 If the front end has a special hook for builtin functions, delay adding
21652 builtin functions that aren't in the current ISA until the ISA is changed
21653 with function-specific optimization. Doing so can save about 300K for the
21654 default compiler. When the builtin is expanded, check at that time whether
21655 it is valid.
21657 If the front end doesn't have a special hook, record all builtins, even if
21658 they aren't in the current ISA, in case the user uses function-specific
21659 options for a different ISA, so that we don't get scope errors if a
21660 builtin is added in the middle of a function scope. */
21662 static inline tree
21663 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21664 enum ix86_builtins code)
21666 tree decl = NULL_TREE;
21668 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21670 ix86_builtins_isa[(int) code].isa = mask;
21672 mask &= ~OPTION_MASK_ISA_64BIT;
21673 if (mask == 0
21674 || (mask & ix86_isa_flags) != 0
21675 || (lang_hooks.builtin_function
21676 == lang_hooks.builtin_function_ext_scope))
21679 tree type = ix86_get_builtin_func_type (tcode);
21680 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21681 NULL, NULL_TREE);
21682 ix86_builtins[(int) code] = decl;
21683 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21685 else
21687 ix86_builtins[(int) code] = NULL_TREE;
21688 ix86_builtins_isa[(int) code].tcode = tcode;
21689 ix86_builtins_isa[(int) code].name = name;
21690 ix86_builtins_isa[(int) code].const_p = false;
21691 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21695 return decl;
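/* Illustrative sketch, not part of the original source: a typical call to
   def_builtin from the target's builtin-initialization code.  The builtin
   name and enum value below are hypothetical placeholders for the pattern;
   the real registrations are driven by the bdesc_* tables later in this
   file and by def_builtin_const.

     def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_example",
                  V4SI_FTYPE_V4SI_V4SI, IX86_BUILTIN_EXAMPLE);

   Depending on whether the requested ISA is currently enabled (and on the
   front end's builtin hook), the call either creates the decl immediately
   via add_builtin_function or parks the request in ix86_builtins_isa[] for
   ix86_add_new_builtins to build later.  */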
21698 /* Like def_builtin, but also marks the function decl "const". */
21700 static inline tree
21701 def_builtin_const (int mask, const char *name,
21702 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21704 tree decl = def_builtin (mask, name, tcode, code);
21705 if (decl)
21706 TREE_READONLY (decl) = 1;
21707 else
21708 ix86_builtins_isa[(int) code].const_p = true;
21710 return decl;
21713 /* Add any new builtin functions for a given ISA that may not have been
21714 declared. This saves a bit of space compared to adding all of the
21715 declarations to the tree, even when they aren't used. */
21717 static void
21718 ix86_add_new_builtins (int isa)
21720 int i;
21722 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21724 if ((ix86_builtins_isa[i].isa & isa) != 0
21725 && ix86_builtins_isa[i].set_and_not_built_p)
21727 tree decl, type;
21729 /* Don't define the builtin again. */
21730 ix86_builtins_isa[i].set_and_not_built_p = false;
21732 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21733 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21734 type, i, BUILT_IN_MD, NULL,
21735 NULL_TREE);
21737 ix86_builtins[i] = decl;
21738 if (ix86_builtins_isa[i].const_p)
21739 TREE_READONLY (decl) = 1;
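/* Illustrative sketch, not in the original source: once function-specific
   option handling enables additional ISA bits (the situation described in
   the comment above def_builtin), a caller would typically pass the updated
   ISA mask, e.g.

     ix86_add_new_builtins (ix86_isa_flags);

   which scans ix86_builtins_isa[], builds every decl whose ISA bit is now
   enabled and whose set_and_not_built_p flag is still true, and applies the
   deferred TREE_READONLY marking recorded by def_builtin_const.  */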
21744 /* Bits for builtin_description.flag. */
21746 /* Set when we don't support the comparison natively, and should
21747 swap the comparison operands in order to support it. */
21748 #define BUILTIN_DESC_SWAP_OPERANDS 1
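/* For example, a natively unsupported "greater than" test is handled by
   emitting the "less than" pattern with its operands exchanged; the same
   idea appears below as the *_SWAP function-type codes, as in this
   bdesc_args entry:

     { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps",
       IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },  */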
21750 struct builtin_description
21752 const unsigned int mask;
21753 const enum insn_code icode;
21754 const char *const name;
21755 const enum ix86_builtins code;
21756 const enum rtx_code comparison;
21757 const int flag;
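/* Reading the table entries below, in field order: the ISA mask the builtin
   requires, the insn pattern used to expand it, the builtin's name, its
   enum ix86_builtins value, an rtx comparison code (or UNKNOWN), and a flag
   word whose meaning depends on the table -- a BUILTIN_DESC_* bit, a CC mode
   for the pcmpestr/pcmpistr flag variants, or an ix86_builtin_func_type code
   cast to int for the argument tables.  */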
21760 static const struct builtin_description bdesc_comi[] =
21762 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21763 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21764 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21765 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21766 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21767 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21768 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21769 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21770 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21771 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21772 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21773 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21774 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21775 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21776 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21777 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21778 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21779 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21780 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21781 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21782 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21783 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21784 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21785 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21788 static const struct builtin_description bdesc_pcmpestr[] =
21790 /* SSE4.2 */
21791 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21792 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21793 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21794 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21795 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21796 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21797 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21800 static const struct builtin_description bdesc_pcmpistr[] =
21802 /* SSE4.2 */
21803 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21804 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21805 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21806 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21807 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21808 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21809 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
21812 /* Special builtins with a variable number of arguments. */
21813 static const struct builtin_description bdesc_special_args[] =
21815 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21816 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21818 /* MMX */
21819 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21821 /* 3DNow! */
21822 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21824 /* SSE */
21825 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21826 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21827 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21829 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21830 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21831 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21832 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21834 /* SSE or 3DNow!A */
21835 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21836 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21838 /* SSE2 */
21839 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21840 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21841 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21842 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21843 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21844 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21845 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21846 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21847 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21849 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21850 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21852 /* SSE3 */
21853 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21855 /* SSE4.1 */
21856 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21858 /* SSE4A */
21859 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21860 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21862 /* AVX */
21863 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21864 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21866 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21867 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21868 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21869 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21870 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21872 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21873 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21874 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21875 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21876 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21877 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21878 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21880 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21881 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21882 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21884 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21885 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21886 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21887 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21888 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21889 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21890 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21891 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21893 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21894 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21895 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21896 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21897 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
21898 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
21902 /* Builtins with a variable number of arguments. */
21903 static const struct builtin_description bdesc_args[] =
21905 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21906 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21907 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21908 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21909 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21910 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21911 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21913 /* MMX */
21914 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21915 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21916 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21917 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21918 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21919 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21921 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21922 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21923 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21924 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21925 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21926 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21927 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21928 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21930 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21931 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21933 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21934 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21935 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21936 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21938 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21939 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21940 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21941 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21942 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21943 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21945 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21946 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21947 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21948 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21949 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI},
21950 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI},
21952 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21953 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
21954 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21956 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
21958 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21959 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21960 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21961 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21962 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21963 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21965 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21966 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21967 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21968 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21969 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21970 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21972 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21973 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21974 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21975 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21977 /* 3DNow! */
21978 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21979 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21980 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21981 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21983 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21984 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21985 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21986 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21987 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21988 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21989 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21990 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21991 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21992 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21993 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21994 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21995 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21996 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21997 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21999 /* 3DNow!A */
22000 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
22001 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
22002 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
22003 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
22004 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
22005 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
22007 /* SSE */
22008 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
22009 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22010 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22011 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22012 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22013 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22014 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
22015 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
22016 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
22017 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
22018 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
22019 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
22021 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22023 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22024 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22025 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22026 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22027 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22028 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22029 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22030 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22032 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
22033 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
22034 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
22035 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
22036 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
22037 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
22038 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
22039 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
22040 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
22041 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
22042 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP},
22043 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
22044 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
22045 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
22046 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
22047 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
22048 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
22049 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
22050 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
22051 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
22052 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
22053 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
22055 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22056 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22057 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22058 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22060 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22061 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22062 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22063 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22065 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22067 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22068 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22069 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22070 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22071 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22073 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
22074 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
22075 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
22077 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
22079 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
22080 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
22081 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
22083 /* SSE MMX or 3DNow!A */
22084 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22085 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22086 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22088 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22089 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22090 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22091 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22093 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
22094 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
22096 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
22098 /* SSE2 */
22099 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22101 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
22102 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
22103 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
22104 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
22105 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
22106 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
22107 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
22108 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
22109 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
22110 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
22111 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
22112 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
22114 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
22115 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
22116 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
22117 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
22118 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
22119 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
22121 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
22122 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
22123 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
22124 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
22125 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
22127 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
22129 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
22130 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
22131 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
22132 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
22134 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
22135 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
22136 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
22138 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22139 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22140 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22141 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22142 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22143 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22144 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22145 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22147 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
22148 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
22149 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
22150 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
22151 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP},
22152 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22153 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
22154 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
22155 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
22156 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
22157 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
22158 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22159 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
22160 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
22161 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
22162 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22163 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
22164 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
22165 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
22166 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22168 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22169 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22170 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22171 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22173 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22174 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22175 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22176 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22178 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22180 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22181 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22182 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22184 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
22186 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22187 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22188 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22189 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22190 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22191 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22192 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22193 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22195 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22196 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22197 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22198 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22199 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22200 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22201 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22202 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22204 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22205 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22207 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22208 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22209 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22210 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22212 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22213 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22215 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22216 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22217 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22218 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22219 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22220 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22222 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22223 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22224 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22225 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22227 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22228 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22229 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22230 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22231 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22232 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22233 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22234 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22236 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
22237 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
22238 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
22240 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22241 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
22243 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
22244 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22246 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
22248 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
22249 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
22250 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
22251 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
22253 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
22254 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22255 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22256 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
22257 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22258 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22259 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
22261 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
22262 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22263 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22264 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
22265 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22266 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22267 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
22269 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22270 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22271 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22272 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22274 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
22275 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
22276 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
22278 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
22280 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
22281 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
22283 { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22285 /* SSE2 MMX */
22286 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
22287 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
22289 /* SSE3 */
22290 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22291 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22293 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22294 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22295 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22296 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22297 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22298 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22300 /* SSSE3 */
22301 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
22302 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
22303 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
22304 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
22305 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
22306 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
22308 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22309 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22310 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22311 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22312 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22313 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22314 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22315 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22316 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22317 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22318 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22319 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22320 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
22321 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
22322 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22323 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22324 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22325 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22326 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22327 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22328 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22329 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22330 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22331 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22333 /* SSSE3. */
22334 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
22335 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
22337 /* SSE4.1 */
22338 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22339 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22340 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
22341 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
22342 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22343 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22344 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22345 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
22346 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
22347 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
22349 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
22350 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
22351 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
22352 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
22353 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
22354 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
22355 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
22356 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
22357 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
22358 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
22359 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
22360 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
22361 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
22363 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
22364 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22365 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22366 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22367 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22368 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22369 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22370 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22371 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22372 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22373 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22374 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22376 /* SSE4.1 */
22377 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22378 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22379 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22380 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22382 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22383 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22384 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22386 /* SSE4.2 */
22387 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22388 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
22389 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
22390 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
22391 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
22393 /* SSE4A */
22394 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
22395 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
22396 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
22397 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22399 /* AES */
22400 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
22401 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22403 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22404 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22405 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22406 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22408 /* PCLMUL */
22409 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
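/* Note: the AES and PCLMUL rows above, like the FABSQ/COPYSIGNQ rows
   earlier, carry a null name field; presumably those builtins are
   registered under their public names elsewhere, and this table only
   supplies the ISA mask, insn pattern and prototype used to expand
   them.  */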
22411 /* AVX */
22412 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22413 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22414 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22415 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22416 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22417 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22418 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22419 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22420 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22421 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22422 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22423 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22424 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22425 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22426 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22427 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22428 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22429 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22430 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22431 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22432 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22433 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22434 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22435 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22436 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22437 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22439 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
22440 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
22441 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
22442 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
22444 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22445 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22446 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
22447 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
22448 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22449 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22450 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22451 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22452 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22453 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22454 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22455 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22456 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22457 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
22458 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
22459 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
22460 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
22461 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
22462 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
22463 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22464 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
22465 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22466 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22467 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22468 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22469 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22470 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
22471 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22472 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22473 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22474 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22475 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22476 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22477 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22479 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22480 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22481 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22483 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22484 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22485 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22486 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22487 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22489 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22491 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22492 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22494 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22495 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22496 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22497 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22499 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22500 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22501 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22502 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8si, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22503 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v8sf, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22504 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_extract_lo_v4df, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22506 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22507 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22508 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22509 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22510 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22511 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22512 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22513 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22514 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22515 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22516 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22517 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22518 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22519 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22520 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22522 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22523 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22525 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
22526 };
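/* Each row of these tables reads: { ISA option mask, insn code of the
   expander pattern, builtin name (or 0), IX86_BUILTIN_* enum value,
   RTX comparison code (UNKNOWN when unused), function prototype
   (*_FTYPE_* value) cast to int }.  */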
22528 /* FMA4 and XOP. */
22529 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
22530 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
22531 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
22532 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
22533 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
22534 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
22535 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
22536 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
22537 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
22538 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
22539 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
22540 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
22541 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
22542 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
22543 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
22544 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
22545 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
22546 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
22547 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
22548 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
22549 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
22550 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
22551 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
22552 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
22553 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
22554 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
22555 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
22556 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
22557 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
22558 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
22559 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
22560 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
22561 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
22562 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
22563 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
22564 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
22565 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
22566 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
22567 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
22568 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
22569 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
22570 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
22571 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
22572 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
22573 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
22574 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
22575 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
22576 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
22577 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
22578 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
22579 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
22580 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
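/* The MULTI_ARG_* names above encode the prototype they map to: the
   digit gives the operand count, SF/DF/SI/DI/HI/QI the vector element
   type (a trailing "2" selects the 256-bit form), two-type forms such
   as _HI_SI or _SI_DI pair a narrower and a wider element type, _IMM
   marks an integer count/immediate operand, and _CMP/_TF select the
   comparison and condition-code prototype variants.  */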
22582 static const struct builtin_description bdesc_multi_arg[] =
22583 {
22584 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22585 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22586 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22587 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22588 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22589 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22590 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22591 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22593 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22594 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22595 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22596 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22597 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22598 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22599 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22600 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22602 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22603 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22604 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22605 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22607 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22608 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22609 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22610 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22612 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22613 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22614 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22615 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22617 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22618 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22619 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22620 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22622 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
22623 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
22624 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
22625 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
22626 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
22627 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
22628 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
22630 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22631 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22632 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
22633 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
22634 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
22635 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22636 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22638 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
22640 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22641 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22642 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22643 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22644 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22645 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22646 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22647 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22648 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22649 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22650 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22651 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22653 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22654 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
22655 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
22656 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
22657 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
22658 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
22659 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
22660 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
22661 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22662 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
22663 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
22664 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
22665 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22666 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
22667 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
22668 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
22670 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
22671 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
22672 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
22673 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
22674 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
22675 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
22677 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22678 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22679 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22680 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22681 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22682 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22683 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22684 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22685 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22686 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22687 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22688 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22689 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22690 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22691 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22693 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
22694 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22695 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22696 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
22697 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
22698 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
22699 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
22701 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
22702 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22703 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22704 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
22705 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
22706 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
22707 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
22709 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
22710 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22711 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22712 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
22713 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
22714 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
22715 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
22717 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22718 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22719 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22720 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
22721 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
22722 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
22723 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
22725 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
22726 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22727 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3, "__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22728 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
22729 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
22730 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
22731 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
22733 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
22734 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22735 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22736 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
22737 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
22738 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
22739 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
22741 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
22742 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22743 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22744 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
22745 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
22746 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
22747 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
22749 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22750 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22751 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22752 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
22753 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
22754 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
22755 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
22757 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22758 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22759 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22760 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22761 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub", IX86_BUILTIN_VPCOMFALSEUB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22762 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw", IX86_BUILTIN_VPCOMFALSEUW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22763 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud", IX86_BUILTIN_VPCOMFALSEUD, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22764 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq", IX86_BUILTIN_VPCOMFALSEUQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22766 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22767 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22768 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22769 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22770 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22771 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22772 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22773 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22775 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
22776 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
22777 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
22778 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
22782 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
22783 not in the current target ISA, so that the user can compile particular
22784 modules with target-specific options that differ from the command-line
22785 options. */
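/* Illustrative sketch (user code, not part of this file): because the
   builtins are registered regardless of the command-line ISA, a single
   function can opt in through the target attribute and call one of them
   directly, e.g. the AES builtin registered below.  Names prefixed with
   "example_" are hypothetical.  */
#if 0
typedef long long example_v2di __attribute__ ((__vector_size__ (16)));

__attribute__ ((target ("aes,sse2")))
example_v2di
example_aes_round (example_v2di state, example_v2di key)
{
  /* __builtin_ia32_aesenc128 takes and returns a V2DI vector.  */
  return __builtin_ia32_aesenc128 (state, key);
}
#endif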
22786 static void
22787 ix86_init_mmx_sse_builtins (void)
22789 const struct builtin_description * d;
22790 enum ix86_builtin_func_type ftype;
22791 size_t i;
22793 /* Add all special builtins with a variable number of operands. */
22794 for (i = 0, d = bdesc_special_args;
22795 i < ARRAY_SIZE (bdesc_special_args);
22796 i++, d++)
22798 if (d->name == 0)
22799 continue;
22801 ftype = (enum ix86_builtin_func_type) d->flag;
22802 def_builtin (d->mask, d->name, ftype, d->code);
22805 /* Add all builtins with a variable number of operands. */
22806 for (i = 0, d = bdesc_args;
22807 i < ARRAY_SIZE (bdesc_args);
22808 i++, d++)
22810 if (d->name == 0)
22811 continue;
22813 ftype = (enum ix86_builtin_func_type) d->flag;
22814 def_builtin_const (d->mask, d->name, ftype, d->code);
22817 /* pcmpestr[im] insns. */
22818 for (i = 0, d = bdesc_pcmpestr;
22819 i < ARRAY_SIZE (bdesc_pcmpestr);
22820 i++, d++)
22822 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22823 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
22824 else
22825 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
22826 def_builtin_const (d->mask, d->name, ftype, d->code);
22829 /* pcmpistr[im] insns. */
22830 for (i = 0, d = bdesc_pcmpistr;
22831 i < ARRAY_SIZE (bdesc_pcmpistr);
22832 i++, d++)
22834 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22835 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
22836 else
22837 ftype = INT_FTYPE_V16QI_V16QI_INT;
22838 def_builtin_const (d->mask, d->name, ftype, d->code);
22841 /* comi/ucomi insns. */
22842 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22844 if (d->mask == OPTION_MASK_ISA_SSE2)
22845 ftype = INT_FTYPE_V2DF_V2DF;
22846 else
22847 ftype = INT_FTYPE_V4SF_V4SF;
22848 def_builtin_const (d->mask, d->name, ftype, d->code);
22851 /* SSE */
22852 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
22853 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
22854 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
22855 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
22857 /* SSE or 3DNow!A */
22858 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22859 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
22860 IX86_BUILTIN_MASKMOVQ);
22862 /* SSE2 */
22863 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
22864 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
22866 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
22867 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
22868 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
22869 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
22871 /* SSE3. */
22872 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
22873 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
22874 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
22875 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
22877 /* AES */
22878 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
22879 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
22880 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
22881 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
22882 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
22883 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
22884 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
22885 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
22886 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
22887 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
22888 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
22889 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
22891 /* PCLMUL */
22892 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
22893 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
22895 /* MMX access to the vec_init patterns. */
22896 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
22897 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
22899 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
22900 V4HI_FTYPE_HI_HI_HI_HI,
22901 IX86_BUILTIN_VEC_INIT_V4HI);
22903 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
22904 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
22905 IX86_BUILTIN_VEC_INIT_V8QI);
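  /* Illustrative sketch (user code, not part of this file): the vec_init
     builtins registered above assemble an MMX vector from scalars.  Names
     prefixed with "example_" are hypothetical.  */
#if 0
typedef int example_v2si __attribute__ ((__vector_size__ (8)));

example_v2si
example_make_pair (int lo, int hi)
{
  /* V2SI_FTYPE_INT_INT: two ints in, one V2SI vector out.  */
  return __builtin_ia32_vec_init_v2si (lo, hi);
}
#endif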
22907 /* Access to the vec_extract patterns. */
22908 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
22909 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
22910 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
22911 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
22912 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
22913 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
22914 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
22915 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
22916 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
22917 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
22919 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22920 "__builtin_ia32_vec_ext_v4hi",
22921 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
22923 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
22924 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
22926 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
22927 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
22929 /* Access to the vec_set patterns. */
22930 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
22931 "__builtin_ia32_vec_set_v2di",
22932 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
22934 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
22935 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
22937 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
22938 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
22940 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
22941 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
22943 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22944 "__builtin_ia32_vec_set_v4hi",
22945 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
22947 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
22948 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
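  /* Illustrative sketch (user code, not part of this file): element access
     through the vec_ext/vec_set builtins above; the selector is expected to
     be an integer constant.  Names prefixed with "example_" are
     hypothetical.  */
#if 0
typedef float example_v4sf __attribute__ ((__vector_size__ (16)));

float
example_get_lane2 (example_v4sf v)
{
  /* FLOAT_FTYPE_V4SF_INT: extract element 2.  */
  return __builtin_ia32_vec_ext_v4sf (v, 2);
}

example_v4sf
example_set_lane0 (example_v4sf v, float x)
{
  /* V4SF_FTYPE_V4SF_FLOAT_INT: replace element 0 (SSE4.1).  */
  return __builtin_ia32_vec_set_v4sf (v, x, 0);
}
#endif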
22950 /* Add the FMA4/XOP multi-arg builtin instructions. */
22951 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22953 if (d->name == 0)
22954 continue;
22956 ftype = (enum ix86_builtin_func_type) d->flag;
22957 def_builtin_const (d->mask, d->name, ftype, d->code);
22961 /* Internal method for ix86_init_builtins. */
22963 static void
22964 ix86_init_builtins_va_builtins_abi (void)
22966 tree ms_va_ref, sysv_va_ref;
22967 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22968 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22969 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22970 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22972 if (!TARGET_64BIT)
22973 return;
22974 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22975 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22976 ms_va_ref = build_reference_type (ms_va_list_type_node);
22977 sysv_va_ref =
22978 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22980 fnvoid_va_end_ms =
22981 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22982 fnvoid_va_start_ms =
22983 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22984 fnvoid_va_end_sysv =
22985 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22986 fnvoid_va_start_sysv =
22987 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22988 NULL_TREE);
22989 fnvoid_va_copy_ms =
22990 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22991 NULL_TREE);
22992 fnvoid_va_copy_sysv =
22993 build_function_type_list (void_type_node, sysv_va_ref,
22994 sysv_va_ref, NULL_TREE);
22996 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22997 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22998 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22999 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
23000 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
23001 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
23002 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
23003 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
23004 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
23005 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
23006 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
23007 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
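/* Illustrative sketch (user code, not part of this file, x86-64 only): an
   ms_abi varargs function uses the __builtin_ms_va_* family registered
   above; __builtin_ms_va_list is assumed to be the list type GCC provides
   alongside these builtins.  The "example_" name is hypothetical.  */
#if 0
int __attribute__ ((ms_abi))
example_sum (int count, ...)
{
  __builtin_ms_va_list ap;
  int i, total = 0;

  __builtin_ms_va_start (ap, count);
  for (i = 0; i < count; i++)
    total += __builtin_va_arg (ap, int);
  __builtin_ms_va_end (ap);
  return total;
}
#endif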
23010 static void
23011 ix86_init_builtin_types (void)
23013 tree float128_type_node, float80_type_node;
23015 /* The __float80 type. */
23016 float80_type_node = long_double_type_node;
23017 if (TYPE_MODE (float80_type_node) != XFmode)
23019 /* long double is not XFmode here; build a separate 80-bit type. */
23020 float80_type_node = make_node (REAL_TYPE);
23022 TYPE_PRECISION (float80_type_node) = 80;
23023 layout_type (float80_type_node);
23025 (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");
23027 /* The __float128 type. */
23028 float128_type_node = make_node (REAL_TYPE);
23029 TYPE_PRECISION (float128_type_node) = 128;
23030 layout_type (float128_type_node);
23031 (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
23033 /* This macro is built by i386-builtin-types.awk. */
23034 DEFINE_BUILTIN_PRIMITIVE_TYPES;
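/* Illustrative sketch (user code, not part of this file): the two types
   registered above; the Q literal suffix is assumed for __float128.  Names
   prefixed with "example_" are hypothetical.  */
#if 0
__float128 example_quad = 1.0Q;   /* 128-bit IEEE quad precision */
__float80  example_ext  = 1.0L;   /* 80-bit extended; same layout as long double when it is XFmode */
#endif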
23037 static void
23038 ix86_init_builtins (void)
23040 tree t;
23042 ix86_init_builtin_types ();
23044 /* TFmode support builtins. */
23045 def_builtin_const (0, "__builtin_infq",
23046 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
23047 def_builtin_const (0, "__builtin_huge_valq",
23048 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
23050 /* We will expand them to a normal call if SSE2 isn't available, since
23051 they are used by libgcc. */
23052 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
23053 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
23054 BUILT_IN_MD, "__fabstf2", NULL_TREE);
23055 TREE_READONLY (t) = 1;
23056 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
23058 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
23059 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
23060 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
23061 TREE_READONLY (t) = 1;
23062 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
23064 ix86_init_mmx_sse_builtins ();
23066 if (TARGET_64BIT)
23067 ix86_init_builtins_va_builtins_abi ();
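/* Illustrative sketch (user code, not part of this file): the TFmode
   builtins registered above; without SSE2 they expand to libgcc calls.
   The "example_" name is hypothetical.  */
#if 0
__float128
example_safe_div (__float128 a, __float128 b)
{
  if (b == 0)
    return __builtin_copysignq (__builtin_infq (), a);
  return a / b;
}
#endif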
23070 /* Return the ix86 builtin for CODE. */
23072 static tree
23073 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
23075 if (code >= IX86_BUILTIN_MAX)
23076 return error_mark_node;
23078 return ix86_builtins[code];
23081 /* Errors in the source file can cause expand_expr to return const0_rtx
23082 where we expect a vector. To avoid crashing, use one of the vector
23083 clear instructions. */
23084 static rtx
23085 safe_vector_operand (rtx x, enum machine_mode mode)
23087 if (x == const0_rtx)
23088 x = CONST0_RTX (mode);
23089 return x;
23092 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
23094 static rtx
23095 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
23097 rtx pat;
23098 tree arg0 = CALL_EXPR_ARG (exp, 0);
23099 tree arg1 = CALL_EXPR_ARG (exp, 1);
23100 rtx op0 = expand_normal (arg0);
23101 rtx op1 = expand_normal (arg1);
23102 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23103 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23104 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
23106 if (VECTOR_MODE_P (mode0))
23107 op0 = safe_vector_operand (op0, mode0);
23108 if (VECTOR_MODE_P (mode1))
23109 op1 = safe_vector_operand (op1, mode1);
23111 if (optimize || !target
23112 || GET_MODE (target) != tmode
23113 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23114 target = gen_reg_rtx (tmode);
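  /* If the second operand arrives as SImode but the insn wants TImode,
     load it into a vector register and use its TImode low part.  */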
23116 if (GET_MODE (op1) == SImode && mode1 == TImode)
23118 rtx x = gen_reg_rtx (V4SImode);
23119 emit_insn (gen_sse2_loadd (x, op1));
23120 op1 = gen_lowpart (TImode, x);
23123 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
23124 op0 = copy_to_mode_reg (mode0, op0);
23125 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
23126 op1 = copy_to_mode_reg (mode1, op1);
23128 pat = GEN_FCN (icode) (target, op0, op1);
23129 if (! pat)
23130 return 0;
23132 emit_insn (pat);
23134 return target;
23137 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
23139 static rtx
23140 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
23141 enum ix86_builtin_func_type m_type,
23142 enum rtx_code sub_code)
23144 rtx pat;
23145 int i;
23146 int nargs;
23147 bool comparison_p = false;
23148 bool tf_p = false;
23149 bool last_arg_constant = false;
23150 int num_memory = 0;
23151 struct {
23152 rtx op;
23153 enum machine_mode mode;
23154 } args[4];
23156 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23158 switch (m_type)
23160 case MULTI_ARG_4_DF2_DI_I:
23161 case MULTI_ARG_4_DF2_DI_I1:
23162 case MULTI_ARG_4_SF2_SI_I:
23163 case MULTI_ARG_4_SF2_SI_I1:
23164 nargs = 4;
23165 last_arg_constant = true;
23166 break;
23168 case MULTI_ARG_3_SF:
23169 case MULTI_ARG_3_DF:
23170 case MULTI_ARG_3_SF2:
23171 case MULTI_ARG_3_DF2:
23172 case MULTI_ARG_3_DI:
23173 case MULTI_ARG_3_SI:
23174 case MULTI_ARG_3_SI_DI:
23175 case MULTI_ARG_3_HI:
23176 case MULTI_ARG_3_HI_SI:
23177 case MULTI_ARG_3_QI:
23178 case MULTI_ARG_3_DI2:
23179 case MULTI_ARG_3_SI2:
23180 case MULTI_ARG_3_HI2:
23181 case MULTI_ARG_3_QI2:
23182 nargs = 3;
23183 break;
23185 case MULTI_ARG_2_SF:
23186 case MULTI_ARG_2_DF:
23187 case MULTI_ARG_2_DI:
23188 case MULTI_ARG_2_SI:
23189 case MULTI_ARG_2_HI:
23190 case MULTI_ARG_2_QI:
23191 nargs = 2;
23192 break;
23194 case MULTI_ARG_2_DI_IMM:
23195 case MULTI_ARG_2_SI_IMM:
23196 case MULTI_ARG_2_HI_IMM:
23197 case MULTI_ARG_2_QI_IMM:
23198 nargs = 2;
23199 last_arg_constant = true;
23200 break;
23202 case MULTI_ARG_1_SF:
23203 case MULTI_ARG_1_DF:
23204 case MULTI_ARG_1_SF2:
23205 case MULTI_ARG_1_DF2:
23206 case MULTI_ARG_1_DI:
23207 case MULTI_ARG_1_SI:
23208 case MULTI_ARG_1_HI:
23209 case MULTI_ARG_1_QI:
23210 case MULTI_ARG_1_SI_DI:
23211 case MULTI_ARG_1_HI_DI:
23212 case MULTI_ARG_1_HI_SI:
23213 case MULTI_ARG_1_QI_DI:
23214 case MULTI_ARG_1_QI_SI:
23215 case MULTI_ARG_1_QI_HI:
23216 nargs = 1;
23217 break;
23219 case MULTI_ARG_2_DI_CMP:
23220 case MULTI_ARG_2_SI_CMP:
23221 case MULTI_ARG_2_HI_CMP:
23222 case MULTI_ARG_2_QI_CMP:
23223 nargs = 2;
23224 comparison_p = true;
23225 break;
23227 case MULTI_ARG_2_SF_TF:
23228 case MULTI_ARG_2_DF_TF:
23229 case MULTI_ARG_2_DI_TF:
23230 case MULTI_ARG_2_SI_TF:
23231 case MULTI_ARG_2_HI_TF:
23232 case MULTI_ARG_2_QI_TF:
23233 nargs = 2;
23234 tf_p = true;
23235 break;
23237 default:
23238 gcc_unreachable ();
23241 if (optimize || !target
23242 || GET_MODE (target) != tmode
23243 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23244 target = gen_reg_rtx (tmode);
23246 gcc_assert (nargs <= 4);
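  /* For the comparison forms the insn's operand 1 is the comparison rtx
     itself, so the call arguments map to insn operands shifted by one
     (the `adjust' below).  */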
23248 for (i = 0; i < nargs; i++)
23250 tree arg = CALL_EXPR_ARG (exp, i);
23251 rtx op = expand_normal (arg);
23252 int adjust = (comparison_p) ? 1 : 0;
23253 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
23255 if (last_arg_constant && i == nargs-1)
23257 if (!CONST_INT_P (op))
23259 error ("last argument must be an immediate");
23260 return gen_reg_rtx (tmode);
23263 else
23265 if (VECTOR_MODE_P (mode))
23266 op = safe_vector_operand (op, mode);
23268 /* If we aren't optimizing, only allow one memory operand to be
23269 generated. */
23270 if (memory_operand (op, mode))
23271 num_memory++;
23273 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
23275 if (optimize
23276 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
23277 || num_memory > 1)
23278 op = force_reg (mode, op);
23281 args[i].op = op;
23282 args[i].mode = mode;
23285 switch (nargs)
23287 case 1:
23288 pat = GEN_FCN (icode) (target, args[0].op);
23289 break;
23291 case 2:
23292 if (tf_p)
23293 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23294 GEN_INT ((int)sub_code));
23295 else if (! comparison_p)
23296 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23297 else
23299 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23300 args[0].op,
23301 args[1].op);
23303 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23305 break;
23307 case 3:
23308 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23309 break;
23311 case 4:
23312 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
23313 break;
23315 default:
23316 gcc_unreachable ();
23319 if (! pat)
23320 return 0;
23322 emit_insn (pat);
23323 return target;
23326 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23327 insns with vec_merge. */
23329 static rtx
23330 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23331 rtx target)
23333 rtx pat;
23334 tree arg0 = CALL_EXPR_ARG (exp, 0);
23335 rtx op1, op0 = expand_normal (arg0);
23336 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23337 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23339 if (optimize || !target
23340 || GET_MODE (target) != tmode
23341 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23342 target = gen_reg_rtx (tmode);
23344 if (VECTOR_MODE_P (mode0))
23345 op0 = safe_vector_operand (op0, mode0);
23347 if ((optimize && !register_operand (op0, mode0))
23348 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23349 op0 = copy_to_mode_reg (mode0, op0);
23351 op1 = op0;
23352 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23353 op1 = copy_to_mode_reg (mode0, op1);
23355 pat = GEN_FCN (icode) (target, op0, op1);
23356 if (! pat)
23357 return 0;
23358 emit_insn (pat);
23359 return target;
23362 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23364 static rtx
23365 ix86_expand_sse_compare (const struct builtin_description *d,
23366 tree exp, rtx target, bool swap)
23368 rtx pat;
23369 tree arg0 = CALL_EXPR_ARG (exp, 0);
23370 tree arg1 = CALL_EXPR_ARG (exp, 1);
23371 rtx op0 = expand_normal (arg0);
23372 rtx op1 = expand_normal (arg1);
23373 rtx op2;
23374 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23375 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23376 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23377 enum rtx_code comparison = d->comparison;
23379 if (VECTOR_MODE_P (mode0))
23380 op0 = safe_vector_operand (op0, mode0);
23381 if (VECTOR_MODE_P (mode1))
23382 op1 = safe_vector_operand (op1, mode1);
23384 /* Swap operands if we have a comparison that isn't available in
23385 hardware. */
23386 if (swap)
23388 rtx tmp = gen_reg_rtx (mode1);
23389 emit_move_insn (tmp, op1);
23390 op1 = op0;
23391 op0 = tmp;
23394 if (optimize || !target
23395 || GET_MODE (target) != tmode
23396 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23397 target = gen_reg_rtx (tmode);
23399 if ((optimize && !register_operand (op0, mode0))
23400 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23401 op0 = copy_to_mode_reg (mode0, op0);
23402 if ((optimize && !register_operand (op1, mode1))
23403 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23404 op1 = copy_to_mode_reg (mode1, op1);
23406 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23407 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23408 if (! pat)
23409 return 0;
23410 emit_insn (pat);
23411 return target;
23414 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23416 static rtx
23417 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23418 rtx target)
23420 rtx pat;
23421 tree arg0 = CALL_EXPR_ARG (exp, 0);
23422 tree arg1 = CALL_EXPR_ARG (exp, 1);
23423 rtx op0 = expand_normal (arg0);
23424 rtx op1 = expand_normal (arg1);
23425 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23426 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23427 enum rtx_code comparison = d->comparison;
23429 if (VECTOR_MODE_P (mode0))
23430 op0 = safe_vector_operand (op0, mode0);
23431 if (VECTOR_MODE_P (mode1))
23432 op1 = safe_vector_operand (op1, mode1);
23434 /* Swap operands if we have a comparison that isn't available in
23435 hardware. */
23436 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23438 rtx tmp = op1;
23439 op1 = op0;
23440 op0 = tmp;
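  /* The comi/ucomi pattern only sets the flags.  Materialize the result by
     zeroing an SImode register and setting its low QImode part from the
     requested comparison of the flags against zero.  */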
23443 target = gen_reg_rtx (SImode);
23444 emit_move_insn (target, const0_rtx);
23445 target = gen_rtx_SUBREG (QImode, target, 0);
23447 if ((optimize && !register_operand (op0, mode0))
23448 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23449 op0 = copy_to_mode_reg (mode0, op0);
23450 if ((optimize && !register_operand (op1, mode1))
23451 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23452 op1 = copy_to_mode_reg (mode1, op1);
23454 pat = GEN_FCN (d->icode) (op0, op1);
23455 if (! pat)
23456 return 0;
23457 emit_insn (pat);
23458 emit_insn (gen_rtx_SET (VOIDmode,
23459 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23460 gen_rtx_fmt_ee (comparison, QImode,
23461 SET_DEST (pat),
23462 const0_rtx)));
23464 return SUBREG_REG (target);
23467 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23469 static rtx
23470 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23471 rtx target)
23473 rtx pat;
23474 tree arg0 = CALL_EXPR_ARG (exp, 0);
23475 tree arg1 = CALL_EXPR_ARG (exp, 1);
23476 rtx op0 = expand_normal (arg0);
23477 rtx op1 = expand_normal (arg1);
23478 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23479 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23480 enum rtx_code comparison = d->comparison;
23482 if (VECTOR_MODE_P (mode0))
23483 op0 = safe_vector_operand (op0, mode0);
23484 if (VECTOR_MODE_P (mode1))
23485 op1 = safe_vector_operand (op1, mode1);
23487 target = gen_reg_rtx (SImode);
23488 emit_move_insn (target, const0_rtx);
23489 target = gen_rtx_SUBREG (QImode, target, 0);
23491 if ((optimize && !register_operand (op0, mode0))
23492 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23493 op0 = copy_to_mode_reg (mode0, op0);
23494 if ((optimize && !register_operand (op1, mode1))
23495 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23496 op1 = copy_to_mode_reg (mode1, op1);
23498 pat = GEN_FCN (d->icode) (op0, op1);
23499 if (! pat)
23500 return 0;
23501 emit_insn (pat);
23502 emit_insn (gen_rtx_SET (VOIDmode,
23503 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23504 gen_rtx_fmt_ee (comparison, QImode,
23505 SET_DEST (pat),
23506 const0_rtx)));
23508 return SUBREG_REG (target);
23511 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23513 static rtx
23514 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23515 tree exp, rtx target)
23517 rtx pat;
23518 tree arg0 = CALL_EXPR_ARG (exp, 0);
23519 tree arg1 = CALL_EXPR_ARG (exp, 1);
23520 tree arg2 = CALL_EXPR_ARG (exp, 2);
23521 tree arg3 = CALL_EXPR_ARG (exp, 3);
23522 tree arg4 = CALL_EXPR_ARG (exp, 4);
23523 rtx scratch0, scratch1;
23524 rtx op0 = expand_normal (arg0);
23525 rtx op1 = expand_normal (arg1);
23526 rtx op2 = expand_normal (arg2);
23527 rtx op3 = expand_normal (arg3);
23528 rtx op4 = expand_normal (arg4);
23529 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23531 tmode0 = insn_data[d->icode].operand[0].mode;
23532 tmode1 = insn_data[d->icode].operand[1].mode;
23533 modev2 = insn_data[d->icode].operand[2].mode;
23534 modei3 = insn_data[d->icode].operand[3].mode;
23535 modev4 = insn_data[d->icode].operand[4].mode;
23536 modei5 = insn_data[d->icode].operand[5].mode;
23537 modeimm = insn_data[d->icode].operand[6].mode;
23539 if (VECTOR_MODE_P (modev2))
23540 op0 = safe_vector_operand (op0, modev2);
23541 if (VECTOR_MODE_P (modev4))
23542 op2 = safe_vector_operand (op2, modev4);
23544 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23545 op0 = copy_to_mode_reg (modev2, op0);
23546 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23547 op1 = copy_to_mode_reg (modei3, op1);
23548 if ((optimize && !register_operand (op2, modev4))
23549 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23550 op2 = copy_to_mode_reg (modev4, op2);
23551 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23552 op3 = copy_to_mode_reg (modei5, op3);
23554 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23556 error ("the fifth argument must be a 8-bit immediate");
23557 return const0_rtx;
23560 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23562 if (optimize || !target
23563 || GET_MODE (target) != tmode0
23564 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23565 target = gen_reg_rtx (tmode0);
23567 scratch1 = gen_reg_rtx (tmode1);
23569 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23571 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23573 if (optimize || !target
23574 || GET_MODE (target) != tmode1
23575 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23576 target = gen_reg_rtx (tmode1);
23578 scratch0 = gen_reg_rtx (tmode0);
23580 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23582 else
23584 gcc_assert (d->flag);
23586 scratch0 = gen_reg_rtx (tmode0);
23587 scratch1 = gen_reg_rtx (tmode1);
23589 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23592 if (! pat)
23593 return 0;
23595 emit_insn (pat);
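  /* A nonzero d->flag marks the flag-reading variants; it holds the mode
     in which to read the flags register, and the 0/1 result is that
     register tested for equality with zero.  */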
23597 if (d->flag)
23599 target = gen_reg_rtx (SImode);
23600 emit_move_insn (target, const0_rtx);
23601 target = gen_rtx_SUBREG (QImode, target, 0);
23603 emit_insn
23604 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23605 gen_rtx_fmt_ee (EQ, QImode,
23606 gen_rtx_REG ((enum machine_mode) d->flag,
23607 FLAGS_REG),
23608 const0_rtx)));
23609 return SUBREG_REG (target);
23611 else
23612 return target;
23616 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23618 static rtx
23619 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23620 tree exp, rtx target)
23622 rtx pat;
23623 tree arg0 = CALL_EXPR_ARG (exp, 0);
23624 tree arg1 = CALL_EXPR_ARG (exp, 1);
23625 tree arg2 = CALL_EXPR_ARG (exp, 2);
23626 rtx scratch0, scratch1;
23627 rtx op0 = expand_normal (arg0);
23628 rtx op1 = expand_normal (arg1);
23629 rtx op2 = expand_normal (arg2);
23630 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23632 tmode0 = insn_data[d->icode].operand[0].mode;
23633 tmode1 = insn_data[d->icode].operand[1].mode;
23634 modev2 = insn_data[d->icode].operand[2].mode;
23635 modev3 = insn_data[d->icode].operand[3].mode;
23636 modeimm = insn_data[d->icode].operand[4].mode;
23638 if (VECTOR_MODE_P (modev2))
23639 op0 = safe_vector_operand (op0, modev2);
23640 if (VECTOR_MODE_P (modev3))
23641 op1 = safe_vector_operand (op1, modev3);
23643 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23644 op0 = copy_to_mode_reg (modev2, op0);
23645 if ((optimize && !register_operand (op1, modev3))
23646 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23647 op1 = copy_to_mode_reg (modev3, op1);
23649 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23651 error ("the third argument must be a 8-bit immediate");
23652 return const0_rtx;
23655 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23657 if (optimize || !target
23658 || GET_MODE (target) != tmode0
23659 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23660 target = gen_reg_rtx (tmode0);
23662 scratch1 = gen_reg_rtx (tmode1);
23664 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23666 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23668 if (optimize || !target
23669 || GET_MODE (target) != tmode1
23670 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23671 target = gen_reg_rtx (tmode1);
23673 scratch0 = gen_reg_rtx (tmode0);
23675 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23677 else
23679 gcc_assert (d->flag);
23681 scratch0 = gen_reg_rtx (tmode0);
23682 scratch1 = gen_reg_rtx (tmode1);
23684 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23687 if (! pat)
23688 return 0;
23690 emit_insn (pat);
23692 if (d->flag)
23694 target = gen_reg_rtx (SImode);
23695 emit_move_insn (target, const0_rtx);
23696 target = gen_rtx_SUBREG (QImode, target, 0);
23698 emit_insn
23699 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23700 gen_rtx_fmt_ee (EQ, QImode,
23701 gen_rtx_REG ((enum machine_mode) d->flag,
23702 FLAGS_REG),
23703 const0_rtx)));
23704 return SUBREG_REG (target);
23706 else
23707 return target;
23710 /* Subroutine of ix86_expand_builtin to take care of insns with a
23711 variable number of operands. */
23713 static rtx
23714 ix86_expand_args_builtin (const struct builtin_description *d,
23715 tree exp, rtx target)
23717 rtx pat, real_target;
23718 unsigned int i, nargs;
23719 unsigned int nargs_constant = 0;
23720 int num_memory = 0;
23721 struct
23723 rtx op;
23724 enum machine_mode mode;
23725 } args[4];
23726 bool last_arg_count = false;
23727 enum insn_code icode = d->icode;
23728 const struct insn_data_d *insn_p = &insn_data[icode];
23729 enum machine_mode tmode = insn_p->operand[0].mode;
23730 enum machine_mode rmode = VOIDmode;
23731 bool swap = false;
23732 enum rtx_code comparison = d->comparison;
23734 switch ((enum ix86_builtin_func_type) d->flag)
23736 case INT_FTYPE_V8SF_V8SF_PTEST:
23737 case INT_FTYPE_V4DI_V4DI_PTEST:
23738 case INT_FTYPE_V4DF_V4DF_PTEST:
23739 case INT_FTYPE_V4SF_V4SF_PTEST:
23740 case INT_FTYPE_V2DI_V2DI_PTEST:
23741 case INT_FTYPE_V2DF_V2DF_PTEST:
23742 return ix86_expand_sse_ptest (d, exp, target);
23743 case FLOAT128_FTYPE_FLOAT128:
23744 case FLOAT_FTYPE_FLOAT:
23745 case INT_FTYPE_INT:
23746 case UINT64_FTYPE_INT:
23747 case UINT16_FTYPE_UINT16:
23748 case INT64_FTYPE_INT64:
23749 case INT64_FTYPE_V4SF:
23750 case INT64_FTYPE_V2DF:
23751 case INT_FTYPE_V16QI:
23752 case INT_FTYPE_V8QI:
23753 case INT_FTYPE_V8SF:
23754 case INT_FTYPE_V4DF:
23755 case INT_FTYPE_V4SF:
23756 case INT_FTYPE_V2DF:
23757 case V16QI_FTYPE_V16QI:
23758 case V8SI_FTYPE_V8SF:
23759 case V8SI_FTYPE_V4SI:
23760 case V8HI_FTYPE_V8HI:
23761 case V8HI_FTYPE_V16QI:
23762 case V8QI_FTYPE_V8QI:
23763 case V8SF_FTYPE_V8SF:
23764 case V8SF_FTYPE_V8SI:
23765 case V8SF_FTYPE_V4SF:
23766 case V4SI_FTYPE_V4SI:
23767 case V4SI_FTYPE_V16QI:
23768 case V4SI_FTYPE_V4SF:
23769 case V4SI_FTYPE_V8SI:
23770 case V4SI_FTYPE_V8HI:
23771 case V4SI_FTYPE_V4DF:
23772 case V4SI_FTYPE_V2DF:
23773 case V4HI_FTYPE_V4HI:
23774 case V4DF_FTYPE_V4DF:
23775 case V4DF_FTYPE_V4SI:
23776 case V4DF_FTYPE_V4SF:
23777 case V4DF_FTYPE_V2DF:
23778 case V4SF_FTYPE_V4SF:
23779 case V4SF_FTYPE_V4SI:
23780 case V4SF_FTYPE_V8SF:
23781 case V4SF_FTYPE_V4DF:
23782 case V4SF_FTYPE_V2DF:
23783 case V2DI_FTYPE_V2DI:
23784 case V2DI_FTYPE_V16QI:
23785 case V2DI_FTYPE_V8HI:
23786 case V2DI_FTYPE_V4SI:
23787 case V2DF_FTYPE_V2DF:
23788 case V2DF_FTYPE_V4SI:
23789 case V2DF_FTYPE_V4DF:
23790 case V2DF_FTYPE_V4SF:
23791 case V2DF_FTYPE_V2SI:
23792 case V2SI_FTYPE_V2SI:
23793 case V2SI_FTYPE_V4SF:
23794 case V2SI_FTYPE_V2SF:
23795 case V2SI_FTYPE_V2DF:
23796 case V2SF_FTYPE_V2SF:
23797 case V2SF_FTYPE_V2SI:
23798 nargs = 1;
23799 break;
23800 case V4SF_FTYPE_V4SF_VEC_MERGE:
23801 case V2DF_FTYPE_V2DF_VEC_MERGE:
23802 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23803 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23804 case V16QI_FTYPE_V16QI_V16QI:
23805 case V16QI_FTYPE_V8HI_V8HI:
23806 case V8QI_FTYPE_V8QI_V8QI:
23807 case V8QI_FTYPE_V4HI_V4HI:
23808 case V8HI_FTYPE_V8HI_V8HI:
23809 case V8HI_FTYPE_V16QI_V16QI:
23810 case V8HI_FTYPE_V4SI_V4SI:
23811 case V8SF_FTYPE_V8SF_V8SF:
23812 case V8SF_FTYPE_V8SF_V8SI:
23813 case V4SI_FTYPE_V4SI_V4SI:
23814 case V4SI_FTYPE_V8HI_V8HI:
23815 case V4SI_FTYPE_V4SF_V4SF:
23816 case V4SI_FTYPE_V2DF_V2DF:
23817 case V4HI_FTYPE_V4HI_V4HI:
23818 case V4HI_FTYPE_V8QI_V8QI:
23819 case V4HI_FTYPE_V2SI_V2SI:
23820 case V4DF_FTYPE_V4DF_V4DF:
23821 case V4DF_FTYPE_V4DF_V4DI:
23822 case V4SF_FTYPE_V4SF_V4SF:
23823 case V4SF_FTYPE_V4SF_V4SI:
23824 case V4SF_FTYPE_V4SF_V2SI:
23825 case V4SF_FTYPE_V4SF_V2DF:
23826 case V4SF_FTYPE_V4SF_DI:
23827 case V4SF_FTYPE_V4SF_SI:
23828 case V2DI_FTYPE_V2DI_V2DI:
23829 case V2DI_FTYPE_V16QI_V16QI:
23830 case V2DI_FTYPE_V4SI_V4SI:
23831 case V2DI_FTYPE_V2DI_V16QI:
23832 case V2DI_FTYPE_V2DF_V2DF:
23833 case V2SI_FTYPE_V2SI_V2SI:
23834 case V2SI_FTYPE_V4HI_V4HI:
23835 case V2SI_FTYPE_V2SF_V2SF:
23836 case V2DF_FTYPE_V2DF_V2DF:
23837 case V2DF_FTYPE_V2DF_V4SF:
23838 case V2DF_FTYPE_V2DF_V2DI:
23839 case V2DF_FTYPE_V2DF_DI:
23840 case V2DF_FTYPE_V2DF_SI:
23841 case V2SF_FTYPE_V2SF_V2SF:
23842 case V1DI_FTYPE_V1DI_V1DI:
23843 case V1DI_FTYPE_V8QI_V8QI:
23844 case V1DI_FTYPE_V2SI_V2SI:
23845 if (comparison == UNKNOWN)
23846 return ix86_expand_binop_builtin (icode, exp, target);
23847 nargs = 2;
23848 break;
23849 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23850 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23851 gcc_assert (comparison != UNKNOWN);
23852 nargs = 2;
23853 swap = true;
23854 break;
23855 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23856 case V8HI_FTYPE_V8HI_SI_COUNT:
23857 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23858 case V4SI_FTYPE_V4SI_SI_COUNT:
23859 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23860 case V4HI_FTYPE_V4HI_SI_COUNT:
23861 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23862 case V2DI_FTYPE_V2DI_SI_COUNT:
23863 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23864 case V2SI_FTYPE_V2SI_SI_COUNT:
23865 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23866 case V1DI_FTYPE_V1DI_SI_COUNT:
23867 nargs = 2;
23868 last_arg_count = true;
23869 break;
23870 case UINT64_FTYPE_UINT64_UINT64:
23871 case UINT_FTYPE_UINT_UINT:
23872 case UINT_FTYPE_UINT_USHORT:
23873 case UINT_FTYPE_UINT_UCHAR:
23874 case UINT16_FTYPE_UINT16_INT:
23875 case UINT8_FTYPE_UINT8_INT:
23876 nargs = 2;
23877 break;
23878 case V2DI_FTYPE_V2DI_INT_CONVERT:
23879 nargs = 2;
23880 rmode = V1TImode;
23881 nargs_constant = 1;
23882 break;
23883 case V8HI_FTYPE_V8HI_INT:
23884 case V8SF_FTYPE_V8SF_INT:
23885 case V4SI_FTYPE_V4SI_INT:
23886 case V4SI_FTYPE_V8SI_INT:
23887 case V4HI_FTYPE_V4HI_INT:
23888 case V4DF_FTYPE_V4DF_INT:
23889 case V4SF_FTYPE_V4SF_INT:
23890 case V4SF_FTYPE_V8SF_INT:
23891 case V2DI_FTYPE_V2DI_INT:
23892 case V2DF_FTYPE_V2DF_INT:
23893 case V2DF_FTYPE_V4DF_INT:
23894 nargs = 2;
23895 nargs_constant = 1;
23896 break;
23897 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23898 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23899 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23900 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23901 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23902 nargs = 3;
23903 break;
23904 case V16QI_FTYPE_V16QI_V16QI_INT:
23905 case V8HI_FTYPE_V8HI_V8HI_INT:
23906 case V8SI_FTYPE_V8SI_V8SI_INT:
23907 case V8SI_FTYPE_V8SI_V4SI_INT:
23908 case V8SF_FTYPE_V8SF_V8SF_INT:
23909 case V8SF_FTYPE_V8SF_V4SF_INT:
23910 case V4SI_FTYPE_V4SI_V4SI_INT:
23911 case V4DF_FTYPE_V4DF_V4DF_INT:
23912 case V4DF_FTYPE_V4DF_V2DF_INT:
23913 case V4SF_FTYPE_V4SF_V4SF_INT:
23914 case V2DI_FTYPE_V2DI_V2DI_INT:
23915 case V2DF_FTYPE_V2DF_V2DF_INT:
23916 nargs = 3;
23917 nargs_constant = 1;
23918 break;
23919 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23920 nargs = 3;
23921 rmode = V2DImode;
23922 nargs_constant = 1;
23923 break;
23924 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23925 nargs = 3;
23926 rmode = DImode;
23927 nargs_constant = 1;
23928 break;
23929 case V2DI_FTYPE_V2DI_UINT_UINT:
23930 nargs = 3;
23931 nargs_constant = 2;
23932 break;
23933 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23934 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23935 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23936 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23937 nargs = 4;
23938 nargs_constant = 1;
23939 break;
23940 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23941 nargs = 4;
23942 nargs_constant = 2;
23943 break;
23944 default:
23945 gcc_unreachable ();
23948 gcc_assert (nargs <= ARRAY_SIZE (args));
23950 if (comparison != UNKNOWN)
23952 gcc_assert (nargs == 2);
23953 return ix86_expand_sse_compare (d, exp, target, swap);
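  /* When the builtin's return mode (rmode) differs from the insn's result
     mode (tmode), allocate the result in rmode and let the pattern write
     through a tmode subreg of it (real_target).  */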
23956 if (rmode == VOIDmode || rmode == tmode)
23958 if (optimize
23959 || target == 0
23960 || GET_MODE (target) != tmode
23961 || ! (*insn_p->operand[0].predicate) (target, tmode))
23962 target = gen_reg_rtx (tmode);
23963 real_target = target;
23965 else
23967 target = gen_reg_rtx (rmode);
23968 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23971 for (i = 0; i < nargs; i++)
23973 tree arg = CALL_EXPR_ARG (exp, i);
23974 rtx op = expand_normal (arg);
23975 enum machine_mode mode = insn_p->operand[i + 1].mode;
23976 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23978 if (last_arg_count && (i + 1) == nargs)
23980 /* SIMD shift insns take either an 8-bit immediate or a
23981 register as the count, but the builtin functions take an int.
23982 If the count doesn't match, we put it in a register. */
23983 if (!match)
23985 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23986 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23987 op = copy_to_reg (op);
23990 else if ((nargs - i) <= nargs_constant)
23992 if (!match)
23993 switch (icode)
23995 case CODE_FOR_sse4_1_roundpd:
23996 case CODE_FOR_sse4_1_roundps:
23997 case CODE_FOR_sse4_1_roundsd:
23998 case CODE_FOR_sse4_1_roundss:
23999 case CODE_FOR_sse4_1_blendps:
24000 case CODE_FOR_avx_blendpd256:
24001 case CODE_FOR_avx_vpermilv4df:
24002 case CODE_FOR_avx_roundpd256:
24003 case CODE_FOR_avx_roundps256:
24004 error ("the last argument must be a 4-bit immediate");
24005 return const0_rtx;
24007 case CODE_FOR_sse4_1_blendpd:
24008 case CODE_FOR_avx_vpermilv2df:
24009 case CODE_FOR_xop_vpermil2v2df3:
24010 case CODE_FOR_xop_vpermil2v4sf3:
24011 case CODE_FOR_xop_vpermil2v4df3:
24012 case CODE_FOR_xop_vpermil2v8sf3:
24013 error ("the last argument must be a 2-bit immediate");
24014 return const0_rtx;
24016 case CODE_FOR_avx_vextractf128v4df:
24017 case CODE_FOR_avx_vextractf128v8sf:
24018 case CODE_FOR_avx_vextractf128v8si:
24019 case CODE_FOR_avx_vinsertf128v4df:
24020 case CODE_FOR_avx_vinsertf128v8sf:
24021 case CODE_FOR_avx_vinsertf128v8si:
24022 error ("the last argument must be a 1-bit immediate");
24023 return const0_rtx;
24025 case CODE_FOR_avx_cmpsdv2df3:
24026 case CODE_FOR_avx_cmpssv4sf3:
24027 case CODE_FOR_avx_cmppdv2df3:
24028 case CODE_FOR_avx_cmppsv4sf3:
24029 case CODE_FOR_avx_cmppdv4df3:
24030 case CODE_FOR_avx_cmppsv8sf3:
24031 error ("the last argument must be a 5-bit immediate");
24032 return const0_rtx;
24034 default:
24035 switch (nargs_constant)
24037 case 2:
24038 if ((nargs - i) == nargs_constant)
24040 error ("the next to last argument must be an 8-bit immediate");
24041 break;
24043 case 1:
24044 error ("the last argument must be an 8-bit immediate");
24045 break;
24046 default:
24047 gcc_unreachable ();
24049 return const0_rtx;
24052 else
24054 if (VECTOR_MODE_P (mode))
24055 op = safe_vector_operand (op, mode);
24057 /* If we aren't optimizing, only allow one memory operand to
24058 be generated. */
24059 if (memory_operand (op, mode))
24060 num_memory++;
24062 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
24064 if (optimize || !match || num_memory > 1)
24065 op = copy_to_mode_reg (mode, op);
24067 else
24069 op = copy_to_reg (op);
24070 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
24074 args[i].op = op;
24075 args[i].mode = mode;
24078 switch (nargs)
24080 case 1:
24081 pat = GEN_FCN (icode) (real_target, args[0].op);
24082 break;
24083 case 2:
24084 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
24085 break;
24086 case 3:
24087 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24088 args[2].op);
24089 break;
24090 case 4:
24091 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24092 args[2].op, args[3].op);
24093 break;
24094 default:
24095 gcc_unreachable ();
24098 if (! pat)
24099 return 0;
24101 emit_insn (pat);
24102 return target;
24105 /* Subroutine of ix86_expand_builtin to take care of special insns
24106 with a variable number of operands. */
24108 static rtx
24109 ix86_expand_special_args_builtin (const struct builtin_description *d,
24110 tree exp, rtx target)
24112 tree arg;
24113 rtx pat, op;
24114 unsigned int i, nargs, arg_adjust, memory;
24115 struct
24117 rtx op;
24118 enum machine_mode mode;
24119 } args[3];
24120 enum insn_code icode = d->icode;
24121 bool last_arg_constant = false;
24122 const struct insn_data_d *insn_p = &insn_data[icode];
24123 enum machine_mode tmode = insn_p->operand[0].mode;
24124 enum { load, store } klass;
24126 switch ((enum ix86_builtin_func_type) d->flag)
24128 case VOID_FTYPE_VOID:
24129 emit_insn (GEN_FCN (icode) (target));
24130 return 0;
24131 case UINT64_FTYPE_VOID:
24132 nargs = 0;
24133 klass = load;
24134 memory = 0;
24135 break;
24136 case UINT64_FTYPE_PUNSIGNED:
24137 case V2DI_FTYPE_PV2DI:
24138 case V32QI_FTYPE_PCCHAR:
24139 case V16QI_FTYPE_PCCHAR:
24140 case V8SF_FTYPE_PCV4SF:
24141 case V8SF_FTYPE_PCFLOAT:
24142 case V4SF_FTYPE_PCFLOAT:
24143 case V4DF_FTYPE_PCV2DF:
24144 case V4DF_FTYPE_PCDOUBLE:
24145 case V2DF_FTYPE_PCDOUBLE:
24146 case VOID_FTYPE_PVOID:
24147 nargs = 1;
24148 klass = load;
24149 memory = 0;
24150 break;
24151 case VOID_FTYPE_PV2SF_V4SF:
24152 case VOID_FTYPE_PV4DI_V4DI:
24153 case VOID_FTYPE_PV2DI_V2DI:
24154 case VOID_FTYPE_PCHAR_V32QI:
24155 case VOID_FTYPE_PCHAR_V16QI:
24156 case VOID_FTYPE_PFLOAT_V8SF:
24157 case VOID_FTYPE_PFLOAT_V4SF:
24158 case VOID_FTYPE_PDOUBLE_V4DF:
24159 case VOID_FTYPE_PDOUBLE_V2DF:
24160 case VOID_FTYPE_PULONGLONG_ULONGLONG:
24161 case VOID_FTYPE_PINT_INT:
24162 nargs = 1;
24163 klass = store;
24164 /* Reserve memory operand for target. */
24165 memory = ARRAY_SIZE (args);
24166 break;
24167 case V4SF_FTYPE_V4SF_PCV2SF:
24168 case V2DF_FTYPE_V2DF_PCDOUBLE:
24169 nargs = 2;
24170 klass = load;
24171 memory = 1;
24172 break;
24173 case V8SF_FTYPE_PCV8SF_V8SF:
24174 case V4DF_FTYPE_PCV4DF_V4DF:
24175 case V4SF_FTYPE_PCV4SF_V4SF:
24176 case V2DF_FTYPE_PCV2DF_V2DF:
24177 nargs = 2;
24178 klass = load;
24179 memory = 0;
24180 break;
24181 case VOID_FTYPE_PV8SF_V8SF_V8SF:
24182 case VOID_FTYPE_PV4DF_V4DF_V4DF:
24183 case VOID_FTYPE_PV4SF_V4SF_V4SF:
24184 case VOID_FTYPE_PV2DF_V2DF_V2DF:
24185 nargs = 2;
24186 klass = store;
24187 /* Reserve memory operand for target. */
24188 memory = ARRAY_SIZE (args);
24189 break;
24190 case VOID_FTYPE_UINT_UINT_UINT:
24191 case VOID_FTYPE_UINT64_UINT_UINT:
24192 case UCHAR_FTYPE_UINT_UINT_UINT:
24193 case UCHAR_FTYPE_UINT64_UINT_UINT:
24194 nargs = 3;
24195 klass = load;
24196 memory = ARRAY_SIZE (args);
24197 last_arg_constant = true;
24198 break;
24199 default:
24200 gcc_unreachable ();
24203 gcc_assert (nargs <= ARRAY_SIZE (args));
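/* For "store" builtins the first call argument is the destination
   pointer: turn it into the MEM the insn will write and shift the
   remaining arguments down by one.  For "load" builtins create a
   fresh register target when the caller's TARGET is unusable.  */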
24205 if (klass == store)
24207 arg = CALL_EXPR_ARG (exp, 0);
24208 op = expand_normal (arg);
24209 gcc_assert (target == 0);
24210 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
24211 arg_adjust = 1;
24213 else
24215 arg_adjust = 0;
24216 if (optimize
24217 || target == 0
24218 || GET_MODE (target) != tmode
24219 || ! (*insn_p->operand[0].predicate) (target, tmode))
24220 target = gen_reg_rtx (tmode);
24223 for (i = 0; i < nargs; i++)
24225 enum machine_mode mode = insn_p->operand[i + 1].mode;
24226 bool match;
24228 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
24229 op = expand_normal (arg);
24230 match = (*insn_p->operand[i + 1].predicate) (op, mode);
24232 if (last_arg_constant && (i + 1) == nargs)
24234 if (!match)
24236 if (icode == CODE_FOR_lwp_lwpvalsi3
24237 || icode == CODE_FOR_lwp_lwpinssi3
24238 || icode == CODE_FOR_lwp_lwpvaldi3
24239 || icode == CODE_FOR_lwp_lwpinsdi3)
24240 error ("the last argument must be a 32-bit immediate");
24241 else
24242 error ("the last argument must be an 8-bit immediate");
24243 return const0_rtx;
24246 else
24248 if (i == memory)
24250 /* This must be the memory operand. */
24251 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24252 gcc_assert (GET_MODE (op) == mode
24253 || GET_MODE (op) == VOIDmode);
24255 else
24258 /* This must be a register. */
24258 if (VECTOR_MODE_P (mode))
24259 op = safe_vector_operand (op, mode);
24261 gcc_assert (GET_MODE (op) == mode
24262 || GET_MODE (op) == VOIDmode);
24263 op = copy_to_mode_reg (mode, op);
24267 args[i].op = op;
24268 args[i].mode = mode;
24271 switch (nargs)
24273 case 0:
24274 pat = GEN_FCN (icode) (target);
24275 break;
24276 case 1:
24277 pat = GEN_FCN (icode) (target, args[0].op);
24278 break;
24279 case 2:
24280 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24281 break;
24282 case 3:
24283 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
24284 break;
24285 default:
24286 gcc_unreachable ();
24289 if (! pat)
24290 return 0;
24291 emit_insn (pat);
24292 return klass == store ? 0 : target;
24295 /* Return the integer constant in ARG. Constrain it to be in the range
24296 of the subparts of VEC_TYPE; issue an error if not. */
24298 static int
24299 get_element_number (tree vec_type, tree arg)
24301 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24303 if (!host_integerp (arg, 1)
24304 || (elt = tree_low_cst (arg, 1), elt > max))
24306 error ("selector must be an integer constant in the range 0..%wi", max);
24307 return 0;
24310 return elt;
24313 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24314 ix86_expand_vector_init. We DO have language-level syntax for this, in
24315 the form of (type){ init-list }. Except that since we can't place emms
24316 instructions from inside the compiler, we can't allow the use of MMX
24317 registers unless the user explicitly asks for it. So we do *not* define
24318 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24319 we have builtins invoked by mmintrin.h that give us license to emit
24320 these sorts of instructions. */
24322 static rtx
24323 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24325 enum machine_mode tmode = TYPE_MODE (type);
24326 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24327 int i, n_elt = GET_MODE_NUNITS (tmode);
24328 rtvec v = rtvec_alloc (n_elt);
24330 gcc_assert (VECTOR_MODE_P (tmode));
24331 gcc_assert (call_expr_nargs (exp) == n_elt);
24333 for (i = 0; i < n_elt; ++i)
24335 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24336 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24339 if (!target || !register_operand (target, tmode))
24340 target = gen_reg_rtx (tmode);
24342 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24343 return target;
24346 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24347 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24348 had a language-level syntax for referencing vector elements. */
24350 static rtx
24351 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24353 enum machine_mode tmode, mode0;
24354 tree arg0, arg1;
24355 int elt;
24356 rtx op0;
24358 arg0 = CALL_EXPR_ARG (exp, 0);
24359 arg1 = CALL_EXPR_ARG (exp, 1);
24361 op0 = expand_normal (arg0);
24362 elt = get_element_number (TREE_TYPE (arg0), arg1);
24364 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24365 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24366 gcc_assert (VECTOR_MODE_P (mode0));
24368 op0 = force_reg (mode0, op0);
24370 if (optimize || !target || !register_operand (target, tmode))
24371 target = gen_reg_rtx (tmode);
24373 ix86_expand_vector_extract (true, target, op0, elt);
24375 return target;
24378 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24379 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24380 a language-level syntax for referencing vector elements. */
24382 static rtx
24383 ix86_expand_vec_set_builtin (tree exp)
24385 enum machine_mode tmode, mode1;
24386 tree arg0, arg1, arg2;
24387 int elt;
24388 rtx op0, op1, target;
24390 arg0 = CALL_EXPR_ARG (exp, 0);
24391 arg1 = CALL_EXPR_ARG (exp, 1);
24392 arg2 = CALL_EXPR_ARG (exp, 2);
24394 tmode = TYPE_MODE (TREE_TYPE (arg0));
24395 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24396 gcc_assert (VECTOR_MODE_P (tmode));
24398 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24399 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24400 elt = get_element_number (TREE_TYPE (arg0), arg2);
24402 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24403 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24405 op0 = force_reg (tmode, op0);
24406 op1 = force_reg (mode1, op1);
24408 /* OP0 is the source of these builtin functions and shouldn't be
24409 modified. Create a copy, use it and return it as target. */
24410 target = gen_reg_rtx (tmode);
24411 emit_move_insn (target, op0);
24412 ix86_expand_vector_set (true, target, op1, elt);
24414 return target;
24417 /* Expand an expression EXP that calls a built-in function,
24418 with result going to TARGET if that's convenient
24419 (and in mode MODE if that's convenient).
24420 SUBTARGET may be used as the target for computing one of EXP's operands.
24421 IGNORE is nonzero if the value is to be ignored. */
24423 static rtx
24424 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24425 enum machine_mode mode ATTRIBUTE_UNUSED,
24426 int ignore ATTRIBUTE_UNUSED)
24428 const struct builtin_description *d;
24429 size_t i;
24430 enum insn_code icode;
24431 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24432 tree arg0, arg1, arg2;
24433 rtx op0, op1, op2, pat;
24434 enum machine_mode mode0, mode1, mode2;
24435 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24437 /* Determine whether the builtin function is available under the current ISA.
24438 Originally the builtin was not created if it wasn't applicable to the
24439 current ISA based on the command line switches. With function specific
24440 options, we need to check in the context of the function making the call
24441 whether it is supported. */
24442 if (ix86_builtins_isa[fcode].isa
24443 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24445 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24446 NULL, NULL, false);
24448 if (!opts)
24449 error ("%qE needs unknown isa option", fndecl);
24450 else
24452 gcc_assert (opts != NULL);
24453 error ("%qE needs isa option %s", fndecl, opts);
24454 free (opts);
24456 return const0_rtx;
24459 switch (fcode)
24461 case IX86_BUILTIN_MASKMOVQ:
24462 case IX86_BUILTIN_MASKMOVDQU:
24463 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24464 ? CODE_FOR_mmx_maskmovq
24465 : CODE_FOR_sse2_maskmovdqu);
24466 /* Note the arg order is different from the operand order. */
24467 arg1 = CALL_EXPR_ARG (exp, 0);
24468 arg2 = CALL_EXPR_ARG (exp, 1);
24469 arg0 = CALL_EXPR_ARG (exp, 2);
24470 op0 = expand_normal (arg0);
24471 op1 = expand_normal (arg1);
24472 op2 = expand_normal (arg2);
24473 mode0 = insn_data[icode].operand[0].mode;
24474 mode1 = insn_data[icode].operand[1].mode;
24475 mode2 = insn_data[icode].operand[2].mode;
24477 op0 = force_reg (Pmode, op0);
24478 op0 = gen_rtx_MEM (mode1, op0);
24480 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24481 op0 = copy_to_mode_reg (mode0, op0);
24482 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24483 op1 = copy_to_mode_reg (mode1, op1);
24484 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24485 op2 = copy_to_mode_reg (mode2, op2);
24486 pat = GEN_FCN (icode) (op0, op1, op2);
24487 if (! pat)
24488 return 0;
24489 emit_insn (pat);
24490 return 0;
24492 case IX86_BUILTIN_LDMXCSR:
24493 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24494 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24495 emit_move_insn (target, op0);
24496 emit_insn (gen_sse_ldmxcsr (target));
24497 return 0;
24499 case IX86_BUILTIN_STMXCSR:
24500 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24501 emit_insn (gen_sse_stmxcsr (target));
24502 return copy_to_mode_reg (SImode, target);
24504 case IX86_BUILTIN_CLFLUSH:
24505 arg0 = CALL_EXPR_ARG (exp, 0);
24506 op0 = expand_normal (arg0);
24507 icode = CODE_FOR_sse2_clflush;
24508 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24509 op0 = copy_to_mode_reg (Pmode, op0);
24511 emit_insn (gen_sse2_clflush (op0));
24512 return 0;
24514 case IX86_BUILTIN_MONITOR:
24515 arg0 = CALL_EXPR_ARG (exp, 0);
24516 arg1 = CALL_EXPR_ARG (exp, 1);
24517 arg2 = CALL_EXPR_ARG (exp, 2);
24518 op0 = expand_normal (arg0);
24519 op1 = expand_normal (arg1);
24520 op2 = expand_normal (arg2);
24521 if (!REG_P (op0))
24522 op0 = copy_to_mode_reg (Pmode, op0);
24523 if (!REG_P (op1))
24524 op1 = copy_to_mode_reg (SImode, op1);
24525 if (!REG_P (op2))
24526 op2 = copy_to_mode_reg (SImode, op2);
24527 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24528 return 0;
24530 case IX86_BUILTIN_MWAIT:
24531 arg0 = CALL_EXPR_ARG (exp, 0);
24532 arg1 = CALL_EXPR_ARG (exp, 1);
24533 op0 = expand_normal (arg0);
24534 op1 = expand_normal (arg1);
24535 if (!REG_P (op0))
24536 op0 = copy_to_mode_reg (SImode, op0);
24537 if (!REG_P (op1))
24538 op1 = copy_to_mode_reg (SImode, op1);
24539 emit_insn (gen_sse3_mwait (op0, op1));
24540 return 0;
24542 case IX86_BUILTIN_VEC_INIT_V2SI:
24543 case IX86_BUILTIN_VEC_INIT_V4HI:
24544 case IX86_BUILTIN_VEC_INIT_V8QI:
24545 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24547 case IX86_BUILTIN_VEC_EXT_V2DF:
24548 case IX86_BUILTIN_VEC_EXT_V2DI:
24549 case IX86_BUILTIN_VEC_EXT_V4SF:
24550 case IX86_BUILTIN_VEC_EXT_V4SI:
24551 case IX86_BUILTIN_VEC_EXT_V8HI:
24552 case IX86_BUILTIN_VEC_EXT_V2SI:
24553 case IX86_BUILTIN_VEC_EXT_V4HI:
24554 case IX86_BUILTIN_VEC_EXT_V16QI:
24555 return ix86_expand_vec_ext_builtin (exp, target);
24557 case IX86_BUILTIN_VEC_SET_V2DI:
24558 case IX86_BUILTIN_VEC_SET_V4SF:
24559 case IX86_BUILTIN_VEC_SET_V4SI:
24560 case IX86_BUILTIN_VEC_SET_V8HI:
24561 case IX86_BUILTIN_VEC_SET_V4HI:
24562 case IX86_BUILTIN_VEC_SET_V16QI:
24563 return ix86_expand_vec_set_builtin (exp);
24565 case IX86_BUILTIN_VEC_PERM_V2DF:
24566 case IX86_BUILTIN_VEC_PERM_V4SF:
24567 case IX86_BUILTIN_VEC_PERM_V2DI:
24568 case IX86_BUILTIN_VEC_PERM_V4SI:
24569 case IX86_BUILTIN_VEC_PERM_V8HI:
24570 case IX86_BUILTIN_VEC_PERM_V16QI:
24571 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24572 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24573 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24574 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24575 case IX86_BUILTIN_VEC_PERM_V4DF:
24576 case IX86_BUILTIN_VEC_PERM_V8SF:
24577 return ix86_expand_vec_perm_builtin (exp);
24579 case IX86_BUILTIN_INFQ:
24580 case IX86_BUILTIN_HUGE_VALQ:
24582 REAL_VALUE_TYPE inf;
24583 rtx tmp;
24585 real_inf (&inf);
24586 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24588 tmp = validize_mem (force_const_mem (mode, tmp));
24590 if (target == 0)
24591 target = gen_reg_rtx (mode);
24593 emit_move_insn (target, tmp);
24594 return target;
24597 case IX86_BUILTIN_LLWPCB:
24598 arg0 = CALL_EXPR_ARG (exp, 0);
24599 op0 = expand_normal (arg0);
24600 icode = CODE_FOR_lwp_llwpcb;
24601 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24602 op0 = copy_to_mode_reg (Pmode, op0);
24603 emit_insn (gen_lwp_llwpcb (op0));
24604 return 0;
24606 case IX86_BUILTIN_SLWPCB:
24607 icode = CODE_FOR_lwp_slwpcb;
24608 if (!target
24609 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24610 target = gen_reg_rtx (Pmode);
24611 emit_insn (gen_lwp_slwpcb (target));
24612 return target;
24614 default:
24615 break;
24618 for (i = 0, d = bdesc_special_args;
24619 i < ARRAY_SIZE (bdesc_special_args);
24620 i++, d++)
24621 if (d->code == fcode)
24622 return ix86_expand_special_args_builtin (d, exp, target);
24624 for (i = 0, d = bdesc_args;
24625 i < ARRAY_SIZE (bdesc_args);
24626 i++, d++)
24627 if (d->code == fcode)
24628 switch (fcode)
24630 case IX86_BUILTIN_FABSQ:
24631 case IX86_BUILTIN_COPYSIGNQ:
24632 if (!TARGET_SSE2)
24633 /* Emit a normal call if SSE2 isn't available. */
24634 return expand_call (exp, target, ignore);
24635 default:
24636 return ix86_expand_args_builtin (d, exp, target);
24639 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24640 if (d->code == fcode)
24641 return ix86_expand_sse_comi (d, exp, target);
24643 for (i = 0, d = bdesc_pcmpestr;
24644 i < ARRAY_SIZE (bdesc_pcmpestr);
24645 i++, d++)
24646 if (d->code == fcode)
24647 return ix86_expand_sse_pcmpestr (d, exp, target);
24649 for (i = 0, d = bdesc_pcmpistr;
24650 i < ARRAY_SIZE (bdesc_pcmpistr);
24651 i++, d++)
24652 if (d->code == fcode)
24653 return ix86_expand_sse_pcmpistr (d, exp, target);
24655 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24656 if (d->code == fcode)
24657 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24658 (enum ix86_builtin_func_type)
24659 d->flag, d->comparison);
24661 gcc_unreachable ();
24664 /* Returns a function decl for a vectorized version of the builtin function
24665 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24666 if it is not available. */
24668 static tree
24669 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
24670 tree type_in)
24672 enum machine_mode in_mode, out_mode;
24673 int in_n, out_n;
24674 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
24676 if (TREE_CODE (type_out) != VECTOR_TYPE
24677 || TREE_CODE (type_in) != VECTOR_TYPE
24678 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
24679 return NULL_TREE;
24681 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24682 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24683 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24684 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24686 switch (fn)
24688 case BUILT_IN_SQRT:
24689 if (out_mode == DFmode && out_n == 2
24690 && in_mode == DFmode && in_n == 2)
24691 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24692 break;
24694 case BUILT_IN_SQRTF:
24695 if (out_mode == SFmode && out_n == 4
24696 && in_mode == SFmode && in_n == 4)
24697 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24698 break;
24700 case BUILT_IN_LRINT:
24701 if (out_mode == SImode && out_n == 4
24702 && in_mode == DFmode && in_n == 2)
24703 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24704 break;
24706 case BUILT_IN_LRINTF:
24707 if (out_mode == SImode && out_n == 4
24708 && in_mode == SFmode && in_n == 4)
24709 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24710 break;
24712 case BUILT_IN_COPYSIGN:
24713 if (out_mode == DFmode && out_n == 2
24714 && in_mode == DFmode && in_n == 2)
24715 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24716 break;
24718 case BUILT_IN_COPYSIGNF:
24719 if (out_mode == SFmode && out_n == 4
24720 && in_mode == SFmode && in_n == 4)
24721 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24722 break;
24724 default:
24728 /* Dispatch to a handler for a vectorization library. */
24729 if (ix86_veclib_handler)
24730 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24731 type_in);
24733 return NULL_TREE;
24736 /* Handler for an SVML-style interface to
24737 a library with vectorized intrinsics. */
24739 static tree
24740 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24742 char name[20];
24743 tree fntype, new_fndecl, args;
24744 unsigned arity;
24745 const char *bname;
24746 enum machine_mode el_mode, in_mode;
24747 int n, in_n;
24749 /* The SVML is suitable for unsafe math only. */
24750 if (!flag_unsafe_math_optimizations)
24751 return NULL_TREE;
24753 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24754 n = TYPE_VECTOR_SUBPARTS (type_out);
24755 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24756 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24757 if (el_mode != in_mode
24758 || n != in_n)
24759 return NULL_TREE;
24761 switch (fn)
24763 case BUILT_IN_EXP:
24764 case BUILT_IN_LOG:
24765 case BUILT_IN_LOG10:
24766 case BUILT_IN_POW:
24767 case BUILT_IN_TANH:
24768 case BUILT_IN_TAN:
24769 case BUILT_IN_ATAN:
24770 case BUILT_IN_ATAN2:
24771 case BUILT_IN_ATANH:
24772 case BUILT_IN_CBRT:
24773 case BUILT_IN_SINH:
24774 case BUILT_IN_SIN:
24775 case BUILT_IN_ASINH:
24776 case BUILT_IN_ASIN:
24777 case BUILT_IN_COSH:
24778 case BUILT_IN_COS:
24779 case BUILT_IN_ACOSH:
24780 case BUILT_IN_ACOS:
24781 if (el_mode != DFmode || n != 2)
24782 return NULL_TREE;
24783 break;
24785 case BUILT_IN_EXPF:
24786 case BUILT_IN_LOGF:
24787 case BUILT_IN_LOG10F:
24788 case BUILT_IN_POWF:
24789 case BUILT_IN_TANHF:
24790 case BUILT_IN_TANF:
24791 case BUILT_IN_ATANF:
24792 case BUILT_IN_ATAN2F:
24793 case BUILT_IN_ATANHF:
24794 case BUILT_IN_CBRTF:
24795 case BUILT_IN_SINHF:
24796 case BUILT_IN_SINF:
24797 case BUILT_IN_ASINHF:
24798 case BUILT_IN_ASINF:
24799 case BUILT_IN_COSHF:
24800 case BUILT_IN_COSF:
24801 case BUILT_IN_ACOSHF:
24802 case BUILT_IN_ACOSF:
24803 if (el_mode != SFmode || n != 4)
24804 return NULL_TREE;
24805 break;
24807 default:
24808 return NULL_TREE;
24811 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
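/* Build the SVML entry point name.  BNAME is the scalar builtin's
   identifier, e.g. "__builtin_logf"; skipping its 10-character
   "__builtin_" prefix leaves the bare function name.  The vector
   routines are named "vmls<Func>4" for the 4-element SFmode forms
   and "vmld<Func>2" for the 2-element DFmode forms, with log spelled
   "Ln"; the first letter of the function name is upcased below.  */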
24813 if (fn == BUILT_IN_LOGF)
24814 strcpy (name, "vmlsLn4");
24815 else if (fn == BUILT_IN_LOG)
24816 strcpy (name, "vmldLn2");
24817 else if (n == 4)
24819 sprintf (name, "vmls%s", bname+10);
24820 name[strlen (name)-1] = '4';
24822 else
24823 sprintf (name, "vmld%s2", bname+10);
24825 /* Convert to uppercase. */
24826 name[4] &= ~0x20;
24828 arity = 0;
24829 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24830 args = TREE_CHAIN (args))
24831 arity++;
24833 if (arity == 1)
24834 fntype = build_function_type_list (type_out, type_in, NULL);
24835 else
24836 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24838 /* Build a function declaration for the vectorized function. */
24839 new_fndecl = build_decl (BUILTINS_LOCATION,
24840 FUNCTION_DECL, get_identifier (name), fntype);
24841 TREE_PUBLIC (new_fndecl) = 1;
24842 DECL_EXTERNAL (new_fndecl) = 1;
24843 DECL_IS_NOVOPS (new_fndecl) = 1;
24844 TREE_READONLY (new_fndecl) = 1;
24846 return new_fndecl;
24849 /* Handler for an ACML-style interface to
24850 a library with vectorized intrinsics. */
24852 static tree
24853 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24855 char name[20] = "__vr.._";
24856 tree fntype, new_fndecl, args;
24857 unsigned arity;
24858 const char *bname;
24859 enum machine_mode el_mode, in_mode;
24860 int n, in_n;
24862 /* The ACML is 64-bit only and suitable for unsafe math only, as
24863 it does not correctly support parts of IEEE with the required
24864 precision such as denormals. */
24865 if (!TARGET_64BIT
24866 || !flag_unsafe_math_optimizations)
24867 return NULL_TREE;
24869 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24870 n = TYPE_VECTOR_SUBPARTS (type_out);
24871 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24872 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24873 if (el_mode != in_mode
24874 || n != in_n)
24875 return NULL_TREE;
24877 switch (fn)
24879 case BUILT_IN_SIN:
24880 case BUILT_IN_COS:
24881 case BUILT_IN_EXP:
24882 case BUILT_IN_LOG:
24883 case BUILT_IN_LOG2:
24884 case BUILT_IN_LOG10:
24885 name[4] = 'd';
24886 name[5] = '2';
24887 if (el_mode != DFmode
24888 || n != 2)
24889 return NULL_TREE;
24890 break;
24892 case BUILT_IN_SINF:
24893 case BUILT_IN_COSF:
24894 case BUILT_IN_EXPF:
24895 case BUILT_IN_POWF:
24896 case BUILT_IN_LOGF:
24897 case BUILT_IN_LOG2F:
24898 case BUILT_IN_LOG10F:
24899 name[4] = 's';
24900 name[5] = '4';
24901 if (el_mode != SFmode
24902 || n != 4)
24903 return NULL_TREE;
24904 break;
24906 default:
24907 return NULL_TREE;
24910 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
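/* NAME starts out as "__vr.._"; the two placeholder characters were
   patched above with 'd'/'s' and the element count.  Append the scalar
   function name with its 10-character "__builtin_" prefix stripped.  */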
24911 sprintf (name + 7, "%s", bname+10);
24913 arity = 0;
24914 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24915 args = TREE_CHAIN (args))
24916 arity++;
24918 if (arity == 1)
24919 fntype = build_function_type_list (type_out, type_in, NULL);
24920 else
24921 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24923 /* Build a function declaration for the vectorized function. */
24924 new_fndecl = build_decl (BUILTINS_LOCATION,
24925 FUNCTION_DECL, get_identifier (name), fntype);
24926 TREE_PUBLIC (new_fndecl) = 1;
24927 DECL_EXTERNAL (new_fndecl) = 1;
24928 DECL_IS_NOVOPS (new_fndecl) = 1;
24929 TREE_READONLY (new_fndecl) = 1;
24931 return new_fndecl;
24935 /* Returns a decl of a function that implements conversion of an integer vector
24936 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
24937 are the types involved when converting according to CODE.
24938 Return NULL_TREE if it is not available. */
24940 static tree
24941 ix86_vectorize_builtin_conversion (unsigned int code,
24942 tree dest_type, tree src_type)
24944 if (! TARGET_SSE2)
24945 return NULL_TREE;
24947 switch (code)
24949 case FLOAT_EXPR:
24950 switch (TYPE_MODE (src_type))
24952 case V4SImode:
24953 switch (TYPE_MODE (dest_type))
24955 case V4SFmode:
24956 return (TYPE_UNSIGNED (src_type)
24957 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24958 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24959 case V4DFmode:
24960 return (TYPE_UNSIGNED (src_type)
24961 ? NULL_TREE
24962 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
24963 default:
24964 return NULL_TREE;
24966 break;
24967 case V8SImode:
24968 switch (TYPE_MODE (dest_type))
24970 case V8SFmode:
24971 return (TYPE_UNSIGNED (src_type)
24972 ? NULL_TREE
24973 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24974 default:
24975 return NULL_TREE;
24977 break;
24978 default:
24979 return NULL_TREE;
24982 case FIX_TRUNC_EXPR:
24983 switch (TYPE_MODE (dest_type))
24985 case V4SImode:
24986 switch (TYPE_MODE (src_type))
24988 case V4SFmode:
24989 return (TYPE_UNSIGNED (dest_type)
24990 ? NULL_TREE
24991 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
24992 case V4DFmode:
24993 return (TYPE_UNSIGNED (dest_type)
24994 ? NULL_TREE
24995 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
24996 default:
24997 return NULL_TREE;
24999 break;
25001 case V8SImode:
25002 switch (TYPE_MODE (src_type))
25004 case V8SFmode:
25005 return (TYPE_UNSIGNED (dest_type)
25006 ? NULL_TREE
25007 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
25008 default:
25009 return NULL_TREE;
25011 break;
25013 default:
25014 return NULL_TREE;
25017 default:
25018 return NULL_TREE;
25021 return NULL_TREE;
25024 /* Returns a code for a target-specific builtin that implements
25025 reciprocal of the function, or NULL_TREE if not available. */
25027 static tree
25028 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
25029 bool sqrt ATTRIBUTE_UNUSED)
25031 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
25032 && flag_finite_math_only && !flag_trapping_math
25033 && flag_unsafe_math_optimizations))
25034 return NULL_TREE;
25036 if (md_fn)
25037 /* Machine dependent builtins. */
25038 switch (fn)
25040 /* Vectorized version of sqrt to rsqrt conversion. */
25041 case IX86_BUILTIN_SQRTPS_NR:
25042 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
25044 default:
25045 return NULL_TREE;
25047 else
25048 /* Normal builtins. */
25049 switch (fn)
25051 /* Sqrt to rsqrt conversion. */
25052 case BUILT_IN_SQRTF:
25053 return ix86_builtins[IX86_BUILTIN_RSQRTF];
25055 default:
25056 return NULL_TREE;
25060 /* Helper for avx_vpermilps256_operand et al. This is also used by
25061 the expansion functions to turn the parallel back into a mask.
25062 The return value is 0 for no match and the imm8+1 for a match. */
25065 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
25067 unsigned i, nelt = GET_MODE_NUNITS (mode);
25068 unsigned mask = 0;
25069 unsigned char ipar[8];
25071 if (XVECLEN (par, 0) != (int) nelt)
25072 return 0;
25074 /* Validate that all of the elements are constants, and not totally
25075 out of range. Copy the data into an integral array to make the
25076 subsequent checks easier. */
25077 for (i = 0; i < nelt; ++i)
25079 rtx er = XVECEXP (par, 0, i);
25080 unsigned HOST_WIDE_INT ei;
25082 if (!CONST_INT_P (er))
25083 return 0;
25084 ei = INTVAL (er);
25085 if (ei >= nelt)
25086 return 0;
25087 ipar[i] = ei;
25090 switch (mode)
25092 case V4DFmode:
25093 /* In the 256-bit DFmode case, we can only move elements within
25094 a 128-bit lane. */
25095 for (i = 0; i < 2; ++i)
25097 if (ipar[i] >= 2)
25098 return 0;
25099 mask |= ipar[i] << i;
25101 for (i = 2; i < 4; ++i)
25103 if (ipar[i] < 2)
25104 return 0;
25105 mask |= (ipar[i] - 2) << i;
25107 break;
25109 case V8SFmode:
25110 /* In the 256-bit SFmode case, we have full freedom of movement
25111 within the low 128-bit lane, but the high 128-bit lane must
25112 mirror the exact same pattern. */
25113 for (i = 0; i < 4; ++i)
25114 if (ipar[i] + 4 != ipar[i + 4])
25115 return 0;
25116 nelt = 4;
25117 /* FALLTHRU */
25119 case V2DFmode:
25120 case V4SFmode:
25121 /* In the 128-bit case, we've full freedom in the placement of
25122 the elements from the source operand. */
25123 for (i = 0; i < nelt; ++i)
25124 mask |= ipar[i] << (i * (nelt / 2));
25125 break;
25127 default:
25128 gcc_unreachable ();
25131 /* Make sure success has a non-zero value by adding one. */
25132 return mask + 1;
25135 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
25136 the expansion functions to turn the parallel back into a mask.
25137 The return value is 0 for no match and the imm8+1 for a match. */
25140 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
25142 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
25143 unsigned mask = 0;
25144 unsigned char ipar[8];
25146 if (XVECLEN (par, 0) != (int) nelt)
25147 return 0;
25149 /* Validate that all of the elements are constants, and not totally
25150 out of range. Copy the data into an integral array to make the
25151 subsequent checks easier. */
25152 for (i = 0; i < nelt; ++i)
25154 rtx er = XVECEXP (par, 0, i);
25155 unsigned HOST_WIDE_INT ei;
25157 if (!CONST_INT_P (er))
25158 return 0;
25159 ei = INTVAL (er);
25160 if (ei >= 2 * nelt)
25161 return 0;
25162 ipar[i] = ei;
25165 /* Validate that the halves of the permute are halves. */
25166 for (i = 0; i < nelt2 - 1; ++i)
25167 if (ipar[i] + 1 != ipar[i + 1])
25168 return 0;
25169 for (i = nelt2; i < nelt - 1; ++i)
25170 if (ipar[i] + 1 != ipar[i + 1])
25171 return 0;
25173 /* Reconstruct the mask. */
25174 for (i = 0; i < 2; ++i)
25176 unsigned e = ipar[i * nelt2];
25177 if (e % nelt2)
25178 return 0;
25179 e /= nelt2;
25180 mask |= e << (i * 4);
25183 /* Make sure success has a non-zero value by adding one. */
25184 return mask + 1;
25188 /* Store OPERAND to the memory after reload is completed. This means
25189 that we can't easily use assign_stack_local. */
25191 ix86_force_to_memory (enum machine_mode mode, rtx operand)
25193 rtx result;
25195 gcc_assert (reload_completed);
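/* Three strategies: store into the red zone below the stack pointer
   when one is available, push a single DImode word on 64-bit targets
   without a usable red zone, or push one or two SImode words on
   32-bit targets.  */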
25196 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
25198 result = gen_rtx_MEM (mode,
25199 gen_rtx_PLUS (Pmode,
25200 stack_pointer_rtx,
25201 GEN_INT (-RED_ZONE_SIZE)));
25202 emit_move_insn (result, operand);
25204 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
25206 switch (mode)
25208 case HImode:
25209 case SImode:
25210 operand = gen_lowpart (DImode, operand);
25211 /* FALLTHRU */
25212 case DImode:
25213 emit_insn (
25214 gen_rtx_SET (VOIDmode,
25215 gen_rtx_MEM (DImode,
25216 gen_rtx_PRE_DEC (DImode,
25217 stack_pointer_rtx)),
25218 operand));
25219 break;
25220 default:
25221 gcc_unreachable ();
25223 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25225 else
25227 switch (mode)
25229 case DImode:
25231 rtx operands[2];
25232 split_di (&operand, 1, operands, operands + 1);
25233 emit_insn (
25234 gen_rtx_SET (VOIDmode,
25235 gen_rtx_MEM (SImode,
25236 gen_rtx_PRE_DEC (Pmode,
25237 stack_pointer_rtx)),
25238 operands[1]));
25239 emit_insn (
25240 gen_rtx_SET (VOIDmode,
25241 gen_rtx_MEM (SImode,
25242 gen_rtx_PRE_DEC (Pmode,
25243 stack_pointer_rtx)),
25244 operands[0]));
25246 break;
25247 case HImode:
25248 /* Store HImodes as SImodes. */
25249 operand = gen_lowpart (SImode, operand);
25250 /* FALLTHRU */
25251 case SImode:
25252 emit_insn (
25253 gen_rtx_SET (VOIDmode,
25254 gen_rtx_MEM (GET_MODE (operand),
25255 gen_rtx_PRE_DEC (SImode,
25256 stack_pointer_rtx)),
25257 operand));
25258 break;
25259 default:
25260 gcc_unreachable ();
25262 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25264 return result;
25267 /* Free operand from the memory. */
25268 void
25269 ix86_free_from_memory (enum machine_mode mode)
25271 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
25273 int size;
25275 if (mode == DImode || TARGET_64BIT)
25276 size = 8;
25277 else
25278 size = 4;
25279 /* Use LEA to deallocate stack space. In peephole2 it will be converted
25280 to pop or add instruction if registers are available. */
25281 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
25282 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25283 GEN_INT (size))));
25287 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
25288 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
25289 same. */
25290 static const enum reg_class *
25291 i386_ira_cover_classes (void)
25293 static const enum reg_class sse_fpmath_classes[] = {
25294 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
25296 static const enum reg_class no_sse_fpmath_classes[] = {
25297 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
25300 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
25303 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25304 QImode must go into class Q_REGS.
25305 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
25306 movdf to do mem-to-mem moves through integer regs. */
25307 enum reg_class
25308 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25310 enum machine_mode mode = GET_MODE (x);
25312 /* We're only allowed to return a subclass of CLASS. Many of the
25313 following checks fail for NO_REGS, so eliminate that early. */
25314 if (regclass == NO_REGS)
25315 return NO_REGS;
25317 /* All classes can load zeros. */
25318 if (x == CONST0_RTX (mode))
25319 return regclass;
25321 /* Force constants into memory if we are loading a (nonzero) constant into
25322 an MMX or SSE register. This is because there are no MMX/SSE instructions
25323 to load from a constant. */
25324 if (CONSTANT_P (x)
25325 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25326 return NO_REGS;
25328 /* Prefer SSE regs only, if we can use them for math. */
25329 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25330 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25332 /* Floating-point constants need more complex checks. */
25333 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25335 /* General regs can load everything. */
25336 if (reg_class_subset_p (regclass, GENERAL_REGS))
25337 return regclass;
25339 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25340 zero above. We only want to wind up preferring 80387 registers if
25341 we plan on doing computation with them. */
25342 if (TARGET_80387
25343 && standard_80387_constant_p (x))
25345 /* Limit class to non-sse. */
25346 if (regclass == FLOAT_SSE_REGS)
25347 return FLOAT_REGS;
25348 if (regclass == FP_TOP_SSE_REGS)
25349 return FP_TOP_REG;
25350 if (regclass == FP_SECOND_SSE_REGS)
25351 return FP_SECOND_REG;
25352 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25353 return regclass;
25356 return NO_REGS;
25359 /* Generally when we see PLUS here, it's the function invariant
25360 (plus soft-fp const_int), which can only be computed into general
25361 regs. */
25362 if (GET_CODE (x) == PLUS)
25363 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25365 /* QImode constants are easy to load, but non-constant QImode data
25366 must go into Q_REGS. */
25367 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25369 if (reg_class_subset_p (regclass, Q_REGS))
25370 return regclass;
25371 if (reg_class_subset_p (Q_REGS, regclass))
25372 return Q_REGS;
25373 return NO_REGS;
25376 return regclass;
25379 /* Discourage putting floating-point values in SSE registers unless
25380 SSE math is being used, and likewise for the 387 registers. */
25381 enum reg_class
25382 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25384 enum machine_mode mode = GET_MODE (x);
25386 /* Restrict the output reload class to the register bank that we are doing
25387 math on. If we would like not to return a subset of CLASS, reject this
25388 alternative: if reload cannot do this, it will still use its choice. */
25389 mode = GET_MODE (x);
25390 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25391 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25393 if (X87_FLOAT_MODE_P (mode))
25395 if (regclass == FP_TOP_SSE_REGS)
25396 return FP_TOP_REG;
25397 else if (regclass == FP_SECOND_SSE_REGS)
25398 return FP_SECOND_REG;
25399 else
25400 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25403 return regclass;
25406 static enum reg_class
25407 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25408 enum machine_mode mode,
25409 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25411 /* QImode spills from non-QI registers require an
25412 intermediate register on 32-bit targets. */
25413 if (!in_p && mode == QImode && !TARGET_64BIT
25414 && (rclass == GENERAL_REGS
25415 || rclass == LEGACY_REGS
25416 || rclass == INDEX_REGS))
25418 int regno;
25420 if (REG_P (x))
25421 regno = REGNO (x);
25422 else
25423 regno = -1;
25425 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25426 regno = true_regnum (x);
25428 /* Return Q_REGS if the operand is in memory. */
25429 if (regno == -1)
25430 return Q_REGS;
25433 return NO_REGS;
25436 /* If we are copying between general and FP registers, we need a memory
25437 location. The same is true for SSE and MMX registers.
25439 To optimize register_move_cost performance, allow inline variant.
25441 The macro can't work reliably when one of the CLASSES is class containing
25442 registers from multiple units (SSE, MMX, integer). We avoid this by never
25443 combining those units in single alternative in the machine description.
25444 Ensure that this constraint holds to avoid unexpected surprises.
25446 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25447 enforce these sanity checks. */
25449 static inline int
25450 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25451 enum machine_mode mode, int strict)
25453 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25454 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25455 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25456 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25457 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25458 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25460 gcc_assert (!strict);
25461 return true;
25464 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25465 return true;
25467 /* ??? This is a lie. We do have moves between mmx/general, and for
25468 mmx/sse2. But by saying we need secondary memory we discourage the
25469 register allocator from using the mmx registers unless needed. */
25470 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25471 return true;
25473 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25475 /* SSE1 doesn't have any direct moves from other classes. */
25476 if (!TARGET_SSE2)
25477 return true;
25479 /* If the target says that inter-unit moves are more expensive
25480 than moving through memory, then don't generate them. */
25481 if (!TARGET_INTER_UNIT_MOVES)
25482 return true;
25484 /* Between SSE and general, we have moves no larger than word size. */
25485 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25486 return true;
25489 return false;
25493 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25494 enum machine_mode mode, int strict)
25496 return inline_secondary_memory_needed (class1, class2, mode, strict);
25499 /* Return true if the registers in CLASS cannot represent the change from
25500 modes FROM to TO. */
25502 bool
25503 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25504 enum reg_class regclass)
25506 if (from == to)
25507 return false;
25509 /* x87 registers can't do subreg at all, as all values are reformatted
25510 to extended precision. */
25511 if (MAYBE_FLOAT_CLASS_P (regclass))
25512 return true;
25514 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25516 /* Vector registers do not support QI or HImode loads. If we don't
25517 disallow a change to these modes, reload will assume it's ok to
25518 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25519 the vec_dupv4hi pattern. */
25520 if (GET_MODE_SIZE (from) < 4)
25521 return true;
25523 /* Vector registers do not support subreg with nonzero offsets, which
25524 are otherwise valid for integer registers. Since we can't see
25525 whether we have a nonzero offset from here, prohibit all
25526 nonparadoxical subregs changing size. */
25527 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25528 return true;
25531 return false;
25534 /* Return the cost of moving data of mode M between a
25535 register and memory. A value of 2 is the default; this cost is
25536 relative to those in `REGISTER_MOVE_COST'.
25538 This function is used extensively by register_move_cost that is used to
25539 build tables at startup. Make it inline in this case.
25540 When IN is 2, return maximum of in and out move cost.
25542 If moving between registers and memory is more expensive than
25543 between two registers, you should define this macro to express the
25544 relative cost.
25546 Also model the increased cost of moving QImode registers in
25547 non-Q_REGS classes. */
25549 static inline int
25550 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25551 int in)
25553 int cost;
25554 if (FLOAT_CLASS_P (regclass))
25556 int index;
25557 switch (mode)
25559 case SFmode:
25560 index = 0;
25561 break;
25562 case DFmode:
25563 index = 1;
25564 break;
25565 case XFmode:
25566 index = 2;
25567 break;
25568 default:
25569 return 100;
25571 if (in == 2)
25572 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25573 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25575 if (SSE_CLASS_P (regclass))
25577 int index;
25578 switch (GET_MODE_SIZE (mode))
25580 case 4:
25581 index = 0;
25582 break;
25583 case 8:
25584 index = 1;
25585 break;
25586 case 16:
25587 index = 2;
25588 break;
25589 default:
25590 return 100;
25592 if (in == 2)
25593 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25594 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25596 if (MMX_CLASS_P (regclass))
25598 int index;
25599 switch (GET_MODE_SIZE (mode))
25601 case 4:
25602 index = 0;
25603 break;
25604 case 8:
25605 index = 1;
25606 break;
25607 default:
25608 return 100;
25610 if (in == 2)
25611 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25612 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25614 switch (GET_MODE_SIZE (mode))
25616 case 1:
25617 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25619 if (!in)
25620 return ix86_cost->int_store[0];
25621 if (TARGET_PARTIAL_REG_DEPENDENCY
25622 && optimize_function_for_speed_p (cfun))
25623 cost = ix86_cost->movzbl_load;
25624 else
25625 cost = ix86_cost->int_load[0];
25626 if (in == 2)
25627 return MAX (cost, ix86_cost->int_store[0]);
25628 return cost;
25630 else
25632 if (in == 2)
25633 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25634 if (in)
25635 return ix86_cost->movzbl_load;
25636 else
25637 return ix86_cost->int_store[0] + 4;
25639 break;
25640 case 2:
25641 if (in == 2)
25642 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25643 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25644 default:
25645 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
25646 if (mode == TFmode)
25647 mode = XFmode;
25648 if (in == 2)
25649 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25650 else if (in)
25651 cost = ix86_cost->int_load[2];
25652 else
25653 cost = ix86_cost->int_store[2];
25654 return (cost * (((int) GET_MODE_SIZE (mode)
25655 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25659 static int
25660 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25661 bool in)
25663 return inline_memory_move_cost (mode, regclass, in ? 1 : 0);
25667 /* Return the cost of moving data from a register in class CLASS1 to
25668 one in class CLASS2.
25670 It is not required that the cost always equal 2 when FROM is the same as TO;
25671 on some machines it is expensive to move between registers if they are not
25672 general registers. */
25674 static int
25675 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25676 enum reg_class class2)
25678 /* In case we require secondary memory, compute cost of the store followed
25679 by load. In order to avoid bad register allocation choices, we need
25680 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25682 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25684 int cost = 1;
25686 cost += inline_memory_move_cost (mode, class1, 2);
25687 cost += inline_memory_move_cost (mode, class2, 2);
25689 /* In case of copying from general_purpose_register we may emit multiple
25690 stores followed by single load causing memory size mismatch stall.
25691 Count this as arbitrarily high cost of 20. */
25692 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25693 cost += 20;
25695 /* In the case of FP/MMX moves, the registers actually overlap, and we
25696 have to switch modes in order to treat them differently. */
25697 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25698 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25699 cost += 20;
25701 return cost;
25704 /* Moves between SSE/MMX and integer unit are expensive. */
25705 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25706 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25708 /* ??? By keeping returned value relatively high, we limit the number
25709 of moves between integer and MMX/SSE registers for all targets.
25710 Additionally, high value prevents problem with x86_modes_tieable_p(),
25711 where integer modes in MMX/SSE registers are not tieable
25712 because of missing QImode and HImode moves to, from or between
25713 MMX/SSE registers. */
25714 return MAX (8, ix86_cost->mmxsse_to_integer);
25716 if (MAYBE_FLOAT_CLASS_P (class1))
25717 return ix86_cost->fp_move;
25718 if (MAYBE_SSE_CLASS_P (class1))
25719 return ix86_cost->sse_move;
25720 if (MAYBE_MMX_CLASS_P (class1))
25721 return ix86_cost->mmx_move;
25722 return 2;
25725 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25727 bool
25728 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25730 /* Flags, and only flags, can hold CCmode values. */
25731 if (CC_REGNO_P (regno))
25732 return GET_MODE_CLASS (mode) == MODE_CC;
25733 if (GET_MODE_CLASS (mode) == MODE_CC
25734 || GET_MODE_CLASS (mode) == MODE_RANDOM
25735 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25736 return 0;
25737 if (FP_REGNO_P (regno))
25738 return VALID_FP_MODE_P (mode);
25739 if (SSE_REGNO_P (regno))
25741 /* We implement the move patterns for all vector modes into and
25742 out of SSE registers, even when no operation instructions
25743 are available. OImode move is available only when AVX is
25744 enabled. */
25745 return ((TARGET_AVX && mode == OImode)
25746 || VALID_AVX256_REG_MODE (mode)
25747 || VALID_SSE_REG_MODE (mode)
25748 || VALID_SSE2_REG_MODE (mode)
25749 || VALID_MMX_REG_MODE (mode)
25750 || VALID_MMX_REG_MODE_3DNOW (mode));
25752 if (MMX_REGNO_P (regno))
25754 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25755 so if the register is available at all, then we can move data of
25756 the given mode into or out of it. */
25757 return (VALID_MMX_REG_MODE (mode)
25758 || VALID_MMX_REG_MODE_3DNOW (mode));
25761 if (mode == QImode)
25763 /* Take care with QImode values - they can live in non-QI regs,
25764 but then they do cause partial register stalls. */
25765 if (regno <= BX_REG || TARGET_64BIT)
25766 return 1;
25767 if (!TARGET_PARTIAL_REG_STALL)
25768 return 1;
25769 return reload_in_progress || reload_completed;
25771 /* We handle both integer and floats in the general purpose registers. */
25772 else if (VALID_INT_MODE_P (mode))
25773 return 1;
25774 else if (VALID_FP_MODE_P (mode))
25775 return 1;
25776 else if (VALID_DFP_MODE_P (mode))
25777 return 1;
25778 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25779 on to use that value in smaller contexts, this can easily force a
25780 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25781 supporting DImode, allow it. */
25782 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25783 return 1;
25785 return 0;
25788 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25789 tieable integer mode. */
25791 static bool
25792 ix86_tieable_integer_mode_p (enum machine_mode mode)
25794 switch (mode)
25796 case HImode:
25797 case SImode:
25798 return true;
25800 case QImode:
25801 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25803 case DImode:
25804 return TARGET_64BIT;
25806 default:
25807 return false;
25811 /* Return true if MODE1 is accessible in a register that can hold MODE2
25812 without copying. That is, all register classes that can hold MODE2
25813 can also hold MODE1. */
25815 bool
25816 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25818 if (mode1 == mode2)
25819 return true;
25821 if (ix86_tieable_integer_mode_p (mode1)
25822 && ix86_tieable_integer_mode_p (mode2))
25823 return true;
25825 /* MODE2 being XFmode implies fp stack or general regs, which means we
25826 can tie any smaller floating point modes to it. Note that we do not
25827 tie this with TFmode. */
25828 if (mode2 == XFmode)
25829 return mode1 == SFmode || mode1 == DFmode;
25831 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25832 that we can tie it with SFmode. */
25833 if (mode2 == DFmode)
25834 return mode1 == SFmode;
25836 /* If MODE2 is only appropriate for an SSE register, then tie with
25837 any other mode acceptable to SSE registers. */
25838 if (GET_MODE_SIZE (mode2) == 16
25839 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25840 return (GET_MODE_SIZE (mode1) == 16
25841 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25843 /* If MODE2 is appropriate for an MMX register, then tie
25844 with any other mode acceptable to MMX registers. */
25845 if (GET_MODE_SIZE (mode2) == 8
25846 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25847 return (GET_MODE_SIZE (mode1) == 8
25848 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25850 return false;
25853 /* Compute a (partial) cost for rtx X. Return true if the complete
25854 cost has been computed, and false if subexpressions should be
25855 scanned. In either case, *TOTAL contains the cost result. */
25857 static bool
25858 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25860 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25861 enum machine_mode mode = GET_MODE (x);
25862 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25864 switch (code)
25866 case CONST_INT:
25867 case CONST:
25868 case LABEL_REF:
25869 case SYMBOL_REF:
25870 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25871 *total = 3;
25872 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25873 *total = 2;
25874 else if (flag_pic && SYMBOLIC_CONST (x)
25875 && (!TARGET_64BIT
25876 || (GET_CODE (x) != LABEL_REF
25877 && (GET_CODE (x) != SYMBOL_REF
25878 || !SYMBOL_REF_LOCAL_P (x)))))
25879 *total = 1;
25880 else
25881 *total = 0;
25882 return true;
25884 case CONST_DOUBLE:
25885 if (mode == VOIDmode)
25886 *total = 0;
25887 else
25888 switch (standard_80387_constant_p (x))
25890 case 1: /* 0.0 */
25891 *total = 1;
25892 break;
25893 default: /* Other constants */
25894 *total = 2;
25895 break;
25896 case 0:
25897 case -1:
25898 /* Start with (MEM (SYMBOL_REF)), since that's where
25899 it'll probably end up. Add a penalty for size. */
25900 *total = (COSTS_N_INSNS (1)
25901 + (flag_pic != 0 && !TARGET_64BIT)
25902 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25903 break;
25905 return true;
25907 case ZERO_EXTEND:
25908 /* The zero extension is often completely free on x86_64, so make
25909 it as cheap as possible. */
25910 if (TARGET_64BIT && mode == DImode
25911 && GET_MODE (XEXP (x, 0)) == SImode)
25912 *total = 1;
25913 else if (TARGET_ZERO_EXTEND_WITH_AND)
25914 *total = cost->add;
25915 else
25916 *total = cost->movzx;
25917 return false;
25919 case SIGN_EXTEND:
25920 *total = cost->movsx;
25921 return false;
25923 case ASHIFT:
25924 if (CONST_INT_P (XEXP (x, 1))
25925 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25927 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
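/* A shift left by 1 costs the same as an add (it can be done as
   reg+reg); shifts by 2 or 3 can be done with an lea using a scale
   of 4 or 8 when lea is no more expensive than a constant shift.  */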
25928 if (value == 1)
25930 *total = cost->add;
25931 return false;
25933 if ((value == 2 || value == 3)
25934 && cost->lea <= cost->shift_const)
25936 *total = cost->lea;
25937 return false;
25940 /* FALLTHRU */
25942 case ROTATE:
25943 case ASHIFTRT:
25944 case LSHIFTRT:
25945 case ROTATERT:
25946 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25948 if (CONST_INT_P (XEXP (x, 1)))
25950 if (INTVAL (XEXP (x, 1)) > 32)
25951 *total = cost->shift_const + COSTS_N_INSNS (2);
25952 else
25953 *total = cost->shift_const * 2;
25955 else
25957 if (GET_CODE (XEXP (x, 1)) == AND)
25958 *total = cost->shift_var * 2;
25959 else
25960 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25963 else
25965 if (CONST_INT_P (XEXP (x, 1)))
25966 *total = cost->shift_const;
25967 else
25968 *total = cost->shift_var;
25970 return false;
25972 case MULT:
25973 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25975 /* ??? SSE scalar cost should be used here. */
25976 *total = cost->fmul;
25977 return false;
25979 else if (X87_FLOAT_MODE_P (mode))
25981 *total = cost->fmul;
25982 return false;
25984 else if (FLOAT_MODE_P (mode))
25986 /* ??? SSE vector cost should be used here. */
25987 *total = cost->fmul;
25988 return false;
25990 else
25992 rtx op0 = XEXP (x, 0);
25993 rtx op1 = XEXP (x, 1);
25994 int nbits;
25995 if (CONST_INT_P (XEXP (x, 1)))
25997 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25998 for (nbits = 0; value != 0; value &= value - 1)
25999 nbits++;
26001 else
26002 /* This is arbitrary. */
26003 nbits = 7;
26005 /* Compute costs correctly for widening multiplication. */
26006 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
26007 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
26008 == GET_MODE_SIZE (mode))
26010 int is_mulwiden = 0;
26011 enum machine_mode inner_mode = GET_MODE (op0);
26013 if (GET_CODE (op0) == GET_CODE (op1))
26014 is_mulwiden = 1, op1 = XEXP (op1, 0);
26015 else if (CONST_INT_P (op1))
26017 if (GET_CODE (op0) == SIGN_EXTEND)
26018 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
26019 == INTVAL (op1);
26020 else
26021 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
26024 if (is_mulwiden)
26025 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
26028 *total = (cost->mult_init[MODE_INDEX (mode)]
26029 + nbits * cost->mult_bit
26030 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
26032 return true;
26035 case DIV:
26036 case UDIV:
26037 case MOD:
26038 case UMOD:
26039 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26040 /* ??? SSE cost should be used here. */
26041 *total = cost->fdiv;
26042 else if (X87_FLOAT_MODE_P (mode))
26043 *total = cost->fdiv;
26044 else if (FLOAT_MODE_P (mode))
26045 /* ??? SSE vector cost should be used here. */
26046 *total = cost->fdiv;
26047 else
26048 *total = cost->divide[MODE_INDEX (mode)];
26049 return false;
26051 case PLUS:
26052 if (GET_MODE_CLASS (mode) == MODE_INT
26053 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
26055 if (GET_CODE (XEXP (x, 0)) == PLUS
26056 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
26057 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
26058 && CONSTANT_P (XEXP (x, 1)))
26060 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
26061 if (val == 2 || val == 4 || val == 8)
26063 *total = cost->lea;
26064 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26065 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
26066 outer_code, speed);
26067 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26068 return true;
26071 else if (GET_CODE (XEXP (x, 0)) == MULT
26072 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
26074 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
26075 if (val == 2 || val == 4 || val == 8)
26077 *total = cost->lea;
26078 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26079 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26080 return true;
26083 else if (GET_CODE (XEXP (x, 0)) == PLUS)
26085 *total = cost->lea;
26086 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26087 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26088 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26089 return true;
26092 /* FALLTHRU */
26094 case MINUS:
26095 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26097 /* ??? SSE cost should be used here. */
26098 *total = cost->fadd;
26099 return false;
26101 else if (X87_FLOAT_MODE_P (mode))
26103 *total = cost->fadd;
26104 return false;
26106 else if (FLOAT_MODE_P (mode))
26108 /* ??? SSE vector cost should be used here. */
26109 *total = cost->fadd;
26110 return false;
26112 /* FALLTHRU */
26114 case AND:
26115 case IOR:
26116 case XOR:
26117 if (!TARGET_64BIT && mode == DImode)
26119 *total = (cost->add * 2
26120 + (rtx_cost (XEXP (x, 0), outer_code, speed)
26121 << (GET_MODE (XEXP (x, 0)) != DImode))
26122 + (rtx_cost (XEXP (x, 1), outer_code, speed)
26123 << (GET_MODE (XEXP (x, 1)) != DImode)));
26124 return true;
26126 /* FALLTHRU */
26128 case NEG:
26129 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26131 /* ??? SSE cost should be used here. */
26132 *total = cost->fchs;
26133 return false;
26135 else if (X87_FLOAT_MODE_P (mode))
26137 *total = cost->fchs;
26138 return false;
26140 else if (FLOAT_MODE_P (mode))
26142 /* ??? SSE vector cost should be used here. */
26143 *total = cost->fchs;
26144 return false;
26146 /* FALLTHRU */
26148 case NOT:
26149 if (!TARGET_64BIT && mode == DImode)
26150 *total = cost->add * 2;
26151 else
26152 *total = cost->add;
26153 return false;
26155 case COMPARE:
26156 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
26157 && XEXP (XEXP (x, 0), 1) == const1_rtx
26158 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
26159 && XEXP (x, 1) == const0_rtx)
26161 /* This kind of construct is implemented using test[bwl].
26162 Treat it as if we had an AND. */
26163 *total = (cost->add
26164 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
26165 + rtx_cost (const1_rtx, outer_code, speed));
26166 return true;
26168 return false;
26170 case FLOAT_EXTEND:
26171 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
26172 *total = 0;
26173 return false;
26175 case ABS:
26176 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26177 /* ??? SSE cost should be used here. */
26178 *total = cost->fabs;
26179 else if (X87_FLOAT_MODE_P (mode))
26180 *total = cost->fabs;
26181 else if (FLOAT_MODE_P (mode))
26182 /* ??? SSE vector cost should be used here. */
26183 *total = cost->fabs;
26184 return false;
26186 case SQRT:
26187 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26188 /* ??? SSE cost should be used here. */
26189 *total = cost->fsqrt;
26190 else if (X87_FLOAT_MODE_P (mode))
26191 *total = cost->fsqrt;
26192 else if (FLOAT_MODE_P (mode))
26193 /* ??? SSE vector cost should be used here. */
26194 *total = cost->fsqrt;
26195 return false;
26197 case UNSPEC:
26198 if (XINT (x, 1) == UNSPEC_TP)
26199 *total = 0;
26200 return false;
26202 case VEC_SELECT:
26203 case VEC_CONCAT:
26204 case VEC_MERGE:
26205 case VEC_DUPLICATE:
26206 /* ??? Assume all of these vector manipulation patterns are
26207 recognizable, in which case they all have pretty much the
26208 same cost. */
26209 *total = COSTS_N_INSNS (1);
26210 return true;
26212 default:
26213 return false;
26217 #if TARGET_MACHO
26219 static int current_machopic_label_num;
26221 /* Given a symbol name and its associated stub, write out the
26222 definition of the stub. */
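/* For illustration only: for a hypothetical symbol _foo, the MACHOPIC_PURE
   path below emits a stub of roughly this shape (all label names here are
   invented; the real ones come from GEN_BINDER_NAME_FOR_STUB,
   GEN_SYMBOL_NAME_FOR_SYMBOL and the L%d$lz counter):

	stub:	.indirect_symbol _foo
		call	LPC$1
	LPC$1:	popl	%eax
		movl	L1$lz-LPC$1(%eax),%edx
		jmp	*%edx
	binder:	lea	L1$lz-LPC$1(%eax),%eax
		pushl	%eax
		jmp	dyld_stub_binding_helper
	L1$lz:	.indirect_symbol _foo
		.long	binder  */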
26224 void
26225 machopic_output_stub (FILE *file, const char *symb, const char *stub)
26227 unsigned int length;
26228 char *binder_name, *symbol_name, lazy_ptr_name[32];
26229 int label = ++current_machopic_label_num;
26231 /* For 64-bit we shouldn't get here. */
26232 gcc_assert (!TARGET_64BIT);
26234 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
26235 symb = (*targetm.strip_name_encoding) (symb);
26237 length = strlen (stub);
26238 binder_name = XALLOCAVEC (char, length + 32);
26239 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26241 length = strlen (symb);
26242 symbol_name = XALLOCAVEC (char, length + 32);
26243 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26245 sprintf (lazy_ptr_name, "L%d$lz", label);
26247 if (MACHOPIC_PURE)
26248 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26249 else
26250 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26252 fprintf (file, "%s:\n", stub);
26253 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26255 if (MACHOPIC_PURE)
26257 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26258 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26259 fprintf (file, "\tjmp\t*%%edx\n");
26261 else
26262 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26264 fprintf (file, "%s:\n", binder_name);
26266 if (MACHOPIC_PURE)
26268 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26269 fputs ("\tpushl\t%eax\n", file);
26271 else
26272 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26274 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
26276 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26277 fprintf (file, "%s:\n", lazy_ptr_name);
26278 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26279 fprintf (file, ASM_LONG "%s\n", binder_name);
26281 #endif /* TARGET_MACHO */
26283 /* Order the registers for the register allocator. */
26285 void
26286 x86_order_regs_for_local_alloc (void)
26288 int pos = 0;
26289 int i;
26291 /* First allocate the local general purpose registers. */
26292 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26293 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26294 reg_alloc_order [pos++] = i;
26296 /* Global general purpose registers. */
26297 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26298 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26299 reg_alloc_order [pos++] = i;
26301 /* x87 registers come first in case we are doing FP math
26302 using them. */
26303 if (!TARGET_SSE_MATH)
26304 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26305 reg_alloc_order [pos++] = i;
26307 /* SSE registers. */
26308 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26309 reg_alloc_order [pos++] = i;
26310 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26311 reg_alloc_order [pos++] = i;
26313 /* x87 registers. */
26314 if (TARGET_SSE_MATH)
26315 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26316 reg_alloc_order [pos++] = i;
26318 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26319 reg_alloc_order [pos++] = i;
26321 /* Initialize the rest of the array, as some registers are never
26322 allocated at all. */
26323 while (pos < FIRST_PSEUDO_REGISTER)
26324 reg_alloc_order [pos++] = 0;
26327 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
26328 struct attribute_spec.handler. */
26329 static tree
26330 ix86_handle_abi_attribute (tree *node, tree name,
26331 tree args ATTRIBUTE_UNUSED,
26332 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26334 if (TREE_CODE (*node) != FUNCTION_TYPE
26335 && TREE_CODE (*node) != METHOD_TYPE
26336 && TREE_CODE (*node) != FIELD_DECL
26337 && TREE_CODE (*node) != TYPE_DECL)
26339 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26340 name);
26341 *no_add_attrs = true;
26342 return NULL_TREE;
26344 if (!TARGET_64BIT)
26346 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26347 name);
26348 *no_add_attrs = true;
26349 return NULL_TREE;
26352 /* Can combine regparm with all attributes but fastcall. */
26353 if (is_attribute_p ("ms_abi", name))
26355 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26357 error ("ms_abi and sysv_abi attributes are not compatible");
26360 return NULL_TREE;
26362 else if (is_attribute_p ("sysv_abi", name))
26364 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26366 error ("ms_abi and sysv_abi attributes are not compatible");
26369 return NULL_TREE;
26372 return NULL_TREE;
26375 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26376 struct attribute_spec.handler. */
26377 static tree
26378 ix86_handle_struct_attribute (tree *node, tree name,
26379 tree args ATTRIBUTE_UNUSED,
26380 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26382 tree *type = NULL;
26383 if (DECL_P (*node))
26385 if (TREE_CODE (*node) == TYPE_DECL)
26386 type = &TREE_TYPE (*node);
26388 else
26389 type = node;
26391 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26392 || TREE_CODE (*type) == UNION_TYPE)))
26394 warning (OPT_Wattributes, "%qE attribute ignored",
26395 name);
26396 *no_add_attrs = true;
26399 else if ((is_attribute_p ("ms_struct", name)
26400 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26401 || ((is_attribute_p ("gcc_struct", name)
26402 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26404 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26405 name);
26406 *no_add_attrs = true;
26409 return NULL_TREE;
26412 static tree
26413 ix86_handle_fndecl_attribute (tree *node, tree name,
26414 tree args ATTRIBUTE_UNUSED,
26415 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26417 if (TREE_CODE (*node) != FUNCTION_DECL)
26419 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26420 name);
26421 *no_add_attrs = true;
26422 return NULL_TREE;
26425 if (TARGET_64BIT)
26427 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26428 name);
26429 return NULL_TREE;
26432 #ifndef HAVE_AS_IX86_SWAP
26433 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26434 #endif
26436 return NULL_TREE;
26439 static bool
26440 ix86_ms_bitfield_layout_p (const_tree record_type)
26442 return (TARGET_MS_BITFIELD_LAYOUT &&
26443 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26444 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26447 /* Returns an expression indicating where the this parameter is
26448 located on entry to the FUNCTION. */
26450 static rtx
26451 x86_this_parameter (tree function)
26453 tree type = TREE_TYPE (function);
26454 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26455 int nregs;
26457 if (TARGET_64BIT)
26459 const int *parm_regs;
26461 if (ix86_function_type_abi (type) == MS_ABI)
26462 parm_regs = x86_64_ms_abi_int_parameter_registers;
26463 else
26464 parm_regs = x86_64_int_parameter_registers;
26465 return gen_rtx_REG (DImode, parm_regs[aggr]);
26468 nregs = ix86_function_regparm (type, function);
26470 if (nregs > 0 && !stdarg_p (type))
26472 int regno;
26474 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26475 regno = aggr ? DX_REG : CX_REG;
26476 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
26478 regno = CX_REG;
26479 if (aggr)
26480 return gen_rtx_MEM (SImode,
26481 plus_constant (stack_pointer_rtx, 4));
26483 else
26485 regno = AX_REG;
26486 if (aggr)
26488 regno = DX_REG;
26489 if (nregs == 1)
26490 return gen_rtx_MEM (SImode,
26491 plus_constant (stack_pointer_rtx, 4));
26494 return gen_rtx_REG (SImode, regno);
26497 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26500 /* Determine whether x86_output_mi_thunk can succeed. */
26502 static bool
26503 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26504 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26505 HOST_WIDE_INT vcall_offset, const_tree function)
26507 /* 64-bit can handle anything. */
26508 if (TARGET_64BIT)
26509 return true;
26511 /* For 32-bit, everything's fine if we have one free register. */
26512 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26513 return true;
26515 /* Need a free register for vcall_offset. */
26516 if (vcall_offset)
26517 return false;
26519 /* Need a free register for GOT references. */
26520 if (flag_pic && !(*targetm.binds_local_p) (function))
26521 return false;
26523 /* Otherwise ok. */
26524 return true;
26527 /* Output the assembler code for a thunk function. THUNK_DECL is the
26528 declaration for the thunk function itself, FUNCTION is the decl for
26529 the target function. DELTA is an immediate constant offset to be
26530 added to THIS. If VCALL_OFFSET is nonzero, the word at
26531 *(*this + vcall_offset) should be added to THIS. */
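/* In C-like pseudocode the emitted thunk behaves roughly as follows
   (illustration only, not literal source):

	this = (char *) this + DELTA;
	if (VCALL_OFFSET)
	  this += *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
	return FUNCTION (this, ...);		-- tail call with adjusted this

   The body is emitted directly with output_asm_insn, so no rtl passes run
   over the thunk.  */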
26533 static void
26534 x86_output_mi_thunk (FILE *file,
26535 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26536 HOST_WIDE_INT vcall_offset, tree function)
26538 rtx xops[3];
26539 rtx this_param = x86_this_parameter (function);
26540 rtx this_reg, tmp;
26542 /* Make sure unwind info is emitted for the thunk if needed. */
26543 final_start_function (emit_barrier (), file, 1);
26545 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26546 pull it in now and let DELTA benefit. */
26547 if (REG_P (this_param))
26548 this_reg = this_param;
26549 else if (vcall_offset)
26551 /* Put the this parameter into %eax. */
26552 xops[0] = this_param;
26553 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26554 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26556 else
26557 this_reg = NULL_RTX;
26559 /* Adjust the this parameter by a fixed constant. */
26560 if (delta)
26562 xops[0] = GEN_INT (delta);
26563 xops[1] = this_reg ? this_reg : this_param;
26564 if (TARGET_64BIT)
26566 if (!x86_64_general_operand (xops[0], DImode))
26568 tmp = gen_rtx_REG (DImode, R10_REG);
26569 xops[1] = tmp;
26570 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26571 xops[0] = tmp;
26572 xops[1] = this_param;
26574 if (x86_maybe_negate_const_int (&xops[0], DImode))
26575 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26576 else
26577 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26579 else if (x86_maybe_negate_const_int (&xops[0], SImode))
26580 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26581 else
26582 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26585 /* Adjust the this parameter by a value stored in the vtable. */
26586 if (vcall_offset)
26588 if (TARGET_64BIT)
26589 tmp = gen_rtx_REG (DImode, R10_REG);
26590 else
26592 int tmp_regno = CX_REG;
26593 if (lookup_attribute ("fastcall",
26594 TYPE_ATTRIBUTES (TREE_TYPE (function)))
26595 || lookup_attribute ("thiscall",
26596 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26597 tmp_regno = AX_REG;
26598 tmp = gen_rtx_REG (SImode, tmp_regno);
26601 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26602 xops[1] = tmp;
26603 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26605 /* Adjust the this parameter. */
26606 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26607 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26609 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26610 xops[0] = GEN_INT (vcall_offset);
26611 xops[1] = tmp2;
26612 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26613 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26615 xops[1] = this_reg;
26616 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26619 /* If necessary, drop THIS back to its stack slot. */
26620 if (this_reg && this_reg != this_param)
26622 xops[0] = this_reg;
26623 xops[1] = this_param;
26624 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26627 xops[0] = XEXP (DECL_RTL (function), 0);
26628 if (TARGET_64BIT)
26630 if (!flag_pic || (*targetm.binds_local_p) (function))
26631 output_asm_insn ("jmp\t%P0", xops);
26632 /* All thunks should be in the same object as their target,
26633 and thus binds_local_p should be true. */
26634 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26635 gcc_unreachable ();
26636 else
26638 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26639 tmp = gen_rtx_CONST (Pmode, tmp);
26640 tmp = gen_rtx_MEM (QImode, tmp);
26641 xops[0] = tmp;
26642 output_asm_insn ("jmp\t%A0", xops);
26645 else
26647 if (!flag_pic || (*targetm.binds_local_p) (function))
26648 output_asm_insn ("jmp\t%P0", xops);
26649 else
26650 #if TARGET_MACHO
26651 if (TARGET_MACHO)
26653 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26654 tmp = (gen_rtx_SYMBOL_REF
26655 (Pmode,
26656 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26657 tmp = gen_rtx_MEM (QImode, tmp);
26658 xops[0] = tmp;
26659 output_asm_insn ("jmp\t%0", xops);
26661 else
26662 #endif /* TARGET_MACHO */
26664 tmp = gen_rtx_REG (SImode, CX_REG);
26665 output_set_got (tmp, NULL_RTX);
26667 xops[1] = tmp;
26668 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26669 output_asm_insn ("jmp\t{*}%1", xops);
26672 final_end_function ();
26675 static void
26676 x86_file_start (void)
26678 default_file_start ();
26679 #if TARGET_MACHO
26680 darwin_file_start ();
26681 #endif
26682 if (X86_FILE_START_VERSION_DIRECTIVE)
26683 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26684 if (X86_FILE_START_FLTUSED)
26685 fputs ("\t.global\t__fltused\n", asm_out_file);
26686 if (ix86_asm_dialect == ASM_INTEL)
26687 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26691 x86_field_alignment (tree field, int computed)
26693 enum machine_mode mode;
26694 tree type = TREE_TYPE (field);
26696 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26697 return computed;
26698 mode = TYPE_MODE (strip_array_types (type));
26699 if (mode == DFmode || mode == DCmode
26700 || GET_MODE_CLASS (mode) == MODE_INT
26701 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26702 return MIN (32, computed);
26703 return computed;
26706 /* Output assembler code to FILE to increment profiler label # LABELNO
26707 for profiling a function entry. */
26708 void
26709 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26711 if (TARGET_64BIT)
26713 #ifndef NO_PROFILE_COUNTERS
26714 fprintf (file, "\tleaq\t%sP%d(%%rip),%%r11\n", LPREFIX, labelno);
26715 #endif
26717 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26718 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26719 else
26720 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26722 else if (flag_pic)
26724 #ifndef NO_PROFILE_COUNTERS
26725 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26726 LPREFIX, labelno);
26727 #endif
26728 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26730 else
26732 #ifndef NO_PROFILE_COUNTERS
26733 fprintf (file, "\tmovl\t$%sP%d,%%" PROFILE_COUNT_REGISTER "\n",
26734 LPREFIX, labelno);
26735 #endif
26736 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26740 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26741 /* We don't have exact information about the insn sizes, but we may assume
26742 quite safely that we are informed about all 1-byte insns and memory
26743 address sizes. This is enough to eliminate unnecessary padding in
26744 99% of cases. */
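/* The estimate returned below is deliberately a lower bound;
   ix86_avoid_jump_mispredicts only needs a safe minimum to decide whether
   four jumps can share one 16-byte window.  */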
26746 static int
26747 min_insn_size (rtx insn)
26749 int l = 0, len;
26751 if (!INSN_P (insn) || !active_insn_p (insn))
26752 return 0;
26754 /* Discard alignments we've emitted and jump table data. */
26755 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26756 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26757 return 0;
26758 if (JUMP_TABLE_DATA_P (insn))
26759 return 0;
26761 /* Important case - calls are always 5 bytes.
26762 It is common to have many calls in a row. */
26763 if (CALL_P (insn)
26764 && symbolic_reference_mentioned_p (PATTERN (insn))
26765 && !SIBLING_CALL_P (insn))
26766 return 5;
26767 len = get_attr_length (insn);
26768 if (len <= 1)
26769 return 1;
26771 /* For normal instructions we rely on get_attr_length being exact,
26772 with a few exceptions. */
26773 if (!JUMP_P (insn))
26775 enum attr_type type = get_attr_type (insn);
26777 switch (type)
26779 case TYPE_MULTI:
26780 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26781 || asm_noperands (PATTERN (insn)) >= 0)
26782 return 0;
26783 break;
26784 case TYPE_OTHER:
26785 case TYPE_FCMP:
26786 break;
26787 default:
26788 /* Otherwise trust get_attr_length. */
26789 return len;
26792 l = get_attr_length_address (insn);
26793 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26794 l = 4;
26796 if (l)
26797 return 1+l;
26798 else
26799 return 2;
26802 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
26803 window. */
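/* Rough illustration of the padding arithmetic: if the jumps being tracked
   fit in, say, nbytes = 13 bytes of minimum size, they can all share one
   16-byte window, so the pass inserts a pad of
   15 - nbytes + min_insn_size (insn) bytes before the last jump, pushing it
   into the next window.  The value 13 is made up for the example.  */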
26805 static void
26806 ix86_avoid_jump_mispredicts (void)
26808 rtx insn, start = get_insns ();
26809 int nbytes = 0, njumps = 0;
26810 int isjump = 0;
26812 /* Look for all minimal intervals of instructions containing 4 jumps.
26813 The intervals are bounded by START and INSN. NBYTES is the total
26814 size of the instructions in the interval, including INSN but not
26815 including START. When NBYTES is smaller than 16, it is possible
26816 that the end of START and INSN end up in the same 16-byte page.
26818 The smallest page offset at which INSN can start is when START ends
26819 at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
26820 We emit a p2align for the 16-byte window with max skip of 15 - NBYTES + sizeof (INSN).
26822 for (insn = start; insn; insn = NEXT_INSN (insn))
26824 int min_size;
26826 if (LABEL_P (insn))
26828 int align = label_to_alignment (insn);
26829 int max_skip = label_to_max_skip (insn);
26831 if (max_skip > 15)
26832 max_skip = 15;
26833 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26834 already in the current 16 byte page, because otherwise
26835 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26836 bytes to reach 16 byte boundary. */
26837 if (align <= 0
26838 || (align <= 3 && max_skip != (1 << align) - 1))
26839 max_skip = 0;
26840 if (dump_file)
26841 fprintf (dump_file, "Label %i with max_skip %i\n",
26842 INSN_UID (insn), max_skip);
26843 if (max_skip)
26845 while (nbytes + max_skip >= 16)
26847 start = NEXT_INSN (start);
26848 if ((JUMP_P (start)
26849 && GET_CODE (PATTERN (start)) != ADDR_VEC
26850 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26851 || CALL_P (start))
26852 njumps--, isjump = 1;
26853 else
26854 isjump = 0;
26855 nbytes -= min_insn_size (start);
26858 continue;
26861 min_size = min_insn_size (insn);
26862 nbytes += min_size;
26863 if (dump_file)
26864 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26865 INSN_UID (insn), min_size);
26866 if ((JUMP_P (insn)
26867 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26868 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26869 || CALL_P (insn))
26870 njumps++;
26871 else
26872 continue;
26874 while (njumps > 3)
26876 start = NEXT_INSN (start);
26877 if ((JUMP_P (start)
26878 && GET_CODE (PATTERN (start)) != ADDR_VEC
26879 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26880 || CALL_P (start))
26881 njumps--, isjump = 1;
26882 else
26883 isjump = 0;
26884 nbytes -= min_insn_size (start);
26886 gcc_assert (njumps >= 0);
26887 if (dump_file)
26888 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26889 INSN_UID (start), INSN_UID (insn), nbytes);
26891 if (njumps == 3 && isjump && nbytes < 16)
26893 int padsize = 15 - nbytes + min_insn_size (insn);
26895 if (dump_file)
26896 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26897 INSN_UID (insn), padsize);
26898 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
26902 #endif
26904 /* AMD Athlon works faster
26905 when RET is neither the destination of a conditional jump nor directly
26906 preceded by another jump instruction. We avoid the penalty by inserting
26907 NOPs just before such RET instructions. */
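/* Note: rather than literally emitting a NOP, the code below replaces the
   plain RET with the return_internal_long pattern from i386.md, which
   expands to a longer return encoding and avoids the same penalty.  */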
26908 static void
26909 ix86_pad_returns (void)
26911 edge e;
26912 edge_iterator ei;
26914 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26916 basic_block bb = e->src;
26917 rtx ret = BB_END (bb);
26918 rtx prev;
26919 bool replace = false;
26921 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26922 || optimize_bb_for_size_p (bb))
26923 continue;
26924 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26925 if (active_insn_p (prev) || LABEL_P (prev))
26926 break;
26927 if (prev && LABEL_P (prev))
26929 edge e;
26930 edge_iterator ei;
26932 FOR_EACH_EDGE (e, ei, bb->preds)
26933 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26934 && !(e->flags & EDGE_FALLTHRU))
26935 replace = true;
26937 if (!replace)
26939 prev = prev_active_insn (ret);
26940 if (prev
26941 && ((JUMP_P (prev) && any_condjump_p (prev))
26942 || CALL_P (prev)))
26943 replace = true;
26944 /* Empty functions get a branch mispredict even when the jump destination
26945 is not visible to us. */
26946 if (!prev && !optimize_function_for_size_p (cfun))
26947 replace = true;
26949 if (replace)
26951 emit_jump_insn_before (gen_return_internal_long (), ret);
26952 delete_insn (ret);
26957 /* Implement machine-specific optimizations. We implement padding of returns
26958 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
26959 static void
26960 ix86_reorg (void)
26962 if (optimize && optimize_function_for_speed_p (cfun))
26964 if (TARGET_PAD_RETURNS)
26965 ix86_pad_returns ();
26966 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26967 if (TARGET_FOUR_JUMP_LIMIT)
26968 ix86_avoid_jump_mispredicts ();
26969 #endif
26973 /* Return nonzero when a QImode register that must be represented via a REX
26974 prefix is used. */
26975 bool
26976 x86_extended_QIreg_mentioned_p (rtx insn)
26978 int i;
26979 extract_insn_cached (insn);
26980 for (i = 0; i < recog_data.n_operands; i++)
26981 if (REG_P (recog_data.operand[i])
26982 && REGNO (recog_data.operand[i]) > BX_REG)
26983 return true;
26984 return false;
26987 /* Return nonzero when P points to a register encoded via a REX prefix.
26988 Called via for_each_rtx. */
26989 static int
26990 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26992 unsigned int regno;
26993 if (!REG_P (*p))
26994 return 0;
26995 regno = REGNO (*p);
26996 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26999 /* Return true when INSN mentions a register that must be encoded using a
27000 REX prefix. */
27001 bool
27002 x86_extended_reg_mentioned_p (rtx insn)
27004 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
27005 extended_reg_mentioned_1, NULL);
27008 /* If profitable, negate (without causing overflow) integer constant
27009 of mode MODE at location LOC. Return true in this case. */
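/* For example, `addl $-4, %eax' and `subl $4, %eax' compute the same value;
   we prefer the form with the non-negative immediate.  The 128/-128 special
   case exists because 8-bit immediates are sign-extended: -128 fits in one
   byte while +128 does not, so `subl $-128' beats `addl $128' and -128
   itself is left untouched.  */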
27010 bool
27011 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
27013 HOST_WIDE_INT val;
27015 if (!CONST_INT_P (*loc))
27016 return false;
27018 switch (mode)
27020 case DImode:
27021 /* DImode x86_64 constants must fit in 32 bits. */
27022 gcc_assert (x86_64_immediate_operand (*loc, mode));
27024 mode = SImode;
27025 break;
27027 case SImode:
27028 case HImode:
27029 case QImode:
27030 break;
27032 default:
27033 gcc_unreachable ();
27036 /* Avoid overflows. */
27037 if (mode_signbit_p (mode, *loc))
27038 return false;
27040 val = INTVAL (*loc);
27042 /* Make things pretty: prefer `subl $4,%eax' over `addl $-4,%eax'.
27043 Exception: -128 has a shorter encoding than 128, so the rule is reversed for that pair. */
27044 if ((val < 0 && val != -128)
27045 || val == 128)
27047 *loc = GEN_INT (-val);
27048 return true;
27051 return false;
27054 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
27055 optabs would emit if we didn't have TFmode patterns. */
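/* The sequence generated below corresponds roughly to this C (illustration
   only; the names and types are hypothetical):

	double u64_to_double (unsigned long long x)
	{
	  if ((long long) x >= 0)
	    return (double) (long long) x;	-- signed conversion suffices
	  -- Halve the value but fold the low bit back in so the final
	  -- rounding is unaffected, convert, then double the result.
	  long long t = (long long) ((x >> 1) | (x & 1));
	  double f = (double) t;
	  return f + f;
	}  */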
27057 void
27058 x86_emit_floatuns (rtx operands[2])
27060 rtx neglab, donelab, i0, i1, f0, in, out;
27061 enum machine_mode mode, inmode;
27063 inmode = GET_MODE (operands[1]);
27064 gcc_assert (inmode == SImode || inmode == DImode);
27066 out = operands[0];
27067 in = force_reg (inmode, operands[1]);
27068 mode = GET_MODE (out);
27069 neglab = gen_label_rtx ();
27070 donelab = gen_label_rtx ();
27071 f0 = gen_reg_rtx (mode);
27073 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
27075 expand_float (out, in, 0);
27077 emit_jump_insn (gen_jump (donelab));
27078 emit_barrier ();
27080 emit_label (neglab);
27082 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
27083 1, OPTAB_DIRECT);
27084 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
27085 1, OPTAB_DIRECT);
27086 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
27088 expand_float (f0, i0, 0);
27090 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
27092 emit_label (donelab);
27095 /* AVX does not support 32-byte integer vector operations,
27096 thus the longest vector we are faced with is V16QImode. */
27097 #define MAX_VECT_LEN 16
27099 struct expand_vec_perm_d
27101 rtx target, op0, op1;
27102 unsigned char perm[MAX_VECT_LEN];
27103 enum machine_mode vmode;
27104 unsigned char nelt;
27105 bool testing_p;
27108 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
27109 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
27111 /* Get a vector mode of the same size as the original but with elements
27112 twice as wide. This is only guaranteed to apply to integral vectors. */
27114 static inline enum machine_mode
27115 get_mode_wider_vector (enum machine_mode o)
27117 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
27118 enum machine_mode n = GET_MODE_WIDER_MODE (o);
27119 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
27120 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
27121 return n;
27124 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27125 with all elements equal to VAR. Return true if successful. */
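/* Broadcast strategy, in outline: the SSE- and AVX-sized modes first try a
   plain (vec_duplicate ...) set; V8HImode and V16QImode under SSE2 insert
   the scalar once and then permute it everywhere (the `permute' label
   below); the narrower MMX modes replicate the scalar within a wider
   element and recurse; the 256-bit integer modes build one 128-bit half and
   concatenate it with itself.  */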
27127 static bool
27128 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
27129 rtx target, rtx val)
27131 bool ok;
27133 switch (mode)
27135 case V2SImode:
27136 case V2SFmode:
27137 if (!mmx_ok)
27138 return false;
27139 /* FALLTHRU */
27141 case V4DFmode:
27142 case V4DImode:
27143 case V8SFmode:
27144 case V8SImode:
27145 case V2DFmode:
27146 case V2DImode:
27147 case V4SFmode:
27148 case V4SImode:
27150 rtx insn, dup;
27152 /* First attempt to recognize VAL as-is. */
27153 dup = gen_rtx_VEC_DUPLICATE (mode, val);
27154 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
27155 if (recog_memoized (insn) < 0)
27157 rtx seq;
27158 /* If that fails, force VAL into a register. */
27160 start_sequence ();
27161 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
27162 seq = get_insns ();
27163 end_sequence ();
27164 if (seq)
27165 emit_insn_before (seq, insn);
27167 ok = recog_memoized (insn) >= 0;
27168 gcc_assert (ok);
27171 return true;
27173 case V4HImode:
27174 if (!mmx_ok)
27175 return false;
27176 if (TARGET_SSE || TARGET_3DNOW_A)
27178 rtx x;
27180 val = gen_lowpart (SImode, val);
27181 x = gen_rtx_TRUNCATE (HImode, val);
27182 x = gen_rtx_VEC_DUPLICATE (mode, x);
27183 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27184 return true;
27186 goto widen;
27188 case V8QImode:
27189 if (!mmx_ok)
27190 return false;
27191 goto widen;
27193 case V8HImode:
27194 if (TARGET_SSE2)
27196 struct expand_vec_perm_d dperm;
27197 rtx tmp1, tmp2;
27199 permute:
27200 memset (&dperm, 0, sizeof (dperm));
27201 dperm.target = target;
27202 dperm.vmode = mode;
27203 dperm.nelt = GET_MODE_NUNITS (mode);
27204 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
27206 /* Extend to SImode using a paradoxical SUBREG. */
27207 tmp1 = gen_reg_rtx (SImode);
27208 emit_move_insn (tmp1, gen_lowpart (SImode, val));
27210 /* Insert the SImode value as low element of a V4SImode vector. */
27211 tmp2 = gen_lowpart (V4SImode, dperm.op0);
27212 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
27214 ok = (expand_vec_perm_1 (&dperm)
27215 || expand_vec_perm_broadcast_1 (&dperm));
27216 gcc_assert (ok);
27217 return ok;
27219 goto widen;
27221 case V16QImode:
27222 if (TARGET_SSE2)
27223 goto permute;
27224 goto widen;
27226 widen:
27227 /* Replicate the value once into the next wider mode and recurse. */
27229 enum machine_mode smode, wsmode, wvmode;
27230 rtx x;
27232 smode = GET_MODE_INNER (mode);
27233 wvmode = get_mode_wider_vector (mode);
27234 wsmode = GET_MODE_INNER (wvmode);
27236 val = convert_modes (wsmode, smode, val, true);
27237 x = expand_simple_binop (wsmode, ASHIFT, val,
27238 GEN_INT (GET_MODE_BITSIZE (smode)),
27239 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27240 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27242 x = gen_lowpart (wvmode, target);
27243 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
27244 gcc_assert (ok);
27245 return ok;
27248 case V16HImode:
27249 case V32QImode:
27251 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
27252 rtx x = gen_reg_rtx (hvmode);
27254 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
27255 gcc_assert (ok);
27257 x = gen_rtx_VEC_CONCAT (mode, x, x);
27258 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27260 return true;
27262 default:
27263 return false;
27267 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27268 whose ONE_VAR element is VAR, and other elements are zero. Return true
27269 if successful. */
27271 static bool
27272 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27273 rtx target, rtx var, int one_var)
27275 enum machine_mode vsimode;
27276 rtx new_target;
27277 rtx x, tmp;
27278 bool use_vector_set = false;
27280 switch (mode)
27282 case V2DImode:
27283 /* For SSE4.1, we normally use vector set. But if the second
27284 element is zero and inter-unit moves are OK, we use movq
27285 instead. */
27286 use_vector_set = (TARGET_64BIT
27287 && TARGET_SSE4_1
27288 && !(TARGET_INTER_UNIT_MOVES
27289 && one_var == 0));
27290 break;
27291 case V16QImode:
27292 case V4SImode:
27293 case V4SFmode:
27294 use_vector_set = TARGET_SSE4_1;
27295 break;
27296 case V8HImode:
27297 use_vector_set = TARGET_SSE2;
27298 break;
27299 case V4HImode:
27300 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27301 break;
27302 case V32QImode:
27303 case V16HImode:
27304 case V8SImode:
27305 case V8SFmode:
27306 case V4DFmode:
27307 use_vector_set = TARGET_AVX;
27308 break;
27309 case V4DImode:
27310 /* Use ix86_expand_vector_set in 64bit mode only. */
27311 use_vector_set = TARGET_AVX && TARGET_64BIT;
27312 break;
27313 default:
27314 break;
27317 if (use_vector_set)
27319 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27320 var = force_reg (GET_MODE_INNER (mode), var);
27321 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27322 return true;
27325 switch (mode)
27327 case V2SFmode:
27328 case V2SImode:
27329 if (!mmx_ok)
27330 return false;
27331 /* FALLTHRU */
27333 case V2DFmode:
27334 case V2DImode:
27335 if (one_var != 0)
27336 return false;
27337 var = force_reg (GET_MODE_INNER (mode), var);
27338 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27339 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27340 return true;
27342 case V4SFmode:
27343 case V4SImode:
27344 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27345 new_target = gen_reg_rtx (mode);
27346 else
27347 new_target = target;
27348 var = force_reg (GET_MODE_INNER (mode), var);
27349 x = gen_rtx_VEC_DUPLICATE (mode, var);
27350 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27351 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27352 if (one_var != 0)
27354 /* We need to shuffle the value to the correct position, so
27355 create a new pseudo to store the intermediate result. */
27357 /* With SSE2, we can use the integer shuffle insns. */
27358 if (mode != V4SFmode && TARGET_SSE2)
27360 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27361 const1_rtx,
27362 GEN_INT (one_var == 1 ? 0 : 1),
27363 GEN_INT (one_var == 2 ? 0 : 1),
27364 GEN_INT (one_var == 3 ? 0 : 1)));
27365 if (target != new_target)
27366 emit_move_insn (target, new_target);
27367 return true;
27370 /* Otherwise convert the intermediate result to V4SFmode and
27371 use the SSE1 shuffle instructions. */
27372 if (mode != V4SFmode)
27374 tmp = gen_reg_rtx (V4SFmode);
27375 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27377 else
27378 tmp = new_target;
27380 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27381 const1_rtx,
27382 GEN_INT (one_var == 1 ? 0 : 1),
27383 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27384 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27386 if (mode != V4SFmode)
27387 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27388 else if (tmp != target)
27389 emit_move_insn (target, tmp);
27391 else if (target != new_target)
27392 emit_move_insn (target, new_target);
27393 return true;
27395 case V8HImode:
27396 case V16QImode:
27397 vsimode = V4SImode;
27398 goto widen;
27399 case V4HImode:
27400 case V8QImode:
27401 if (!mmx_ok)
27402 return false;
27403 vsimode = V2SImode;
27404 goto widen;
27405 widen:
27406 if (one_var != 0)
27407 return false;
27409 /* Zero extend the variable element to SImode and recurse. */
27410 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27412 x = gen_reg_rtx (vsimode);
27413 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27414 var, one_var))
27415 gcc_unreachable ();
27417 emit_move_insn (target, gen_lowpart (mode, x));
27418 return true;
27420 default:
27421 return false;
27425 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27426 consisting of the values in VALS. It is known that all elements
27427 except ONE_VAR are constants. Return true if successful. */
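/* Strategy: make a copy of VALS with the variable element replaced by zero,
   load that constant vector (typically from the constant pool), and then
   overwrite element ONE_VAR with ix86_expand_vector_set.  V16QImode without
   SSE4.1 (and V8QImode) first merge the variable byte with its neighbouring
   constant byte and perform an HImode insert instead.  */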
27429 static bool
27430 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27431 rtx target, rtx vals, int one_var)
27433 rtx var = XVECEXP (vals, 0, one_var);
27434 enum machine_mode wmode;
27435 rtx const_vec, x;
27437 const_vec = copy_rtx (vals);
27438 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27439 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27441 switch (mode)
27443 case V2DFmode:
27444 case V2DImode:
27445 case V2SFmode:
27446 case V2SImode:
27447 /* For the two element vectors, it's just as easy to use
27448 the general case. */
27449 return false;
27451 case V4DImode:
27452 /* Use ix86_expand_vector_set in 64bit mode only. */
27453 if (!TARGET_64BIT)
27454 return false;
27455 case V4DFmode:
27456 case V8SFmode:
27457 case V8SImode:
27458 case V16HImode:
27459 case V32QImode:
27460 case V4SFmode:
27461 case V4SImode:
27462 case V8HImode:
27463 case V4HImode:
27464 break;
27466 case V16QImode:
27467 if (TARGET_SSE4_1)
27468 break;
27469 wmode = V8HImode;
27470 goto widen;
27471 case V8QImode:
27472 wmode = V4HImode;
27473 goto widen;
27474 widen:
27475 /* There's no way to set one QImode entry easily. Combine
27476 the variable value with its adjacent constant value, and
27477 promote to an HImode set. */
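/* For example (values are hypothetical): inserting a variable byte V at
   index 5 of a V16QImode vector whose index-4 byte is the constant 0x12
   builds the HImode value (V << 8) | 0x12 and stores it as element 2 of
   the V8HImode view of the vector.  */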
27478 x = XVECEXP (vals, 0, one_var ^ 1);
27479 if (one_var & 1)
27481 var = convert_modes (HImode, QImode, var, true);
27482 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27483 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27484 x = GEN_INT (INTVAL (x) & 0xff);
27486 else
27488 var = convert_modes (HImode, QImode, var, true);
27489 x = gen_int_mode (INTVAL (x) << 8, HImode);
27491 if (x != const0_rtx)
27492 var = expand_simple_binop (HImode, IOR, var, x, var,
27493 1, OPTAB_LIB_WIDEN);
27495 x = gen_reg_rtx (wmode);
27496 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27497 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27499 emit_move_insn (target, gen_lowpart (mode, x));
27500 return true;
27502 default:
27503 return false;
27506 emit_move_insn (target, const_vec);
27507 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27508 return true;
27511 /* A subroutine of ix86_expand_vector_init_general. Use vector
27512 concatenate to handle the most general case: all values variable,
27513 and none identical. */
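/* For example, building a V4SFmode vector from four scalars a, b, c, d
   first forms the V2SFmode halves {a, b} and {c, d} recursively and then
   emits a single (vec_concat:V4SF half0 half1); the eight-element modes
   simply add one more level of halving.  */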
27515 static void
27516 ix86_expand_vector_init_concat (enum machine_mode mode,
27517 rtx target, rtx *ops, int n)
27519 enum machine_mode cmode, hmode = VOIDmode;
27520 rtx first[8], second[4];
27521 rtvec v;
27522 int i, j;
27524 switch (n)
27526 case 2:
27527 switch (mode)
27529 case V8SImode:
27530 cmode = V4SImode;
27531 break;
27532 case V8SFmode:
27533 cmode = V4SFmode;
27534 break;
27535 case V4DImode:
27536 cmode = V2DImode;
27537 break;
27538 case V4DFmode:
27539 cmode = V2DFmode;
27540 break;
27541 case V4SImode:
27542 cmode = V2SImode;
27543 break;
27544 case V4SFmode:
27545 cmode = V2SFmode;
27546 break;
27547 case V2DImode:
27548 cmode = DImode;
27549 break;
27550 case V2SImode:
27551 cmode = SImode;
27552 break;
27553 case V2DFmode:
27554 cmode = DFmode;
27555 break;
27556 case V2SFmode:
27557 cmode = SFmode;
27558 break;
27559 default:
27560 gcc_unreachable ();
27563 if (!register_operand (ops[1], cmode))
27564 ops[1] = force_reg (cmode, ops[1]);
27565 if (!register_operand (ops[0], cmode))
27566 ops[0] = force_reg (cmode, ops[0]);
27567 emit_insn (gen_rtx_SET (VOIDmode, target,
27568 gen_rtx_VEC_CONCAT (mode, ops[0],
27569 ops[1])));
27570 break;
27572 case 4:
27573 switch (mode)
27575 case V4DImode:
27576 cmode = V2DImode;
27577 break;
27578 case V4DFmode:
27579 cmode = V2DFmode;
27580 break;
27581 case V4SImode:
27582 cmode = V2SImode;
27583 break;
27584 case V4SFmode:
27585 cmode = V2SFmode;
27586 break;
27587 default:
27588 gcc_unreachable ();
27590 goto half;
27592 case 8:
27593 switch (mode)
27595 case V8SImode:
27596 cmode = V2SImode;
27597 hmode = V4SImode;
27598 break;
27599 case V8SFmode:
27600 cmode = V2SFmode;
27601 hmode = V4SFmode;
27602 break;
27603 default:
27604 gcc_unreachable ();
27606 goto half;
27608 half:
27609 /* FIXME: We process inputs backward to help RA. PR 36222. */
27610 i = n - 1;
27611 j = (n >> 1) - 1;
27612 for (; i > 0; i -= 2, j--)
27614 first[j] = gen_reg_rtx (cmode);
27615 v = gen_rtvec (2, ops[i - 1], ops[i]);
27616 ix86_expand_vector_init (false, first[j],
27617 gen_rtx_PARALLEL (cmode, v));
27620 n >>= 1;
27621 if (n > 2)
27623 gcc_assert (hmode != VOIDmode);
27624 for (i = j = 0; i < n; i += 2, j++)
27626 second[j] = gen_reg_rtx (hmode);
27627 ix86_expand_vector_init_concat (hmode, second [j],
27628 &first [i], 2);
27630 n >>= 1;
27631 ix86_expand_vector_init_concat (mode, target, second, n);
27633 else
27634 ix86_expand_vector_init_concat (mode, target, first, n);
27635 break;
27637 default:
27638 gcc_unreachable ();
27642 /* A subroutine of ix86_expand_vector_init_general. Use vector
27643 interleave to handle the most general case: all values variable,
27644 and none identical. */
27646 static void
27647 ix86_expand_vector_init_interleave (enum machine_mode mode,
27648 rtx target, rtx *ops, int n)
27650 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27651 int i, j;
27652 rtx op0, op1;
27653 rtx (*gen_load_even) (rtx, rtx, rtx);
27654 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27655 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27657 switch (mode)
27659 case V8HImode:
27660 gen_load_even = gen_vec_setv8hi;
27661 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27662 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27663 inner_mode = HImode;
27664 first_imode = V4SImode;
27665 second_imode = V2DImode;
27666 third_imode = VOIDmode;
27667 break;
27668 case V16QImode:
27669 gen_load_even = gen_vec_setv16qi;
27670 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27671 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27672 inner_mode = QImode;
27673 first_imode = V8HImode;
27674 second_imode = V4SImode;
27675 third_imode = V2DImode;
27676 break;
27677 default:
27678 gcc_unreachable ();
27681 for (i = 0; i < n; i++)
27683 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27684 op0 = gen_reg_rtx (SImode);
27685 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27687 /* Insert the SImode value as low element of V4SImode vector. */
27688 op1 = gen_reg_rtx (V4SImode);
27689 op0 = gen_rtx_VEC_MERGE (V4SImode,
27690 gen_rtx_VEC_DUPLICATE (V4SImode,
27691 op0),
27692 CONST0_RTX (V4SImode),
27693 const1_rtx);
27694 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27696 /* Cast the V4SImode vector back to a vector in the original mode. */
27697 op0 = gen_reg_rtx (mode);
27698 emit_move_insn (op0, gen_lowpart (mode, op1));
27700 /* Load even elements into the second position. */
27701 emit_insn ((*gen_load_even) (op0,
27702 force_reg (inner_mode,
27703 ops [i + i + 1]),
27704 const1_rtx));
27706 /* Cast vector to FIRST_IMODE vector. */
27707 ops[i] = gen_reg_rtx (first_imode);
27708 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27711 /* Interleave low FIRST_IMODE vectors. */
27712 for (i = j = 0; i < n; i += 2, j++)
27714 op0 = gen_reg_rtx (first_imode);
27715 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27717 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27718 ops[j] = gen_reg_rtx (second_imode);
27719 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27722 /* Interleave low SECOND_IMODE vectors. */
27723 switch (second_imode)
27725 case V4SImode:
27726 for (i = j = 0; i < n / 2; i += 2, j++)
27728 op0 = gen_reg_rtx (second_imode);
27729 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27730 ops[i + 1]));
27732 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27733 vector. */
27734 ops[j] = gen_reg_rtx (third_imode);
27735 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27737 second_imode = V2DImode;
27738 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27739 /* FALLTHRU */
27741 case V2DImode:
27742 op0 = gen_reg_rtx (second_imode);
27743 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27744 ops[1]));
27746 /* Cast the SECOND_IMODE vector back to a vector in the original
27747 mode. */
27748 emit_insn (gen_rtx_SET (VOIDmode, target,
27749 gen_lowpart (mode, op0)));
27750 break;
27752 default:
27753 gcc_unreachable ();
27757 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27758 all values variable, and none identical. */
27760 static void
27761 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27762 rtx target, rtx vals)
27764 rtx ops[32], op0, op1;
27765 enum machine_mode half_mode = VOIDmode;
27766 int n, i;
27768 switch (mode)
27770 case V2SFmode:
27771 case V2SImode:
27772 if (!mmx_ok && !TARGET_SSE)
27773 break;
27774 /* FALLTHRU */
27776 case V8SFmode:
27777 case V8SImode:
27778 case V4DFmode:
27779 case V4DImode:
27780 case V4SFmode:
27781 case V4SImode:
27782 case V2DFmode:
27783 case V2DImode:
27784 n = GET_MODE_NUNITS (mode);
27785 for (i = 0; i < n; i++)
27786 ops[i] = XVECEXP (vals, 0, i);
27787 ix86_expand_vector_init_concat (mode, target, ops, n);
27788 return;
27790 case V32QImode:
27791 half_mode = V16QImode;
27792 goto half;
27794 case V16HImode:
27795 half_mode = V8HImode;
27796 goto half;
27798 half:
27799 n = GET_MODE_NUNITS (mode);
27800 for (i = 0; i < n; i++)
27801 ops[i] = XVECEXP (vals, 0, i);
27802 op0 = gen_reg_rtx (half_mode);
27803 op1 = gen_reg_rtx (half_mode);
27804 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27805 n >> 2);
27806 ix86_expand_vector_init_interleave (half_mode, op1,
27807 &ops [n >> 1], n >> 2);
27808 emit_insn (gen_rtx_SET (VOIDmode, target,
27809 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27810 return;
27812 case V16QImode:
27813 if (!TARGET_SSE4_1)
27814 break;
27815 /* FALLTHRU */
27817 case V8HImode:
27818 if (!TARGET_SSE2)
27819 break;
27821 /* Don't use ix86_expand_vector_init_interleave if we can't
27822 move from GPR to SSE register directly. */
27823 if (!TARGET_INTER_UNIT_MOVES)
27824 break;
27826 n = GET_MODE_NUNITS (mode);
27827 for (i = 0; i < n; i++)
27828 ops[i] = XVECEXP (vals, 0, i);
27829 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27830 return;
27832 case V4HImode:
27833 case V8QImode:
27834 break;
27836 default:
27837 gcc_unreachable ();
27841 int i, j, n_elts, n_words, n_elt_per_word;
27842 enum machine_mode inner_mode;
27843 rtx words[4], shift;
27845 inner_mode = GET_MODE_INNER (mode);
27846 n_elts = GET_MODE_NUNITS (mode);
27847 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27848 n_elt_per_word = n_elts / n_words;
27849 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27851 for (i = 0; i < n_words; ++i)
27853 rtx word = NULL_RTX;
27855 for (j = 0; j < n_elt_per_word; ++j)
27857 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27858 elt = convert_modes (word_mode, inner_mode, elt, true);
27860 if (j == 0)
27861 word = elt;
27862 else
27864 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27865 word, 1, OPTAB_LIB_WIDEN);
27866 word = expand_simple_binop (word_mode, IOR, word, elt,
27867 word, 1, OPTAB_LIB_WIDEN);
27871 words[i] = word;
27874 if (n_words == 1)
27875 emit_move_insn (target, gen_lowpart (mode, words[0]));
27876 else if (n_words == 2)
27878 rtx tmp = gen_reg_rtx (mode);
27879 emit_clobber (tmp);
27880 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27881 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27882 emit_move_insn (target, tmp);
27884 else if (n_words == 4)
27886 rtx tmp = gen_reg_rtx (V4SImode);
27887 gcc_assert (word_mode == SImode);
27888 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27889 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27890 emit_move_insn (target, gen_lowpart (mode, tmp));
27892 else
27893 gcc_unreachable ();
27897 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27898 instructions unless MMX_OK is true. */
27900 void
27901 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27903 enum machine_mode mode = GET_MODE (target);
27904 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27905 int n_elts = GET_MODE_NUNITS (mode);
27906 int n_var = 0, one_var = -1;
27907 bool all_same = true, all_const_zero = true;
27908 int i;
27909 rtx x;
27911 for (i = 0; i < n_elts; ++i)
27913 x = XVECEXP (vals, 0, i);
27914 if (!(CONST_INT_P (x)
27915 || GET_CODE (x) == CONST_DOUBLE
27916 || GET_CODE (x) == CONST_FIXED))
27917 n_var++, one_var = i;
27918 else if (x != CONST0_RTX (inner_mode))
27919 all_const_zero = false;
27920 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27921 all_same = false;
27924 /* Constants are best loaded from the constant pool. */
27925 if (n_var == 0)
27927 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27928 return;
27931 /* If all values are identical, broadcast the value. */
27932 if (all_same
27933 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27934 XVECEXP (vals, 0, 0)))
27935 return;
27937 /* Values where only one field is non-constant are best loaded from
27938 the pool and overwritten via move later. */
27939 if (n_var == 1)
27941 if (all_const_zero
27942 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27943 XVECEXP (vals, 0, one_var),
27944 one_var))
27945 return;
27947 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27948 return;
27951 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27954 void
27955 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27957 enum machine_mode mode = GET_MODE (target);
27958 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27959 enum machine_mode half_mode;
27960 bool use_vec_merge = false;
27961 rtx tmp;
27962 static rtx (*gen_extract[6][2]) (rtx, rtx)
27964 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27965 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27966 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27967 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27968 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27969 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27971 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27973 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27974 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27975 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27976 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27977 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27978 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27980 int i, j, n;
27982 switch (mode)
27984 case V2SFmode:
27985 case V2SImode:
27986 if (mmx_ok)
27988 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27989 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27990 if (elt == 0)
27991 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27992 else
27993 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27994 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27995 return;
27997 break;
27999 case V2DImode:
28000 use_vec_merge = TARGET_SSE4_1;
28001 if (use_vec_merge)
28002 break;
28004 case V2DFmode:
28006 rtx op0, op1;
28008 /* For the two element vectors, we implement a VEC_CONCAT with
28009 the extraction of the other element. */
28011 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
28012 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
28014 if (elt == 0)
28015 op0 = val, op1 = tmp;
28016 else
28017 op0 = tmp, op1 = val;
28019 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
28020 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28022 return;
28024 case V4SFmode:
28025 use_vec_merge = TARGET_SSE4_1;
28026 if (use_vec_merge)
28027 break;
28029 switch (elt)
28031 case 0:
28032 use_vec_merge = true;
28033 break;
28035 case 1:
28036 /* tmp = target = A B C D */
28037 tmp = copy_to_reg (target);
28038 /* target = A A B B */
28039 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
28040 /* target = X A B B */
28041 ix86_expand_vector_set (false, target, val, 0);
28042 /* target = A X C D */
28043 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
28044 const1_rtx, const0_rtx,
28045 GEN_INT (2+4), GEN_INT (3+4)));
28046 return;
28048 case 2:
28049 /* tmp = target = A B C D */
28050 tmp = copy_to_reg (target);
28051 /* tmp = X B C D */
28052 ix86_expand_vector_set (false, tmp, val, 0);
28053 /* target = A B X D */
28054 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
28055 const0_rtx, const1_rtx,
28056 GEN_INT (0+4), GEN_INT (3+4)));
28057 return;
28059 case 3:
28060 /* tmp = target = A B C D */
28061 tmp = copy_to_reg (target);
28062 /* tmp = X B C D */
28063 ix86_expand_vector_set (false, tmp, val, 0);
28064 /* target = A B C X */
28065 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
28066 const0_rtx, const1_rtx,
28067 GEN_INT (2+4), GEN_INT (0+4)));
28068 return;
28070 default:
28071 gcc_unreachable ();
28073 break;
28075 case V4SImode:
28076 use_vec_merge = TARGET_SSE4_1;
28077 if (use_vec_merge)
28078 break;
28080 /* Element 0 handled by vec_merge below. */
28081 if (elt == 0)
28083 use_vec_merge = true;
28084 break;
28087 if (TARGET_SSE2)
28089 /* With SSE2, use integer shuffles to swap element 0 and ELT,
28090 store into element 0, then shuffle them back. */
28092 rtx order[4];
28094 order[0] = GEN_INT (elt);
28095 order[1] = const1_rtx;
28096 order[2] = const2_rtx;
28097 order[3] = GEN_INT (3);
28098 order[elt] = const0_rtx;
28100 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28101 order[1], order[2], order[3]));
28103 ix86_expand_vector_set (false, target, val, 0);
28105 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28106 order[1], order[2], order[3]));
28108 else
28110 /* For SSE1, we have to reuse the V4SF code. */
28111 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
28112 gen_lowpart (SFmode, val), elt);
28114 return;
28116 case V8HImode:
28117 use_vec_merge = TARGET_SSE2;
28118 break;
28119 case V4HImode:
28120 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28121 break;
28123 case V16QImode:
28124 use_vec_merge = TARGET_SSE4_1;
28125 break;
28127 case V8QImode:
28128 break;
28130 case V32QImode:
28131 half_mode = V16QImode;
28132 j = 0;
28133 n = 16;
28134 goto half;
28136 case V16HImode:
28137 half_mode = V8HImode;
28138 j = 1;
28139 n = 8;
28140 goto half;
28142 case V8SImode:
28143 half_mode = V4SImode;
28144 j = 2;
28145 n = 4;
28146 goto half;
28148 case V4DImode:
28149 half_mode = V2DImode;
28150 j = 3;
28151 n = 2;
28152 goto half;
28154 case V8SFmode:
28155 half_mode = V4SFmode;
28156 j = 4;
28157 n = 4;
28158 goto half;
28160 case V4DFmode:
28161 half_mode = V2DFmode;
28162 j = 5;
28163 n = 2;
28164 goto half;
28166 half:
28167 /* Compute offset. */
28168 i = elt / n;
28169 elt %= n;
28171 gcc_assert (i <= 1);
28173 /* Extract the half. */
28174 tmp = gen_reg_rtx (half_mode);
28175 emit_insn ((*gen_extract[j][i]) (tmp, target));
28177 /* Put val in tmp at elt. */
28178 ix86_expand_vector_set (false, tmp, val, elt);
28180 /* Put it back. */
28181 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
28182 return;
28184 default:
28185 break;
28188 if (use_vec_merge)
28190 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
28191 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
28192 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28194 else
28196 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28198 emit_move_insn (mem, target);
28200 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28201 emit_move_insn (tmp, val);
28203 emit_move_insn (target, mem);
28207 void
28208 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
28210 enum machine_mode mode = GET_MODE (vec);
28211 enum machine_mode inner_mode = GET_MODE_INNER (mode);
28212 bool use_vec_extr = false;
28213 rtx tmp;
28215 switch (mode)
28217 case V2SImode:
28218 case V2SFmode:
28219 if (!mmx_ok)
28220 break;
28221 /* FALLTHRU */
28223 case V2DFmode:
28224 case V2DImode:
28225 use_vec_extr = true;
28226 break;
28228 case V4SFmode:
28229 use_vec_extr = TARGET_SSE4_1;
28230 if (use_vec_extr)
28231 break;
28233 switch (elt)
28235 case 0:
28236 tmp = vec;
28237 break;
28239 case 1:
28240 case 3:
28241 tmp = gen_reg_rtx (mode);
28242 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28243 GEN_INT (elt), GEN_INT (elt),
28244 GEN_INT (elt+4), GEN_INT (elt+4)));
28245 break;
28247 case 2:
28248 tmp = gen_reg_rtx (mode);
28249 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
28250 break;
28252 default:
28253 gcc_unreachable ();
28255 vec = tmp;
28256 use_vec_extr = true;
28257 elt = 0;
28258 break;
28260 case V4SImode:
28261 use_vec_extr = TARGET_SSE4_1;
28262 if (use_vec_extr)
28263 break;
28265 if (TARGET_SSE2)
28267 switch (elt)
28269 case 0:
28270 tmp = vec;
28271 break;
28273 case 1:
28274 case 3:
28275 tmp = gen_reg_rtx (mode);
28276 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28277 GEN_INT (elt), GEN_INT (elt),
28278 GEN_INT (elt), GEN_INT (elt)));
28279 break;
28281 case 2:
28282 tmp = gen_reg_rtx (mode);
28283 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
28284 break;
28286 default:
28287 gcc_unreachable ();
28289 vec = tmp;
28290 use_vec_extr = true;
28291 elt = 0;
28293 else
28295 /* For SSE1, we have to reuse the V4SF code. */
28296 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28297 gen_lowpart (V4SFmode, vec), elt);
28298 return;
28300 break;
28302 case V8HImode:
28303 use_vec_extr = TARGET_SSE2;
28304 break;
28305 case V4HImode:
28306 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28307 break;
28309 case V16QImode:
28310 use_vec_extr = TARGET_SSE4_1;
28311 break;
28313 case V8QImode:
28314 /* ??? Could extract the appropriate HImode element and shift. */
28315 default:
28316 break;
28319 if (use_vec_extr)
28321 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28322 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28324 /* Let the rtl optimizers know about the zero extension performed. */
28325 if (inner_mode == QImode || inner_mode == HImode)
28327 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28328 target = gen_lowpart (SImode, target);
28331 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28333 else
28335 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28337 emit_move_insn (mem, vec);
28339 tmp = adjust_address (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
28340 emit_move_insn (target, tmp);
28344 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28345 pattern to reduce; DEST is the destination; IN is the input vector. */
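/* As an illustration, for IN = { x0 x1 x2 x3 } the sequence below computes
   roughly:
     tmp1 = { x2 x3 x2 x3 }                      (movhlps)
     tmp2 = fn (tmp1, in)                        = { f(x2,x0) f(x3,x1) ... }
     tmp3 = { tmp2[1] tmp2[1] tmp2[1] tmp2[1] }  (shufps)
     dest = fn (tmp2, tmp3)
   so element 0 of DEST ends up holding f (f (x2, x0), f (x3, x1)), the
   reduction of all four input elements.  */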
28347 void
28348 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28350 rtx tmp1, tmp2, tmp3;
28352 tmp1 = gen_reg_rtx (V4SFmode);
28353 tmp2 = gen_reg_rtx (V4SFmode);
28354 tmp3 = gen_reg_rtx (V4SFmode);
28356 emit_insn (gen_sse_movhlps (tmp1, in, in));
28357 emit_insn (fn (tmp2, tmp1, in));
28359 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28360 const1_rtx, const1_rtx,
28361 GEN_INT (1+4), GEN_INT (1+4)));
28362 emit_insn (fn (dest, tmp2, tmp3));
28365 /* Target hook for scalar_mode_supported_p. */
28366 static bool
28367 ix86_scalar_mode_supported_p (enum machine_mode mode)
28369 if (DECIMAL_FLOAT_MODE_P (mode))
28370 return default_decimal_float_supported_p ();
28371 else if (mode == TFmode)
28372 return true;
28373 else
28374 return default_scalar_mode_supported_p (mode);
28377 /* Implements target hook vector_mode_supported_p. */
28378 static bool
28379 ix86_vector_mode_supported_p (enum machine_mode mode)
28381 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28382 return true;
28383 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28384 return true;
28385 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28386 return true;
28387 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28388 return true;
28389 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28390 return true;
28391 return false;
28394 /* Target hook for c_mode_for_suffix. */
28395 static enum machine_mode
28396 ix86_c_mode_for_suffix (char suffix)
28398 if (suffix == 'q')
28399 return TFmode;
28400 if (suffix == 'w')
28401 return XFmode;
28403 return VOIDmode;
28406 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28408 We do this in the new i386 backend to maintain source compatibility
28409 with the old cc0-based compiler. */
28411 static tree
28412 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28413 tree inputs ATTRIBUTE_UNUSED,
28414 tree clobbers)
28416 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28417 clobbers);
28418 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28419 clobbers);
28420 return clobbers;
28423 /* Implements the target hook targetm.asm.encode_section_info. This
28424 is not used by NetWare. */
28426 static void ATTRIBUTE_UNUSED
28427 ix86_encode_section_info (tree decl, rtx rtl, int first)
28429 default_encode_section_info (decl, rtl, first);
28431 if (TREE_CODE (decl) == VAR_DECL
28432 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28433 && ix86_in_large_data_p (decl))
28434 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28437 /* Worker function for REVERSE_CONDITION. */
28439 enum rtx_code
28440 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28442 return (mode != CCFPmode && mode != CCFPUmode
28443 ? reverse_condition (code)
28444 : reverse_condition_maybe_unordered (code));
28447 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28448 to OPERANDS[0]. */
28450 const char *
28451 output_387_reg_move (rtx insn, rtx *operands)
28453 if (REG_P (operands[0]))
28455 if (REG_P (operands[1])
28456 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28458 if (REGNO (operands[0]) == FIRST_STACK_REG)
28459 return output_387_ffreep (operands, 0);
28460 return "fstp\t%y0";
28462 if (STACK_TOP_P (operands[0]))
28463 return "fld%Z1\t%y1";
28464 return "fst\t%y0";
28466 else if (MEM_P (operands[0]))
28468 gcc_assert (REG_P (operands[1]));
28469 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28470 return "fstp%Z0\t%y0";
28471 else
28473 /* There is no non-popping store to memory for XFmode.
28474 So if we need one, follow the store with a load. */
28475 if (GET_MODE (operands[0]) == XFmode)
28476 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28477 else
28478 return "fst%Z0\t%y0";
28481 else
28482 gcc_unreachable();
28485 /* Output code to perform a conditional jump to LABEL, if C2 flag in
28486 FP status register is set. */
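/* The C2 condition code is bit 10 of the x87 status word; after fnstsw it
   sits in bit 2 of the high byte, which is why the path without SAHF below
   tests that byte against the mask 0x04.  */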
28488 void
28489 ix86_emit_fp_unordered_jump (rtx label)
28491 rtx reg = gen_reg_rtx (HImode);
28492 rtx temp;
28494 emit_insn (gen_x86_fnstsw_1 (reg));
28496 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28498 emit_insn (gen_x86_sahf_1 (reg));
28500 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28501 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28503 else
28505 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28507 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28508 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28511 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28512 gen_rtx_LABEL_REF (VOIDmode, label),
28513 pc_rtx);
28514 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28516 emit_jump_insn (temp);
28517 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28520 /* Output code to perform a log1p XFmode calculation. */
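/* Roughly, the expansion below computes
     log1p (x) = fabs (x) < 1 - sqrt (2) / 2  ?  fldln2 * fyl2xp1 (x)
                                              :  fldln2 * fyl2x (1.0 + x)
   since the argument of fyl2xp1 must stay below 1 - sqrt (2) / 2, the
   0.29289... constant tested below; larger inputs fall back to fyl2x
   applied to 1.0 + x.  */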
28522 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28524 rtx label1 = gen_label_rtx ();
28525 rtx label2 = gen_label_rtx ();
28527 rtx tmp = gen_reg_rtx (XFmode);
28528 rtx tmp2 = gen_reg_rtx (XFmode);
28529 rtx test;
28531 emit_insn (gen_absxf2 (tmp, op1));
28532 test = gen_rtx_GE (VOIDmode, tmp,
28533 CONST_DOUBLE_FROM_REAL_VALUE (
28534 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28535 XFmode));
28536 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28538 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28539 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28540 emit_jump (label2);
28542 emit_label (label1);
28543 emit_move_insn (tmp, CONST1_RTX (XFmode));
28544 emit_insn (gen_addxf3 (tmp, op1, tmp));
28545 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28546 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28548 emit_label (label2);
28551 /* Output code to perform a Newton-Raphson approximation of a single precision
28552 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28554 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28556 rtx x0, x1, e0, e1, two;
28558 x0 = gen_reg_rtx (mode);
28559 e0 = gen_reg_rtx (mode);
28560 e1 = gen_reg_rtx (mode);
28561 x1 = gen_reg_rtx (mode);
28563 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28565 if (VECTOR_MODE_P (mode))
28566 two = ix86_build_const_vector (SFmode, true, two);
28568 two = force_reg (mode, two);
28570 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
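/* One Newton-Raphson step for the reciprocal refines an estimate x0 of
   1/b as x1 = x0 * (2 - b * x0).  rcpss/rcpps give roughly 12 bits of
   precision, so a single step is enough to approach single precision;
   the multiplication by a is folded into the same step via e0 = a * x0.  */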
28572 /* x0 = rcp(b) estimate */
28573 emit_insn (gen_rtx_SET (VOIDmode, x0,
28574 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28575 UNSPEC_RCP)));
28576 /* e0 = x0 * a */
28577 emit_insn (gen_rtx_SET (VOIDmode, e0,
28578 gen_rtx_MULT (mode, x0, a)));
28579 /* e1 = x0 * b */
28580 emit_insn (gen_rtx_SET (VOIDmode, e1,
28581 gen_rtx_MULT (mode, x0, b)));
28582 /* x1 = 2. - e1 */
28583 emit_insn (gen_rtx_SET (VOIDmode, x1,
28584 gen_rtx_MINUS (mode, two, e1)));
28585 /* res = e0 * x1 */
28586 emit_insn (gen_rtx_SET (VOIDmode, res,
28587 gen_rtx_MULT (mode, e0, x1)));
28590 /* Output code to perform a Newton-Raphson approximation of a
28591 single precision floating point [reciprocal] square root. */
28593 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28594 bool recip)
28596 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28597 REAL_VALUE_TYPE r;
28599 x0 = gen_reg_rtx (mode);
28600 e0 = gen_reg_rtx (mode);
28601 e1 = gen_reg_rtx (mode);
28602 e2 = gen_reg_rtx (mode);
28603 e3 = gen_reg_rtx (mode);
28605 real_from_integer (&r, VOIDmode, -3, -1, 0);
28606 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28608 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28609 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28611 if (VECTOR_MODE_P (mode))
28613 mthree = ix86_build_const_vector (SFmode, true, mthree);
28614 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28617 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28618 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
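/* This is one Newton-Raphson step for 1/sqrt(a): given an estimate x0,
   x1 = x0 * (3 - a * x0 * x0) / 2, written above with negated constants
   as -0.5 * x0 * (a * x0 * x0 - 3); multiplying once more by a yields
   the sqrt variant.  */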
28620 /* x0 = rsqrt(a) estimate */
28621 emit_insn (gen_rtx_SET (VOIDmode, x0,
28622 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28623 UNSPEC_RSQRT)));
28625 /* If a == 0.0, filter out the infinity to prevent NaN for sqrt (0.0). */
28626 if (!recip)
28628 rtx zero, mask;
28630 zero = gen_reg_rtx (mode);
28631 mask = gen_reg_rtx (mode);
28633 zero = force_reg (mode, CONST0_RTX(mode));
28634 emit_insn (gen_rtx_SET (VOIDmode, mask,
28635 gen_rtx_NE (mode, zero, a)));
28637 emit_insn (gen_rtx_SET (VOIDmode, x0,
28638 gen_rtx_AND (mode, x0, mask)));
28641 /* e0 = x0 * a */
28642 emit_insn (gen_rtx_SET (VOIDmode, e0,
28643 gen_rtx_MULT (mode, x0, a)));
28644 /* e1 = e0 * x0 */
28645 emit_insn (gen_rtx_SET (VOIDmode, e1,
28646 gen_rtx_MULT (mode, e0, x0)));
28648 /* e2 = e1 - 3. */
28649 mthree = force_reg (mode, mthree);
28650 emit_insn (gen_rtx_SET (VOIDmode, e2,
28651 gen_rtx_PLUS (mode, e1, mthree)));
28653 mhalf = force_reg (mode, mhalf);
28654 if (recip)
28655 /* e3 = -.5 * x0 */
28656 emit_insn (gen_rtx_SET (VOIDmode, e3,
28657 gen_rtx_MULT (mode, x0, mhalf)));
28658 else
28659 /* e3 = -.5 * e0 */
28660 emit_insn (gen_rtx_SET (VOIDmode, e3,
28661 gen_rtx_MULT (mode, e0, mhalf)));
28662 /* ret = e2 * e3 */
28663 emit_insn (gen_rtx_SET (VOIDmode, res,
28664 gen_rtx_MULT (mode, e2, e3)));
28667 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28669 static void ATTRIBUTE_UNUSED
28670 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28671 tree decl)
28673 /* With Binutils 2.15, the "@unwind" marker must be specified on
28674 every occurrence of the ".eh_frame" section, not just the first
28675 one. */
28676 if (TARGET_64BIT
28677 && strcmp (name, ".eh_frame") == 0)
28679 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28680 flags & SECTION_WRITE ? "aw" : "a");
28681 return;
28683 default_elf_asm_named_section (name, flags, decl);
28686 /* Return the mangling of TYPE if it is an extended fundamental type. */
28688 static const char *
28689 ix86_mangle_type (const_tree type)
28691 type = TYPE_MAIN_VARIANT (type);
28693 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28694 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28695 return NULL;
28697 switch (TYPE_MODE (type))
28699 case TFmode:
28700 /* __float128 is "g". */
28701 return "g";
28702 case XFmode:
28703 /* "long double" or __float80 is "e". */
28704 return "e";
28705 default:
28706 return NULL;
28710 /* For 32-bit code we can save PIC register setup by using the hidden
28711 __stack_chk_fail_local function instead of calling
28712 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28713 register, so it is better to call __stack_chk_fail directly. */
28715 static tree
28716 ix86_stack_protect_fail (void)
28718 return TARGET_64BIT
28719 ? default_external_stack_protect_fail ()
28720 : default_hidden_stack_protect_fail ();
28723 /* Select a format to encode pointers in exception handling data. CODE
28724 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28725 true if the symbol may be affected by dynamic relocations.
28727 ??? All x86 object file formats are capable of representing this.
28728 After all, the relocation needed is the same as for the call insn.
28729 Whether or not a particular assembler allows us to enter such, I
28730 guess we'll have to see. */
28731 int
28732 asm_preferred_eh_data_format (int code, int global)
28734 if (flag_pic)
28736 int type = DW_EH_PE_sdata8;
28737 if (!TARGET_64BIT
28738 || ix86_cmodel == CM_SMALL_PIC
28739 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28740 type = DW_EH_PE_sdata4;
28741 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28743 if (ix86_cmodel == CM_SMALL
28744 || (ix86_cmodel == CM_MEDIUM && code))
28745 return DW_EH_PE_udata4;
28746 return DW_EH_PE_absptr;
28749 /* Expand copysign from SIGN to the positive value ABS_VALUE
28750 storing in RESULT. If MASK is non-null, it is a mask that masks out
28751 the sign bit. */
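/* In other words, RESULT = ABS_VALUE | (SIGN & sign-bit mask); when MASK
   is the "all bits but the sign" mask produced by ix86_expand_sse_fabs,
   its complement is used to pick the sign bit out of SIGN.  */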
28752 static void
28753 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28755 enum machine_mode mode = GET_MODE (sign);
28756 rtx sgn = gen_reg_rtx (mode);
28757 if (mask == NULL_RTX)
28759 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28760 if (!VECTOR_MODE_P (mode))
28762 /* We need to generate a scalar mode mask in this case. */
28763 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28764 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28765 mask = gen_reg_rtx (mode);
28766 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28769 else
28770 mask = gen_rtx_NOT (mode, mask);
28771 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28772 gen_rtx_AND (mode, mask, sign)));
28773 emit_insn (gen_rtx_SET (VOIDmode, result,
28774 gen_rtx_IOR (mode, abs_value, sgn)));
28777 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28778 mask for masking out the sign-bit is stored in *SMASK, if that is
28779 non-null. */
28780 static rtx
28781 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28783 enum machine_mode mode = GET_MODE (op0);
28784 rtx xa, mask;
28786 xa = gen_reg_rtx (mode);
28787 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28788 if (!VECTOR_MODE_P (mode))
28790 /* We need to generate a scalar mode mask in this case. */
28791 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28792 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28793 mask = gen_reg_rtx (mode);
28794 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28796 emit_insn (gen_rtx_SET (VOIDmode, xa,
28797 gen_rtx_AND (mode, op0, mask)));
28799 if (smask)
28800 *smask = mask;
28802 return xa;
28805 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28806 swapping the operands if SWAP_OPERANDS is true. The expanded
28807 code is a forward jump to a newly created label in case the
28808 comparison is true. The generated label rtx is returned. */
28809 static rtx
28810 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28811 bool swap_operands)
28813 rtx label, tmp;
28815 if (swap_operands)
28817 tmp = op0;
28818 op0 = op1;
28819 op1 = tmp;
28822 label = gen_label_rtx ();
28823 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28824 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28825 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28826 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28827 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28828 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28829 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28830 JUMP_LABEL (tmp) = label;
28832 return label;
28835 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28836 using comparison code CODE. Operands are swapped for the comparison if
28837 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28838 static rtx
28839 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28840 bool swap_operands)
28842 enum machine_mode mode = GET_MODE (op0);
28843 rtx mask = gen_reg_rtx (mode);
28845 if (swap_operands)
28847 rtx tmp = op0;
28848 op0 = op1;
28849 op1 = tmp;
28852 if (mode == DFmode)
28853 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28854 gen_rtx_fmt_ee (code, mode, op0, op1)));
28855 else
28856 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28857 gen_rtx_fmt_ee (code, mode, op0, op1)));
28859 return mask;
28862 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28863 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
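/* Adding this constant to a nonnegative value below 2**n and subtracting
   it again forces the value to a whole number: at that magnitude the unit
   in the last place is 1.0, so the addition itself performs the rounding
   (to nearest even under the default rounding mode).  The expanders below
   rely on this trick.  */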
28864 static rtx
28865 ix86_gen_TWO52 (enum machine_mode mode)
28867 REAL_VALUE_TYPE TWO52r;
28868 rtx TWO52;
28870 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28871 TWO52 = const_double_from_real_value (TWO52r, mode);
28872 TWO52 = force_reg (mode, TWO52);
28874 return TWO52;
28877 /* Expand SSE sequence for computing lround from OP1 storing
28878 into OP0. */
28879 void
28880 ix86_expand_lround (rtx op0, rtx op1)
28882 /* C code for the stuff we're doing below:
28883 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28884 return (long)tmp;
28886 enum machine_mode mode = GET_MODE (op1);
28887 const struct real_format *fmt;
28888 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28889 rtx adj;
28891 /* load nextafter (0.5, 0.0) */
28892 fmt = REAL_MODE_FORMAT (mode);
28893 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28894 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
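/* pred_half is the largest representable value below 0.5.  Using it instead
   of 0.5 itself avoids the off-by-one that would occur when op1 is just
   below a half-way point and op1 + 0.5 would round up to the next
   integer.  */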
28896 /* adj = copysign (0.5, op1) */
28897 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28898 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28900 /* adj = op1 + adj */
28901 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28903 /* op0 = (imode)adj */
28904 expand_fix (op0, adj, 0);
28907 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
28908 into OPERAND0. */
28909 void
28910 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28912 /* C code for the stuff we're doing below (for do_floor):
28913 xi = (long)op1;
28914 xi -= (double)xi > op1 ? 1 : 0;
28915 return xi;
28917 enum machine_mode fmode = GET_MODE (op1);
28918 enum machine_mode imode = GET_MODE (op0);
28919 rtx ireg, freg, label, tmp;
28921 /* reg = (long)op1 */
28922 ireg = gen_reg_rtx (imode);
28923 expand_fix (ireg, op1, 0);
28925 /* freg = (double)reg */
28926 freg = gen_reg_rtx (fmode);
28927 expand_float (freg, ireg, 0);
28929 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28930 label = ix86_expand_sse_compare_and_jump (UNLE,
28931 freg, op1, !do_floor);
28932 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28933 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28934 emit_move_insn (ireg, tmp);
28936 emit_label (label);
28937 LABEL_NUSES (label) = 1;
28939 emit_move_insn (op0, ireg);
28942 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28943 result in OPERAND0. */
28944 void
28945 ix86_expand_rint (rtx operand0, rtx operand1)
28947 /* C code for the stuff we're doing below:
28948 xa = fabs (operand1);
28949 if (!isless (xa, 2**52))
28950 return operand1;
28951 xa = xa + 2**52 - 2**52;
28952 return copysign (xa, operand1);
28954 enum machine_mode mode = GET_MODE (operand0);
28955 rtx res, xa, label, TWO52, mask;
28957 res = gen_reg_rtx (mode);
28958 emit_move_insn (res, operand1);
28960 /* xa = abs (operand1) */
28961 xa = ix86_expand_sse_fabs (res, &mask);
28963 /* if (!isless (xa, TWO52)) goto label; */
28964 TWO52 = ix86_gen_TWO52 (mode);
28965 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28967 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28968 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28970 ix86_sse_copysign_to_positive (res, xa, res, mask);
28972 emit_label (label);
28973 LABEL_NUSES (label) = 1;
28975 emit_move_insn (operand0, res);
28978 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28979 into OPERAND0. */
28980 void
28981 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28983 /* C code for the stuff we expand below.
28984 double xa = fabs (x), x2;
28985 if (!isless (xa, TWO52))
28986 return x;
28987 xa = xa + TWO52 - TWO52;
28988 x2 = copysign (xa, x);
28989 Compensate. Floor:
28990 if (x2 > x)
28991 x2 -= 1;
28992 Compensate. Ceil:
28993 if (x2 < x)
28994 x2 -= -1;
28995 return x2;
28997 enum machine_mode mode = GET_MODE (operand0);
28998 rtx xa, TWO52, tmp, label, one, res, mask;
29000 TWO52 = ix86_gen_TWO52 (mode);
29002 /* Temporary for holding the result, initialized to the input
29003 operand to ease control flow. */
29004 res = gen_reg_rtx (mode);
29005 emit_move_insn (res, operand1);
29007 /* xa = abs (operand1) */
29008 xa = ix86_expand_sse_fabs (res, &mask);
29010 /* if (!isless (xa, TWO52)) goto label; */
29011 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29013 /* xa = xa + TWO52 - TWO52; */
29014 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29015 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
29017 /* xa = copysign (xa, operand1) */
29018 ix86_sse_copysign_to_positive (xa, xa, res, mask);
29020 /* generate 1.0 or -1.0 */
29021 one = force_reg (mode,
29022 const_double_from_real_value (do_floor
29023 ? dconst1 : dconstm1, mode));
29025 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
29026 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
29027 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29028 gen_rtx_AND (mode, one, tmp)));
29029 /* We always need to subtract here to preserve signed zero. */
29030 tmp = expand_simple_binop (mode, MINUS,
29031 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29032 emit_move_insn (res, tmp);
29034 emit_label (label);
29035 LABEL_NUSES (label) = 1;
29037 emit_move_insn (operand0, res);
29040 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
29041 into OPERAND0. */
29042 void
29043 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
29045 /* C code for the stuff we expand below.
29046 double xa = fabs (x), x2;
29047 if (!isless (xa, TWO52))
29048 return x;
29049 x2 = (double)(long)x;
29050 Compensate. Floor:
29051 if (x2 > x)
29052 x2 -= 1;
29053 Compensate. Ceil:
29054 if (x2 < x)
29055 x2 += 1;
29056 if (HONOR_SIGNED_ZEROS (mode))
29057 return copysign (x2, x);
29058 return x2;
29060 enum machine_mode mode = GET_MODE (operand0);
29061 rtx xa, xi, TWO52, tmp, label, one, res, mask;
29063 TWO52 = ix86_gen_TWO52 (mode);
29065 /* Temporary for holding the result, initialized to the input
29066 operand to ease control flow. */
29067 res = gen_reg_rtx (mode);
29068 emit_move_insn (res, operand1);
29070 /* xa = abs (operand1) */
29071 xa = ix86_expand_sse_fabs (res, &mask);
29073 /* if (!isless (xa, TWO52)) goto label; */
29074 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29076 /* xa = (double)(long)x */
29077 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29078 expand_fix (xi, res, 0);
29079 expand_float (xa, xi, 0);
29081 /* generate 1.0 */
29082 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29084 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
29085 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
29086 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29087 gen_rtx_AND (mode, one, tmp)));
29088 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
29089 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29090 emit_move_insn (res, tmp);
29092 if (HONOR_SIGNED_ZEROS (mode))
29093 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29095 emit_label (label);
29096 LABEL_NUSES (label) = 1;
29098 emit_move_insn (operand0, res);
29101 /* Expand SSE sequence for computing round from OPERAND1 storing
29102 into OPERAND0. This sequence works without relying on DImode truncation
29103 via cvttsd2siq, which is only available on 64-bit targets. */
29104 void
29105 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
29107 /* C code for the stuff we expand below.
29108 double xa = fabs (x), xa2, x2;
29109 if (!isless (xa, TWO52))
29110 return x;
29111 Using the absolute value and copying back sign makes
29112 -0.0 -> -0.0 correct.
29113 xa2 = xa + TWO52 - TWO52;
29114 Compensate.
29115 dxa = xa2 - xa;
29116 if (dxa <= -0.5)
29117 xa2 += 1;
29118 else if (dxa > 0.5)
29119 xa2 -= 1;
29120 x2 = copysign (xa2, x);
29121 return x2;
29123 enum machine_mode mode = GET_MODE (operand0);
29124 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
29126 TWO52 = ix86_gen_TWO52 (mode);
29128 /* Temporary for holding the result, initialized to the input
29129 operand to ease control flow. */
29130 res = gen_reg_rtx (mode);
29131 emit_move_insn (res, operand1);
29133 /* xa = abs (operand1) */
29134 xa = ix86_expand_sse_fabs (res, &mask);
29136 /* if (!isless (xa, TWO52)) goto label; */
29137 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29139 /* xa2 = xa + TWO52 - TWO52; */
29140 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29141 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
29143 /* dxa = xa2 - xa; */
29144 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
29146 /* generate 0.5, 1.0 and -0.5 */
29147 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
29148 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
29149 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
29150 0, OPTAB_DIRECT);
29152 /* Compensate. */
29153 tmp = gen_reg_rtx (mode);
29154 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
29155 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
29156 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29157 gen_rtx_AND (mode, one, tmp)));
29158 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29159 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
29160 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
29161 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29162 gen_rtx_AND (mode, one, tmp)));
29163 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29165 /* res = copysign (xa2, operand1) */
29166 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
29168 emit_label (label);
29169 LABEL_NUSES (label) = 1;
29171 emit_move_insn (operand0, res);
29174 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29175 into OPERAND0. */
29176 void
29177 ix86_expand_trunc (rtx operand0, rtx operand1)
29179 /* C code for SSE variant we expand below.
29180 double xa = fabs (x), x2;
29181 if (!isless (xa, TWO52))
29182 return x;
29183 x2 = (double)(long)x;
29184 if (HONOR_SIGNED_ZEROS (mode))
29185 return copysign (x2, x);
29186 return x2;
29188 enum machine_mode mode = GET_MODE (operand0);
29189 rtx xa, xi, TWO52, label, res, mask;
29191 TWO52 = ix86_gen_TWO52 (mode);
29193 /* Temporary for holding the result, initialized to the input
29194 operand to ease control flow. */
29195 res = gen_reg_rtx (mode);
29196 emit_move_insn (res, operand1);
29198 /* xa = abs (operand1) */
29199 xa = ix86_expand_sse_fabs (res, &mask);
29201 /* if (!isless (xa, TWO52)) goto label; */
29202 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29204 /* x = (double)(long)x */
29205 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29206 expand_fix (xi, res, 0);
29207 expand_float (res, xi, 0);
29209 if (HONOR_SIGNED_ZEROS (mode))
29210 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29212 emit_label (label);
29213 LABEL_NUSES (label) = 1;
29215 emit_move_insn (operand0, res);
29218 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29219 into OPERAND0. */
29220 void
29221 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
29223 enum machine_mode mode = GET_MODE (operand0);
29224 rtx xa, mask, TWO52, label, one, res, smask, tmp;
29226 /* C code for SSE variant we expand below.
29227 double xa = fabs (x), x2;
29228 if (!isless (xa, TWO52))
29229 return x;
29230 xa2 = xa + TWO52 - TWO52;
29231 Compensate:
29232 if (xa2 > xa)
29233 xa2 -= 1.0;
29234 x2 = copysign (xa2, x);
29235 return x2;
29238 TWO52 = ix86_gen_TWO52 (mode);
29240 /* Temporary for holding the result, initialized to the input
29241 operand to ease control flow. */
29242 res = gen_reg_rtx (mode);
29243 emit_move_insn (res, operand1);
29245 /* xa = abs (operand1) */
29246 xa = ix86_expand_sse_fabs (res, &smask);
29248 /* if (!isless (xa, TWO52)) goto label; */
29249 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29251 /* res = xa + TWO52 - TWO52; */
29252 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29253 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29254 emit_move_insn (res, tmp);
29256 /* generate 1.0 */
29257 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29259 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29260 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29261 emit_insn (gen_rtx_SET (VOIDmode, mask,
29262 gen_rtx_AND (mode, mask, one)));
29263 tmp = expand_simple_binop (mode, MINUS,
29264 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29265 emit_move_insn (res, tmp);
29267 /* res = copysign (res, operand1) */
29268 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29270 emit_label (label);
29271 LABEL_NUSES (label) = 1;
29273 emit_move_insn (operand0, res);
29276 /* Expand SSE sequence for computing round from OPERAND1 storing
29277 into OPERAND0. */
29278 void
29279 ix86_expand_round (rtx operand0, rtx operand1)
29281 /* C code for the stuff we're doing below:
29282 double xa = fabs (x);
29283 if (!isless (xa, TWO52))
29284 return x;
29285 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29286 return copysign (xa, x);
29288 enum machine_mode mode = GET_MODE (operand0);
29289 rtx res, TWO52, xa, label, xi, half, mask;
29290 const struct real_format *fmt;
29291 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29293 /* Temporary for holding the result, initialized to the input
29294 operand to ease control flow. */
29295 res = gen_reg_rtx (mode);
29296 emit_move_insn (res, operand1);
29298 TWO52 = ix86_gen_TWO52 (mode);
29299 xa = ix86_expand_sse_fabs (res, &mask);
29300 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29302 /* load nextafter (0.5, 0.0) */
29303 fmt = REAL_MODE_FORMAT (mode);
29304 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29305 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29307 /* xa = xa + 0.5 */
29308 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29309 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29311 /* xa = (double)(int64_t)xa */
29312 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29313 expand_fix (xi, xa, 0);
29314 expand_float (xa, xi, 0);
29316 /* res = copysign (xa, operand1) */
29317 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29319 emit_label (label);
29320 LABEL_NUSES (label) = 1;
29322 emit_move_insn (operand0, res);
29326 /* Table of valid machine attributes. */
29327 static const struct attribute_spec ix86_attribute_table[] =
29329 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29330 /* Stdcall attribute says callee is responsible for popping arguments
29331 if they are not variable. */
29332 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29333 /* Fastcall attribute says callee is responsible for popping arguments
29334 if they are not variable. */
29335 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29336 /* Thiscall attribute says callee is responsible for popping arguments
29337 if they are not variable. */
29338 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29339 /* Cdecl attribute says the callee is a normal C declaration */
29340 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29341 /* Regparm attribute specifies how many integer arguments are to be
29342 passed in registers. */
29343 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29344 /* Sseregparm attribute says we are using x86_64 calling conventions
29345 for FP arguments. */
29346 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29347 /* force_align_arg_pointer says this function realigns the stack at entry. */
29348 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29349 false, true, true, ix86_handle_cconv_attribute },
29350 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29351 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29352 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29353 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29354 #endif
29355 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29356 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29357 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29358 SUBTARGET_ATTRIBUTE_TABLE,
29359 #endif
29360 /* ms_abi and sysv_abi calling convention function attributes. */
29361 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29362 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29363 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29364 /* End element. */
29365 { NULL, 0, 0, false, false, false, NULL }
29368 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29369 static int
29370 ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost)
29372 switch (type_of_cost)
29374 case scalar_stmt:
29375 return ix86_cost->scalar_stmt_cost;
29377 case scalar_load:
29378 return ix86_cost->scalar_load_cost;
29380 case scalar_store:
29381 return ix86_cost->scalar_store_cost;
29383 case vector_stmt:
29384 return ix86_cost->vec_stmt_cost;
29386 case vector_load:
29387 return ix86_cost->vec_align_load_cost;
29389 case vector_store:
29390 return ix86_cost->vec_store_cost;
29392 case vec_to_scalar:
29393 return ix86_cost->vec_to_scalar_cost;
29395 case scalar_to_vec:
29396 return ix86_cost->scalar_to_vec_cost;
29398 case unaligned_load:
29399 return ix86_cost->vec_unalign_load_cost;
29401 case cond_branch_taken:
29402 return ix86_cost->cond_taken_branch_cost;
29404 case cond_branch_not_taken:
29405 return ix86_cost->cond_not_taken_branch_cost;
29407 case vec_perm:
29408 return 1;
29410 default:
29411 gcc_unreachable ();
29416 /* Implement targetm.vectorize.builtin_vec_perm. */
29418 static tree
29419 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29421 tree itype = TREE_TYPE (vec_type);
29422 bool u = TYPE_UNSIGNED (itype);
29423 enum machine_mode vmode = TYPE_MODE (vec_type);
29424 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29425 bool ok = TARGET_SSE2;
29427 switch (vmode)
29429 case V4DFmode:
29430 ok = TARGET_AVX;
29431 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29432 goto get_di;
29433 case V2DFmode:
29434 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29435 get_di:
29436 itype = ix86_get_builtin_type (IX86_BT_DI);
29437 break;
29439 case V8SFmode:
29440 ok = TARGET_AVX;
29441 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29442 goto get_si;
29443 case V4SFmode:
29444 ok = TARGET_SSE;
29445 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29446 get_si:
29447 itype = ix86_get_builtin_type (IX86_BT_SI);
29448 break;
29450 case V2DImode:
29451 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29452 break;
29453 case V4SImode:
29454 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29455 break;
29456 case V8HImode:
29457 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29458 break;
29459 case V16QImode:
29460 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29461 break;
29462 default:
29463 ok = false;
29464 break;
29467 if (!ok)
29468 return NULL_TREE;
29470 *mask_type = itype;
29471 return ix86_builtins[(int) fcode];
29474 /* Return a vector mode with twice as many elements as VMODE. */
29475 /* ??? Consider moving this to a table generated by genmodes.c. */
29477 static enum machine_mode
29478 doublesize_vector_mode (enum machine_mode vmode)
29480 switch (vmode)
29482 case V2SFmode: return V4SFmode;
29483 case V1DImode: return V2DImode;
29484 case V2SImode: return V4SImode;
29485 case V4HImode: return V8HImode;
29486 case V8QImode: return V16QImode;
29488 case V2DFmode: return V4DFmode;
29489 case V4SFmode: return V8SFmode;
29490 case V2DImode: return V4DImode;
29491 case V4SImode: return V8SImode;
29492 case V8HImode: return V16HImode;
29493 case V16QImode: return V32QImode;
29495 case V4DFmode: return V8DFmode;
29496 case V8SFmode: return V16SFmode;
29497 case V4DImode: return V8DImode;
29498 case V8SImode: return V16SImode;
29499 case V16HImode: return V32HImode;
29500 case V32QImode: return V64QImode;
29502 default:
29503 gcc_unreachable ();
29507 /* Construct (set target (vec_select op0 (parallel perm))) and
29508 return true if that's a valid instruction in the active ISA. */
29510 static bool
29511 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29513 rtx rperm[MAX_VECT_LEN], x;
29514 unsigned i;
29516 for (i = 0; i < nelt; ++i)
29517 rperm[i] = GEN_INT (perm[i]);
29519 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29520 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29521 x = gen_rtx_SET (VOIDmode, target, x);
29523 x = emit_insn (x);
29524 if (recog_memoized (x) < 0)
29526 remove_insn (x);
29527 return false;
29529 return true;
29532 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29534 static bool
29535 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29536 const unsigned char *perm, unsigned nelt)
29538 enum machine_mode v2mode;
29539 rtx x;
29541 v2mode = doublesize_vector_mode (GET_MODE (op0));
29542 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29543 return expand_vselect (target, x, perm, nelt);
29546 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29547 in terms of blendp[sd] / pblendw / pblendvb. */
29549 static bool
29550 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29552 enum machine_mode vmode = d->vmode;
29553 unsigned i, mask, nelt = d->nelt;
29554 rtx target, op0, op1, x;
29556 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29557 return false;
29558 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29559 return false;
29561 /* This is a blend, not a permute. Elements must stay in their
29562 respective lanes. */
29563 for (i = 0; i < nelt; ++i)
29565 unsigned e = d->perm[i];
29566 if (!(e == i || e == i + nelt))
29567 return false;
29570 if (d->testing_p)
29571 return true;
29573 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29574 decision should be extracted elsewhere, so that we only try that
29575 sequence once all budget==3 options have been tried. */
29577 /* For bytes, see if bytes move in pairs so we can use pblendw with
29578 an immediate argument, rather than pblendvb with a vector argument. */
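/* E.g. the V16QI blend { 0 1 18 19 4 5 22 23 8 9 26 27 12 13 30 31 } moves
   bytes in word-sized pairs, so it can be emitted as a pblendw with
   immediate 0xaa on the V8HI view of the operands.  */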
29579 if (vmode == V16QImode)
29581 bool pblendw_ok = true;
29582 for (i = 0; i < 16 && pblendw_ok; i += 2)
29583 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29585 if (!pblendw_ok)
29587 rtx rperm[16], vperm;
29589 for (i = 0; i < nelt; ++i)
29590 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29592 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29593 vperm = force_reg (V16QImode, vperm);
29595 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29596 return true;
29600 target = d->target;
29601 op0 = d->op0;
29602 op1 = d->op1;
29603 mask = 0;
29605 switch (vmode)
29607 case V4DFmode:
29608 case V8SFmode:
29609 case V2DFmode:
29610 case V4SFmode:
29611 case V8HImode:
29612 for (i = 0; i < nelt; ++i)
29613 mask |= (d->perm[i] >= nelt) << i;
29614 break;
29616 case V2DImode:
29617 for (i = 0; i < 2; ++i)
29618 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29619 goto do_subreg;
29621 case V4SImode:
29622 for (i = 0; i < 4; ++i)
29623 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29624 goto do_subreg;
29626 case V16QImode:
29627 for (i = 0; i < 8; ++i)
29628 mask |= (d->perm[i * 2] >= 16) << i;
29630 do_subreg:
29631 vmode = V8HImode;
29632 target = gen_lowpart (vmode, target);
29633 op0 = gen_lowpart (vmode, op0);
29634 op1 = gen_lowpart (vmode, op1);
29635 break;
29637 default:
29638 gcc_unreachable ();
29641 /* This matches five different patterns with the different modes. */
29642 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29643 x = gen_rtx_SET (VOIDmode, target, x);
29644 emit_insn (x);
29646 return true;
29649 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29650 in terms of the variable form of vpermilps.
29652 Note that we will have already failed the immediate-operand form of
29653 vpermilps, which requires that the high and low part shuffles be
29654 identical; the variable form doesn't require that. */
29656 static bool
29657 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29659 rtx rperm[8], vperm;
29660 unsigned i;
29662 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29663 return false;
29665 /* We can only permute within the 128-bit lane. */
29666 for (i = 0; i < 8; ++i)
29668 unsigned e = d->perm[i];
29669 if (i < 4 ? e >= 4 : e < 4)
29670 return false;
29673 if (d->testing_p)
29674 return true;
29676 for (i = 0; i < 8; ++i)
29678 unsigned e = d->perm[i];
29680 /* Within each 128-bit lane, the elements of op0 are numbered
29681 from 0 and the elements of op1 are numbered from 4. */
29682 if (e >= 8 + 4)
29683 e -= 8;
29684 else if (e >= 4)
29685 e -= 4;
29687 rperm[i] = GEN_INT (e);
29690 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29691 vperm = force_reg (V8SImode, vperm);
29692 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29694 return true;
29697 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29698 in terms of pshufb or vpperm. */
29700 static bool
29701 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29703 unsigned i, nelt, eltsz;
29704 rtx rperm[16], vperm, target, op0, op1;
29706 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29707 return false;
29708 if (GET_MODE_SIZE (d->vmode) != 16)
29709 return false;
29711 if (d->testing_p)
29712 return true;
29714 nelt = d->nelt;
29715 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
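/* Widen each element index to ELTSZ consecutive byte selectors; e.g. for a
   V4SI permutation, element index 2 expands to the byte selectors
   8, 9, 10, 11.  */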
29717 for (i = 0; i < nelt; ++i)
29719 unsigned j, e = d->perm[i];
29720 for (j = 0; j < eltsz; ++j)
29721 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29724 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29725 vperm = force_reg (V16QImode, vperm);
29727 target = gen_lowpart (V16QImode, d->target);
29728 op0 = gen_lowpart (V16QImode, d->op0);
29729 if (d->op0 == d->op1)
29730 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29731 else
29733 op1 = gen_lowpart (V16QImode, d->op1);
29734 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29737 return true;
29740 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29741 in a single instruction. */
29743 static bool
29744 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29746 unsigned i, nelt = d->nelt;
29747 unsigned char perm2[MAX_VECT_LEN];
29749 /* Check plain VEC_SELECT first, because AVX has instructions that could
29750 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29751 input where SEL+CONCAT may not. */
29752 if (d->op0 == d->op1)
29754 int mask = nelt - 1;
29756 for (i = 0; i < nelt; i++)
29757 perm2[i] = d->perm[i] & mask;
29759 if (expand_vselect (d->target, d->op0, perm2, nelt))
29760 return true;
29762 /* There are plenty of patterns in sse.md that are written for
29763 SEL+CONCAT and are not replicated for a single op. Perhaps
29764 that should be changed, to avoid the nastiness here. */
29766 /* Recognize interleave style patterns, which means incrementing
29767 every other permutation operand. */
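/* E.g. a single-operand V4SF permutation { 0 0 1 1 } becomes { 0 4 1 5 }
   on the concatenation of the operand with itself, which the unpcklps
   pattern can match.  */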
29768 for (i = 0; i < nelt; i += 2)
29770 perm2[i] = d->perm[i] & mask;
29771 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29773 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29774 return true;
29776 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29777 if (nelt >= 4)
29779 for (i = 0; i < nelt; i += 4)
29781 perm2[i + 0] = d->perm[i + 0] & mask;
29782 perm2[i + 1] = d->perm[i + 1] & mask;
29783 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29784 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29787 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29788 return true;
29792 /* Finally, try the fully general two operand permute. */
29793 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29794 return true;
29796 /* Recognize interleave style patterns with reversed operands. */
29797 if (d->op0 != d->op1)
29799 for (i = 0; i < nelt; ++i)
29801 unsigned e = d->perm[i];
29802 if (e >= nelt)
29803 e -= nelt;
29804 else
29805 e += nelt;
29806 perm2[i] = e;
29809 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29810 return true;
29813 /* Try the SSE4.1 blend variable merge instructions. */
29814 if (expand_vec_perm_blend (d))
29815 return true;
29817 /* Try one of the AVX vpermil variable permutations. */
29818 if (expand_vec_perm_vpermil (d))
29819 return true;
29821 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29822 if (expand_vec_perm_pshufb (d))
29823 return true;
29825 return false;
29828 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29829 in terms of a pair of pshuflw + pshufhw instructions. */
29831 static bool
29832 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29834 unsigned char perm2[MAX_VECT_LEN];
29835 unsigned i;
29836 bool ok;
29838 if (d->vmode != V8HImode || d->op0 != d->op1)
29839 return false;
29841 /* The two permutations only operate in 64-bit lanes. */
29842 for (i = 0; i < 4; ++i)
29843 if (d->perm[i] >= 4)
29844 return false;
29845 for (i = 4; i < 8; ++i)
29846 if (d->perm[i] < 4)
29847 return false;
29849 if (d->testing_p)
29850 return true;
29852 /* Emit the pshuflw. */
29853 memcpy (perm2, d->perm, 4);
29854 for (i = 4; i < 8; ++i)
29855 perm2[i] = i;
29856 ok = expand_vselect (d->target, d->op0, perm2, 8);
29857 gcc_assert (ok);
29859 /* Emit the pshufhw. */
29860 memcpy (perm2 + 4, d->perm + 4, 4);
29861 for (i = 0; i < 4; ++i)
29862 perm2[i] = i;
29863 ok = expand_vselect (d->target, d->target, perm2, 8);
29864 gcc_assert (ok);
29866 return true;
29869 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29870 the permutation using the SSSE3 palignr instruction. This succeeds
29871 when all of the elements in PERM fit within one vector and we merely
29872 need to shift them down so that a single vector permutation has a
29873 chance to succeed. */
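/* For instance, a V8HI permutation { 3 4 5 6 7 8 9 10 } has min = 3, so a
   palignr by 6 bytes leaves the identity permutation { 0 ... 7 } and no
   further shuffle is needed.  */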
29875 static bool
29876 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29878 unsigned i, nelt = d->nelt;
29879 unsigned min, max;
29880 bool in_order, ok;
29881 rtx shift;
29883 /* Even with AVX, palignr only operates on 128-bit vectors. */
29884 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29885 return false;
29887 min = nelt, max = 0;
29888 for (i = 0; i < nelt; ++i)
29890 unsigned e = d->perm[i];
29891 if (e < min)
29892 min = e;
29893 if (e > max)
29894 max = e;
29896 if (min == 0 || max - min >= nelt)
29897 return false;
29899 /* Given that we have SSSE3, we know we'll be able to implement the
29900 single operand permutation after the palignr with pshufb. */
29901 if (d->testing_p)
29902 return true;
29904 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29905 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29906 gen_lowpart (TImode, d->op1),
29907 gen_lowpart (TImode, d->op0), shift));
29909 d->op0 = d->op1 = d->target;
29911 in_order = true;
29912 for (i = 0; i < nelt; ++i)
29914 unsigned e = d->perm[i] - min;
29915 if (e != i)
29916 in_order = false;
29917 d->perm[i] = e;
29920 /* Test for the degenerate case where the alignment by itself
29921 produces the desired permutation. */
29922 if (in_order)
29923 return true;
29925 ok = expand_vec_perm_1 (d);
29926 gcc_assert (ok);
29928 return ok;
29931 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29932 a two vector permutation into a single vector permutation by using
29933 an interleave operation to merge the vectors. */
29935 static bool
29936 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29938 struct expand_vec_perm_d dremap, dfinal;
29939 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29940 unsigned contents, h1, h2, h3, h4;
29941 unsigned char remap[2 * MAX_VECT_LEN];
29942 rtx seq;
29943 bool ok;
29945 if (d->op0 == d->op1)
29946 return false;
29948 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29949 lanes. We can use similar techniques with the vperm2f128 instruction,
29950 but it requires slightly different logic. */
29951 if (GET_MODE_SIZE (d->vmode) != 16)
29952 return false;
29954 /* Examine from whence the elements come. */
29955 contents = 0;
29956 for (i = 0; i < nelt; ++i)
29957 contents |= 1u << d->perm[i];
29959 /* Split the two input vectors into 4 halves. */
29960 h1 = (1u << nelt2) - 1;
29961 h2 = h1 << nelt2;
29962 h3 = h2 << nelt2;
29963 h4 = h3 << nelt2;
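/* For V4SF (nelt == 4, nelt2 == 2): h1 = 0x03 covers the low half of op0,
   h2 = 0x0c its high half, h3 = 0x30 the low half of op1 and h4 = 0xc0 its
   high half.  */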
29965 memset (remap, 0xff, sizeof (remap));
29966 dremap = *d;
29968 /* If the elements come only from the low halves, use interleave low,
29969 and similarly for interleave high. If the elements come from mismatched
29970 halves, we can use shufps for V4SF/V4SI or do a DImode shuffle. */
29971 if ((contents & (h1 | h3)) == contents)
29973 for (i = 0; i < nelt2; ++i)
29975 remap[i] = i * 2;
29976 remap[i + nelt] = i * 2 + 1;
29977 dremap.perm[i * 2] = i;
29978 dremap.perm[i * 2 + 1] = i + nelt;
29981 else if ((contents & (h2 | h4)) == contents)
29983 for (i = 0; i < nelt2; ++i)
29985 remap[i + nelt2] = i * 2;
29986 remap[i + nelt + nelt2] = i * 2 + 1;
29987 dremap.perm[i * 2] = i + nelt2;
29988 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29991 else if ((contents & (h1 | h4)) == contents)
29993 for (i = 0; i < nelt2; ++i)
29995 remap[i] = i;
29996 remap[i + nelt + nelt2] = i + nelt2;
29997 dremap.perm[i] = i;
29998 dremap.perm[i + nelt2] = i + nelt + nelt2;
30000 if (nelt != 4)
30002 dremap.vmode = V2DImode;
30003 dremap.nelt = 2;
30004 dremap.perm[0] = 0;
30005 dremap.perm[1] = 3;
30008 else if ((contents & (h2 | h3)) == contents)
30010 for (i = 0; i < nelt2; ++i)
30012 remap[i + nelt2] = i;
30013 remap[i + nelt] = i + nelt2;
30014 dremap.perm[i] = i + nelt2;
30015 dremap.perm[i + nelt2] = i + nelt;
30017 if (nelt != 4)
30019 dremap.vmode = V2DImode;
30020 dremap.nelt = 2;
30021 dremap.perm[0] = 1;
30022 dremap.perm[1] = 2;
30025 else
30026 return false;
30028 /* Use the remapping array set up above to move the elements from their
30029 swizzled locations into their final destinations. */
30030 dfinal = *d;
30031 for (i = 0; i < nelt; ++i)
30033 unsigned e = remap[d->perm[i]];
30034 gcc_assert (e < nelt);
30035 dfinal.perm[i] = e;
30037 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
30038 dfinal.op1 = dfinal.op0;
30039 dremap.target = dfinal.op0;
30041 /* Test if the final remap can be done with a single insn. For V4SFmode or
30042 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
30043 start_sequence ();
30044 ok = expand_vec_perm_1 (&dfinal);
30045 seq = get_insns ();
30046 end_sequence ();
30048 if (!ok)
30049 return false;
30051 if (dremap.vmode != dfinal.vmode)
30053 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
30054 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
30055 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
30058 ok = expand_vec_perm_1 (&dremap);
30059 gcc_assert (ok);
30061 emit_insn (seq);
30062 return true;
30065 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
30066 permutation with two pshufb insns and an ior. We should have already
30067 failed all two instruction sequences. */
30069 static bool
30070 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
30072 rtx rperm[2][16], vperm, l, h, op, m128;
30073 unsigned int i, nelt, eltsz;
30075 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
30076 return false;
30077 gcc_assert (d->op0 != d->op1);
30079 nelt = d->nelt;
30080 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
30082 /* Generate two permutation masks. If the required element is within
30083 the given vector it is shuffled into the proper lane. If the required
30084 element is in the other vector, force a zero into the lane by setting
30085 bit 7 in the permutation mask. */
30086 m128 = GEN_INT (-128);
30087 for (i = 0; i < nelt; ++i)
30089 unsigned j, e = d->perm[i];
30090 unsigned which = (e >= nelt);
30091 if (e >= nelt)
30092 e -= nelt;
30094 for (j = 0; j < eltsz; ++j)
30096 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
30097 rperm[1-which][i*eltsz + j] = m128;
30101 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
30102 vperm = force_reg (V16QImode, vperm);
30104 l = gen_reg_rtx (V16QImode);
30105 op = gen_lowpart (V16QImode, d->op0);
30106 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
30108 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
30109 vperm = force_reg (V16QImode, vperm);
30111 h = gen_reg_rtx (V16QImode);
30112 op = gen_lowpart (V16QImode, d->op1);
30113 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
30115 op = gen_lowpart (V16QImode, d->target);
30116 emit_insn (gen_iorv16qi3 (op, l, h));
30118 return true;
30121 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
30122 and extract-odd permutations. */
30124 static bool
30125 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
30127 rtx t1, t2, t3, t4;
30129 switch (d->vmode)
30131 case V4DFmode:
30132 t1 = gen_reg_rtx (V4DFmode);
30133 t2 = gen_reg_rtx (V4DFmode);
30135 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
30136 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
30137 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
30139 /* Now an unpck[lh]pd will produce the result required. */
30140 if (odd)
30141 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
30142 else
30143 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
30144 emit_insn (t3);
30145 break;
30147 case V8SFmode:
30149 static const unsigned char perm1[8] = { 0, 2, 1, 3, 5, 6, 5, 7 };
30150 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
30151 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
30153 t1 = gen_reg_rtx (V8SFmode);
30154 t2 = gen_reg_rtx (V8SFmode);
30155 t3 = gen_reg_rtx (V8SFmode);
30156 t4 = gen_reg_rtx (V8SFmode);
30158 /* Shuffle within the 128-bit lanes to produce:
30159 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
30160 expand_vselect (t1, d->op0, perm1, 8);
30161 expand_vselect (t2, d->op1, perm1, 8);
30163 /* Shuffle the lanes around to produce:
30164 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
30165 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
30166 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
30168 /* Now a vpermil2p will produce the result required. */
30169 /* ??? The vpermil2p requires a vector constant. Another option
30170 is a unpck[lh]ps to merge the two vectors to produce
30171 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
30172 vpermilps to get the elements into the final order. */
30173 d->op0 = t3;
30174 d->op1 = t4;
30175 memcpy (d->perm, odd ? permo : perme, 8);
30176 expand_vec_perm_vpermil (d);
30178 break;
30180 case V2DFmode:
30181 case V4SFmode:
30182 case V2DImode:
30183 case V4SImode:
30184 /* These are always directly implementable by expand_vec_perm_1. */
30185 gcc_unreachable ();
30187 case V8HImode:
30188 if (TARGET_SSSE3)
30189 return expand_vec_perm_pshufb2 (d);
30190 else
30192 /* We need 2*log2(N)-1 operations to achieve odd/even
30193 with interleave. */
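/* Roughly, for op0 = { a0 ... a7 } and op1 = { b0 ... b7 } the five
   interleaves below proceed as
     { a0 b0 a1 b1 a2 b2 a3 b3 }   { a4 b4 a5 b5 a6 b6 a7 b7 }
     { a0 a4 b0 b4 a1 a5 b1 b5 }   { a2 a6 b2 b6 a3 a7 b3 b7 }
     { a0 a2 a4 a6 b0 b2 b4 b6 }  or  { a1 a3 a5 a7 b1 b3 b5 b7 }
   yielding the even or odd elements in the final step.  */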
30194 t1 = gen_reg_rtx (V8HImode);
30195 t2 = gen_reg_rtx (V8HImode);
30196 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
30197 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
30198 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
30199 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
30200 if (odd)
30201 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
30202 else
30203 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
30204 emit_insn (t3);
30206 break;
30208 case V16QImode:
30209 if (TARGET_SSSE3)
30210 return expand_vec_perm_pshufb2 (d);
30211 else
30213 t1 = gen_reg_rtx (V16QImode);
30214 t2 = gen_reg_rtx (V16QImode);
30215 t3 = gen_reg_rtx (V16QImode);
30216 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
30217 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
30218 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
30219 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
30220 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
30221 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
30222 if (odd)
30223 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
30224 else
30225 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
30226 emit_insn (t3);
30228 break;
30230 default:
30231 gcc_unreachable ();
30234 return true;
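/* A scalar model of the interleave-based odd/even extraction used for the
   V8HImode case above (an illustration, not part of i386.c; element width
   is ignored, only the ordering matters).  Five interleaves, i.e.
   2*log2(8)-1 operations, turn OP0 and OP1 into the even or odd elements
   of their concatenation, matching the emitted vec_interleave_low/high
   sequence.  */
static void
interleave8 (const int *a, const int *b, int high, int *out)
{
  int i, base = high ? 4 : 0;
  for (i = 0; i < 4; i++)
    {
      out[2 * i] = a[base + i];
      out[2 * i + 1] = b[base + i];
    }
}

static void
extract_even_odd_model (const int op0[8], const int op1[8],
                        int odd, int out[8])
{
  int t1[8], t2[8], tgt[8], tmp[8];

  interleave8 (op0, op1, 1, t1);     /* vec_interleave_high (op0, op1) */
  interleave8 (op0, op1, 0, tgt);    /* vec_interleave_low  (op0, op1) */
  interleave8 (tgt, t1, 1, t2);
  interleave8 (tgt, t1, 0, tmp);
  interleave8 (tmp, t2, odd, out);   /* high for odd, low for even */
}
/* With op0 = { 0..7 } and op1 = { 8..15 }, odd == 0 yields
   { 0 2 4 6 8 10 12 14 } and odd == 1 yields { 1 3 5 7 9 11 13 15 }.  */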
30237 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30238 extract-even and extract-odd permutations. */
30240 static bool
30241 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
30243 unsigned i, odd, nelt = d->nelt;
30245 odd = d->perm[0];
30246 if (odd != 0 && odd != 1)
30247 return false;
30249 for (i = 1; i < nelt; ++i)
30250 if (d->perm[i] != 2 * i + odd)
30251 return false;
30253 return expand_vec_perm_even_odd_1 (d, odd);
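/* Concrete masks matched by expand_vec_perm_even_odd, for illustration
   (not part of i386.c): with nelt == 4 the recognized masks are exactly
   those with perm[i] == 2*i + odd, i.e. the two selectors below, where
   indices 4..7 refer to elements of the second operand.  */
static const unsigned char v4_extract_even[4] = { 0, 2, 4, 6 };
static const unsigned char v4_extract_odd[4]  = { 1, 3, 5, 7 };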
30256 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
30257 permutations. We assume that expand_vec_perm_1 has already failed. */
30259 static bool
30260 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
30262 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
30263 enum machine_mode vmode = d->vmode;
30264 unsigned char perm2[4];
30265 rtx op0 = d->op0;
30266 bool ok;
30268 switch (vmode)
30270 case V4DFmode:
30271 case V8SFmode:
30272 /* These are special-cased in sse.md so that we can optionally
30273 use the vbroadcast instruction. They expand to two insns
30274 if the input happens to be in a register. */
30275 gcc_unreachable ();
30277 case V2DFmode:
30278 case V2DImode:
30279 case V4SFmode:
30280 case V4SImode:
30281 /* These are always implementable using standard shuffle patterns. */
30282 gcc_unreachable ();
30284 case V8HImode:
30285 case V16QImode:
30286 /* These can be implemented via interleave. We save one insn by
30287 stopping once we have promoted to V4SImode and then using pshufd. */
30290 optab otab = vec_interleave_low_optab;
30292 if (elt >= nelt2)
30294 otab = vec_interleave_high_optab;
30295 elt -= nelt2;
30297 nelt2 /= 2;
30299 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
30300 vmode = get_mode_wider_vector (vmode);
30301 op0 = gen_lowpart (vmode, op0);
30303 while (vmode != V4SImode);
30305 memset (perm2, elt, 4);
30306 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
30307 gcc_assert (ok);
30308 return true;
30310 default:
30311 gcc_unreachable ();
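/* A scalar sketch of the widening trick used for V8HImode/V16QImode above
   (illustration only, not part of i386.c): interleaving a vector with
   itself duplicates each element, so the byte to be broadcast can be
   promoted to a 16-bit and then a 32-bit value made of that byte
   repeated; once the data is V4SImode-sized, a single pshufd-style splat
   of that 32-bit lane finishes the broadcast.  */
#include <stdint.h>

static uint32_t
widen_byte_for_broadcast (uint8_t b)
{
  uint16_t w = (uint16_t) ((uint16_t) b << 8 | b);  /* first self-interleave  */
  return (uint32_t) w << 16 | w;                    /* second self-interleave */
}
/* Splatting the returned value into all four 32-bit lanes of a 128-bit
   register leaves B in all sixteen byte lanes.  */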
30315 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30316 broadcast permutations. */
30318 static bool
30319 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
30321 unsigned i, elt, nelt = d->nelt;
30323 if (d->op0 != d->op1)
30324 return false;
30326 elt = d->perm[0];
30327 for (i = 1; i < nelt; ++i)
30328 if (d->perm[i] != elt)
30329 return false;
30331 return expand_vec_perm_broadcast_1 (d);
30334 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
30335 With all of the interface bits taken care of, perform the expansion
30336 in D and return true on success. */
30338 static bool
30339 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
30341 /* Try a single instruction expansion. */
30342 if (expand_vec_perm_1 (d))
30343 return true;
30345 /* Try sequences of two instructions. */
30347 if (expand_vec_perm_pshuflw_pshufhw (d))
30348 return true;
30350 if (expand_vec_perm_palignr (d))
30351 return true;
30353 if (expand_vec_perm_interleave2 (d))
30354 return true;
30356 if (expand_vec_perm_broadcast (d))
30357 return true;
30359 /* Try sequences of three instructions. */
30361 if (expand_vec_perm_pshufb2 (d))
30362 return true;
30364 /* ??? Look for narrow permutations whose element orderings would
30365 allow the promotion to a wider mode. */
30367 /* ??? Look for sequences of interleave or a wider permute that place
30368 the data into the correct lanes for a half-vector shuffle like
30369 pshuf[lh]w or vpermilps. */
30371 /* ??? Look for sequences of interleave that produce the desired results.
30372 The combinatorics of punpck[lh] get pretty ugly... */
30374 if (expand_vec_perm_even_odd (d))
30375 return true;
30377 return false;
30380 /* Extract the values from the vector CST into the permutation array in D.
30381 Return 0 on error, 1 if all values from the permutation come from the
30382 first vector, 2 if all values from the second vector, and 3 otherwise. */
30384 static int
30385 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
30387 tree list = TREE_VECTOR_CST_ELTS (cst);
30388 unsigned i, nelt = d->nelt;
30389 int ret = 0;
30391 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
30393 unsigned HOST_WIDE_INT e;
30395 if (!host_integerp (TREE_VALUE (list), 1))
30396 return 0;
30397 e = tree_low_cst (TREE_VALUE (list), 1);
30398 if (e >= 2 * nelt)
30399 return 0;
30401 ret |= (e < nelt ? 1 : 2);
30402 d->perm[i] = e;
30404 gcc_assert (list == NULL);
30406 /* If all elements are from the second vector, fold them to the first. */
30407 if (ret == 2)
30408 for (i = 0; i < nelt; ++i)
30409 d->perm[i] -= nelt;
30411 return ret;
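/* A scalar model of what the permutation constant means and of the return
   value convention used by extract_vec_perm_cst above (illustration, not
   part of i386.c): selectors below NELT pick from the first operand,
   selectors in [NELT, 2*NELT) pick from the second, and the return value
   is 1 if only the first vector is referenced, 2 if only the second,
   3 if both, and 0 on an out-of-range selector.  */
static int
classify_and_apply_perm (const unsigned *sel, unsigned nelt,
                         const int *op0, const int *op1, int *res)
{
  unsigned i;
  int ret = 0;

  for (i = 0; i < nelt; ++i)
    {
      if (sel[i] >= 2 * nelt)
        return 0;
      ret |= sel[i] < nelt ? 1 : 2;
      res[i] = sel[i] < nelt ? op0[sel[i]] : op1[sel[i] - nelt];
    }
  return ret;
}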
30414 static rtx
30415 ix86_expand_vec_perm_builtin (tree exp)
30417 struct expand_vec_perm_d d;
30418 tree arg0, arg1, arg2;
30420 arg0 = CALL_EXPR_ARG (exp, 0);
30421 arg1 = CALL_EXPR_ARG (exp, 1);
30422 arg2 = CALL_EXPR_ARG (exp, 2);
30424 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30425 d.nelt = GET_MODE_NUNITS (d.vmode);
30426 d.testing_p = false;
30427 gcc_assert (VECTOR_MODE_P (d.vmode));
30429 if (TREE_CODE (arg2) != VECTOR_CST)
30431 error_at (EXPR_LOCATION (exp),
30432 "vector permutation requires vector constant");
30433 goto exit_error;
30436 switch (extract_vec_perm_cst (&d, arg2))
30438 default:
30439 gcc_unreachable();
30441 case 0:
30442 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30443 goto exit_error;
30445 case 3:
30446 if (!operand_equal_p (arg0, arg1, 0))
30448 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30449 d.op0 = force_reg (d.vmode, d.op0);
30450 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30451 d.op1 = force_reg (d.vmode, d.op1);
30452 break;
30455 /* The elements of PERM do not suggest that only the first operand
30456 is used; rather, both operands are referenced, but they are identical.
30457 Allow easier matching of the permutation by folding it into the
30458 single input vector. */
30460 unsigned i, nelt = d.nelt;
30461 for (i = 0; i < nelt; ++i)
30462 if (d.perm[i] >= nelt)
30463 d.perm[i] -= nelt;
30465 /* FALLTHRU */
30467 case 1:
30468 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30469 d.op0 = force_reg (d.vmode, d.op0);
30470 d.op1 = d.op0;
30471 break;
30473 case 2:
30474 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30475 d.op0 = force_reg (d.vmode, d.op0);
30476 d.op1 = d.op0;
30477 break;
30480 d.target = gen_reg_rtx (d.vmode);
30481 if (ix86_expand_vec_perm_builtin_1 (&d))
30482 return d.target;
30484 /* For compiler-generated permutations we should never get here, because
30485 the compiler should also be checking the ok hook. But since this is a
30486 builtin the user has access to, don't abort. */
30487 switch (d.nelt)
30489 case 2:
30490 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30491 break;
30492 case 4:
30493 sorry ("vector permutation (%d %d %d %d)",
30494 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30495 break;
30496 case 8:
30497 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30498 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30499 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30500 break;
30501 case 16:
30502 sorry ("vector permutation "
30503 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30504 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30505 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30506 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30507 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30508 break;
30509 default:
30510 gcc_unreachable ();
30512 exit_error:
30513 return CONST0_RTX (d.vmode);
30516 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
30518 static bool
30519 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
30521 struct expand_vec_perm_d d;
30522 int vec_mask;
30523 bool ret, one_vec;
30525 d.vmode = TYPE_MODE (vec_type);
30526 d.nelt = GET_MODE_NUNITS (d.vmode);
30527 d.testing_p = true;
30529 /* Given sufficient ISA support we can just return true here
30530 for selected vector modes. */
30531 if (GET_MODE_SIZE (d.vmode) == 16)
30533 /* All implementable with a single vpperm insn. */
30534 if (TARGET_XOP)
30535 return true;
30536 /* All implementable with 2 pshufb + 1 ior. */
30537 if (TARGET_SSSE3)
30538 return true;
30539 /* All implementable with shufpd or unpck[lh]pd. */
30540 if (d.nelt == 2)
30541 return true;
30544 vec_mask = extract_vec_perm_cst (&d, mask);
30546 /* This hook cannot be called in response to something that the
30547 user does (unlike the builtin expander), so we should never see
30548 an error generated from the extract. */
30549 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30550 one_vec = (vec_mask != 3);
30552 /* Implementable with shufps or pshufd. */
30553 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30554 return true;
30556 /* Otherwise we have to go through the motions and see if we can
30557 figure out how to generate the requested permutation. */
30558 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30559 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30560 if (!one_vec)
30561 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30563 start_sequence ();
30564 ret = ix86_expand_vec_perm_builtin_1 (&d);
30565 end_sequence ();
30567 return ret;
30570 void
30571 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
30573 struct expand_vec_perm_d d;
30574 unsigned i, nelt;
30576 d.target = targ;
30577 d.op0 = op0;
30578 d.op1 = op1;
30579 d.vmode = GET_MODE (targ);
30580 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
30581 d.testing_p = false;
30583 for (i = 0; i < nelt; ++i)
30584 d.perm[i] = i * 2 + odd;
30586 /* We'll either be able to implement the permutation directly... */
30587 if (expand_vec_perm_1 (&d))
30588 return;
30590 /* ... or we use the special-case patterns. */
30591 expand_vec_perm_even_odd_1 (&d, odd);
30594 /* Return the calling-ABI-specific va_list type node, i.e. the
30595 va_list type appropriate for FNDECL. */
30597 static tree
30598 ix86_fn_abi_va_list (tree fndecl)
30600 if (!TARGET_64BIT)
30601 return va_list_type_node;
30602 gcc_assert (fndecl != NULL_TREE);
30604 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
30605 return ms_va_list_type_node;
30606 else
30607 return sysv_va_list_type_node;
30610 /* Returns the canonical va_list type specified by TYPE. If there
30611 is no valid TYPE provided, it returns NULL_TREE. */
30613 static tree
30614 ix86_canonical_va_list_type (tree type)
30616 tree wtype, htype;
30618 /* Resolve references and pointers to va_list type. */
30619 if (INDIRECT_REF_P (type))
30620 type = TREE_TYPE (type);
30621 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
30622 type = TREE_TYPE (type);
30624 if (TARGET_64BIT)
30626 wtype = va_list_type_node;
30627 gcc_assert (wtype != NULL_TREE);
30628 htype = type;
30629 if (TREE_CODE (wtype) == ARRAY_TYPE)
30631 /* If va_list is an array type, the argument may have decayed
30632 to a pointer type, e.g. by being passed to another function.
30633 In that case, unwrap both types so that we can compare the
30634 underlying records. */
30635 if (TREE_CODE (htype) == ARRAY_TYPE
30636 || POINTER_TYPE_P (htype))
30638 wtype = TREE_TYPE (wtype);
30639 htype = TREE_TYPE (htype);
30642 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30643 return va_list_type_node;
30644 wtype = sysv_va_list_type_node;
30645 gcc_assert (wtype != NULL_TREE);
30646 htype = type;
30647 if (TREE_CODE (wtype) == ARRAY_TYPE)
30649 /* If va_list is an array type, the argument may have decayed
30650 to a pointer type, e.g. by being passed to another function.
30651 In that case, unwrap both types so that we can compare the
30652 underlying records. */
30653 if (TREE_CODE (htype) == ARRAY_TYPE
30654 || POINTER_TYPE_P (htype))
30656 wtype = TREE_TYPE (wtype);
30657 htype = TREE_TYPE (htype);
30660 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30661 return sysv_va_list_type_node;
30662 wtype = ms_va_list_type_node;
30663 gcc_assert (wtype != NULL_TREE);
30664 htype = type;
30665 if (TREE_CODE (wtype) == ARRAY_TYPE)
30667 /* If va_list is an array type, the argument may have decayed
30668 to a pointer type, e.g. by being passed to another function.
30669 In that case, unwrap both types so that we can compare the
30670 underlying records. */
30671 if (TREE_CODE (htype) == ARRAY_TYPE
30672 || POINTER_TYPE_P (htype))
30674 wtype = TREE_TYPE (wtype);
30675 htype = TREE_TYPE (htype);
30678 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30679 return ms_va_list_type_node;
30680 return NULL_TREE;
30682 return std_canonical_va_list_type (type);
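/* Why the array unwrapping above is needed, as a plain C illustration (not
   part of i386.c): on SYSV x86-64, va_list is a one-element array type, so
   a va_list handed to a helper function decays to a pointer and no longer
   matches the array type directly; comparing the unwrapped element types
   still identifies it.  */
#include <stdarg.h>

static int
next_int (va_list ap)            /* parameter type is the decayed form */
{
  return va_arg (ap, int);
}

static int
first_vararg (int count, ...)
{
  va_list ap;
  int v;

  va_start (ap, count);
  v = next_int (ap);             /* the array argument decays here */
  va_end (ap);
  return v;
}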
30685 /* Iterate through the target-specific builtin types for va_list.
30686 IDX denotes the iterator, *PTREE is set to the result type of
30687 the va_list builtin, and *PNAME to its internal type.
30688 Returns zero if there is no element for this index; otherwise
30689 IDX should be increased for the next call.
30690 Note that we do not iterate over a base builtin name such as
30691 __builtin_va_list. Used from c_common_nodes_and_builtins.
30693 static int
30694 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
30696 if (TARGET_64BIT)
30698 switch (idx)
30700 default:
30701 break;
30703 case 0:
30704 *ptree = ms_va_list_type_node;
30705 *pname = "__builtin_ms_va_list";
30706 return 1;
30708 case 1:
30709 *ptree = sysv_va_list_type_node;
30710 *pname = "__builtin_sysv_va_list";
30711 return 1;
30715 return 0;
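/* A sketch of how the two 64-bit va_list flavors registered above reach
   the user (illustration, not part of i386.c; assumes an x86-64 target
   where the ms_abi function attribute is accepted).  Inside an ms_abi
   function the matching va_list type is __builtin_ms_va_list, and the
   fn_abi_va_list / canonical_va_list_type hooks defined earlier let the
   ordinary va_start/va_arg builtins operate on it.  */
static int __attribute__ ((ms_abi))
ms_sum (int count, ...)
{
  __builtin_ms_va_list ap;
  int i, total = 0;

  __builtin_va_start (ap, count);
  for (i = 0; i < count; i++)
    total += __builtin_va_arg (ap, int);
  __builtin_va_end (ap);
  return total;
}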
30718 /* Initialize the GCC target structure. */
30719 #undef TARGET_RETURN_IN_MEMORY
30720 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
30722 #undef TARGET_LEGITIMIZE_ADDRESS
30723 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
30725 #undef TARGET_ATTRIBUTE_TABLE
30726 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
30727 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30728 # undef TARGET_MERGE_DECL_ATTRIBUTES
30729 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
30730 #endif
30732 #undef TARGET_COMP_TYPE_ATTRIBUTES
30733 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
30735 #undef TARGET_INIT_BUILTINS
30736 #define TARGET_INIT_BUILTINS ix86_init_builtins
30737 #undef TARGET_BUILTIN_DECL
30738 #define TARGET_BUILTIN_DECL ix86_builtin_decl
30739 #undef TARGET_EXPAND_BUILTIN
30740 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
30742 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
30743 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
30744 ix86_builtin_vectorized_function
30746 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
30747 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
30749 #undef TARGET_BUILTIN_RECIPROCAL
30750 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
30752 #undef TARGET_ASM_FUNCTION_EPILOGUE
30753 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
30755 #undef TARGET_ENCODE_SECTION_INFO
30756 #ifndef SUBTARGET_ENCODE_SECTION_INFO
30757 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
30758 #else
30759 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
30760 #endif
30762 #undef TARGET_ASM_OPEN_PAREN
30763 #define TARGET_ASM_OPEN_PAREN ""
30764 #undef TARGET_ASM_CLOSE_PAREN
30765 #define TARGET_ASM_CLOSE_PAREN ""
30767 #undef TARGET_ASM_BYTE_OP
30768 #define TARGET_ASM_BYTE_OP ASM_BYTE
30770 #undef TARGET_ASM_ALIGNED_HI_OP
30771 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
30772 #undef TARGET_ASM_ALIGNED_SI_OP
30773 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
30774 #ifdef ASM_QUAD
30775 #undef TARGET_ASM_ALIGNED_DI_OP
30776 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
30777 #endif
30779 #undef TARGET_ASM_UNALIGNED_HI_OP
30780 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
30781 #undef TARGET_ASM_UNALIGNED_SI_OP
30782 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
30783 #undef TARGET_ASM_UNALIGNED_DI_OP
30784 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
30786 #undef TARGET_PRINT_OPERAND
30787 #define TARGET_PRINT_OPERAND ix86_print_operand
30788 #undef TARGET_PRINT_OPERAND_ADDRESS
30789 #define TARGET_PRINT_OPERAND_ADDRESS ix86_print_operand_address
30790 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
30791 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ix86_print_operand_punct_valid_p
30793 #undef TARGET_SCHED_ADJUST_COST
30794 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
30795 #undef TARGET_SCHED_ISSUE_RATE
30796 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
30797 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
30798 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
30799 ia32_multipass_dfa_lookahead
30801 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
30802 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
30804 #ifdef HAVE_AS_TLS
30805 #undef TARGET_HAVE_TLS
30806 #define TARGET_HAVE_TLS true
30807 #endif
30808 #undef TARGET_CANNOT_FORCE_CONST_MEM
30809 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
30810 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
30811 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
30813 #undef TARGET_DELEGITIMIZE_ADDRESS
30814 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
30816 #undef TARGET_MS_BITFIELD_LAYOUT_P
30817 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
30819 #if TARGET_MACHO
30820 #undef TARGET_BINDS_LOCAL_P
30821 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
30822 #endif
30823 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30824 #undef TARGET_BINDS_LOCAL_P
30825 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
30826 #endif
30828 #undef TARGET_ASM_OUTPUT_MI_THUNK
30829 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
30830 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
30831 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
30833 #undef TARGET_ASM_FILE_START
30834 #define TARGET_ASM_FILE_START x86_file_start
30836 #undef TARGET_DEFAULT_TARGET_FLAGS
30837 #define TARGET_DEFAULT_TARGET_FLAGS \
30838 (TARGET_DEFAULT \
30839 | TARGET_SUBTARGET_DEFAULT \
30840 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
30841 | MASK_FUSED_MADD)
30843 #undef TARGET_HANDLE_OPTION
30844 #define TARGET_HANDLE_OPTION ix86_handle_option
30846 #undef TARGET_REGISTER_MOVE_COST
30847 #define TARGET_REGISTER_MOVE_COST ix86_register_move_cost
30848 #undef TARGET_MEMORY_MOVE_COST
30849 #define TARGET_MEMORY_MOVE_COST ix86_memory_move_cost
30850 #undef TARGET_RTX_COSTS
30851 #define TARGET_RTX_COSTS ix86_rtx_costs
30852 #undef TARGET_ADDRESS_COST
30853 #define TARGET_ADDRESS_COST ix86_address_cost
30855 #undef TARGET_FIXED_CONDITION_CODE_REGS
30856 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
30857 #undef TARGET_CC_MODES_COMPATIBLE
30858 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
30860 #undef TARGET_MACHINE_DEPENDENT_REORG
30861 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
30863 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
30864 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
30866 #undef TARGET_BUILD_BUILTIN_VA_LIST
30867 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
30869 #undef TARGET_ENUM_VA_LIST_P
30870 #define TARGET_ENUM_VA_LIST_P ix86_enum_va_list
30872 #undef TARGET_FN_ABI_VA_LIST
30873 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
30875 #undef TARGET_CANONICAL_VA_LIST_TYPE
30876 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
30878 #undef TARGET_EXPAND_BUILTIN_VA_START
30879 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
30881 #undef TARGET_MD_ASM_CLOBBERS
30882 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
30884 #undef TARGET_PROMOTE_PROTOTYPES
30885 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
30886 #undef TARGET_STRUCT_VALUE_RTX
30887 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
30888 #undef TARGET_SETUP_INCOMING_VARARGS
30889 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
30890 #undef TARGET_MUST_PASS_IN_STACK
30891 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
30892 #undef TARGET_FUNCTION_ARG_ADVANCE
30893 #define TARGET_FUNCTION_ARG_ADVANCE ix86_function_arg_advance
30894 #undef TARGET_FUNCTION_ARG
30895 #define TARGET_FUNCTION_ARG ix86_function_arg
30896 #undef TARGET_PASS_BY_REFERENCE
30897 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
30898 #undef TARGET_INTERNAL_ARG_POINTER
30899 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
30900 #undef TARGET_UPDATE_STACK_BOUNDARY
30901 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
30902 #undef TARGET_GET_DRAP_RTX
30903 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
30904 #undef TARGET_STRICT_ARGUMENT_NAMING
30905 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
30906 #undef TARGET_STATIC_CHAIN
30907 #define TARGET_STATIC_CHAIN ix86_static_chain
30908 #undef TARGET_TRAMPOLINE_INIT
30909 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
30910 #undef TARGET_RETURN_POPS_ARGS
30911 #define TARGET_RETURN_POPS_ARGS ix86_return_pops_args
30913 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
30914 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
30916 #undef TARGET_SCALAR_MODE_SUPPORTED_P
30917 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
30919 #undef TARGET_VECTOR_MODE_SUPPORTED_P
30920 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
30922 #undef TARGET_C_MODE_FOR_SUFFIX
30923 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
30925 #ifdef HAVE_AS_TLS
30926 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
30927 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
30928 #endif
30930 #ifdef SUBTARGET_INSERT_ATTRIBUTES
30931 #undef TARGET_INSERT_ATTRIBUTES
30932 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
30933 #endif
30935 #undef TARGET_MANGLE_TYPE
30936 #define TARGET_MANGLE_TYPE ix86_mangle_type
30938 #undef TARGET_STACK_PROTECT_FAIL
30939 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
30941 #undef TARGET_FUNCTION_VALUE
30942 #define TARGET_FUNCTION_VALUE ix86_function_value
30944 #undef TARGET_FUNCTION_VALUE_REGNO_P
30945 #define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
30947 #undef TARGET_SECONDARY_RELOAD
30948 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
30950 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
30951 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
30952 ix86_builtin_vectorization_cost
30953 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
30954 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
30955 ix86_vectorize_builtin_vec_perm
30956 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
30957 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
30958 ix86_vectorize_builtin_vec_perm_ok
30960 #undef TARGET_SET_CURRENT_FUNCTION
30961 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
30963 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
30964 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
30966 #undef TARGET_OPTION_SAVE
30967 #define TARGET_OPTION_SAVE ix86_function_specific_save
30969 #undef TARGET_OPTION_RESTORE
30970 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
30972 #undef TARGET_OPTION_PRINT
30973 #define TARGET_OPTION_PRINT ix86_function_specific_print
30975 #undef TARGET_CAN_INLINE_P
30976 #define TARGET_CAN_INLINE_P ix86_can_inline_p
30978 #undef TARGET_EXPAND_TO_RTL_HOOK
30979 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
30981 #undef TARGET_LEGITIMATE_ADDRESS_P
30982 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
30984 #undef TARGET_IRA_COVER_CLASSES
30985 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
30987 #undef TARGET_FRAME_POINTER_REQUIRED
30988 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
30990 #undef TARGET_CAN_ELIMINATE
30991 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
30993 #undef TARGET_ASM_CODE_END
30994 #define TARGET_ASM_CODE_END ix86_code_end
30996 struct gcc_target targetm = TARGET_INITIALIZER;
30998 #include "gt-i386.h"