gcc/config/i386/i386.c
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "gimple.h"
51 #include "dwarf2.h"
52 #include "df.h"
53 #include "tm-constrs.h"
54 #include "params.h"
55 #include "cselib.h"
56 #include "debug.h"
57 #include "dwarf2out.h"
59 static rtx legitimize_dllimport_symbol (rtx, bool);
61 #ifndef CHECK_STACK_LIMIT
62 #define CHECK_STACK_LIMIT (-1)
63 #endif
65 /* Return index of given mode in mult and division cost tables. */
66 #define MODE_INDEX(mode) \
67 ((mode) == QImode ? 0 \
68 : (mode) == HImode ? 1 \
69 : (mode) == SImode ? 2 \
70 : (mode) == DImode ? 3 \
71 : 4)
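/* For illustration, a lookup into the per-mode arrays below might read

     total = cost->mult_init[MODE_INDEX (mode)] + nbits * cost->mult_bit;

   mult_init/mult_bit are assumed field names matching the "cost of starting
   multiply" and "cost of multiply per each bit set" rows, nbits is a
   hypothetical count of set bits in the multiplier, and index 4 is the
   catch-all "other" slot.  */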
73 /* Processor costs (relative to an add) */
74 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
75 #define COSTS_N_BYTES(N) ((N) * 2)
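/* Worked example of the scale: COSTS_N_INSNS (1) == 4 and an add is 2 bytes,
   so COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1).  Byte counts and insn
   counts therefore land on the same scale, letting the size-cost table below
   be compared directly against the speed-cost tables that follow it.  */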
77 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
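/* Reading guide for the string-operation strategy entries near the end of
   each table below (the layout is inferred from the initializers): every
   entry has the shape

     {alg_for_unknown_size, {{max_1, alg_1}, ..., {-1, alg_n}}}

   i.e. an algorithm to use when the block size is not known at compile time,
   followed by {upper bound, algorithm} pairs tried in order, where a bound of
   -1 means "no limit".  The first pair of entries in a table apparently
   covers block copy and the second block set; DUMMY_STRINGOP_ALGS marks the
   32-bit or 64-bit half a given table never tunes for.  */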
79 const
80 struct processor_costs ix86_size_cost = {/* costs for tuning for size */
81 COSTS_N_BYTES (2), /* cost of an add instruction */
82 COSTS_N_BYTES (3), /* cost of a lea instruction */
83 COSTS_N_BYTES (2), /* variable shift costs */
84 COSTS_N_BYTES (3), /* constant shift costs */
85 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
86 COSTS_N_BYTES (3), /* HI */
87 COSTS_N_BYTES (3), /* SI */
88 COSTS_N_BYTES (3), /* DI */
89 COSTS_N_BYTES (5)}, /* other */
90 0, /* cost of multiply per each bit set */
91 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
92 COSTS_N_BYTES (3), /* HI */
93 COSTS_N_BYTES (3), /* SI */
94 COSTS_N_BYTES (3), /* DI */
95 COSTS_N_BYTES (5)}, /* other */
96 COSTS_N_BYTES (3), /* cost of movsx */
97 COSTS_N_BYTES (3), /* cost of movzx */
98 0, /* "large" insn */
99 2, /* MOVE_RATIO */
100 2, /* cost for loading QImode using movzbl */
101 {2, 2, 2}, /* cost of loading integer registers
102 in QImode, HImode and SImode.
103 Relative to reg-reg move (2). */
104 {2, 2, 2}, /* cost of storing integer registers */
105 2, /* cost of reg,reg fld/fst */
106 {2, 2, 2}, /* cost of loading fp registers
107 in SFmode, DFmode and XFmode */
108 {2, 2, 2}, /* cost of storing fp registers
109 in SFmode, DFmode and XFmode */
110 3, /* cost of moving MMX register */
111 {3, 3}, /* cost of loading MMX registers
112 in SImode and DImode */
113 {3, 3}, /* cost of storing MMX registers
114 in SImode and DImode */
115 3, /* cost of moving SSE register */
116 {3, 3, 3}, /* cost of loading SSE registers
117 in SImode, DImode and TImode */
118 {3, 3, 3}, /* cost of storing SSE registers
119 in SImode, DImode and TImode */
120 3, /* MMX or SSE register to integer */
121 0, /* size of l1 cache */
122 0, /* size of l2 cache */
123 0, /* size of prefetch block */
124 0, /* number of parallel prefetches */
125 2, /* Branch cost */
126 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
127 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
128 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
129 COSTS_N_BYTES (2), /* cost of FABS instruction. */
130 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
131 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
132 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
133 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
134 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
135 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
136 1, /* scalar_stmt_cost. */
137 1, /* scalar load_cost. */
138 1, /* scalar_store_cost. */
139 1, /* vec_stmt_cost. */
140 1, /* vec_to_scalar_cost. */
141 1, /* scalar_to_vec_cost. */
142 1, /* vec_align_load_cost. */
143 1, /* vec_unalign_load_cost. */
144 1, /* vec_store_cost. */
145 1, /* cond_taken_branch_cost. */
146 1, /* cond_not_taken_branch_cost. */
147 };
149 /* Processor costs (relative to an add) */
150 static const
151 struct processor_costs i386_cost = { /* 386 specific costs */
152 COSTS_N_INSNS (1), /* cost of an add instruction */
153 COSTS_N_INSNS (1), /* cost of a lea instruction */
154 COSTS_N_INSNS (3), /* variable shift costs */
155 COSTS_N_INSNS (2), /* constant shift costs */
156 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
157 COSTS_N_INSNS (6), /* HI */
158 COSTS_N_INSNS (6), /* SI */
159 COSTS_N_INSNS (6), /* DI */
160 COSTS_N_INSNS (6)}, /* other */
161 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
162 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
163 COSTS_N_INSNS (23), /* HI */
164 COSTS_N_INSNS (23), /* SI */
165 COSTS_N_INSNS (23), /* DI */
166 COSTS_N_INSNS (23)}, /* other */
167 COSTS_N_INSNS (3), /* cost of movsx */
168 COSTS_N_INSNS (2), /* cost of movzx */
169 15, /* "large" insn */
170 3, /* MOVE_RATIO */
171 4, /* cost for loading QImode using movzbl */
172 {2, 4, 2}, /* cost of loading integer registers
173 in QImode, HImode and SImode.
174 Relative to reg-reg move (2). */
175 {2, 4, 2}, /* cost of storing integer registers */
176 2, /* cost of reg,reg fld/fst */
177 {8, 8, 8}, /* cost of loading fp registers
178 in SFmode, DFmode and XFmode */
179 {8, 8, 8}, /* cost of storing fp registers
180 in SFmode, DFmode and XFmode */
181 2, /* cost of moving MMX register */
182 {4, 8}, /* cost of loading MMX registers
183 in SImode and DImode */
184 {4, 8}, /* cost of storing MMX registers
185 in SImode and DImode */
186 2, /* cost of moving SSE register */
187 {4, 8, 16}, /* cost of loading SSE registers
188 in SImode, DImode and TImode */
189 {4, 8, 16}, /* cost of storing SSE registers
190 in SImode, DImode and TImode */
191 3, /* MMX or SSE register to integer */
192 0, /* size of l1 cache */
193 0, /* size of l2 cache */
194 0, /* size of prefetch block */
195 0, /* number of parallel prefetches */
196 1, /* Branch cost */
197 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
198 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
199 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
200 COSTS_N_INSNS (22), /* cost of FABS instruction. */
201 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
202 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
203 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
204 DUMMY_STRINGOP_ALGS},
205 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
206 DUMMY_STRINGOP_ALGS},
207 1, /* scalar_stmt_cost. */
208 1, /* scalar load_cost. */
209 1, /* scalar_store_cost. */
210 1, /* vec_stmt_cost. */
211 1, /* vec_to_scalar_cost. */
212 1, /* scalar_to_vec_cost. */
213 1, /* vec_align_load_cost. */
214 2, /* vec_unalign_load_cost. */
215 1, /* vec_store_cost. */
216 3, /* cond_taken_branch_cost. */
217 1, /* cond_not_taken_branch_cost. */
218 };
220 static const
221 struct processor_costs i486_cost = { /* 486 specific costs */
222 COSTS_N_INSNS (1), /* cost of an add instruction */
223 COSTS_N_INSNS (1), /* cost of a lea instruction */
224 COSTS_N_INSNS (3), /* variable shift costs */
225 COSTS_N_INSNS (2), /* constant shift costs */
226 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
227 COSTS_N_INSNS (12), /* HI */
228 COSTS_N_INSNS (12), /* SI */
229 COSTS_N_INSNS (12), /* DI */
230 COSTS_N_INSNS (12)}, /* other */
231 1, /* cost of multiply per each bit set */
232 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
233 COSTS_N_INSNS (40), /* HI */
234 COSTS_N_INSNS (40), /* SI */
235 COSTS_N_INSNS (40), /* DI */
236 COSTS_N_INSNS (40)}, /* other */
237 COSTS_N_INSNS (3), /* cost of movsx */
238 COSTS_N_INSNS (2), /* cost of movzx */
239 15, /* "large" insn */
240 3, /* MOVE_RATIO */
241 4, /* cost for loading QImode using movzbl */
242 {2, 4, 2}, /* cost of loading integer registers
243 in QImode, HImode and SImode.
244 Relative to reg-reg move (2). */
245 {2, 4, 2}, /* cost of storing integer registers */
246 2, /* cost of reg,reg fld/fst */
247 {8, 8, 8}, /* cost of loading fp registers
248 in SFmode, DFmode and XFmode */
249 {8, 8, 8}, /* cost of storing fp registers
250 in SFmode, DFmode and XFmode */
251 2, /* cost of moving MMX register */
252 {4, 8}, /* cost of loading MMX registers
253 in SImode and DImode */
254 {4, 8}, /* cost of storing MMX registers
255 in SImode and DImode */
256 2, /* cost of moving SSE register */
257 {4, 8, 16}, /* cost of loading SSE registers
258 in SImode, DImode and TImode */
259 {4, 8, 16}, /* cost of storing SSE registers
260 in SImode, DImode and TImode */
261 3, /* MMX or SSE register to integer */
262 4, /* size of l1 cache. 486 has 8kB cache
263 shared for code and data, so 4kB is
264 not really precise. */
265 4, /* size of l2 cache */
266 0, /* size of prefetch block */
267 0, /* number of parallel prefetches */
268 1, /* Branch cost */
269 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
270 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
271 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
272 COSTS_N_INSNS (3), /* cost of FABS instruction. */
273 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
274 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
275 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
276 DUMMY_STRINGOP_ALGS},
277 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
278 DUMMY_STRINGOP_ALGS},
279 1, /* scalar_stmt_cost. */
280 1, /* scalar load_cost. */
281 1, /* scalar_store_cost. */
282 1, /* vec_stmt_cost. */
283 1, /* vec_to_scalar_cost. */
284 1, /* scalar_to_vec_cost. */
285 1, /* vec_align_load_cost. */
286 2, /* vec_unalign_load_cost. */
287 1, /* vec_store_cost. */
288 3, /* cond_taken_branch_cost. */
289 1, /* cond_not_taken_branch_cost. */
290 };
292 static const
293 struct processor_costs pentium_cost = {
294 COSTS_N_INSNS (1), /* cost of an add instruction */
295 COSTS_N_INSNS (1), /* cost of a lea instruction */
296 COSTS_N_INSNS (4), /* variable shift costs */
297 COSTS_N_INSNS (1), /* constant shift costs */
298 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
299 COSTS_N_INSNS (11), /* HI */
300 COSTS_N_INSNS (11), /* SI */
301 COSTS_N_INSNS (11), /* DI */
302 COSTS_N_INSNS (11)}, /* other */
303 0, /* cost of multiply per each bit set */
304 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
305 COSTS_N_INSNS (25), /* HI */
306 COSTS_N_INSNS (25), /* SI */
307 COSTS_N_INSNS (25), /* DI */
308 COSTS_N_INSNS (25)}, /* other */
309 COSTS_N_INSNS (3), /* cost of movsx */
310 COSTS_N_INSNS (2), /* cost of movzx */
311 8, /* "large" insn */
312 6, /* MOVE_RATIO */
313 6, /* cost for loading QImode using movzbl */
314 {2, 4, 2}, /* cost of loading integer registers
315 in QImode, HImode and SImode.
316 Relative to reg-reg move (2). */
317 {2, 4, 2}, /* cost of storing integer registers */
318 2, /* cost of reg,reg fld/fst */
319 {2, 2, 6}, /* cost of loading fp registers
320 in SFmode, DFmode and XFmode */
321 {4, 4, 6}, /* cost of storing fp registers
322 in SFmode, DFmode and XFmode */
323 8, /* cost of moving MMX register */
324 {8, 8}, /* cost of loading MMX registers
325 in SImode and DImode */
326 {8, 8}, /* cost of storing MMX registers
327 in SImode and DImode */
328 2, /* cost of moving SSE register */
329 {4, 8, 16}, /* cost of loading SSE registers
330 in SImode, DImode and TImode */
331 {4, 8, 16}, /* cost of storing SSE registers
332 in SImode, DImode and TImode */
333 3, /* MMX or SSE register to integer */
334 8, /* size of l1 cache. */
335 8, /* size of l2 cache */
336 0, /* size of prefetch block */
337 0, /* number of parallel prefetches */
338 2, /* Branch cost */
339 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
340 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
341 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
342 COSTS_N_INSNS (1), /* cost of FABS instruction. */
343 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
344 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
345 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
346 DUMMY_STRINGOP_ALGS},
347 {{libcall, {{-1, rep_prefix_4_byte}}},
348 DUMMY_STRINGOP_ALGS},
349 1, /* scalar_stmt_cost. */
350 1, /* scalar load_cost. */
351 1, /* scalar_store_cost. */
352 1, /* vec_stmt_cost. */
353 1, /* vec_to_scalar_cost. */
354 1, /* scalar_to_vec_cost. */
355 1, /* vec_align_load_cost. */
356 2, /* vec_unalign_load_cost. */
357 1, /* vec_store_cost. */
358 3, /* cond_taken_branch_cost. */
359 1, /* cond_not_taken_branch_cost. */
360 };
362 static const
363 struct processor_costs pentiumpro_cost = {
364 COSTS_N_INSNS (1), /* cost of an add instruction */
365 COSTS_N_INSNS (1), /* cost of a lea instruction */
366 COSTS_N_INSNS (1), /* variable shift costs */
367 COSTS_N_INSNS (1), /* constant shift costs */
368 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
369 COSTS_N_INSNS (4), /* HI */
370 COSTS_N_INSNS (4), /* SI */
371 COSTS_N_INSNS (4), /* DI */
372 COSTS_N_INSNS (4)}, /* other */
373 0, /* cost of multiply per each bit set */
374 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
375 COSTS_N_INSNS (17), /* HI */
376 COSTS_N_INSNS (17), /* SI */
377 COSTS_N_INSNS (17), /* DI */
378 COSTS_N_INSNS (17)}, /* other */
379 COSTS_N_INSNS (1), /* cost of movsx */
380 COSTS_N_INSNS (1), /* cost of movzx */
381 8, /* "large" insn */
382 6, /* MOVE_RATIO */
383 2, /* cost for loading QImode using movzbl */
384 {4, 4, 4}, /* cost of loading integer registers
385 in QImode, HImode and SImode.
386 Relative to reg-reg move (2). */
387 {2, 2, 2}, /* cost of storing integer registers */
388 2, /* cost of reg,reg fld/fst */
389 {2, 2, 6}, /* cost of loading fp registers
390 in SFmode, DFmode and XFmode */
391 {4, 4, 6}, /* cost of storing fp registers
392 in SFmode, DFmode and XFmode */
393 2, /* cost of moving MMX register */
394 {2, 2}, /* cost of loading MMX registers
395 in SImode and DImode */
396 {2, 2}, /* cost of storing MMX registers
397 in SImode and DImode */
398 2, /* cost of moving SSE register */
399 {2, 2, 8}, /* cost of loading SSE registers
400 in SImode, DImode and TImode */
401 {2, 2, 8}, /* cost of storing SSE registers
402 in SImode, DImode and TImode */
403 3, /* MMX or SSE register to integer */
404 8, /* size of l1 cache. */
405 256, /* size of l2 cache */
406 32, /* size of prefetch block */
407 6, /* number of parallel prefetches */
408 2, /* Branch cost */
409 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
410 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
411 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
412 COSTS_N_INSNS (2), /* cost of FABS instruction. */
413 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
414 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
415 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we ensure
416 the alignment). For small blocks an inline loop is still a noticeable win; for bigger
417 blocks either rep movsl or rep movsb is the way to go. Rep movsb apparently has a
418 more expensive startup time in the CPU, but after 4K the difference is down in the noise. */
420 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
421 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
422 DUMMY_STRINGOP_ALGS},
423 {{rep_prefix_4_byte, {{1024, unrolled_loop},
424 {8192, rep_prefix_4_byte}, {-1, libcall}}},
425 DUMMY_STRINGOP_ALGS},
426 1, /* scalar_stmt_cost. */
427 1, /* scalar load_cost. */
428 1, /* scalar_store_cost. */
429 1, /* vec_stmt_cost. */
430 1, /* vec_to_scalar_cost. */
431 1, /* scalar_to_vec_cost. */
432 1, /* vec_align_load_cost. */
433 2, /* vec_unalign_load_cost. */
434 1, /* vec_store_cost. */
435 3, /* cond_taken_branch_cost. */
436 1, /* cond_not_taken_branch_cost. */
437 };
439 static const
440 struct processor_costs geode_cost = {
441 COSTS_N_INSNS (1), /* cost of an add instruction */
442 COSTS_N_INSNS (1), /* cost of a lea instruction */
443 COSTS_N_INSNS (2), /* variable shift costs */
444 COSTS_N_INSNS (1), /* constant shift costs */
445 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
446 COSTS_N_INSNS (4), /* HI */
447 COSTS_N_INSNS (7), /* SI */
448 COSTS_N_INSNS (7), /* DI */
449 COSTS_N_INSNS (7)}, /* other */
450 0, /* cost of multiply per each bit set */
451 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
452 COSTS_N_INSNS (23), /* HI */
453 COSTS_N_INSNS (39), /* SI */
454 COSTS_N_INSNS (39), /* DI */
455 COSTS_N_INSNS (39)}, /* other */
456 COSTS_N_INSNS (1), /* cost of movsx */
457 COSTS_N_INSNS (1), /* cost of movzx */
458 8, /* "large" insn */
459 4, /* MOVE_RATIO */
460 1, /* cost for loading QImode using movzbl */
461 {1, 1, 1}, /* cost of loading integer registers
462 in QImode, HImode and SImode.
463 Relative to reg-reg move (2). */
464 {1, 1, 1}, /* cost of storing integer registers */
465 1, /* cost of reg,reg fld/fst */
466 {1, 1, 1}, /* cost of loading fp registers
467 in SFmode, DFmode and XFmode */
468 {4, 6, 6}, /* cost of storing fp registers
469 in SFmode, DFmode and XFmode */
471 1, /* cost of moving MMX register */
472 {1, 1}, /* cost of loading MMX registers
473 in SImode and DImode */
474 {1, 1}, /* cost of storing MMX registers
475 in SImode and DImode */
476 1, /* cost of moving SSE register */
477 {1, 1, 1}, /* cost of loading SSE registers
478 in SImode, DImode and TImode */
479 {1, 1, 1}, /* cost of storing SSE registers
480 in SImode, DImode and TImode */
481 1, /* MMX or SSE register to integer */
482 64, /* size of l1 cache. */
483 128, /* size of l2 cache. */
484 32, /* size of prefetch block */
485 1, /* number of parallel prefetches */
486 1, /* Branch cost */
487 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
488 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
489 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
490 COSTS_N_INSNS (1), /* cost of FABS instruction. */
491 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
492 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
493 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
494 DUMMY_STRINGOP_ALGS},
495 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
496 DUMMY_STRINGOP_ALGS},
497 1, /* scalar_stmt_cost. */
498 1, /* scalar load_cost. */
499 1, /* scalar_store_cost. */
500 1, /* vec_stmt_cost. */
501 1, /* vec_to_scalar_cost. */
502 1, /* scalar_to_vec_cost. */
503 1, /* vec_align_load_cost. */
504 2, /* vec_unalign_load_cost. */
505 1, /* vec_store_cost. */
506 3, /* cond_taken_branch_cost. */
507 1, /* cond_not_taken_branch_cost. */
508 };
510 static const
511 struct processor_costs k6_cost = {
512 COSTS_N_INSNS (1), /* cost of an add instruction */
513 COSTS_N_INSNS (2), /* cost of a lea instruction */
514 COSTS_N_INSNS (1), /* variable shift costs */
515 COSTS_N_INSNS (1), /* constant shift costs */
516 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
517 COSTS_N_INSNS (3), /* HI */
518 COSTS_N_INSNS (3), /* SI */
519 COSTS_N_INSNS (3), /* DI */
520 COSTS_N_INSNS (3)}, /* other */
521 0, /* cost of multiply per each bit set */
522 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
523 COSTS_N_INSNS (18), /* HI */
524 COSTS_N_INSNS (18), /* SI */
525 COSTS_N_INSNS (18), /* DI */
526 COSTS_N_INSNS (18)}, /* other */
527 COSTS_N_INSNS (2), /* cost of movsx */
528 COSTS_N_INSNS (2), /* cost of movzx */
529 8, /* "large" insn */
530 4, /* MOVE_RATIO */
531 3, /* cost for loading QImode using movzbl */
532 {4, 5, 4}, /* cost of loading integer registers
533 in QImode, HImode and SImode.
534 Relative to reg-reg move (2). */
535 {2, 3, 2}, /* cost of storing integer registers */
536 4, /* cost of reg,reg fld/fst */
537 {6, 6, 6}, /* cost of loading fp registers
538 in SFmode, DFmode and XFmode */
539 {4, 4, 4}, /* cost of storing fp registers
540 in SFmode, DFmode and XFmode */
541 2, /* cost of moving MMX register */
542 {2, 2}, /* cost of loading MMX registers
543 in SImode and DImode */
544 {2, 2}, /* cost of storing MMX registers
545 in SImode and DImode */
546 2, /* cost of moving SSE register */
547 {2, 2, 8}, /* cost of loading SSE registers
548 in SImode, DImode and TImode */
549 {2, 2, 8}, /* cost of storing SSE registers
550 in SImode, DImode and TImode */
551 6, /* MMX or SSE register to integer */
552 32, /* size of l1 cache. */
553 32, /* size of l2 cache. Some models
554 have integrated l2 cache, but
555 optimizing for k6 is not important
556 enough to worry about that. */
557 32, /* size of prefetch block */
558 1, /* number of parallel prefetches */
559 1, /* Branch cost */
560 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
561 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
562 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
563 COSTS_N_INSNS (2), /* cost of FABS instruction. */
564 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
565 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
566 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
567 DUMMY_STRINGOP_ALGS},
568 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
569 DUMMY_STRINGOP_ALGS},
570 1, /* scalar_stmt_cost. */
571 1, /* scalar load_cost. */
572 1, /* scalar_store_cost. */
573 1, /* vec_stmt_cost. */
574 1, /* vec_to_scalar_cost. */
575 1, /* scalar_to_vec_cost. */
576 1, /* vec_align_load_cost. */
577 2, /* vec_unalign_load_cost. */
578 1, /* vec_store_cost. */
579 3, /* cond_taken_branch_cost. */
580 1, /* cond_not_taken_branch_cost. */
581 };
583 static const
584 struct processor_costs athlon_cost = {
585 COSTS_N_INSNS (1), /* cost of an add instruction */
586 COSTS_N_INSNS (2), /* cost of a lea instruction */
587 COSTS_N_INSNS (1), /* variable shift costs */
588 COSTS_N_INSNS (1), /* constant shift costs */
589 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
590 COSTS_N_INSNS (5), /* HI */
591 COSTS_N_INSNS (5), /* SI */
592 COSTS_N_INSNS (5), /* DI */
593 COSTS_N_INSNS (5)}, /* other */
594 0, /* cost of multiply per each bit set */
595 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
596 COSTS_N_INSNS (26), /* HI */
597 COSTS_N_INSNS (42), /* SI */
598 COSTS_N_INSNS (74), /* DI */
599 COSTS_N_INSNS (74)}, /* other */
600 COSTS_N_INSNS (1), /* cost of movsx */
601 COSTS_N_INSNS (1), /* cost of movzx */
602 8, /* "large" insn */
603 9, /* MOVE_RATIO */
604 4, /* cost for loading QImode using movzbl */
605 {3, 4, 3}, /* cost of loading integer registers
606 in QImode, HImode and SImode.
607 Relative to reg-reg move (2). */
608 {3, 4, 3}, /* cost of storing integer registers */
609 4, /* cost of reg,reg fld/fst */
610 {4, 4, 12}, /* cost of loading fp registers
611 in SFmode, DFmode and XFmode */
612 {6, 6, 8}, /* cost of storing fp registers
613 in SFmode, DFmode and XFmode */
614 2, /* cost of moving MMX register */
615 {4, 4}, /* cost of loading MMX registers
616 in SImode and DImode */
617 {4, 4}, /* cost of storing MMX registers
618 in SImode and DImode */
619 2, /* cost of moving SSE register */
620 {4, 4, 6}, /* cost of loading SSE registers
621 in SImode, DImode and TImode */
622 {4, 4, 5}, /* cost of storing SSE registers
623 in SImode, DImode and TImode */
624 5, /* MMX or SSE register to integer */
625 64, /* size of l1 cache. */
626 256, /* size of l2 cache. */
627 64, /* size of prefetch block */
628 6, /* number of parallel prefetches */
629 5, /* Branch cost */
630 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
631 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
632 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
633 COSTS_N_INSNS (2), /* cost of FABS instruction. */
634 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
635 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
636 /* For some reason, Athlon deals better with REP prefix (relative to loops)
637 compared to K8. Alignment becomes important after 8 bytes for memcpy and
638 128 bytes for memset. */
639 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
640 DUMMY_STRINGOP_ALGS},
641 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
642 DUMMY_STRINGOP_ALGS},
643 1, /* scalar_stmt_cost. */
644 1, /* scalar load_cost. */
645 1, /* scalar_store_cost. */
646 1, /* vec_stmt_cost. */
647 1, /* vec_to_scalar_cost. */
648 1, /* scalar_to_vec_cost. */
649 1, /* vec_align_load_cost. */
650 2, /* vec_unalign_load_cost. */
651 1, /* vec_store_cost. */
652 3, /* cond_taken_branch_cost. */
653 1, /* cond_not_taken_branch_cost. */
654 };
656 static const
657 struct processor_costs k8_cost = {
658 COSTS_N_INSNS (1), /* cost of an add instruction */
659 COSTS_N_INSNS (2), /* cost of a lea instruction */
660 COSTS_N_INSNS (1), /* variable shift costs */
661 COSTS_N_INSNS (1), /* constant shift costs */
662 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
663 COSTS_N_INSNS (4), /* HI */
664 COSTS_N_INSNS (3), /* SI */
665 COSTS_N_INSNS (4), /* DI */
666 COSTS_N_INSNS (5)}, /* other */
667 0, /* cost of multiply per each bit set */
668 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
669 COSTS_N_INSNS (26), /* HI */
670 COSTS_N_INSNS (42), /* SI */
671 COSTS_N_INSNS (74), /* DI */
672 COSTS_N_INSNS (74)}, /* other */
673 COSTS_N_INSNS (1), /* cost of movsx */
674 COSTS_N_INSNS (1), /* cost of movzx */
675 8, /* "large" insn */
676 9, /* MOVE_RATIO */
677 4, /* cost for loading QImode using movzbl */
678 {3, 4, 3}, /* cost of loading integer registers
679 in QImode, HImode and SImode.
680 Relative to reg-reg move (2). */
681 {3, 4, 3}, /* cost of storing integer registers */
682 4, /* cost of reg,reg fld/fst */
683 {4, 4, 12}, /* cost of loading fp registers
684 in SFmode, DFmode and XFmode */
685 {6, 6, 8}, /* cost of storing fp registers
686 in SFmode, DFmode and XFmode */
687 2, /* cost of moving MMX register */
688 {3, 3}, /* cost of loading MMX registers
689 in SImode and DImode */
690 {4, 4}, /* cost of storing MMX registers
691 in SImode and DImode */
692 2, /* cost of moving SSE register */
693 {4, 3, 6}, /* cost of loading SSE registers
694 in SImode, DImode and TImode */
695 {4, 4, 5}, /* cost of storing SSE registers
696 in SImode, DImode and TImode */
697 5, /* MMX or SSE register to integer */
698 64, /* size of l1 cache. */
699 512, /* size of l2 cache. */
700 64, /* size of prefetch block */
701 /* New AMD processors never drop prefetches; if they cannot be performed
702 immediately, they are queued. We set number of simultaneous prefetches
703 to a large constant to reflect this (it probably is not a good idea not
704 to limit number of prefetches at all, as their execution also takes some
705 time). */
706 100, /* number of parallel prefetches */
707 3, /* Branch cost */
708 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
709 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
710 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
711 COSTS_N_INSNS (2), /* cost of FABS instruction. */
712 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
713 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
714 /* K8 has optimized REP instructions for medium-sized blocks, but for very small
715 blocks it is better to use a loop. For large blocks, a libcall can do
716 nontemporal accesses and beat inline code considerably. */
717 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
718 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
719 {{libcall, {{8, loop}, {24, unrolled_loop},
720 {2048, rep_prefix_4_byte}, {-1, libcall}}},
721 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
722 4, /* scalar_stmt_cost. */
723 2, /* scalar load_cost. */
724 2, /* scalar_store_cost. */
725 5, /* vec_stmt_cost. */
726 0, /* vec_to_scalar_cost. */
727 2, /* scalar_to_vec_cost. */
728 2, /* vec_align_load_cost. */
729 3, /* vec_unalign_load_cost. */
730 3, /* vec_store_cost. */
731 3, /* cond_taken_branch_cost. */
732 2, /* cond_not_taken_branch_cost. */
733 };
735 struct processor_costs amdfam10_cost = {
736 COSTS_N_INSNS (1), /* cost of an add instruction */
737 COSTS_N_INSNS (2), /* cost of a lea instruction */
738 COSTS_N_INSNS (1), /* variable shift costs */
739 COSTS_N_INSNS (1), /* constant shift costs */
740 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
741 COSTS_N_INSNS (4), /* HI */
742 COSTS_N_INSNS (3), /* SI */
743 COSTS_N_INSNS (4), /* DI */
744 COSTS_N_INSNS (5)}, /* other */
745 0, /* cost of multiply per each bit set */
746 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
747 COSTS_N_INSNS (35), /* HI */
748 COSTS_N_INSNS (51), /* SI */
749 COSTS_N_INSNS (83), /* DI */
750 COSTS_N_INSNS (83)}, /* other */
751 COSTS_N_INSNS (1), /* cost of movsx */
752 COSTS_N_INSNS (1), /* cost of movzx */
753 8, /* "large" insn */
754 9, /* MOVE_RATIO */
755 4, /* cost for loading QImode using movzbl */
756 {3, 4, 3}, /* cost of loading integer registers
757 in QImode, HImode and SImode.
758 Relative to reg-reg move (2). */
759 {3, 4, 3}, /* cost of storing integer registers */
760 4, /* cost of reg,reg fld/fst */
761 {4, 4, 12}, /* cost of loading fp registers
762 in SFmode, DFmode and XFmode */
763 {6, 6, 8}, /* cost of storing fp registers
764 in SFmode, DFmode and XFmode */
765 2, /* cost of moving MMX register */
766 {3, 3}, /* cost of loading MMX registers
767 in SImode and DImode */
768 {4, 4}, /* cost of storing MMX registers
769 in SImode and DImode */
770 2, /* cost of moving SSE register */
771 {4, 4, 3}, /* cost of loading SSE registers
772 in SImode, DImode and TImode */
773 {4, 4, 5}, /* cost of storing SSE registers
774 in SImode, DImode and TImode */
775 3, /* MMX or SSE register to integer */
776 /* On K8
777 MOVD reg64, xmmreg Double FSTORE 4
778 MOVD reg32, xmmreg Double FSTORE 4
779 On AMDFAM10
780 MOVD reg64, xmmreg Double FADD 3
781 1/1 1/1
782 MOVD reg32, xmmreg Double FADD 3
783 1/1 1/1 */
784 64, /* size of l1 cache. */
785 512, /* size of l2 cache. */
786 64, /* size of prefetch block */
787 /* New AMD processors never drop prefetches; if they cannot be performed
788 immediately, they are queued. We set number of simultaneous prefetches
789 to a large constant to reflect this (it probably is not a good idea not
790 to limit number of prefetches at all, as their execution also takes some
791 time). */
792 100, /* number of parallel prefetches */
793 2, /* Branch cost */
794 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
795 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
796 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
797 COSTS_N_INSNS (2), /* cost of FABS instruction. */
798 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
799 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
801 /* AMDFAM10 has optimized REP instructions for medium-sized blocks, but for
802 very small blocks it is better to use a loop. For large blocks, a libcall can
803 do nontemporal accesses and beat inline code considerably. */
804 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
805 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
806 {{libcall, {{8, loop}, {24, unrolled_loop},
807 {2048, rep_prefix_4_byte}, {-1, libcall}}},
808 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
809 4, /* scalar_stmt_cost. */
810 2, /* scalar load_cost. */
811 2, /* scalar_store_cost. */
812 6, /* vec_stmt_cost. */
813 0, /* vec_to_scalar_cost. */
814 2, /* scalar_to_vec_cost. */
815 2, /* vec_align_load_cost. */
816 2, /* vec_unalign_load_cost. */
817 2, /* vec_store_cost. */
818 2, /* cond_taken_branch_cost. */
819 1, /* cond_not_taken_branch_cost. */
820 };
822 static const
823 struct processor_costs pentium4_cost = {
824 COSTS_N_INSNS (1), /* cost of an add instruction */
825 COSTS_N_INSNS (3), /* cost of a lea instruction */
826 COSTS_N_INSNS (4), /* variable shift costs */
827 COSTS_N_INSNS (4), /* constant shift costs */
828 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
829 COSTS_N_INSNS (15), /* HI */
830 COSTS_N_INSNS (15), /* SI */
831 COSTS_N_INSNS (15), /* DI */
832 COSTS_N_INSNS (15)}, /* other */
833 0, /* cost of multiply per each bit set */
834 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
835 COSTS_N_INSNS (56), /* HI */
836 COSTS_N_INSNS (56), /* SI */
837 COSTS_N_INSNS (56), /* DI */
838 COSTS_N_INSNS (56)}, /* other */
839 COSTS_N_INSNS (1), /* cost of movsx */
840 COSTS_N_INSNS (1), /* cost of movzx */
841 16, /* "large" insn */
842 6, /* MOVE_RATIO */
843 2, /* cost for loading QImode using movzbl */
844 {4, 5, 4}, /* cost of loading integer registers
845 in QImode, HImode and SImode.
846 Relative to reg-reg move (2). */
847 {2, 3, 2}, /* cost of storing integer registers */
848 2, /* cost of reg,reg fld/fst */
849 {2, 2, 6}, /* cost of loading fp registers
850 in SFmode, DFmode and XFmode */
851 {4, 4, 6}, /* cost of storing fp registers
852 in SFmode, DFmode and XFmode */
853 2, /* cost of moving MMX register */
854 {2, 2}, /* cost of loading MMX registers
855 in SImode and DImode */
856 {2, 2}, /* cost of storing MMX registers
857 in SImode and DImode */
858 12, /* cost of moving SSE register */
859 {12, 12, 12}, /* cost of loading SSE registers
860 in SImode, DImode and TImode */
861 {2, 2, 8}, /* cost of storing SSE registers
862 in SImode, DImode and TImode */
863 10, /* MMX or SSE register to integer */
864 8, /* size of l1 cache. */
865 256, /* size of l2 cache. */
866 64, /* size of prefetch block */
867 6, /* number of parallel prefetches */
868 2, /* Branch cost */
869 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
870 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
871 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
872 COSTS_N_INSNS (2), /* cost of FABS instruction. */
873 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
874 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
875 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
876 DUMMY_STRINGOP_ALGS},
877 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
878 {-1, libcall}}},
879 DUMMY_STRINGOP_ALGS},
880 1, /* scalar_stmt_cost. */
881 1, /* scalar load_cost. */
882 1, /* scalar_store_cost. */
883 1, /* vec_stmt_cost. */
884 1, /* vec_to_scalar_cost. */
885 1, /* scalar_to_vec_cost. */
886 1, /* vec_align_load_cost. */
887 2, /* vec_unalign_load_cost. */
888 1, /* vec_store_cost. */
889 3, /* cond_taken_branch_cost. */
890 1, /* cond_not_taken_branch_cost. */
891 };
893 static const
894 struct processor_costs nocona_cost = {
895 COSTS_N_INSNS (1), /* cost of an add instruction */
896 COSTS_N_INSNS (1), /* cost of a lea instruction */
897 COSTS_N_INSNS (1), /* variable shift costs */
898 COSTS_N_INSNS (1), /* constant shift costs */
899 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
900 COSTS_N_INSNS (10), /* HI */
901 COSTS_N_INSNS (10), /* SI */
902 COSTS_N_INSNS (10), /* DI */
903 COSTS_N_INSNS (10)}, /* other */
904 0, /* cost of multiply per each bit set */
905 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
906 COSTS_N_INSNS (66), /* HI */
907 COSTS_N_INSNS (66), /* SI */
908 COSTS_N_INSNS (66), /* DI */
909 COSTS_N_INSNS (66)}, /* other */
910 COSTS_N_INSNS (1), /* cost of movsx */
911 COSTS_N_INSNS (1), /* cost of movzx */
912 16, /* "large" insn */
913 17, /* MOVE_RATIO */
914 4, /* cost for loading QImode using movzbl */
915 {4, 4, 4}, /* cost of loading integer registers
916 in QImode, HImode and SImode.
917 Relative to reg-reg move (2). */
918 {4, 4, 4}, /* cost of storing integer registers */
919 3, /* cost of reg,reg fld/fst */
920 {12, 12, 12}, /* cost of loading fp registers
921 in SFmode, DFmode and XFmode */
922 {4, 4, 4}, /* cost of storing fp registers
923 in SFmode, DFmode and XFmode */
924 6, /* cost of moving MMX register */
925 {12, 12}, /* cost of loading MMX registers
926 in SImode and DImode */
927 {12, 12}, /* cost of storing MMX registers
928 in SImode and DImode */
929 6, /* cost of moving SSE register */
930 {12, 12, 12}, /* cost of loading SSE registers
931 in SImode, DImode and TImode */
932 {12, 12, 12}, /* cost of storing SSE registers
933 in SImode, DImode and TImode */
934 8, /* MMX or SSE register to integer */
935 8, /* size of l1 cache. */
936 1024, /* size of l2 cache. */
937 128, /* size of prefetch block */
938 8, /* number of parallel prefetches */
939 1, /* Branch cost */
940 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
941 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
942 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
943 COSTS_N_INSNS (3), /* cost of FABS instruction. */
944 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
945 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
946 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
947 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
948 {100000, unrolled_loop}, {-1, libcall}}}},
949 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
950 {-1, libcall}}},
951 {libcall, {{24, loop}, {64, unrolled_loop},
952 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
953 1, /* scalar_stmt_cost. */
954 1, /* scalar load_cost. */
955 1, /* scalar_store_cost. */
956 1, /* vec_stmt_cost. */
957 1, /* vec_to_scalar_cost. */
958 1, /* scalar_to_vec_cost. */
959 1, /* vec_align_load_cost. */
960 2, /* vec_unalign_load_cost. */
961 1, /* vec_store_cost. */
962 3, /* cond_taken_branch_cost. */
963 1, /* cond_not_taken_branch_cost. */
964 };
966 static const
967 struct processor_costs core2_cost = {
968 COSTS_N_INSNS (1), /* cost of an add instruction */
969 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
970 COSTS_N_INSNS (1), /* variable shift costs */
971 COSTS_N_INSNS (1), /* constant shift costs */
972 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
973 COSTS_N_INSNS (3), /* HI */
974 COSTS_N_INSNS (3), /* SI */
975 COSTS_N_INSNS (3), /* DI */
976 COSTS_N_INSNS (3)}, /* other */
977 0, /* cost of multiply per each bit set */
978 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
979 COSTS_N_INSNS (22), /* HI */
980 COSTS_N_INSNS (22), /* SI */
981 COSTS_N_INSNS (22), /* DI */
982 COSTS_N_INSNS (22)}, /* other */
983 COSTS_N_INSNS (1), /* cost of movsx */
984 COSTS_N_INSNS (1), /* cost of movzx */
985 8, /* "large" insn */
986 16, /* MOVE_RATIO */
987 2, /* cost for loading QImode using movzbl */
988 {6, 6, 6}, /* cost of loading integer registers
989 in QImode, HImode and SImode.
990 Relative to reg-reg move (2). */
991 {4, 4, 4}, /* cost of storing integer registers */
992 2, /* cost of reg,reg fld/fst */
993 {6, 6, 6}, /* cost of loading fp registers
994 in SFmode, DFmode and XFmode */
995 {4, 4, 4}, /* cost of storing fp registers
996 in SFmode, DFmode and XFmode */
997 2, /* cost of moving MMX register */
998 {6, 6}, /* cost of loading MMX registers
999 in SImode and DImode */
1000 {4, 4}, /* cost of storing MMX registers
1001 in SImode and DImode */
1002 2, /* cost of moving SSE register */
1003 {6, 6, 6}, /* cost of loading SSE registers
1004 in SImode, DImode and TImode */
1005 {4, 4, 4}, /* cost of storing SSE registers
1006 in SImode, DImode and TImode */
1007 2, /* MMX or SSE register to integer */
1008 32, /* size of l1 cache. */
1009 2048, /* size of l2 cache. */
1010 128, /* size of prefetch block */
1011 8, /* number of parallel prefetches */
1012 3, /* Branch cost */
1013 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
1014 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
1015 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
1016 COSTS_N_INSNS (1), /* cost of FABS instruction. */
1017 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
1018 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
1019 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1020 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1021 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1022 {{libcall, {{8, loop}, {15, unrolled_loop},
1023 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1024 {libcall, {{24, loop}, {32, unrolled_loop},
1025 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1026 1, /* scalar_stmt_cost. */
1027 1, /* scalar load_cost. */
1028 1, /* scalar_store_cost. */
1029 1, /* vec_stmt_cost. */
1030 1, /* vec_to_scalar_cost. */
1031 1, /* scalar_to_vec_cost. */
1032 1, /* vec_align_load_cost. */
1033 2, /* vec_unalign_load_cost. */
1034 1, /* vec_store_cost. */
1035 3, /* cond_taken_branch_cost. */
1036 1, /* cond_not_taken_branch_cost. */
1037 };
1039 static const
1040 struct processor_costs atom_cost = {
1041 COSTS_N_INSNS (1), /* cost of an add instruction */
1042 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1043 COSTS_N_INSNS (1), /* variable shift costs */
1044 COSTS_N_INSNS (1), /* constant shift costs */
1045 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1046 COSTS_N_INSNS (4), /* HI */
1047 COSTS_N_INSNS (3), /* SI */
1048 COSTS_N_INSNS (4), /* DI */
1049 COSTS_N_INSNS (2)}, /* other */
1050 0, /* cost of multiply per each bit set */
1051 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1052 COSTS_N_INSNS (26), /* HI */
1053 COSTS_N_INSNS (42), /* SI */
1054 COSTS_N_INSNS (74), /* DI */
1055 COSTS_N_INSNS (74)}, /* other */
1056 COSTS_N_INSNS (1), /* cost of movsx */
1057 COSTS_N_INSNS (1), /* cost of movzx */
1058 8, /* "large" insn */
1059 17, /* MOVE_RATIO */
1060 2, /* cost for loading QImode using movzbl */
1061 {4, 4, 4}, /* cost of loading integer registers
1062 in QImode, HImode and SImode.
1063 Relative to reg-reg move (2). */
1064 {4, 4, 4}, /* cost of storing integer registers */
1065 4, /* cost of reg,reg fld/fst */
1066 {12, 12, 12}, /* cost of loading fp registers
1067 in SFmode, DFmode and XFmode */
1068 {6, 6, 8}, /* cost of storing fp registers
1069 in SFmode, DFmode and XFmode */
1070 2, /* cost of moving MMX register */
1071 {8, 8}, /* cost of loading MMX registers
1072 in SImode and DImode */
1073 {8, 8}, /* cost of storing MMX registers
1074 in SImode and DImode */
1075 2, /* cost of moving SSE register */
1076 {8, 8, 8}, /* cost of loading SSE registers
1077 in SImode, DImode and TImode */
1078 {8, 8, 8}, /* cost of storing SSE registers
1079 in SImode, DImode and TImode */
1080 5, /* MMX or SSE register to integer */
1081 32, /* size of l1 cache. */
1082 256, /* size of l2 cache. */
1083 64, /* size of prefetch block */
1084 6, /* number of parallel prefetches */
1085 3, /* Branch cost */
1086 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1087 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1088 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1089 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1090 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1091 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1092 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1093 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1094 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1095 {{libcall, {{8, loop}, {15, unrolled_loop},
1096 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1097 {libcall, {{24, loop}, {32, unrolled_loop},
1098 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1099 1, /* scalar_stmt_cost. */
1100 1, /* scalar load_cost. */
1101 1, /* scalar_store_cost. */
1102 1, /* vec_stmt_cost. */
1103 1, /* vec_to_scalar_cost. */
1104 1, /* scalar_to_vec_cost. */
1105 1, /* vec_align_load_cost. */
1106 2, /* vec_unalign_load_cost. */
1107 1, /* vec_store_cost. */
1108 3, /* cond_taken_branch_cost. */
1109 1, /* cond_not_taken_branch_cost. */
1110 };
1112 /* Generic64 should produce code tuned for Nocona and K8. */
1113 static const
1114 struct processor_costs generic64_cost = {
1115 COSTS_N_INSNS (1), /* cost of an add instruction */
1116 /* On all chips taken into consideration, lea takes 2 cycles or more. With
1117 this cost, however, our current implementation of synth_mult ends up using
1118 unnecessary temporary registers, causing regressions on several
1119 SPECfp benchmarks. */
1120 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1121 COSTS_N_INSNS (1), /* variable shift costs */
1122 COSTS_N_INSNS (1), /* constant shift costs */
1123 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1124 COSTS_N_INSNS (4), /* HI */
1125 COSTS_N_INSNS (3), /* SI */
1126 COSTS_N_INSNS (4), /* DI */
1127 COSTS_N_INSNS (2)}, /* other */
1128 0, /* cost of multiply per each bit set */
1129 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1130 COSTS_N_INSNS (26), /* HI */
1131 COSTS_N_INSNS (42), /* SI */
1132 COSTS_N_INSNS (74), /* DI */
1133 COSTS_N_INSNS (74)}, /* other */
1134 COSTS_N_INSNS (1), /* cost of movsx */
1135 COSTS_N_INSNS (1), /* cost of movzx */
1136 8, /* "large" insn */
1137 17, /* MOVE_RATIO */
1138 4, /* cost for loading QImode using movzbl */
1139 {4, 4, 4}, /* cost of loading integer registers
1140 in QImode, HImode and SImode.
1141 Relative to reg-reg move (2). */
1142 {4, 4, 4}, /* cost of storing integer registers */
1143 4, /* cost of reg,reg fld/fst */
1144 {12, 12, 12}, /* cost of loading fp registers
1145 in SFmode, DFmode and XFmode */
1146 {6, 6, 8}, /* cost of storing fp registers
1147 in SFmode, DFmode and XFmode */
1148 2, /* cost of moving MMX register */
1149 {8, 8}, /* cost of loading MMX registers
1150 in SImode and DImode */
1151 {8, 8}, /* cost of storing MMX registers
1152 in SImode and DImode */
1153 2, /* cost of moving SSE register */
1154 {8, 8, 8}, /* cost of loading SSE registers
1155 in SImode, DImode and TImode */
1156 {8, 8, 8}, /* cost of storing SSE registers
1157 in SImode, DImode and TImode */
1158 5, /* MMX or SSE register to integer */
1159 32, /* size of l1 cache. */
1160 512, /* size of l2 cache. */
1161 64, /* size of prefetch block */
1162 6, /* number of parallel prefetches */
1163 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this value
1164 is increased to the perhaps more appropriate value of 5. */
1165 3, /* Branch cost */
1166 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1167 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1168 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1169 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1170 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1171 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1172 {DUMMY_STRINGOP_ALGS,
1173 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1174 {DUMMY_STRINGOP_ALGS,
1175 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1176 1, /* scalar_stmt_cost. */
1177 1, /* scalar load_cost. */
1178 1, /* scalar_store_cost. */
1179 1, /* vec_stmt_cost. */
1180 1, /* vec_to_scalar_cost. */
1181 1, /* scalar_to_vec_cost. */
1182 1, /* vec_align_load_cost. */
1183 2, /* vec_unalign_load_cost. */
1184 1, /* vec_store_cost. */
1185 3, /* cond_taken_branch_cost. */
1186 1, /* cond_not_taken_branch_cost. */
1187 };
1189 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
1190 static const
1191 struct processor_costs generic32_cost = {
1192 COSTS_N_INSNS (1), /* cost of an add instruction */
1193 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1194 COSTS_N_INSNS (1), /* variable shift costs */
1195 COSTS_N_INSNS (1), /* constant shift costs */
1196 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1197 COSTS_N_INSNS (4), /* HI */
1198 COSTS_N_INSNS (3), /* SI */
1199 COSTS_N_INSNS (4), /* DI */
1200 COSTS_N_INSNS (2)}, /* other */
1201 0, /* cost of multiply per each bit set */
1202 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1203 COSTS_N_INSNS (26), /* HI */
1204 COSTS_N_INSNS (42), /* SI */
1205 COSTS_N_INSNS (74), /* DI */
1206 COSTS_N_INSNS (74)}, /* other */
1207 COSTS_N_INSNS (1), /* cost of movsx */
1208 COSTS_N_INSNS (1), /* cost of movzx */
1209 8, /* "large" insn */
1210 17, /* MOVE_RATIO */
1211 4, /* cost for loading QImode using movzbl */
1212 {4, 4, 4}, /* cost of loading integer registers
1213 in QImode, HImode and SImode.
1214 Relative to reg-reg move (2). */
1215 {4, 4, 4}, /* cost of storing integer registers */
1216 4, /* cost of reg,reg fld/fst */
1217 {12, 12, 12}, /* cost of loading fp registers
1218 in SFmode, DFmode and XFmode */
1219 {6, 6, 8}, /* cost of storing fp registers
1220 in SFmode, DFmode and XFmode */
1221 2, /* cost of moving MMX register */
1222 {8, 8}, /* cost of loading MMX registers
1223 in SImode and DImode */
1224 {8, 8}, /* cost of storing MMX registers
1225 in SImode and DImode */
1226 2, /* cost of moving SSE register */
1227 {8, 8, 8}, /* cost of loading SSE registers
1228 in SImode, DImode and TImode */
1229 {8, 8, 8}, /* cost of storing SSE registers
1230 in SImode, DImode and TImode */
1231 5, /* MMX or SSE register to integer */
1232 32, /* size of l1 cache. */
1233 256, /* size of l2 cache. */
1234 64, /* size of prefetch block */
1235 6, /* number of parallel prefetches */
1236 3, /* Branch cost */
1237 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1238 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1239 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1240 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1241 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1242 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1243 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1244 DUMMY_STRINGOP_ALGS},
1245 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1246 DUMMY_STRINGOP_ALGS},
1247 1, /* scalar_stmt_cost. */
1248 1, /* scalar load_cost. */
1249 1, /* scalar_store_cost. */
1250 1, /* vec_stmt_cost. */
1251 1, /* vec_to_scalar_cost. */
1252 1, /* scalar_to_vec_cost. */
1253 1, /* vec_align_load_cost. */
1254 2, /* vec_unalign_load_cost. */
1255 1, /* vec_store_cost. */
1256 3, /* cond_taken_branch_cost. */
1257 1, /* cond_not_taken_branch_cost. */
1258 };
1260 const struct processor_costs *ix86_cost = &pentium_cost;
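/* Sketch of how this pointer gets retargeted (a simplified rendering, with
   processor_target_table assumed to map each PROCESSOR_* value to its cost
   table):

     ix86_cost = optimize_size ? &ix86_size_cost
                               : processor_target_table[ix86_tune].cost;

   Later code queries costs only through ix86_cost, so changing -mtune or
   optimizing for size just swaps which table it points at.  */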
1262 /* Processor feature/optimization bitmasks. */
1263 #define m_386 (1<<PROCESSOR_I386)
1264 #define m_486 (1<<PROCESSOR_I486)
1265 #define m_PENT (1<<PROCESSOR_PENTIUM)
1266 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1267 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1268 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1269 #define m_CORE2 (1<<PROCESSOR_CORE2)
1270 #define m_ATOM (1<<PROCESSOR_ATOM)
1272 #define m_GEODE (1<<PROCESSOR_GEODE)
1273 #define m_K6 (1<<PROCESSOR_K6)
1274 #define m_K6_GEODE (m_K6 | m_GEODE)
1275 #define m_K8 (1<<PROCESSOR_K8)
1276 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1277 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1278 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1279 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)
1281 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1282 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1284 /* Generic instruction choice should be common subset of supported CPUs
1285 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1286 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
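/* Sketch of how the m_* masks are consumed (a simplified rendering, with
   ix86_tune assumed to hold the PROCESSOR_* value chosen by -mtune):

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
         = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   A tuning flag is therefore enabled exactly when its entry below includes
   the bit for the active processor.  */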
1288 /* Feature tests against the various tunings. */
1289 unsigned char ix86_tune_features[X86_TUNE_LAST];
1291 /* Feature tests against the various tunings used to create ix86_tune_features
1292 based on the processor mask. */
1293 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1294 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1295 negatively, so enabling for Generic64 seems like good code size
1296 tradeoff. We can't enable it for 32bit generic because it does not
1297 work well with PPro base chips. */
1298 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1300 /* X86_TUNE_PUSH_MEMORY */
1301 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1302 | m_NOCONA | m_CORE2 | m_GENERIC,
1304 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1305 m_486 | m_PENT,
1307 /* X86_TUNE_UNROLL_STRLEN */
1308 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1309 | m_CORE2 | m_GENERIC,
1311 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1312 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1314 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1315 on simulation result. But after P4 was made, no performance benefit
1316 was observed with branch hints. It also increases the code size.
1317 As a result, icc never generates branch hints. */
1318 0,
1320 /* X86_TUNE_DOUBLE_WITH_ADD */
1321 ~m_386,
1323 /* X86_TUNE_USE_SAHF */
1324 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
1325 | m_NOCONA | m_CORE2 | m_GENERIC,
1327 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1328 partial dependencies. */
1329 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1330 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1332 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1333 register stalls on the Generic32 compilation setting as well. However,
1334 in the current implementation the partial register stalls are not eliminated
1335 very well - they can be introduced via subregs synthesized by combine
1336 and can happen in caller/callee saving sequences. Because this option
1337 pays back little on PPro based chips and conflicts with the partial reg
1338 dependencies used by Athlon/P4 based chips, it is better to leave it off
1339 for generic32 for now. */
1340 m_PPRO,
1342 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1343 m_CORE2 | m_GENERIC,
1345 /* X86_TUNE_USE_HIMODE_FIOP */
1346 m_386 | m_486 | m_K6_GEODE,
1348 /* X86_TUNE_USE_SIMODE_FIOP */
1349 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1351 /* X86_TUNE_USE_MOV0 */
1352 m_K6,
1354 /* X86_TUNE_USE_CLTD */
1355 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1357 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1358 m_PENT4,
1360 /* X86_TUNE_SPLIT_LONG_MOVES */
1361 m_PPRO,
1363 /* X86_TUNE_READ_MODIFY_WRITE */
1364 ~m_PENT,
1366 /* X86_TUNE_READ_MODIFY */
1367 ~(m_PENT | m_PPRO),
1369 /* X86_TUNE_PROMOTE_QIMODE */
1370 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1371 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1373 /* X86_TUNE_FAST_PREFIX */
1374 ~(m_PENT | m_486 | m_386),
1376 /* X86_TUNE_SINGLE_STRINGOP */
1377 m_386 | m_PENT4 | m_NOCONA,
1379 /* X86_TUNE_QIMODE_MATH */
1380 ~0,
1382 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1383 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1384 might be considered for Generic32 if our scheme for avoiding partial
1385 stalls was more effective. */
1386 ~m_PPRO,
1388 /* X86_TUNE_PROMOTE_QI_REGS */
1389 0,
1391 /* X86_TUNE_PROMOTE_HI_REGS */
1392 m_PPRO,
1394 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1395 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1396 | m_CORE2 | m_GENERIC,
1398 /* X86_TUNE_ADD_ESP_8 */
1399 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1400 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1402 /* X86_TUNE_SUB_ESP_4 */
1403 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1404 | m_GENERIC,
1406 /* X86_TUNE_SUB_ESP_8 */
1407 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1408 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1410 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1411 for DFmode copies */
1412 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1413 | m_GENERIC | m_GEODE),
1415 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1416 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1418 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1419 conflict here between PPro/Pentium4 based chips that treat 128bit
1420 SSE registers as single units and K8 based chips that divide SSE
1421 registers into two 64bit halves. This knob promotes all store destinations
1422 to be 128bit to allow register renaming on 128bit SSE units, but usually
1423 results in one extra micro-op on 64bit SSE units. Experimental results
1424 show that disabling this option on P4 brings over a 20% SPECfp regression,
1425 while enabling it on K8 brings roughly a 2.4% regression that can be partly
1426 masked by careful scheduling of moves. */
1427 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1428 | m_AMDFAM10,
1430 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
1431 m_AMDFAM10,
1433 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1434 are resolved on SSE register parts instead of whole registers, so we may
 1435      maintain just the lower part of scalar values in proper format, leaving the
1436 upper part undefined. */
1437 m_ATHLON_K8,
1439 /* X86_TUNE_SSE_TYPELESS_STORES */
1440 m_AMD_MULTIPLE,
1442 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1443 m_PPRO | m_PENT4 | m_NOCONA,
1445 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1446 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1448 /* X86_TUNE_PROLOGUE_USING_MOVE */
1449 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1451 /* X86_TUNE_EPILOGUE_USING_MOVE */
1452 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1454 /* X86_TUNE_SHIFT1 */
1455 ~m_486,
1457 /* X86_TUNE_USE_FFREEP */
1458 m_AMD_MULTIPLE,
1460 /* X86_TUNE_INTER_UNIT_MOVES */
1461 ~(m_AMD_MULTIPLE | m_GENERIC),
1463 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1464 ~(m_AMDFAM10),
1466 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1467 than 4 branch instructions in the 16 byte window. */
1468 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1469 | m_GENERIC,
1471 /* X86_TUNE_SCHEDULE */
1472 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1473 | m_GENERIC,
1475 /* X86_TUNE_USE_BT */
1476 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1478 /* X86_TUNE_USE_INCDEC */
1479 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1481 /* X86_TUNE_PAD_RETURNS */
1482 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1484 /* X86_TUNE_EXT_80387_CONSTANTS */
1485 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1486 | m_CORE2 | m_GENERIC,
1488 /* X86_TUNE_SHORTEN_X87_SSE */
1489 ~m_K8,
1491 /* X86_TUNE_AVOID_VECTOR_DECODE */
1492 m_K8 | m_GENERIC64,
 1494   /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
1495 and SImode multiply, but 386 and 486 do HImode multiply faster. */
1496 ~(m_386 | m_486),
1498 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1499 vector path on AMD machines. */
1500 m_K8 | m_GENERIC64 | m_AMDFAM10,
1502 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1503 machines. */
1504 m_K8 | m_GENERIC64 | m_AMDFAM10,
1506 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
1507 than a MOV. */
1508 m_PENT,
1510 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1511 but one byte longer. */
1512 m_PENT,
1514 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1515 operand that cannot be represented using a modRM byte. The XOR
1516 replacement is long decoded, so this split helps here as well. */
1517 m_K6,
1519 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
1520 from FP to FP. */
1521 m_AMDFAM10 | m_GENERIC,
1523 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1524 from integer to FP. */
1525 m_AMDFAM10,
1527 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1528 with a subsequent conditional jump instruction into a single
1529 compare-and-branch uop. */
1530 m_CORE2,
1532 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1533 will impact LEA instruction selection. */
1534 m_ATOM,
1537 /* Feature tests against the various architecture variations. */
1538 unsigned char ix86_arch_features[X86_ARCH_LAST];
1540 /* Feature tests against the various architecture variations, used to create
1541 ix86_arch_features based on the processor mask. */
1542 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1543 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1544 ~(m_386 | m_486 | m_PENT | m_K6),
1546 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1547 ~m_386,
1549 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1550 ~(m_386 | m_486),
1552 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1553 ~m_386,
1555 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1556 ~m_386,
1559 static const unsigned int x86_accumulate_outgoing_args
1560 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1561 | m_GENERIC;
1563 static const unsigned int x86_arch_always_fancy_math_387
1564 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1565 | m_NOCONA | m_CORE2 | m_GENERIC;
1567 static enum stringop_alg stringop_alg = no_stringop;
 1569 /* In case the average insn count for a single function invocation is
1570 lower than this constant, emit fast (but longer) prologue and
1571 epilogue code. */
1572 #define FAST_PROLOGUE_INSN_COUNT 20
1574 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1575 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1576 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1577 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1579 /* Array of the smallest class containing reg number REGNO, indexed by
1580 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1582 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1584 /* ax, dx, cx, bx */
1585 AREG, DREG, CREG, BREG,
1586 /* si, di, bp, sp */
1587 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1588 /* FP registers */
1589 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1590 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1591 /* arg pointer */
1592 NON_Q_REGS,
1593 /* flags, fpsr, fpcr, frame */
1594 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1595 /* SSE registers */
1596 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1597 SSE_REGS, SSE_REGS,
1598 /* MMX registers */
1599 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1600 MMX_REGS, MMX_REGS,
1601 /* REX registers */
1602 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1603 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1604 /* SSE REX registers */
1605 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1606 SSE_REGS, SSE_REGS,
1609 /* The "default" register map used in 32bit mode. */
1611 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1613 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1614 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1615 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1616 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1617 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1618 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1619 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1622 /* The "default" register map used in 64bit mode. */
1624 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1626 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1627 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1628 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1629 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1630 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1631 8,9,10,11,12,13,14,15, /* extended integer registers */
1632 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1635 /* Define the register numbers to be used in Dwarf debugging information.
1636 The SVR4 reference port C compiler uses the following register numbers
1637 in its Dwarf output code:
1638 0 for %eax (gcc regno = 0)
1639 1 for %ecx (gcc regno = 2)
1640 2 for %edx (gcc regno = 1)
1641 3 for %ebx (gcc regno = 3)
1642 4 for %esp (gcc regno = 7)
1643 5 for %ebp (gcc regno = 6)
1644 6 for %esi (gcc regno = 4)
1645 7 for %edi (gcc regno = 5)
1646 The following three DWARF register numbers are never generated by
1647 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1648 believes these numbers have these meanings.
1649 8 for %eip (no gcc equivalent)
1650 9 for %eflags (gcc regno = 17)
1651 10 for %trapno (no gcc equivalent)
1652 It is not at all clear how we should number the FP stack registers
1653 for the x86 architecture. If the version of SDB on x86/svr4 were
1654 a bit less brain dead with respect to floating-point then we would
1655 have a precedent to follow with respect to DWARF register numbers
1656 for x86 FP registers, but the SDB on x86/svr4 is so completely
1657 broken with respect to FP registers that it is hardly worth thinking
1658 of it as something to strive for compatibility with.
1659 The version of x86/svr4 SDB I have at the moment does (partially)
1660 seem to believe that DWARF register number 11 is associated with
1661 the x86 register %st(0), but that's about all. Higher DWARF
1662 register numbers don't seem to be associated with anything in
1663 particular, and even for DWARF regno 11, SDB only seems to under-
1664 stand that it should say that a variable lives in %st(0) (when
1665 asked via an `=' command) if we said it was in DWARF regno 11,
1666 but SDB still prints garbage when asked for the value of the
1667 variable in question (via a `/' command).
1668 (Also note that the labels SDB prints for various FP stack regs
1669 when doing an `x' command are all wrong.)
1670 Note that these problems generally don't affect the native SVR4
1671 C compiler because it doesn't allow the use of -O with -g and
1672 because when it is *not* optimizing, it allocates a memory
1673 location for each floating-point variable, and the memory
1674 location is what gets described in the DWARF AT_location
1675 attribute for the variable in question.
1676 Regardless of the severe mental illness of the x86/svr4 SDB, we
1677 do something sensible here and we use the following DWARF
1678 register numbers. Note that these are all stack-top-relative
1679 numbers.
1680 11 for %st(0) (gcc regno = 8)
1681 12 for %st(1) (gcc regno = 9)
1682 13 for %st(2) (gcc regno = 10)
1683 14 for %st(3) (gcc regno = 11)
1684 15 for %st(4) (gcc regno = 12)
1685 16 for %st(5) (gcc regno = 13)
1686 17 for %st(6) (gcc regno = 14)
1687 18 for %st(7) (gcc regno = 15)
1689 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1691 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1692 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1693 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1694 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1695 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1696 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1697 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
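/* Worked example of the mapping above: %esi is gcc regno 4, so debug info
   for a value living in %esi uses DWARF register number
   svr4_dbx_register_map[4] == 6, matching the SVR4 numbering in the big
   comment before this table; conversely %ebp (gcc regno 6) maps to DWARF
   regno 5 and %esp (gcc regno 7) to DWARF regno 4.  */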
1700 /* Test and compare insns in i386.md store the information needed to
1701 generate branch and scc insns here. */
1703 rtx ix86_compare_op0 = NULL_RTX;
1704 rtx ix86_compare_op1 = NULL_RTX;
1706 /* Define parameter passing and return registers. */
1708 static int const x86_64_int_parameter_registers[6] =
1710 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1713 static int const x86_64_ms_abi_int_parameter_registers[4] =
1715 CX_REG, DX_REG, R8_REG, R9_REG
1718 static int const x86_64_int_return_registers[4] =
1720 AX_REG, DX_REG, DI_REG, SI_REG
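/* Illustration of the tables above (assuming the usual SysV and Microsoft
   x86-64 conventions): for a call to
     long f (long a, long b, long c);
   the SysV ABI passes A in %rdi, B in %rsi and C in %rdx, while the MS ABI
   passes A in %rcx, B in %rdx and C in %r8; a long result comes back in
   %rax under both ABIs.  */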
1723 /* Define the structure for the machine field in struct function. */
1725 struct GTY(()) stack_local_entry {
1726 unsigned short mode;
1727 unsigned short n;
1728 rtx rtl;
1729 struct stack_local_entry *next;
1732 /* Structure describing stack frame layout.
1733 Stack grows downward:
1735 [arguments]
1736 <- ARG_POINTER
1737 saved pc
1739 saved frame pointer if frame_pointer_needed
1740 <- HARD_FRAME_POINTER
1741 [saved regs]
1743 [padding0]
1745 [saved SSE regs]
1747 [padding1] \
1749 [va_arg registers] (
1750 > to_allocate <- FRAME_POINTER
1751 [frame] (
1753 [padding2] /
1755 struct ix86_frame
1757 int padding0;
1758 int nsseregs;
1759 int nregs;
1760 int padding1;
1761 int va_arg_size;
1762 HOST_WIDE_INT frame;
1763 int padding2;
1764 int outgoing_arguments_size;
1765 int red_zone_size;
1767 HOST_WIDE_INT to_allocate;
1768 /* The offsets relative to ARG_POINTER. */
1769 HOST_WIDE_INT frame_pointer_offset;
1770 HOST_WIDE_INT hard_frame_pointer_offset;
1771 HOST_WIDE_INT stack_pointer_offset;
1773 /* When save_regs_using_mov is set, emit prologue using
1774 move instead of push instructions. */
1775 bool save_regs_using_mov;
1778 /* Code model option. */
1779 enum cmodel ix86_cmodel;
1780 /* Asm dialect. */
1781 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1782 /* TLS dialects. */
1783 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1785 /* Which unit we are generating floating point math for. */
1786 enum fpmath_unit ix86_fpmath;
1788 /* Which cpu are we scheduling for. */
1789 enum attr_cpu ix86_schedule;
1791 /* Which cpu are we optimizing for. */
1792 enum processor_type ix86_tune;
1794 /* Which instruction set architecture to use. */
1795 enum processor_type ix86_arch;
1797 /* true if sse prefetch instruction is not NOOP. */
1798 int x86_prefetch_sse;
1800 /* ix86_regparm_string as a number */
1801 static int ix86_regparm;
1803 /* -mstackrealign option */
1804 extern int ix86_force_align_arg_pointer;
1805 static const char ix86_force_align_arg_pointer_string[]
1806 = "force_align_arg_pointer";
1808 static rtx (*ix86_gen_leave) (void);
1809 static rtx (*ix86_gen_pop1) (rtx);
1810 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1811 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1812 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1813 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1814 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1815 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1817 /* Preferred alignment for stack boundary in bits. */
1818 unsigned int ix86_preferred_stack_boundary;
1820 /* Alignment for incoming stack boundary in bits specified at
1821 command line. */
1822 static unsigned int ix86_user_incoming_stack_boundary;
1824 /* Default alignment for incoming stack boundary in bits. */
1825 static unsigned int ix86_default_incoming_stack_boundary;
1827 /* Alignment for incoming stack boundary in bits. */
1828 unsigned int ix86_incoming_stack_boundary;
1830 /* The abi used by target. */
1831 enum calling_abi ix86_abi;
1833 /* Values 1-5: see jump.c */
1834 int ix86_branch_cost;
1836 /* Calling abi specific va_list type nodes. */
1837 static GTY(()) tree sysv_va_list_type_node;
1838 static GTY(()) tree ms_va_list_type_node;
1840 /* Variables which are this size or smaller are put in the data/bss
1841 or ldata/lbss sections. */
1843 int ix86_section_threshold = 65536;
1845 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1846 char internal_label_prefix[16];
1847 int internal_label_prefix_len;
1849 /* Fence to use after loop using movnt. */
1850 tree x86_mfence;
1852 /* Register class used for passing given 64bit part of the argument.
1853 These represent classes as documented by the PS ABI, with the exception
 1854    of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
 1855    uses SFmode or DFmode moves instead of DImode to avoid reformatting penalties.
 1857    Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
 1858    whenever possible (the upper half then contains only padding).  */
1859 enum x86_64_reg_class
1861 X86_64_NO_CLASS,
1862 X86_64_INTEGER_CLASS,
1863 X86_64_INTEGERSI_CLASS,
1864 X86_64_SSE_CLASS,
1865 X86_64_SSESF_CLASS,
1866 X86_64_SSEDF_CLASS,
1867 X86_64_SSEUP_CLASS,
1868 X86_64_X87_CLASS,
1869 X86_64_X87UP_CLASS,
1870 X86_64_COMPLEX_X87_CLASS,
1871 X86_64_MEMORY_CLASS
1874 #define MAX_CLASSES 4
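/* Illustrative sketch of the classification (following the psABI rules
   implemented later in this file): the 16-byte
     struct p { double x; double y; };
   splits into two eightbytes that classify as X86_64_SSEDF_CLASS and
   X86_64_SSEDF_CLASS and is therefore passed in two SSE registers, while
   struct { long a; long b; } classifies as two X86_64_INTEGER_CLASS
   eightbytes and is passed in integer registers.  */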
1876 /* Table of constants used by fldpi, fldln2, etc.... */
1877 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1878 static bool ext_80387_constants_init = 0;
1881 static struct machine_function * ix86_init_machine_status (void);
1882 static rtx ix86_function_value (const_tree, const_tree, bool);
1883 static rtx ix86_static_chain (const_tree, bool);
1884 static int ix86_function_regparm (const_tree, const_tree);
1885 static void ix86_compute_frame_layout (struct ix86_frame *);
1886 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1887 rtx, rtx, int);
1888 static void ix86_add_new_builtins (int);
1889 static rtx ix86_expand_vec_perm_builtin (tree);
1891 enum ix86_function_specific_strings
1893 IX86_FUNCTION_SPECIFIC_ARCH,
1894 IX86_FUNCTION_SPECIFIC_TUNE,
1895 IX86_FUNCTION_SPECIFIC_FPMATH,
1896 IX86_FUNCTION_SPECIFIC_MAX
1899 static char *ix86_target_string (int, int, const char *, const char *,
1900 const char *, bool);
1901 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1902 static void ix86_function_specific_save (struct cl_target_option *);
1903 static void ix86_function_specific_restore (struct cl_target_option *);
1904 static void ix86_function_specific_print (FILE *, int,
1905 struct cl_target_option *);
1906 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1907 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1908 static bool ix86_can_inline_p (tree, tree);
1909 static void ix86_set_current_function (tree);
1910 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
1912 static enum calling_abi ix86_function_abi (const_tree);
1915 #ifndef SUBTARGET32_DEFAULT_CPU
1916 #define SUBTARGET32_DEFAULT_CPU "i386"
1917 #endif
1919 /* The svr4 ABI for the i386 says that records and unions are returned
1920 in memory. */
1921 #ifndef DEFAULT_PCC_STRUCT_RETURN
1922 #define DEFAULT_PCC_STRUCT_RETURN 1
1923 #endif
1925 /* Whether -mtune= or -march= were specified */
1926 static int ix86_tune_defaulted;
1927 static int ix86_arch_specified;
1929 /* Bit flags that specify the ISA we are compiling for. */
1930 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1932 /* A mask of ix86_isa_flags that includes bit X if X
1933 was set or cleared on the command line. */
1934 static int ix86_isa_flags_explicit;
1936 /* Define a set of ISAs which are available when a given ISA is
1937 enabled. MMX and SSE ISAs are handled separately. */
1939 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1940 #define OPTION_MASK_ISA_3DNOW_SET \
1941 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1943 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1944 #define OPTION_MASK_ISA_SSE2_SET \
1945 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1946 #define OPTION_MASK_ISA_SSE3_SET \
1947 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1948 #define OPTION_MASK_ISA_SSSE3_SET \
1949 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1950 #define OPTION_MASK_ISA_SSE4_1_SET \
1951 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1952 #define OPTION_MASK_ISA_SSE4_2_SET \
1953 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1954 #define OPTION_MASK_ISA_AVX_SET \
1955 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1956 #define OPTION_MASK_ISA_FMA_SET \
1957 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1959 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1960 as -msse4.2. */
1961 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1963 #define OPTION_MASK_ISA_SSE4A_SET \
1964 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1965 #define OPTION_MASK_ISA_FMA4_SET \
1966 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
1967 | OPTION_MASK_ISA_AVX_SET)
1968 #define OPTION_MASK_ISA_XOP_SET \
1969 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
1970 #define OPTION_MASK_ISA_LWP_SET \
1971 OPTION_MASK_ISA_LWP
1973 /* AES and PCLMUL need SSE2 because they use xmm registers */
1974 #define OPTION_MASK_ISA_AES_SET \
1975 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1976 #define OPTION_MASK_ISA_PCLMUL_SET \
1977 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1979 #define OPTION_MASK_ISA_ABM_SET \
1980 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1982 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1983 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1984 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1985 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
1986 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
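/* Example of how the _SET masks chain (derived from the definitions above):
   OPTION_MASK_ISA_SSE4_1_SET expands to
     SSE4_1 | SSSE3 | SSE3 | SSE2 | SSE,
   so a plain -msse4.1 also turns on every earlier SSE generation, and
   -mavx likewise pulls in everything up to and including SSE4.2.  */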
1988 /* Define a set of ISAs which aren't available when a given ISA is
1989 disabled. MMX and SSE ISAs are handled separately. */
1991 #define OPTION_MASK_ISA_MMX_UNSET \
1992 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1993 #define OPTION_MASK_ISA_3DNOW_UNSET \
1994 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1995 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1997 #define OPTION_MASK_ISA_SSE_UNSET \
1998 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1999 #define OPTION_MASK_ISA_SSE2_UNSET \
2000 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2001 #define OPTION_MASK_ISA_SSE3_UNSET \
2002 (OPTION_MASK_ISA_SSE3 \
2003 | OPTION_MASK_ISA_SSSE3_UNSET \
2004 | OPTION_MASK_ISA_SSE4A_UNSET )
2005 #define OPTION_MASK_ISA_SSSE3_UNSET \
2006 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2007 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2008 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2009 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2010 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2011 #define OPTION_MASK_ISA_AVX_UNSET \
2012 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2013 | OPTION_MASK_ISA_FMA4_UNSET)
2014 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
 2016 /* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should be the same
2017 as -mno-sse4.1. */
2018 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2020 #define OPTION_MASK_ISA_SSE4A_UNSET \
2021 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2023 #define OPTION_MASK_ISA_FMA4_UNSET \
2024 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2025 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2026 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2028 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2029 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2030 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2031 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2032 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2033 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2034 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2035 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
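/* Example of the _UNSET direction (again derived from the definitions
   above): -mno-sse2 clears OPTION_MASK_ISA_SSE2_UNSET, which cascades
   through SSE3, SSSE3, SSE4.1, SSE4.2, SSE4A, AVX, FMA, FMA4 and XOP, so
   disabling SSE2 also disables every later SSE generation and the vector
   extensions built on top of them.  */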
2037 /* Vectorization library interface and handlers. */
2038 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2039 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2040 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2042 /* Processor target table, indexed by processor number */
2043 struct ptt
2045 const struct processor_costs *cost; /* Processor costs */
2046 const int align_loop; /* Default alignments. */
2047 const int align_loop_max_skip;
2048 const int align_jump;
2049 const int align_jump_max_skip;
2050 const int align_func;
2053 static const struct ptt processor_target_table[PROCESSOR_max] =
2055 {&i386_cost, 4, 3, 4, 3, 4},
2056 {&i486_cost, 16, 15, 16, 15, 16},
2057 {&pentium_cost, 16, 7, 16, 7, 16},
2058 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2059 {&geode_cost, 0, 0, 0, 0, 0},
2060 {&k6_cost, 32, 7, 32, 7, 32},
2061 {&athlon_cost, 16, 7, 16, 7, 16},
2062 {&pentium4_cost, 0, 0, 0, 0, 0},
2063 {&k8_cost, 16, 7, 16, 7, 16},
2064 {&nocona_cost, 0, 0, 0, 0, 0},
2065 {&core2_cost, 16, 10, 16, 10, 16},
2066 {&generic32_cost, 16, 7, 16, 7, 16},
2067 {&generic64_cost, 16, 10, 16, 10, 16},
2068 {&amdfam10_cost, 32, 24, 32, 7, 32},
2069 {&atom_cost, 16, 7, 16, 7, 16}
2072 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2074 "generic",
2075 "i386",
2076 "i486",
2077 "pentium",
2078 "pentium-mmx",
2079 "pentiumpro",
2080 "pentium2",
2081 "pentium3",
2082 "pentium4",
2083 "pentium-m",
2084 "prescott",
2085 "nocona",
2086 "core2",
2087 "atom",
2088 "geode",
2089 "k6",
2090 "k6-2",
2091 "k6-3",
2092 "athlon",
2093 "athlon-4",
2094 "k8",
2095 "amdfam10"
2098 /* Implement TARGET_HANDLE_OPTION. */
2100 static bool
2101 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2103 switch (code)
2105 case OPT_mmmx:
2106 if (value)
2108 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2109 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2111 else
2113 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2114 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2116 return true;
2118 case OPT_m3dnow:
2119 if (value)
2121 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2122 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2124 else
2126 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2127 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2129 return true;
2131 case OPT_m3dnowa:
2132 return false;
2134 case OPT_msse:
2135 if (value)
2137 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2138 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2140 else
2142 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2143 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2145 return true;
2147 case OPT_msse2:
2148 if (value)
2150 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2151 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2153 else
2155 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2156 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2158 return true;
2160 case OPT_msse3:
2161 if (value)
2163 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2164 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2166 else
2168 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2169 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2171 return true;
2173 case OPT_mssse3:
2174 if (value)
2176 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2177 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2179 else
2181 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2182 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2184 return true;
2186 case OPT_msse4_1:
2187 if (value)
2189 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2190 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2192 else
2194 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2195 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2197 return true;
2199 case OPT_msse4_2:
2200 if (value)
2202 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2203 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2205 else
2207 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2208 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2210 return true;
2212 case OPT_mavx:
2213 if (value)
2215 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2216 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2218 else
2220 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2221 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2223 return true;
2225 case OPT_mfma:
2226 if (value)
2228 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2229 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2231 else
2233 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2234 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2236 return true;
2238 case OPT_msse4:
2239 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2240 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2241 return true;
2243 case OPT_mno_sse4:
2244 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2245 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2246 return true;
2248 case OPT_msse4a:
2249 if (value)
2251 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2252 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2254 else
2256 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2257 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2259 return true;
2261 case OPT_mfma4:
2262 if (value)
2264 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2265 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2267 else
2269 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2270 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2272 return true;
2274 case OPT_mxop:
2275 if (value)
2277 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2278 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2280 else
2282 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2283 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2285 return true;
2287 case OPT_mlwp:
2288 if (value)
2290 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2291 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2293 else
2295 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2296 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2298 return true;
2300 case OPT_mabm:
2301 if (value)
2303 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2304 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2306 else
2308 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2309 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2311 return true;
2313 case OPT_mpopcnt:
2314 if (value)
2316 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2317 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2319 else
2321 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2322 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2324 return true;
2326 case OPT_msahf:
2327 if (value)
2329 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2330 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2332 else
2334 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2335 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2337 return true;
2339 case OPT_mcx16:
2340 if (value)
2342 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2343 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2345 else
2347 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2348 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2350 return true;
2352 case OPT_mmovbe:
2353 if (value)
2355 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2356 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2358 else
2360 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2361 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2363 return true;
2365 case OPT_mcrc32:
2366 if (value)
2368 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2369 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2371 else
2373 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2374 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2376 return true;
2378 case OPT_maes:
2379 if (value)
2381 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2382 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2384 else
2386 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2387 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2389 return true;
2391 case OPT_mpclmul:
2392 if (value)
2394 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2395 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2397 else
2399 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2400 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2402 return true;
2404 default:
2405 return true;
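      /* Sketch of how the explicit mask recorded above is consumed later by
	 the PTA_* handling in override_options: after "-mno-sse3 -march=core2"
	 the whole OPTION_MASK_ISA_SSE3_UNSET mask is already in
	 ix86_isa_flags_explicit, so the PTA_SSE3 bit of the core2 entry in
	 processor_alias_table does not switch SSE3 back on.  */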
2409 /* Return a string that documents the current -m options. The caller is
2410 responsible for freeing the string. */
2412 static char *
2413 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2414 const char *fpmath, bool add_nl_p)
2416 struct ix86_target_opts
2418 const char *option; /* option string */
2419 int mask; /* isa mask options */
 2422   /* This table is ordered so that options like -msse4.2, which imply
 2423      preceding options, are matched first. */
2424 static struct ix86_target_opts isa_opts[] =
2426 { "-m64", OPTION_MASK_ISA_64BIT },
2427 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2428 { "-mfma", OPTION_MASK_ISA_FMA },
2429 { "-mxop", OPTION_MASK_ISA_XOP },
2430 { "-mlwp", OPTION_MASK_ISA_LWP },
2431 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2432 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2433 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2434 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2435 { "-msse3", OPTION_MASK_ISA_SSE3 },
2436 { "-msse2", OPTION_MASK_ISA_SSE2 },
2437 { "-msse", OPTION_MASK_ISA_SSE },
2438 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2439 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2440 { "-mmmx", OPTION_MASK_ISA_MMX },
2441 { "-mabm", OPTION_MASK_ISA_ABM },
2442 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2443 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2444 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2445 { "-maes", OPTION_MASK_ISA_AES },
2446 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2449 /* Flag options. */
2450 static struct ix86_target_opts flag_opts[] =
2452 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2453 { "-m80387", MASK_80387 },
2454 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2455 { "-malign-double", MASK_ALIGN_DOUBLE },
2456 { "-mcld", MASK_CLD },
2457 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2458 { "-mieee-fp", MASK_IEEE_FP },
2459 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2460 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2461 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2462 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2463 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2464 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2465 { "-mno-red-zone", MASK_NO_RED_ZONE },
2466 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2467 { "-mrecip", MASK_RECIP },
2468 { "-mrtd", MASK_RTD },
2469 { "-msseregparm", MASK_SSEREGPARM },
2470 { "-mstack-arg-probe", MASK_STACK_PROBE },
2471 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2474 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2476 char isa_other[40];
2477 char target_other[40];
2478 unsigned num = 0;
2479 unsigned i, j;
2480 char *ret;
2481 char *ptr;
2482 size_t len;
2483 size_t line_len;
2484 size_t sep_len;
2486 memset (opts, '\0', sizeof (opts));
2488 /* Add -march= option. */
2489 if (arch)
2491 opts[num][0] = "-march=";
2492 opts[num++][1] = arch;
2495 /* Add -mtune= option. */
2496 if (tune)
2498 opts[num][0] = "-mtune=";
2499 opts[num++][1] = tune;
2502 /* Pick out the options in isa options. */
2503 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2505 if ((isa & isa_opts[i].mask) != 0)
2507 opts[num++][0] = isa_opts[i].option;
2508 isa &= ~ isa_opts[i].mask;
2512 if (isa && add_nl_p)
2514 opts[num++][0] = isa_other;
2515 sprintf (isa_other, "(other isa: 0x%x)", isa);
2518 /* Add flag options. */
2519 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2521 if ((flags & flag_opts[i].mask) != 0)
2523 opts[num++][0] = flag_opts[i].option;
2524 flags &= ~ flag_opts[i].mask;
2528 if (flags && add_nl_p)
2530 opts[num++][0] = target_other;
 2531       sprintf (target_other, "(other flags: 0x%x)", flags);
2534 /* Add -fpmath= option. */
2535 if (fpmath)
2537 opts[num][0] = "-mfpmath=";
2538 opts[num++][1] = fpmath;
2541 /* Any options? */
2542 if (num == 0)
2543 return NULL;
2545 gcc_assert (num < ARRAY_SIZE (opts));
2547 /* Size the string. */
2548 len = 0;
2549 sep_len = (add_nl_p) ? 3 : 1;
2550 for (i = 0; i < num; i++)
2552 len += sep_len;
2553 for (j = 0; j < 2; j++)
2554 if (opts[i][j])
2555 len += strlen (opts[i][j]);
2558 /* Build the string. */
2559 ret = ptr = (char *) xmalloc (len);
2560 line_len = 0;
2562 for (i = 0; i < num; i++)
2564 size_t len2[2];
2566 for (j = 0; j < 2; j++)
2567 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2569 if (i != 0)
2571 *ptr++ = ' ';
2572 line_len++;
2574 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2576 *ptr++ = '\\';
2577 *ptr++ = '\n';
2578 line_len = 0;
2582 for (j = 0; j < 2; j++)
2583 if (opts[i][j])
2585 memcpy (ptr, opts[i][j], len2[j]);
2586 ptr += len2[j];
2587 line_len += len2[j];
2591 *ptr = '\0';
2592 gcc_assert (ret + len >= ptr);
2594 return ret;
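  /* Example of the resulting string (a sketch based on the tables above):
     a call such as
       ix86_target_string (OPTION_MASK_ISA_64BIT | OPTION_MASK_ISA_SSE2
			   | OPTION_MASK_ISA_SSE,
			   MASK_80387, "core2", NULL, "sse", true)
     returns a freshly xmalloc'ed string along the lines of
       "-march=core2 -m64 -msse2 -msse -m80387 -mfpmath=sse"
     which the caller must free.  */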
2597 /* Function that is callable from the debugger to print the current
2598 options. */
2599 void
2600 ix86_debug_options (void)
2602 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2603 ix86_arch_string, ix86_tune_string,
2604 ix86_fpmath_string, true);
2606 if (opts)
2608 fprintf (stderr, "%s\n\n", opts);
2609 free (opts);
2611 else
2612 fputs ("<no options>\n\n", stderr);
2614 return;
2617 /* Sometimes certain combinations of command options do not make
2618 sense on a particular target machine. You can define a macro
2619 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2620 defined, is executed once just after all the command options have
2621 been parsed.
2623 Don't use this macro to turn on various extra optimizations for
2624 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2626 void
2627 override_options (bool main_args_p)
2629 int i;
2630 unsigned int ix86_arch_mask, ix86_tune_mask;
2631 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2632 const char *prefix;
2633 const char *suffix;
2634 const char *sw;
2636 /* Comes from final.c -- no real reason to change it. */
2637 #define MAX_CODE_ALIGN 16
2639 enum pta_flags
2641 PTA_SSE = 1 << 0,
2642 PTA_SSE2 = 1 << 1,
2643 PTA_SSE3 = 1 << 2,
2644 PTA_MMX = 1 << 3,
2645 PTA_PREFETCH_SSE = 1 << 4,
2646 PTA_3DNOW = 1 << 5,
2647 PTA_3DNOW_A = 1 << 6,
2648 PTA_64BIT = 1 << 7,
2649 PTA_SSSE3 = 1 << 8,
2650 PTA_CX16 = 1 << 9,
2651 PTA_POPCNT = 1 << 10,
2652 PTA_ABM = 1 << 11,
2653 PTA_SSE4A = 1 << 12,
2654 PTA_NO_SAHF = 1 << 13,
2655 PTA_SSE4_1 = 1 << 14,
2656 PTA_SSE4_2 = 1 << 15,
2657 PTA_AES = 1 << 16,
2658 PTA_PCLMUL = 1 << 17,
2659 PTA_AVX = 1 << 18,
2660 PTA_FMA = 1 << 19,
2661 PTA_MOVBE = 1 << 20,
2662 PTA_FMA4 = 1 << 21,
2663 PTA_XOP = 1 << 22,
2664 PTA_LWP = 1 << 23
2667 static struct pta
2669 const char *const name; /* processor name or nickname. */
2670 const enum processor_type processor;
2671 const enum attr_cpu schedule;
2672 const unsigned /*enum pta_flags*/ flags;
2674 const processor_alias_table[] =
2676 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2677 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2678 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2679 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2680 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2681 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2682 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2683 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2684 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2685 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2686 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2687 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2688 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2689 PTA_MMX | PTA_SSE},
2690 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2691 PTA_MMX | PTA_SSE},
2692 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2693 PTA_MMX | PTA_SSE | PTA_SSE2},
2694 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2695 PTA_MMX |PTA_SSE | PTA_SSE2},
2696 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2697 PTA_MMX | PTA_SSE | PTA_SSE2},
2698 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2699 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2700 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2701 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2702 | PTA_CX16 | PTA_NO_SAHF},
2703 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2704 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2705 | PTA_SSSE3 | PTA_CX16},
2706 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2707 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2708 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2709 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2710 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2711 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2712 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2713 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2714 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2715 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2716 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2717 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2718 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2719 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2720 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2721 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2722 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2723 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2724 {"x86-64", PROCESSOR_K8, CPU_K8,
2725 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2726 {"k8", PROCESSOR_K8, CPU_K8,
2727 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2728 | PTA_SSE2 | PTA_NO_SAHF},
2729 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2730 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2731 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2732 {"opteron", PROCESSOR_K8, CPU_K8,
2733 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2734 | PTA_SSE2 | PTA_NO_SAHF},
2735 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2736 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2737 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2738 {"athlon64", PROCESSOR_K8, CPU_K8,
2739 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2740 | PTA_SSE2 | PTA_NO_SAHF},
2741 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2742 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2743 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2744 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2745 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2746 | PTA_SSE2 | PTA_NO_SAHF},
2747 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2748 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2749 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2750 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2751 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2752 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2753 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2754 0 /* flags are only used for -march switch. */ },
2755 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2756 PTA_64BIT /* flags are only used for -march switch. */ },
2759 int const pta_size = ARRAY_SIZE (processor_alias_table);
2761 /* Set up prefix/suffix so the error messages refer to either the command
2762 line argument, or the attribute(target). */
2763 if (main_args_p)
2765 prefix = "-m";
2766 suffix = "";
2767 sw = "switch";
2769 else
2771 prefix = "option(\"";
2772 suffix = "\")";
2773 sw = "attribute";
2776 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2777 SUBTARGET_OVERRIDE_OPTIONS;
2778 #endif
2780 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2781 SUBSUBTARGET_OVERRIDE_OPTIONS;
2782 #endif
2784 /* -fPIC is the default for x86_64. */
2785 if (TARGET_MACHO && TARGET_64BIT)
2786 flag_pic = 2;
2788 /* Set the default values for switches whose default depends on TARGET_64BIT
2789 in case they weren't overwritten by command line options. */
2790 if (TARGET_64BIT)
2792 /* Mach-O doesn't support omitting the frame pointer for now. */
2793 if (flag_omit_frame_pointer == 2)
2794 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2795 if (flag_asynchronous_unwind_tables == 2)
2796 flag_asynchronous_unwind_tables = 1;
2797 if (flag_pcc_struct_return == 2)
2798 flag_pcc_struct_return = 0;
2800 else
2802 if (flag_omit_frame_pointer == 2)
2803 flag_omit_frame_pointer = 0;
2804 if (flag_asynchronous_unwind_tables == 2)
2805 flag_asynchronous_unwind_tables = 0;
2806 if (flag_pcc_struct_return == 2)
2807 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2810 /* Need to check -mtune=generic first. */
2811 if (ix86_tune_string)
2813 if (!strcmp (ix86_tune_string, "generic")
2814 || !strcmp (ix86_tune_string, "i686")
2815 /* As special support for cross compilers we read -mtune=native
2816 as -mtune=generic. With native compilers we won't see the
2817 -mtune=native, as it was changed by the driver. */
2818 || !strcmp (ix86_tune_string, "native"))
2820 if (TARGET_64BIT)
2821 ix86_tune_string = "generic64";
2822 else
2823 ix86_tune_string = "generic32";
2825 /* If this call is for setting the option attribute, allow the
2826 generic32/generic64 that was previously set. */
2827 else if (!main_args_p
2828 && (!strcmp (ix86_tune_string, "generic32")
2829 || !strcmp (ix86_tune_string, "generic64")))
2831 else if (!strncmp (ix86_tune_string, "generic", 7))
2832 error ("bad value (%s) for %stune=%s %s",
2833 ix86_tune_string, prefix, suffix, sw);
2834 else if (!strcmp (ix86_tune_string, "x86-64"))
2835 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2836 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2837 prefix, suffix, prefix, suffix, prefix, suffix);
2839 else
2841 if (ix86_arch_string)
2842 ix86_tune_string = ix86_arch_string;
2843 if (!ix86_tune_string)
2845 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2846 ix86_tune_defaulted = 1;
2849 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2850 need to use a sensible tune option. */
2851 if (!strcmp (ix86_tune_string, "generic")
2852 || !strcmp (ix86_tune_string, "x86-64")
2853 || !strcmp (ix86_tune_string, "i686"))
2855 if (TARGET_64BIT)
2856 ix86_tune_string = "generic64";
2857 else
2858 ix86_tune_string = "generic32";
2862 if (ix86_stringop_string)
2864 if (!strcmp (ix86_stringop_string, "rep_byte"))
2865 stringop_alg = rep_prefix_1_byte;
2866 else if (!strcmp (ix86_stringop_string, "libcall"))
2867 stringop_alg = libcall;
2868 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2869 stringop_alg = rep_prefix_4_byte;
2870 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2871 && TARGET_64BIT)
2872 /* rep; movq isn't available in 32-bit code. */
2873 stringop_alg = rep_prefix_8_byte;
2874 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2875 stringop_alg = loop_1_byte;
2876 else if (!strcmp (ix86_stringop_string, "loop"))
2877 stringop_alg = loop;
2878 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2879 stringop_alg = unrolled_loop;
2880 else
2881 error ("bad value (%s) for %sstringop-strategy=%s %s",
2882 ix86_stringop_string, prefix, suffix, sw);
2885 if (!ix86_arch_string)
2886 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
2887 else
2888 ix86_arch_specified = 1;
2890 /* Validate -mabi= value. */
2891 if (ix86_abi_string)
2893 if (strcmp (ix86_abi_string, "sysv") == 0)
2894 ix86_abi = SYSV_ABI;
2895 else if (strcmp (ix86_abi_string, "ms") == 0)
2896 ix86_abi = MS_ABI;
2897 else
2898 error ("unknown ABI (%s) for %sabi=%s %s",
2899 ix86_abi_string, prefix, suffix, sw);
2901 else
2902 ix86_abi = DEFAULT_ABI;
2904 if (ix86_cmodel_string != 0)
2906 if (!strcmp (ix86_cmodel_string, "small"))
2907 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2908 else if (!strcmp (ix86_cmodel_string, "medium"))
2909 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2910 else if (!strcmp (ix86_cmodel_string, "large"))
2911 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2912 else if (flag_pic)
2913 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2914 else if (!strcmp (ix86_cmodel_string, "32"))
2915 ix86_cmodel = CM_32;
2916 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2917 ix86_cmodel = CM_KERNEL;
2918 else
2919 error ("bad value (%s) for %scmodel=%s %s",
2920 ix86_cmodel_string, prefix, suffix, sw);
2922 else
2924 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2925 use of rip-relative addressing. This eliminates fixups that
2926 would otherwise be needed if this object is to be placed in a
2927 DLL, and is essentially just as efficient as direct addressing. */
2928 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2929 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2930 else if (TARGET_64BIT)
2931 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2932 else
2933 ix86_cmodel = CM_32;
2935 if (ix86_asm_string != 0)
2937 if (! TARGET_MACHO
2938 && !strcmp (ix86_asm_string, "intel"))
2939 ix86_asm_dialect = ASM_INTEL;
2940 else if (!strcmp (ix86_asm_string, "att"))
2941 ix86_asm_dialect = ASM_ATT;
2942 else
2943 error ("bad value (%s) for %sasm=%s %s",
2944 ix86_asm_string, prefix, suffix, sw);
2946 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2947 error ("code model %qs not supported in the %s bit mode",
2948 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2949 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2950 sorry ("%i-bit mode not compiled in",
2951 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2953 for (i = 0; i < pta_size; i++)
2954 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2956 ix86_schedule = processor_alias_table[i].schedule;
2957 ix86_arch = processor_alias_table[i].processor;
2958 /* Default cpu tuning to the architecture. */
2959 ix86_tune = ix86_arch;
2961 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2962 error ("CPU you selected does not support x86-64 "
2963 "instruction set");
2965 if (processor_alias_table[i].flags & PTA_MMX
2966 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2967 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2968 if (processor_alias_table[i].flags & PTA_3DNOW
2969 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2970 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2971 if (processor_alias_table[i].flags & PTA_3DNOW_A
2972 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2973 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2974 if (processor_alias_table[i].flags & PTA_SSE
2975 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2976 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2977 if (processor_alias_table[i].flags & PTA_SSE2
2978 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2979 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2980 if (processor_alias_table[i].flags & PTA_SSE3
2981 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2982 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2983 if (processor_alias_table[i].flags & PTA_SSSE3
2984 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2985 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2986 if (processor_alias_table[i].flags & PTA_SSE4_1
2987 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2988 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2989 if (processor_alias_table[i].flags & PTA_SSE4_2
2990 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2991 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2992 if (processor_alias_table[i].flags & PTA_AVX
2993 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2994 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2995 if (processor_alias_table[i].flags & PTA_FMA
2996 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2997 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2998 if (processor_alias_table[i].flags & PTA_SSE4A
2999 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3000 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3001 if (processor_alias_table[i].flags & PTA_FMA4
3002 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3003 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3004 if (processor_alias_table[i].flags & PTA_XOP
3005 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3006 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3007 if (processor_alias_table[i].flags & PTA_LWP
3008 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3009 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3010 if (processor_alias_table[i].flags & PTA_ABM
3011 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3012 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3013 if (processor_alias_table[i].flags & PTA_CX16
3014 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3015 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3016 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3017 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3018 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3019 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3020 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3021 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3022 if (processor_alias_table[i].flags & PTA_MOVBE
3023 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3024 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3025 if (processor_alias_table[i].flags & PTA_AES
3026 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3027 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3028 if (processor_alias_table[i].flags & PTA_PCLMUL
3029 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3030 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3031 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3032 x86_prefetch_sse = true;
3034 break;
3037 if (!strcmp (ix86_arch_string, "generic"))
3038 error ("generic CPU can be used only for %stune=%s %s",
3039 prefix, suffix, sw);
3040 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3041 error ("bad value (%s) for %sarch=%s %s",
3042 ix86_arch_string, prefix, suffix, sw);
3044 ix86_arch_mask = 1u << ix86_arch;
3045 for (i = 0; i < X86_ARCH_LAST; ++i)
3046 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3048 for (i = 0; i < pta_size; i++)
3049 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3051 ix86_schedule = processor_alias_table[i].schedule;
3052 ix86_tune = processor_alias_table[i].processor;
3053 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3055 if (ix86_tune_defaulted)
3057 ix86_tune_string = "x86-64";
3058 for (i = 0; i < pta_size; i++)
3059 if (! strcmp (ix86_tune_string,
3060 processor_alias_table[i].name))
3061 break;
3062 ix86_schedule = processor_alias_table[i].schedule;
3063 ix86_tune = processor_alias_table[i].processor;
3065 else
3066 error ("CPU you selected does not support x86-64 "
3067 "instruction set");
3069 /* Intel CPUs have always interpreted SSE prefetch instructions as
3070 NOPs; so, we can enable SSE prefetch instructions even when
3071 -mtune (rather than -march) points us to a processor that has them.
3072 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3073 higher processors. */
3074 if (TARGET_CMOVE
3075 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3076 x86_prefetch_sse = true;
3077 break;
3080 if (ix86_tune_specified && i == pta_size)
3081 error ("bad value (%s) for %stune=%s %s",
3082 ix86_tune_string, prefix, suffix, sw);
3084 ix86_tune_mask = 1u << ix86_tune;
3085 for (i = 0; i < X86_TUNE_LAST; ++i)
3086 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
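  /* Worked example of the loop above: with -mtune=core2, ix86_tune is
     PROCESSOR_CORE2 and ix86_tune_mask is (1u << PROCESSOR_CORE2), i.e. the
     m_CORE2 bit.  X86_TUNE_PAD_RETURNS lists m_CORE2 in
     initial_ix86_tune_features, so ix86_tune_features[X86_TUNE_PAD_RETURNS]
     becomes 1, while X86_TUNE_USE_MOV0 (m_K6 only) stays 0.  */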
3088 if (optimize_size)
3089 ix86_cost = &ix86_size_cost;
3090 else
3091 ix86_cost = processor_target_table[ix86_tune].cost;
3093 /* Arrange to set up i386_stack_locals for all functions. */
3094 init_machine_status = ix86_init_machine_status;
3096 /* Validate -mregparm= value. */
3097 if (ix86_regparm_string)
3099 if (TARGET_64BIT)
3100 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3101 i = atoi (ix86_regparm_string);
3102 if (i < 0 || i > REGPARM_MAX)
3103 error ("%sregparm=%d%s is not between 0 and %d",
3104 prefix, i, suffix, REGPARM_MAX);
3105 else
3106 ix86_regparm = i;
3108 if (TARGET_64BIT)
3109 ix86_regparm = REGPARM_MAX;
3111 /* If the user has provided any of the -malign-* options,
3112 warn and use that value only if -falign-* is not set.
3113 Remove this code in GCC 3.2 or later. */
3114 if (ix86_align_loops_string)
3116 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3117 prefix, suffix, suffix);
3118 if (align_loops == 0)
3120 i = atoi (ix86_align_loops_string);
3121 if (i < 0 || i > MAX_CODE_ALIGN)
3122 error ("%salign-loops=%d%s is not between 0 and %d",
3123 prefix, i, suffix, MAX_CODE_ALIGN);
3124 else
3125 align_loops = 1 << i;
3129 if (ix86_align_jumps_string)
3131 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3132 prefix, suffix, suffix);
3133 if (align_jumps == 0)
3135 i = atoi (ix86_align_jumps_string);
3136 if (i < 0 || i > MAX_CODE_ALIGN)
3137 error ("%salign-loops=%d%s is not between 0 and %d",
3138 prefix, i, suffix, MAX_CODE_ALIGN);
3139 else
3140 align_jumps = 1 << i;
3144 if (ix86_align_funcs_string)
3146 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3147 prefix, suffix, suffix);
3148 if (align_functions == 0)
3150 i = atoi (ix86_align_funcs_string);
3151 if (i < 0 || i > MAX_CODE_ALIGN)
3152 error ("%salign-loops=%d%s is not between 0 and %d",
3153 prefix, i, suffix, MAX_CODE_ALIGN);
3154 else
3155 align_functions = 1 << i;
3159 /* Default align_* from the processor table. */
3160 if (align_loops == 0)
3162 align_loops = processor_target_table[ix86_tune].align_loop;
3163 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3165 if (align_jumps == 0)
3167 align_jumps = processor_target_table[ix86_tune].align_jump;
3168 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3170 if (align_functions == 0)
3172 align_functions = processor_target_table[ix86_tune].align_func;
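  /* Example with processor_target_table above: for -mtune=core2 the row
     {&core2_cost, 16, 10, 16, 10, 16} supplies align_loops = 16 with
     align_loops_max_skip = 10, align_jumps = 16 with max skip 10, and
     align_functions = 16, but only for values the user did not already set
     via -falign-*.  */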
3175 /* Validate -mbranch-cost= value, or provide default. */
3176 ix86_branch_cost = ix86_cost->branch_cost;
3177 if (ix86_branch_cost_string)
3179 i = atoi (ix86_branch_cost_string);
3180 if (i < 0 || i > 5)
3181 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3182 else
3183 ix86_branch_cost = i;
3185 if (ix86_section_threshold_string)
3187 i = atoi (ix86_section_threshold_string);
3188 if (i < 0)
3189 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3190 else
3191 ix86_section_threshold = i;
3194 if (ix86_tls_dialect_string)
3196 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3197 ix86_tls_dialect = TLS_DIALECT_GNU;
3198 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3199 ix86_tls_dialect = TLS_DIALECT_GNU2;
3200 else
3201 error ("bad value (%s) for %stls-dialect=%s %s",
3202 ix86_tls_dialect_string, prefix, suffix, sw);
3205 if (ix87_precision_string)
3207 i = atoi (ix87_precision_string);
3208 if (i != 32 && i != 64 && i != 80)
3209 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3212 if (TARGET_64BIT)
3214 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3216 /* Enable by default the SSE and MMX builtins. Do allow the user to
3217 explicitly disable any of these. In particular, disabling SSE and
3218 MMX for kernel code is extremely useful. */
3219 if (!ix86_arch_specified)
3220 ix86_isa_flags
3221 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3222 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3224 if (TARGET_RTD)
3225 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3227 else
3229 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3231 if (!ix86_arch_specified)
3232 ix86_isa_flags
3233 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3235 /* The i386 ABI does not specify a red zone. It still makes sense to use it
3236 when the programmer takes care to keep the stack from being destroyed. */
3237 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3238 target_flags |= MASK_NO_RED_ZONE;
3241 /* Keep nonleaf frame pointers. */
3242 if (flag_omit_frame_pointer)
3243 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3244 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3245 flag_omit_frame_pointer = 1;
3247 /* If we're doing fast math, we don't care about comparison order
3248 wrt NaNs. This lets us use a shorter comparison sequence. */
3249 if (flag_finite_math_only)
3250 target_flags &= ~MASK_IEEE_FP;
3252 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3253 since the insns won't need emulation. */
3254 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3255 target_flags &= ~MASK_NO_FANCY_MATH_387;
3257 /* Likewise, if the target doesn't have a 387, or we've specified
3258 software floating point, don't use 387 inline intrinsics. */
3259 if (!TARGET_80387)
3260 target_flags |= MASK_NO_FANCY_MATH_387;
3262 /* Turn on MMX builtins for -msse. */
3263 if (TARGET_SSE)
3265 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3266 x86_prefetch_sse = true;
3269 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3270 if (TARGET_SSE4_2 || TARGET_ABM)
3271 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3273 /* Validate -mpreferred-stack-boundary= value or default it to
3274 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3275 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3276 if (ix86_preferred_stack_boundary_string)
3278 i = atoi (ix86_preferred_stack_boundary_string);
3279 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3280 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3281 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3282 else
3283 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
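/* Editorial note, not part of the original source: -mpreferred-stack-boundary
   likewise takes an exponent.  For example,

     gcc -mpreferred-stack-boundary=4 ...

   requests a (1 << 4) = 16-byte boundary, stored above as
   16 * BITS_PER_UNIT = 128 bits.  Values below 4 are rejected in 64-bit
   mode, matching the 16-byte alignment the x86-64 psABI expects at call
   sites.  */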
3286 /* Set the default value for -mstackrealign. */
3287 if (ix86_force_align_arg_pointer == -1)
3288 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3290 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3292 /* Validate -mincoming-stack-boundary= value or default it to
3293 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3294 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3295 if (ix86_incoming_stack_boundary_string)
3297 i = atoi (ix86_incoming_stack_boundary_string);
3298 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3299 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3300 i, TARGET_64BIT ? 4 : 2);
3301 else
3303 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3304 ix86_incoming_stack_boundary
3305 = ix86_user_incoming_stack_boundary;
3309 /* Accept -msseregparm only if at least SSE support is enabled. */
3310 if (TARGET_SSEREGPARM
3311 && ! TARGET_SSE)
3312 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3314 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3315 if (ix86_fpmath_string != 0)
3317 if (! strcmp (ix86_fpmath_string, "387"))
3318 ix86_fpmath = FPMATH_387;
3319 else if (! strcmp (ix86_fpmath_string, "sse"))
3321 if (!TARGET_SSE)
3323 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3324 ix86_fpmath = FPMATH_387;
3326 else
3327 ix86_fpmath = FPMATH_SSE;
3329 else if (! strcmp (ix86_fpmath_string, "387,sse")
3330 || ! strcmp (ix86_fpmath_string, "387+sse")
3331 || ! strcmp (ix86_fpmath_string, "sse,387")
3332 || ! strcmp (ix86_fpmath_string, "sse+387")
3333 || ! strcmp (ix86_fpmath_string, "both"))
3335 if (!TARGET_SSE)
3337 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3338 ix86_fpmath = FPMATH_387;
3340 else if (!TARGET_80387)
3342 warning (0, "387 instruction set disabled, using SSE arithmetics");
3343 ix86_fpmath = FPMATH_SSE;
3345 else
3346 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3348 else
3349 error ("bad value (%s) for %sfpmath=%s %s",
3350 ix86_fpmath_string, prefix, suffix, sw);
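/* Editorial note, not part of the original source: examples of the -mfpmath
   spellings accepted above:

     gcc -m32 -mfpmath=387 ...            scalar FP in the x87 unit
     gcc -msse2 -mfpmath=sse ...          scalar FP in SSE registers
     gcc -msse2 -mfpmath=sse,387 ...      allow both units

   As coded above, "sse" falls back to 387 with a warning when SSE is not
   enabled.  */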
3353 /* If the i387 is disabled, then do not return values in it. */
3354 if (!TARGET_80387)
3355 target_flags &= ~MASK_FLOAT_RETURNS;
3357 /* Use external vectorized library in vectorizing intrinsics. */
3358 if (ix86_veclibabi_string)
3360 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3361 ix86_veclib_handler = ix86_veclibabi_svml;
3362 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3363 ix86_veclib_handler = ix86_veclibabi_acml;
3364 else
3365 error ("unknown vectorization library ABI type (%s) for "
3366 "%sveclibabi=%s %s", ix86_veclibabi_string,
3367 prefix, suffix, sw);
3370 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3371 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3372 && !optimize_size)
3373 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3375 /* ??? Unwind info is not correct around the CFG unless either a frame
3376 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3377 unwind info generation to be aware of the CFG and propagating states
3378 around edges. */
3379 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3380 || flag_exceptions || flag_non_call_exceptions)
3381 && flag_omit_frame_pointer
3382 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3384 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3385 warning (0, "unwind tables currently require either a frame pointer "
3386 "or %saccumulate-outgoing-args%s for correctness",
3387 prefix, suffix);
3388 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3391 /* If stack probes are required, the space used for large function
3392 arguments on the stack must also be probed, so enable
3393 -maccumulate-outgoing-args so this happens in the prologue. */
3394 if (TARGET_STACK_PROBE
3395 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3397 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3398 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3399 "for correctness", prefix, suffix);
3400 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3403 /* For sane SSE instruction set generation we need the fcomi instruction.
3404 It is safe to enable all CMOV instructions. */
3405 if (TARGET_SSE)
3406 TARGET_CMOVE = 1;
3408 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3410 char *p;
3411 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3412 p = strchr (internal_label_prefix, 'X');
3413 internal_label_prefix_len = p - internal_label_prefix;
3414 *p = '\0';
3417 /* When the scheduling description is not available, disable the scheduler pass
3418 so it won't slow down the compilation and make x87 code slower. */
3419 if (!TARGET_SCHEDULE)
3420 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3422 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3423 set_param_value ("simultaneous-prefetches",
3424 ix86_cost->simultaneous_prefetches);
3425 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3426 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3427 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3428 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3429 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3430 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3432 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3433 can be optimized to ap = __builtin_next_arg (0). */
3434 if (!TARGET_64BIT)
3435 targetm.expand_builtin_va_start = NULL;
3437 if (TARGET_64BIT)
3439 ix86_gen_leave = gen_leave_rex64;
3440 ix86_gen_pop1 = gen_popdi1;
3441 ix86_gen_add3 = gen_adddi3;
3442 ix86_gen_sub3 = gen_subdi3;
3443 ix86_gen_sub3_carry = gen_subdi3_carry;
3444 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3445 ix86_gen_monitor = gen_sse3_monitor64;
3446 ix86_gen_andsp = gen_anddi3;
3448 else
3450 ix86_gen_leave = gen_leave;
3451 ix86_gen_pop1 = gen_popsi1;
3452 ix86_gen_add3 = gen_addsi3;
3453 ix86_gen_sub3 = gen_subsi3;
3454 ix86_gen_sub3_carry = gen_subsi3_carry;
3455 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3456 ix86_gen_monitor = gen_sse3_monitor;
3457 ix86_gen_andsp = gen_andsi3;
3460 #ifdef USE_IX86_CLD
3461 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3462 if (!TARGET_64BIT)
3463 target_flags |= MASK_CLD & ~target_flags_explicit;
3464 #endif
3466 /* Save the initial options in case the user uses function-specific options. */
3467 if (main_args_p)
3468 target_option_default_node = target_option_current_node
3469 = build_target_option_node ();
3472 /* Update register usage after having seen the compiler flags. */
3474 void
3475 ix86_conditional_register_usage (void)
3477 int i;
3478 unsigned int j;
3480 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3482 if (fixed_regs[i] > 1)
3483 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3484 if (call_used_regs[i] > 1)
3485 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3488 /* The PIC register, if it exists, is fixed. */
3489 j = PIC_OFFSET_TABLE_REGNUM;
3490 if (j != INVALID_REGNUM)
3491 fixed_regs[j] = call_used_regs[j] = 1;
3493 /* The MS_ABI changes the set of call-used registers. */
3494 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3496 call_used_regs[SI_REG] = 0;
3497 call_used_regs[DI_REG] = 0;
3498 call_used_regs[XMM6_REG] = 0;
3499 call_used_regs[XMM7_REG] = 0;
3500 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3501 call_used_regs[i] = 0;
3504 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3505 other call-clobbered regs for 64-bit. */
3506 if (TARGET_64BIT)
3508 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3510 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3511 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3512 && call_used_regs[i])
3513 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3516 /* If MMX is disabled, squash the registers. */
3517 if (! TARGET_MMX)
3518 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3519 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3520 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3522 /* If SSE is disabled, squash the registers. */
3523 if (! TARGET_SSE)
3524 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3525 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3526 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3528 /* If the FPU is disabled, squash the registers. */
3529 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3530 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3531 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3532 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3534 /* If 32-bit, squash the 64-bit registers. */
3535 if (! TARGET_64BIT)
3537 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3538 reg_names[i] = "";
3539 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3540 reg_names[i] = "";
3545 /* Save the current options */
3547 static void
3548 ix86_function_specific_save (struct cl_target_option *ptr)
3550 ptr->arch = ix86_arch;
3551 ptr->schedule = ix86_schedule;
3552 ptr->tune = ix86_tune;
3553 ptr->fpmath = ix86_fpmath;
3554 ptr->branch_cost = ix86_branch_cost;
3555 ptr->tune_defaulted = ix86_tune_defaulted;
3556 ptr->arch_specified = ix86_arch_specified;
3557 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3558 ptr->target_flags_explicit = target_flags_explicit;
3560 /* The fields are char but the variables are not; make sure the
3561 values fit in the fields. */
3562 gcc_assert (ptr->arch == ix86_arch);
3563 gcc_assert (ptr->schedule == ix86_schedule);
3564 gcc_assert (ptr->tune == ix86_tune);
3565 gcc_assert (ptr->fpmath == ix86_fpmath);
3566 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3569 /* Restore the current options */
3571 static void
3572 ix86_function_specific_restore (struct cl_target_option *ptr)
3574 enum processor_type old_tune = ix86_tune;
3575 enum processor_type old_arch = ix86_arch;
3576 unsigned int ix86_arch_mask, ix86_tune_mask;
3577 int i;
3579 ix86_arch = (enum processor_type) ptr->arch;
3580 ix86_schedule = (enum attr_cpu) ptr->schedule;
3581 ix86_tune = (enum processor_type) ptr->tune;
3582 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3583 ix86_branch_cost = ptr->branch_cost;
3584 ix86_tune_defaulted = ptr->tune_defaulted;
3585 ix86_arch_specified = ptr->arch_specified;
3586 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3587 target_flags_explicit = ptr->target_flags_explicit;
3589 /* Recreate the arch feature tests if the arch changed */
3590 if (old_arch != ix86_arch)
3592 ix86_arch_mask = 1u << ix86_arch;
3593 for (i = 0; i < X86_ARCH_LAST; ++i)
3594 ix86_arch_features[i]
3595 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3598 /* Recreate the tune optimization tests */
3599 if (old_tune != ix86_tune)
3601 ix86_tune_mask = 1u << ix86_tune;
3602 for (i = 0; i < X86_TUNE_LAST; ++i)
3603 ix86_tune_features[i]
3604 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3608 /* Print the current options */
3610 static void
3611 ix86_function_specific_print (FILE *file, int indent,
3612 struct cl_target_option *ptr)
3614 char *target_string
3615 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3616 NULL, NULL, NULL, false);
3618 fprintf (file, "%*sarch = %d (%s)\n",
3619 indent, "",
3620 ptr->arch,
3621 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3622 ? cpu_names[ptr->arch]
3623 : "<unknown>"));
3625 fprintf (file, "%*stune = %d (%s)\n",
3626 indent, "",
3627 ptr->tune,
3628 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3629 ? cpu_names[ptr->tune]
3630 : "<unknown>"));
3632 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3633 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3634 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3635 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3637 if (target_string)
3639 fprintf (file, "%*s%s\n", indent, "", target_string);
3640 free (target_string);
3645 /* Inner function to process the attribute((target(...))): take an argument and
3646 set the current options from it. If we have a list, recursively go
3647 over the list. */
3649 static bool
3650 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3652 char *next_optstr;
3653 bool ret = true;
3655 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3656 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3657 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3658 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3660 enum ix86_opt_type
3662 ix86_opt_unknown,
3663 ix86_opt_yes,
3664 ix86_opt_no,
3665 ix86_opt_str,
3666 ix86_opt_isa
3669 static const struct
3671 const char *string;
3672 size_t len;
3673 enum ix86_opt_type type;
3674 int opt;
3675 int mask;
3676 } attrs[] = {
3677 /* isa options */
3678 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3679 IX86_ATTR_ISA ("abm", OPT_mabm),
3680 IX86_ATTR_ISA ("aes", OPT_maes),
3681 IX86_ATTR_ISA ("avx", OPT_mavx),
3682 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3683 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3684 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3685 IX86_ATTR_ISA ("sse", OPT_msse),
3686 IX86_ATTR_ISA ("sse2", OPT_msse2),
3687 IX86_ATTR_ISA ("sse3", OPT_msse3),
3688 IX86_ATTR_ISA ("sse4", OPT_msse4),
3689 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3690 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3691 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3692 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3693 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3694 IX86_ATTR_ISA ("xop", OPT_mxop),
3695 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3697 /* string options */
3698 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3699 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3700 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3702 /* flag options */
3703 IX86_ATTR_YES ("cld",
3704 OPT_mcld,
3705 MASK_CLD),
3707 IX86_ATTR_NO ("fancy-math-387",
3708 OPT_mfancy_math_387,
3709 MASK_NO_FANCY_MATH_387),
3711 IX86_ATTR_YES ("ieee-fp",
3712 OPT_mieee_fp,
3713 MASK_IEEE_FP),
3715 IX86_ATTR_YES ("inline-all-stringops",
3716 OPT_minline_all_stringops,
3717 MASK_INLINE_ALL_STRINGOPS),
3719 IX86_ATTR_YES ("inline-stringops-dynamically",
3720 OPT_minline_stringops_dynamically,
3721 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3723 IX86_ATTR_NO ("align-stringops",
3724 OPT_mno_align_stringops,
3725 MASK_NO_ALIGN_STRINGOPS),
3727 IX86_ATTR_YES ("recip",
3728 OPT_mrecip,
3729 MASK_RECIP),
3733 /* If this is a list, recurse to get the options. */
3734 if (TREE_CODE (args) == TREE_LIST)
3736 bool ret = true;
3738 for (; args; args = TREE_CHAIN (args))
3739 if (TREE_VALUE (args)
3740 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3741 ret = false;
3743 return ret;
3746 else if (TREE_CODE (args) != STRING_CST)
3747 gcc_unreachable ();
3749 /* Handle multiple arguments separated by commas. */
3750 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3752 while (next_optstr && *next_optstr != '\0')
3754 char *p = next_optstr;
3755 char *orig_p = p;
3756 char *comma = strchr (next_optstr, ',');
3757 const char *opt_string;
3758 size_t len, opt_len;
3759 int opt;
3760 bool opt_set_p;
3761 char ch;
3762 unsigned i;
3763 enum ix86_opt_type type = ix86_opt_unknown;
3764 int mask = 0;
3766 if (comma)
3768 *comma = '\0';
3769 len = comma - next_optstr;
3770 next_optstr = comma + 1;
3772 else
3774 len = strlen (p);
3775 next_optstr = NULL;
3778 /* Recognize no-xxx. */
3779 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3781 opt_set_p = false;
3782 p += 3;
3783 len -= 3;
3785 else
3786 opt_set_p = true;
3788 /* Find the option. */
3789 ch = *p;
3790 opt = N_OPTS;
3791 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3793 type = attrs[i].type;
3794 opt_len = attrs[i].len;
3795 if (ch == attrs[i].string[0]
3796 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3797 && memcmp (p, attrs[i].string, opt_len) == 0)
3799 opt = attrs[i].opt;
3800 mask = attrs[i].mask;
3801 opt_string = attrs[i].string;
3802 break;
3806 /* Process the option. */
3807 if (opt == N_OPTS)
3809 error ("attribute(target(\"%s\")) is unknown", orig_p);
3810 ret = false;
3813 else if (type == ix86_opt_isa)
3814 ix86_handle_option (opt, p, opt_set_p);
3816 else if (type == ix86_opt_yes || type == ix86_opt_no)
3818 if (type == ix86_opt_no)
3819 opt_set_p = !opt_set_p;
3821 if (opt_set_p)
3822 target_flags |= mask;
3823 else
3824 target_flags &= ~mask;
3827 else if (type == ix86_opt_str)
3829 if (p_strings[opt])
3831 error ("option(\"%s\") was already specified", opt_string);
3832 ret = false;
3834 else
3835 p_strings[opt] = xstrdup (p + opt_len);
3838 else
3839 gcc_unreachable ();
3842 return ret;
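/* Editorial illustration, not part of the original source: the strings parsed
   above come from user code such as the following (the function name is
   hypothetical):

     __attribute__((target("sse4.2,no-fancy-math-387")))
     int hypothetical_popcount_loop (const unsigned int *p, int n);

   Each comma-separated item is looked up in the attrs[] table; a "no-"
   prefix clears the corresponding ISA flag or target mask instead of
   setting it.  */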
3845 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3847 tree
3848 ix86_valid_target_attribute_tree (tree args)
3850 const char *orig_arch_string = ix86_arch_string;
3851 const char *orig_tune_string = ix86_tune_string;
3852 const char *orig_fpmath_string = ix86_fpmath_string;
3853 int orig_tune_defaulted = ix86_tune_defaulted;
3854 int orig_arch_specified = ix86_arch_specified;
3855 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3856 tree t = NULL_TREE;
3857 int i;
3858 struct cl_target_option *def
3859 = TREE_TARGET_OPTION (target_option_default_node);
3861 /* Process each of the options on the chain. */
3862 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3863 return NULL_TREE;
3865 /* If the changed options are different from the default, rerun override_options,
3866 and then save the options away. The string options are attribute options,
3867 and will be undone when we copy the save structure. */
3868 if (ix86_isa_flags != def->ix86_isa_flags
3869 || target_flags != def->target_flags
3870 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3871 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3872 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3874 /* If we are using the default tune= or arch=, undo the string assigned,
3875 and use the default. */
3876 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3877 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3878 else if (!orig_arch_specified)
3879 ix86_arch_string = NULL;
3881 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3882 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3883 else if (orig_tune_defaulted)
3884 ix86_tune_string = NULL;
3886 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3887 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3888 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3889 else if (!TARGET_64BIT && TARGET_SSE)
3890 ix86_fpmath_string = "sse,387";
3892 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3893 override_options (false);
3895 /* Add any builtin functions with the new isa if any. */
3896 ix86_add_new_builtins (ix86_isa_flags);
3898 /* Save the current options unless we are validating options for
3899 #pragma. */
3900 t = build_target_option_node ();
3902 ix86_arch_string = orig_arch_string;
3903 ix86_tune_string = orig_tune_string;
3904 ix86_fpmath_string = orig_fpmath_string;
3906 /* Free up memory allocated to hold the strings */
3907 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3908 if (option_strings[i])
3909 free (option_strings[i]);
3912 return t;
3915 /* Hook to validate attribute((target("string"))). */
3917 static bool
3918 ix86_valid_target_attribute_p (tree fndecl,
3919 tree ARG_UNUSED (name),
3920 tree args,
3921 int ARG_UNUSED (flags))
3923 struct cl_target_option cur_target;
3924 bool ret = true;
3925 tree old_optimize = build_optimization_node ();
3926 tree new_target, new_optimize;
3927 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3929 /* If the function changed the optimization levels as well as setting target
3930 options, start with the optimizations specified. */
3931 if (func_optimize && func_optimize != old_optimize)
3932 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3934 /* The target attributes may also change some optimization flags, so update
3935 the optimization options if necessary. */
3936 cl_target_option_save (&cur_target);
3937 new_target = ix86_valid_target_attribute_tree (args);
3938 new_optimize = build_optimization_node ();
3940 if (!new_target)
3941 ret = false;
3943 else if (fndecl)
3945 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3947 if (old_optimize != new_optimize)
3948 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3951 cl_target_option_restore (&cur_target);
3953 if (old_optimize != new_optimize)
3954 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3956 return ret;
3960 /* Hook to determine if one function can safely inline another. */
3962 static bool
3963 ix86_can_inline_p (tree caller, tree callee)
3965 bool ret = false;
3966 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3967 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3969 /* If callee has no option attributes, then it is ok to inline. */
3970 if (!callee_tree)
3971 ret = true;
3973 /* If caller has no option attributes, but callee does then it is not ok to
3974 inline. */
3975 else if (!caller_tree)
3976 ret = false;
3978 else
3980 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3981 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3983 /* Callee's isa options should be a subset of the caller's, i.e. an SSE4 function
3984 can inline an SSE2 function but an SSE2 function can't inline an SSE4
3985 function. */
3986 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3987 != callee_opts->ix86_isa_flags)
3988 ret = false;
3990 /* See if we have the same non-isa options. */
3991 else if (caller_opts->target_flags != callee_opts->target_flags)
3992 ret = false;
3994 /* See if arch, tune, etc. are the same. */
3995 else if (caller_opts->arch != callee_opts->arch)
3996 ret = false;
3998 else if (caller_opts->tune != callee_opts->tune)
3999 ret = false;
4001 else if (caller_opts->fpmath != callee_opts->fpmath)
4002 ret = false;
4004 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4005 ret = false;
4007 else
4008 ret = true;
4011 return ret;
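/* Editorial illustration, not part of the original source: the ISA-subset
   rule above means that with hypothetical code like

     __attribute__((target("sse4.2"))) static int f (int x);
     int g (int x) { return f (x); }

   compiled with plain -msse2, f cannot be inlined into g because f's ISA
   flags are not a subset of g's; inlining an SSE2 function into an SSE4.2
   caller is allowed.  */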
4015 /* Remember the last target of ix86_set_current_function. */
4016 static GTY(()) tree ix86_previous_fndecl;
4018 /* Establish appropriate back-end context for processing the function
4019 FNDECL. The argument might be NULL to indicate processing at top
4020 level, outside of any function scope. */
4021 static void
4022 ix86_set_current_function (tree fndecl)
4024 /* Only change the context if the function changes. This hook is called
4025 several times in the course of compiling a function, and we don't want to
4026 slow things down too much or call target_reinit when it isn't safe. */
4027 if (fndecl && fndecl != ix86_previous_fndecl)
4029 tree old_tree = (ix86_previous_fndecl
4030 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4031 : NULL_TREE);
4033 tree new_tree = (fndecl
4034 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4035 : NULL_TREE);
4037 ix86_previous_fndecl = fndecl;
4038 if (old_tree == new_tree)
4041 else if (new_tree)
4043 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4044 target_reinit ();
4047 else if (old_tree)
4049 struct cl_target_option *def
4050 = TREE_TARGET_OPTION (target_option_current_node);
4052 cl_target_option_restore (def);
4053 target_reinit ();
4059 /* Return true if this goes in large data/bss. */
4061 static bool
4062 ix86_in_large_data_p (tree exp)
4064 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4065 return false;
4067 /* Functions are never large data. */
4068 if (TREE_CODE (exp) == FUNCTION_DECL)
4069 return false;
4071 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4073 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4074 if (strcmp (section, ".ldata") == 0
4075 || strcmp (section, ".lbss") == 0)
4076 return true;
4077 return false;
4079 else
4081 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4083 /* If this is an incomplete type with size 0, then we can't put it
4084 in data because it might be too big when completed. */
4085 if (!size || size > ix86_section_threshold)
4086 return true;
4089 return false;
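/* Editorial illustration, not part of the original source: with
   -mcmodel=medium, an object larger than -mlarge-data-threshold
   (ix86_section_threshold above), e.g. the hypothetical

     static char big_buffer[4 * 1024 * 1024];

   is treated as large data and is placed in .lbss/.ldata by the section
   selection hooks below, while smaller objects stay in the regular
   .bss/.data sections.  */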
4092 /* Switch to the appropriate section for output of DECL.
4093 DECL is either a `VAR_DECL' node or a constant of some sort.
4094 RELOC indicates whether forming the initial value of DECL requires
4095 link-time relocations. */
4097 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4098 ATTRIBUTE_UNUSED;
4100 static section *
4101 x86_64_elf_select_section (tree decl, int reloc,
4102 unsigned HOST_WIDE_INT align)
4104 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4105 && ix86_in_large_data_p (decl))
4107 const char *sname = NULL;
4108 unsigned int flags = SECTION_WRITE;
4109 switch (categorize_decl_for_section (decl, reloc))
4111 case SECCAT_DATA:
4112 sname = ".ldata";
4113 break;
4114 case SECCAT_DATA_REL:
4115 sname = ".ldata.rel";
4116 break;
4117 case SECCAT_DATA_REL_LOCAL:
4118 sname = ".ldata.rel.local";
4119 break;
4120 case SECCAT_DATA_REL_RO:
4121 sname = ".ldata.rel.ro";
4122 break;
4123 case SECCAT_DATA_REL_RO_LOCAL:
4124 sname = ".ldata.rel.ro.local";
4125 break;
4126 case SECCAT_BSS:
4127 sname = ".lbss";
4128 flags |= SECTION_BSS;
4129 break;
4130 case SECCAT_RODATA:
4131 case SECCAT_RODATA_MERGE_STR:
4132 case SECCAT_RODATA_MERGE_STR_INIT:
4133 case SECCAT_RODATA_MERGE_CONST:
4134 sname = ".lrodata";
4135 flags = 0;
4136 break;
4137 case SECCAT_SRODATA:
4138 case SECCAT_SDATA:
4139 case SECCAT_SBSS:
4140 gcc_unreachable ();
4141 case SECCAT_TEXT:
4142 case SECCAT_TDATA:
4143 case SECCAT_TBSS:
4144 /* We don't split these for the medium model. Place them into
4145 the default sections and hope for the best. */
4146 break;
4147 case SECCAT_EMUTLS_VAR:
4148 case SECCAT_EMUTLS_TMPL:
4149 gcc_unreachable ();
4151 if (sname)
4153 /* We might get called with string constants, but get_named_section
4154 doesn't like them as they are not DECLs. Also, we need to set
4155 flags in that case. */
4156 if (!DECL_P (decl))
4157 return get_section (sname, flags, NULL);
4158 return get_named_section (decl, sname, reloc);
4161 return default_elf_select_section (decl, reloc, align);
4164 /* Build up a unique section name, expressed as a
4165 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4166 RELOC indicates whether the initial value of EXP requires
4167 link-time relocations. */
4169 static void ATTRIBUTE_UNUSED
4170 x86_64_elf_unique_section (tree decl, int reloc)
4172 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4173 && ix86_in_large_data_p (decl))
4175 const char *prefix = NULL;
4176 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4177 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4179 switch (categorize_decl_for_section (decl, reloc))
4181 case SECCAT_DATA:
4182 case SECCAT_DATA_REL:
4183 case SECCAT_DATA_REL_LOCAL:
4184 case SECCAT_DATA_REL_RO:
4185 case SECCAT_DATA_REL_RO_LOCAL:
4186 prefix = one_only ? ".ld" : ".ldata";
4187 break;
4188 case SECCAT_BSS:
4189 prefix = one_only ? ".lb" : ".lbss";
4190 break;
4191 case SECCAT_RODATA:
4192 case SECCAT_RODATA_MERGE_STR:
4193 case SECCAT_RODATA_MERGE_STR_INIT:
4194 case SECCAT_RODATA_MERGE_CONST:
4195 prefix = one_only ? ".lr" : ".lrodata";
4196 break;
4197 case SECCAT_SRODATA:
4198 case SECCAT_SDATA:
4199 case SECCAT_SBSS:
4200 gcc_unreachable ();
4201 case SECCAT_TEXT:
4202 case SECCAT_TDATA:
4203 case SECCAT_TBSS:
4204 /* We don't split these for the medium model. Place them into
4205 the default sections and hope for the best. */
4206 break;
4207 case SECCAT_EMUTLS_VAR:
4208 prefix = targetm.emutls.var_section;
4209 break;
4210 case SECCAT_EMUTLS_TMPL:
4211 prefix = targetm.emutls.tmpl_section;
4212 break;
4214 if (prefix)
4216 const char *name, *linkonce;
4217 char *string;
4219 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4220 name = targetm.strip_name_encoding (name);
4222 /* If we're using one_only, then there needs to be a .gnu.linkonce
4223 prefix to the section name. */
4224 linkonce = one_only ? ".gnu.linkonce" : "";
4226 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4228 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4229 return;
4232 default_unique_section (decl, reloc);
4235 #ifdef COMMON_ASM_OP
4236 /* This says how to output assembler code to declare an
4237 uninitialized external linkage data object.
4239 For medium model x86-64 we need to use the .largecomm directive for
4240 large objects. */
4241 void
4242 x86_elf_aligned_common (FILE *file,
4243 const char *name, unsigned HOST_WIDE_INT size,
4244 int align)
4246 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4247 && size > (unsigned int)ix86_section_threshold)
4248 fputs (".largecomm\t", file);
4249 else
4250 fputs (COMMON_ASM_OP, file);
4251 assemble_name (file, name);
4252 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4253 size, align / BITS_PER_UNIT);
4255 #endif
4257 /* Utility function for targets to use in implementing
4258 ASM_OUTPUT_ALIGNED_BSS. */
4260 void
4261 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4262 const char *name, unsigned HOST_WIDE_INT size,
4263 int align)
4265 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4266 && size > (unsigned int)ix86_section_threshold)
4267 switch_to_section (get_named_section (decl, ".lbss", 0));
4268 else
4269 switch_to_section (bss_section);
4270 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4271 #ifdef ASM_DECLARE_OBJECT_NAME
4272 last_assemble_variable_decl = decl;
4273 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4274 #else
4275 /* The standard thing is just to output a label for the object. */
4276 ASM_OUTPUT_LABEL (file, name);
4277 #endif /* ASM_DECLARE_OBJECT_NAME */
4278 ASM_OUTPUT_SKIP (file, size ? size : 1);
4281 void
4282 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4284 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4285 make the problem with not enough registers even worse. */
4286 #ifdef INSN_SCHEDULING
4287 if (level > 1)
4288 flag_schedule_insns = 0;
4289 #endif
4291 if (TARGET_MACHO)
4292 /* The Darwin libraries never set errno, so we might as well
4293 avoid calling them when that's the only reason we would. */
4294 flag_errno_math = 0;
4296 /* The default values of these switches depend on TARGET_64BIT,
4297 which is not known at this moment. Mark these values with 2 and
4298 let the user override them. In case there is no command line option
4299 specifying them, we will set the defaults in override_options. */
4300 if (optimize >= 1)
4301 flag_omit_frame_pointer = 2;
4302 flag_pcc_struct_return = 2;
4303 flag_asynchronous_unwind_tables = 2;
4304 flag_vect_cost_model = 1;
4305 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4306 SUBTARGET_OPTIMIZATION_OPTIONS;
4307 #endif
4310 /* Decide whether we can make a sibling call to a function. DECL is the
4311 declaration of the function being targeted by the call and EXP is the
4312 CALL_EXPR representing the call. */
4314 static bool
4315 ix86_function_ok_for_sibcall (tree decl, tree exp)
4317 tree type, decl_or_type;
4318 rtx a, b;
4320 /* If we are generating position-independent code, we cannot sibcall
4321 optimize any indirect call, or a direct call to a global function,
4322 as the PLT requires %ebx be live. */
4323 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4324 return false;
4326 /* If we need to align the outgoing stack, then sibcalling would
4327 unalign the stack, which may break the called function. */
4328 if (ix86_minimum_incoming_stack_boundary (true)
4329 < PREFERRED_STACK_BOUNDARY)
4330 return false;
4332 if (decl)
4334 decl_or_type = decl;
4335 type = TREE_TYPE (decl);
4337 else
4339 /* We're looking at the CALL_EXPR, we need the type of the function. */
4340 type = CALL_EXPR_FN (exp); /* pointer expression */
4341 type = TREE_TYPE (type); /* pointer type */
4342 type = TREE_TYPE (type); /* function type */
4343 decl_or_type = type;
4346 /* Check that the return value locations are the same. Like
4347 if we are returning floats on the 80387 register stack, we cannot
4348 make a sibcall from a function that doesn't return a float to a
4349 function that does or, conversely, from a function that does return
4350 a float to a function that doesn't; the necessary stack adjustment
4351 would not be executed. This is also the place we notice
4352 differences in the return value ABI. Note that it is ok for one
4353 of the functions to have void return type as long as the return
4354 value of the other is passed in a register. */
4355 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4356 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4357 cfun->decl, false);
4358 if (STACK_REG_P (a) || STACK_REG_P (b))
4360 if (!rtx_equal_p (a, b))
4361 return false;
4363 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4365 else if (!rtx_equal_p (a, b))
4366 return false;
4368 if (TARGET_64BIT)
4370 /* The SYSV ABI has more call-clobbered registers;
4371 disallow sibcalls from MS to SYSV. */
4372 if (cfun->machine->call_abi == MS_ABI
4373 && ix86_function_type_abi (type) == SYSV_ABI)
4374 return false;
4376 else
4378 /* If this call is indirect, we'll need to be able to use a
4379 call-clobbered register for the address of the target function.
4380 Make sure that all such registers are not used for passing
4381 parameters. Note that DLLIMPORT functions are indirect. */
4382 if (!decl
4383 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4385 if (ix86_function_regparm (type, NULL) >= 3)
4387 /* ??? Need to count the actual number of registers to be used,
4388 not the possible number of registers. Fix later. */
4389 return false;
4394 /* Otherwise okay. That also includes certain types of indirect calls. */
4395 return true;
4398 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4399 and "sseregparm" calling convention attributes;
4400 arguments as in struct attribute_spec.handler. */
4402 static tree
4403 ix86_handle_cconv_attribute (tree *node, tree name,
4404 tree args,
4405 int flags ATTRIBUTE_UNUSED,
4406 bool *no_add_attrs)
4408 if (TREE_CODE (*node) != FUNCTION_TYPE
4409 && TREE_CODE (*node) != METHOD_TYPE
4410 && TREE_CODE (*node) != FIELD_DECL
4411 && TREE_CODE (*node) != TYPE_DECL)
4413 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4414 name);
4415 *no_add_attrs = true;
4416 return NULL_TREE;
4419 /* Can combine regparm with all attributes but fastcall. */
4420 if (is_attribute_p ("regparm", name))
4422 tree cst;
4424 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4426 error ("fastcall and regparm attributes are not compatible");
4429 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4431 error ("regparm and thiscall attributes are not compatible");
4434 cst = TREE_VALUE (args);
4435 if (TREE_CODE (cst) != INTEGER_CST)
4437 warning (OPT_Wattributes,
4438 "%qE attribute requires an integer constant argument",
4439 name);
4440 *no_add_attrs = true;
4442 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4444 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4445 name, REGPARM_MAX);
4446 *no_add_attrs = true;
4449 return NULL_TREE;
4452 if (TARGET_64BIT)
4454 /* Do not warn when emulating the MS ABI. */
4455 if ((TREE_CODE (*node) != FUNCTION_TYPE
4456 && TREE_CODE (*node) != METHOD_TYPE)
4457 || ix86_function_type_abi (*node) != MS_ABI)
4458 warning (OPT_Wattributes, "%qE attribute ignored",
4459 name);
4460 *no_add_attrs = true;
4461 return NULL_TREE;
4464 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4465 if (is_attribute_p ("fastcall", name))
4467 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4469 error ("fastcall and cdecl attributes are not compatible");
4471 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4473 error ("fastcall and stdcall attributes are not compatible");
4475 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4477 error ("fastcall and regparm attributes are not compatible");
4479 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4481 error ("fastcall and thiscall attributes are not compatible");
4485 /* Can combine stdcall with fastcall (redundant), regparm and
4486 sseregparm. */
4487 else if (is_attribute_p ("stdcall", name))
4489 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4491 error ("stdcall and cdecl attributes are not compatible");
4493 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4495 error ("stdcall and fastcall attributes are not compatible");
4497 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4499 error ("stdcall and thiscall attributes are not compatible");
4503 /* Can combine cdecl with regparm and sseregparm. */
4504 else if (is_attribute_p ("cdecl", name))
4506 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4508 error ("stdcall and cdecl attributes are not compatible");
4510 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4512 error ("fastcall and cdecl attributes are not compatible");
4514 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4516 error ("cdecl and thiscall attributes are not compatible");
4519 else if (is_attribute_p ("thiscall", name))
4521 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4522 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
4523 name);
4524 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4526 error ("stdcall and thiscall attributes are not compatible");
4528 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4530 error ("fastcall and thiscall attributes are not compatible");
4532 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4534 error ("cdecl and thiscall attributes are not compatible");
4538 /* Can combine sseregparm with all attributes. */
4540 return NULL_TREE;
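/* Editorial illustration, not part of the original source: the checks above
   accept or reject attribute combinations such as these hypothetical
   declarations:

     void ok1 (int, int) __attribute__((stdcall, regparm (2)));
     void ok2 (int, int) __attribute__((fastcall, sseregparm));
     void bad (int, int) __attribute__((fastcall, regparm (3)));

   The first two are accepted; the last one triggers "fastcall and regparm
   attributes are not compatible".  */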
4543 /* Return 0 if the attributes for two types are incompatible, 1 if they
4544 are compatible, and 2 if they are nearly compatible (which causes a
4545 warning to be generated). */
4547 static int
4548 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4550 /* Check for mismatch of non-default calling convention. */
4551 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4553 if (TREE_CODE (type1) != FUNCTION_TYPE
4554 && TREE_CODE (type1) != METHOD_TYPE)
4555 return 1;
4557 /* Check for mismatched fastcall/regparm types. */
4558 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4559 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4560 || (ix86_function_regparm (type1, NULL)
4561 != ix86_function_regparm (type2, NULL)))
4562 return 0;
4564 /* Check for mismatched sseregparm types. */
4565 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4566 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4567 return 0;
4569 /* Check for mismatched thiscall types. */
4570 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4571 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4572 return 0;
4574 /* Check for mismatched return types (cdecl vs stdcall). */
4575 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4576 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4577 return 0;
4579 return 1;
4582 /* Return the regparm value for a function with the indicated TYPE and DECL.
4583 DECL may be NULL when calling function indirectly
4584 or considering a libcall. */
4586 static int
4587 ix86_function_regparm (const_tree type, const_tree decl)
4589 tree attr;
4590 int regparm;
4592 if (TARGET_64BIT)
4593 return (ix86_function_type_abi (type) == SYSV_ABI
4594 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4596 regparm = ix86_regparm;
4597 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4598 if (attr)
4600 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4601 return regparm;
4604 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4605 return 2;
4607 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4608 return 1;
4610 /* Use register calling convention for local functions when possible. */
4611 if (decl
4612 && TREE_CODE (decl) == FUNCTION_DECL
4613 && optimize
4614 && !profile_flag)
4616 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4617 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4618 if (i && i->local)
4620 int local_regparm, globals = 0, regno;
4622 /* Make sure no regparm register is taken by a
4623 fixed register variable. */
4624 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4625 if (fixed_regs[local_regparm])
4626 break;
4628 /* We don't want to use regparm(3) for nested functions as
4629 these use a static chain pointer in the third argument. */
4630 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4631 local_regparm = 2;
4633 /* Each fixed register usage increases register pressure,
4634 so fewer registers should be used for argument passing.
4635 This functionality can be overridden by an explicit
4636 regparm value. */
4637 for (regno = 0; regno <= DI_REG; regno++)
4638 if (fixed_regs[regno])
4639 globals++;
4641 local_regparm
4642 = globals < local_regparm ? local_regparm - globals : 0;
4644 if (local_regparm > regparm)
4645 regparm = local_regparm;
4649 return regparm;
4652 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4653 DFmode (2) arguments in SSE registers for a function with the
4654 indicated TYPE and DECL. DECL may be NULL when calling function
4655 indirectly or considering a libcall. Otherwise return 0. */
4657 static int
4658 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4660 gcc_assert (!TARGET_64BIT);
4662 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4663 by the sseregparm attribute. */
4664 if (TARGET_SSEREGPARM
4665 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4667 if (!TARGET_SSE)
4669 if (warn)
4671 if (decl)
4672 error ("calling %qD with attribute sseregparm without "
4673 "SSE/SSE2 enabled", decl);
4674 else
4675 error ("calling %qT with attribute sseregparm without "
4676 "SSE/SSE2 enabled", type);
4678 return 0;
4681 return 2;
4684 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4685 (and DFmode for SSE2) arguments in SSE registers. */
4686 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4688 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4689 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4690 if (i && i->local)
4691 return TARGET_SSE2 ? 2 : 1;
4694 return 0;
4697 /* Return true if EAX is live at the start of the function. Used by
4698 ix86_expand_prologue to determine if we need special help before
4699 calling allocate_stack_worker. */
4701 static bool
4702 ix86_eax_live_at_start_p (void)
4704 /* Cheat. Don't bother working forward from ix86_function_regparm
4705 to the function type to whether an actual argument is located in
4706 eax. Instead just look at cfg info, which is still close enough
4707 to correct at this point. This gives false positives for broken
4708 functions that might use uninitialized data that happens to be
4709 allocated in eax, but who cares? */
4710 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4713 /* Value is the number of bytes of arguments automatically
4714 popped when returning from a subroutine call.
4715 FUNDECL is the declaration node of the function (as a tree),
4716 FUNTYPE is the data type of the function (as a tree),
4717 or for a library call it is an identifier node for the subroutine name.
4718 SIZE is the number of bytes of arguments passed on the stack.
4720 On the 80386, the RTD insn may be used to pop them if the number
4721 of args is fixed, but if the number is variable then the caller
4722 must pop them all. RTD can't be used for library calls now
4723 because the library is compiled with the Unix compiler.
4724 Use of RTD is a selectable option, since it is incompatible with
4725 standard Unix calling sequences. If the option is not selected,
4726 the caller must always pop the args.
4728 The attribute stdcall is equivalent to RTD on a per-module basis. */
4731 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4733 int rtd;
4735 /* None of the 64-bit ABIs pop arguments. */
4736 if (TARGET_64BIT)
4737 return 0;
4739 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4741 /* Cdecl functions override -mrtd, and never pop the stack. */
4742 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4744 /* Stdcall and fastcall functions will pop the stack if not
4745 variable args. */
4746 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4747 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
4748 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
4749 rtd = 1;
4751 if (rtd && ! stdarg_p (funtype))
4752 return size;
4755 /* Lose any fake structure return argument if it is passed on the stack. */
4756 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4757 && !KEEP_AGGREGATE_RETURN_POINTER)
4759 int nregs = ix86_function_regparm (funtype, fundecl);
4760 if (nregs == 0)
4761 return GET_MODE_SIZE (Pmode);
4764 return 0;
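/* Editorial illustration, not part of the original source: for a 32-bit
   declaration such as the hypothetical

     void callback (int a, int b) __attribute__((stdcall));

   ix86_return_pops_args returns 8, so the callee pops both arguments
   (e.g. with "ret $8"); a cdecl or variadic function returns 0 and the
   caller cleans up the stack instead.  */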
4767 /* Argument support functions. */
4769 /* Return true when register may be used to pass function parameters. */
4770 bool
4771 ix86_function_arg_regno_p (int regno)
4773 int i;
4774 const int *parm_regs;
4776 if (!TARGET_64BIT)
4778 if (TARGET_MACHO)
4779 return (regno < REGPARM_MAX
4780 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4781 else
4782 return (regno < REGPARM_MAX
4783 || (TARGET_MMX && MMX_REGNO_P (regno)
4784 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4785 || (TARGET_SSE && SSE_REGNO_P (regno)
4786 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4789 if (TARGET_MACHO)
4791 if (SSE_REGNO_P (regno) && TARGET_SSE)
4792 return true;
4794 else
4796 if (TARGET_SSE && SSE_REGNO_P (regno)
4797 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4798 return true;
4801 /* TODO: The function should depend on current function ABI but
4802 builtins.c would need updating then. Therefore we use the
4803 default ABI. */
4805 /* RAX is used as hidden argument to va_arg functions. */
4806 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4807 return true;
4809 if (ix86_abi == MS_ABI)
4810 parm_regs = x86_64_ms_abi_int_parameter_registers;
4811 else
4812 parm_regs = x86_64_int_parameter_registers;
4813 for (i = 0; i < (ix86_abi == MS_ABI
4814 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4815 if (regno == parm_regs[i])
4816 return true;
4817 return false;
4820 /* Return if we do not know how to pass TYPE solely in registers. */
4822 static bool
4823 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4825 if (must_pass_in_stack_var_size_or_pad (mode, type))
4826 return true;
4828 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4829 The layout_type routine is crafty and tries to trick us into passing
4830 currently unsupported vector types on the stack by using TImode. */
4831 return (!TARGET_64BIT && mode == TImode
4832 && type && TREE_CODE (type) != VECTOR_TYPE);
4835 /* Return the size, in bytes, of the area reserved for arguments passed
4836 in registers for the function represented by FNDECL, depending on the
4837 ABI used. */
4839 ix86_reg_parm_stack_space (const_tree fndecl)
4841 enum calling_abi call_abi = SYSV_ABI;
4842 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4843 call_abi = ix86_function_abi (fndecl);
4844 else
4845 call_abi = ix86_function_type_abi (fndecl);
4846 if (call_abi == MS_ABI)
4847 return 32;
4848 return 0;
4851 /* Returns value SYSV_ABI, MS_ABI dependent on fntype, specifying the
4852 call abi used. */
4853 enum calling_abi
4854 ix86_function_type_abi (const_tree fntype)
4856 if (TARGET_64BIT && fntype != NULL)
4858 enum calling_abi abi = ix86_abi;
4859 if (abi == SYSV_ABI)
4861 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4862 abi = MS_ABI;
4864 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4865 abi = SYSV_ABI;
4866 return abi;
4868 return ix86_abi;
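/* Editorial illustration, not part of the original source: on a 64-bit
   SYSV target, a prototype such as the hypothetical

     int win_callback (void *) __attribute__((ms_abi));

   makes ix86_function_type_abi return MS_ABI for that type, so calls use
   the Microsoft argument registers; the ms_abi/sysv_abi attributes only
   take effect when TARGET_64BIT, as coded above.  */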
4871 static bool
4872 ix86_function_ms_hook_prologue (const_tree fntype)
4874 if (!TARGET_64BIT)
4876 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4878 if (decl_function_context (fntype) != NULL_TREE)
4880 error_at (DECL_SOURCE_LOCATION (fntype),
4881 "ms_hook_prologue is not compatible with nested function");
4884 return true;
4887 return false;
4890 static enum calling_abi
4891 ix86_function_abi (const_tree fndecl)
4893 if (! fndecl)
4894 return ix86_abi;
4895 return ix86_function_type_abi (TREE_TYPE (fndecl));
4898 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
4899 call abi used. */
4900 enum calling_abi
4901 ix86_cfun_abi (void)
4903 if (! cfun || ! TARGET_64BIT)
4904 return ix86_abi;
4905 return cfun->machine->call_abi;
4908 /* regclass.c */
4909 extern void init_regs (void);
4911 /* Implementation of the call ABI switching target hook. Set up the
4912 call register sets specific to FNDECL. See also CONDITIONAL_REGISTER_USAGE
4913 for more details. */
4914 void
4915 ix86_call_abi_override (const_tree fndecl)
4917 if (fndecl == NULL_TREE)
4918 cfun->machine->call_abi = ix86_abi;
4919 else
4920 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4923 /* The MS and SYSV ABIs have different sets of call-used registers. Avoid expensive
4924 re-initialization of init_regs each time we switch function context since
4925 this is needed only during RTL expansion. */
4926 static void
4927 ix86_maybe_switch_abi (void)
4929 if (TARGET_64BIT &&
4930 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4931 reinit_regs ();
4934 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4935 for a call to a function whose data type is FNTYPE.
4936 For a library call, FNTYPE is 0. */
4938 void
4939 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4940 tree fntype, /* tree ptr for function decl */
4941 rtx libname, /* SYMBOL_REF of library name or 0 */
4942 tree fndecl)
4944 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4945 memset (cum, 0, sizeof (*cum));
4947 if (fndecl)
4948 cum->call_abi = ix86_function_abi (fndecl);
4949 else
4950 cum->call_abi = ix86_function_type_abi (fntype);
4951 /* Set up the number of registers to use for passing arguments. */
4953 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4954 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4955 "or subtarget optimization implying it");
4956 cum->nregs = ix86_regparm;
4957 if (TARGET_64BIT)
4959 if (cum->call_abi != ix86_abi)
4960 cum->nregs = (ix86_abi != SYSV_ABI
4961 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4963 if (TARGET_SSE)
4965 cum->sse_nregs = SSE_REGPARM_MAX;
4966 if (TARGET_64BIT)
4968 if (cum->call_abi != ix86_abi)
4969 cum->sse_nregs = (ix86_abi != SYSV_ABI
4970 ? X86_64_SSE_REGPARM_MAX
4971 : X86_64_MS_SSE_REGPARM_MAX);
4974 if (TARGET_MMX)
4975 cum->mmx_nregs = MMX_REGPARM_MAX;
4976 cum->warn_avx = true;
4977 cum->warn_sse = true;
4978 cum->warn_mmx = true;
4980 /* Because the type might mismatch between caller and callee, we need to
4981 use the actual type of the function for local calls.
4982 FIXME: cgraph_analyze can be told to actually record if a function uses
4983 va_start so for local functions maybe_vaarg can be made aggressive,
4984 helping K&R code.
4985 FIXME: once the type system is fixed, we won't need this code anymore. */
4986 if (i && i->local)
4987 fntype = TREE_TYPE (fndecl);
4988 cum->maybe_vaarg = (fntype
4989 ? (!prototype_p (fntype) || stdarg_p (fntype))
4990 : !libname);
4992 if (!TARGET_64BIT)
4994 /* If there are variable arguments, then we won't pass anything
4995 in registers in 32-bit mode. */
4996 if (stdarg_p (fntype))
4998 cum->nregs = 0;
4999 cum->sse_nregs = 0;
5000 cum->mmx_nregs = 0;
5001 cum->warn_avx = 0;
5002 cum->warn_sse = 0;
5003 cum->warn_mmx = 0;
5004 return;
5007 /* Use ecx and edx registers if function has fastcall attribute,
5008 else look for regparm information. */
5009 if (fntype)
5011 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5013 cum->nregs = 1;
5014 cum->fastcall = 1; /* Same first register as in fastcall. */
5016 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5018 cum->nregs = 2;
5019 cum->fastcall = 1;
5021 else
5022 cum->nregs = ix86_function_regparm (fntype, fndecl);
5025 /* Set up the number of SSE registers used for passing SFmode
5026 and DFmode arguments. Warn for mismatching ABI. */
5027 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5031 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5032 But in the case of vector types, it is some vector mode.
5034 When we have only some of our vector isa extensions enabled, then there
5035 are some modes for which vector_mode_supported_p is false. For these
5036 modes, the generic vector support in gcc will choose some non-vector mode
5037 in order to implement the type. By computing the natural mode, we'll
5038 select the proper ABI location for the operand and not depend on whatever
5039 the middle-end decides to do with these vector types.
5041 The middle-end can't deal with vector types > 16 bytes. In this
5042 case, we return the original mode and warn about the ABI change if CUM isn't
5043 NULL. */
5045 static enum machine_mode
5046 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
5048 enum machine_mode mode = TYPE_MODE (type);
5050 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5052 HOST_WIDE_INT size = int_size_in_bytes (type);
5053 if ((size == 8 || size == 16 || size == 32)
5054 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5055 && TYPE_VECTOR_SUBPARTS (type) > 1)
5057 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5059 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5060 mode = MIN_MODE_VECTOR_FLOAT;
5061 else
5062 mode = MIN_MODE_VECTOR_INT;
5064 /* Get the mode which has this inner mode and number of units. */
5065 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5066 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5067 && GET_MODE_INNER (mode) == innermode)
5069 if (size == 32 && !TARGET_AVX)
5071 static bool warnedavx;
5073 if (cum
5074 && !warnedavx
5075 && cum->warn_avx)
5077 warnedavx = true;
5078 warning (0, "AVX vector argument without AVX "
5079 "enabled changes the ABI");
5081 return TYPE_MODE (type);
5083 else
5084 return mode;
5087 gcc_unreachable ();
5091 return mode;
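/* Illustrative note (editorial, not part of the original source): assuming a
   generic vector type such as

       typedef int v4si __attribute__ ((vector_size (16)));

   type_natural_mode above maps it to V4SImode for argument-passing purposes
   even when the ISA extension is disabled and the middle-end therefore gave
   the type some non-vector TYPE_MODE; only 32-byte vectors without AVX fall
   back to TYPE_MODE (with an ABI-change warning).  */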
5094 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5095 this may not agree with the mode that the type system has chosen for the
5096 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5097 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5099 static rtx
5100 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5101 unsigned int regno)
5103 rtx tmp;
5105 if (orig_mode != BLKmode)
5106 tmp = gen_rtx_REG (orig_mode, regno);
5107 else
5109 tmp = gen_rtx_REG (mode, regno);
5110 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5111 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5114 return tmp;
5117 /* x86-64 register passing implementation. See x86-64 ABI for details. Goal
5118 of this code is to classify each 8bytes of incoming argument by the register
5119 class and assign registers accordingly. */
5121 /* Return the union class of CLASS1 and CLASS2.
5122 See the x86-64 PS ABI for details. */
5124 static enum x86_64_reg_class
5125 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5127 /* Rule #1: If both classes are equal, this is the resulting class. */
5128 if (class1 == class2)
5129 return class1;
5131 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5132 the other class. */
5133 if (class1 == X86_64_NO_CLASS)
5134 return class2;
5135 if (class2 == X86_64_NO_CLASS)
5136 return class1;
5138 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5139 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5140 return X86_64_MEMORY_CLASS;
5142 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5143 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5144 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5145 return X86_64_INTEGERSI_CLASS;
5146 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5147 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5148 return X86_64_INTEGER_CLASS;
5150 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5151 MEMORY is used. */
5152 if (class1 == X86_64_X87_CLASS
5153 || class1 == X86_64_X87UP_CLASS
5154 || class1 == X86_64_COMPLEX_X87_CLASS
5155 || class2 == X86_64_X87_CLASS
5156 || class2 == X86_64_X87UP_CLASS
5157 || class2 == X86_64_COMPLEX_X87_CLASS)
5158 return X86_64_MEMORY_CLASS;
5160 /* Rule #6: Otherwise class SSE is used. */
5161 return X86_64_SSE_CLASS;
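/* Illustrative note (editorial, not part of the original source): with the
   rules above, X86_64_NO_CLASS merged with X86_64_SSEDF_CLASS yields
   X86_64_SSEDF_CLASS (rule #2), X86_64_INTEGERSI_CLASS merged with
   X86_64_SSESF_CLASS yields X86_64_INTEGERSI_CLASS (rule #4), and
   X86_64_SSE_CLASS merged with X86_64_SSEUP_CLASS yields X86_64_SSE_CLASS
   (rule #6).  */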
5164 /* Classify the argument of type TYPE and mode MODE.
5165 CLASSES will be filled by the register class used to pass each word
5166 of the operand. The number of words is returned. In case the parameter
5167 should be passed in memory, 0 is returned. As a special case for zero
5168 sized containers, classes[0] will be NO_CLASS and 1 is returned.
 5170    BIT_OFFSET is used internally for handling records and specifies the
 5171    offset in bits, modulo 256 to avoid overflow cases.
 5173    See the x86-64 PS ABI for details.  */
5176 static int
5177 classify_argument (enum machine_mode mode, const_tree type,
5178 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5180 HOST_WIDE_INT bytes =
5181 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5182 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5184 /* Variable sized entities are always passed/returned in memory. */
5185 if (bytes < 0)
5186 return 0;
5188 if (mode != VOIDmode
5189 && targetm.calls.must_pass_in_stack (mode, type))
5190 return 0;
5192 if (type && AGGREGATE_TYPE_P (type))
5194 int i;
5195 tree field;
5196 enum x86_64_reg_class subclasses[MAX_CLASSES];
5198 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5199 if (bytes > 32)
5200 return 0;
5202 for (i = 0; i < words; i++)
5203 classes[i] = X86_64_NO_CLASS;
5205 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
 5206 	 signal the memory class, so handle it as a special case.  */
5207 if (!words)
5209 classes[0] = X86_64_NO_CLASS;
5210 return 1;
5213 /* Classify each field of record and merge classes. */
5214 switch (TREE_CODE (type))
5216 case RECORD_TYPE:
5217 /* And now merge the fields of structure. */
5218 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5220 if (TREE_CODE (field) == FIELD_DECL)
5222 int num;
5224 if (TREE_TYPE (field) == error_mark_node)
5225 continue;
5227 /* Bitfields are always classified as integer. Handle them
5228 early, since later code would consider them to be
5229 misaligned integers. */
5230 if (DECL_BIT_FIELD (field))
5232 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5233 i < ((int_bit_position (field) + (bit_offset % 64))
5234 + tree_low_cst (DECL_SIZE (field), 0)
5235 + 63) / 8 / 8; i++)
5236 classes[i] =
5237 merge_classes (X86_64_INTEGER_CLASS,
5238 classes[i]);
5240 else
5242 int pos;
5244 type = TREE_TYPE (field);
5246 /* Flexible array member is ignored. */
5247 if (TYPE_MODE (type) == BLKmode
5248 && TREE_CODE (type) == ARRAY_TYPE
5249 && TYPE_SIZE (type) == NULL_TREE
5250 && TYPE_DOMAIN (type) != NULL_TREE
5251 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5252 == NULL_TREE))
5254 static bool warned;
5256 if (!warned && warn_psabi)
5258 warned = true;
5259 inform (input_location,
5260 "The ABI of passing struct with"
5261 " a flexible array member has"
5262 " changed in GCC 4.4");
5264 continue;
5266 num = classify_argument (TYPE_MODE (type), type,
5267 subclasses,
5268 (int_bit_position (field)
5269 + bit_offset) % 256);
5270 if (!num)
5271 return 0;
5272 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5273 for (i = 0; i < num && (i + pos) < words; i++)
5274 classes[i + pos] =
5275 merge_classes (subclasses[i], classes[i + pos]);
5279 break;
5281 case ARRAY_TYPE:
5282 /* Arrays are handled as small records. */
5284 int num;
5285 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5286 TREE_TYPE (type), subclasses, bit_offset);
5287 if (!num)
5288 return 0;
5290 /* The partial classes are now full classes. */
5291 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5292 subclasses[0] = X86_64_SSE_CLASS;
5293 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5294 && !((bit_offset % 64) == 0 && bytes == 4))
5295 subclasses[0] = X86_64_INTEGER_CLASS;
5297 for (i = 0; i < words; i++)
5298 classes[i] = subclasses[i % num];
5300 break;
5302 case UNION_TYPE:
5303 case QUAL_UNION_TYPE:
 5304 	/* Unions are similar to RECORD_TYPE but offset is always 0.  */
5306 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5308 if (TREE_CODE (field) == FIELD_DECL)
5310 int num;
5312 if (TREE_TYPE (field) == error_mark_node)
5313 continue;
5315 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5316 TREE_TYPE (field), subclasses,
5317 bit_offset);
5318 if (!num)
5319 return 0;
5320 for (i = 0; i < num; i++)
5321 classes[i] = merge_classes (subclasses[i], classes[i]);
5324 break;
5326 default:
5327 gcc_unreachable ();
5330 if (words > 2)
5332 /* When size > 16 bytes, if the first one isn't
5333 X86_64_SSE_CLASS or any other ones aren't
5334 X86_64_SSEUP_CLASS, everything should be passed in
5335 memory. */
5336 if (classes[0] != X86_64_SSE_CLASS)
5337 return 0;
5339 for (i = 1; i < words; i++)
5340 if (classes[i] != X86_64_SSEUP_CLASS)
5341 return 0;
5344 /* Final merger cleanup. */
5345 for (i = 0; i < words; i++)
5347 /* If one class is MEMORY, everything should be passed in
5348 memory. */
5349 if (classes[i] == X86_64_MEMORY_CLASS)
5350 return 0;
5352 /* The X86_64_SSEUP_CLASS should be always preceded by
5353 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5354 if (classes[i] == X86_64_SSEUP_CLASS
5355 && classes[i - 1] != X86_64_SSE_CLASS
5356 && classes[i - 1] != X86_64_SSEUP_CLASS)
5358 /* The first one should never be X86_64_SSEUP_CLASS. */
5359 gcc_assert (i != 0);
5360 classes[i] = X86_64_SSE_CLASS;
5363 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5364 everything should be passed in memory. */
5365 if (classes[i] == X86_64_X87UP_CLASS
5366 && (classes[i - 1] != X86_64_X87_CLASS))
5368 static bool warned;
5370 /* The first one should never be X86_64_X87UP_CLASS. */
5371 gcc_assert (i != 0);
5372 if (!warned && warn_psabi)
5374 warned = true;
5375 inform (input_location,
5376 "The ABI of passing union with long double"
5377 " has changed in GCC 4.4");
5379 return 0;
5382 return words;
5385 /* Compute alignment needed. We align all types to natural boundaries with
 5386      the exception of XFmode, which is aligned to 64 bits.  */
5387 if (mode != VOIDmode && mode != BLKmode)
5389 int mode_alignment = GET_MODE_BITSIZE (mode);
5391 if (mode == XFmode)
5392 mode_alignment = 128;
5393 else if (mode == XCmode)
5394 mode_alignment = 256;
5395 if (COMPLEX_MODE_P (mode))
5396 mode_alignment /= 2;
5397 /* Misaligned fields are always returned in memory. */
5398 if (bit_offset % mode_alignment)
5399 return 0;
5402 /* for V1xx modes, just use the base mode */
5403 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5404 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5405 mode = GET_MODE_INNER (mode);
5407 /* Classification of atomic types. */
5408 switch (mode)
5410 case SDmode:
5411 case DDmode:
5412 classes[0] = X86_64_SSE_CLASS;
5413 return 1;
5414 case TDmode:
5415 classes[0] = X86_64_SSE_CLASS;
5416 classes[1] = X86_64_SSEUP_CLASS;
5417 return 2;
5418 case DImode:
5419 case SImode:
5420 case HImode:
5421 case QImode:
5422 case CSImode:
5423 case CHImode:
5424 case CQImode:
5426 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
5428 if (size <= 32)
5430 classes[0] = X86_64_INTEGERSI_CLASS;
5431 return 1;
5433 else if (size <= 64)
5435 classes[0] = X86_64_INTEGER_CLASS;
5436 return 1;
5438 else if (size <= 64+32)
5440 classes[0] = X86_64_INTEGER_CLASS;
5441 classes[1] = X86_64_INTEGERSI_CLASS;
5442 return 2;
5444 else if (size <= 64+64)
5446 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5447 return 2;
5449 else
5450 gcc_unreachable ();
5452 case CDImode:
5453 case TImode:
5454 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5455 return 2;
5456 case COImode:
5457 case OImode:
5458 /* OImode shouldn't be used directly. */
5459 gcc_unreachable ();
5460 case CTImode:
5461 return 0;
5462 case SFmode:
5463 if (!(bit_offset % 64))
5464 classes[0] = X86_64_SSESF_CLASS;
5465 else
5466 classes[0] = X86_64_SSE_CLASS;
5467 return 1;
5468 case DFmode:
5469 classes[0] = X86_64_SSEDF_CLASS;
5470 return 1;
5471 case XFmode:
5472 classes[0] = X86_64_X87_CLASS;
5473 classes[1] = X86_64_X87UP_CLASS;
5474 return 2;
5475 case TFmode:
5476 classes[0] = X86_64_SSE_CLASS;
5477 classes[1] = X86_64_SSEUP_CLASS;
5478 return 2;
5479 case SCmode:
5480 classes[0] = X86_64_SSE_CLASS;
5481 if (!(bit_offset % 64))
5482 return 1;
5483 else
5485 static bool warned;
5487 if (!warned && warn_psabi)
5489 warned = true;
5490 inform (input_location,
5491 "The ABI of passing structure with complex float"
5492 " member has changed in GCC 4.4");
5494 classes[1] = X86_64_SSESF_CLASS;
5495 return 2;
5497 case DCmode:
5498 classes[0] = X86_64_SSEDF_CLASS;
5499 classes[1] = X86_64_SSEDF_CLASS;
5500 return 2;
5501 case XCmode:
5502 classes[0] = X86_64_COMPLEX_X87_CLASS;
5503 return 1;
5504 case TCmode:
 5505       /* This mode is larger than 16 bytes.  */
5506 return 0;
5507 case V8SFmode:
5508 case V8SImode:
5509 case V32QImode:
5510 case V16HImode:
5511 case V4DFmode:
5512 case V4DImode:
5513 classes[0] = X86_64_SSE_CLASS;
5514 classes[1] = X86_64_SSEUP_CLASS;
5515 classes[2] = X86_64_SSEUP_CLASS;
5516 classes[3] = X86_64_SSEUP_CLASS;
5517 return 4;
5518 case V4SFmode:
5519 case V4SImode:
5520 case V16QImode:
5521 case V8HImode:
5522 case V2DFmode:
5523 case V2DImode:
5524 classes[0] = X86_64_SSE_CLASS;
5525 classes[1] = X86_64_SSEUP_CLASS;
5526 return 2;
5527 case V1TImode:
5528 case V1DImode:
5529 case V2SFmode:
5530 case V2SImode:
5531 case V4HImode:
5532 case V8QImode:
5533 classes[0] = X86_64_SSE_CLASS;
5534 return 1;
5535 case BLKmode:
5536 case VOIDmode:
5537 return 0;
5538 default:
5539 gcc_assert (VECTOR_MODE_P (mode));
5541 if (bytes > 16)
5542 return 0;
5544 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5546 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5547 classes[0] = X86_64_INTEGERSI_CLASS;
5548 else
5549 classes[0] = X86_64_INTEGER_CLASS;
5550 classes[1] = X86_64_INTEGER_CLASS;
5551 return 1 + (bytes > 8);
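/* Worked example (editorial, not part of the original source): assuming the
   SysV x86-64 ABI, a structure such as

       struct s { double d; int i; };      (a hypothetical 16-byte example)

   is classified above as two eightbytes, classes[0] = X86_64_SSEDF_CLASS and
   classes[1] = X86_64_INTEGERSI_CLASS, and classify_argument returns 2; an
   aggregate larger than 32 bytes makes it return 0, i.e. pass in memory.  */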
 5555 /* Examine the argument and set the number of registers required in each
 5556    class.  Return 0 iff the parameter should be passed in memory.  */
5557 static int
5558 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5559 int *int_nregs, int *sse_nregs)
5561 enum x86_64_reg_class regclass[MAX_CLASSES];
5562 int n = classify_argument (mode, type, regclass, 0);
5564 *int_nregs = 0;
5565 *sse_nregs = 0;
5566 if (!n)
5567 return 0;
5568 for (n--; n >= 0; n--)
5569 switch (regclass[n])
5571 case X86_64_INTEGER_CLASS:
5572 case X86_64_INTEGERSI_CLASS:
5573 (*int_nregs)++;
5574 break;
5575 case X86_64_SSE_CLASS:
5576 case X86_64_SSESF_CLASS:
5577 case X86_64_SSEDF_CLASS:
5578 (*sse_nregs)++;
5579 break;
5580 case X86_64_NO_CLASS:
5581 case X86_64_SSEUP_CLASS:
5582 break;
5583 case X86_64_X87_CLASS:
5584 case X86_64_X87UP_CLASS:
5585 if (!in_return)
5586 return 0;
5587 break;
5588 case X86_64_COMPLEX_X87_CLASS:
5589 return in_return ? 2 : 0;
5590 case X86_64_MEMORY_CLASS:
5591 gcc_unreachable ();
5593 return 1;
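/* Illustrative note (editorial, not part of the original source): for the
   hypothetical struct { double d; int i; } discussed earlier,
   examine_argument sets *sse_nregs = 1 and *int_nregs = 1 and returns
   nonzero, so the struct travels in registers whenever enough of both kinds
   remain; a 40-byte aggregate makes classify_argument return 0, so
   examine_argument returns 0 and the value goes on the stack.  */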
5596 /* Construct container for the argument used by GCC interface. See
5597 FUNCTION_ARG for the detailed description. */
5599 static rtx
5600 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5601 const_tree type, int in_return, int nintregs, int nsseregs,
5602 const int *intreg, int sse_regno)
5604 /* The following variables hold the static issued_error state. */
5605 static bool issued_sse_arg_error;
5606 static bool issued_sse_ret_error;
5607 static bool issued_x87_ret_error;
5609 enum machine_mode tmpmode;
5610 int bytes =
5611 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5612 enum x86_64_reg_class regclass[MAX_CLASSES];
5613 int n;
5614 int i;
5615 int nexps = 0;
5616 int needed_sseregs, needed_intregs;
5617 rtx exp[MAX_CLASSES];
5618 rtx ret;
5620 n = classify_argument (mode, type, regclass, 0);
5621 if (!n)
5622 return NULL;
5623 if (!examine_argument (mode, type, in_return, &needed_intregs,
5624 &needed_sseregs))
5625 return NULL;
5626 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5627 return NULL;
5629 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5630 some less clueful developer tries to use floating-point anyway. */
5631 if (needed_sseregs && !TARGET_SSE)
5633 if (in_return)
5635 if (!issued_sse_ret_error)
5637 error ("SSE register return with SSE disabled");
5638 issued_sse_ret_error = true;
5641 else if (!issued_sse_arg_error)
5643 error ("SSE register argument with SSE disabled");
5644 issued_sse_arg_error = true;
5646 return NULL;
5649 /* Likewise, error if the ABI requires us to return values in the
5650 x87 registers and the user specified -mno-80387. */
5651 if (!TARGET_80387 && in_return)
5652 for (i = 0; i < n; i++)
5653 if (regclass[i] == X86_64_X87_CLASS
5654 || regclass[i] == X86_64_X87UP_CLASS
5655 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5657 if (!issued_x87_ret_error)
5659 error ("x87 register return with x87 disabled");
5660 issued_x87_ret_error = true;
5662 return NULL;
5665 /* First construct simple cases. Avoid SCmode, since we want to use
 5666      a single register to pass this type.  */
5667 if (n == 1 && mode != SCmode)
5668 switch (regclass[0])
5670 case X86_64_INTEGER_CLASS:
5671 case X86_64_INTEGERSI_CLASS:
5672 return gen_rtx_REG (mode, intreg[0]);
5673 case X86_64_SSE_CLASS:
5674 case X86_64_SSESF_CLASS:
5675 case X86_64_SSEDF_CLASS:
5676 if (mode != BLKmode)
5677 return gen_reg_or_parallel (mode, orig_mode,
5678 SSE_REGNO (sse_regno));
5679 break;
5680 case X86_64_X87_CLASS:
5681 case X86_64_COMPLEX_X87_CLASS:
5682 return gen_rtx_REG (mode, FIRST_STACK_REG);
5683 case X86_64_NO_CLASS:
5684 /* Zero sized array, struct or class. */
5685 return NULL;
5686 default:
5687 gcc_unreachable ();
5689 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5690 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5691 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5692 if (n == 4
5693 && regclass[0] == X86_64_SSE_CLASS
5694 && regclass[1] == X86_64_SSEUP_CLASS
5695 && regclass[2] == X86_64_SSEUP_CLASS
5696 && regclass[3] == X86_64_SSEUP_CLASS
5697 && mode != BLKmode)
5698 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5700 if (n == 2
5701 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5702 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5703 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5704 && regclass[1] == X86_64_INTEGER_CLASS
5705 && (mode == CDImode || mode == TImode || mode == TFmode)
5706 && intreg[0] + 1 == intreg[1])
5707 return gen_rtx_REG (mode, intreg[0]);
5709 /* Otherwise figure out the entries of the PARALLEL. */
5710 for (i = 0; i < n; i++)
5712 int pos;
5714 switch (regclass[i])
5716 case X86_64_NO_CLASS:
5717 break;
5718 case X86_64_INTEGER_CLASS:
5719 case X86_64_INTEGERSI_CLASS:
5720 /* Merge TImodes on aligned occasions here too. */
5721 if (i * 8 + 8 > bytes)
5722 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5723 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5724 tmpmode = SImode;
5725 else
5726 tmpmode = DImode;
 5727 	  /* We've requested 24 bytes for which we don't have a mode.  Use DImode.  */
5728 if (tmpmode == BLKmode)
5729 tmpmode = DImode;
5730 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5731 gen_rtx_REG (tmpmode, *intreg),
5732 GEN_INT (i*8));
5733 intreg++;
5734 break;
5735 case X86_64_SSESF_CLASS:
5736 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5737 gen_rtx_REG (SFmode,
5738 SSE_REGNO (sse_regno)),
5739 GEN_INT (i*8));
5740 sse_regno++;
5741 break;
5742 case X86_64_SSEDF_CLASS:
5743 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5744 gen_rtx_REG (DFmode,
5745 SSE_REGNO (sse_regno)),
5746 GEN_INT (i*8));
5747 sse_regno++;
5748 break;
5749 case X86_64_SSE_CLASS:
5750 pos = i;
5751 switch (n)
5753 case 1:
5754 tmpmode = DImode;
5755 break;
5756 case 2:
5757 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5759 tmpmode = TImode;
5760 i++;
5762 else
5763 tmpmode = DImode;
5764 break;
5765 case 4:
5766 gcc_assert (i == 0
5767 && regclass[1] == X86_64_SSEUP_CLASS
5768 && regclass[2] == X86_64_SSEUP_CLASS
5769 && regclass[3] == X86_64_SSEUP_CLASS);
5770 tmpmode = OImode;
5771 i += 3;
5772 break;
5773 default:
5774 gcc_unreachable ();
5776 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5777 gen_rtx_REG (tmpmode,
5778 SSE_REGNO (sse_regno)),
5779 GEN_INT (pos*8));
5780 sse_regno++;
5781 break;
5782 default:
5783 gcc_unreachable ();
5787 /* Empty aligned struct, union or class. */
5788 if (nexps == 0)
5789 return NULL;
5791 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5792 for (i = 0; i < nexps; i++)
5793 XVECEXP (ret, 0, i) = exp [i];
5794 return ret;
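/* Illustrative note (editorial, not part of the original source): for the
   hypothetical struct { double d; int i; } above, with the next free integer
   register being %rdi and sse_regno 0, construct_container builds a PARALLEL
   roughly of the form

       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:SI di) (const_int 8))])

   i.e. the first eightbyte goes in an SSE register and the second in an
   integer register, at byte offsets 0 and 8 within the argument.  */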
5797 /* Update the data in CUM to advance over an argument of mode MODE
5798 and data type TYPE. (TYPE is null for libcalls where that information
5799 may not be available.) */
5801 static void
5802 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5803 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5805 switch (mode)
5807 default:
5808 break;
5810 case BLKmode:
5811 if (bytes < 0)
5812 break;
5813 /* FALLTHRU */
5815 case DImode:
5816 case SImode:
5817 case HImode:
5818 case QImode:
5819 cum->words += words;
5820 cum->nregs -= words;
5821 cum->regno += words;
5823 if (cum->nregs <= 0)
5825 cum->nregs = 0;
5826 cum->regno = 0;
5828 break;
5830 case OImode:
5831 /* OImode shouldn't be used directly. */
5832 gcc_unreachable ();
5834 case DFmode:
5835 if (cum->float_in_sse < 2)
5836 break;
5837 case SFmode:
5838 if (cum->float_in_sse < 1)
5839 break;
5840 /* FALLTHRU */
5842 case V8SFmode:
5843 case V8SImode:
5844 case V32QImode:
5845 case V16HImode:
5846 case V4DFmode:
5847 case V4DImode:
5848 case TImode:
5849 case V16QImode:
5850 case V8HImode:
5851 case V4SImode:
5852 case V2DImode:
5853 case V4SFmode:
5854 case V2DFmode:
5855 if (!type || !AGGREGATE_TYPE_P (type))
5857 cum->sse_words += words;
5858 cum->sse_nregs -= 1;
5859 cum->sse_regno += 1;
5860 if (cum->sse_nregs <= 0)
5862 cum->sse_nregs = 0;
5863 cum->sse_regno = 0;
5866 break;
5868 case V8QImode:
5869 case V4HImode:
5870 case V2SImode:
5871 case V2SFmode:
5872 case V1TImode:
5873 case V1DImode:
5874 if (!type || !AGGREGATE_TYPE_P (type))
5876 cum->mmx_words += words;
5877 cum->mmx_nregs -= 1;
5878 cum->mmx_regno += 1;
5879 if (cum->mmx_nregs <= 0)
5881 cum->mmx_nregs = 0;
5882 cum->mmx_regno = 0;
5885 break;
5889 static void
5890 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5891 tree type, HOST_WIDE_INT words, int named)
5893 int int_nregs, sse_nregs;
5895 /* Unnamed 256bit vector mode parameters are passed on stack. */
5896 if (!named && VALID_AVX256_REG_MODE (mode))
5897 return;
5899 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5900 cum->words += words;
5901 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5903 cum->nregs -= int_nregs;
5904 cum->sse_nregs -= sse_nregs;
5905 cum->regno += int_nregs;
5906 cum->sse_regno += sse_nregs;
5908 else
5909 cum->words += words;
5912 static void
5913 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5914 HOST_WIDE_INT words)
5916 /* Otherwise, this should be passed indirect. */
5917 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5919 cum->words += words;
5920 if (cum->nregs > 0)
5922 cum->nregs -= 1;
5923 cum->regno += 1;
5927 void
5928 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5929 tree type, int named)
5931 HOST_WIDE_INT bytes, words;
5933 if (mode == BLKmode)
5934 bytes = int_size_in_bytes (type);
5935 else
5936 bytes = GET_MODE_SIZE (mode);
5937 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5939 if (type)
5940 mode = type_natural_mode (type, NULL);
5942 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5943 function_arg_advance_ms_64 (cum, bytes, words);
5944 else if (TARGET_64BIT)
5945 function_arg_advance_64 (cum, mode, type, words, named);
5946 else
5947 function_arg_advance_32 (cum, mode, type, bytes, words);
5950 /* Define where to put the arguments to a function.
5951 Value is zero to push the argument on the stack,
5952 or a hard register in which to store the argument.
5954 MODE is the argument's machine mode.
5955 TYPE is the data type of the argument (as a tree).
5956 This is null for libcalls where that information may
5957 not be available.
5958 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5959 the preceding args and about the function being called.
5960 NAMED is nonzero if this argument is a named parameter
5961 (otherwise it is an extra parameter matching an ellipsis). */
5963 static rtx
5964 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5965 enum machine_mode orig_mode, tree type,
5966 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5968 static bool warnedsse, warnedmmx;
5970 /* Avoid the AL settings for the Unix64 ABI. */
5971 if (mode == VOIDmode)
5972 return constm1_rtx;
5974 switch (mode)
5976 default:
5977 break;
5979 case BLKmode:
5980 if (bytes < 0)
5981 break;
5982 /* FALLTHRU */
5983 case DImode:
5984 case SImode:
5985 case HImode:
5986 case QImode:
5987 if (words <= cum->nregs)
5989 int regno = cum->regno;
5991 /* Fastcall allocates the first two DWORD (SImode) or
 5992 	     smaller arguments to ECX and EDX if the argument isn't an
 5993 	     aggregate type.  */
5994 if (cum->fastcall)
5996 if (mode == BLKmode
5997 || mode == DImode
5998 || (type && AGGREGATE_TYPE_P (type)))
5999 break;
 6001 	  /* ECX, not EAX, is the first allocated register.  */
6002 if (regno == AX_REG)
6003 regno = CX_REG;
6005 return gen_rtx_REG (mode, regno);
6007 break;
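/* Illustrative note (editorial, not part of the original source): with the
   fastcall handling above, a hypothetical declaration such as

       int __attribute__ ((fastcall)) f (int a, int b, int c);

   receives a in %ecx and b in %edx while c is pushed on the stack; with the
   thiscall attribute only the first argument goes in %ecx.  */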
6009 case DFmode:
6010 if (cum->float_in_sse < 2)
6011 break;
6012 case SFmode:
6013 if (cum->float_in_sse < 1)
6014 break;
6015 /* FALLTHRU */
6016 case TImode:
6017 /* In 32bit, we pass TImode in xmm registers. */
6018 case V16QImode:
6019 case V8HImode:
6020 case V4SImode:
6021 case V2DImode:
6022 case V4SFmode:
6023 case V2DFmode:
6024 if (!type || !AGGREGATE_TYPE_P (type))
6026 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6028 warnedsse = true;
6029 warning (0, "SSE vector argument without SSE enabled "
6030 "changes the ABI");
6032 if (cum->sse_nregs)
6033 return gen_reg_or_parallel (mode, orig_mode,
6034 cum->sse_regno + FIRST_SSE_REG);
6036 break;
6038 case OImode:
6039 /* OImode shouldn't be used directly. */
6040 gcc_unreachable ();
6042 case V8SFmode:
6043 case V8SImode:
6044 case V32QImode:
6045 case V16HImode:
6046 case V4DFmode:
6047 case V4DImode:
6048 if (!type || !AGGREGATE_TYPE_P (type))
6050 if (cum->sse_nregs)
6051 return gen_reg_or_parallel (mode, orig_mode,
6052 cum->sse_regno + FIRST_SSE_REG);
6054 break;
6056 case V8QImode:
6057 case V4HImode:
6058 case V2SImode:
6059 case V2SFmode:
6060 case V1TImode:
6061 case V1DImode:
6062 if (!type || !AGGREGATE_TYPE_P (type))
6064 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6066 warnedmmx = true;
6067 warning (0, "MMX vector argument without MMX enabled "
6068 "changes the ABI");
6070 if (cum->mmx_nregs)
6071 return gen_reg_or_parallel (mode, orig_mode,
6072 cum->mmx_regno + FIRST_MMX_REG);
6074 break;
6077 return NULL_RTX;
6080 static rtx
6081 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6082 enum machine_mode orig_mode, tree type, int named)
6084 /* Handle a hidden AL argument containing number of registers
6085 for varargs x86-64 functions. */
6086 if (mode == VOIDmode)
6087 return GEN_INT (cum->maybe_vaarg
6088 ? (cum->sse_nregs < 0
6089 ? (cum->call_abi == ix86_abi
6090 ? SSE_REGPARM_MAX
6091 : (ix86_abi != SYSV_ABI
6092 ? X86_64_SSE_REGPARM_MAX
6093 : X86_64_MS_SSE_REGPARM_MAX))
6094 : cum->sse_regno)
6095 : -1);
6097 switch (mode)
6099 default:
6100 break;
6102 case V8SFmode:
6103 case V8SImode:
6104 case V32QImode:
6105 case V16HImode:
6106 case V4DFmode:
6107 case V4DImode:
6108 /* Unnamed 256bit vector mode parameters are passed on stack. */
6109 if (!named)
6110 return NULL;
6111 break;
6114 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6115 cum->sse_nregs,
6116 &x86_64_int_parameter_registers [cum->regno],
6117 cum->sse_regno);
6120 static rtx
6121 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6122 enum machine_mode orig_mode, int named,
6123 HOST_WIDE_INT bytes)
6125 unsigned int regno;
 6127   /* We need to add a clobber for MS_ABI -> SYSV ABI calls in expand_call.
 6128      We use a value of -2 to specify that the current function call is MS ABI.  */
6129 if (mode == VOIDmode)
6130 return GEN_INT (-2);
6132 /* If we've run out of registers, it goes on the stack. */
6133 if (cum->nregs == 0)
6134 return NULL_RTX;
6136 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6138 /* Only floating point modes are passed in anything but integer regs. */
6139 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6141 if (named)
6142 regno = cum->regno + FIRST_SSE_REG;
6143 else
6145 rtx t1, t2;
6147 /* Unnamed floating parameters are passed in both the
6148 SSE and integer registers. */
6149 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6150 t2 = gen_rtx_REG (mode, regno);
6151 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6152 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6153 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
 6156   /* Handle aggregate types passed in registers.  */
6157 if (orig_mode == BLKmode)
6159 if (bytes > 0 && bytes <= 8)
6160 mode = (bytes > 4 ? DImode : SImode);
6161 if (mode == BLKmode)
6162 mode = DImode;
6165 return gen_reg_or_parallel (mode, orig_mode, regno);
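/* Illustrative note (editorial, not part of the original source): under the
   Microsoft x64 convention handled above, a named double in the second
   argument slot is passed in %xmm1, whereas an unnamed (variadic) double in
   that slot is passed in both %xmm1 and %rdx so that the callee's va_arg can
   fetch it from the integer register save area.  */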
6169 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6170 tree type, int named)
6172 enum machine_mode mode = omode;
6173 HOST_WIDE_INT bytes, words;
6175 if (mode == BLKmode)
6176 bytes = int_size_in_bytes (type);
6177 else
6178 bytes = GET_MODE_SIZE (mode);
6179 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6181 /* To simplify the code below, represent vector types with a vector mode
6182 even if MMX/SSE are not active. */
6183 if (type && TREE_CODE (type) == VECTOR_TYPE)
6184 mode = type_natural_mode (type, cum);
6186 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6187 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6188 else if (TARGET_64BIT)
6189 return function_arg_64 (cum, mode, omode, type, named);
6190 else
6191 return function_arg_32 (cum, mode, omode, type, bytes, words);
6194 /* A C expression that indicates when an argument must be passed by
6195 reference. If nonzero for an argument, a copy of that argument is
6196 made in memory and a pointer to the argument is passed instead of
6197 the argument itself. The pointer is passed in whatever way is
6198 appropriate for passing a pointer to that type. */
6200 static bool
6201 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6202 enum machine_mode mode ATTRIBUTE_UNUSED,
6203 const_tree type, bool named ATTRIBUTE_UNUSED)
6205 /* See Windows x64 Software Convention. */
6206 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6208 int msize = (int) GET_MODE_SIZE (mode);
6209 if (type)
6211 /* Arrays are passed by reference. */
6212 if (TREE_CODE (type) == ARRAY_TYPE)
6213 return true;
6215 if (AGGREGATE_TYPE_P (type))
6217 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6218 are passed by reference. */
6219 msize = int_size_in_bytes (type);
6223 /* __m128 is passed by reference. */
6224 switch (msize) {
6225 case 1: case 2: case 4: case 8:
6226 break;
6227 default:
6228 return true;
6231 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6232 return 1;
6234 return 0;
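/* Illustrative note (editorial, not part of the original source): under the
   Microsoft x64 convention checked above, arrays and aggregates whose size is
   not exactly 1, 2, 4 or 8 bytes (e.g. a 12-byte struct or a 16-byte __m128)
   are passed by reference, while an 8-byte struct travels by value in a
   register; on the SysV 64-bit side only variable-sized types take this
   path.  */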
6237 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
6238 ABI. */
6239 static bool
6240 contains_aligned_value_p (tree type)
6242 enum machine_mode mode = TYPE_MODE (type);
6243 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6244 || mode == TDmode
6245 || mode == TFmode
6246 || mode == TCmode)
6247 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6248 return true;
6249 if (TYPE_ALIGN (type) < 128)
6250 return false;
6252 if (AGGREGATE_TYPE_P (type))
6254 /* Walk the aggregates recursively. */
6255 switch (TREE_CODE (type))
6257 case RECORD_TYPE:
6258 case UNION_TYPE:
6259 case QUAL_UNION_TYPE:
6261 tree field;
6263 /* Walk all the structure fields. */
6264 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6266 if (TREE_CODE (field) == FIELD_DECL
6267 && contains_aligned_value_p (TREE_TYPE (field)))
6268 return true;
6270 break;
6273 case ARRAY_TYPE:
 6274 	case ARRAY_TYPE:
 6275 	  /* Just for use if some languages pass arrays by value.  */
6275 if (contains_aligned_value_p (TREE_TYPE (type)))
6276 return true;
6277 break;
6279 default:
6280 gcc_unreachable ();
6283 return false;
6286 /* Gives the alignment boundary, in bits, of an argument with the
6287 specified mode and type. */
6290 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6292 int align;
6293 if (type)
 6295       /* Since the canonical type is used for the call, we convert it to the
 6296 	 canonical type if needed.  */
6297 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6298 type = TYPE_CANONICAL (type);
6299 align = TYPE_ALIGN (type);
6301 else
6302 align = GET_MODE_ALIGNMENT (mode);
6303 if (align < PARM_BOUNDARY)
6304 align = PARM_BOUNDARY;
6305 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6306 natural boundaries. */
6307 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6309 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6310 make an exception for SSE modes since these require 128bit
6311 alignment.
6313 The handling here differs from field_alignment. ICC aligns MMX
6314 arguments to 4 byte boundaries, while structure fields are aligned
6315 to 8 byte boundaries. */
6316 if (!type)
6318 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6319 align = PARM_BOUNDARY;
6321 else
6323 if (!contains_aligned_value_p (type))
6324 align = PARM_BOUNDARY;
6327 if (align > BIGGEST_ALIGNMENT)
6328 align = BIGGEST_ALIGNMENT;
6329 return align;
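/* Illustrative note (editorial, not part of the original source): with the
   logic above, a plain int or double argument on 32-bit targets gets the
   32-bit PARM_BOUNDARY, while a type containing an __m128 (with SSE
   enabled), or _Decimal128/__float128, is aligned to 128 bits; on 64-bit
   targets the natural alignment of the type is used, but never less than
   PARM_BOUNDARY nor more than BIGGEST_ALIGNMENT.  */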
6332 /* Return true if N is a possible register number of function value. */
6334 bool
6335 ix86_function_value_regno_p (int regno)
6337 switch (regno)
6339 case 0:
6340 return true;
6342 case FIRST_FLOAT_REG:
6343 /* TODO: The function should depend on current function ABI but
6344 builtins.c would need updating then. Therefore we use the
6345 default ABI. */
6346 if (TARGET_64BIT && ix86_abi == MS_ABI)
6347 return false;
6348 return TARGET_FLOAT_RETURNS_IN_80387;
6350 case FIRST_SSE_REG:
6351 return TARGET_SSE;
6353 case FIRST_MMX_REG:
6354 if (TARGET_MACHO || TARGET_64BIT)
6355 return false;
6356 return TARGET_MMX;
6359 return false;
6362 /* Define how to find the value returned by a function.
6363 VALTYPE is the data type of the value (as a tree).
6364 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6365 otherwise, FUNC is 0. */
6367 static rtx
6368 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6369 const_tree fntype, const_tree fn)
6371 unsigned int regno;
6373 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6374 we normally prevent this case when mmx is not available. However
6375 some ABIs may require the result to be returned like DImode. */
6376 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6377 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6379 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6380 we prevent this case when sse is not available. However some ABIs
6381 may require the result to be returned like integer TImode. */
6382 else if (mode == TImode
6383 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6384 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6386 /* 32-byte vector modes in %ymm0. */
6387 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6388 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6390 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6391 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6392 regno = FIRST_FLOAT_REG;
6393 else
6394 /* Most things go in %eax. */
6395 regno = AX_REG;
6397 /* Override FP return register with %xmm0 for local functions when
6398 SSE math is enabled or for functions with sseregparm attribute. */
6399 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6401 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6402 if ((sse_level >= 1 && mode == SFmode)
6403 || (sse_level == 2 && mode == DFmode))
6404 regno = FIRST_SSE_REG;
6407 /* OImode shouldn't be used directly. */
6408 gcc_assert (mode != OImode);
6410 return gen_rtx_REG (orig_mode, regno);
6413 static rtx
6414 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6415 const_tree valtype)
6417 rtx ret;
6419 /* Handle libcalls, which don't provide a type node. */
6420 if (valtype == NULL)
6422 switch (mode)
6424 case SFmode:
6425 case SCmode:
6426 case DFmode:
6427 case DCmode:
6428 case TFmode:
6429 case SDmode:
6430 case DDmode:
6431 case TDmode:
6432 return gen_rtx_REG (mode, FIRST_SSE_REG);
6433 case XFmode:
6434 case XCmode:
6435 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6436 case TCmode:
6437 return NULL;
6438 default:
6439 return gen_rtx_REG (mode, AX_REG);
6443 ret = construct_container (mode, orig_mode, valtype, 1,
6444 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6445 x86_64_int_return_registers, 0);
6447 /* For zero sized structures, construct_container returns NULL, but we
 6448      need to keep the rest of the compiler happy by returning a meaningful value.  */
6449 if (!ret)
6450 ret = gen_rtx_REG (orig_mode, AX_REG);
6452 return ret;
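/* Illustrative note (editorial, not part of the original source): with the
   classification above, a 64-bit SysV function returning double gets %xmm0,
   one returning long double gets %st(0), and one returning _Complex double
   gets a PARALLEL using %xmm0 (real part, offset 0) and %xmm1 (imaginary
   part, offset 8).  */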
6455 static rtx
6456 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6458 unsigned int regno = AX_REG;
6460 if (TARGET_SSE)
6462 switch (GET_MODE_SIZE (mode))
6464 case 16:
6465 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6466 && !COMPLEX_MODE_P (mode))
6467 regno = FIRST_SSE_REG;
6468 break;
6469 case 8:
6470 case 4:
6471 if (mode == SFmode || mode == DFmode)
6472 regno = FIRST_SSE_REG;
6473 break;
6474 default:
6475 break;
6478 return gen_rtx_REG (orig_mode, regno);
6481 static rtx
6482 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6483 enum machine_mode orig_mode, enum machine_mode mode)
6485 const_tree fn, fntype;
6487 fn = NULL_TREE;
6488 if (fntype_or_decl && DECL_P (fntype_or_decl))
6489 fn = fntype_or_decl;
6490 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6492 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6493 return function_value_ms_64 (orig_mode, mode);
6494 else if (TARGET_64BIT)
6495 return function_value_64 (orig_mode, mode, valtype);
6496 else
6497 return function_value_32 (orig_mode, mode, fntype, fn);
6500 static rtx
6501 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6502 bool outgoing ATTRIBUTE_UNUSED)
6504 enum machine_mode mode, orig_mode;
6506 orig_mode = TYPE_MODE (valtype);
6507 mode = type_natural_mode (valtype, NULL);
6508 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6512 ix86_libcall_value (enum machine_mode mode)
6514 return ix86_function_value_1 (NULL, NULL, mode, mode);
6517 /* Return true iff type is returned in memory. */
6519 static int ATTRIBUTE_UNUSED
6520 return_in_memory_32 (const_tree type, enum machine_mode mode)
6522 HOST_WIDE_INT size;
6524 if (mode == BLKmode)
6525 return 1;
6527 size = int_size_in_bytes (type);
6529 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6530 return 0;
6532 if (VECTOR_MODE_P (mode) || mode == TImode)
6534 /* User-created vectors small enough to fit in EAX. */
6535 if (size < 8)
6536 return 0;
6538 /* MMX/3dNow values are returned in MM0,
 6539 	 except when it doesn't exist.  */
6540 if (size == 8)
6541 return (TARGET_MMX ? 0 : 1);
6543 /* SSE values are returned in XMM0, except when it doesn't exist. */
6544 if (size == 16)
6545 return (TARGET_SSE ? 0 : 1);
6547 /* AVX values are returned in YMM0, except when it doesn't exist. */
6548 if (size == 32)
6549 return TARGET_AVX ? 0 : 1;
6552 if (mode == XFmode)
6553 return 0;
6555 if (size > 12)
6556 return 1;
6558 /* OImode shouldn't be used directly. */
6559 gcc_assert (mode != OImode);
6561 return 0;
6564 static int ATTRIBUTE_UNUSED
6565 return_in_memory_64 (const_tree type, enum machine_mode mode)
6567 int needed_intregs, needed_sseregs;
6568 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6571 static int ATTRIBUTE_UNUSED
6572 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6574 HOST_WIDE_INT size = int_size_in_bytes (type);
6576 /* __m128 is returned in xmm0. */
6577 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6578 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6579 return 0;
 6581   /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes.  */
6582 return (size != 1 && size != 2 && size != 4 && size != 8);
6585 static bool
6586 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6588 #ifdef SUBTARGET_RETURN_IN_MEMORY
6589 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6590 #else
6591 const enum machine_mode mode = type_natural_mode (type, NULL);
6593 if (TARGET_64BIT)
6595 if (ix86_function_type_abi (fntype) == MS_ABI)
6596 return return_in_memory_ms_64 (type, mode);
6597 else
6598 return return_in_memory_64 (type, mode);
6600 else
6601 return return_in_memory_32 (type, mode);
6602 #endif
 6605 /* Return true iff TYPE is returned in memory.  This version is used
6606 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6607 but differs notably in that when MMX is available, 8-byte vectors
6608 are returned in memory, rather than in MMX registers. */
6610 bool
6611 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6613 int size;
6614 enum machine_mode mode = type_natural_mode (type, NULL);
6616 if (TARGET_64BIT)
6617 return return_in_memory_64 (type, mode);
6619 if (mode == BLKmode)
6620 return 1;
6622 size = int_size_in_bytes (type);
6624 if (VECTOR_MODE_P (mode))
6626 /* Return in memory only if MMX registers *are* available. This
6627 seems backwards, but it is consistent with the existing
6628 Solaris x86 ABI. */
6629 if (size == 8)
6630 return TARGET_MMX;
6631 if (size == 16)
6632 return !TARGET_SSE;
6634 else if (mode == TImode)
6635 return !TARGET_SSE;
6636 else if (mode == XFmode)
6637 return 0;
6639 return size > 12;
6642 /* When returning SSE vector types, we have a choice of either
6643 (1) being abi incompatible with a -march switch, or
6644 (2) generating an error.
6645 Given no good solution, I think the safest thing is one warning.
6646 The user won't be able to use -Werror, but....
6648 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6649 called in response to actually generating a caller or callee that
6650 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6651 via aggregate_value_p for general type probing from tree-ssa. */
6653 static rtx
6654 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6656 static bool warnedsse, warnedmmx;
6658 if (!TARGET_64BIT && type)
6660 /* Look at the return type of the function, not the function type. */
6661 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6663 if (!TARGET_SSE && !warnedsse)
6665 if (mode == TImode
6666 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6668 warnedsse = true;
6669 warning (0, "SSE vector return without SSE enabled "
6670 "changes the ABI");
6674 if (!TARGET_MMX && !warnedmmx)
6676 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6678 warnedmmx = true;
6679 warning (0, "MMX vector return without MMX enabled "
6680 "changes the ABI");
6685 return NULL;
6689 /* Create the va_list data type. */
 6691 /* Returns the calling convention specific va_list data type.
6692 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6694 static tree
6695 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6697 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6699 /* For i386 we use plain pointer to argument area. */
6700 if (!TARGET_64BIT || abi == MS_ABI)
6701 return build_pointer_type (char_type_node);
6703 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6704 type_decl = build_decl (BUILTINS_LOCATION,
6705 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6707 f_gpr = build_decl (BUILTINS_LOCATION,
6708 FIELD_DECL, get_identifier ("gp_offset"),
6709 unsigned_type_node);
6710 f_fpr = build_decl (BUILTINS_LOCATION,
6711 FIELD_DECL, get_identifier ("fp_offset"),
6712 unsigned_type_node);
6713 f_ovf = build_decl (BUILTINS_LOCATION,
6714 FIELD_DECL, get_identifier ("overflow_arg_area"),
6715 ptr_type_node);
6716 f_sav = build_decl (BUILTINS_LOCATION,
6717 FIELD_DECL, get_identifier ("reg_save_area"),
6718 ptr_type_node);
6720 va_list_gpr_counter_field = f_gpr;
6721 va_list_fpr_counter_field = f_fpr;
6723 DECL_FIELD_CONTEXT (f_gpr) = record;
6724 DECL_FIELD_CONTEXT (f_fpr) = record;
6725 DECL_FIELD_CONTEXT (f_ovf) = record;
6726 DECL_FIELD_CONTEXT (f_sav) = record;
6728 TREE_CHAIN (record) = type_decl;
6729 TYPE_NAME (record) = type_decl;
6730 TYPE_FIELDS (record) = f_gpr;
6731 TREE_CHAIN (f_gpr) = f_fpr;
6732 TREE_CHAIN (f_fpr) = f_ovf;
6733 TREE_CHAIN (f_ovf) = f_sav;
6735 layout_type (record);
6737 /* The correct type is an array type of one element. */
6738 return build_array_type (record, build_index_type (size_zero_node));
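/* Illustrative note (editorial, not part of the original source): the record
   built above corresponds to the familiar SysV x86-64 va_list layout,
   roughly

       typedef struct __va_list_tag {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } __builtin_va_list[1];

   while 32-bit and MS-ABI targets simply use a plain char pointer.  */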
6741 /* Setup the builtin va_list data type and for 64-bit the additional
6742 calling convention specific va_list data types. */
6744 static tree
6745 ix86_build_builtin_va_list (void)
6747 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6749 /* Initialize abi specific va_list builtin types. */
6750 if (TARGET_64BIT)
6752 tree t;
6753 if (ix86_abi == MS_ABI)
6755 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6756 if (TREE_CODE (t) != RECORD_TYPE)
6757 t = build_variant_type_copy (t);
6758 sysv_va_list_type_node = t;
6760 else
6762 t = ret;
6763 if (TREE_CODE (t) != RECORD_TYPE)
6764 t = build_variant_type_copy (t);
6765 sysv_va_list_type_node = t;
6767 if (ix86_abi != MS_ABI)
6769 t = ix86_build_builtin_va_list_abi (MS_ABI);
6770 if (TREE_CODE (t) != RECORD_TYPE)
6771 t = build_variant_type_copy (t);
6772 ms_va_list_type_node = t;
6774 else
6776 t = ret;
6777 if (TREE_CODE (t) != RECORD_TYPE)
6778 t = build_variant_type_copy (t);
6779 ms_va_list_type_node = t;
6783 return ret;
6786 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6788 static void
6789 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6791 rtx save_area, mem;
6792 rtx label;
6793 rtx label_ref;
6794 rtx tmp_reg;
6795 rtx nsse_reg;
6796 alias_set_type set;
6797 int i;
6798 int regparm = ix86_regparm;
6800 if (cum->call_abi != ix86_abi)
6801 regparm = (ix86_abi != SYSV_ABI
6802 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6804 /* GPR size of varargs save area. */
6805 if (cfun->va_list_gpr_size)
6806 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6807 else
6808 ix86_varargs_gpr_size = 0;
6810 /* FPR size of varargs save area. We don't need it if we don't pass
6811 anything in SSE registers. */
6812 if (cum->sse_nregs && cfun->va_list_fpr_size)
6813 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6814 else
6815 ix86_varargs_fpr_size = 0;
6817 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6818 return;
6820 save_area = frame_pointer_rtx;
6821 set = get_varargs_alias_set ();
6823 for (i = cum->regno;
6824 i < regparm
6825 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6826 i++)
6828 mem = gen_rtx_MEM (Pmode,
6829 plus_constant (save_area, i * UNITS_PER_WORD));
6830 MEM_NOTRAP_P (mem) = 1;
6831 set_mem_alias_set (mem, set);
6832 emit_move_insn (mem, gen_rtx_REG (Pmode,
6833 x86_64_int_parameter_registers[i]));
6836 if (ix86_varargs_fpr_size)
 6838       /* Now emit code to save SSE registers.  The AX parameter contains the
 6839 	 number of SSE parameter registers used to call this function.  We use
 6840 	 the sse_prologue_save insn template that produces a computed jump across
 6841 	 the SSE saves.  We need some preparation work to get this working.  */
6843 label = gen_label_rtx ();
6844 label_ref = gen_rtx_LABEL_REF (Pmode, label);
 6846       /* Compute the address to jump to:
 6847 	 label - eax*4 + nnamed_sse_arguments*4, or
 6848 	 label - eax*5 + nnamed_sse_arguments*5 for AVX.  */
6849 tmp_reg = gen_reg_rtx (Pmode);
6850 nsse_reg = gen_reg_rtx (Pmode);
6851 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6852 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6853 gen_rtx_MULT (Pmode, nsse_reg,
6854 GEN_INT (4))));
6856 /* vmovaps is one byte longer than movaps. */
6857 if (TARGET_AVX)
6858 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6859 gen_rtx_PLUS (Pmode, tmp_reg,
6860 nsse_reg)));
6862 if (cum->sse_regno)
6863 emit_move_insn
6864 (nsse_reg,
6865 gen_rtx_CONST (DImode,
6866 gen_rtx_PLUS (DImode,
6867 label_ref,
6868 GEN_INT (cum->sse_regno
6869 * (TARGET_AVX ? 5 : 4)))));
6870 else
6871 emit_move_insn (nsse_reg, label_ref);
6872 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
 6874       /* Compute the address of the memory block we save into.  We always use
 6875 	 a pointer pointing 127 bytes after the first byte to store - this is
 6876 	 needed to keep the instruction size limited to 4 bytes (5 bytes for AVX)
 6877 	 with a one byte displacement.  */
6878 tmp_reg = gen_reg_rtx (Pmode);
6879 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6880 plus_constant (save_area,
6881 ix86_varargs_gpr_size + 127)));
6882 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6883 MEM_NOTRAP_P (mem) = 1;
6884 set_mem_alias_set (mem, set);
6885 set_mem_align (mem, BITS_PER_WORD);
6887 /* And finally do the dirty job! */
6888 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6889 GEN_INT (cum->sse_regno), label));
6893 static void
6894 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6896 alias_set_type set = get_varargs_alias_set ();
6897 int i;
6899 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6901 rtx reg, mem;
6903 mem = gen_rtx_MEM (Pmode,
6904 plus_constant (virtual_incoming_args_rtx,
6905 i * UNITS_PER_WORD));
6906 MEM_NOTRAP_P (mem) = 1;
6907 set_mem_alias_set (mem, set);
6909 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6910 emit_move_insn (mem, reg);
6914 static void
6915 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6916 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6917 int no_rtl)
6919 CUMULATIVE_ARGS next_cum;
6920 tree fntype;
6922 /* This argument doesn't appear to be used anymore. Which is good,
6923 because the old code here didn't suppress rtl generation. */
6924 gcc_assert (!no_rtl);
6926 if (!TARGET_64BIT)
6927 return;
6929 fntype = TREE_TYPE (current_function_decl);
6931 /* For varargs, we do not want to skip the dummy va_dcl argument.
6932 For stdargs, we do want to skip the last named argument. */
6933 next_cum = *cum;
6934 if (stdarg_p (fntype))
6935 function_arg_advance (&next_cum, mode, type, 1);
6937 if (cum->call_abi == MS_ABI)
6938 setup_incoming_varargs_ms_64 (&next_cum);
6939 else
6940 setup_incoming_varargs_64 (&next_cum);
6943 /* Checks if TYPE is of kind va_list char *. */
6945 static bool
6946 is_va_list_char_pointer (tree type)
6948 tree canonic;
6950 /* For 32-bit it is always true. */
6951 if (!TARGET_64BIT)
6952 return true;
6953 canonic = ix86_canonical_va_list_type (type);
6954 return (canonic == ms_va_list_type_node
6955 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6958 /* Implement va_start. */
6960 static void
6961 ix86_va_start (tree valist, rtx nextarg)
6963 HOST_WIDE_INT words, n_gpr, n_fpr;
6964 tree f_gpr, f_fpr, f_ovf, f_sav;
6965 tree gpr, fpr, ovf, sav, t;
6966 tree type;
6968 /* Only 64bit target needs something special. */
6969 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6971 std_expand_builtin_va_start (valist, nextarg);
6972 return;
6975 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6976 f_fpr = TREE_CHAIN (f_gpr);
6977 f_ovf = TREE_CHAIN (f_fpr);
6978 f_sav = TREE_CHAIN (f_ovf);
6980 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6981 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6982 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6983 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6984 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6986 /* Count number of gp and fp argument registers used. */
6987 words = crtl->args.info.words;
6988 n_gpr = crtl->args.info.regno;
6989 n_fpr = crtl->args.info.sse_regno;
6991 if (cfun->va_list_gpr_size)
6993 type = TREE_TYPE (gpr);
6994 t = build2 (MODIFY_EXPR, type,
6995 gpr, build_int_cst (type, n_gpr * 8));
6996 TREE_SIDE_EFFECTS (t) = 1;
6997 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7000 if (TARGET_SSE && cfun->va_list_fpr_size)
7002 type = TREE_TYPE (fpr);
7003 t = build2 (MODIFY_EXPR, type, fpr,
7004 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
7005 TREE_SIDE_EFFECTS (t) = 1;
7006 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7009 /* Find the overflow area. */
7010 type = TREE_TYPE (ovf);
7011 t = make_tree (type, crtl->args.internal_arg_pointer);
7012 if (words != 0)
7013 t = build2 (POINTER_PLUS_EXPR, type, t,
7014 size_int (words * UNITS_PER_WORD));
7015 t = build2 (MODIFY_EXPR, type, ovf, t);
7016 TREE_SIDE_EFFECTS (t) = 1;
7017 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7019 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
7021 /* Find the register save area.
 7022 	 The function prologue saves it right above the stack frame.  */
7023 type = TREE_TYPE (sav);
7024 t = make_tree (type, frame_pointer_rtx);
7025 if (!ix86_varargs_gpr_size)
7026 t = build2 (POINTER_PLUS_EXPR, type, t,
7027 size_int (-8 * X86_64_REGPARM_MAX));
7028 t = build2 (MODIFY_EXPR, type, sav, t);
7029 TREE_SIDE_EFFECTS (t) = 1;
7030 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
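/* Illustrative note (editorial, not part of the original source): for a
   hypothetical prototype int f (int a, ...), the code above initializes
   gp_offset to 8 (one named GPR argument consumed, 8 bytes per slot),
   fp_offset to 48 + 0 * 16 (the FP save slots start after the
   X86_64_REGPARM_MAX general registers), overflow_arg_area to the first
   stack-passed argument, and reg_save_area to the register save block laid
   out by the prologue.  */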
7034 /* Implement va_arg. */
7036 static tree
7037 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7038 gimple_seq *post_p)
7040 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7041 tree f_gpr, f_fpr, f_ovf, f_sav;
7042 tree gpr, fpr, ovf, sav, t;
7043 int size, rsize;
7044 tree lab_false, lab_over = NULL_TREE;
7045 tree addr, t2;
7046 rtx container;
7047 int indirect_p = 0;
7048 tree ptrtype;
7049 enum machine_mode nat_mode;
7050 int arg_boundary;
7052 /* Only 64bit target needs something special. */
7053 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7054 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7056 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7057 f_fpr = TREE_CHAIN (f_gpr);
7058 f_ovf = TREE_CHAIN (f_fpr);
7059 f_sav = TREE_CHAIN (f_ovf);
7061 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7062 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7063 valist = build_va_arg_indirect_ref (valist);
7064 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7065 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7066 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7068 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7069 if (indirect_p)
7070 type = build_pointer_type (type);
7071 size = int_size_in_bytes (type);
7072 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7074 nat_mode = type_natural_mode (type, NULL);
7075 switch (nat_mode)
7077 case V8SFmode:
7078 case V8SImode:
7079 case V32QImode:
7080 case V16HImode:
7081 case V4DFmode:
7082 case V4DImode:
7083 /* Unnamed 256bit vector mode parameters are passed on stack. */
7084 if (ix86_cfun_abi () == SYSV_ABI)
7086 container = NULL;
7087 break;
7090 default:
7091 container = construct_container (nat_mode, TYPE_MODE (type),
7092 type, 0, X86_64_REGPARM_MAX,
7093 X86_64_SSE_REGPARM_MAX, intreg,
7095 break;
7098 /* Pull the value out of the saved registers. */
7100 addr = create_tmp_var (ptr_type_node, "addr");
7102 if (container)
7104 int needed_intregs, needed_sseregs;
7105 bool need_temp;
7106 tree int_addr, sse_addr;
7108 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7109 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7111 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7113 need_temp = (!REG_P (container)
7114 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7115 || TYPE_ALIGN (type) > 128));
 7117       /* In case we are passing a structure, verify that it is a consecutive
 7118 	 block in the register save area.  If not, we need to do moves.  */
7119 if (!need_temp && !REG_P (container))
 7121 	  /* Verify that all registers are strictly consecutive.  */
7122 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7124 int i;
7126 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7128 rtx slot = XVECEXP (container, 0, i);
7129 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7130 || INTVAL (XEXP (slot, 1)) != i * 16)
7131 need_temp = 1;
7134 else
7136 int i;
7138 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7140 rtx slot = XVECEXP (container, 0, i);
7141 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7142 || INTVAL (XEXP (slot, 1)) != i * 8)
7143 need_temp = 1;
7147 if (!need_temp)
7149 int_addr = addr;
7150 sse_addr = addr;
7152 else
7154 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7155 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7158 /* First ensure that we fit completely in registers. */
7159 if (needed_intregs)
7161 t = build_int_cst (TREE_TYPE (gpr),
7162 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7163 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7164 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7165 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7166 gimplify_and_add (t, pre_p);
7168 if (needed_sseregs)
7170 t = build_int_cst (TREE_TYPE (fpr),
7171 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7172 + X86_64_REGPARM_MAX * 8);
7173 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7174 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7175 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7176 gimplify_and_add (t, pre_p);
7179 /* Compute index to start of area used for integer regs. */
7180 if (needed_intregs)
7182 /* int_addr = gpr + sav; */
7183 t = fold_convert (sizetype, gpr);
7184 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7185 gimplify_assign (int_addr, t, pre_p);
7187 if (needed_sseregs)
7189 /* sse_addr = fpr + sav; */
7190 t = fold_convert (sizetype, fpr);
7191 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7192 gimplify_assign (sse_addr, t, pre_p);
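 /* Worked example (an illustrative sketch assuming the standard x86-64 SysV
 register save area layout; not part of the original source): gpr counts
 bytes already consumed in the integer part of the save area (0, 8, ..., 48)
 and fpr counts into the SSE part (48, 64, ..., 176). For a va_arg needing
 one integer register, the check above branches to lab_false once
 gpr >= (X86_64_REGPARM_MAX - 1 + 1) * 8 == 48; otherwise int_addr is
 computed as sav + gpr and the value is fetched from the save area. */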
7194 if (need_temp)
7196 int i;
7197 tree temp = create_tmp_var (type, "va_arg_tmp");
7199 /* addr = &temp; */
7200 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7201 gimplify_assign (addr, t, pre_p);
7203 for (i = 0; i < XVECLEN (container, 0); i++)
7205 rtx slot = XVECEXP (container, 0, i);
7206 rtx reg = XEXP (slot, 0);
7207 enum machine_mode mode = GET_MODE (reg);
7208 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7209 tree addr_type = build_pointer_type (piece_type);
7210 tree daddr_type = build_pointer_type_for_mode (piece_type,
7211 ptr_mode, true);
7212 tree src_addr, src;
7213 int src_offset;
7214 tree dest_addr, dest;
7216 if (SSE_REGNO_P (REGNO (reg)))
7218 src_addr = sse_addr;
7219 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7221 else
7223 src_addr = int_addr;
7224 src_offset = REGNO (reg) * 8;
7226 src_addr = fold_convert (addr_type, src_addr);
7227 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7228 size_int (src_offset));
7229 src = build_va_arg_indirect_ref (src_addr);
7231 dest_addr = fold_convert (daddr_type, addr);
7232 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7233 size_int (INTVAL (XEXP (slot, 1))));
7234 dest = build_va_arg_indirect_ref (dest_addr);
7236 gimplify_assign (dest, src, pre_p);
7240 if (needed_intregs)
7242 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7243 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7244 gimplify_assign (gpr, t, pre_p);
7247 if (needed_sseregs)
7249 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7250 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7251 gimplify_assign (fpr, t, pre_p);
7254 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7256 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7259 /* ... otherwise out of the overflow area. */
7261 /* When we align a parameter on the stack for the caller, if the parameter
7262 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
7263 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We match the callee
7264 here with the caller. */
7265 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7266 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7267 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7269 /* Care for on-stack alignment if needed. */
7270 if (arg_boundary <= 64
7271 || integer_zerop (TYPE_SIZE (type)))
7272 t = ovf;
7273 else
7275 HOST_WIDE_INT align = arg_boundary / 8;
7276 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7277 size_int (align - 1));
7278 t = fold_convert (sizetype, t);
7279 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7280 size_int (-align));
7281 t = fold_convert (TREE_TYPE (ovf), t);
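 /* Worked example (illustrative, not from the original source): with
 arg_boundary == 128 the align value is 16, so an overflow-area pointer
 of, say, 0x7fe4 is rounded up by (ovf + 15) & -16 to 0x7ff0 before the
 argument is fetched from the stack. */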
7283 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7284 gimplify_assign (addr, t, pre_p);
7286 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7287 size_int (rsize * UNITS_PER_WORD));
7288 gimplify_assign (unshare_expr (ovf), t, pre_p);
7290 if (container)
7291 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7293 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7294 addr = fold_convert (ptrtype, addr);
7296 if (indirect_p)
7297 addr = build_va_arg_indirect_ref (addr);
7298 return build_va_arg_indirect_ref (addr);
7301 /* Return nonzero if OPNUM's MEM should be matched
7302 in movabs* patterns. */
7305 ix86_check_movabs (rtx insn, int opnum)
7307 rtx set, mem;
7309 set = PATTERN (insn);
7310 if (GET_CODE (set) == PARALLEL)
7311 set = XVECEXP (set, 0, 0);
7312 gcc_assert (GET_CODE (set) == SET);
7313 mem = XEXP (set, opnum);
7314 while (GET_CODE (mem) == SUBREG)
7315 mem = SUBREG_REG (mem);
7316 gcc_assert (MEM_P (mem));
7317 return (volatile_ok || !MEM_VOLATILE_P (mem));
7320 /* Initialize the table of extra 80387 mathematical constants. */
7322 static void
7323 init_ext_80387_constants (void)
7325 static const char * cst[5] =
7327 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7328 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7329 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7330 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7331 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7333 int i;
7335 for (i = 0; i < 5; i++)
7337 real_from_string (&ext_80387_constants_table[i], cst[i]);
7338 /* Ensure each constant is rounded to XFmode precision. */
7339 real_convert (&ext_80387_constants_table[i],
7340 XFmode, &ext_80387_constants_table[i]);
7343 ext_80387_constants_init = 1;
7346 /* Return true if the constant is something that can be loaded with
7347 a special instruction. */
7350 standard_80387_constant_p (rtx x)
7352 enum machine_mode mode = GET_MODE (x);
7354 REAL_VALUE_TYPE r;
7356 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7357 return -1;
7359 if (x == CONST0_RTX (mode))
7360 return 1;
7361 if (x == CONST1_RTX (mode))
7362 return 2;
7364 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7366 /* For XFmode constants, try to find a special 80387 instruction when
7367 optimizing for size or on those CPUs that benefit from them. */
7368 if (mode == XFmode
7369 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7371 int i;
7373 if (! ext_80387_constants_init)
7374 init_ext_80387_constants ();
7376 for (i = 0; i < 5; i++)
7377 if (real_identical (&r, &ext_80387_constants_table[i]))
7378 return i + 3;
7381 /* Load of the constant -0.0 or -1.0 will be split as
7382 fldz;fchs or fld1;fchs sequence. */
7383 if (real_isnegzero (&r))
7384 return 8;
7385 if (real_identical (&r, &dconstm1))
7386 return 9;
7388 return 0;
7391 /* Return the opcode of the special instruction to be used to load
7392 the constant X. */
7394 const char *
7395 standard_80387_constant_opcode (rtx x)
7397 switch (standard_80387_constant_p (x))
7399 case 1:
7400 return "fldz";
7401 case 2:
7402 return "fld1";
7403 case 3:
7404 return "fldlg2";
7405 case 4:
7406 return "fldln2";
7407 case 5:
7408 return "fldl2e";
7409 case 6:
7410 return "fldl2t";
7411 case 7:
7412 return "fldpi";
7413 case 8:
7414 case 9:
7415 return "#";
7416 default:
7417 gcc_unreachable ();
7421 /* Return the CONST_DOUBLE representing the 80387 constant that is
7422 loaded by the specified special instruction. The argument IDX
7423 matches the return value from standard_80387_constant_p. */
7426 standard_80387_constant_rtx (int idx)
7428 int i;
7430 if (! ext_80387_constants_init)
7431 init_ext_80387_constants ();
7433 switch (idx)
7435 case 3:
7436 case 4:
7437 case 5:
7438 case 6:
7439 case 7:
7440 i = idx - 3;
7441 break;
7443 default:
7444 gcc_unreachable ();
7447 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7448 XFmode);
7451 /* Return 1 if X is all 0s and 2 if X is all 1s
7452 in a supported SSE vector mode. */
7455 standard_sse_constant_p (rtx x)
7457 enum machine_mode mode = GET_MODE (x);
7459 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7460 return 1;
7461 if (vector_all_ones_operand (x, mode))
7462 switch (mode)
7464 case V16QImode:
7465 case V8HImode:
7466 case V4SImode:
7467 case V2DImode:
7468 if (TARGET_SSE2)
7469 return 2;
7470 default:
7471 break;
7474 return 0;
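 /* For illustration (a hedged sketch, not in the original source): with SSE2
 enabled, CONST0_RTX (V4SFmode) yields 1 (loadable with xorps/pxor) and an
 all-ones V4SImode vector yields 2 (loadable with pcmpeqd); any other
 constant yields 0. */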
7477 /* Return the opcode of the special instruction to be used to load
7478 the constant X. */
7480 const char *
7481 standard_sse_constant_opcode (rtx insn, rtx x)
7483 switch (standard_sse_constant_p (x))
7485 case 1:
7486 switch (get_attr_mode (insn))
7488 case MODE_V4SF:
7489 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7490 case MODE_V2DF:
7491 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7492 case MODE_TI:
7493 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7494 case MODE_V8SF:
7495 return "vxorps\t%x0, %x0, %x0";
7496 case MODE_V4DF:
7497 return "vxorpd\t%x0, %x0, %x0";
7498 case MODE_OI:
7499 return "vpxor\t%x0, %x0, %x0";
7500 default:
7501 break;
7503 case 2:
7504 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7505 default:
7506 break;
7508 gcc_unreachable ();
7511 /* Returns 1 if OP contains a symbol reference */
7514 symbolic_reference_mentioned_p (rtx op)
7516 const char *fmt;
7517 int i;
7519 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7520 return 1;
7522 fmt = GET_RTX_FORMAT (GET_CODE (op));
7523 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7525 if (fmt[i] == 'E')
7527 int j;
7529 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7530 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7531 return 1;
7534 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7535 return 1;
7538 return 0;
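 /* Example (illustrative only): for the RTX
 (plus:SI (symbol_ref:SI ("foo")) (const_int 4))
 the recursive walk above reaches the SYMBOL_REF and returns 1, while a
 plain (const_int 4) yields 0. */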
7541 /* Return 1 if it is appropriate to emit `ret' instructions in the
7542 body of a function. Do this only if the epilogue is simple, needing a
7543 couple of insns. Prior to reloading, we can't tell how many registers
7544 must be saved, so return 0 then. Return 0 if there is no frame
7545 marker to de-allocate. */
7548 ix86_can_use_return_insn_p (void)
7550 struct ix86_frame frame;
7552 if (! reload_completed || frame_pointer_needed)
7553 return 0;
7555 /* Don't allow more than 32768 bytes of popped arguments, since that's all
7556 we can do with one instruction. */
7557 if (crtl->args.pops_args
7558 && crtl->args.size >= 32768)
7559 return 0;
7561 ix86_compute_frame_layout (&frame);
7562 return frame.to_allocate == 0 && frame.padding0 == 0
7563 && (frame.nregs + frame.nsseregs) == 0;
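 /* Note (illustrative, not part of the original source): the pops_args limit
 exists because the argument-popping return is encoded as "ret $N" with a
 16-bit immediate; e.g. a stdcall function with 12 bytes of arguments
 returns with "ret $12". Frames popping 32768 bytes or more fall back to a
 separate stack adjustment instead of a simple return. */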
7566 /* Value should be nonzero if functions must have frame pointers.
7567 Zero means the frame pointer need not be set up (and parms may
7568 be accessed via the stack pointer) in functions that seem suitable. */
7570 static bool
7571 ix86_frame_pointer_required (void)
7573 /* If we accessed previous frames, then the generated code expects
7574 to be able to access the saved ebp value in our frame. */
7575 if (cfun->machine->accesses_prev_frame)
7576 return true;
7578 /* Several x86 OSes need a frame pointer for other reasons,
7579 usually pertaining to setjmp. */
7580 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7581 return true;
7583 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7584 the frame pointer by default. Turn it back on now if we've not
7585 got a leaf function. */
7586 if (TARGET_OMIT_LEAF_FRAME_POINTER
7587 && (!current_function_is_leaf
7588 || ix86_current_function_calls_tls_descriptor))
7589 return true;
7591 if (crtl->profile)
7592 return true;
7594 return false;
7597 /* Record that the current function accesses previous call frames. */
7599 void
7600 ix86_setup_frame_addresses (void)
7602 cfun->machine->accesses_prev_frame = 1;
7605 #ifndef USE_HIDDEN_LINKONCE
7606 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7607 # define USE_HIDDEN_LINKONCE 1
7608 # else
7609 # define USE_HIDDEN_LINKONCE 0
7610 # endif
7611 #endif
7613 static int pic_labels_used;
7615 /* Fills in the label name that should be used for a pc thunk for
7616 the given register. */
7618 static void
7619 get_pc_thunk_name (char name[32], unsigned int regno)
7621 gcc_assert (!TARGET_64BIT);
7623 if (USE_HIDDEN_LINKONCE)
7624 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7625 else
7626 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
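 /* Example (not in the original source): for regno == 3 (%ebx) the
 hidden-linkonce form produces "__i686.get_pc_thunk.bx", while the
 fallback produces an internal label derived from "LPR" and the regno. */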
7630 /* This function generates the pc thunks used by -fpic code: each thunk
7631 loads its register with the return address of the caller and then returns. */
7633 static void
7634 ix86_code_end (void)
7636 rtx xops[2];
7637 int regno;
7639 for (regno = 0; regno < 8; ++regno)
7641 char name[32];
7642 tree decl;
7644 if (! ((pic_labels_used >> regno) & 1))
7645 continue;
7647 get_pc_thunk_name (name, regno);
7649 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7650 get_identifier (name),
7651 build_function_type (void_type_node, void_list_node));
7652 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7653 NULL_TREE, void_type_node);
7654 TREE_PUBLIC (decl) = 1;
7655 TREE_STATIC (decl) = 1;
7657 #if TARGET_MACHO
7658 if (TARGET_MACHO)
7660 switch_to_section (darwin_sections[text_coal_section]);
7661 fputs ("\t.weak_definition\t", asm_out_file);
7662 assemble_name (asm_out_file, name);
7663 fputs ("\n\t.private_extern\t", asm_out_file);
7664 assemble_name (asm_out_file, name);
7665 fputs ("\n", asm_out_file);
7666 ASM_OUTPUT_LABEL (asm_out_file, name);
7667 DECL_WEAK (decl) = 1;
7669 else
7670 #endif
7671 if (USE_HIDDEN_LINKONCE)
7673 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7675 (*targetm.asm_out.unique_section) (decl, 0);
7676 switch_to_section (get_named_section (decl, NULL, 0));
7678 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7679 fputs ("\t.hidden\t", asm_out_file);
7680 assemble_name (asm_out_file, name);
7681 putc ('\n', asm_out_file);
7682 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7684 else
7686 switch_to_section (text_section);
7687 ASM_OUTPUT_LABEL (asm_out_file, name);
7690 DECL_INITIAL (decl) = make_node (BLOCK);
7691 current_function_decl = decl;
7692 init_function_start (decl);
7693 first_function_block_is_cold = false;
7694 /* Make sure unwind info is emitted for the thunk if needed. */
7695 final_start_function (emit_barrier (), asm_out_file, 1);
7697 xops[0] = gen_rtx_REG (Pmode, regno);
7698 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7699 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7700 output_asm_insn ("ret", xops);
7701 final_end_function ();
7702 init_insn_lengths ();
7703 free_after_compilation (cfun);
7704 set_cfun (NULL);
7705 current_function_decl = NULL;
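 /* For reference (a sketch assuming the AT&T-syntax output path, not part of
 the original source), the body emitted above for the %ebx thunk is just:

 __i686.get_pc_thunk.bx:
 movl (%esp), %ebx
 ret

 i.e. it copies the return address sitting on top of the stack into the
 PIC register and returns. */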
7709 /* Emit code for the SET_GOT patterns. */
7711 const char *
7712 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7714 rtx xops[3];
7716 xops[0] = dest;
7718 if (TARGET_VXWORKS_RTP && flag_pic)
7720 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7721 xops[2] = gen_rtx_MEM (Pmode,
7722 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7723 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7725 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7726 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7727 an unadorned address. */
7728 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7729 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7730 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7731 return "";
7734 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7736 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7738 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7740 if (!flag_pic)
7741 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7742 else
7744 output_asm_insn ("call\t%a2", xops);
7745 #ifdef DWARF2_UNWIND_INFO
7746 /* The call to next label acts as a push. */
7747 if (dwarf2out_do_frame ())
7749 rtx insn;
7750 start_sequence ();
7751 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7752 gen_rtx_PLUS (Pmode,
7753 stack_pointer_rtx,
7754 GEN_INT (-4))));
7755 RTX_FRAME_RELATED_P (insn) = 1;
7756 dwarf2out_frame_debug (insn, true);
7757 end_sequence ();
7759 #endif
7762 #if TARGET_MACHO
7763 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7764 is what will be referenced by the Mach-O PIC subsystem. */
7765 if (!label)
7766 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7767 #endif
7769 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7770 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7772 if (flag_pic)
7774 output_asm_insn ("pop%z0\t%0", xops);
7775 #ifdef DWARF2_UNWIND_INFO
7776 /* The pop is a pop and clobbers dest, but doesn't restore it
7777 for unwind info purposes. */
7778 if (dwarf2out_do_frame ())
7780 rtx insn;
7781 start_sequence ();
7782 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7783 dwarf2out_frame_debug (insn, true);
7784 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7785 gen_rtx_PLUS (Pmode,
7786 stack_pointer_rtx,
7787 GEN_INT (4))));
7788 RTX_FRAME_RELATED_P (insn) = 1;
7789 dwarf2out_frame_debug (insn, true);
7790 end_sequence ();
7792 #endif
7795 else
7797 char name[32];
7798 get_pc_thunk_name (name, REGNO (dest));
7799 pic_labels_used |= 1 << REGNO (dest);
7801 #ifdef DWARF2_UNWIND_INFO
7802 /* Ensure all queued register saves are flushed before the
7803 call. */
7804 if (dwarf2out_do_frame ())
7806 rtx insn;
7807 start_sequence ();
7808 insn = emit_barrier ();
7809 end_sequence ();
7810 dwarf2out_frame_debug (insn, false);
7812 #endif
7813 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7814 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7815 output_asm_insn ("call\t%X2", xops);
7816 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7817 is what will be referenced by the Mach-O PIC subsystem. */
7818 #if TARGET_MACHO
7819 if (!label)
7820 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7821 else
7822 targetm.asm_out.internal_label (asm_out_file, "L",
7823 CODE_LABEL_NUMBER (label));
7824 #endif
7827 if (TARGET_MACHO)
7828 return "";
7830 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7831 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7832 else
7833 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7835 return "";
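 /* Putting the pieces above together (illustrative only): on a
 TARGET_DEEP_BRANCH_PREDICTION target the common -fpic sequence is roughly

 call __i686.get_pc_thunk.bx
 addl $_GLOBAL_OFFSET_TABLE_, %ebx

 while the non-thunk variant emits a call to the next label, pops the
 return address into the destination register, and then adds
 _GLOBAL_OFFSET_TABLE_ adjusted by the label offset. */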
7838 /* Generate a "push" pattern for input ARG. */
7840 static rtx
7841 gen_push (rtx arg)
7843 if (ix86_cfa_state->reg == stack_pointer_rtx)
7844 ix86_cfa_state->offset += UNITS_PER_WORD;
7846 return gen_rtx_SET (VOIDmode,
7847 gen_rtx_MEM (Pmode,
7848 gen_rtx_PRE_DEC (Pmode,
7849 stack_pointer_rtx)),
7850 arg);
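 /* For illustration (not part of the original source): on 32-bit targets the
 RTL returned has the form

 (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI arg))

 which the backend later matches as a single push instruction. */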
7853 /* Return >= 0 if there is an unused call-clobbered register available
7854 for the entire function. */
7856 static unsigned int
7857 ix86_select_alt_pic_regnum (void)
7859 if (current_function_is_leaf && !crtl->profile
7860 && !ix86_current_function_calls_tls_descriptor)
7862 int i, drap;
7863 /* Can't use the same register for both PIC and DRAP. */
7864 if (crtl->drap_reg)
7865 drap = REGNO (crtl->drap_reg);
7866 else
7867 drap = -1;
7868 for (i = 2; i >= 0; --i)
7869 if (i != drap && !df_regs_ever_live_p (i))
7870 return i;
7873 return INVALID_REGNUM;
7876 /* Return 1 if we need to save REGNO. */
7877 static int
7878 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7880 if (pic_offset_table_rtx
7881 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7882 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7883 || crtl->profile
7884 || crtl->calls_eh_return
7885 || crtl->uses_const_pool))
7887 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7888 return 0;
7889 return 1;
7892 if (crtl->calls_eh_return && maybe_eh_return)
7894 unsigned i;
7895 for (i = 0; ; i++)
7897 unsigned test = EH_RETURN_DATA_REGNO (i);
7898 if (test == INVALID_REGNUM)
7899 break;
7900 if (test == regno)
7901 return 1;
7905 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7906 return 1;
7908 return (df_regs_ever_live_p (regno)
7909 && !call_used_regs[regno]
7910 && !fixed_regs[regno]
7911 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7914 /* Return number of saved general purpose registers. */
7916 static int
7917 ix86_nsaved_regs (void)
7919 int nregs = 0;
7920 int regno;
7922 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7923 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7924 nregs ++;
7925 return nregs;
7928 /* Return number of saved SSE registers. */
7930 static int
7931 ix86_nsaved_sseregs (void)
7933 int nregs = 0;
7934 int regno;
7936 if (ix86_cfun_abi () != MS_ABI)
7937 return 0;
7938 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7939 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7940 nregs ++;
7941 return nregs;
7944 /* Given FROM and TO register numbers, say whether this elimination is
7945 allowed. If stack alignment is needed, we can only replace argument
7946 pointer with hard frame pointer, or replace frame pointer with stack
7947 pointer. Otherwise, frame pointer elimination is automatically
7948 handled and all other eliminations are valid. */
7950 static bool
7951 ix86_can_eliminate (const int from, const int to)
7953 if (stack_realign_fp)
7954 return ((from == ARG_POINTER_REGNUM
7955 && to == HARD_FRAME_POINTER_REGNUM)
7956 || (from == FRAME_POINTER_REGNUM
7957 && to == STACK_POINTER_REGNUM));
7958 else
7959 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
7962 /* Return the offset between two registers, one to be eliminated, and the other
7963 its replacement, at the start of a routine. */
7965 HOST_WIDE_INT
7966 ix86_initial_elimination_offset (int from, int to)
7968 struct ix86_frame frame;
7969 ix86_compute_frame_layout (&frame);
7971 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7972 return frame.hard_frame_pointer_offset;
7973 else if (from == FRAME_POINTER_REGNUM
7974 && to == HARD_FRAME_POINTER_REGNUM)
7975 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7976 else
7978 gcc_assert (to == STACK_POINTER_REGNUM);
7980 if (from == ARG_POINTER_REGNUM)
7981 return frame.stack_pointer_offset;
7983 gcc_assert (from == FRAME_POINTER_REGNUM);
7984 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7988 /* In a dynamically-aligned function, we can't know the offset from
7989 stack pointer to frame pointer, so we must ensure that setjmp
7990 eliminates fp against the hard fp (%ebp) rather than trying to
7991 index from %esp up to the top of the frame across a gap that is
7992 of unknown (at compile-time) size. */
7993 static rtx
7994 ix86_builtin_setjmp_frame_value (void)
7996 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7999 /* Fill structure ix86_frame about frame of currently computed function. */
8001 static void
8002 ix86_compute_frame_layout (struct ix86_frame *frame)
8004 unsigned int stack_alignment_needed;
8005 HOST_WIDE_INT offset;
8006 unsigned int preferred_alignment;
8007 HOST_WIDE_INT size = get_frame_size ();
8009 frame->nregs = ix86_nsaved_regs ();
8010 frame->nsseregs = ix86_nsaved_sseregs ();
8012 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8013 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
8015 /* The MS ABI seems to require stack alignment to always be 16 except in
8016 function prologues. */
8017 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
8019 preferred_alignment = 16;
8020 stack_alignment_needed = 16;
8021 crtl->preferred_stack_boundary = 128;
8022 crtl->stack_alignment_needed = 128;
8025 gcc_assert (!size || stack_alignment_needed);
8026 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8027 gcc_assert (preferred_alignment <= stack_alignment_needed);
8029 /* During reload iterations the number of registers saved can change.
8030 Recompute the value as needed. Do not recompute when the number of registers
8031 didn't change, as reload does multiple calls to the function and does not
8032 expect the decision to change within a single iteration. */
8033 if (!optimize_function_for_size_p (cfun)
8034 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8036 int count = frame->nregs;
8038 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8039 /* The fast prologue uses move instead of push to save registers. This
8040 is significantly longer, but also executes faster as modern hardware
8041 can execute the moves in parallel, but can't do that for push/pop.
8043 Be careful about choosing which prologue to emit: when the function takes
8044 many instructions to execute, we may use the slow version, as well as when
8045 the function is known to be outside a hot spot (this is known with
8046 profile feedback only). Weight the size of the function by the number of
8047 registers to save, as it is cheap to use one or two push instructions but
8048 very slow to use many of them. */
8049 if (count)
8050 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8051 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
8052 || (flag_branch_probabilities
8053 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
8054 cfun->machine->use_fast_prologue_epilogue = false;
8055 else
8056 cfun->machine->use_fast_prologue_epilogue
8057 = !expensive_function_p (count);
8059 if (TARGET_PROLOGUE_USING_MOVE
8060 && cfun->machine->use_fast_prologue_epilogue)
8061 frame->save_regs_using_mov = true;
8062 else
8063 frame->save_regs_using_mov = false;
8065 /* Skip return address. */
8066 offset = UNITS_PER_WORD;
8068 /* Skip pushed static chain. */
8069 if (ix86_static_chain_on_stack)
8070 offset += UNITS_PER_WORD;
8072 /* Skip saved base pointer. */
8073 if (frame_pointer_needed)
8074 offset += UNITS_PER_WORD;
8076 frame->hard_frame_pointer_offset = offset;
8078 /* Set offset to aligned because the realigned frame starts from
8079 here. */
8080 if (stack_realign_fp)
8081 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
8083 /* Register save area */
8084 offset += frame->nregs * UNITS_PER_WORD;
8086 /* Align SSE reg save area. */
8087 if (frame->nsseregs)
8088 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8089 else
8090 frame->padding0 = 0;
8092 /* SSE register save area. */
8093 offset += frame->padding0 + frame->nsseregs * 16;
8095 /* Va-arg area */
8096 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8097 offset += frame->va_arg_size;
8099 /* Align start of frame for local function. */
8100 frame->padding1 = ((offset + stack_alignment_needed - 1)
8101 & -stack_alignment_needed) - offset;
8103 offset += frame->padding1;
8105 /* Frame pointer points here. */
8106 frame->frame_pointer_offset = offset;
8108 offset += size;
8110 /* Add the outgoing arguments area. It can be skipped if we eliminated
8111 all the function calls as dead code.
8112 Skipping is however impossible when the function calls alloca: the alloca
8113 expander assumes that the last crtl->outgoing_args_size bytes
8114 of the stack frame are unused. */
8115 if (ACCUMULATE_OUTGOING_ARGS
8116 && (!current_function_is_leaf || cfun->calls_alloca
8117 || ix86_current_function_calls_tls_descriptor))
8119 offset += crtl->outgoing_args_size;
8120 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8122 else
8123 frame->outgoing_arguments_size = 0;
8125 /* Align stack boundary. Only needed if we're calling another function
8126 or using alloca. */
8127 if (!current_function_is_leaf || cfun->calls_alloca
8128 || ix86_current_function_calls_tls_descriptor)
8129 frame->padding2 = ((offset + preferred_alignment - 1)
8130 & -preferred_alignment) - offset;
8131 else
8132 frame->padding2 = 0;
8134 offset += frame->padding2;
8136 /* We've reached end of stack frame. */
8137 frame->stack_pointer_offset = offset;
8139 /* Size prologue needs to allocate. */
8140 frame->to_allocate =
8141 (size + frame->padding1 + frame->padding2
8142 + frame->outgoing_arguments_size + frame->va_arg_size);
8144 if ((!frame->to_allocate && frame->nregs <= 1)
8145 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8146 frame->save_regs_using_mov = false;
8148 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8149 && current_function_sp_is_unchanging
8150 && current_function_is_leaf
8151 && !ix86_current_function_calls_tls_descriptor)
8153 frame->red_zone_size = frame->to_allocate;
8154 if (frame->save_regs_using_mov)
8155 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8156 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8157 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8159 else
8160 frame->red_zone_size = 0;
8161 frame->to_allocate -= frame->red_zone_size;
8162 frame->stack_pointer_offset -= frame->red_zone_size;
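 /* An illustrative picture of the offsets computed above (a sketch, not part
 of the original source), growing downward from the stack pointer at entry:

 return address offset = UNITS_PER_WORD
 pushed static chain / saved %ebp (if any)
 hard_frame_pointer_offset
 GP register save area (nregs * UNITS_PER_WORD)
 SSE register save area + padding0
 va_arg save area, padding1
 frame_pointer_offset
 local variables (get_frame_size ())
 outgoing argument area + padding2
 stack_pointer_offset

 to_allocate is the portion below the register saves that the prologue must
 subtract from the stack pointer, minus whatever fits in the red zone. */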
8165 /* Emit code to save registers in the prologue. */
8167 static void
8168 ix86_emit_save_regs (void)
8170 unsigned int regno;
8171 rtx insn;
8173 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8174 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8176 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8177 RTX_FRAME_RELATED_P (insn) = 1;
8181 /* Emit code to save registers using MOV insns. First register
8182 is saved at POINTER + OFFSET. */
8183 static void
8184 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8186 unsigned int regno;
8187 rtx insn;
8189 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8190 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8192 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8193 Pmode, offset),
8194 gen_rtx_REG (Pmode, regno));
8195 RTX_FRAME_RELATED_P (insn) = 1;
8196 offset += UNITS_PER_WORD;
8200 /* Emit code to save SSE registers using MOV insns. First register
8201 is saved at POINTER + OFFSET. */
8202 static void
8203 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8205 unsigned int regno;
8206 rtx insn;
8207 rtx mem;
8209 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8210 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8212 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8213 set_mem_align (mem, 128);
8214 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8215 RTX_FRAME_RELATED_P (insn) = 1;
8216 offset += 16;
8220 static GTY(()) rtx queued_cfa_restores;
8222 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next stack
8223 manipulation insn. Don't add it if the previously
8224 saved value will be left untouched within the stack red zone until return,
8225 as unwinders can find the same value in the register and
8226 on the stack. */
8228 static void
8229 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8231 if (TARGET_RED_ZONE
8232 && !TARGET_64BIT_MS_ABI
8233 && red_offset + RED_ZONE_SIZE >= 0
8234 && crtl->args.pops_args < 65536)
8235 return;
8237 if (insn)
8239 add_reg_note (insn, REG_CFA_RESTORE, reg);
8240 RTX_FRAME_RELATED_P (insn) = 1;
8242 else
8243 queued_cfa_restores
8244 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8247 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
8249 static void
8250 ix86_add_queued_cfa_restore_notes (rtx insn)
8252 rtx last;
8253 if (!queued_cfa_restores)
8254 return;
8255 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8257 XEXP (last, 1) = REG_NOTES (insn);
8258 REG_NOTES (insn) = queued_cfa_restores;
8259 queued_cfa_restores = NULL_RTX;
8260 RTX_FRAME_RELATED_P (insn) = 1;
8263 /* Expand a prologue or epilogue stack adjustment.
8264 The pattern exists to put a dependency on all ebp-based memory accesses.
8265 STYLE should be negative if instructions should be marked as frame related,
8266 zero if the %r11 register is live and cannot be freely used, and positive
8267 otherwise. */
8269 static void
8270 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8271 int style, bool set_cfa)
8273 rtx insn;
8275 if (! TARGET_64BIT)
8276 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8277 else if (x86_64_immediate_operand (offset, DImode))
8278 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8279 else
8281 rtx r11;
8282 /* r11 is used by indirect sibcall return as well, set before the
8283 epilogue and used after the epilogue. ATM indirect sibcall
8284 shouldn't be used together with huge frame sizes in one
8285 function because of the frame_size check in sibcall.c. */
8286 gcc_assert (style);
8287 r11 = gen_rtx_REG (DImode, R11_REG);
8288 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8289 if (style < 0)
8290 RTX_FRAME_RELATED_P (insn) = 1;
8291 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8292 offset));
8295 if (style >= 0)
8296 ix86_add_queued_cfa_restore_notes (insn);
8298 if (set_cfa)
8300 rtx r;
8302 gcc_assert (ix86_cfa_state->reg == src);
8303 ix86_cfa_state->offset += INTVAL (offset);
8304 ix86_cfa_state->reg = dest;
8306 r = gen_rtx_PLUS (Pmode, src, offset);
8307 r = gen_rtx_SET (VOIDmode, dest, r);
8308 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8309 RTX_FRAME_RELATED_P (insn) = 1;
8311 else if (style < 0)
8312 RTX_FRAME_RELATED_P (insn) = 1;
8315 /* Find an available register to be used as the dynamic realign argument
8316 pointer register. Such a register will be written in the prologue and
8317 used at the beginning of the body, so it must not be
8318 1. a parameter-passing register.
8319 2. the GOT pointer.
8320 We reuse the static-chain register if it is available. Otherwise, we
8321 use DI for i386 and R13 for x86-64. We chose R13 since it has a
8322 shorter encoding.
8324 Return the regno of the chosen register. */
8326 static unsigned int
8327 find_drap_reg (void)
8329 tree decl = cfun->decl;
8331 if (TARGET_64BIT)
8333 /* Use R13 for a nested function or a function that needs a static chain.
8334 Since a function with a tail call may use any caller-saved
8335 register in the epilogue, DRAP must not use a caller-saved
8336 register in that case. */
8337 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8338 return R13_REG;
8340 return R10_REG;
8342 else
8344 /* Use DI for a nested function or a function that needs a static chain.
8345 Since a function with a tail call may use any caller-saved
8346 register in the epilogue, DRAP must not use a caller-saved
8347 register in that case. */
8348 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8349 return DI_REG;
8351 /* Reuse static chain register if it isn't used for parameter
8352 passing. */
8353 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8354 && !lookup_attribute ("fastcall",
8355 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
8356 && !lookup_attribute ("thiscall",
8357 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8358 return CX_REG;
8359 else
8360 return DI_REG;
8364 /* Return minimum incoming stack alignment. */
8366 static unsigned int
8367 ix86_minimum_incoming_stack_boundary (bool sibcall)
8369 unsigned int incoming_stack_boundary;
8371 /* Prefer the one specified at command line. */
8372 if (ix86_user_incoming_stack_boundary)
8373 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8374 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
8375 if -mstackrealign is used, this is not the sibcall check, and the
8376 estimated stack alignment is 128 bits. */
8377 else if (!sibcall
8378 && !TARGET_64BIT
8379 && ix86_force_align_arg_pointer
8380 && crtl->stack_alignment_estimated == 128)
8381 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8382 else
8383 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8385 /* Incoming stack alignment can be changed on individual functions
8386 via force_align_arg_pointer attribute. We use the smallest
8387 incoming stack boundary. */
8388 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8389 && lookup_attribute (ix86_force_align_arg_pointer_string,
8390 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8391 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8393 /* The incoming stack frame has to be aligned at least at
8394 parm_stack_boundary. */
8395 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8396 incoming_stack_boundary = crtl->parm_stack_boundary;
8398 /* The stack at the entry of main is aligned by the runtime. We use the
8399 smallest incoming stack boundary. */
8400 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8401 && DECL_NAME (current_function_decl)
8402 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8403 && DECL_FILE_SCOPE_P (current_function_decl))
8404 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8406 return incoming_stack_boundary;
8409 /* Update incoming stack boundary and estimated stack alignment. */
8411 static void
8412 ix86_update_stack_boundary (void)
8414 ix86_incoming_stack_boundary
8415 = ix86_minimum_incoming_stack_boundary (false);
8417 /* x86_64 varargs need 16-byte stack alignment for the register save
8418 area. */
8419 if (TARGET_64BIT
8420 && cfun->stdarg
8421 && crtl->stack_alignment_estimated < 128)
8422 crtl->stack_alignment_estimated = 128;
8425 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8426 needed or an rtx for DRAP otherwise. */
8428 static rtx
8429 ix86_get_drap_rtx (void)
8431 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8432 crtl->need_drap = true;
8434 if (stack_realign_drap)
8436 /* Assign DRAP to vDRAP and return vDRAP. */
8437 unsigned int regno = find_drap_reg ();
8438 rtx drap_vreg;
8439 rtx arg_ptr;
8440 rtx seq, insn;
8442 arg_ptr = gen_rtx_REG (Pmode, regno);
8443 crtl->drap_reg = arg_ptr;
8445 start_sequence ();
8446 drap_vreg = copy_to_reg (arg_ptr);
8447 seq = get_insns ();
8448 end_sequence ();
8450 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8451 if (!optimize)
8453 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8454 RTX_FRAME_RELATED_P (insn) = 1;
8456 return drap_vreg;
8458 else
8459 return NULL;
8462 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8464 static rtx
8465 ix86_internal_arg_pointer (void)
8467 return virtual_incoming_args_rtx;
8470 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8471 to be generated in correct form. */
8472 static void
8473 ix86_finalize_stack_realign_flags (void)
8475 /* Check whether stack realignment is really needed after reload, and
8476 store the result in cfun. */
8477 unsigned int incoming_stack_boundary
8478 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8479 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8480 unsigned int stack_realign = (incoming_stack_boundary
8481 < (current_function_is_leaf
8482 ? crtl->max_used_stack_slot_alignment
8483 : crtl->stack_alignment_needed));
8485 if (crtl->stack_realign_finalized)
8487 /* After stack_realign_needed is finalized, we can no longer
8488 change it. */
8489 gcc_assert (crtl->stack_realign_needed == stack_realign);
8491 else
8493 crtl->stack_realign_needed = stack_realign;
8494 crtl->stack_realign_finalized = true;
8498 /* Expand the prologue into a bunch of separate insns. */
8500 void
8501 ix86_expand_prologue (void)
8503 rtx insn;
8504 bool pic_reg_used;
8505 struct ix86_frame frame;
8506 HOST_WIDE_INT allocate;
8507 int gen_frame_pointer = frame_pointer_needed;
8509 ix86_finalize_stack_realign_flags ();
8511 /* DRAP should not coexist with stack_realign_fp */
8512 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8514 /* Initialize CFA state for before the prologue. */
8515 ix86_cfa_state->reg = stack_pointer_rtx;
8516 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8518 ix86_compute_frame_layout (&frame);
8520 if (ix86_function_ms_hook_prologue (current_function_decl))
8522 rtx push, mov;
8524 /* Make sure the function starts with
8525 8b ff movl.s %edi,%edi
8526 55 push %ebp
8527 8b ec movl.s %esp,%ebp
8529 This matches the hookable function prologue in Win32 API
8530 functions in Microsoft Windows XP Service Pack 2 and newer.
8531 Wine uses this to enable Windows apps to hook the Win32 API
8532 functions provided by Wine. */
8533 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8534 gen_rtx_REG (SImode, DI_REG)));
8535 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8536 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8537 stack_pointer_rtx));
8539 if (frame_pointer_needed && !(crtl->drap_reg
8540 && crtl->stack_realign_needed))
8542 /* The push %ebp and movl.s %esp, %ebp already set up
8543 the frame pointer. No need to do this again. */
8544 gen_frame_pointer = 0;
8545 RTX_FRAME_RELATED_P (push) = 1;
8546 RTX_FRAME_RELATED_P (mov) = 1;
8547 if (ix86_cfa_state->reg == stack_pointer_rtx)
8548 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8550 else
8551 /* If the frame pointer is not needed, pop %ebp again. This
8552 could be optimized for cases where ebp needs to be backed up
8553 for some other reason. If stack realignment is needed, pop
8554 the base pointer again, align the stack, and later regenerate
8555 the frame pointer setup. The frame pointer generated by the
8556 hook prologue is not aligned, so it can't be used. */
8557 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8560 /* The first insn of a function that accepts its static chain on the
8561 stack is to push the register that would be filled in by a direct
8562 call. This insn will be skipped by the trampoline. */
8563 if (ix86_static_chain_on_stack)
8565 rtx t;
8567 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8568 emit_insn (gen_blockage ());
8570 /* We don't want to interpret this push insn as a register save,
8571 only as a stack adjustment. The real copy of the register as
8572 a save will be done later, if needed. */
8573 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8574 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8575 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8576 RTX_FRAME_RELATED_P (insn) = 1;
8579 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8580 DRAP is needed and stack realignment is really needed after reload. */
8581 if (crtl->drap_reg && crtl->stack_realign_needed)
8583 rtx x, y;
8584 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8585 int param_ptr_offset = UNITS_PER_WORD;
8587 if (ix86_static_chain_on_stack)
8588 param_ptr_offset += UNITS_PER_WORD;
8589 if (!call_used_regs[REGNO (crtl->drap_reg)])
8590 param_ptr_offset += UNITS_PER_WORD;
8592 gcc_assert (stack_realign_drap);
8594 /* Grab the argument pointer. */
8595 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8596 y = crtl->drap_reg;
8598 /* Only need to push parameter pointer reg if it is caller
8599 saved reg */
8600 if (!call_used_regs[REGNO (crtl->drap_reg)])
8602 /* Push arg pointer reg */
8603 insn = emit_insn (gen_push (y));
8604 RTX_FRAME_RELATED_P (insn) = 1;
8607 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8608 RTX_FRAME_RELATED_P (insn) = 1;
8609 ix86_cfa_state->reg = crtl->drap_reg;
8611 /* Align the stack. */
8612 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8613 stack_pointer_rtx,
8614 GEN_INT (-align_bytes)));
8615 RTX_FRAME_RELATED_P (insn) = 1;
8617 /* Replicate the return address on the stack so that return
8618 address can be reached via (argp - 1) slot. This is needed
8619 to implement macro RETURN_ADDR_RTX and intrinsic function
8620 expand_builtin_return_addr etc. */
8621 x = crtl->drap_reg;
8622 x = gen_frame_mem (Pmode,
8623 plus_constant (x, -UNITS_PER_WORD));
8624 insn = emit_insn (gen_push (x));
8625 RTX_FRAME_RELATED_P (insn) = 1;
8628 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8629 slower on all targets. Also sdb doesn't like it. */
8631 if (gen_frame_pointer)
8633 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8634 RTX_FRAME_RELATED_P (insn) = 1;
8636 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8637 RTX_FRAME_RELATED_P (insn) = 1;
8639 if (ix86_cfa_state->reg == stack_pointer_rtx)
8640 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8643 if (stack_realign_fp)
8645 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8646 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8648 /* Align the stack. */
8649 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8650 stack_pointer_rtx,
8651 GEN_INT (-align_bytes)));
8652 RTX_FRAME_RELATED_P (insn) = 1;
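 /* Illustrative example (not in the original source): for a function whose
 stack_alignment_needed is 256 bits, align_bytes is 32 and the insn above
 comes out as "andl $-32, %esp" (or "andq $-32, %rsp" on 64-bit), forcing
 the realigned frame onto a 32-byte boundary. */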
8655 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8657 if (!frame.save_regs_using_mov)
8658 ix86_emit_save_regs ();
8659 else
8660 allocate += frame.nregs * UNITS_PER_WORD;
8662 /* When using the red zone we may start register saving before allocating
8663 the stack frame, saving one cycle of the prologue. However, avoid doing
8664 this if we are going to have to probe the stack, since
8665 at least on x86_64 the stack probe can turn into a call that clobbers
8666 a red zone location. */
8667 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8668 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8669 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8670 && !crtl->stack_realign_needed)
8671 ? hard_frame_pointer_rtx
8672 : stack_pointer_rtx,
8673 -frame.nregs * UNITS_PER_WORD);
8675 if (allocate == 0)
8677 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8678 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8679 GEN_INT (-allocate), -1,
8680 ix86_cfa_state->reg == stack_pointer_rtx);
8681 else
8683 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8684 bool eax_live;
8685 rtx t;
8687 if (cfun->machine->call_abi == MS_ABI)
8688 eax_live = false;
8689 else
8690 eax_live = ix86_eax_live_at_start_p ();
8692 if (eax_live)
8694 emit_insn (gen_push (eax));
8695 allocate -= UNITS_PER_WORD;
8698 emit_move_insn (eax, GEN_INT (allocate));
8700 if (TARGET_64BIT)
8701 insn = gen_allocate_stack_worker_64 (eax, eax);
8702 else
8703 insn = gen_allocate_stack_worker_32 (eax, eax);
8704 insn = emit_insn (insn);
8706 if (ix86_cfa_state->reg == stack_pointer_rtx)
8708 ix86_cfa_state->offset += allocate;
8709 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8710 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8711 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8712 RTX_FRAME_RELATED_P (insn) = 1;
8715 if (eax_live)
8717 if (frame_pointer_needed)
8718 t = plus_constant (hard_frame_pointer_rtx,
8719 allocate
8720 - frame.to_allocate
8721 - frame.nregs * UNITS_PER_WORD);
8722 else
8723 t = plus_constant (stack_pointer_rtx, allocate);
8724 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8728 if (frame.save_regs_using_mov
8729 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8730 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8732 if (!frame_pointer_needed
8733 || !(frame.to_allocate + frame.padding0)
8734 || crtl->stack_realign_needed)
8735 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8736 frame.to_allocate
8737 + frame.nsseregs * 16 + frame.padding0);
8738 else
8739 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8740 -frame.nregs * UNITS_PER_WORD);
8742 if (!frame_pointer_needed
8743 || !(frame.to_allocate + frame.padding0)
8744 || crtl->stack_realign_needed)
8745 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8746 frame.to_allocate);
8747 else
8748 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8749 - frame.nregs * UNITS_PER_WORD
8750 - frame.nsseregs * 16
8751 - frame.padding0);
8753 pic_reg_used = false;
8754 if (pic_offset_table_rtx
8755 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8756 || crtl->profile))
8758 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8760 if (alt_pic_reg_used != INVALID_REGNUM)
8761 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8763 pic_reg_used = true;
8766 if (pic_reg_used)
8768 if (TARGET_64BIT)
8770 if (ix86_cmodel == CM_LARGE_PIC)
8772 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8773 rtx label = gen_label_rtx ();
8774 emit_label (label);
8775 LABEL_PRESERVE_P (label) = 1;
8776 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8777 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8778 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8779 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8780 pic_offset_table_rtx, tmp_reg));
8782 else
8783 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8785 else
8786 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8789 /* In the pic_reg_used case, make sure that the got load isn't deleted
8790 when mcount needs it. Blockage to avoid call movement across mcount
8791 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8792 note. */
8793 if (crtl->profile && pic_reg_used)
8794 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8796 if (crtl->drap_reg && !crtl->stack_realign_needed)
8798 /* vDRAP is set up, but after reload it turns out stack realignment
8799 isn't necessary; here we emit prologue code to set up DRAP
8800 without the stack realignment adjustment. */
8801 rtx x;
8802 int drap_bp_offset = UNITS_PER_WORD * 2;
8804 if (ix86_static_chain_on_stack)
8805 drap_bp_offset += UNITS_PER_WORD;
8806 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8807 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8810 /* Prevent instructions from being scheduled into register save push
8811 sequence when access to the redzone area is done through frame pointer.
8812 The offset between the frame pointer and the stack pointer is calculated
8813 relative to the value of the stack pointer at the end of the function
8814 prologue, and moving instructions that access redzone area via frame
8815 pointer inside push sequence violates this assumption. */
8816 if (frame_pointer_needed && frame.red_zone_size)
8817 emit_insn (gen_memory_blockage ());
8819 /* Emit cld instruction if stringops are used in the function. */
8820 if (TARGET_CLD && ix86_current_function_needs_cld)
8821 emit_insn (gen_cld ());
8824 /* Emit code to restore REG using a POP insn. */
8826 static void
8827 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8829 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8831 if (ix86_cfa_state->reg == crtl->drap_reg
8832 && REGNO (reg) == REGNO (crtl->drap_reg))
8834 /* Previously we'd represented the CFA as an expression
8835 like *(%ebp - 8). We've just popped that value from
8836 the stack, which means we need to reset the CFA to
8837 the drap register. This will remain until we restore
8838 the stack pointer. */
8839 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8840 RTX_FRAME_RELATED_P (insn) = 1;
8841 return;
8844 if (ix86_cfa_state->reg == stack_pointer_rtx)
8846 ix86_cfa_state->offset -= UNITS_PER_WORD;
8847 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8848 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8849 RTX_FRAME_RELATED_P (insn) = 1;
8852 /* When the frame pointer is the CFA, and we pop it, we are
8853 swapping back to the stack pointer as the CFA. This happens
8854 for stack frames that don't allocate other data, so we assume
8855 the stack pointer is now pointing at the return address, i.e.
8856 the function entry state, which makes the offset be 1 word. */
8857 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8858 && reg == hard_frame_pointer_rtx)
8860 ix86_cfa_state->reg = stack_pointer_rtx;
8861 ix86_cfa_state->offset -= UNITS_PER_WORD;
8863 add_reg_note (insn, REG_CFA_DEF_CFA,
8864 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8865 GEN_INT (ix86_cfa_state->offset)));
8866 RTX_FRAME_RELATED_P (insn) = 1;
8869 ix86_add_cfa_restore_note (insn, reg, red_offset);
8872 /* Emit code to restore saved registers using POP insns. */
8874 static void
8875 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8877 int regno;
8879 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8880 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8882 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8883 red_offset);
8884 red_offset += UNITS_PER_WORD;
8888 /* Emit code and notes for the LEAVE instruction. */
8890 static void
8891 ix86_emit_leave (HOST_WIDE_INT red_offset)
8893 rtx insn = emit_insn (ix86_gen_leave ());
8895 ix86_add_queued_cfa_restore_notes (insn);
8897 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8899 ix86_cfa_state->reg = stack_pointer_rtx;
8900 ix86_cfa_state->offset -= UNITS_PER_WORD;
8902 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8903 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8904 RTX_FRAME_RELATED_P (insn) = 1;
8905 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
8909 /* Emit code to restore saved registers using MOV insns. First register
8910 is restored from POINTER + OFFSET. */
8911 static void
8912 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8913 HOST_WIDE_INT red_offset,
8914 int maybe_eh_return)
8916 unsigned int regno;
8917 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8918 rtx insn;
8920 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8921 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8923 rtx reg = gen_rtx_REG (Pmode, regno);
8925 /* Ensure that adjust_address won't be forced to produce pointer
8926 out of range allowed by x86-64 instruction set. */
8927 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8929 rtx r11;
8931 r11 = gen_rtx_REG (DImode, R11_REG);
8932 emit_move_insn (r11, GEN_INT (offset));
8933 emit_insn (gen_adddi3 (r11, r11, pointer));
8934 base_address = gen_rtx_MEM (Pmode, r11);
8935 offset = 0;
8937 insn = emit_move_insn (reg,
8938 adjust_address (base_address, Pmode, offset));
8939 offset += UNITS_PER_WORD;
8941 if (ix86_cfa_state->reg == crtl->drap_reg
8942 && regno == REGNO (crtl->drap_reg))
8944 /* Previously we'd represented the CFA as an expression
8945 like *(%ebp - 8). We've just popped that value from
8946 the stack, which means we need to reset the CFA to
8947 the drap register. This will remain until we restore
8948 the stack pointer. */
8949 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8950 RTX_FRAME_RELATED_P (insn) = 1;
8952 else
8953 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8955 red_offset += UNITS_PER_WORD;
8959 /* Emit code to restore saved registers using MOV insns. First register
8960 is restored from POINTER + OFFSET. */
8961 static void
8962 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8963 HOST_WIDE_INT red_offset,
8964 int maybe_eh_return)
8966 int regno;
8967 rtx base_address = gen_rtx_MEM (TImode, pointer);
8968 rtx mem;
8970 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8971 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8973 rtx reg = gen_rtx_REG (TImode, regno);
8975 /* Ensure that adjust_address won't be forced to produce pointer
8976 out of range allowed by x86-64 instruction set. */
8977 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8979 rtx r11;
8981 r11 = gen_rtx_REG (DImode, R11_REG);
8982 emit_move_insn (r11, GEN_INT (offset));
8983 emit_insn (gen_adddi3 (r11, r11, pointer));
8984 base_address = gen_rtx_MEM (TImode, r11);
8985 offset = 0;
8987 mem = adjust_address (base_address, TImode, offset);
8988 set_mem_align (mem, 128);
8989 emit_move_insn (reg, mem);
8990 offset += 16;
8992 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8994 red_offset += 16;
8998 /* Restore function stack, frame, and registers. */
9000 void
9001 ix86_expand_epilogue (int style)
9003 int sp_valid;
9004 struct ix86_frame frame;
9005 HOST_WIDE_INT offset, red_offset;
9006 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
9007 bool using_drap;
9009 ix86_finalize_stack_realign_flags ();
9011 /* When stack is realigned, SP must be valid. */
9012 sp_valid = (!frame_pointer_needed
9013 || current_function_sp_is_unchanging
9014 || stack_realign_fp);
9016 ix86_compute_frame_layout (&frame);
9018 /* See the comment about red zone and frame
9019 pointer usage in ix86_expand_prologue. */
9020 if (frame_pointer_needed && frame.red_zone_size)
9021 emit_insn (gen_memory_blockage ());
9023 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
9024 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
9026 /* Calculate start of saved registers relative to ebp. Special care
9027 must be taken for the normal return case of a function using
9028 eh_return: the eax and edx registers are marked as saved, but not
9029 restored along this path. */
9030 offset = frame.nregs;
9031 if (crtl->calls_eh_return && style != 2)
9032 offset -= 2;
9033 offset *= -UNITS_PER_WORD;
9034 offset -= frame.nsseregs * 16 + frame.padding0;
9036 /* Calculate start of saved registers relative to esp on entry of the
9037 function. When realigning stack, this needs to be the most negative
9038 value possible at runtime. */
9039 red_offset = offset;
9040 if (using_drap)
9041 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9042 + UNITS_PER_WORD;
9043 else if (stack_realign_fp)
9044 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9045 - UNITS_PER_WORD;
9046 if (ix86_static_chain_on_stack)
9047 red_offset -= UNITS_PER_WORD;
9048 if (frame_pointer_needed)
9049 red_offset -= UNITS_PER_WORD;
9051 /* If we're only restoring one register and sp is not valid, then
9052 use a move instruction to restore the register, since it's
9053 less work than reloading sp and popping the register.
9055 The default code results in a stack adjustment using an add/lea instruction,
9056 while this code results in a LEAVE instruction (or discrete equivalent),
9057 so it is profitable in some other cases as well, especially when there
9058 are no registers to restore. We also use this code when TARGET_USE_LEAVE
9059 and there is exactly one register to pop. This heuristic may need some
9060 tuning in the future. */
9061 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9062 || (TARGET_EPILOGUE_USING_MOVE
9063 && cfun->machine->use_fast_prologue_epilogue
9064 && ((frame.nregs + frame.nsseregs) > 1
9065 || (frame.to_allocate + frame.padding0) != 0))
9066 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9067 && (frame.to_allocate + frame.padding0) != 0)
9068 || (frame_pointer_needed && TARGET_USE_LEAVE
9069 && cfun->machine->use_fast_prologue_epilogue
9070 && (frame.nregs + frame.nsseregs) == 1)
9071 || crtl->calls_eh_return)
9073 /* Restore registers. We can use ebp or esp to address the memory
9074 locations. If both are available, default to ebp, since offsets
9075 are known to be small. The only exception is esp pointing directly
9076 to the end of the block of saved registers, where we may simplify
9077 the addressing mode.
9079 If we are realigning the stack with bp and sp, the register restores
9080 cannot be addressed via bp; sp must be used instead. */
9082 if (!frame_pointer_needed
9083 || (sp_valid && !(frame.to_allocate + frame.padding0))
9084 || stack_realign_fp)
9086 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9087 frame.to_allocate, red_offset,
9088 style == 2);
9089 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9090 frame.to_allocate
9091 + frame.nsseregs * 16
9092 + frame.padding0,
9093 red_offset
9094 + frame.nsseregs * 16
9095 + frame.padding0, style == 2);
9097 else
9099 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9100 offset, red_offset,
9101 style == 2);
9102 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9103 offset
9104 + frame.nsseregs * 16
9105 + frame.padding0,
9106 red_offset
9107 + frame.nsseregs * 16
9108 + frame.padding0, style == 2);
9111 red_offset -= offset;
9113 /* eh_return epilogues need %ecx added to the stack pointer. */
9114 if (style == 2)
9116 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9118 /* Stack align doesn't work with eh_return. */
9119 gcc_assert (!crtl->stack_realign_needed);
9120 /* Neither do regparm nested functions. */
9121 gcc_assert (!ix86_static_chain_on_stack);
9123 if (frame_pointer_needed)
9125 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9126 tmp = plus_constant (tmp, UNITS_PER_WORD);
9127 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9129 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9130 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9132 /* Note that we use SA as a temporary CFA, as the return
9133 address is at the proper place relative to it. We
9134 pretend this happens at the FP restore insn because
9135 prior to this insn the FP would be stored at the wrong
9136 offset relative to SA, and after this insn we have no
9137 other reasonable register to use for the CFA. We don't
9138 bother resetting the CFA to the SP for the duration of
9139 the return insn. */
9140 add_reg_note (tmp, REG_CFA_DEF_CFA,
9141 plus_constant (sa, UNITS_PER_WORD));
9142 ix86_add_queued_cfa_restore_notes (tmp);
9143 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9144 RTX_FRAME_RELATED_P (tmp) = 1;
9145 ix86_cfa_state->reg = sa;
9146 ix86_cfa_state->offset = UNITS_PER_WORD;
9148 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9149 const0_rtx, style, false);
9151 else
9153 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9154 tmp = plus_constant (tmp, (frame.to_allocate
9155 + frame.nregs * UNITS_PER_WORD
9156 + frame.nsseregs * 16
9157 + frame.padding0));
9158 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9159 ix86_add_queued_cfa_restore_notes (tmp);
9161 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9162 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9164 ix86_cfa_state->offset = UNITS_PER_WORD;
9165 add_reg_note (tmp, REG_CFA_DEF_CFA,
9166 plus_constant (stack_pointer_rtx,
9167 UNITS_PER_WORD));
9168 RTX_FRAME_RELATED_P (tmp) = 1;
9172 else if (!frame_pointer_needed)
9173 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9174 GEN_INT (frame.to_allocate
9175 + frame.nregs * UNITS_PER_WORD
9176 + frame.nsseregs * 16
9177 + frame.padding0),
9178 style, !using_drap);
9179 /* If not an i386, mov & pop is faster than "leave". */
9180 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9181 || !cfun->machine->use_fast_prologue_epilogue)
9182 ix86_emit_leave (red_offset);
9183 else
9185 pro_epilogue_adjust_stack (stack_pointer_rtx,
9186 hard_frame_pointer_rtx,
9187 const0_rtx, style, !using_drap);
9189 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9192 else
9194 /* First step is to deallocate the stack frame so that we can
9195 pop the registers.
9197 If we realign the stack with the frame pointer, then the stack
9198 pointer cannot be recovered via lea $offset(%bp), %sp, because
9199 there is a padding area between bp and sp for the realignment.
9200 "add $to_allocate, %sp" must be used instead. */
9201 if (!sp_valid)
9203 gcc_assert (frame_pointer_needed);
9204 gcc_assert (!stack_realign_fp);
9205 pro_epilogue_adjust_stack (stack_pointer_rtx,
9206 hard_frame_pointer_rtx,
9207 GEN_INT (offset), style, false);
9208 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9209 0, red_offset,
9210 style == 2);
9211 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9212 GEN_INT (frame.nsseregs * 16
9213 + frame.padding0),
9214 style, false);
9216 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9218 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9219 frame.to_allocate, red_offset,
9220 style == 2);
9221 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9222 GEN_INT (frame.to_allocate
9223 + frame.nsseregs * 16
9224 + frame.padding0), style,
9225 !using_drap && !frame_pointer_needed);
9228 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9229 + frame.padding0);
9230 red_offset -= offset;
9232 if (frame_pointer_needed)
9234 /* Leave results in shorter dependency chains on CPUs that are
9235 able to grok it fast. */
9236 if (TARGET_USE_LEAVE)
9237 ix86_emit_leave (red_offset);
9238 else
9240 /* When stack realignment really happens, recovering the
9241 stack pointer from the hard frame pointer is a must, if not
9242 using leave. */
9243 if (stack_realign_fp)
9244 pro_epilogue_adjust_stack (stack_pointer_rtx,
9245 hard_frame_pointer_rtx,
9246 const0_rtx, style, !using_drap);
9247 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9248 red_offset);
9253 if (using_drap)
9255 int param_ptr_offset = UNITS_PER_WORD;
9256 rtx insn;
9258 gcc_assert (stack_realign_drap);
9260 if (ix86_static_chain_on_stack)
9261 param_ptr_offset += UNITS_PER_WORD;
9262 if (!call_used_regs[REGNO (crtl->drap_reg)])
9263 param_ptr_offset += UNITS_PER_WORD;
9265 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9266 crtl->drap_reg,
9267 GEN_INT (-param_ptr_offset)));
9269 ix86_cfa_state->reg = stack_pointer_rtx;
9270 ix86_cfa_state->offset = param_ptr_offset;
9272 add_reg_note (insn, REG_CFA_DEF_CFA,
9273 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9274 GEN_INT (ix86_cfa_state->offset)));
9275 RTX_FRAME_RELATED_P (insn) = 1;
9277 if (!call_used_regs[REGNO (crtl->drap_reg)])
9278 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9281 /* Remove the saved static chain from the stack. The use of ECX is
9282 merely as a scratch register, not as the actual static chain. */
9283 if (ix86_static_chain_on_stack)
9285 rtx r, insn;
9287 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9288 ix86_cfa_state->offset += UNITS_PER_WORD;
9290 r = gen_rtx_REG (Pmode, CX_REG);
9291 insn = emit_insn (ix86_gen_pop1 (r));
9293 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9294 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9295 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9296 RTX_FRAME_RELATED_P (insn) = 1;
9299 /* Sibcall epilogues don't want a return instruction. */
9300 if (style == 0)
9302 *ix86_cfa_state = cfa_state_save;
9303 return;
9306 if (crtl->args.pops_args && crtl->args.size)
9308 rtx popc = GEN_INT (crtl->args.pops_args);
9310 /* i386 can only pop 64K bytes. If asked to pop more, pop return
9311 address, do explicit add, and jump indirectly to the caller. */
9313 if (crtl->args.pops_args >= 65536)
9315 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9316 rtx insn;
9318 /* There is no "pascal" calling convention in any 64bit ABI. */
9319 gcc_assert (!TARGET_64BIT);
9321 insn = emit_insn (gen_popsi1 (ecx));
9322 ix86_cfa_state->offset -= UNITS_PER_WORD;
9324 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9325 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9326 add_reg_note (insn, REG_CFA_REGISTER,
9327 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9328 RTX_FRAME_RELATED_P (insn) = 1;
9330 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9331 popc, -1, true);
9332 emit_jump_insn (gen_return_indirect_internal (ecx));
9334 else
9335 emit_jump_insn (gen_return_pop_internal (popc));
9337 else
9338 emit_jump_insn (gen_return_internal ());
9340 /* Restore the state back to the state from the prologue,
9341 so that it's correct for the next epilogue. */
9342 *ix86_cfa_state = cfa_state_save;
9345 /* Reset from the function's potential modifications. */
9347 static void
9348 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9349 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9351 if (pic_offset_table_rtx)
9352 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9353 #if TARGET_MACHO
9354 /* Mach-O doesn't support labels at the end of objects, so if
9355 it looks like we might want one, insert a NOP. */
9357 rtx insn = get_last_insn ();
9358 while (insn
9359 && NOTE_P (insn)
9360 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9361 insn = PREV_INSN (insn);
9362 if (insn
9363 && (LABEL_P (insn)
9364 || (NOTE_P (insn)
9365 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9366 fputs ("\tnop\n", file);
9368 #endif
9372 /* Extract the parts of an RTL expression that is a valid memory address
9373 for an instruction. Return 0 if the structure of the address is
9374 grossly off. Return -1 if the address contains ASHIFT, so it is not
9375 strictly valid, but is still used for computing the length of the lea instruction. */
9378 ix86_decompose_address (rtx addr, struct ix86_address *out)
9380 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9381 rtx base_reg, index_reg;
9382 HOST_WIDE_INT scale = 1;
9383 rtx scale_rtx = NULL_RTX;
9384 int retval = 1;
9385 enum ix86_address_seg seg = SEG_DEFAULT;
9387 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9388 base = addr;
9389 else if (GET_CODE (addr) == PLUS)
9391 rtx addends[4], op;
9392 int n = 0, i;
9394 op = addr;
9397 if (n >= 4)
9398 return 0;
9399 addends[n++] = XEXP (op, 1);
9400 op = XEXP (op, 0);
9402 while (GET_CODE (op) == PLUS);
9403 if (n >= 4)
9404 return 0;
9405 addends[n] = op;
9407 for (i = n; i >= 0; --i)
9409 op = addends[i];
9410 switch (GET_CODE (op))
9412 case MULT:
9413 if (index)
9414 return 0;
9415 index = XEXP (op, 0);
9416 scale_rtx = XEXP (op, 1);
9417 break;
9419 case UNSPEC:
9420 if (XINT (op, 1) == UNSPEC_TP
9421 && TARGET_TLS_DIRECT_SEG_REFS
9422 && seg == SEG_DEFAULT)
9423 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9424 else
9425 return 0;
9426 break;
9428 case REG:
9429 case SUBREG:
9430 if (!base)
9431 base = op;
9432 else if (!index)
9433 index = op;
9434 else
9435 return 0;
9436 break;
9438 case CONST:
9439 case CONST_INT:
9440 case SYMBOL_REF:
9441 case LABEL_REF:
9442 if (disp)
9443 return 0;
9444 disp = op;
9445 break;
9447 default:
9448 return 0;
9452 else if (GET_CODE (addr) == MULT)
9454 index = XEXP (addr, 0); /* index*scale */
9455 scale_rtx = XEXP (addr, 1);
9457 else if (GET_CODE (addr) == ASHIFT)
9459 rtx tmp;
9461 /* We're called for lea too, which implements ashift on occasion. */
9462 index = XEXP (addr, 0);
9463 tmp = XEXP (addr, 1);
9464 if (!CONST_INT_P (tmp))
9465 return 0;
9466 scale = INTVAL (tmp);
9467 if ((unsigned HOST_WIDE_INT) scale > 3)
9468 return 0;
9469 scale = 1 << scale;
9470 retval = -1;
9472 else
9473 disp = addr; /* displacement */
9475 /* Extract the integral value of scale. */
9476 if (scale_rtx)
9478 if (!CONST_INT_P (scale_rtx))
9479 return 0;
9480 scale = INTVAL (scale_rtx);
9483 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9484 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9486 /* Avoid useless 0 displacement. */
9487 if (disp == const0_rtx && (base || index))
9488 disp = NULL_RTX;
9490 /* Allow arg pointer and stack pointer as index if there is no scaling. */
9491 if (base_reg && index_reg && scale == 1
9492 && (index_reg == arg_pointer_rtx
9493 || index_reg == frame_pointer_rtx
9494 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9496 rtx tmp;
9497 tmp = base, base = index, index = tmp;
9498 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9501 /* Special case: %ebp cannot be encoded as a base without a displacement.
9502 Similarly %r13. */
9503 if (!disp
9504 && base_reg
9505 && (base_reg == hard_frame_pointer_rtx
9506 || base_reg == frame_pointer_rtx
9507 || base_reg == arg_pointer_rtx
9508 || (REG_P (base_reg)
9509 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9510 || REGNO (base_reg) == R13_REG))))
9511 disp = const0_rtx;
9513 /* Special case: on K6, [%esi] causes the instruction to be vector decoded.
9514 Avoid this by transforming it to [%esi+0].
9515 Reload calls address legitimization without cfun defined, so we need
9516 to test cfun for being non-NULL. */
9517 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9518 && base_reg && !index_reg && !disp
9519 && REG_P (base_reg)
9520 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9521 disp = const0_rtx;
9523 /* Special case: encode reg+reg instead of reg*2. */
9524 if (!base && index && scale == 2)
9525 base = index, base_reg = index_reg, scale = 1;
9527 /* Special case: scaling cannot be encoded without base or displacement. */
9528 if (!base && !disp && index && scale != 1)
9529 disp = const0_rtx;
9531 out->base = base;
9532 out->index = index;
9533 out->disp = disp;
9534 out->scale = scale;
9535 out->seg = seg;
9537 return retval;
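/* Illustrative example of the decomposition above (a sketch, not part of
   the original source): for the ia32 address "12(%esi,%ebx,4)", i.e. the RTX

     (plus:SI (plus:SI (mult:SI (reg:SI bx) (const_int 4))
		       (reg:SI si))
	      (const_int 12))

   ix86_decompose_address fills OUT with base = %esi, index = %ebx,
   scale = 4, disp = 12, seg = SEG_DEFAULT and returns 1.  */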
9540 /* Return cost of the memory address x.
9541 For i386, it is better to use a complex address than let gcc copy
9542 the address into a reg and make a new pseudo. But not if the address
9543 requires two regs - that would mean more pseudos with longer
9544 lifetimes. */
9545 static int
9546 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9548 struct ix86_address parts;
9549 int cost = 1;
9550 int ok = ix86_decompose_address (x, &parts);
9552 gcc_assert (ok);
9554 if (parts.base && GET_CODE (parts.base) == SUBREG)
9555 parts.base = SUBREG_REG (parts.base);
9556 if (parts.index && GET_CODE (parts.index) == SUBREG)
9557 parts.index = SUBREG_REG (parts.index);
9559 /* Attempt to minimize number of registers in the address. */
9560 if ((parts.base
9561 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9562 || (parts.index
9563 && (!REG_P (parts.index)
9564 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9565 cost++;
9567 if (parts.base
9568 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9569 && parts.index
9570 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9571 && parts.base != parts.index)
9572 cost++;
9574 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
9575 since its predecode logic can't detect the length of instructions
9576 and it degenerates to vector decoding. Increase the cost of such
9577 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
9578 to split such addresses or even refuse such addresses at all.
9580 The following addressing modes are affected:
9581 [base+scale*index]
9582 [scale*index+disp]
9583 [base+index]
9585 The first and last cases may be avoidable by explicitly coding the zero
9586 into the memory address, but I don't have an AMD-K6 machine handy to
9587 check this theory. */
9589 if (TARGET_K6
9590 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9591 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9592 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9593 cost += 10;
9595 return cost;
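/* Illustrative (hypothetical) cost examples for the function above: an
   address built from two hard registers such as "(%esi,%edi)" keeps the
   base cost of 1 (plus the +10 penalty when tuning a K6 for speed, since
   it is a base+index form without a displacement), while an address whose
   base and index are two distinct not-yet-allocated pseudos triggers both
   "minimize number of registers" checks and costs 3.  */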
9598 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
9599 this is used to form addresses to local data when -fPIC is in
9600 use. */
9602 static bool
9603 darwin_local_data_pic (rtx disp)
9605 return (GET_CODE (disp) == UNSPEC
9606 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9609 /* Determine if a given RTX is a valid constant. We already know this
9610 satisfies CONSTANT_P. */
9612 bool
9613 legitimate_constant_p (rtx x)
9615 switch (GET_CODE (x))
9617 case CONST:
9618 x = XEXP (x, 0);
9620 if (GET_CODE (x) == PLUS)
9622 if (!CONST_INT_P (XEXP (x, 1)))
9623 return false;
9624 x = XEXP (x, 0);
9627 if (TARGET_MACHO && darwin_local_data_pic (x))
9628 return true;
9630 /* Only some unspecs are valid as "constants". */
9631 if (GET_CODE (x) == UNSPEC)
9632 switch (XINT (x, 1))
9634 case UNSPEC_GOT:
9635 case UNSPEC_GOTOFF:
9636 case UNSPEC_PLTOFF:
9637 return TARGET_64BIT;
9638 case UNSPEC_TPOFF:
9639 case UNSPEC_NTPOFF:
9640 x = XVECEXP (x, 0, 0);
9641 return (GET_CODE (x) == SYMBOL_REF
9642 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9643 case UNSPEC_DTPOFF:
9644 x = XVECEXP (x, 0, 0);
9645 return (GET_CODE (x) == SYMBOL_REF
9646 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9647 default:
9648 return false;
9651 /* We must have drilled down to a symbol. */
9652 if (GET_CODE (x) == LABEL_REF)
9653 return true;
9654 if (GET_CODE (x) != SYMBOL_REF)
9655 return false;
9656 /* FALLTHRU */
9658 case SYMBOL_REF:
9659 /* TLS symbols are never valid. */
9660 if (SYMBOL_REF_TLS_MODEL (x))
9661 return false;
9663 /* DLLIMPORT symbols are never valid. */
9664 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9665 && SYMBOL_REF_DLLIMPORT_P (x))
9666 return false;
9667 break;
9669 case CONST_DOUBLE:
9670 if (GET_MODE (x) == TImode
9671 && x != CONST0_RTX (TImode)
9672 && !TARGET_64BIT)
9673 return false;
9674 break;
9676 case CONST_VECTOR:
9677 if (!standard_sse_constant_p (x))
9678 return false;
9680 default:
9681 break;
9684 /* Otherwise we handle everything else in the move patterns. */
9685 return true;
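/* Examples for the predicate above (illustrative only): a plain
   (const (plus (symbol_ref "foo") (const_int 4))) is accepted as long as
   "foo" is neither thread-local nor dllimport, whereas a nonzero TImode
   CONST_DOUBLE is rejected on ia32, where it cannot be moved directly.  */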
9688 /* Determine if it's legal to put X into the constant pool. This
9689 is not possible for the address of thread-local symbols, which
9690 is checked above. */
9692 static bool
9693 ix86_cannot_force_const_mem (rtx x)
9695 /* We can always put integral constants and vectors in memory. */
9696 switch (GET_CODE (x))
9698 case CONST_INT:
9699 case CONST_DOUBLE:
9700 case CONST_VECTOR:
9701 return false;
9703 default:
9704 break;
9706 return !legitimate_constant_p (x);
9710 /* Nonzero if the constant value X is a legitimate general operand
9711 when generating PIC code. It is given that flag_pic is on and
9712 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9714 bool
9715 legitimate_pic_operand_p (rtx x)
9717 rtx inner;
9719 switch (GET_CODE (x))
9721 case CONST:
9722 inner = XEXP (x, 0);
9723 if (GET_CODE (inner) == PLUS
9724 && CONST_INT_P (XEXP (inner, 1)))
9725 inner = XEXP (inner, 0);
9727 /* Only some unspecs are valid as "constants". */
9728 if (GET_CODE (inner) == UNSPEC)
9729 switch (XINT (inner, 1))
9731 case UNSPEC_GOT:
9732 case UNSPEC_GOTOFF:
9733 case UNSPEC_PLTOFF:
9734 return TARGET_64BIT;
9735 case UNSPEC_TPOFF:
9736 x = XVECEXP (inner, 0, 0);
9737 return (GET_CODE (x) == SYMBOL_REF
9738 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9739 case UNSPEC_MACHOPIC_OFFSET:
9740 return legitimate_pic_address_disp_p (x);
9741 default:
9742 return false;
9744 /* FALLTHRU */
9746 case SYMBOL_REF:
9747 case LABEL_REF:
9748 return legitimate_pic_address_disp_p (x);
9750 default:
9751 return true;
9755 /* Determine if a given CONST RTX is a valid memory displacement
9756 in PIC mode. */
9759 legitimate_pic_address_disp_p (rtx disp)
9761 bool saw_plus;
9763 /* In 64bit mode we can allow direct addresses of symbols and labels
9764 when they are not dynamic symbols. */
9765 if (TARGET_64BIT)
9767 rtx op0 = disp, op1;
9769 switch (GET_CODE (disp))
9771 case LABEL_REF:
9772 return true;
9774 case CONST:
9775 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9776 break;
9777 op0 = XEXP (XEXP (disp, 0), 0);
9778 op1 = XEXP (XEXP (disp, 0), 1);
9779 if (!CONST_INT_P (op1)
9780 || INTVAL (op1) >= 16*1024*1024
9781 || INTVAL (op1) < -16*1024*1024)
9782 break;
9783 if (GET_CODE (op0) == LABEL_REF)
9784 return true;
9785 if (GET_CODE (op0) != SYMBOL_REF)
9786 break;
9787 /* FALLTHRU */
9789 case SYMBOL_REF:
9790 /* TLS references should always be enclosed in UNSPEC. */
9791 if (SYMBOL_REF_TLS_MODEL (op0))
9792 return false;
9793 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9794 && ix86_cmodel != CM_LARGE_PIC)
9795 return true;
9796 break;
9798 default:
9799 break;
9802 if (GET_CODE (disp) != CONST)
9803 return 0;
9804 disp = XEXP (disp, 0);
9806 if (TARGET_64BIT)
9808 /* It is unsafe to allow PLUS expressions; this limits the allowed
9809 distance of GOT tables. We should not need these anyway. */
9810 if (GET_CODE (disp) != UNSPEC
9811 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9812 && XINT (disp, 1) != UNSPEC_GOTOFF
9813 && XINT (disp, 1) != UNSPEC_PLTOFF))
9814 return 0;
9816 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9817 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9818 return 0;
9819 return 1;
9822 saw_plus = false;
9823 if (GET_CODE (disp) == PLUS)
9825 if (!CONST_INT_P (XEXP (disp, 1)))
9826 return 0;
9827 disp = XEXP (disp, 0);
9828 saw_plus = true;
9831 if (TARGET_MACHO && darwin_local_data_pic (disp))
9832 return 1;
9834 if (GET_CODE (disp) != UNSPEC)
9835 return 0;
9837 switch (XINT (disp, 1))
9839 case UNSPEC_GOT:
9840 if (saw_plus)
9841 return false;
9842 /* We need to check for both symbols and labels because VxWorks loads
9843 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
9844 details. */
9845 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9846 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9847 case UNSPEC_GOTOFF:
9848 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
9849 While the ABI also specifies a 32bit relocation, we don't produce it in
9850 the small PIC model at all. */
9851 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9852 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9853 && !TARGET_64BIT)
9854 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9855 return false;
9856 case UNSPEC_GOTTPOFF:
9857 case UNSPEC_GOTNTPOFF:
9858 case UNSPEC_INDNTPOFF:
9859 if (saw_plus)
9860 return false;
9861 disp = XVECEXP (disp, 0, 0);
9862 return (GET_CODE (disp) == SYMBOL_REF
9863 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9864 case UNSPEC_NTPOFF:
9865 disp = XVECEXP (disp, 0, 0);
9866 return (GET_CODE (disp) == SYMBOL_REF
9867 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9868 case UNSPEC_DTPOFF:
9869 disp = XVECEXP (disp, 0, 0);
9870 return (GET_CODE (disp) == SYMBOL_REF
9871 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9874 return 0;
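/* Illustrative example for the predicate above: on ia32 the displacement
   used by "movl foo@GOTOFF(%ebx), %eax" is represented as

     (const:SI (unspec:SI [(symbol_ref:SI "foo")] UNSPEC_GOTOFF))

   and is accepted by the UNSPEC_GOTOFF case above as long as "foo"
   satisfies gotoff_operand.  */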
9877 /* Recognizes RTL expressions that are valid memory addresses for an
9878 instruction. The MODE argument is the machine mode for the MEM
9879 expression that wants to use this address.
9881 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
9882 convert common non-canonical forms to canonical form so that they will
9883 be recognized. */
9885 static bool
9886 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9887 rtx addr, bool strict)
9889 struct ix86_address parts;
9890 rtx base, index, disp;
9891 HOST_WIDE_INT scale;
9893 if (ix86_decompose_address (addr, &parts) <= 0)
9894 /* Decomposition failed. */
9895 return false;
9897 base = parts.base;
9898 index = parts.index;
9899 disp = parts.disp;
9900 scale = parts.scale;
9902 /* Validate base register.
9904 Don't allow SUBREG's that span more than a word here. It can lead to spill
9905 failures when the base is one word out of a two word structure, which is
9906 represented internally as a DImode int. */
9908 if (base)
9910 rtx reg;
9912 if (REG_P (base))
9913 reg = base;
9914 else if (GET_CODE (base) == SUBREG
9915 && REG_P (SUBREG_REG (base))
9916 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9917 <= UNITS_PER_WORD)
9918 reg = SUBREG_REG (base);
9919 else
9920 /* Base is not a register. */
9921 return false;
9923 if (GET_MODE (base) != Pmode)
9924 /* Base is not in Pmode. */
9925 return false;
9927 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9928 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9929 /* Base is not valid. */
9930 return false;
9933 /* Validate index register.
9935 Don't allow SUBREG's that span more than a word here -- same as above. */
9937 if (index)
9939 rtx reg;
9941 if (REG_P (index))
9942 reg = index;
9943 else if (GET_CODE (index) == SUBREG
9944 && REG_P (SUBREG_REG (index))
9945 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9946 <= UNITS_PER_WORD)
9947 reg = SUBREG_REG (index);
9948 else
9949 /* Index is not a register. */
9950 return false;
9952 if (GET_MODE (index) != Pmode)
9953 /* Index is not in Pmode. */
9954 return false;
9956 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9957 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9958 /* Index is not valid. */
9959 return false;
9962 /* Validate scale factor. */
9963 if (scale != 1)
9965 if (!index)
9966 /* Scale without index. */
9967 return false;
9969 if (scale != 2 && scale != 4 && scale != 8)
9970 /* Scale is not a valid multiplier. */
9971 return false;
9974 /* Validate displacement. */
9975 if (disp)
9977 if (GET_CODE (disp) == CONST
9978 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9979 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9980 switch (XINT (XEXP (disp, 0), 1))
9982 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
9983 used. While the ABI also specifies 32bit relocations, we don't produce
9984 them at all and use IP-relative addressing instead. */
9985 case UNSPEC_GOT:
9986 case UNSPEC_GOTOFF:
9987 gcc_assert (flag_pic);
9988 if (!TARGET_64BIT)
9989 goto is_legitimate_pic;
9991 /* 64bit address unspec. */
9992 return false;
9994 case UNSPEC_GOTPCREL:
9995 gcc_assert (flag_pic);
9996 goto is_legitimate_pic;
9998 case UNSPEC_GOTTPOFF:
9999 case UNSPEC_GOTNTPOFF:
10000 case UNSPEC_INDNTPOFF:
10001 case UNSPEC_NTPOFF:
10002 case UNSPEC_DTPOFF:
10003 break;
10005 default:
10006 /* Invalid address unspec. */
10007 return false;
10010 else if (SYMBOLIC_CONST (disp)
10011 && (flag_pic
10012 || (TARGET_MACHO
10013 #if TARGET_MACHO
10014 && MACHOPIC_INDIRECT
10015 && !machopic_operand_p (disp)
10016 #endif
10020 is_legitimate_pic:
10021 if (TARGET_64BIT && (index || base))
10023 /* foo@dtpoff(%rX) is ok. */
10024 if (GET_CODE (disp) != CONST
10025 || GET_CODE (XEXP (disp, 0)) != PLUS
10026 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
10027 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
10028 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
10029 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
10030 /* Non-constant pic memory reference. */
10031 return false;
10033 else if (! legitimate_pic_address_disp_p (disp))
10034 /* Displacement is an invalid pic construct. */
10035 return false;
10037 /* This code used to verify that a symbolic pic displacement
10038 includes the pic_offset_table_rtx register.
10040 While this is a good idea, unfortunately these constructs may
10041 be created by the "adds using lea" optimization for incorrect
10042 code like:
10044 int a;
10045 int foo(int i)
10047 return *(&a+i);
10050 This code is nonsensical, but results in addressing the
10051 GOT table with a pic_offset_table_rtx base. We can't
10052 just refuse it easily, since it gets matched by the
10053 "addsi3" pattern, which later gets split to lea when the
10054 output register differs from the input. While this
10055 could be handled by a separate addsi pattern for this case
10056 that never results in lea, disabling this test seems to be
10057 the easier and correct fix for the crash. */
10059 else if (GET_CODE (disp) != LABEL_REF
10060 && !CONST_INT_P (disp)
10061 && (GET_CODE (disp) != CONST
10062 || !legitimate_constant_p (disp))
10063 && (GET_CODE (disp) != SYMBOL_REF
10064 || !legitimate_constant_p (disp)))
10065 /* Displacement is not constant. */
10066 return false;
10067 else if (TARGET_64BIT
10068 && !x86_64_immediate_operand (disp, VOIDmode))
10069 /* Displacement is out of range. */
10070 return false;
10073 /* Everything looks valid. */
10074 return true;
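/* Illustrative examples for the checks above (not exhaustive): the ia32
   address "foo+12(%eax,%ebx,8)" passes with base = %eax, index = %ebx,
   scale = 8 and a symbolic displacement (when not compiling PIC code),
   while a scale of 3, a scaled %esp index, or a base register that is not
   in Pmode are all rejected.  */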
10077 /* Determine if a given RTX is a valid constant address. */
10079 bool
10080 constant_address_p (rtx x)
10082 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
10085 /* Return a unique alias set for the GOT. */
10087 static alias_set_type
10088 ix86_GOT_alias_set (void)
10090 static alias_set_type set = -1;
10091 if (set == -1)
10092 set = new_alias_set ();
10093 return set;
10096 /* Return a legitimate reference for ORIG (an address) using the
10097 register REG. If REG is 0, a new pseudo is generated.
10099 There are two types of references that must be handled:
10101 1. Global data references must load the address from the GOT, via
10102 the PIC reg. An insn is emitted to do this load, and the reg is
10103 returned.
10105 2. Static data references, constant pool addresses, and code labels
10106 compute the address as an offset from the GOT, whose base is in
10107 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10108 differentiate them from global data objects. The returned
10109 address is the PIC reg + an unspec constant.
10111 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10112 reg also appears in the address. */
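/* Rough sketch of the two cases (illustrative only; ia32, small PIC model):

     extern int glob;   -> load the address from the GOT:
			   movl glob@GOT(%ebx), %reg
			   i.e. (mem (plus %ebx (const (unspec [glob] GOT))))

     static int loc;    -> address is the PIC register plus an offset:
			   leal loc@GOTOFF(%ebx), %reg
			   i.e. (plus %ebx (const (unspec [loc] GOTOFF)))  */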
10114 static rtx
10115 legitimize_pic_address (rtx orig, rtx reg)
10117 rtx addr = orig;
10118 rtx new_rtx = orig;
10119 rtx base;
10121 #if TARGET_MACHO
10122 if (TARGET_MACHO && !TARGET_64BIT)
10124 if (reg == 0)
10125 reg = gen_reg_rtx (Pmode);
10126 /* Use the generic Mach-O PIC machinery. */
10127 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10129 #endif
10131 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10132 new_rtx = addr;
10133 else if (TARGET_64BIT
10134 && ix86_cmodel != CM_SMALL_PIC
10135 && gotoff_operand (addr, Pmode))
10137 rtx tmpreg;
10138 /* This symbol may be referenced via a displacement from the PIC
10139 base address (@GOTOFF). */
10141 if (reload_in_progress)
10142 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10143 if (GET_CODE (addr) == CONST)
10144 addr = XEXP (addr, 0);
10145 if (GET_CODE (addr) == PLUS)
10147 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10148 UNSPEC_GOTOFF);
10149 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10151 else
10152 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10153 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10154 if (!reg)
10155 tmpreg = gen_reg_rtx (Pmode);
10156 else
10157 tmpreg = reg;
10158 emit_move_insn (tmpreg, new_rtx);
10160 if (reg != 0)
10162 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10163 tmpreg, 1, OPTAB_DIRECT);
10164 new_rtx = reg;
10166 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10168 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10170 /* This symbol may be referenced via a displacement from the PIC
10171 base address (@GOTOFF). */
10173 if (reload_in_progress)
10174 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10175 if (GET_CODE (addr) == CONST)
10176 addr = XEXP (addr, 0);
10177 if (GET_CODE (addr) == PLUS)
10179 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10180 UNSPEC_GOTOFF);
10181 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10183 else
10184 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10185 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10186 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10188 if (reg != 0)
10190 emit_move_insn (reg, new_rtx);
10191 new_rtx = reg;
10194 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10195 /* We can't use @GOTOFF for text labels on VxWorks;
10196 see gotoff_operand. */
10197 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10199 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10201 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10202 return legitimize_dllimport_symbol (addr, true);
10203 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10204 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10205 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10207 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10208 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10212 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10214 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10215 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10216 new_rtx = gen_const_mem (Pmode, new_rtx);
10217 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10219 if (reg == 0)
10220 reg = gen_reg_rtx (Pmode);
10221 /* Use gen_movsi directly, otherwise the address is loaded
10222 into a register for CSE. We don't want to CSE these addresses;
10223 instead we CSE addresses from the GOT table, so skip this. */
10224 emit_insn (gen_movsi (reg, new_rtx));
10225 new_rtx = reg;
10227 else
10229 /* This symbol must be referenced via a load from the
10230 Global Offset Table (@GOT). */
10232 if (reload_in_progress)
10233 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10234 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10235 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10236 if (TARGET_64BIT)
10237 new_rtx = force_reg (Pmode, new_rtx);
10238 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10239 new_rtx = gen_const_mem (Pmode, new_rtx);
10240 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10242 if (reg == 0)
10243 reg = gen_reg_rtx (Pmode);
10244 emit_move_insn (reg, new_rtx);
10245 new_rtx = reg;
10248 else
10250 if (CONST_INT_P (addr)
10251 && !x86_64_immediate_operand (addr, VOIDmode))
10253 if (reg)
10255 emit_move_insn (reg, addr);
10256 new_rtx = reg;
10258 else
10259 new_rtx = force_reg (Pmode, addr);
10261 else if (GET_CODE (addr) == CONST)
10263 addr = XEXP (addr, 0);
10265 /* We must match stuff we generate before. Assume the only
10266 unspecs that can get here are ours. Not that we could do
10267 anything with them anyway.... */
10268 if (GET_CODE (addr) == UNSPEC
10269 || (GET_CODE (addr) == PLUS
10270 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10271 return orig;
10272 gcc_assert (GET_CODE (addr) == PLUS);
10274 if (GET_CODE (addr) == PLUS)
10276 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10278 /* Check first to see if this is a constant offset from a @GOTOFF
10279 symbol reference. */
10280 if (gotoff_operand (op0, Pmode)
10281 && CONST_INT_P (op1))
10283 if (!TARGET_64BIT)
10285 if (reload_in_progress)
10286 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10287 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10288 UNSPEC_GOTOFF);
10289 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10290 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10291 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10293 if (reg != 0)
10295 emit_move_insn (reg, new_rtx);
10296 new_rtx = reg;
10299 else
10301 if (INTVAL (op1) < -16*1024*1024
10302 || INTVAL (op1) >= 16*1024*1024)
10304 if (!x86_64_immediate_operand (op1, Pmode))
10305 op1 = force_reg (Pmode, op1);
10306 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10310 else
10312 base = legitimize_pic_address (XEXP (addr, 0), reg);
10313 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10314 base == reg ? NULL_RTX : reg);
10316 if (CONST_INT_P (new_rtx))
10317 new_rtx = plus_constant (base, INTVAL (new_rtx));
10318 else
10320 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10322 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10323 new_rtx = XEXP (new_rtx, 1);
10325 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10330 return new_rtx;
10333 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10335 static rtx
10336 get_thread_pointer (int to_reg)
10338 rtx tp, reg, insn;
10340 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10341 if (!to_reg)
10342 return tp;
10344 reg = gen_reg_rtx (Pmode);
10345 insn = gen_rtx_SET (VOIDmode, reg, tp);
10346 insn = emit_insn (insn);
10348 return reg;
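/* Note (illustrative): the UNSPEC_TP reference built above is what later
   allows thread-pointer-relative addresses such as %gs:0 (ia32) or %fs:0
   (x86-64) on GNU/Linux; ix86_decompose_address maps it to SEG_GS or
   SEG_FS when TARGET_TLS_DIRECT_SEG_REFS is enabled.  */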
10351 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10352 false if we expect this to be used for a memory address and true if
10353 we expect to load the address into a register. */
10355 static rtx
10356 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10358 rtx dest, base, off, pic, tp;
10359 int type;
10361 switch (model)
10363 case TLS_MODEL_GLOBAL_DYNAMIC:
10364 dest = gen_reg_rtx (Pmode);
10365 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10367 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10369 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10371 start_sequence ();
10372 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10373 insns = get_insns ();
10374 end_sequence ();
10376 RTL_CONST_CALL_P (insns) = 1;
10377 emit_libcall_block (insns, dest, rax, x);
10379 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10380 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10381 else
10382 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10384 if (TARGET_GNU2_TLS)
10386 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10388 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10390 break;
10392 case TLS_MODEL_LOCAL_DYNAMIC:
10393 base = gen_reg_rtx (Pmode);
10394 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10396 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10398 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10400 start_sequence ();
10401 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10402 insns = get_insns ();
10403 end_sequence ();
10405 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10406 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10407 RTL_CONST_CALL_P (insns) = 1;
10408 emit_libcall_block (insns, base, rax, note);
10410 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10411 emit_insn (gen_tls_local_dynamic_base_64 (base));
10412 else
10413 emit_insn (gen_tls_local_dynamic_base_32 (base));
10415 if (TARGET_GNU2_TLS)
10417 rtx x = ix86_tls_module_base ();
10419 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10420 gen_rtx_MINUS (Pmode, x, tp));
10423 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10424 off = gen_rtx_CONST (Pmode, off);
10426 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10428 if (TARGET_GNU2_TLS)
10430 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10432 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10435 break;
10437 case TLS_MODEL_INITIAL_EXEC:
10438 if (TARGET_64BIT)
10440 pic = NULL;
10441 type = UNSPEC_GOTNTPOFF;
10443 else if (flag_pic)
10445 if (reload_in_progress)
10446 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10447 pic = pic_offset_table_rtx;
10448 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10450 else if (!TARGET_ANY_GNU_TLS)
10452 pic = gen_reg_rtx (Pmode);
10453 emit_insn (gen_set_got (pic));
10454 type = UNSPEC_GOTTPOFF;
10456 else
10458 pic = NULL;
10459 type = UNSPEC_INDNTPOFF;
10462 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10463 off = gen_rtx_CONST (Pmode, off);
10464 if (pic)
10465 off = gen_rtx_PLUS (Pmode, pic, off);
10466 off = gen_const_mem (Pmode, off);
10467 set_mem_alias_set (off, ix86_GOT_alias_set ());
10469 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10471 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10472 off = force_reg (Pmode, off);
10473 return gen_rtx_PLUS (Pmode, base, off);
10475 else
10477 base = get_thread_pointer (true);
10478 dest = gen_reg_rtx (Pmode);
10479 emit_insn (gen_subsi3 (dest, base, off));
10481 break;
10483 case TLS_MODEL_LOCAL_EXEC:
10484 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10485 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10486 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10487 off = gen_rtx_CONST (Pmode, off);
10489 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10491 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10492 return gen_rtx_PLUS (Pmode, base, off);
10494 else
10496 base = get_thread_pointer (true);
10497 dest = gen_reg_rtx (Pmode);
10498 emit_insn (gen_subsi3 (dest, base, off));
10500 break;
10502 default:
10503 gcc_unreachable ();
10506 return dest;
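/* Illustrative end-to-end example (GNU TLS, local-exec model, ia32; a
   sketch, not taken from the original sources):

     __thread int counter;
     int get (void) { return counter; }

   goes through the TLS_MODEL_LOCAL_EXEC case above and ends up as roughly

     movl %gs:counter@ntpoff, %eax

   i.e. the UNSPEC_NTPOFF constant added to the thread pointer.  */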
10509 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10510 to symbol DECL. */
10512 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10513 htab_t dllimport_map;
10515 static tree
10516 get_dllimport_decl (tree decl)
10518 struct tree_map *h, in;
10519 void **loc;
10520 const char *name;
10521 const char *prefix;
10522 size_t namelen, prefixlen;
10523 char *imp_name;
10524 tree to;
10525 rtx rtl;
10527 if (!dllimport_map)
10528 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10530 in.hash = htab_hash_pointer (decl);
10531 in.base.from = decl;
10532 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10533 h = (struct tree_map *) *loc;
10534 if (h)
10535 return h->to;
10537 *loc = h = GGC_NEW (struct tree_map);
10538 h->hash = in.hash;
10539 h->base.from = decl;
10540 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10541 VAR_DECL, NULL, ptr_type_node);
10542 DECL_ARTIFICIAL (to) = 1;
10543 DECL_IGNORED_P (to) = 1;
10544 DECL_EXTERNAL (to) = 1;
10545 TREE_READONLY (to) = 1;
10547 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10548 name = targetm.strip_name_encoding (name);
10549 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10550 ? "*__imp_" : "*__imp__";
10551 namelen = strlen (name);
10552 prefixlen = strlen (prefix);
10553 imp_name = (char *) alloca (namelen + prefixlen + 1);
10554 memcpy (imp_name, prefix, prefixlen);
10555 memcpy (imp_name + prefixlen, name, namelen + 1);
10557 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10558 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10559 SET_SYMBOL_REF_DECL (rtl, to);
10560 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10562 rtl = gen_const_mem (Pmode, rtl);
10563 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10565 SET_DECL_RTL (to, rtl);
10566 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10568 return to;
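/* Example of the name mangling above (hypothetical symbol): for a
   dllimport declaration of "bar" on a target whose user_label_prefix is
   "_", the prefix "*__imp__" is chosen and the created decl's RTL is a
   memory reference through the symbol "__imp__bar" (the leading '*' only
   suppresses further prefixing by the assembler output machinery).  */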
10571 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10572 true if we require the result to be a register. */
10574 static rtx
10575 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10577 tree imp_decl;
10578 rtx x;
10580 gcc_assert (SYMBOL_REF_DECL (symbol));
10581 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10583 x = DECL_RTL (imp_decl);
10584 if (want_reg)
10585 x = force_reg (Pmode, x);
10586 return x;
10589 /* Try machine-dependent ways of modifying an illegitimate address
10590 to be legitimate. If we find one, return the new, valid address.
10591 This macro is used in only one place: `memory_address' in explow.c.
10593 OLDX is the address as it was before break_out_memory_refs was called.
10594 In some cases it is useful to look at this to decide what needs to be done.
10596 It is always safe for this macro to do nothing. It exists to recognize
10597 opportunities to optimize the output.
10599 For the 80386, we handle X+REG by loading X into a register R and
10600 using R+REG. R will go in a general reg and indexing will be used.
10601 However, if REG is a broken-out memory address or multiplication,
10602 nothing needs to be done because REG can certainly go in a general reg.
10604 When -fpic is used, special handling is needed for symbolic references.
10605 See comments by legitimize_pic_address in i386.c for details. */
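/* Small illustrative example of the canonicalizations performed below
   (a sketch, not from the original source): an address such as

     (plus (ashift (reg X) (const_int 2)) (reg Y))

   is first rewritten into (plus (mult (reg X) (const_int 4)) (reg Y)),
   which matches the base+index*scale form that
   ix86_legitimate_address_p accepts.  */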
10607 static rtx
10608 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10609 enum machine_mode mode)
10611 int changed = 0;
10612 unsigned log;
10614 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10615 if (log)
10616 return legitimize_tls_address (x, (enum tls_model) log, false);
10617 if (GET_CODE (x) == CONST
10618 && GET_CODE (XEXP (x, 0)) == PLUS
10619 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10620 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10622 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10623 (enum tls_model) log, false);
10624 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10627 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10629 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10630 return legitimize_dllimport_symbol (x, true);
10631 if (GET_CODE (x) == CONST
10632 && GET_CODE (XEXP (x, 0)) == PLUS
10633 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10634 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10636 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10637 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10641 if (flag_pic && SYMBOLIC_CONST (x))
10642 return legitimize_pic_address (x, 0);
10644 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10645 if (GET_CODE (x) == ASHIFT
10646 && CONST_INT_P (XEXP (x, 1))
10647 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10649 changed = 1;
10650 log = INTVAL (XEXP (x, 1));
10651 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10652 GEN_INT (1 << log));
10655 if (GET_CODE (x) == PLUS)
10657 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10659 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10660 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10661 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10663 changed = 1;
10664 log = INTVAL (XEXP (XEXP (x, 0), 1));
10665 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10666 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10667 GEN_INT (1 << log));
10670 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10671 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10672 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10674 changed = 1;
10675 log = INTVAL (XEXP (XEXP (x, 1), 1));
10676 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10677 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10678 GEN_INT (1 << log));
10681 /* Put multiply first if it isn't already. */
10682 if (GET_CODE (XEXP (x, 1)) == MULT)
10684 rtx tmp = XEXP (x, 0);
10685 XEXP (x, 0) = XEXP (x, 1);
10686 XEXP (x, 1) = tmp;
10687 changed = 1;
10690 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10691 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10692 created by virtual register instantiation, register elimination, and
10693 similar optimizations. */
10694 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10696 changed = 1;
10697 x = gen_rtx_PLUS (Pmode,
10698 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10699 XEXP (XEXP (x, 1), 0)),
10700 XEXP (XEXP (x, 1), 1));
10703 /* Canonicalize
10704 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10705 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10706 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10707 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10708 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10709 && CONSTANT_P (XEXP (x, 1)))
10711 rtx constant;
10712 rtx other = NULL_RTX;
10714 if (CONST_INT_P (XEXP (x, 1)))
10716 constant = XEXP (x, 1);
10717 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10719 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10721 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10722 other = XEXP (x, 1);
10724 else
10725 constant = 0;
10727 if (constant)
10729 changed = 1;
10730 x = gen_rtx_PLUS (Pmode,
10731 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10732 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10733 plus_constant (other, INTVAL (constant)));
10737 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10738 return x;
10740 if (GET_CODE (XEXP (x, 0)) == MULT)
10742 changed = 1;
10743 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10746 if (GET_CODE (XEXP (x, 1)) == MULT)
10748 changed = 1;
10749 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10752 if (changed
10753 && REG_P (XEXP (x, 1))
10754 && REG_P (XEXP (x, 0)))
10755 return x;
10757 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10759 changed = 1;
10760 x = legitimize_pic_address (x, 0);
10763 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10764 return x;
10766 if (REG_P (XEXP (x, 0)))
10768 rtx temp = gen_reg_rtx (Pmode);
10769 rtx val = force_operand (XEXP (x, 1), temp);
10770 if (val != temp)
10771 emit_move_insn (temp, val);
10773 XEXP (x, 1) = temp;
10774 return x;
10777 else if (REG_P (XEXP (x, 1)))
10779 rtx temp = gen_reg_rtx (Pmode);
10780 rtx val = force_operand (XEXP (x, 0), temp);
10781 if (val != temp)
10782 emit_move_insn (temp, val);
10784 XEXP (x, 0) = temp;
10785 return x;
10789 return x;
10792 /* Print an integer constant expression in assembler syntax. Addition
10793 and subtraction are the only arithmetic that may appear in these
10794 expressions. FILE is the stdio stream to write to, X is the rtx, and
10795 CODE is the operand print code from the output string. */
10797 static void
10798 output_pic_addr_const (FILE *file, rtx x, int code)
10800 char buf[256];
10802 switch (GET_CODE (x))
10804 case PC:
10805 gcc_assert (flag_pic);
10806 putc ('.', file);
10807 break;
10809 case SYMBOL_REF:
10810 if (! TARGET_MACHO || TARGET_64BIT)
10811 output_addr_const (file, x);
10812 else
10814 const char *name = XSTR (x, 0);
10816 /* Mark the decl as referenced so that cgraph will
10817 output the function. */
10818 if (SYMBOL_REF_DECL (x))
10819 mark_decl_referenced (SYMBOL_REF_DECL (x));
10821 #if TARGET_MACHO
10822 if (MACHOPIC_INDIRECT
10823 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10824 name = machopic_indirection_name (x, /*stub_p=*/true);
10825 #endif
10826 assemble_name (file, name);
10828 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10829 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10830 fputs ("@PLT", file);
10831 break;
10833 case LABEL_REF:
10834 x = XEXP (x, 0);
10835 /* FALLTHRU */
10836 case CODE_LABEL:
10837 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10838 assemble_name (asm_out_file, buf);
10839 break;
10841 case CONST_INT:
10842 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10843 break;
10845 case CONST:
10846 /* This used to output parentheses around the expression,
10847 but that does not work on the 386 (either ATT or BSD assembler). */
10848 output_pic_addr_const (file, XEXP (x, 0), code);
10849 break;
10851 case CONST_DOUBLE:
10852 if (GET_MODE (x) == VOIDmode)
10854 /* We can use %d if the number is <32 bits and positive. */
10855 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10856 fprintf (file, "0x%lx%08lx",
10857 (unsigned long) CONST_DOUBLE_HIGH (x),
10858 (unsigned long) CONST_DOUBLE_LOW (x));
10859 else
10860 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10862 else
10863 /* We can't handle floating point constants;
10864 PRINT_OPERAND must handle them. */
10865 output_operand_lossage ("floating constant misused");
10866 break;
10868 case PLUS:
10869 /* Some assemblers need integer constants to appear first. */
10870 if (CONST_INT_P (XEXP (x, 0)))
10872 output_pic_addr_const (file, XEXP (x, 0), code);
10873 putc ('+', file);
10874 output_pic_addr_const (file, XEXP (x, 1), code);
10876 else
10878 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10879 output_pic_addr_const (file, XEXP (x, 1), code);
10880 putc ('+', file);
10881 output_pic_addr_const (file, XEXP (x, 0), code);
10883 break;
10885 case MINUS:
10886 if (!TARGET_MACHO)
10887 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10888 output_pic_addr_const (file, XEXP (x, 0), code);
10889 putc ('-', file);
10890 output_pic_addr_const (file, XEXP (x, 1), code);
10891 if (!TARGET_MACHO)
10892 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10893 break;
10895 case UNSPEC:
10896 gcc_assert (XVECLEN (x, 0) == 1);
10897 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10898 switch (XINT (x, 1))
10900 case UNSPEC_GOT:
10901 fputs ("@GOT", file);
10902 break;
10903 case UNSPEC_GOTOFF:
10904 fputs ("@GOTOFF", file);
10905 break;
10906 case UNSPEC_PLTOFF:
10907 fputs ("@PLTOFF", file);
10908 break;
10909 case UNSPEC_GOTPCREL:
10910 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10911 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10912 break;
10913 case UNSPEC_GOTTPOFF:
10914 /* FIXME: This might be @TPOFF in Sun ld too. */
10915 fputs ("@gottpoff", file);
10916 break;
10917 case UNSPEC_TPOFF:
10918 fputs ("@tpoff", file);
10919 break;
10920 case UNSPEC_NTPOFF:
10921 if (TARGET_64BIT)
10922 fputs ("@tpoff", file);
10923 else
10924 fputs ("@ntpoff", file);
10925 break;
10926 case UNSPEC_DTPOFF:
10927 fputs ("@dtpoff", file);
10928 break;
10929 case UNSPEC_GOTNTPOFF:
10930 if (TARGET_64BIT)
10931 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10932 "@gottpoff(%rip)": "@gottpoff[rip]", file);
10933 else
10934 fputs ("@gotntpoff", file);
10935 break;
10936 case UNSPEC_INDNTPOFF:
10937 fputs ("@indntpoff", file);
10938 break;
10939 #if TARGET_MACHO
10940 case UNSPEC_MACHOPIC_OFFSET:
10941 putc ('-', file);
10942 machopic_output_function_base_name (file);
10943 break;
10944 #endif
10945 default:
10946 output_operand_lossage ("invalid UNSPEC as operand");
10947 break;
10949 break;
10951 default:
10952 output_operand_lossage ("invalid expression as operand");
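/* Example outputs of the routine above (illustrative): the RTX
   (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)) prints as
   "foo@GOTOFF", and (const (plus (symbol_ref "foo") (const_int 4)))
   prints as "4+foo", since some assemblers need the integer constant
   first.  */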
10956 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10957 We need to emit DTP-relative relocations. */
10959 static void ATTRIBUTE_UNUSED
10960 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10962 fputs (ASM_LONG, file);
10963 output_addr_const (file, x);
10964 fputs ("@dtpoff", file);
10965 switch (size)
10967 case 4:
10968 break;
10969 case 8:
10970 fputs (", 0", file);
10971 break;
10972 default:
10973 gcc_unreachable ();
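/* Illustrative output of the hook above (assuming ASM_LONG expands to a
   ".long" directive): for a 4-byte entry it emits

	.long	foo@dtpoff

   and for an 8-byte entry the same directive followed by ", 0" for the
   upper half.  */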
10977 /* Return true if X is a representation of the PIC register. This copes
10978 with calls from ix86_find_base_term, where the register might have
10979 been replaced by a cselib value. */
10981 static bool
10982 ix86_pic_register_p (rtx x)
10984 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
10985 return (pic_offset_table_rtx
10986 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10987 else
10988 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10991 /* In the name of slightly smaller debug output, and to cater to
10992 general assembler lossage, recognize PIC+GOTOFF and turn it back
10993 into a direct symbol reference.
10995 On Darwin, this is necessary to avoid a crash, because Darwin
10996 has a different PIC label for each routine but the DWARF debugging
10997 information is not associated with any particular routine, so it's
10998 necessary to remove references to the PIC label from RTL stored by
10999 the DWARF output code. */
11001 static rtx
11002 ix86_delegitimize_address (rtx x)
11004 rtx orig_x = delegitimize_mem_from_attrs (x);
11005 /* addend is NULL or some rtx if x is something+GOTOFF where
11006 something doesn't include the PIC register. */
11007 rtx addend = NULL_RTX;
11008 /* reg_addend is NULL or a multiple of some register. */
11009 rtx reg_addend = NULL_RTX;
11010 /* const_addend is NULL or a const_int. */
11011 rtx const_addend = NULL_RTX;
11012 /* This is the result, or NULL. */
11013 rtx result = NULL_RTX;
11015 x = orig_x;
11017 if (MEM_P (x))
11018 x = XEXP (x, 0);
11020 if (TARGET_64BIT)
11022 if (GET_CODE (x) != CONST
11023 || GET_CODE (XEXP (x, 0)) != UNSPEC
11024 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
11025 || !MEM_P (orig_x))
11026 return orig_x;
11027 return XVECEXP (XEXP (x, 0), 0, 0);
11030 if (GET_CODE (x) != PLUS
11031 || GET_CODE (XEXP (x, 1)) != CONST)
11032 return orig_x;
11034 if (ix86_pic_register_p (XEXP (x, 0)))
11035 /* %ebx + GOT/GOTOFF */
11037 else if (GET_CODE (XEXP (x, 0)) == PLUS)
11039 /* %ebx + %reg * scale + GOT/GOTOFF */
11040 reg_addend = XEXP (x, 0);
11041 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
11042 reg_addend = XEXP (reg_addend, 1);
11043 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
11044 reg_addend = XEXP (reg_addend, 0);
11045 else
11047 reg_addend = NULL_RTX;
11048 addend = XEXP (x, 0);
11051 else
11052 addend = XEXP (x, 0);
11054 x = XEXP (XEXP (x, 1), 0);
11055 if (GET_CODE (x) == PLUS
11056 && CONST_INT_P (XEXP (x, 1)))
11058 const_addend = XEXP (x, 1);
11059 x = XEXP (x, 0);
11062 if (GET_CODE (x) == UNSPEC
11063 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11064 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11065 result = XVECEXP (x, 0, 0);
11067 if (TARGET_MACHO && darwin_local_data_pic (x)
11068 && !MEM_P (orig_x))
11069 result = XVECEXP (x, 0, 0);
11071 if (! result)
11072 return orig_x;
11074 if (const_addend)
11075 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11076 if (reg_addend)
11077 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11078 if (addend)
11080 /* If the rest of original X doesn't involve the PIC register, add
11081 addend and subtract pic_offset_table_rtx. This can happen e.g.
11082 for code like:
11083 leal (%ebx, %ecx, 4), %ecx
11085 movl foo@GOTOFF(%ecx), %edx
11086 in which case we return (%ecx - %ebx) + foo. */
11087 if (pic_offset_table_rtx)
11088 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11089 pic_offset_table_rtx),
11090 result);
11091 else
11092 return orig_x;
11094 return result;
11097 /* If X is a machine specific address (i.e. a symbol or label being
11098 referenced as a displacement from the GOT implemented using an
11099 UNSPEC), then return the base term. Otherwise return X. */
11102 ix86_find_base_term (rtx x)
11104 rtx term;
11106 if (TARGET_64BIT)
11108 if (GET_CODE (x) != CONST)
11109 return x;
11110 term = XEXP (x, 0);
11111 if (GET_CODE (term) == PLUS
11112 && (CONST_INT_P (XEXP (term, 1))
11113 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11114 term = XEXP (term, 0);
11115 if (GET_CODE (term) != UNSPEC
11116 || XINT (term, 1) != UNSPEC_GOTPCREL)
11117 return x;
11119 return XVECEXP (term, 0, 0);
11122 return ix86_delegitimize_address (x);
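/* Print to FILE the one- or two-letter condition-code suffix ("e", "ne",
   "g", "b", ...) for comparison CODE in flags mode MODE.  REVERSE selects
   the reversed condition; FP selects the spellings that are valid for
   fcmov (e.g. "nbe" instead of "a") where the two differ.  */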
11125 static void
11126 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11127 int fp, FILE *file)
11129 const char *suffix;
11131 if (mode == CCFPmode || mode == CCFPUmode)
11133 code = ix86_fp_compare_code_to_integer (code);
11134 mode = CCmode;
11136 if (reverse)
11137 code = reverse_condition (code);
11139 switch (code)
11141 case EQ:
11142 switch (mode)
11144 case CCAmode:
11145 suffix = "a";
11146 break;
11148 case CCCmode:
11149 suffix = "c";
11150 break;
11152 case CCOmode:
11153 suffix = "o";
11154 break;
11156 case CCSmode:
11157 suffix = "s";
11158 break;
11160 default:
11161 suffix = "e";
11163 break;
11164 case NE:
11165 switch (mode)
11167 case CCAmode:
11168 suffix = "na";
11169 break;
11171 case CCCmode:
11172 suffix = "nc";
11173 break;
11175 case CCOmode:
11176 suffix = "no";
11177 break;
11179 case CCSmode:
11180 suffix = "ns";
11181 break;
11183 default:
11184 suffix = "ne";
11186 break;
11187 case GT:
11188 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11189 suffix = "g";
11190 break;
11191 case GTU:
11192 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11193 Those same assemblers have the same but opposite lossage on cmov. */
11194 if (mode == CCmode)
11195 suffix = fp ? "nbe" : "a";
11196 else if (mode == CCCmode)
11197 suffix = "b";
11198 else
11199 gcc_unreachable ();
11200 break;
11201 case LT:
11202 switch (mode)
11204 case CCNOmode:
11205 case CCGOCmode:
11206 suffix = "s";
11207 break;
11209 case CCmode:
11210 case CCGCmode:
11211 suffix = "l";
11212 break;
11214 default:
11215 gcc_unreachable ();
11217 break;
11218 case LTU:
11219 gcc_assert (mode == CCmode || mode == CCCmode);
11220 suffix = "b";
11221 break;
11222 case GE:
11223 switch (mode)
11225 case CCNOmode:
11226 case CCGOCmode:
11227 suffix = "ns";
11228 break;
11230 case CCmode:
11231 case CCGCmode:
11232 suffix = "ge";
11233 break;
11235 default:
11236 gcc_unreachable ();
11238 break;
11239 case GEU:
11240 /* ??? As above. */
11241 gcc_assert (mode == CCmode || mode == CCCmode);
11242 suffix = fp ? "nb" : "ae";
11243 break;
11244 case LE:
11245 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11246 suffix = "le";
11247 break;
11248 case LEU:
11249 /* ??? As above. */
11250 if (mode == CCmode)
11251 suffix = "be";
11252 else if (mode == CCCmode)
11253 suffix = fp ? "nb" : "ae";
11254 else
11255 gcc_unreachable ();
11256 break;
11257 case UNORDERED:
11258 suffix = fp ? "u" : "p";
11259 break;
11260 case ORDERED:
11261 suffix = fp ? "nu" : "np";
11262 break;
11263 default:
11264 gcc_unreachable ();
11266 fputs (suffix, file);
11269 /* Print the name of register X to FILE based on its machine mode and number.
11270 If CODE is 'w', pretend the mode is HImode.
11271 If CODE is 'b', pretend the mode is QImode.
11272 If CODE is 'k', pretend the mode is SImode.
11273 If CODE is 'q', pretend the mode is DImode.
11274 If CODE is 'x', pretend the mode is V4SFmode.
11275 If CODE is 't', pretend the mode is V8SFmode.
11276 If CODE is 'h', pretend the reg is the 'high' byte register.
11277 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
11278 If CODE is 'd', duplicate the operand for AVX instruction.
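   For example, register 0 (ax) prints as "al", "ax", "eax", "rax" or "ah"
   for codes 'b', 'w', 'k', 'q' and 'h' respectively.  */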
11281 void
11282 print_reg (rtx x, int code, FILE *file)
11284 const char *reg;
11285 bool duplicated = code == 'd' && TARGET_AVX;
11287 gcc_assert (x == pc_rtx
11288 || (REGNO (x) != ARG_POINTER_REGNUM
11289 && REGNO (x) != FRAME_POINTER_REGNUM
11290 && REGNO (x) != FLAGS_REG
11291 && REGNO (x) != FPSR_REG
11292 && REGNO (x) != FPCR_REG));
11294 if (ASSEMBLER_DIALECT == ASM_ATT)
11295 putc ('%', file);
11297 if (x == pc_rtx)
11299 gcc_assert (TARGET_64BIT);
11300 fputs ("rip", file);
11301 return;
11304 if (code == 'w' || MMX_REG_P (x))
11305 code = 2;
11306 else if (code == 'b')
11307 code = 1;
11308 else if (code == 'k')
11309 code = 4;
11310 else if (code == 'q')
11311 code = 8;
11312 else if (code == 'y')
11313 code = 3;
11314 else if (code == 'h')
11315 code = 0;
11316 else if (code == 'x')
11317 code = 16;
11318 else if (code == 't')
11319 code = 32;
11320 else
11321 code = GET_MODE_SIZE (GET_MODE (x));
11323 /* Irritatingly, AMD extended registers use a different naming convention
 11324 from the normal registers. */
11325 if (REX_INT_REG_P (x))
11327 gcc_assert (TARGET_64BIT);
11328 switch (code)
11330 case 0:
11331 error ("extended registers have no high halves");
11332 break;
11333 case 1:
11334 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11335 break;
11336 case 2:
11337 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11338 break;
11339 case 4:
11340 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11341 break;
11342 case 8:
11343 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11344 break;
11345 default:
11346 error ("unsupported operand size for extended register");
11347 break;
11349 return;
11352 reg = NULL;
11353 switch (code)
11355 case 3:
11356 if (STACK_TOP_P (x))
11358 reg = "st(0)";
11359 break;
11361 /* FALLTHRU */
11362 case 8:
11363 case 4:
11364 case 12:
11365 if (! ANY_FP_REG_P (x))
11366 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11367 /* FALLTHRU */
11368 case 16:
11369 case 2:
11370 normal:
11371 reg = hi_reg_name[REGNO (x)];
11372 break;
11373 case 1:
11374 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11375 goto normal;
11376 reg = qi_reg_name[REGNO (x)];
11377 break;
11378 case 0:
11379 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11380 goto normal;
11381 reg = qi_high_reg_name[REGNO (x)];
11382 break;
11383 case 32:
11384 if (SSE_REG_P (x))
11386 gcc_assert (!duplicated);
11387 putc ('y', file);
11388 fputs (hi_reg_name[REGNO (x)] + 1, file);
11389 return;
11391 break;
11392 default:
11393 gcc_unreachable ();
11396 fputs (reg, file);
11397 if (duplicated)
11399 if (ASSEMBLER_DIALECT == ASM_ATT)
11400 fprintf (file, ", %%%s", reg);
11401 else
11402 fprintf (file, ", %s", reg);
11406 /* Locate some local-dynamic symbol still in use by this function
11407 so that we can print its name in some tls_local_dynamic_base
11408 pattern. */
11410 static int
11411 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11413 rtx x = *px;
11415 if (GET_CODE (x) == SYMBOL_REF
11416 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11418 cfun->machine->some_ld_name = XSTR (x, 0);
11419 return 1;
11422 return 0;
11425 static const char *
11426 get_some_local_dynamic_name (void)
11428 rtx insn;
11430 if (cfun->machine->some_ld_name)
11431 return cfun->machine->some_ld_name;
11433 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11434 if (INSN_P (insn)
11435 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11436 return cfun->machine->some_ld_name;
11438 return NULL;
11441 /* Meaning of CODE:
11442 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11443 C -- print opcode suffix for set/cmov insn.
11444 c -- like C, but print reversed condition
11445 F,f -- likewise, but for floating-point.
11446 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11447 otherwise nothing
11448 R -- print the prefix for register names.
11449 z -- print the opcode suffix for the size of the current operand.
11450 Z -- likewise, with special suffixes for x87 instructions.
11451 * -- print a star (in certain assembler syntax)
11452 A -- print an absolute memory reference.
11453 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11454 s -- print a shift double count, followed by the assembler's argument
 11455 delimiter.
11456 b -- print the QImode name of the register for the indicated operand.
11457 %b0 would print %al if operands[0] is reg 0.
11458 w -- likewise, print the HImode name of the register.
11459 k -- likewise, print the SImode name of the register.
11460 q -- likewise, print the DImode name of the register.
11461 x -- likewise, print the V4SFmode name of the register.
11462 t -- likewise, print the V8SFmode name of the register.
11463 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11464 y -- print "st(0)" instead of "st" as a register.
11465 d -- print duplicated register operand for AVX instruction.
11466 D -- print condition for SSE cmp instruction.
11467 P -- if PIC, print an @PLT suffix.
11468 X -- don't print any sort of PIC '@' suffix for a symbol.
11469 & -- print some in-use local-dynamic symbol name.
11470 H -- print a memory address offset by 8; used for sse high-parts
11471 Y -- print condition for XOP pcom* instruction.
11472 + -- print a branch hint as 'cs' or 'ds' prefix
11473 ; -- print a semicolon (after prefixes due to bug in older gas).
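   For example, "%k0" prints "%eax" in AT&T syntax ("eax" for -masm=intel)
   if operands[0] is reg 0, and "%C1" prints the setcc/cmov condition
   suffix such as "e" or "g" if operands[1] is a flags comparison.  */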
11476 void
11477 print_operand (FILE *file, rtx x, int code)
11479 if (code)
11481 switch (code)
11483 case '*':
11484 if (ASSEMBLER_DIALECT == ASM_ATT)
11485 putc ('*', file);
11486 return;
11488 case '&':
11490 const char *name = get_some_local_dynamic_name ();
11491 if (name == NULL)
11492 output_operand_lossage ("'%%&' used without any "
11493 "local dynamic TLS references");
11494 else
11495 assemble_name (file, name);
11496 return;
11499 case 'A':
11500 switch (ASSEMBLER_DIALECT)
11502 case ASM_ATT:
11503 putc ('*', file);
11504 break;
11506 case ASM_INTEL:
11507 /* Intel syntax. For absolute addresses, registers should not
11508 be surrounded by braces. */
11509 if (!REG_P (x))
11511 putc ('[', file);
11512 PRINT_OPERAND (file, x, 0);
11513 putc (']', file);
11514 return;
11516 break;
11518 default:
11519 gcc_unreachable ();
11522 PRINT_OPERAND (file, x, 0);
11523 return;
11526 case 'L':
11527 if (ASSEMBLER_DIALECT == ASM_ATT)
11528 putc ('l', file);
11529 return;
11531 case 'W':
11532 if (ASSEMBLER_DIALECT == ASM_ATT)
11533 putc ('w', file);
11534 return;
11536 case 'B':
11537 if (ASSEMBLER_DIALECT == ASM_ATT)
11538 putc ('b', file);
11539 return;
11541 case 'Q':
11542 if (ASSEMBLER_DIALECT == ASM_ATT)
11543 putc ('l', file);
11544 return;
11546 case 'S':
11547 if (ASSEMBLER_DIALECT == ASM_ATT)
11548 putc ('s', file);
11549 return;
11551 case 'T':
11552 if (ASSEMBLER_DIALECT == ASM_ATT)
11553 putc ('t', file);
11554 return;
11556 case 'z':
11557 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11559 /* Opcodes don't get size suffixes if using Intel opcodes. */
11560 if (ASSEMBLER_DIALECT == ASM_INTEL)
11561 return;
11563 switch (GET_MODE_SIZE (GET_MODE (x)))
11565 case 1:
11566 putc ('b', file);
11567 return;
11569 case 2:
11570 putc ('w', file);
11571 return;
11573 case 4:
11574 putc ('l', file);
11575 return;
11577 case 8:
11578 putc ('q', file);
11579 return;
11581 default:
11582 output_operand_lossage
11583 ("invalid operand size for operand code '%c'", code);
11584 return;
11588 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11589 warning
11590 (0, "non-integer operand used with operand code '%c'", code);
11591 /* FALLTHRU */
11593 case 'Z':
11594 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
11595 if (ASSEMBLER_DIALECT == ASM_INTEL)
11596 return;
11598 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11600 switch (GET_MODE_SIZE (GET_MODE (x)))
11602 case 2:
11603 #ifdef HAVE_AS_IX86_FILDS
11604 putc ('s', file);
11605 #endif
11606 return;
11608 case 4:
11609 putc ('l', file);
11610 return;
11612 case 8:
11613 #ifdef HAVE_AS_IX86_FILDQ
11614 putc ('q', file);
11615 #else
11616 fputs ("ll", file);
11617 #endif
11618 return;
11620 default:
11621 break;
11624 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11626 /* 387 opcodes don't get size suffixes
11627 if the operands are registers. */
11628 if (STACK_REG_P (x))
11629 return;
11631 switch (GET_MODE_SIZE (GET_MODE (x)))
11633 case 4:
11634 putc ('s', file);
11635 return;
11637 case 8:
11638 putc ('l', file);
11639 return;
11641 case 12:
11642 case 16:
11643 putc ('t', file);
11644 return;
11646 default:
11647 break;
11650 else
11652 output_operand_lossage
11653 ("invalid operand type used with operand code '%c'", code);
11654 return;
11657 output_operand_lossage
11658 ("invalid operand size for operand code '%c'", code);
11659 return;
11661 case 'd':
11662 case 'b':
11663 case 'w':
11664 case 'k':
11665 case 'q':
11666 case 'h':
11667 case 't':
11668 case 'y':
11669 case 'x':
11670 case 'X':
11671 case 'P':
11672 break;
11674 case 's':
11675 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11677 PRINT_OPERAND (file, x, 0);
11678 fputs (", ", file);
11680 return;
11682 case 'D':
11683 /* Little bit of braindamage here. The SSE compare instructions
 11684 use completely different names for the comparisons than the
 11685 fp conditional moves do. */
11686 if (TARGET_AVX)
11688 switch (GET_CODE (x))
11690 case EQ:
11691 fputs ("eq", file);
11692 break;
11693 case UNEQ:
11694 fputs ("eq_us", file);
11695 break;
11696 case LT:
11697 fputs ("lt", file);
11698 break;
11699 case UNLT:
11700 fputs ("nge", file);
11701 break;
11702 case LE:
11703 fputs ("le", file);
11704 break;
11705 case UNLE:
11706 fputs ("ngt", file);
11707 break;
11708 case UNORDERED:
11709 fputs ("unord", file);
11710 break;
11711 case NE:
11712 fputs ("neq", file);
11713 break;
11714 case LTGT:
11715 fputs ("neq_oq", file);
11716 break;
11717 case GE:
11718 fputs ("ge", file);
11719 break;
11720 case UNGE:
11721 fputs ("nlt", file);
11722 break;
11723 case GT:
11724 fputs ("gt", file);
11725 break;
11726 case UNGT:
11727 fputs ("nle", file);
11728 break;
11729 case ORDERED:
11730 fputs ("ord", file);
11731 break;
11732 default:
11733 output_operand_lossage ("operand is not a condition code, "
11734 "invalid operand code 'D'");
11735 return;
11738 else
11740 switch (GET_CODE (x))
11742 case EQ:
11743 case UNEQ:
11744 fputs ("eq", file);
11745 break;
11746 case LT:
11747 case UNLT:
11748 fputs ("lt", file);
11749 break;
11750 case LE:
11751 case UNLE:
11752 fputs ("le", file);
11753 break;
11754 case UNORDERED:
11755 fputs ("unord", file);
11756 break;
11757 case NE:
11758 case LTGT:
11759 fputs ("neq", file);
11760 break;
11761 case UNGE:
11762 case GE:
11763 fputs ("nlt", file);
11764 break;
11765 case UNGT:
11766 case GT:
11767 fputs ("nle", file);
11768 break;
11769 case ORDERED:
11770 fputs ("ord", file);
11771 break;
11772 default:
11773 output_operand_lossage ("operand is not a condition code, "
11774 "invalid operand code 'D'");
11775 return;
11778 return;
11779 case 'O':
11780 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11781 if (ASSEMBLER_DIALECT == ASM_ATT)
11783 switch (GET_MODE (x))
11785 case HImode: putc ('w', file); break;
11786 case SImode:
11787 case SFmode: putc ('l', file); break;
11788 case DImode:
11789 case DFmode: putc ('q', file); break;
11790 default: gcc_unreachable ();
11792 putc ('.', file);
11794 #endif
11795 return;
11796 case 'C':
11797 if (!COMPARISON_P (x))
11799 output_operand_lossage ("operand is neither a constant nor a "
11800 "condition code, invalid operand code "
11801 "'C'");
11802 return;
11804 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11805 return;
11806 case 'F':
11807 if (!COMPARISON_P (x))
11809 output_operand_lossage ("operand is neither a constant nor a "
11810 "condition code, invalid operand code "
11811 "'F'");
11812 return;
11814 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11815 if (ASSEMBLER_DIALECT == ASM_ATT)
11816 putc ('.', file);
11817 #endif
11818 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11819 return;
11821 /* Like above, but reverse condition */
11822 case 'c':
11823 /* Check to see if argument to %c is really a constant
11824 and not a condition code which needs to be reversed. */
11825 if (!COMPARISON_P (x))
11827 output_operand_lossage ("operand is neither a constant nor a "
11828 "condition code, invalid operand "
11829 "code 'c'");
11830 return;
11832 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11833 return;
11834 case 'f':
11835 if (!COMPARISON_P (x))
11837 output_operand_lossage ("operand is neither a constant nor a "
11838 "condition code, invalid operand "
11839 "code 'f'");
11840 return;
11842 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11843 if (ASSEMBLER_DIALECT == ASM_ATT)
11844 putc ('.', file);
11845 #endif
11846 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11847 return;
11849 case 'H':
11850 /* It doesn't actually matter what mode we use here, as we're
11851 only going to use this for printing. */
11852 x = adjust_address_nv (x, DImode, 8);
11853 break;
11855 case '+':
11857 rtx x;
11859 if (!optimize
11860 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11861 return;
11863 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11864 if (x)
11866 int pred_val = INTVAL (XEXP (x, 0));
11868 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11869 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11871 int taken = pred_val > REG_BR_PROB_BASE / 2;
11872 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11874 /* Emit hints only when the default branch prediction
 11875 heuristics would fail. */
11876 if (taken != cputaken)
11878 /* We use 3e (DS) prefix for taken branches and
11879 2e (CS) prefix for not taken branches. */
11880 if (taken)
11881 fputs ("ds ; ", file);
11882 else
11883 fputs ("cs ; ", file);
11887 return;
11890 case 'Y':
11891 switch (GET_CODE (x))
11893 case NE:
11894 fputs ("neq", file);
11895 break;
11896 case EQ:
11897 fputs ("eq", file);
11898 break;
11899 case GE:
11900 case GEU:
11901 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11902 break;
11903 case GT:
11904 case GTU:
11905 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11906 break;
11907 case LE:
11908 case LEU:
11909 fputs ("le", file);
11910 break;
11911 case LT:
11912 case LTU:
11913 fputs ("lt", file);
11914 break;
11915 case UNORDERED:
11916 fputs ("unord", file);
11917 break;
11918 case ORDERED:
11919 fputs ("ord", file);
11920 break;
11921 case UNEQ:
11922 fputs ("ueq", file);
11923 break;
11924 case UNGE:
11925 fputs ("nlt", file);
11926 break;
11927 case UNGT:
11928 fputs ("nle", file);
11929 break;
11930 case UNLE:
11931 fputs ("ule", file);
11932 break;
11933 case UNLT:
11934 fputs ("ult", file);
11935 break;
11936 case LTGT:
11937 fputs ("une", file);
11938 break;
11939 default:
11940 output_operand_lossage ("operand is not a condition code, "
11941 "invalid operand code 'Y'");
11942 return;
11944 return;
11946 case ';':
11947 #if TARGET_MACHO
11948 fputs (" ; ", file);
11949 #else
11950 putc (' ', file);
11951 #endif
11952 return;
11954 default:
11955 output_operand_lossage ("invalid operand code '%c'", code);
11959 if (REG_P (x))
11960 print_reg (x, code, file);
11962 else if (MEM_P (x))
11964 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11965 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11966 && GET_MODE (x) != BLKmode)
11968 const char * size;
11969 switch (GET_MODE_SIZE (GET_MODE (x)))
11971 case 1: size = "BYTE"; break;
11972 case 2: size = "WORD"; break;
11973 case 4: size = "DWORD"; break;
11974 case 8: size = "QWORD"; break;
11975 case 12: size = "TBYTE"; break;
11976 case 16:
11977 if (GET_MODE (x) == XFmode)
11978 size = "TBYTE";
11979 else
11980 size = "XMMWORD";
11981 break;
11982 case 32: size = "YMMWORD"; break;
11983 default:
11984 gcc_unreachable ();
11987 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11988 if (code == 'b')
11989 size = "BYTE";
11990 else if (code == 'w')
11991 size = "WORD";
11992 else if (code == 'k')
11993 size = "DWORD";
11995 fputs (size, file);
11996 fputs (" PTR ", file);
11999 x = XEXP (x, 0);
12000 /* Avoid (%rip) for call operands. */
12001 if (CONSTANT_ADDRESS_P (x) && code == 'P'
12002 && !CONST_INT_P (x))
12003 output_addr_const (file, x);
12004 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
12005 output_operand_lossage ("invalid constraints for operand");
12006 else
12007 output_address (x);
12010 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
12012 REAL_VALUE_TYPE r;
12013 long l;
12015 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
12016 REAL_VALUE_TO_TARGET_SINGLE (r, l);
12018 if (ASSEMBLER_DIALECT == ASM_ATT)
12019 putc ('$', file);
12020 fprintf (file, "0x%08lx", (long unsigned int) l);
12023 /* These float cases don't actually occur as immediate operands. */
12024 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
12026 char dstr[30];
12028 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12029 fputs (dstr, file);
12032 else if (GET_CODE (x) == CONST_DOUBLE
12033 && GET_MODE (x) == XFmode)
12035 char dstr[30];
12037 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12038 fputs (dstr, file);
12041 else
12043 /* We have patterns that allow zero sets of memory, for instance.
12044 In 64-bit mode, we should probably support all 8-byte vectors,
12045 since we can in fact encode that into an immediate. */
12046 if (GET_CODE (x) == CONST_VECTOR)
12048 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12049 x = const0_rtx;
12052 if (code != 'P')
12054 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12056 if (ASSEMBLER_DIALECT == ASM_ATT)
12057 putc ('$', file);
12059 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12060 || GET_CODE (x) == LABEL_REF)
12062 if (ASSEMBLER_DIALECT == ASM_ATT)
12063 putc ('$', file);
12064 else
12065 fputs ("OFFSET FLAT:", file);
12068 if (CONST_INT_P (x))
12069 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12070 else if (flag_pic)
12071 output_pic_addr_const (file, x, code);
12072 else
12073 output_addr_const (file, x);
12077 /* Print a memory operand whose address is ADDR. */
12079 void
12080 print_operand_address (FILE *file, rtx addr)
12082 struct ix86_address parts;
12083 rtx base, index, disp;
12084 int scale;
12085 int ok = ix86_decompose_address (addr, &parts);
12087 gcc_assert (ok);
12089 base = parts.base;
12090 index = parts.index;
12091 disp = parts.disp;
12092 scale = parts.scale;
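  /* AT&T syntax prints the address as disp(base,index,scale), Intel
     syntax as [base+index*scale+disp]; an fs:/gs: segment override,
     if any, is printed first.  */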
12094 switch (parts.seg)
12096 case SEG_DEFAULT:
12097 break;
12098 case SEG_FS:
12099 case SEG_GS:
12100 if (ASSEMBLER_DIALECT == ASM_ATT)
12101 putc ('%', file);
12102 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12103 break;
12104 default:
12105 gcc_unreachable ();
12108 /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode. */
12109 if (TARGET_64BIT && !base && !index)
12111 rtx symbol = disp;
12113 if (GET_CODE (disp) == CONST
12114 && GET_CODE (XEXP (disp, 0)) == PLUS
12115 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12116 symbol = XEXP (XEXP (disp, 0), 0);
12118 if (GET_CODE (symbol) == LABEL_REF
12119 || (GET_CODE (symbol) == SYMBOL_REF
12120 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12121 base = pc_rtx;
12123 if (!base && !index)
12125 /* A displacement-only address requires special attention. */
12127 if (CONST_INT_P (disp))
12129 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12130 fputs ("ds:", file);
12131 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12133 else if (flag_pic)
12134 output_pic_addr_const (file, disp, 0);
12135 else
12136 output_addr_const (file, disp);
12138 else
12140 if (ASSEMBLER_DIALECT == ASM_ATT)
12142 if (disp)
12144 if (flag_pic)
12145 output_pic_addr_const (file, disp, 0);
12146 else if (GET_CODE (disp) == LABEL_REF)
12147 output_asm_label (disp);
12148 else
12149 output_addr_const (file, disp);
12152 putc ('(', file);
12153 if (base)
12154 print_reg (base, 0, file);
12155 if (index)
12157 putc (',', file);
12158 print_reg (index, 0, file);
12159 if (scale != 1)
12160 fprintf (file, ",%d", scale);
12162 putc (')', file);
12164 else
12166 rtx offset = NULL_RTX;
12168 if (disp)
12170 /* Pull out the offset of a symbol; print any symbol itself. */
12171 if (GET_CODE (disp) == CONST
12172 && GET_CODE (XEXP (disp, 0)) == PLUS
12173 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12175 offset = XEXP (XEXP (disp, 0), 1);
12176 disp = gen_rtx_CONST (VOIDmode,
12177 XEXP (XEXP (disp, 0), 0));
12180 if (flag_pic)
12181 output_pic_addr_const (file, disp, 0);
12182 else if (GET_CODE (disp) == LABEL_REF)
12183 output_asm_label (disp);
12184 else if (CONST_INT_P (disp))
12185 offset = disp;
12186 else
12187 output_addr_const (file, disp);
12190 putc ('[', file);
12191 if (base)
12193 print_reg (base, 0, file);
12194 if (offset)
12196 if (INTVAL (offset) >= 0)
12197 putc ('+', file);
12198 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12201 else if (offset)
12202 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12203 else
12204 putc ('0', file);
12206 if (index)
12208 putc ('+', file);
12209 print_reg (index, 0, file);
12210 if (scale != 1)
12211 fprintf (file, "*%d", scale);
12213 putc (']', file);
12218 bool
12219 output_addr_const_extra (FILE *file, rtx x)
12221 rtx op;
12223 if (GET_CODE (x) != UNSPEC)
12224 return false;
12226 op = XVECEXP (x, 0, 0);
12227 switch (XINT (x, 1))
12229 case UNSPEC_GOTTPOFF:
12230 output_addr_const (file, op);
12231 /* FIXME: This might be @TPOFF in Sun ld. */
12232 fputs ("@gottpoff", file);
12233 break;
12234 case UNSPEC_TPOFF:
12235 output_addr_const (file, op);
12236 fputs ("@tpoff", file);
12237 break;
12238 case UNSPEC_NTPOFF:
12239 output_addr_const (file, op);
12240 if (TARGET_64BIT)
12241 fputs ("@tpoff", file);
12242 else
12243 fputs ("@ntpoff", file);
12244 break;
12245 case UNSPEC_DTPOFF:
12246 output_addr_const (file, op);
12247 fputs ("@dtpoff", file);
12248 break;
12249 case UNSPEC_GOTNTPOFF:
12250 output_addr_const (file, op);
12251 if (TARGET_64BIT)
12252 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12253 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12254 else
12255 fputs ("@gotntpoff", file);
12256 break;
12257 case UNSPEC_INDNTPOFF:
12258 output_addr_const (file, op);
12259 fputs ("@indntpoff", file);
12260 break;
12261 #if TARGET_MACHO
12262 case UNSPEC_MACHOPIC_OFFSET:
12263 output_addr_const (file, op);
12264 putc ('-', file);
12265 machopic_output_function_base_name (file);
12266 break;
12267 #endif
12269 default:
12270 return false;
12273 return true;
12276 /* Split one or more DImode RTL references into pairs of SImode
12277 references. The RTL can be REG, offsettable MEM, integer constant, or
12278 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12279 split and "num" is its length. lo_half and hi_half are output arrays
12280 that parallel "operands". */
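/* For example, the DImode constant 0x100000003 splits into an SImode
   lo_half of 3 and an SImode hi_half of 1.  */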
12282 void
12283 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12285 while (num--)
12287 rtx op = operands[num];
12289 /* simplify_subreg refuses to split volatile memory addresses,
 12290 but we still have to handle them. */
12291 if (MEM_P (op))
12293 lo_half[num] = adjust_address (op, SImode, 0);
12294 hi_half[num] = adjust_address (op, SImode, 4);
12296 else
12298 lo_half[num] = simplify_gen_subreg (SImode, op,
12299 GET_MODE (op) == VOIDmode
12300 ? DImode : GET_MODE (op), 0);
12301 hi_half[num] = simplify_gen_subreg (SImode, op,
12302 GET_MODE (op) == VOIDmode
12303 ? DImode : GET_MODE (op), 4);
12307 /* Split one or more TImode RTL references into pairs of DImode
12308 references. The RTL can be REG, offsettable MEM, integer constant, or
12309 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12310 split and "num" is its length. lo_half and hi_half are output arrays
12311 that parallel "operands". */
12313 void
12314 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12316 while (num--)
12318 rtx op = operands[num];
12320 /* simplify_subreg refuses to split volatile memory addresses, but we
 12321 still have to handle them. */
12322 if (MEM_P (op))
12324 lo_half[num] = adjust_address (op, DImode, 0);
12325 hi_half[num] = adjust_address (op, DImode, 8);
12327 else
12329 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12330 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12335 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12336 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12337 is the expression of the binary operation. The output may either be
12338 emitted here, or returned to the caller, like all output_* functions.
12340 There is no guarantee that the operands are the same mode, as they
12341 might be within FLOAT or FLOAT_EXTEND expressions. */
12343 #ifndef SYSV386_COMPAT
12344 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12345 wants to fix the assemblers because that causes incompatibility
12346 with gcc. No-one wants to fix gcc because that causes
12347 incompatibility with assemblers... You can use the option of
12348 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12349 #define SYSV386_COMPAT 1
12350 #endif
12352 const char *
12353 output_387_binary_op (rtx insn, rtx *operands)
12355 static char buf[40];
12356 const char *p;
12357 const char *ssep;
12358 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12360 #ifdef ENABLE_CHECKING
12361 /* Even if we do not want to check the inputs, this documents the input
 12362 constraints, which helps in understanding the following code. */
12363 if (STACK_REG_P (operands[0])
12364 && ((REG_P (operands[1])
12365 && REGNO (operands[0]) == REGNO (operands[1])
12366 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12367 || (REG_P (operands[2])
12368 && REGNO (operands[0]) == REGNO (operands[2])
12369 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12370 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12371 ; /* ok */
12372 else
12373 gcc_assert (is_sse);
12374 #endif
12376 switch (GET_CODE (operands[3]))
12378 case PLUS:
12379 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12380 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12381 p = "fiadd";
12382 else
12383 p = "fadd";
12384 ssep = "vadd";
12385 break;
12387 case MINUS:
12388 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12389 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12390 p = "fisub";
12391 else
12392 p = "fsub";
12393 ssep = "vsub";
12394 break;
12396 case MULT:
12397 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12398 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12399 p = "fimul";
12400 else
12401 p = "fmul";
12402 ssep = "vmul";
12403 break;
12405 case DIV:
12406 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12407 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12408 p = "fidiv";
12409 else
12410 p = "fdiv";
12411 ssep = "vdiv";
12412 break;
12414 default:
12415 gcc_unreachable ();
12418 if (is_sse)
12420 if (TARGET_AVX)
12422 strcpy (buf, ssep);
12423 if (GET_MODE (operands[0]) == SFmode)
12424 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12425 else
12426 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12428 else
12430 strcpy (buf, ssep + 1);
12431 if (GET_MODE (operands[0]) == SFmode)
12432 strcat (buf, "ss\t{%2, %0|%0, %2}");
12433 else
12434 strcat (buf, "sd\t{%2, %0|%0, %2}");
12436 return buf;
12438 strcpy (buf, p);
12440 switch (GET_CODE (operands[3]))
12442 case MULT:
12443 case PLUS:
12444 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12446 rtx temp = operands[2];
12447 operands[2] = operands[1];
12448 operands[1] = temp;
12451 /* We now know operands[0] == operands[1]. */
12453 if (MEM_P (operands[2]))
12455 p = "%Z2\t%2";
12456 break;
12459 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12461 if (STACK_TOP_P (operands[0]))
12462 /* How is it that we are storing to a dead operand[2]?
12463 Well, presumably operands[1] is dead too. We can't
12464 store the result to st(0) as st(0) gets popped on this
12465 instruction. Instead store to operands[2] (which I
12466 think has to be st(1)). st(1) will be popped later.
12467 gcc <= 2.8.1 didn't have this check and generated
12468 assembly code that the Unixware assembler rejected. */
12469 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12470 else
12471 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12472 break;
12475 if (STACK_TOP_P (operands[0]))
12476 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12477 else
12478 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12479 break;
12481 case MINUS:
12482 case DIV:
12483 if (MEM_P (operands[1]))
12485 p = "r%Z1\t%1";
12486 break;
12489 if (MEM_P (operands[2]))
12491 p = "%Z2\t%2";
12492 break;
12495 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12497 #if SYSV386_COMPAT
12498 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12499 derived assemblers, confusingly reverse the direction of
12500 the operation for fsub{r} and fdiv{r} when the
12501 destination register is not st(0). The Intel assembler
12502 doesn't have this brain damage. Read !SYSV386_COMPAT to
12503 figure out what the hardware really does. */
12504 if (STACK_TOP_P (operands[0]))
12505 p = "{p\t%0, %2|rp\t%2, %0}";
12506 else
12507 p = "{rp\t%2, %0|p\t%0, %2}";
12508 #else
12509 if (STACK_TOP_P (operands[0]))
12510 /* As above for fmul/fadd, we can't store to st(0). */
12511 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12512 else
12513 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12514 #endif
12515 break;
12518 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12520 #if SYSV386_COMPAT
12521 if (STACK_TOP_P (operands[0]))
12522 p = "{rp\t%0, %1|p\t%1, %0}";
12523 else
12524 p = "{p\t%1, %0|rp\t%0, %1}";
12525 #else
12526 if (STACK_TOP_P (operands[0]))
12527 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12528 else
12529 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12530 #endif
12531 break;
12534 if (STACK_TOP_P (operands[0]))
12536 if (STACK_TOP_P (operands[1]))
12537 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12538 else
12539 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12540 break;
12542 else if (STACK_TOP_P (operands[1]))
12544 #if SYSV386_COMPAT
12545 p = "{\t%1, %0|r\t%0, %1}";
12546 #else
12547 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12548 #endif
12550 else
12552 #if SYSV386_COMPAT
12553 p = "{r\t%2, %0|\t%0, %2}";
12554 #else
12555 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12556 #endif
12558 break;
12560 default:
12561 gcc_unreachable ();
12564 strcat (buf, p);
12565 return buf;
12568 /* Return needed mode for entity in optimize_mode_switching pass. */
12571 ix86_mode_needed (int entity, rtx insn)
12573 enum attr_i387_cw mode;
12575 /* The mode UNINITIALIZED is used to store the control word after a
 12576 function call or ASM pattern. The mode ANY specifies that the function
 12577 has no requirements on the control word and makes no changes to the
 12578 bits we are interested in. */
12580 if (CALL_P (insn)
12581 || (NONJUMP_INSN_P (insn)
12582 && (asm_noperands (PATTERN (insn)) >= 0
12583 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12584 return I387_CW_UNINITIALIZED;
12586 if (recog_memoized (insn) < 0)
12587 return I387_CW_ANY;
12589 mode = get_attr_i387_cw (insn);
12591 switch (entity)
12593 case I387_TRUNC:
12594 if (mode == I387_CW_TRUNC)
12595 return mode;
12596 break;
12598 case I387_FLOOR:
12599 if (mode == I387_CW_FLOOR)
12600 return mode;
12601 break;
12603 case I387_CEIL:
12604 if (mode == I387_CW_CEIL)
12605 return mode;
12606 break;
12608 case I387_MASK_PM:
12609 if (mode == I387_CW_MASK_PM)
12610 return mode;
12611 break;
12613 default:
12614 gcc_unreachable ();
12617 return I387_CW_ANY;
12620 /* Output code to initialize control word copies used by trunc?f?i and
 12621 rounding patterns. CURRENT_MODE is set to the current control word,
 12622 while NEW_MODE is set to the new control word. */
12624 void
12625 emit_i387_cw_initialization (int mode)
12627 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12628 rtx new_mode;
12630 enum ix86_stack_slot slot;
12632 rtx reg = gen_reg_rtx (HImode);
12634 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12635 emit_move_insn (reg, copy_rtx (stored_mode));
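  /* In the x87 control word, bits 11:10 select the rounding mode
     (00 = to nearest, 01 = down, 10 = up, 11 = toward zero) and bit 5
     masks the precision exception; the constants below set those fields.  */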
12637 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12638 || optimize_function_for_size_p (cfun))
12640 switch (mode)
12642 case I387_CW_TRUNC:
12643 /* round toward zero (truncate) */
12644 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12645 slot = SLOT_CW_TRUNC;
12646 break;
12648 case I387_CW_FLOOR:
12649 /* round down toward -oo */
12650 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12651 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12652 slot = SLOT_CW_FLOOR;
12653 break;
12655 case I387_CW_CEIL:
12656 /* round up toward +oo */
12657 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12658 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12659 slot = SLOT_CW_CEIL;
12660 break;
12662 case I387_CW_MASK_PM:
12663 /* mask precision exception for nearbyint() */
12664 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12665 slot = SLOT_CW_MASK_PM;
12666 break;
12668 default:
12669 gcc_unreachable ();
12672 else
12674 switch (mode)
12676 case I387_CW_TRUNC:
12677 /* round toward zero (truncate) */
12678 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12679 slot = SLOT_CW_TRUNC;
12680 break;
12682 case I387_CW_FLOOR:
12683 /* round down toward -oo */
12684 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12685 slot = SLOT_CW_FLOOR;
12686 break;
12688 case I387_CW_CEIL:
12689 /* round up toward +oo */
12690 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12691 slot = SLOT_CW_CEIL;
12692 break;
12694 case I387_CW_MASK_PM:
12695 /* mask precision exception for nearbyint() */
12696 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12697 slot = SLOT_CW_MASK_PM;
12698 break;
12700 default:
12701 gcc_unreachable ();
12705 gcc_assert (slot < MAX_386_STACK_LOCALS);
12707 new_mode = assign_386_stack_local (HImode, slot);
12708 emit_move_insn (new_mode, reg);
12711 /* Output code for INSN to convert a float to a signed int. OPERANDS
12712 are the insn operands. The output may be [HSD]Imode and the input
12713 operand may be [SDX]Fmode. */
12715 const char *
12716 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12718 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12719 int dimode_p = GET_MODE (operands[0]) == DImode;
12720 int round_mode = get_attr_i387_cw (insn);
12722 /* Jump through a hoop or two for DImode, since the hardware has no
12723 non-popping instruction. We used to do this a different way, but
12724 that was somewhat fragile and broke with post-reload splitters. */
12725 if ((dimode_p || fisttp) && !stack_top_dies)
12726 output_asm_insn ("fld\t%y1", operands);
12728 gcc_assert (STACK_TOP_P (operands[1]));
12729 gcc_assert (MEM_P (operands[0]));
12730 gcc_assert (GET_MODE (operands[1]) != TFmode);
12732 if (fisttp)
12733 output_asm_insn ("fisttp%Z0\t%0", operands);
12734 else
12736 if (round_mode != I387_CW_ANY)
12737 output_asm_insn ("fldcw\t%3", operands);
12738 if (stack_top_dies || dimode_p)
12739 output_asm_insn ("fistp%Z0\t%0", operands);
12740 else
12741 output_asm_insn ("fist%Z0\t%0", operands);
12742 if (round_mode != I387_CW_ANY)
12743 output_asm_insn ("fldcw\t%2", operands);
12746 return "";
12749 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12750 have the values zero or one, indicates the ffreep insn's operand
12751 from the OPERANDS array. */
12753 static const char *
12754 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12756 if (TARGET_USE_FFREEP)
12757 #ifdef HAVE_AS_IX86_FFREEP
12758 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12759 #else
12761 static char retval[32];
12762 int regno = REGNO (operands[opno]);
12764 gcc_assert (FP_REGNO_P (regno));
12766 regno -= FIRST_STACK_REG;
12768 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12769 return retval;
12771 #endif
12773 return opno ? "fstp\t%y1" : "fstp\t%y0";
12777 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12778 should be used. UNORDERED_P is true when fucom should be used. */
12780 const char *
12781 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12783 int stack_top_dies;
12784 rtx cmp_op0, cmp_op1;
12785 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12787 if (eflags_p)
12789 cmp_op0 = operands[0];
12790 cmp_op1 = operands[1];
12792 else
12794 cmp_op0 = operands[1];
12795 cmp_op1 = operands[2];
12798 if (is_sse)
12800 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12801 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12802 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12803 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12805 if (GET_MODE (operands[0]) == SFmode)
12806 if (unordered_p)
12807 return &ucomiss[TARGET_AVX ? 0 : 1];
12808 else
12809 return &comiss[TARGET_AVX ? 0 : 1];
12810 else
12811 if (unordered_p)
12812 return &ucomisd[TARGET_AVX ? 0 : 1];
12813 else
12814 return &comisd[TARGET_AVX ? 0 : 1];
12817 gcc_assert (STACK_TOP_P (cmp_op0));
12819 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12821 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12823 if (stack_top_dies)
12825 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12826 return output_387_ffreep (operands, 1);
12828 else
12829 return "ftst\n\tfnstsw\t%0";
12832 if (STACK_REG_P (cmp_op1)
12833 && stack_top_dies
12834 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12835 && REGNO (cmp_op1) != FIRST_STACK_REG)
12837 /* If the top of the 387 stack dies, and the other operand
 12838 is also a stack register that dies, then this must be a
 12839 `fcompp' float compare. */
12841 if (eflags_p)
12843 /* There is no double popping fcomi variant. Fortunately,
12844 eflags is immune from the fstp's cc clobbering. */
12845 if (unordered_p)
12846 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12847 else
12848 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12849 return output_387_ffreep (operands, 0);
12851 else
12853 if (unordered_p)
12854 return "fucompp\n\tfnstsw\t%0";
12855 else
12856 return "fcompp\n\tfnstsw\t%0";
12859 else
12861 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12863 static const char * const alt[16] =
12865 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12866 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12867 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12868 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12870 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12871 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12872 NULL,
12873 NULL,
12875 "fcomi\t{%y1, %0|%0, %y1}",
12876 "fcomip\t{%y1, %0|%0, %y1}",
12877 "fucomi\t{%y1, %0|%0, %y1}",
12878 "fucomip\t{%y1, %0|%0, %y1}",
12880 NULL,
12881 NULL,
12882 NULL,
12883 NULL
12886 int mask;
12887 const char *ret;
12889 mask = eflags_p << 3;
12890 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12891 mask |= unordered_p << 1;
12892 mask |= stack_top_dies;
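      /* For example, an fcomi compare (eflags_p set) of two live FP stack
         registers gives mask == 8 and selects "fcomi\t{%y1, %0|%0, %y1}"
         from the table above.  */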
12894 gcc_assert (mask < 16);
12895 ret = alt[mask];
12896 gcc_assert (ret);
12898 return ret;
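/* Output an element of an absolute (non-PIC) dispatch table: a
   pointer-wide reference to local label number VALUE.  */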
12902 void
12903 ix86_output_addr_vec_elt (FILE *file, int value)
12905 const char *directive = ASM_LONG;
12907 #ifdef ASM_QUAD
12908 if (TARGET_64BIT)
12909 directive = ASM_QUAD;
12910 #else
12911 gcc_assert (!TARGET_64BIT);
12912 #endif
12914 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
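/* Output an element of a relative (PIC) dispatch table: either the
   difference of local labels VALUE and REL, or a @GOTOFF / PIC-base
   relative reference to label VALUE, depending on the target.  */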
12917 void
12918 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12920 const char *directive = ASM_LONG;
12922 #ifdef ASM_QUAD
12923 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12924 directive = ASM_QUAD;
12925 #else
12926 gcc_assert (!TARGET_64BIT);
12927 #endif
12928 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12929 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12930 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
12931 directive, value, rel);
12932 else if (HAVE_AS_GOTOFF_IN_DATA)
12933 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
12934 #if TARGET_MACHO
12935 else if (TARGET_MACHO)
12937 fprintf (file, ASM_LONG LPREFIX "%d-", value);
12938 machopic_output_function_base_name (file);
12939 putc ('\n', file);
12941 #endif
12942 else
12943 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
12944 GOT_SYMBOL_NAME, value);
12947 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
12948 for the target. */
12950 void
12951 ix86_expand_clear (rtx dest)
12953 rtx tmp;
12955 /* We play register width games, which are only valid after reload. */
12956 gcc_assert (reload_completed);
12958 /* Avoid HImode and its attendant prefix byte. */
12959 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12960 dest = gen_rtx_REG (SImode, REGNO (dest));
12961 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12963 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12964 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
12966 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12967 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
12970 emit_insn (tmp);
12973 /* X is an unchanging MEM. If it is a constant pool reference, return
12974 the constant pool rtx, else NULL. */
12977 maybe_get_pool_constant (rtx x)
12979 x = ix86_delegitimize_address (XEXP (x, 0));
12981 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12982 return get_pool_constant (x);
12984 return NULL_RTX;
12987 void
12988 ix86_expand_move (enum machine_mode mode, rtx operands[])
12990 rtx op0, op1;
12991 enum tls_model model;
12993 op0 = operands[0];
12994 op1 = operands[1];
12996 if (GET_CODE (op1) == SYMBOL_REF)
12998 model = SYMBOL_REF_TLS_MODEL (op1);
12999 if (model)
13001 op1 = legitimize_tls_address (op1, model, true);
13002 op1 = force_operand (op1, op0);
13003 if (op1 == op0)
13004 return;
13006 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13007 && SYMBOL_REF_DLLIMPORT_P (op1))
13008 op1 = legitimize_dllimport_symbol (op1, false);
13010 else if (GET_CODE (op1) == CONST
13011 && GET_CODE (XEXP (op1, 0)) == PLUS
13012 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
13014 rtx addend = XEXP (XEXP (op1, 0), 1);
13015 rtx symbol = XEXP (XEXP (op1, 0), 0);
13016 rtx tmp = NULL;
13018 model = SYMBOL_REF_TLS_MODEL (symbol);
13019 if (model)
13020 tmp = legitimize_tls_address (symbol, model, true);
13021 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13022 && SYMBOL_REF_DLLIMPORT_P (symbol))
13023 tmp = legitimize_dllimport_symbol (symbol, true);
13025 if (tmp)
13027 tmp = force_operand (tmp, NULL);
13028 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
13029 op0, 1, OPTAB_DIRECT);
13030 if (tmp == op0)
13031 return;
13035 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
13037 if (TARGET_MACHO && !TARGET_64BIT)
13039 #if TARGET_MACHO
13040 if (MACHOPIC_PURE)
13042 rtx temp = ((reload_in_progress
13043 || ((op0 && REG_P (op0))
13044 && mode == Pmode))
13045 ? op0 : gen_reg_rtx (Pmode));
13046 op1 = machopic_indirect_data_reference (op1, temp);
13047 op1 = machopic_legitimize_pic_address (op1, mode,
13048 temp == op1 ? 0 : temp);
13050 else if (MACHOPIC_INDIRECT)
13051 op1 = machopic_indirect_data_reference (op1, 0);
13052 if (op0 == op1)
13053 return;
13054 #endif
13056 else
13058 if (MEM_P (op0))
13059 op1 = force_reg (Pmode, op1);
13060 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13062 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13063 op1 = legitimize_pic_address (op1, reg);
13064 if (op0 == op1)
13065 return;
13069 else
13071 if (MEM_P (op0)
13072 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13073 || !push_operand (op0, mode))
13074 && MEM_P (op1))
13075 op1 = force_reg (mode, op1);
13077 if (push_operand (op0, mode)
13078 && ! general_no_elim_operand (op1, mode))
13079 op1 = copy_to_mode_reg (mode, op1);
13081 /* Force large constants in 64bit compilation into a register
 13082 so they can be CSEd. */
13083 if (can_create_pseudo_p ()
13084 && (mode == DImode) && TARGET_64BIT
13085 && immediate_operand (op1, mode)
13086 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13087 && !register_operand (op0, mode)
13088 && optimize)
13089 op1 = copy_to_mode_reg (mode, op1);
13091 if (can_create_pseudo_p ()
13092 && FLOAT_MODE_P (mode)
13093 && GET_CODE (op1) == CONST_DOUBLE)
13095 /* If we are loading a floating point constant to a register,
13096 force the value to memory now, since we'll get better code
13097 out of the back end. */
13099 op1 = validize_mem (force_const_mem (mode, op1));
13100 if (!register_operand (op0, mode))
13102 rtx temp = gen_reg_rtx (mode);
13103 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13104 emit_move_insn (op0, temp);
13105 return;
13110 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13113 void
13114 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13116 rtx op0 = operands[0], op1 = operands[1];
13117 unsigned int align = GET_MODE_ALIGNMENT (mode);
13119 /* Force constants other than zero into memory. We do not know how
13120 the instructions used to build constants modify the upper 64 bits
13121 of the register; once we have that information we may be able
13122 to handle some of them more efficiently. */
13123 if (can_create_pseudo_p ()
13124 && register_operand (op0, mode)
13125 && (CONSTANT_P (op1)
13126 || (GET_CODE (op1) == SUBREG
13127 && CONSTANT_P (SUBREG_REG (op1))))
13128 && !standard_sse_constant_p (op1))
13129 op1 = validize_mem (force_const_mem (mode, op1));
13131 /* We need to check memory alignment for SSE mode since attributes
 13132 can make operands unaligned. */
13133 if (can_create_pseudo_p ()
13134 && SSE_REG_MODE_P (mode)
13135 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13136 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13138 rtx tmp[2];
13140 /* ix86_expand_vector_move_misalign() does not like constants ... */
13141 if (CONSTANT_P (op1)
13142 || (GET_CODE (op1) == SUBREG
13143 && CONSTANT_P (SUBREG_REG (op1))))
13144 op1 = validize_mem (force_const_mem (mode, op1));
13146 /* ... nor both arguments in memory. */
13147 if (!register_operand (op0, mode)
13148 && !register_operand (op1, mode))
13149 op1 = force_reg (mode, op1);
13151 tmp[0] = op0; tmp[1] = op1;
13152 ix86_expand_vector_move_misalign (mode, tmp);
13153 return;
13156 /* If neither operand is a register, force operand1 into a register. */
13157 if (can_create_pseudo_p ()
13158 && !register_operand (op0, mode)
13159 && !register_operand (op1, mode))
13161 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13162 return;
13165 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13168 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13169 straight to ix86_expand_vector_move. */
13170 /* Code generation for scalar reg-reg moves of single and double precision data:
13171 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13172 movaps reg, reg
13173 else
13174 movss reg, reg
13175 if (x86_sse_partial_reg_dependency == true)
13176 movapd reg, reg
13177 else
13178 movsd reg, reg
13180 Code generation for scalar loads of double precision data:
13181 if (x86_sse_split_regs == true)
13182 movlpd mem, reg (gas syntax)
13183 else
13184 movsd mem, reg
13186 Code generation for unaligned packed loads of single precision data
13187 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13188 if (x86_sse_unaligned_move_optimal)
13189 movups mem, reg
13191 if (x86_sse_partial_reg_dependency == true)
13193 xorps reg, reg
13194 movlps mem, reg
13195 movhps mem+8, reg
13197 else
13199 movlps mem, reg
13200 movhps mem+8, reg
13203 Code generation for unaligned packed loads of double precision data
13204 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13205 if (x86_sse_unaligned_move_optimal)
13206 movupd mem, reg
13208 if (x86_sse_split_regs == true)
13210 movlpd mem, reg
13211 movhpd mem+8, reg
13213 else
13215 movsd mem, reg
13216 movhpd mem+8, reg
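   Code generation for unaligned packed stores mirrors the loads: movups
   when optimizing for size, movdqu for integer vectors unless typeless
   stores are preferred, and otherwise the value is written as two halves
   with movlpd/movhpd (double precision) or movlps/movhps; on AVX targets
   a single unaligned vmov* is always used.  */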
13220 void
13221 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13223 rtx op0, op1, m;
13225 op0 = operands[0];
13226 op1 = operands[1];
13228 if (TARGET_AVX)
13230 switch (GET_MODE_CLASS (mode))
13232 case MODE_VECTOR_INT:
13233 case MODE_INT:
13234 switch (GET_MODE_SIZE (mode))
13236 case 16:
13237 op0 = gen_lowpart (V16QImode, op0);
13238 op1 = gen_lowpart (V16QImode, op1);
13239 emit_insn (gen_avx_movdqu (op0, op1));
13240 break;
13241 case 32:
13242 op0 = gen_lowpart (V32QImode, op0);
13243 op1 = gen_lowpart (V32QImode, op1);
13244 emit_insn (gen_avx_movdqu256 (op0, op1));
13245 break;
13246 default:
13247 gcc_unreachable ();
13249 break;
13250 case MODE_VECTOR_FLOAT:
13251 op0 = gen_lowpart (mode, op0);
13252 op1 = gen_lowpart (mode, op1);
13254 switch (mode)
13256 case V4SFmode:
13257 emit_insn (gen_avx_movups (op0, op1));
13258 break;
13259 case V8SFmode:
13260 emit_insn (gen_avx_movups256 (op0, op1));
13261 break;
13262 case V2DFmode:
13263 emit_insn (gen_avx_movupd (op0, op1));
13264 break;
13265 case V4DFmode:
13266 emit_insn (gen_avx_movupd256 (op0, op1));
13267 break;
13268 default:
13269 gcc_unreachable ();
13271 break;
13273 default:
13274 gcc_unreachable ();
13277 return;
13280 if (MEM_P (op1))
13282 /* If we're optimizing for size, movups is the smallest. */
13283 if (optimize_insn_for_size_p ())
13285 op0 = gen_lowpart (V4SFmode, op0);
13286 op1 = gen_lowpart (V4SFmode, op1);
13287 emit_insn (gen_sse_movups (op0, op1));
13288 return;
13291 /* ??? If we have typed data, then it would appear that using
13292 movdqu is the only way to get unaligned data loaded with
13293 integer type. */
13294 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13296 op0 = gen_lowpart (V16QImode, op0);
13297 op1 = gen_lowpart (V16QImode, op1);
13298 emit_insn (gen_sse2_movdqu (op0, op1));
13299 return;
13302 if (TARGET_SSE2 && mode == V2DFmode)
13304 rtx zero;
13306 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13308 op0 = gen_lowpart (V2DFmode, op0);
13309 op1 = gen_lowpart (V2DFmode, op1);
13310 emit_insn (gen_sse2_movupd (op0, op1));
13311 return;
13314 /* When SSE registers are split into halves, we can avoid
13315 writing to the top half twice. */
13316 if (TARGET_SSE_SPLIT_REGS)
13318 emit_clobber (op0);
13319 zero = op0;
13321 else
13323 /* ??? Not sure about the best option for the Intel chips.
13324 The following would seem to satisfy; the register is
13325 entirely cleared, breaking the dependency chain. We
13326 then store to the upper half, with a dependency depth
13327 of one. A rumor has it that Intel recommends two movsd
13328 followed by an unpacklpd, but this is unconfirmed. And
13329 given that the dependency depth of the unpacklpd would
13330 still be one, I'm not sure why this would be better. */
13331 zero = CONST0_RTX (V2DFmode);
13334 m = adjust_address (op1, DFmode, 0);
13335 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13336 m = adjust_address (op1, DFmode, 8);
13337 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13339 else
13341 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13343 op0 = gen_lowpart (V4SFmode, op0);
13344 op1 = gen_lowpart (V4SFmode, op1);
13345 emit_insn (gen_sse_movups (op0, op1));
13346 return;
13349 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13350 emit_move_insn (op0, CONST0_RTX (mode));
13351 else
13352 emit_clobber (op0);
13354 if (mode != V4SFmode)
13355 op0 = gen_lowpart (V4SFmode, op0);
13356 m = adjust_address (op1, V2SFmode, 0);
13357 emit_insn (gen_sse_loadlps (op0, op0, m));
13358 m = adjust_address (op1, V2SFmode, 8);
13359 emit_insn (gen_sse_loadhps (op0, op0, m));
13362 else if (MEM_P (op0))
13364 /* If we're optimizing for size, movups is the smallest. */
13365 if (optimize_insn_for_size_p ())
13367 op0 = gen_lowpart (V4SFmode, op0);
13368 op1 = gen_lowpart (V4SFmode, op1);
13369 emit_insn (gen_sse_movups (op0, op1));
13370 return;
13373 /* ??? Similar to above, only less clear because of quote
13374 typeless stores unquote. */
13375 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13376 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13378 op0 = gen_lowpart (V16QImode, op0);
13379 op1 = gen_lowpart (V16QImode, op1);
13380 emit_insn (gen_sse2_movdqu (op0, op1));
13381 return;
13384 if (TARGET_SSE2 && mode == V2DFmode)
13386 m = adjust_address (op0, DFmode, 0);
13387 emit_insn (gen_sse2_storelpd (m, op1));
13388 m = adjust_address (op0, DFmode, 8);
13389 emit_insn (gen_sse2_storehpd (m, op1));
13391 else
13393 if (mode != V4SFmode)
13394 op1 = gen_lowpart (V4SFmode, op1);
13395 m = adjust_address (op0, V2SFmode, 0);
13396 emit_insn (gen_sse_storelps (m, op1));
13397 m = adjust_address (op0, V2SFmode, 8);
13398 emit_insn (gen_sse_storehps (m, op1));
13401 else
13402 gcc_unreachable ();
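/* As a rough sketch of the fallback V2DFmode load path above (no AVX,
   unaligned moves not known to be cheap), the two half-width loads
   amount to something like the following, assuming for the example
   that the address is in %eax:

	movsd	(%eax), %xmm0		# low double; clears the rest of %xmm0
	movhpd	8(%eax), %xmm0		# high double

   as opposed to a single movupd when TARGET_SSE_UNALIGNED_MOVE_OPTIMAL
   holds.  */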
13405 /* Expand a push in MODE. This is some mode for which we do not support
13406 proper push instructions, at least from the registers that we expect
13407 the value to live in. */
13409 void
13410 ix86_expand_push (enum machine_mode mode, rtx x)
13412 rtx tmp;
13414 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13415 GEN_INT (-GET_MODE_SIZE (mode)),
13416 stack_pointer_rtx, 1, OPTAB_DIRECT);
13417 if (tmp != stack_pointer_rtx)
13418 emit_move_insn (stack_pointer_rtx, tmp);
13420 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13422 /* When we push an operand onto the stack, it has to be aligned at least
13423 at the function argument boundary. However, since we don't have
13424 the argument type, we can't determine the actual argument
13425 boundary. */
13426 emit_move_insn (tmp, x);
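/* For illustration, pushing a value X of such a mode expands to an
   explicit stack-pointer adjustment followed by an ordinary store,
   roughly:

	sp = sp - GET_MODE_SIZE (mode);
	*(mode *) sp = x;

   rather than a real push instruction.  */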
13429 /* Helper function of ix86_fixup_binary_operands to canonicalize
13430 operand order. Returns true if the operands should be swapped. */
13432 static bool
13433 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13434 rtx operands[])
13436 rtx dst = operands[0];
13437 rtx src1 = operands[1];
13438 rtx src2 = operands[2];
13440 /* If the operation is not commutative, we can't do anything. */
13441 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13442 return false;
13444 /* Highest priority is that src1 should match dst. */
13445 if (rtx_equal_p (dst, src1))
13446 return false;
13447 if (rtx_equal_p (dst, src2))
13448 return true;
13450 /* Next highest priority is that immediate constants come second. */
13451 if (immediate_operand (src2, mode))
13452 return false;
13453 if (immediate_operand (src1, mode))
13454 return true;
13456 /* Lowest priority is that memory references should come second. */
13457 if (MEM_P (src2))
13458 return false;
13459 if (MEM_P (src1))
13460 return true;
13462 return false;
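/* For example, with a commutative PLUS and (dst, src1, src2) =
   (r1, mem, r1), the function above returns true: after the swap the
   operand matching the destination becomes src1 and the memory
   reference becomes src2, i.e. the expander sees r1 = r1 + mem rather
   than r1 = mem + r1.  */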
13466 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13467 destination to use for the operation. If different from the true
13468 destination in operands[0], a copy operation will be required. */
13471 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13472 rtx operands[])
13474 rtx dst = operands[0];
13475 rtx src1 = operands[1];
13476 rtx src2 = operands[2];
13478 /* Canonicalize operand order. */
13479 if (ix86_swap_binary_operands_p (code, mode, operands))
13481 rtx temp;
13483 /* It is invalid to swap operands of different modes. */
13484 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13486 temp = src1;
13487 src1 = src2;
13488 src2 = temp;
13491 /* Both source operands cannot be in memory. */
13492 if (MEM_P (src1) && MEM_P (src2))
13494 /* Optimization: Only read from memory once. */
13495 if (rtx_equal_p (src1, src2))
13497 src2 = force_reg (mode, src2);
13498 src1 = src2;
13500 else
13501 src2 = force_reg (mode, src2);
13504 /* If the destination is memory, and we do not have matching source
13505 operands, do things in registers. */
13506 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13507 dst = gen_reg_rtx (mode);
13509 /* Source 1 cannot be a constant. */
13510 if (CONSTANT_P (src1))
13511 src1 = force_reg (mode, src1);
13513 /* Source 1 cannot be a non-matching memory. */
13514 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13515 src1 = force_reg (mode, src1);
13517 operands[1] = src1;
13518 operands[2] = src2;
13519 return dst;
13522 /* Similarly, but assume that the destination has already been
13523 set up properly. */
13525 void
13526 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13527 enum machine_mode mode, rtx operands[])
13529 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13530 gcc_assert (dst == operands[0]);
13533 /* Attempt to expand a binary operator. Make the expansion closer to the
13534 actual machine than just general_operand, which would allow 3 separate
13535 memory references (one output, two inputs) in a single insn. */
13537 void
13538 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13539 rtx operands[])
13541 rtx src1, src2, dst, op, clob;
13543 dst = ix86_fixup_binary_operands (code, mode, operands);
13544 src1 = operands[1];
13545 src2 = operands[2];
13547 /* Emit the instruction. */
13549 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13550 if (reload_in_progress)
13552 /* Reload doesn't know about the flags register, and doesn't know that
13553 it doesn't want to clobber it. We can only do this with PLUS. */
13554 gcc_assert (code == PLUS);
13555 emit_insn (op);
13557 else
13559 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13560 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13563 /* Fix up the destination if needed. */
13564 if (dst != operands[0])
13565 emit_move_insn (operands[0], dst);
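/* The RTL emitted above for, say, an SImode addition is essentially

	(parallel [(set (reg:SI dst)
			(plus:SI (reg:SI src1) (reg:SI src2)))
		   (clobber (reg:CC FLAGS_REG))])

   with the flags clobber omitted during reload, where only PLUS is
   allowed because it can be carried out by an LEA that leaves the
   flags untouched.  */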
13568 /* Return TRUE or FALSE depending on whether the binary operator meets the
13569 appropriate constraints. */
13572 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13573 rtx operands[3])
13575 rtx dst = operands[0];
13576 rtx src1 = operands[1];
13577 rtx src2 = operands[2];
13579 /* Both source operands cannot be in memory. */
13580 if (MEM_P (src1) && MEM_P (src2))
13581 return 0;
13583 /* Canonicalize operand order for commutative operators. */
13584 if (ix86_swap_binary_operands_p (code, mode, operands))
13586 rtx temp = src1;
13587 src1 = src2;
13588 src2 = temp;
13591 /* If the destination is memory, we must have a matching source operand. */
13592 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13593 return 0;
13595 /* Source 1 cannot be a constant. */
13596 if (CONSTANT_P (src1))
13597 return 0;
13599 /* Source 1 cannot be a non-matching memory. */
13600 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13601 return 0;
13603 return 1;
13606 /* Attempt to expand a unary operator. Make the expansion closer to the
13607 actual machine than just general_operand, which would allow 2 separate
13608 memory references (one output, one input) in a single insn. */
13610 void
13611 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13612 rtx operands[])
13614 int matching_memory;
13615 rtx src, dst, op, clob;
13617 dst = operands[0];
13618 src = operands[1];
13620 /* If the destination is memory, and we do not have matching source
13621 operands, do things in registers. */
13622 matching_memory = 0;
13623 if (MEM_P (dst))
13625 if (rtx_equal_p (dst, src))
13626 matching_memory = 1;
13627 else
13628 dst = gen_reg_rtx (mode);
13631 /* When source operand is memory, destination must match. */
13632 if (MEM_P (src) && !matching_memory)
13633 src = force_reg (mode, src);
13635 /* Emit the instruction. */
13637 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13638 if (reload_in_progress || code == NOT)
13640 /* Reload doesn't know about the flags register, and doesn't know that
13641 it doesn't want to clobber it. */
13642 gcc_assert (code == NOT);
13643 emit_insn (op);
13645 else
13647 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13648 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13651 /* Fix up the destination if needed. */
13652 if (dst != operands[0])
13653 emit_move_insn (operands[0], dst);
13656 #define LEA_SEARCH_THRESHOLD 12
13658 /* Search backward for a non-AGU definition of register number REGNO1
13659 or register number REGNO2 in INSN's basic block until we
13660 1. pass LEA_SEARCH_THRESHOLD instructions, or
13661 2. reach the BB boundary, or
13662 3. reach an AGU definition.
13663 Returns the distance between the non-AGU definition point and INSN.
13664 If there is no definition point, returns -1. */
13666 static int
13667 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13668 rtx insn)
13670 basic_block bb = BLOCK_FOR_INSN (insn);
13671 int distance = 0;
13672 df_ref *def_rec;
13673 enum attr_type insn_type;
13675 if (insn != BB_HEAD (bb))
13677 rtx prev = PREV_INSN (insn);
13678 while (prev && distance < LEA_SEARCH_THRESHOLD)
13680 if (INSN_P (prev))
13682 distance++;
13683 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13684 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13685 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13686 && (regno1 == DF_REF_REGNO (*def_rec)
13687 || regno2 == DF_REF_REGNO (*def_rec)))
13689 insn_type = get_attr_type (prev);
13690 if (insn_type != TYPE_LEA)
13691 goto done;
13694 if (prev == BB_HEAD (bb))
13695 break;
13696 prev = PREV_INSN (prev);
13700 if (distance < LEA_SEARCH_THRESHOLD)
13702 edge e;
13703 edge_iterator ei;
13704 bool simple_loop = false;
13706 FOR_EACH_EDGE (e, ei, bb->preds)
13707 if (e->src == bb)
13709 simple_loop = true;
13710 break;
13713 if (simple_loop)
13715 rtx prev = BB_END (bb);
13716 while (prev
13717 && prev != insn
13718 && distance < LEA_SEARCH_THRESHOLD)
13720 if (INSN_P (prev))
13722 distance++;
13723 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13724 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13725 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13726 && (regno1 == DF_REF_REGNO (*def_rec)
13727 || regno2 == DF_REF_REGNO (*def_rec)))
13729 insn_type = get_attr_type (prev);
13730 if (insn_type != TYPE_LEA)
13731 goto done;
13734 prev = PREV_INSN (prev);
13739 distance = -1;
13741 done:
13742 /* get_attr_type may modify recog data. We want to make sure
13743 that recog data is valid for instruction INSN, on which
13744 distance_non_agu_define is called. INSN is unchanged here. */
13745 extract_insn_cached (insn);
13746 return distance;
13749 /* Return the distance between INSN and the next insn that uses
13750 register number REGNO0 in a memory address. Return -1 if no such
13751 use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set. */
13753 static int
13754 distance_agu_use (unsigned int regno0, rtx insn)
13756 basic_block bb = BLOCK_FOR_INSN (insn);
13757 int distance = 0;
13758 df_ref *def_rec;
13759 df_ref *use_rec;
13761 if (insn != BB_END (bb))
13763 rtx next = NEXT_INSN (insn);
13764 while (next && distance < LEA_SEARCH_THRESHOLD)
13766 if (INSN_P (next))
13768 distance++;
13770 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13771 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13772 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13773 && regno0 == DF_REF_REGNO (*use_rec))
13775 /* Return DISTANCE if OP0 is used in memory
13776 address in NEXT. */
13777 return distance;
13780 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13781 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13782 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13783 && regno0 == DF_REF_REGNO (*def_rec))
13785 /* Return -1 if OP0 is set in NEXT. */
13786 return -1;
13789 if (next == BB_END (bb))
13790 break;
13791 next = NEXT_INSN (next);
13795 if (distance < LEA_SEARCH_THRESHOLD)
13797 edge e;
13798 edge_iterator ei;
13799 bool simple_loop = false;
13801 FOR_EACH_EDGE (e, ei, bb->succs)
13802 if (e->dest == bb)
13804 simple_loop = true;
13805 break;
13808 if (simple_loop)
13810 rtx next = BB_HEAD (bb);
13811 while (next
13812 && next != insn
13813 && distance < LEA_SEARCH_THRESHOLD)
13815 if (INSN_P (next))
13817 distance++;
13819 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13820 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13821 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13822 && regno0 == DF_REF_REGNO (*use_rec))
13824 /* Return DISTANCE if OP0 is used in memory
13825 address in NEXT. */
13826 return distance;
13829 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13830 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13831 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13832 && regno0 == DF_REF_REGNO (*def_rec))
13834 /* Return -1 if OP0 is set in NEXT. */
13835 return -1;
13839 next = NEXT_INSN (next);
13844 return -1;
13847 /* Define this macro to tune LEA priority vs. ADD; it takes effect when
13848 there is a dilemma of choosing LEA or ADD.
13849 Negative value: ADD is preferred over LEA
13850 Zero: Neutral
13851 Positive value: LEA is preferred over ADD */
13852 #define IX86_LEA_PRIORITY 2
13854 /* Return true if it is ok to optimize an ADD operation to an LEA
13855 operation to avoid flag register consumption. For processors
13856 like ATOM, if the destination register of the LEA holds an actual
13857 address that will be used soon, LEA is better; otherwise ADD
13858 is better. */
13860 bool
13861 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13862 rtx insn, rtx operands[])
13864 unsigned int regno0 = true_regnum (operands[0]);
13865 unsigned int regno1 = true_regnum (operands[1]);
13866 unsigned int regno2;
13868 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
13869 return regno0 != regno1;
13871 regno2 = true_regnum (operands[2]);
13873 /* If a = b + c, (a != b && a != c), we must use the lea form. */
13874 if (regno0 != regno1 && regno0 != regno2)
13875 return true;
13876 else
13878 int dist_define, dist_use;
13879 dist_define = distance_non_agu_define (regno1, regno2, insn);
13880 if (dist_define <= 0)
13881 return true;
13883 /* If this insn has both a backward non-AGU dependence and a forward
13884 AGU dependence, the one with the shorter distance takes effect. */
13885 dist_use = distance_agu_use (regno0, insn);
13886 if (dist_use <= 0
13887 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
13888 return false;
13890 return true;
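/* As an illustration of the heuristic above, take a sequence such as
   (registers chosen only for the example):

	add	%ebx, %eax		# or: lea (%eax,%ebx), %eax
	mov	(%eax), %ecx		# the result feeds an address soon

   Here the destination is used as a memory address within a short
   distance, so on TARGET_OPT_AGU processors the LEA form is preferred;
   if instead the inputs were defined by recent non-AGU instructions and
   the result is not used as an address nearby, the plain ADD wins.  */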
13894 /* Return true if destination reg of SET_BODY is shift count of
13895 USE_BODY. */
13897 static bool
13898 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
13900 rtx set_dest;
13901 rtx shift_rtx;
13902 int i;
13904 /* Retrieve destination of SET_BODY. */
13905 switch (GET_CODE (set_body))
13907 case SET:
13908 set_dest = SET_DEST (set_body);
13909 if (!set_dest || !REG_P (set_dest))
13910 return false;
13911 break;
13912 case PARALLEL:
13913 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
13914 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
13915 use_body))
13916 return true;
13917 default:
13918 return false;
13919 break;
13922 /* Retrieve shift count of USE_BODY. */
13923 switch (GET_CODE (use_body))
13925 case SET:
13926 shift_rtx = XEXP (use_body, 1);
13927 break;
13928 case PARALLEL:
13929 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
13930 if (ix86_dep_by_shift_count_body (set_body,
13931 XVECEXP (use_body, 0, i)))
13932 return true;
13933 default:
13934 return false;
13935 break;
13938 if (shift_rtx
13939 && (GET_CODE (shift_rtx) == ASHIFT
13940 || GET_CODE (shift_rtx) == LSHIFTRT
13941 || GET_CODE (shift_rtx) == ASHIFTRT
13942 || GET_CODE (shift_rtx) == ROTATE
13943 || GET_CODE (shift_rtx) == ROTATERT))
13945 rtx shift_count = XEXP (shift_rtx, 1);
13947 /* Return true if shift count is dest of SET_BODY. */
13948 if (REG_P (shift_count)
13949 && true_regnum (set_dest) == true_regnum (shift_count))
13950 return true;
13953 return false;
13956 /* Return true if destination reg of SET_INSN is shift count of
13957 USE_INSN. */
13959 bool
13960 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
13962 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
13963 PATTERN (use_insn));
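/* For example, with the insn bodies

	(set (reg:QI cl) ...)				<- SET_BODY
	(set (reg:SI r1)
	     (ashift:SI (reg:SI r1) (reg:QI cl)))	<- USE_BODY

   the destination of SET_BODY is the shift count of USE_BODY, so the
   function above returns true.  */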
13966 /* Return TRUE or FALSE depending on whether the unary operator meets the
13967 appropriate constraints. */
13970 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13971 enum machine_mode mode ATTRIBUTE_UNUSED,
13972 rtx operands[2] ATTRIBUTE_UNUSED)
13974 /* If one of operands is memory, source and destination must match. */
13975 if ((MEM_P (operands[0])
13976 || MEM_P (operands[1]))
13977 && ! rtx_equal_p (operands[0], operands[1]))
13978 return FALSE;
13979 return TRUE;
13982 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
13983 are ok, keeping in mind the possible movddup alternative. */
13985 bool
13986 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
13988 if (MEM_P (operands[0]))
13989 return rtx_equal_p (operands[0], operands[1 + high]);
13990 if (MEM_P (operands[1]) && MEM_P (operands[2]))
13991 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
13992 return true;
13995 /* Post-reload splitter for converting an SF or DFmode value in an
13996 SSE register into an unsigned SImode. */
13998 void
13999 ix86_split_convert_uns_si_sse (rtx operands[])
14001 enum machine_mode vecmode;
14002 rtx value, large, zero_or_two31, input, two31, x;
14004 large = operands[1];
14005 zero_or_two31 = operands[2];
14006 input = operands[3];
14007 two31 = operands[4];
14008 vecmode = GET_MODE (large);
14009 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
14011 /* Load up the value into the low element. We must ensure that the other
14012 elements are valid floats -- zero is the easiest such value. */
14013 if (MEM_P (input))
14015 if (vecmode == V4SFmode)
14016 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
14017 else
14018 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
14020 else
14022 input = gen_rtx_REG (vecmode, REGNO (input));
14023 emit_move_insn (value, CONST0_RTX (vecmode));
14024 if (vecmode == V4SFmode)
14025 emit_insn (gen_sse_movss (value, value, input));
14026 else
14027 emit_insn (gen_sse2_movsd (value, value, input));
14030 emit_move_insn (large, two31);
14031 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
14033 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
14034 emit_insn (gen_rtx_SET (VOIDmode, large, x));
14036 x = gen_rtx_AND (vecmode, zero_or_two31, large);
14037 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
14039 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14040 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14042 large = gen_rtx_REG (V4SImode, REGNO (large));
14043 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
14045 x = gen_rtx_REG (V4SImode, REGNO (value));
14046 if (vecmode == V4SFmode)
14047 emit_insn (gen_sse2_cvttps2dq (x, value));
14048 else
14049 emit_insn (gen_sse2_cvttpd2dq (x, value));
14050 value = x;
14052 emit_insn (gen_xorv4si3 (value, value, large));
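/* Written out for the (scalar) low element, the sequence above turns
   the signed cvtt instruction into an unsigned conversion roughly as:

	big  = (v >= 0x1.0p31);		LE compare producing an all-ones mask
	v   -= big ? 0x1.0p31 : 0.0;	subtract via the masked TWO31 value
	i    = (int) v;			signed truncating conversion
	i   ^= big ? 0x80000000 : 0;	restore the bit removed above

   which is the compare/and/minus/shift/xor chain emitted here.  */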
14055 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14056 Expects the 64-bit DImode to be supplied in a pair of integral
14057 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14058 -mfpmath=sse, !optimize_size only. */
14060 void
14061 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14063 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14064 rtx int_xmm, fp_xmm;
14065 rtx biases, exponents;
14066 rtx x;
14068 int_xmm = gen_reg_rtx (V4SImode);
14069 if (TARGET_INTER_UNIT_MOVES)
14070 emit_insn (gen_movdi_to_sse (int_xmm, input));
14071 else if (TARGET_SSE_SPLIT_REGS)
14073 emit_clobber (int_xmm);
14074 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14076 else
14078 x = gen_reg_rtx (V2DImode);
14079 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14080 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14083 x = gen_rtx_CONST_VECTOR (V4SImode,
14084 gen_rtvec (4, GEN_INT (0x43300000UL),
14085 GEN_INT (0x45300000UL),
14086 const0_rtx, const0_rtx));
14087 exponents = validize_mem (force_const_mem (V4SImode, x));
14089 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14090 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14092 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14093 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14094 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14095 (0x1.0p84 + double(fp_value_hi_xmm)).
14096 Note these exponents differ by 32. */
14098 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14100 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14101 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14102 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14103 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14104 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14105 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14106 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14107 biases = validize_mem (force_const_mem (V2DFmode, biases));
14108 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
14110 /* Add the upper and lower DFmode values together. */
14111 if (TARGET_SSE3)
14112 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14113 else
14115 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14116 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14117 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14120 ix86_expand_vector_extract (false, target, fp_xmm, 0);
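/* Worked through numerically, the bias trick above is exact for both
   halves of the input:

	(0x1.0p52 + (double) lo) - 0x1.0p52            == (double) lo
	(0x1.0p84 + (double) hi * 0x1.0p32) - 0x1.0p84 == (double) hi * 0x1.0p32

   because both sums are exactly representable in DFmode, so the only
   rounding happens in the final addition of the two partial results.  */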
14123 /* Not used, but eases macroization of patterns. */
14124 void
14125 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14126 rtx input ATTRIBUTE_UNUSED)
14128 gcc_unreachable ();
14131 /* Convert an unsigned SImode value into a DFmode. Only currently used
14132 for SSE, but applicable anywhere. */
14134 void
14135 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14137 REAL_VALUE_TYPE TWO31r;
14138 rtx x, fp;
14140 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14141 NULL, 1, OPTAB_DIRECT);
14143 fp = gen_reg_rtx (DFmode);
14144 emit_insn (gen_floatsidf2 (fp, x));
14146 real_ldexp (&TWO31r, &dconst1, 31);
14147 x = const_double_from_real_value (TWO31r, DFmode);
14149 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14150 if (x != target)
14151 emit_move_insn (target, x);
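/* Equivalently, the unsigned SImode input V is biased into the signed
   range with a wrapping SImode addition of -2^31, converted with the
   ordinary signed cvtsi2sd, and then 2^31 is added back in DFmode:

	(double) (int) (v - 0x80000000) + 0x1.0p31  ==  (double) v

   which is exact because DFmode can hold any 32-bit integer.  */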
14154 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14155 32-bit mode; otherwise we have a direct convert instruction. */
14157 void
14158 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14160 REAL_VALUE_TYPE TWO32r;
14161 rtx fp_lo, fp_hi, x;
14163 fp_lo = gen_reg_rtx (DFmode);
14164 fp_hi = gen_reg_rtx (DFmode);
14166 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14168 real_ldexp (&TWO32r, &dconst1, 32);
14169 x = const_double_from_real_value (TWO32r, DFmode);
14170 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14172 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14174 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14175 0, OPTAB_DIRECT);
14176 if (x != target)
14177 emit_move_insn (target, x);
14180 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14181 For x86_32, -mfpmath=sse, !optimize_size only. */
14182 void
14183 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14185 REAL_VALUE_TYPE ONE16r;
14186 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14188 real_ldexp (&ONE16r, &dconst1, 16);
14189 x = const_double_from_real_value (ONE16r, SFmode);
14190 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT (0xffff),
14191 NULL, 0, OPTAB_DIRECT);
14192 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT (16),
14193 NULL, 0, OPTAB_DIRECT);
14194 fp_hi = gen_reg_rtx (SFmode);
14195 fp_lo = gen_reg_rtx (SFmode);
14196 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14197 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14198 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14199 0, OPTAB_DIRECT);
14200 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14201 0, OPTAB_DIRECT);
14202 if (!rtx_equal_p (target, fp_hi))
14203 emit_move_insn (target, fp_hi);
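/* SFmode's 24-bit significand cannot hide a 32-bit integer the way the
   DFmode trick above does, so the input is split into 16-bit halves
   instead; conceptually:

	fp_hi  = (float) (v >> 16) * 65536.0f;
	fp_lo  = (float) (v & 0xffff);
	result = fp_hi + fp_lo;

   Each half converts exactly, and the scaling by 65536 is a power of
   two, so only the final addition can round.  */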
14206 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14207 then replicate the value for all elements of the vector
14208 register. */
14211 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14213 rtvec v;
14214 switch (mode)
14216 case SImode:
14217 gcc_assert (vect);
14218 v = gen_rtvec (4, value, value, value, value);
14219 return gen_rtx_CONST_VECTOR (V4SImode, v);
14221 case DImode:
14222 gcc_assert (vect);
14223 v = gen_rtvec (2, value, value);
14224 return gen_rtx_CONST_VECTOR (V2DImode, v);
14226 case SFmode:
14227 if (vect)
14228 v = gen_rtvec (4, value, value, value, value);
14229 else
14230 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14231 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14232 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14234 case DFmode:
14235 if (vect)
14236 v = gen_rtvec (2, value, value);
14237 else
14238 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14239 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14241 default:
14242 gcc_unreachable ();
14246 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14247 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14248 for an SSE register. If VECT is true, then replicate the mask for
14249 all elements of the vector register. If INVERT is true, then create
14250 a mask excluding the sign bit. */
14253 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14255 enum machine_mode vec_mode, imode;
14256 HOST_WIDE_INT hi, lo;
14257 int shift = 63;
14258 rtx v;
14259 rtx mask;
14261 /* Find the sign bit, sign extended to 2*HWI. */
14262 switch (mode)
14264 case SImode:
14265 case SFmode:
14266 imode = SImode;
14267 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14268 lo = 0x80000000, hi = lo < 0;
14269 break;
14271 case DImode:
14272 case DFmode:
14273 imode = DImode;
14274 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14275 if (HOST_BITS_PER_WIDE_INT >= 64)
14276 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14277 else
14278 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14279 break;
14281 case TImode:
14282 case TFmode:
14283 vec_mode = VOIDmode;
14284 if (HOST_BITS_PER_WIDE_INT >= 64)
14286 imode = TImode;
14287 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14289 else
14291 rtvec vec;
14293 imode = DImode;
14294 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14296 if (invert)
14298 lo = ~lo, hi = ~hi;
14299 v = constm1_rtx;
14301 else
14302 v = const0_rtx;
14304 mask = immed_double_const (lo, hi, imode);
14306 vec = gen_rtvec (2, v, mask);
14307 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14308 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14310 return v;
14312 break;
14314 default:
14315 gcc_unreachable ();
14318 if (invert)
14319 lo = ~lo, hi = ~hi;
14321 /* Force this value into the low part of a fp vector constant. */
14322 mask = immed_double_const (lo, hi, imode);
14323 mask = gen_lowpart (mode, mask);
14325 if (vec_mode == VOIDmode)
14326 return force_reg (mode, mask);
14328 v = ix86_build_const_vector (mode, vect, mask);
14329 return force_reg (vec_mode, v);
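/* For example, for DFmode with VECT set and INVERT clear the constant
   built above is the V2DF bit pattern

	{ 0x8000000000000000, 0x8000000000000000 }

   i.e. only the sign bits set; with INVERT it is the complement,
   0x7fffffffffffffff in each element, everything but the sign bit.  */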
14332 /* Generate code for floating point ABS or NEG. */
14334 void
14335 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14336 rtx operands[])
14338 rtx mask, set, use, clob, dst, src;
14339 bool use_sse = false;
14340 bool vector_mode = VECTOR_MODE_P (mode);
14341 enum machine_mode elt_mode = mode;
14343 if (vector_mode)
14345 elt_mode = GET_MODE_INNER (mode);
14346 use_sse = true;
14348 else if (mode == TFmode)
14349 use_sse = true;
14350 else if (TARGET_SSE_MATH)
14351 use_sse = SSE_FLOAT_MODE_P (mode);
14353 /* NEG and ABS performed with SSE use bitwise mask operations.
14354 Create the appropriate mask now. */
14355 if (use_sse)
14356 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14357 else
14358 mask = NULL_RTX;
14360 dst = operands[0];
14361 src = operands[1];
14363 if (vector_mode)
14365 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14366 set = gen_rtx_SET (VOIDmode, dst, set);
14367 emit_insn (set);
14369 else
14371 set = gen_rtx_fmt_e (code, mode, src);
14372 set = gen_rtx_SET (VOIDmode, dst, set);
14373 if (mask)
14375 use = gen_rtx_USE (VOIDmode, mask);
14376 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14377 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14378 gen_rtvec (3, set, use, clob)));
14380 else
14381 emit_insn (set);
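/* With SSE math both operations therefore reduce to a bitwise op
   against that mask; e.g. for DFmode values:

	NEG:  dst = src ^ 0x8000000000000000	(flip the sign bit)
	ABS:  dst = src & 0x7fffffffffffffff	(clear the sign bit)
*/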
14385 /* Expand a copysign operation. Special case operand 0 being a constant. */
14387 void
14388 ix86_expand_copysign (rtx operands[])
14390 enum machine_mode mode;
14391 rtx dest, op0, op1, mask, nmask;
14393 dest = operands[0];
14394 op0 = operands[1];
14395 op1 = operands[2];
14397 mode = GET_MODE (dest);
14399 if (GET_CODE (op0) == CONST_DOUBLE)
14401 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14403 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14404 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14406 if (mode == SFmode || mode == DFmode)
14408 enum machine_mode vmode;
14410 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14412 if (op0 == CONST0_RTX (mode))
14413 op0 = CONST0_RTX (vmode);
14414 else
14416 rtx v = ix86_build_const_vector (mode, false, op0);
14418 op0 = force_reg (vmode, v);
14421 else if (op0 != CONST0_RTX (mode))
14422 op0 = force_reg (mode, op0);
14424 mask = ix86_build_signbit_mask (mode, 0, 0);
14426 if (mode == SFmode)
14427 copysign_insn = gen_copysignsf3_const;
14428 else if (mode == DFmode)
14429 copysign_insn = gen_copysigndf3_const;
14430 else
14431 copysign_insn = gen_copysigntf3_const;
14433 emit_insn (copysign_insn (dest, op0, op1, mask));
14435 else
14437 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14439 nmask = ix86_build_signbit_mask (mode, 0, 1);
14440 mask = ix86_build_signbit_mask (mode, 0, 0);
14442 if (mode == SFmode)
14443 copysign_insn = gen_copysignsf3_var;
14444 else if (mode == DFmode)
14445 copysign_insn = gen_copysigndf3_var;
14446 else
14447 copysign_insn = gen_copysigntf3_var;
14449 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
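/* Both branches implement the usual bit-level identity

	copysign (x, y) == (x & ~signmask) | (y & signmask)

   with the (x & ~signmask) term folded to a constant |x| in the
   CONST_DOUBLE case handled first.  */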
14453 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14454 be a constant, and so has already been expanded into a vector constant. */
14456 void
14457 ix86_split_copysign_const (rtx operands[])
14459 enum machine_mode mode, vmode;
14460 rtx dest, op0, mask, x;
14462 dest = operands[0];
14463 op0 = operands[1];
14464 mask = operands[3];
14466 mode = GET_MODE (dest);
14467 vmode = GET_MODE (mask);
14469 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14470 x = gen_rtx_AND (vmode, dest, mask);
14471 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14473 if (op0 != CONST0_RTX (vmode))
14475 x = gen_rtx_IOR (vmode, dest, op0);
14476 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14480 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14481 so we have to do two masks. */
14483 void
14484 ix86_split_copysign_var (rtx operands[])
14486 enum machine_mode mode, vmode;
14487 rtx dest, scratch, op0, op1, mask, nmask, x;
14489 dest = operands[0];
14490 scratch = operands[1];
14491 op0 = operands[2];
14492 op1 = operands[3];
14493 nmask = operands[4];
14494 mask = operands[5];
14496 mode = GET_MODE (dest);
14497 vmode = GET_MODE (mask);
14499 if (rtx_equal_p (op0, op1))
14501 /* Shouldn't happen often (it's useless, obviously), but when it does
14502 we'd generate incorrect code if we continue below. */
14503 emit_move_insn (dest, op0);
14504 return;
14507 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14509 gcc_assert (REGNO (op1) == REGNO (scratch));
14511 x = gen_rtx_AND (vmode, scratch, mask);
14512 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14514 dest = mask;
14515 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14516 x = gen_rtx_NOT (vmode, dest);
14517 x = gen_rtx_AND (vmode, x, op0);
14518 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14520 else
14522 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14524 x = gen_rtx_AND (vmode, scratch, mask);
14526 else /* alternative 2,4 */
14528 gcc_assert (REGNO (mask) == REGNO (scratch));
14529 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14530 x = gen_rtx_AND (vmode, scratch, op1);
14532 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14534 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14536 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14537 x = gen_rtx_AND (vmode, dest, nmask);
14539 else /* alternative 3,4 */
14541 gcc_assert (REGNO (nmask) == REGNO (dest));
14542 dest = nmask;
14543 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14544 x = gen_rtx_AND (vmode, dest, op0);
14546 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14549 x = gen_rtx_IOR (vmode, dest, scratch);
14550 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14553 /* Return TRUE or FALSE depending on whether the first SET in INSN
14554 has source and destination with matching CC modes, and that the
14555 CC mode is at least as constrained as REQ_MODE. */
14558 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14560 rtx set;
14561 enum machine_mode set_mode;
14563 set = PATTERN (insn);
14564 if (GET_CODE (set) == PARALLEL)
14565 set = XVECEXP (set, 0, 0);
14566 gcc_assert (GET_CODE (set) == SET);
14567 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14569 set_mode = GET_MODE (SET_DEST (set));
14570 switch (set_mode)
14572 case CCNOmode:
14573 if (req_mode != CCNOmode
14574 && (req_mode != CCmode
14575 || XEXP (SET_SRC (set), 1) != const0_rtx))
14576 return 0;
14577 break;
14578 case CCmode:
14579 if (req_mode == CCGCmode)
14580 return 0;
14581 /* FALLTHRU */
14582 case CCGCmode:
14583 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14584 return 0;
14585 /* FALLTHRU */
14586 case CCGOCmode:
14587 if (req_mode == CCZmode)
14588 return 0;
14589 /* FALLTHRU */
14590 case CCAmode:
14591 case CCCmode:
14592 case CCOmode:
14593 case CCSmode:
14594 case CCZmode:
14595 break;
14597 default:
14598 gcc_unreachable ();
14601 return (GET_MODE (SET_SRC (set)) == set_mode);
14604 /* Generate insn patterns to do an integer compare of OPERANDS. */
14606 static rtx
14607 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14609 enum machine_mode cmpmode;
14610 rtx tmp, flags;
14612 cmpmode = SELECT_CC_MODE (code, op0, op1);
14613 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14615 /* This is very simple, but making the interface the same as in the
14616 FP case makes the rest of the code easier. */
14617 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14618 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14620 /* Return the test that should be put into the flags user, i.e.
14621 the bcc, scc, or cmov instruction. */
14622 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14625 /* Figure out whether to use ordered or unordered fp comparisons.
14626 Return the appropriate mode to use. */
14628 enum machine_mode
14629 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14631 /* ??? In order to make all comparisons reversible, we do all comparisons
14632 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14633 all forms of trapping and nontrapping comparisons, we can make inequality
14634 comparisons trapping again, since it results in better code when using
14635 FCOM based compares. */
14636 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14639 enum machine_mode
14640 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14642 enum machine_mode mode = GET_MODE (op0);
14644 if (SCALAR_FLOAT_MODE_P (mode))
14646 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14647 return ix86_fp_compare_mode (code);
14650 switch (code)
14652 /* Only zero flag is needed. */
14653 case EQ: /* ZF=0 */
14654 case NE: /* ZF!=0 */
14655 return CCZmode;
14656 /* Codes needing carry flag. */
14657 case GEU: /* CF=0 */
14658 case LTU: /* CF=1 */
14659 /* Detect overflow checks. They need just the carry flag. */
14660 if (GET_CODE (op0) == PLUS
14661 && rtx_equal_p (op1, XEXP (op0, 0)))
14662 return CCCmode;
14663 else
14664 return CCmode;
14665 case GTU: /* CF=0 & ZF=0 */
14666 case LEU: /* CF=1 | ZF=1 */
14667 /* Detect overflow checks. They need just the carry flag. */
14668 if (GET_CODE (op0) == MINUS
14669 && rtx_equal_p (op1, XEXP (op0, 0)))
14670 return CCCmode;
14671 else
14672 return CCmode;
14673 /* Codes possibly doable only with sign flag when
14674 comparing against zero. */
14675 case GE: /* SF=OF or SF=0 */
14676 case LT: /* SF<>OF or SF=1 */
14677 if (op1 == const0_rtx)
14678 return CCGOCmode;
14679 else
14680 /* For other cases Carry flag is not required. */
14681 return CCGCmode;
14682 /* Codes doable only with the sign flag when comparing
14683 against zero, but we miss the jump instruction for it,
14684 so we need to use relational tests against overflow,
14685 which thus needs to be zero. */
14686 case GT: /* ZF=0 & SF=OF */
14687 case LE: /* ZF=1 | SF<>OF */
14688 if (op1 == const0_rtx)
14689 return CCNOmode;
14690 else
14691 return CCGCmode;
14692 /* The strcmp pattern does a (use flags), and combine may ask us for
14693 the proper mode. */
14694 case USE:
14695 return CCmode;
14696 default:
14697 gcc_unreachable ();
14701 /* Return the fixed registers used for condition codes. */
14703 static bool
14704 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14706 *p1 = FLAGS_REG;
14707 *p2 = FPSR_REG;
14708 return true;
14711 /* If two condition code modes are compatible, return a condition code
14712 mode which is compatible with both. Otherwise, return
14713 VOIDmode. */
14715 static enum machine_mode
14716 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14718 if (m1 == m2)
14719 return m1;
14721 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14722 return VOIDmode;
14724 if ((m1 == CCGCmode && m2 == CCGOCmode)
14725 || (m1 == CCGOCmode && m2 == CCGCmode))
14726 return CCGCmode;
14728 switch (m1)
14730 default:
14731 gcc_unreachable ();
14733 case CCmode:
14734 case CCGCmode:
14735 case CCGOCmode:
14736 case CCNOmode:
14737 case CCAmode:
14738 case CCCmode:
14739 case CCOmode:
14740 case CCSmode:
14741 case CCZmode:
14742 switch (m2)
14744 default:
14745 return VOIDmode;
14747 case CCmode:
14748 case CCGCmode:
14749 case CCGOCmode:
14750 case CCNOmode:
14751 case CCAmode:
14752 case CCCmode:
14753 case CCOmode:
14754 case CCSmode:
14755 case CCZmode:
14756 return CCmode;
14759 case CCFPmode:
14760 case CCFPUmode:
14761 /* These are only compatible with themselves, which we already
14762 checked above. */
14763 return VOIDmode;
14768 /* Return a comparison we can do and that is equivalent to
14769 swap_condition (code), apart possibly from orderedness.
14770 But, never change orderedness if TARGET_IEEE_FP, returning
14771 UNKNOWN in that case if necessary. */
14773 static enum rtx_code
14774 ix86_fp_swap_condition (enum rtx_code code)
14776 switch (code)
14778 case GT: /* GTU - CF=0 & ZF=0 */
14779 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14780 case GE: /* GEU - CF=0 */
14781 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14782 case UNLT: /* LTU - CF=1 */
14783 return TARGET_IEEE_FP ? UNKNOWN : GT;
14784 case UNLE: /* LEU - CF=1 | ZF=1 */
14785 return TARGET_IEEE_FP ? UNKNOWN : GE;
14786 default:
14787 return swap_condition (code);
14791 /* Return the cost of comparison CODE using the best strategy for performance.
14792 All of the following functions use the number of instructions as a cost metric.
14793 In the future this should be tweaked to compute bytes for optimize_size and
14794 take into account the performance of various instructions on various CPUs. */
14796 static int
14797 ix86_fp_comparison_cost (enum rtx_code code)
14799 int arith_cost;
14801 /* The cost of code using bit-twiddling on %ah. */
14802 switch (code)
14804 case UNLE:
14805 case UNLT:
14806 case LTGT:
14807 case GT:
14808 case GE:
14809 case UNORDERED:
14810 case ORDERED:
14811 case UNEQ:
14812 arith_cost = 4;
14813 break;
14814 case LT:
14815 case NE:
14816 case EQ:
14817 case UNGE:
14818 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14819 break;
14820 case LE:
14821 case UNGT:
14822 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14823 break;
14824 default:
14825 gcc_unreachable ();
14828 switch (ix86_fp_comparison_strategy (code))
14830 case IX86_FPCMP_COMI:
14831 return arith_cost > 4 ? 3 : 2;
14832 case IX86_FPCMP_SAHF:
14833 return arith_cost > 4 ? 4 : 3;
14834 default:
14835 return arith_cost;
14839 /* Return strategy to use for floating-point. We assume that fcomi is always
14840 preferable where available, since that is also true when looking at size
14841 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14843 enum ix86_fpcmp_strategy
14844 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14846 /* Do fcomi/sahf based test when profitable. */
14848 if (TARGET_CMOVE)
14849 return IX86_FPCMP_COMI;
14851 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14852 return IX86_FPCMP_SAHF;
14854 return IX86_FPCMP_ARITH;
14857 /* Swap, force into registers, or otherwise massage the two operands
14858 to a fp comparison. The operands are updated in place; the new
14859 comparison code is returned. */
14861 static enum rtx_code
14862 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
14864 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
14865 rtx op0 = *pop0, op1 = *pop1;
14866 enum machine_mode op_mode = GET_MODE (op0);
14867 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
14869 /* All of the unordered compare instructions only work on registers.
14870 The same is true of the fcomi compare instructions. The XFmode
14871 compare instructions require registers except when comparing
14872 against zero or when converting operand 1 from fixed point to
14873 floating point. */
14875 if (!is_sse
14876 && (fpcmp_mode == CCFPUmode
14877 || (op_mode == XFmode
14878 && ! (standard_80387_constant_p (op0) == 1
14879 || standard_80387_constant_p (op1) == 1)
14880 && GET_CODE (op1) != FLOAT)
14881 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
14883 op0 = force_reg (op_mode, op0);
14884 op1 = force_reg (op_mode, op1);
14886 else
14888 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
14889 things around if they appear profitable, otherwise force op0
14890 into a register. */
14892 if (standard_80387_constant_p (op0) == 0
14893 || (MEM_P (op0)
14894 && ! (standard_80387_constant_p (op1) == 0
14895 || MEM_P (op1))))
14897 enum rtx_code new_code = ix86_fp_swap_condition (code);
14898 if (new_code != UNKNOWN)
14900 rtx tmp;
14901 tmp = op0, op0 = op1, op1 = tmp;
14902 code = new_code;
14906 if (!REG_P (op0))
14907 op0 = force_reg (op_mode, op0);
14909 if (CONSTANT_P (op1))
14911 int tmp = standard_80387_constant_p (op1);
14912 if (tmp == 0)
14913 op1 = validize_mem (force_const_mem (op_mode, op1));
14914 else if (tmp == 1)
14916 if (TARGET_CMOVE)
14917 op1 = force_reg (op_mode, op1);
14919 else
14920 op1 = force_reg (op_mode, op1);
14924 /* Try to rearrange the comparison to make it cheaper. */
14925 if (ix86_fp_comparison_cost (code)
14926 > ix86_fp_comparison_cost (swap_condition (code))
14927 && (REG_P (op1) || can_create_pseudo_p ()))
14929 rtx tmp;
14930 tmp = op0, op0 = op1, op1 = tmp;
14931 code = swap_condition (code);
14932 if (!REG_P (op0))
14933 op0 = force_reg (op_mode, op0);
14936 *pop0 = op0;
14937 *pop1 = op1;
14938 return code;
14941 /* Convert comparison codes we use to represent FP comparison to integer
14942 code that will result in proper branch. Return UNKNOWN if no such code
14943 is available. */
14945 enum rtx_code
14946 ix86_fp_compare_code_to_integer (enum rtx_code code)
14948 switch (code)
14950 case GT:
14951 return GTU;
14952 case GE:
14953 return GEU;
14954 case ORDERED:
14955 case UNORDERED:
14956 return code;
14957 break;
14958 case UNEQ:
14959 return EQ;
14960 break;
14961 case UNLT:
14962 return LTU;
14963 break;
14964 case UNLE:
14965 return LEU;
14966 break;
14967 case LTGT:
14968 return NE;
14969 break;
14970 default:
14971 return UNKNOWN;
14975 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14977 static rtx
14978 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
14980 enum machine_mode fpcmp_mode, intcmp_mode;
14981 rtx tmp, tmp2;
14983 fpcmp_mode = ix86_fp_compare_mode (code);
14984 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14986 /* Do fcomi/sahf based test when profitable. */
14987 switch (ix86_fp_comparison_strategy (code))
14989 case IX86_FPCMP_COMI:
14990 intcmp_mode = fpcmp_mode;
14991 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14992 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14993 tmp);
14994 emit_insn (tmp);
14995 break;
14997 case IX86_FPCMP_SAHF:
14998 intcmp_mode = fpcmp_mode;
14999 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15000 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15001 tmp);
15003 if (!scratch)
15004 scratch = gen_reg_rtx (HImode);
15005 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
15006 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
15007 break;
15009 case IX86_FPCMP_ARITH:
15010 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
15011 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15012 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
15013 if (!scratch)
15014 scratch = gen_reg_rtx (HImode);
15015 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
15017 /* In the unordered case, we have to check C2 for NaN's, which
15018 doesn't happen to work out to anything nice combination-wise.
15019 So do some bit twiddling on the value we've got in AH to come
15020 up with an appropriate set of condition codes. */
15022 intcmp_mode = CCNOmode;
15023 switch (code)
15025 case GT:
15026 case UNGT:
15027 if (code == GT || !TARGET_IEEE_FP)
15029 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15030 code = EQ;
15032 else
15034 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15035 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15036 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
15037 intcmp_mode = CCmode;
15038 code = GEU;
15040 break;
15041 case LT:
15042 case UNLT:
15043 if (code == LT && TARGET_IEEE_FP)
15045 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15046 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15047 intcmp_mode = CCmode;
15048 code = EQ;
15050 else
15052 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15053 code = NE;
15055 break;
15056 case GE:
15057 case UNGE:
15058 if (code == GE || !TARGET_IEEE_FP)
15060 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15061 code = EQ;
15063 else
15065 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15066 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15067 code = NE;
15069 break;
15070 case LE:
15071 case UNLE:
15072 if (code == LE && TARGET_IEEE_FP)
15074 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15075 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15076 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15077 intcmp_mode = CCmode;
15078 code = LTU;
15080 else
15082 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15083 code = NE;
15085 break;
15086 case EQ:
15087 case UNEQ:
15088 if (code == EQ && TARGET_IEEE_FP)
15090 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15091 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15092 intcmp_mode = CCmode;
15093 code = EQ;
15095 else
15097 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15098 code = NE;
15100 break;
15101 case NE:
15102 case LTGT:
15103 if (code == NE && TARGET_IEEE_FP)
15105 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15106 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15107 GEN_INT (0x40)));
15108 code = NE;
15110 else
15112 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15113 code = EQ;
15115 break;
15117 case UNORDERED:
15118 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15119 code = NE;
15120 break;
15121 case ORDERED:
15122 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15123 code = EQ;
15124 break;
15126 default:
15127 gcc_unreachable ();
15129 break;
15131 default:
15132 gcc_unreachable ();
15135 /* Return the test that should be put into the flags user, i.e.
15136 the bcc, scc, or cmov instruction. */
15137 return gen_rtx_fmt_ee (code, VOIDmode,
15138 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15139 const0_rtx);
15143 ix86_expand_compare (enum rtx_code code)
15145 rtx op0, op1, ret;
15146 op0 = ix86_compare_op0;
15147 op1 = ix86_compare_op1;
15149 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15150 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15152 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15154 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15155 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15157 else
15158 ret = ix86_expand_int_compare (code, op0, op1);
15160 return ret;
15163 void
15164 ix86_expand_branch (enum rtx_code code, rtx label)
15166 rtx tmp;
15168 switch (GET_MODE (ix86_compare_op0))
15170 case SFmode:
15171 case DFmode:
15172 case XFmode:
15173 case QImode:
15174 case HImode:
15175 case SImode:
15176 simple:
15177 tmp = ix86_expand_compare (code);
15178 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15179 gen_rtx_LABEL_REF (VOIDmode, label),
15180 pc_rtx);
15181 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15182 return;
15184 case DImode:
15185 if (TARGET_64BIT)
15186 goto simple;
15187 case TImode:
15188 /* Expand DImode branch into multiple compare+branch. */
15190 rtx lo[2], hi[2], label2;
15191 enum rtx_code code1, code2, code3;
15192 enum machine_mode submode;
15194 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15196 tmp = ix86_compare_op0;
15197 ix86_compare_op0 = ix86_compare_op1;
15198 ix86_compare_op1 = tmp;
15199 code = swap_condition (code);
15201 if (GET_MODE (ix86_compare_op0) == DImode)
15203 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15204 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15205 submode = SImode;
15207 else
15209 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15210 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15211 submode = DImode;
15214 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15215 avoid two branches. This costs one extra insn, so disable when
15216 optimizing for size. */
15218 if ((code == EQ || code == NE)
15219 && (!optimize_insn_for_size_p ()
15220 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15222 rtx xor0, xor1;
15224 xor1 = hi[0];
15225 if (hi[1] != const0_rtx)
15226 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15227 NULL_RTX, 0, OPTAB_WIDEN);
15229 xor0 = lo[0];
15230 if (lo[1] != const0_rtx)
15231 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15232 NULL_RTX, 0, OPTAB_WIDEN);
15234 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15235 NULL_RTX, 0, OPTAB_WIDEN);
15237 ix86_compare_op0 = tmp;
15238 ix86_compare_op1 = const0_rtx;
15239 ix86_expand_branch (code, label);
15240 return;
15243 /* Otherwise, if we are doing a less-than or greater-than-or-equal comparison,
15244 op1 is a constant and the low word is zero, then we can just
15245 examine the high word. Similarly for a low word of -1 and
15246 less-than-or-equal or greater-than. */
15248 if (CONST_INT_P (hi[1]))
15249 switch (code)
15251 case LT: case LTU: case GE: case GEU:
15252 if (lo[1] == const0_rtx)
15254 ix86_compare_op0 = hi[0];
15255 ix86_compare_op1 = hi[1];
15256 ix86_expand_branch (code, label);
15257 return;
15259 break;
15260 case LE: case LEU: case GT: case GTU:
15261 if (lo[1] == constm1_rtx)
15263 ix86_compare_op0 = hi[0];
15264 ix86_compare_op1 = hi[1];
15265 ix86_expand_branch (code, label);
15266 return;
15268 break;
15269 default:
15270 break;
15273 /* Otherwise, we need two or three jumps. */
15275 label2 = gen_label_rtx ();
15277 code1 = code;
15278 code2 = swap_condition (code);
15279 code3 = unsigned_condition (code);
15281 switch (code)
15283 case LT: case GT: case LTU: case GTU:
15284 break;
15286 case LE: code1 = LT; code2 = GT; break;
15287 case GE: code1 = GT; code2 = LT; break;
15288 case LEU: code1 = LTU; code2 = GTU; break;
15289 case GEU: code1 = GTU; code2 = LTU; break;
15291 case EQ: code1 = UNKNOWN; code2 = NE; break;
15292 case NE: code2 = UNKNOWN; break;
15294 default:
15295 gcc_unreachable ();
15299 * a < b =>
15300 * if (hi(a) < hi(b)) goto true;
15301 * if (hi(a) > hi(b)) goto false;
15302 * if (lo(a) < lo(b)) goto true;
15303 * false:
15306 ix86_compare_op0 = hi[0];
15307 ix86_compare_op1 = hi[1];
15309 if (code1 != UNKNOWN)
15310 ix86_expand_branch (code1, label);
15311 if (code2 != UNKNOWN)
15312 ix86_expand_branch (code2, label2);
15314 ix86_compare_op0 = lo[0];
15315 ix86_compare_op1 = lo[1];
15316 ix86_expand_branch (code3, label);
15318 if (code2 != UNKNOWN)
15319 emit_label (label2);
15320 return;
15323 default:
15324 /* If we have already emitted a compare insn, go straight to simple.
15325 ix86_expand_compare won't emit anything if ix86_compare_emitted
15326 is non-NULL. */
15327 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15328 goto simple;
15332 /* Split branch based on floating point condition. */
15333 void
15334 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15335 rtx target1, rtx target2, rtx tmp, rtx pushed)
15337 rtx condition;
15338 rtx i;
15340 if (target2 != pc_rtx)
15342 rtx tmp = target2;
15343 code = reverse_condition_maybe_unordered (code);
15344 target2 = target1;
15345 target1 = tmp;
15348 condition = ix86_expand_fp_compare (code, op1, op2,
15349 tmp);
15351 /* Remove pushed operand from stack. */
15352 if (pushed)
15353 ix86_free_from_memory (GET_MODE (pushed));
15355 i = emit_jump_insn (gen_rtx_SET
15356 (VOIDmode, pc_rtx,
15357 gen_rtx_IF_THEN_ELSE (VOIDmode,
15358 condition, target1, target2)));
15359 if (split_branch_probability >= 0)
15360 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15363 void
15364 ix86_expand_setcc (enum rtx_code code, rtx dest)
15366 rtx ret;
15368 gcc_assert (GET_MODE (dest) == QImode);
15370 ret = ix86_expand_compare (code);
15371 PUT_MODE (ret, QImode);
15372 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15375 /* Expand comparison setting or clearing carry flag. Return true when
15376 successful and set pop for the operation. */
15377 static bool
15378 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15380 enum machine_mode mode =
15381 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15383 /* Do not handle DImode compares that go through special path. */
15384 if (mode == (TARGET_64BIT ? TImode : DImode))
15385 return false;
15387 if (SCALAR_FLOAT_MODE_P (mode))
15389 rtx compare_op, compare_seq;
15391 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15393 /* Shortcut: the following common codes never translate
15394 into carry flag compares. */
15395 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15396 || code == ORDERED || code == UNORDERED)
15397 return false;
15399 /* These comparisons require zero flag; swap operands so they won't. */
15400 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15401 && !TARGET_IEEE_FP)
15403 rtx tmp = op0;
15404 op0 = op1;
15405 op1 = tmp;
15406 code = swap_condition (code);
15409 /* Try to expand the comparison and verify that we end up with
15410 a carry-flag-based comparison. This fails to be true only when
15411 we decide to expand the comparison using arithmetic, which is not
15412 a very common scenario. */
15413 start_sequence ();
15414 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15415 compare_seq = get_insns ();
15416 end_sequence ();
15418 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15419 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15420 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15421 else
15422 code = GET_CODE (compare_op);
15424 if (code != LTU && code != GEU)
15425 return false;
15427 emit_insn (compare_seq);
15428 *pop = compare_op;
15429 return true;
15432 if (!INTEGRAL_MODE_P (mode))
15433 return false;
15435 switch (code)
15437 case LTU:
15438 case GEU:
15439 break;
15441 /* Convert a==0 into (unsigned)a<1. */
15442 case EQ:
15443 case NE:
15444 if (op1 != const0_rtx)
15445 return false;
15446 op1 = const1_rtx;
15447 code = (code == EQ ? LTU : GEU);
15448 break;
15450 /* Convert a>b into b<a or a>=b-1. */
15451 case GTU:
15452 case LEU:
15453 if (CONST_INT_P (op1))
15455 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15456 /* Bail out on overflow. We could still swap the operands, but that
15457 would force loading the constant into a register. */
15458 if (op1 == const0_rtx
15459 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15460 return false;
15461 code = (code == GTU ? GEU : LTU);
15463 else
15465 rtx tmp = op1;
15466 op1 = op0;
15467 op0 = tmp;
15468 code = (code == GTU ? LTU : GEU);
15470 break;
15472 /* Convert a>=0 into (unsigned)a<0x80000000. */
15473 case LT:
15474 case GE:
15475 if (mode == DImode || op1 != const0_rtx)
15476 return false;
15477 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15478 code = (code == LT ? GEU : LTU);
15479 break;
15480 case LE:
15481 case GT:
15482 if (mode == DImode || op1 != constm1_rtx)
15483 return false;
15484 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15485 code = (code == LE ? GEU : LTU);
15486 break;
15488 default:
15489 return false;
15491 /* Swapping operands may cause a constant to appear as the first operand. */
15492 if (!nonimmediate_operand (op0, VOIDmode))
15494 if (!can_create_pseudo_p ())
15495 return false;
15496 op0 = force_reg (mode, op0);
15498 ix86_compare_op0 = op0;
15499 ix86_compare_op1 = op1;
15500 *pop = ix86_expand_compare (code);
15501 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
15502 return true;
15506 ix86_expand_int_movcc (rtx operands[])
15508 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15509 rtx compare_seq, compare_op;
15510 enum machine_mode mode = GET_MODE (operands[0]);
15511 bool sign_bit_compare_p = false;
15513 start_sequence ();
15514 ix86_compare_op0 = XEXP (operands[1], 0);
15515 ix86_compare_op1 = XEXP (operands[1], 1);
15516 compare_op = ix86_expand_compare (code);
15517 compare_seq = get_insns ();
15518 end_sequence ();
15520 compare_code = GET_CODE (compare_op);
15522 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15523 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15524 sign_bit_compare_p = true;
15526 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15527 HImode insns, we'd be swallowed in word prefix ops. */
15529 if ((mode != HImode || TARGET_FAST_PREFIX)
15530 && (mode != (TARGET_64BIT ? TImode : DImode))
15531 && CONST_INT_P (operands[2])
15532 && CONST_INT_P (operands[3]))
15534 rtx out = operands[0];
15535 HOST_WIDE_INT ct = INTVAL (operands[2]);
15536 HOST_WIDE_INT cf = INTVAL (operands[3]);
15537 HOST_WIDE_INT diff;
15539 diff = ct - cf;
15540 /* Sign bit compares are better done using shifts than by using
15541 sbb. */
15542 if (sign_bit_compare_p
15543 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15544 ix86_compare_op1, &compare_op))
15546 /* Detect overlap between destination and compare sources. */
15547 rtx tmp = out;
15549 if (!sign_bit_compare_p)
15551 rtx flags;
15552 bool fpcmp = false;
15554 compare_code = GET_CODE (compare_op);
15556 flags = XEXP (compare_op, 0);
15558 if (GET_MODE (flags) == CCFPmode
15559 || GET_MODE (flags) == CCFPUmode)
15561 fpcmp = true;
15562 compare_code
15563 = ix86_fp_compare_code_to_integer (compare_code);
15566 /* To simplify the rest of the code, restrict to the GEU case. */
15567 if (compare_code == LTU)
15569 HOST_WIDE_INT tmp = ct;
15570 ct = cf;
15571 cf = tmp;
15572 compare_code = reverse_condition (compare_code);
15573 code = reverse_condition (code);
15575 else
15577 if (fpcmp)
15578 PUT_CODE (compare_op,
15579 reverse_condition_maybe_unordered
15580 (GET_CODE (compare_op)));
15581 else
15582 PUT_CODE (compare_op,
15583 reverse_condition (GET_CODE (compare_op)));
15585 diff = ct - cf;
15587 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15588 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15589 tmp = gen_reg_rtx (mode);
15591 if (mode == DImode)
15592 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15593 else
15594 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15595 flags, compare_op));
15597 else
15599 if (code == GT || code == GE)
15600 code = reverse_condition (code);
15601 else
15603 HOST_WIDE_INT tmp = ct;
15604 ct = cf;
15605 cf = tmp;
15606 diff = ct - cf;
15608 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15609 ix86_compare_op1, VOIDmode, 0, -1);
15612 if (diff == 1)
15615 * cmpl op0,op1
15616 * sbbl dest,dest
15617 * [addl dest, ct]
15619 * Size 5 - 8.
15621 if (ct)
15622 tmp = expand_simple_binop (mode, PLUS,
15623 tmp, GEN_INT (ct),
15624 copy_rtx (tmp), 1, OPTAB_DIRECT);
15626 else if (cf == -1)
15629 * cmpl op0,op1
15630 * sbbl dest,dest
15631 * orl $ct, dest
15633 * Size 8.
15635 tmp = expand_simple_binop (mode, IOR,
15636 tmp, GEN_INT (ct),
15637 copy_rtx (tmp), 1, OPTAB_DIRECT);
15639 else if (diff == -1 && ct)
15642 * cmpl op0,op1
15643 * sbbl dest,dest
15644 * notl dest
15645 * [addl dest, cf]
15647 * Size 8 - 11.
15649 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15650 if (cf)
15651 tmp = expand_simple_binop (mode, PLUS,
15652 copy_rtx (tmp), GEN_INT (cf),
15653 copy_rtx (tmp), 1, OPTAB_DIRECT);
15655 else
15658 * cmpl op0,op1
15659 * sbbl dest,dest
15660 * [notl dest]
15661 * andl cf - ct, dest
15662 * [addl dest, ct]
15664 * Size 8 - 11.
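/* Illustrative arithmetic for the sequence sketched above: with ct == 7
   and cf == 3 the sbb leaves dest == -1 or 0; and-ing with cf - ct == -4
   gives -4 or 0, and adding ct == 7 then yields 3 or 7.  Which constant
   goes with which condition is arranged by the reversals above.  */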
15667 if (cf == 0)
15669 cf = ct;
15670 ct = 0;
15671 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15674 tmp = expand_simple_binop (mode, AND,
15675 copy_rtx (tmp),
15676 gen_int_mode (cf - ct, mode),
15677 copy_rtx (tmp), 1, OPTAB_DIRECT);
15678 if (ct)
15679 tmp = expand_simple_binop (mode, PLUS,
15680 copy_rtx (tmp), GEN_INT (ct),
15681 copy_rtx (tmp), 1, OPTAB_DIRECT);
15684 if (!rtx_equal_p (tmp, out))
15685 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15687 return 1; /* DONE */
15690 if (diff < 0)
15692 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15694 HOST_WIDE_INT tmp;
15695 tmp = ct, ct = cf, cf = tmp;
15696 diff = -diff;
15698 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15700 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15702 /* We may be reversing an unordered compare to a normal compare, which
15703 is not valid in general (we may convert a non-trapping condition
15704 to a trapping one); however, on i386 we currently emit all
15705 comparisons unordered. */
15706 compare_code = reverse_condition_maybe_unordered (compare_code);
15707 code = reverse_condition_maybe_unordered (code);
15709 else
15711 compare_code = reverse_condition (compare_code);
15712 code = reverse_condition (code);
15716 compare_code = UNKNOWN;
15717 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15718 && CONST_INT_P (ix86_compare_op1))
15720 if (ix86_compare_op1 == const0_rtx
15721 && (code == LT || code == GE))
15722 compare_code = code;
15723 else if (ix86_compare_op1 == constm1_rtx)
15725 if (code == LE)
15726 compare_code = LT;
15727 else if (code == GT)
15728 compare_code = GE;
15732 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15733 if (compare_code != UNKNOWN
15734 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15735 && (cf == -1 || ct == -1))
15737 /* If lea code below could be used, only optimize
15738 if it results in a 2 insn sequence. */
15740 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15741 || diff == 3 || diff == 5 || diff == 9)
15742 || (compare_code == LT && ct == -1)
15743 || (compare_code == GE && cf == -1))
15746 * notl op1 (if necessary)
15747 * sarl $31, op1
15748 * orl cf, op1
15750 if (ct != -1)
15752 cf = ct;
15753 ct = -1;
15754 code = reverse_condition (code);
15757 out = emit_store_flag (out, code, ix86_compare_op0,
15758 ix86_compare_op1, VOIDmode, 0, -1);
15760 out = expand_simple_binop (mode, IOR,
15761 out, GEN_INT (cf),
15762 out, 1, OPTAB_DIRECT);
15763 if (out != operands[0])
15764 emit_move_insn (operands[0], out);
15766 return 1; /* DONE */
15771 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15772 || diff == 3 || diff == 5 || diff == 9)
15773 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15774 && (mode != DImode
15775 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15778 * xorl dest,dest
15779 * cmpl op1,op2
15780 * setcc dest
15781 * lea cf(dest*(ct-cf)),dest
15783 * Size 14.
15785 * This also catches the degenerate setcc-only case.
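/* For instance, with ct == 5 and cf == 2 (diff == 3) the 0/1 setcc result
   is combined as dest*2 + dest + 2, i.e. "lea 2(dest,dest,2), dest",
   producing 2 or 5 without a branch.  */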
15788 rtx tmp;
15789 int nops;
15791 out = emit_store_flag (out, code, ix86_compare_op0,
15792 ix86_compare_op1, VOIDmode, 0, 1);
15794 nops = 0;
15795 /* On x86_64 the lea instruction operates on Pmode, so we need
15796 to get the arithmetic done in the proper mode to match. */
15797 if (diff == 1)
15798 tmp = copy_rtx (out);
15799 else
15801 rtx out1;
15802 out1 = copy_rtx (out);
15803 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15804 nops++;
15805 if (diff & 1)
15807 tmp = gen_rtx_PLUS (mode, tmp, out1);
15808 nops++;
15811 if (cf != 0)
15813 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15814 nops++;
15816 if (!rtx_equal_p (tmp, out))
15818 if (nops == 1)
15819 out = force_operand (tmp, copy_rtx (out));
15820 else
15821 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15823 if (!rtx_equal_p (out, operands[0]))
15824 emit_move_insn (operands[0], copy_rtx (out));
15826 return 1; /* DONE */
15830 * General case: Jumpful:
15831 * xorl dest,dest cmpl op1, op2
15832 * cmpl op1, op2 movl ct, dest
15833 * setcc dest jcc 1f
15834 * decl dest movl cf, dest
15835 * andl (cf-ct),dest 1:
15836 * addl ct,dest
15838 * Size 20. Size 14.
15840 * This is reasonably steep, but branch mispredict costs are
15841 * high on modern cpus, so consider failing only if optimizing
15842 * for space.
15845 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15846 && BRANCH_COST (optimize_insn_for_speed_p (),
15847 false) >= 2)
15849 if (cf == 0)
15851 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15853 cf = ct;
15854 ct = 0;
15856 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15858 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15860 /* We may be reversing an unordered compare to a normal compare,
15861 which is not valid in general (we may convert a non-trapping
15862 condition to a trapping one); however, on i386 we currently
15863 emit all comparisons unordered. */
15864 code = reverse_condition_maybe_unordered (code);
15866 else
15868 code = reverse_condition (code);
15869 if (compare_code != UNKNOWN)
15870 compare_code = reverse_condition (compare_code);
15874 if (compare_code != UNKNOWN)
15876 /* notl op1 (if needed)
15877 sarl $31, op1
15878 andl (cf-ct), op1
15879 addl ct, op1
15881 For x < 0 (resp. x <= -1) there will be no notl,
15882 so if possible swap the constants to get rid of the
15883 complement.
15884 True/false will be -1/0 while code below (store flag
15885 followed by decrement) is 0/-1, so the constants need
15886 to be exchanged once more. */
15888 if (compare_code == GE || !cf)
15890 code = reverse_condition (code);
15891 compare_code = LT;
15893 else
15895 HOST_WIDE_INT tmp = cf;
15896 cf = ct;
15897 ct = tmp;
15900 out = emit_store_flag (out, code, ix86_compare_op0,
15901 ix86_compare_op1, VOIDmode, 0, -1);
15903 else
15905 out = emit_store_flag (out, code, ix86_compare_op0,
15906 ix86_compare_op1, VOIDmode, 0, 1);
15908 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15909 copy_rtx (out), 1, OPTAB_DIRECT);
15912 out = expand_simple_binop (mode, AND, copy_rtx (out),
15913 gen_int_mode (cf - ct, mode),
15914 copy_rtx (out), 1, OPTAB_DIRECT);
15915 if (ct)
15916 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15917 copy_rtx (out), 1, OPTAB_DIRECT);
15918 if (!rtx_equal_p (out, operands[0]))
15919 emit_move_insn (operands[0], copy_rtx (out));
15921 return 1; /* DONE */
15925 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15927 /* Try a few more things with specific constants and a variable. */
15929 optab op;
15930 rtx var, orig_out, out, tmp;
15932 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15933 return 0; /* FAIL */
15935 /* If one of the two operands is an interesting constant, load a
15936 constant with the above and mask it in with a logical operation. */
15938 if (CONST_INT_P (operands[2]))
15940 var = operands[3];
15941 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15942 operands[3] = constm1_rtx, op = and_optab;
15943 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15944 operands[3] = const0_rtx, op = ior_optab;
15945 else
15946 return 0; /* FAIL */
15948 else if (CONST_INT_P (operands[3]))
15950 var = operands[2];
15951 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15952 operands[2] = constm1_rtx, op = and_optab;
15953 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15954 operands[2] = const0_rtx, op = ior_optab;
15955 else
15956 return 0; /* FAIL */
15958 else
15959 return 0; /* FAIL */
15961 orig_out = operands[0];
15962 tmp = gen_reg_rtx (mode);
15963 operands[0] = tmp;
15965 /* Recurse to get the constant loaded. */
15966 if (ix86_expand_int_movcc (operands) == 0)
15967 return 0; /* FAIL */
15969 /* Mask in the interesting variable. */
15970 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15971 OPTAB_WIDEN);
15972 if (!rtx_equal_p (out, orig_out))
15973 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15975 return 1; /* DONE */
15979 * For comparison with above,
15981 * movl cf,dest
15982 * movl ct,tmp
15983 * cmpl op1,op2
15984 * cmovcc tmp,dest
15986 * Size 15.
15989 if (! nonimmediate_operand (operands[2], mode))
15990 operands[2] = force_reg (mode, operands[2]);
15991 if (! nonimmediate_operand (operands[3], mode))
15992 operands[3] = force_reg (mode, operands[3]);
15994 if (! register_operand (operands[2], VOIDmode)
15995 && (mode == QImode
15996 || ! register_operand (operands[3], VOIDmode)))
15997 operands[2] = force_reg (mode, operands[2]);
15999 if (mode == QImode
16000 && ! register_operand (operands[3], VOIDmode))
16001 operands[3] = force_reg (mode, operands[3]);
16003 emit_insn (compare_seq);
16004 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16005 gen_rtx_IF_THEN_ELSE (mode,
16006 compare_op, operands[2],
16007 operands[3])));
16009 return 1; /* DONE */
16012 /* Swap, force into registers, or otherwise massage the two operands
16013 to an sse comparison with a mask result. Thus we differ a bit from
16014 ix86_prepare_fp_compare_args which expects to produce a flags result.
16016 The DEST operand exists to help determine whether to commute commutative
16017 operators. The POP0/POP1 operands are updated in place. The new
16018 comparison code is returned, or UNKNOWN if not implementable. */
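/* For example, a GT comparison is rewritten here as LT with the operands
   swapped, since only LT/LE/UNGT/UNGE are treated as directly supported
   below.  */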
16020 static enum rtx_code
16021 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
16022 rtx *pop0, rtx *pop1)
16024 rtx tmp;
16026 switch (code)
16028 case LTGT:
16029 case UNEQ:
16030 /* We have no LTGT as an operator. We could implement it with
16031 NE & ORDERED, but this requires an extra temporary. It's
16032 not clear that it's worth it. */
16033 return UNKNOWN;
16035 case LT:
16036 case LE:
16037 case UNGT:
16038 case UNGE:
16039 /* These are supported directly. */
16040 break;
16042 case EQ:
16043 case NE:
16044 case UNORDERED:
16045 case ORDERED:
16046 /* For commutative operators, try to canonicalize the destination
16047 operand to be first in the comparison - this helps reload to
16048 avoid extra moves. */
16049 if (!dest || !rtx_equal_p (dest, *pop1))
16050 break;
16051 /* FALLTHRU */
16053 case GE:
16054 case GT:
16055 case UNLE:
16056 case UNLT:
16057 /* These are not supported directly. Swap the comparison operands
16058 to transform into something that is supported. */
16059 tmp = *pop0;
16060 *pop0 = *pop1;
16061 *pop1 = tmp;
16062 code = swap_condition (code);
16063 break;
16065 default:
16066 gcc_unreachable ();
16069 return code;
16072 /* Detect conditional moves that exactly match min/max operational
16073 semantics. Note that this is IEEE safe, as long as we don't
16074 interchange the operands.
16076 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16077 and TRUE if the operation is successful and instructions are emitted. */
16079 static bool
16080 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16081 rtx cmp_op1, rtx if_true, rtx if_false)
16083 enum machine_mode mode;
16084 bool is_min;
16085 rtx tmp;
16087 if (code == LT)
16089 else if (code == UNGE)
16091 tmp = if_true;
16092 if_true = if_false;
16093 if_false = tmp;
16095 else
16096 return false;
16098 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16099 is_min = true;
16100 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16101 is_min = false;
16102 else
16103 return false;
16105 mode = GET_MODE (dest);
16107 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16108 but MODE may be a vector mode and thus not appropriate. */
16109 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16111 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16112 rtvec v;
16114 if_true = force_reg (mode, if_true);
16115 v = gen_rtvec (2, if_true, if_false);
16116 tmp = gen_rtx_UNSPEC (mode, v, u);
16118 else
16120 code = is_min ? SMIN : SMAX;
16121 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16124 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
16125 return true;
16128 /* Expand an sse vector comparison. Return the register with the result. */
16130 static rtx
16131 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16132 rtx op_true, rtx op_false)
16134 enum machine_mode mode = GET_MODE (dest);
16135 rtx x;
16137 cmp_op0 = force_reg (mode, cmp_op0);
16138 if (!nonimmediate_operand (cmp_op1, mode))
16139 cmp_op1 = force_reg (mode, cmp_op1);
16141 if (optimize
16142 || reg_overlap_mentioned_p (dest, op_true)
16143 || reg_overlap_mentioned_p (dest, op_false))
16144 dest = gen_reg_rtx (mode);
16146 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16147 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16149 return dest;
16152 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16153 operations. This is used for both scalar and vector conditional moves. */
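/* In the general case this computes dest = (cmp AND op_true) IOR
   (NOT cmp AND op_false); the special cases below drop one half when an
   arm is zero, and TARGET_XOP can use a single conditional-move pattern
   instead.  */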
16155 static void
16156 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16158 enum machine_mode mode = GET_MODE (dest);
16159 rtx t2, t3, x;
16161 if (op_false == CONST0_RTX (mode))
16163 op_true = force_reg (mode, op_true);
16164 x = gen_rtx_AND (mode, cmp, op_true);
16165 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16167 else if (op_true == CONST0_RTX (mode))
16169 op_false = force_reg (mode, op_false);
16170 x = gen_rtx_NOT (mode, cmp);
16171 x = gen_rtx_AND (mode, x, op_false);
16172 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16174 else if (TARGET_XOP)
16176 rtx pcmov = gen_rtx_SET (mode, dest,
16177 gen_rtx_IF_THEN_ELSE (mode, cmp,
16178 op_true,
16179 op_false));
16180 emit_insn (pcmov);
16182 else
16184 op_true = force_reg (mode, op_true);
16185 op_false = force_reg (mode, op_false);
16187 t2 = gen_reg_rtx (mode);
16188 if (optimize)
16189 t3 = gen_reg_rtx (mode);
16190 else
16191 t3 = dest;
16193 x = gen_rtx_AND (mode, op_true, cmp);
16194 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16196 x = gen_rtx_NOT (mode, cmp);
16197 x = gen_rtx_AND (mode, x, op_false);
16198 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16200 x = gen_rtx_IOR (mode, t3, t2);
16201 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16205 /* Expand a floating-point conditional move. Return true if successful. */
16208 ix86_expand_fp_movcc (rtx operands[])
16210 enum machine_mode mode = GET_MODE (operands[0]);
16211 enum rtx_code code = GET_CODE (operands[1]);
16212 rtx tmp, compare_op;
16214 ix86_compare_op0 = XEXP (operands[1], 0);
16215 ix86_compare_op1 = XEXP (operands[1], 1);
16216 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16218 enum machine_mode cmode;
16220 /* Since we've no cmove for sse registers, don't force bad register
16221 allocation just to gain access to it. Deny movcc when the
16222 comparison mode doesn't match the move mode. */
16223 cmode = GET_MODE (ix86_compare_op0);
16224 if (cmode == VOIDmode)
16225 cmode = GET_MODE (ix86_compare_op1);
16226 if (cmode != mode)
16227 return 0;
16229 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16230 &ix86_compare_op0,
16231 &ix86_compare_op1);
16232 if (code == UNKNOWN)
16233 return 0;
16235 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16236 ix86_compare_op1, operands[2],
16237 operands[3]))
16238 return 1;
16240 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16241 ix86_compare_op1, operands[2], operands[3]);
16242 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16243 return 1;
16246 /* The floating point conditional move instructions don't directly
16247 support conditions resulting from a signed integer comparison. */
16249 compare_op = ix86_expand_compare (code);
16250 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16252 tmp = gen_reg_rtx (QImode);
16253 ix86_expand_setcc (code, tmp);
16254 code = NE;
16255 ix86_compare_op0 = tmp;
16256 ix86_compare_op1 = const0_rtx;
16257 compare_op = ix86_expand_compare (code);
16260 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16261 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16262 operands[2], operands[3])));
16264 return 1;
16267 /* Expand a floating-point vector conditional move; a vcond operation
16268 rather than a movcc operation. */
16270 bool
16271 ix86_expand_fp_vcond (rtx operands[])
16273 enum rtx_code code = GET_CODE (operands[3]);
16274 rtx cmp;
16276 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16277 &operands[4], &operands[5]);
16278 if (code == UNKNOWN)
16279 return false;
16281 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16282 operands[5], operands[1], operands[2]))
16283 return true;
16285 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16286 operands[1], operands[2]);
16287 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16288 return true;
16291 /* Expand a signed/unsigned integral vector conditional move. */
16293 bool
16294 ix86_expand_int_vcond (rtx operands[])
16296 enum machine_mode mode = GET_MODE (operands[0]);
16297 enum rtx_code code = GET_CODE (operands[3]);
16298 bool negate = false;
16299 rtx x, cop0, cop1;
16301 cop0 = operands[4];
16302 cop1 = operands[5];
16304 /* XOP supports all of the comparisons on all vector int types. */
16305 if (!TARGET_XOP)
16307 /* Canonicalize the comparison to EQ, GT, GTU. */
16308 switch (code)
16310 case EQ:
16311 case GT:
16312 case GTU:
16313 break;
16315 case NE:
16316 case LE:
16317 case LEU:
16318 code = reverse_condition (code);
16319 negate = true;
16320 break;
16322 case GE:
16323 case GEU:
16324 code = reverse_condition (code);
16325 negate = true;
16326 /* FALLTHRU */
16328 case LT:
16329 case LTU:
16330 code = swap_condition (code);
16331 x = cop0, cop0 = cop1, cop1 = x;
16332 break;
16334 default:
16335 gcc_unreachable ();
16338 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16339 if (mode == V2DImode)
16341 switch (code)
16343 case EQ:
16344 /* SSE4.1 supports EQ. */
16345 if (!TARGET_SSE4_1)
16346 return false;
16347 break;
16349 case GT:
16350 case GTU:
16351 /* SSE4.2 supports GT/GTU. */
16352 if (!TARGET_SSE4_2)
16353 return false;
16354 break;
16356 default:
16357 gcc_unreachable ();
16361 /* Unsigned parallel compare is not supported by the hardware.
16362 Play some tricks to turn this into a signed comparison
16363 against 0. */
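/* E.g. for V4SImode, x >u y is rewritten as (x - 0x80000000) >s
   (y - 0x80000000): subtracting the sign-bit mask from both operands
   preserves their ordering while making the comparison signed.  For
   V16QImode/V8HImode a saturating subtraction is used instead, since
   x >u y exactly when the unsigned saturating subtraction x - y is
   nonzero.  */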
16364 if (code == GTU)
16366 cop0 = force_reg (mode, cop0);
16368 switch (mode)
16370 case V4SImode:
16371 case V2DImode:
16373 rtx t1, t2, mask;
16374 rtx (*gen_sub3) (rtx, rtx, rtx);
16376 /* Subtract (-(INT MAX) - 1) from both operands to make
16377 them signed. */
16378 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16379 true, false);
16380 gen_sub3 = (mode == V4SImode
16381 ? gen_subv4si3 : gen_subv2di3);
16382 t1 = gen_reg_rtx (mode);
16383 emit_insn (gen_sub3 (t1, cop0, mask));
16385 t2 = gen_reg_rtx (mode);
16386 emit_insn (gen_sub3 (t2, cop1, mask));
16388 cop0 = t1;
16389 cop1 = t2;
16390 code = GT;
16392 break;
16394 case V16QImode:
16395 case V8HImode:
16396 /* Perform a parallel unsigned saturating subtraction. */
16397 x = gen_reg_rtx (mode);
16398 emit_insn (gen_rtx_SET (VOIDmode, x,
16399 gen_rtx_US_MINUS (mode, cop0, cop1)));
16401 cop0 = x;
16402 cop1 = CONST0_RTX (mode);
16403 code = EQ;
16404 negate = !negate;
16405 break;
16407 default:
16408 gcc_unreachable ();
16413 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16414 operands[1+negate], operands[2-negate]);
16416 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16417 operands[2-negate]);
16418 return true;
16421 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16422 true if we should do zero extension, else sign extension. HIGH_P is
16423 true if we want the N/2 high elements, else the low elements. */
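/* For example, unpacking a V16QImode source yields V8HImode elements; in
   the signed case the upper halves come from interleaving with a
   0 > OP[1] comparison mask, in the unsigned case from interleaving with
   zeros.  */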
16425 void
16426 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16428 enum machine_mode imode = GET_MODE (operands[1]);
16429 rtx (*unpack)(rtx, rtx, rtx);
16430 rtx se, dest;
16432 switch (imode)
16434 case V16QImode:
16435 if (high_p)
16436 unpack = gen_vec_interleave_highv16qi;
16437 else
16438 unpack = gen_vec_interleave_lowv16qi;
16439 break;
16440 case V8HImode:
16441 if (high_p)
16442 unpack = gen_vec_interleave_highv8hi;
16443 else
16444 unpack = gen_vec_interleave_lowv8hi;
16445 break;
16446 case V4SImode:
16447 if (high_p)
16448 unpack = gen_vec_interleave_highv4si;
16449 else
16450 unpack = gen_vec_interleave_lowv4si;
16451 break;
16452 default:
16453 gcc_unreachable ();
16456 dest = gen_lowpart (imode, operands[0]);
16458 if (unsigned_p)
16459 se = force_reg (imode, CONST0_RTX (imode));
16460 else
16461 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16462 operands[1], pc_rtx, pc_rtx);
16464 emit_insn (unpack (dest, operands[1], se));
16467 /* This function performs the same task as ix86_expand_sse_unpack,
16468 but with SSE4.1 instructions. */
16470 void
16471 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16473 enum machine_mode imode = GET_MODE (operands[1]);
16474 rtx (*unpack)(rtx, rtx);
16475 rtx src, dest;
16477 switch (imode)
16479 case V16QImode:
16480 if (unsigned_p)
16481 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16482 else
16483 unpack = gen_sse4_1_extendv8qiv8hi2;
16484 break;
16485 case V8HImode:
16486 if (unsigned_p)
16487 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16488 else
16489 unpack = gen_sse4_1_extendv4hiv4si2;
16490 break;
16491 case V4SImode:
16492 if (unsigned_p)
16493 unpack = gen_sse4_1_zero_extendv2siv2di2;
16494 else
16495 unpack = gen_sse4_1_extendv2siv2di2;
16496 break;
16497 default:
16498 gcc_unreachable ();
16501 dest = operands[0];
16502 if (high_p)
16504 /* Shift higher 8 bytes to lower 8 bytes. */
16505 src = gen_reg_rtx (imode);
16506 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16507 gen_lowpart (V1TImode, operands[1]),
16508 GEN_INT (64)));
16510 else
16511 src = operands[1];
16513 emit_insn (unpack (dest, src));
16516 /* Expand conditional increment or decrement using adc/sbb instructions.
16517 The default case using setcc followed by the conditional move can be
16518 done by generic code. */
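/* For example, "x += (a < b)" with unsigned operands becomes a compare
   that leaves the carry flag set exactly when a < b, followed by an
   adc of 0 into x, with no setcc or conditional move.  */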
16520 ix86_expand_int_addcc (rtx operands[])
16522 enum rtx_code code = GET_CODE (operands[1]);
16523 rtx flags;
16524 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16525 rtx compare_op;
16526 rtx val = const0_rtx;
16527 bool fpcmp = false;
16528 enum machine_mode mode;
16530 ix86_compare_op0 = XEXP (operands[1], 0);
16531 ix86_compare_op1 = XEXP (operands[1], 1);
16532 if (operands[3] != const1_rtx
16533 && operands[3] != constm1_rtx)
16534 return 0;
16535 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16536 ix86_compare_op1, &compare_op))
16537 return 0;
16538 code = GET_CODE (compare_op);
16540 flags = XEXP (compare_op, 0);
16542 if (GET_MODE (flags) == CCFPmode
16543 || GET_MODE (flags) == CCFPUmode)
16545 fpcmp = true;
16546 code = ix86_fp_compare_code_to_integer (code);
16549 if (code != LTU)
16551 val = constm1_rtx;
16552 if (fpcmp)
16553 PUT_CODE (compare_op,
16554 reverse_condition_maybe_unordered
16555 (GET_CODE (compare_op)));
16556 else
16557 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16560 mode = GET_MODE (operands[0]);
16562 /* Construct either adc or sbb insn. */
16563 if ((code == LTU) == (operands[3] == constm1_rtx))
16565 switch (mode)
16567 case QImode:
16568 insn = gen_subqi3_carry;
16569 break;
16570 case HImode:
16571 insn = gen_subhi3_carry;
16572 break;
16573 case SImode:
16574 insn = gen_subsi3_carry;
16575 break;
16576 case DImode:
16577 insn = gen_subdi3_carry;
16578 break;
16579 default:
16580 gcc_unreachable ();
16583 else
16585 switch (mode)
16587 case QImode:
16588 insn = gen_addqi3_carry;
16589 break;
16590 case HImode:
16591 insn = gen_addhi3_carry;
16592 break;
16593 case SImode:
16594 insn = gen_addsi3_carry;
16595 break;
16596 case DImode:
16597 insn = gen_adddi3_carry;
16598 break;
16599 default:
16600 gcc_unreachable ();
16603 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16605 return 1; /* DONE */
16609 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16610 works for floating point parameters and non-offsettable memories.
16611 For pushes, it returns just stack offsets; the values will be saved
16612 in the right order. Maximally four parts are generated. */
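/* For example, an XFmode value is split into three SImode parts on a
   32-bit target, and into a DImode part plus an SImode part on a 64-bit
   target.  */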
16614 static int
16615 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16617 int size;
16619 if (!TARGET_64BIT)
16620 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16621 else
16622 size = (GET_MODE_SIZE (mode) + 4) / 8;
16624 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16625 gcc_assert (size >= 2 && size <= 4);
16627 /* Optimize constant pool reference to immediates. This is used by fp
16628 moves, that force all constants to memory to allow combining. */
16629 if (MEM_P (operand) && MEM_READONLY_P (operand))
16631 rtx tmp = maybe_get_pool_constant (operand);
16632 if (tmp)
16633 operand = tmp;
16636 if (MEM_P (operand) && !offsettable_memref_p (operand))
16638 /* The only non-offsettable memories we handle are pushes. */
16639 int ok = push_operand (operand, VOIDmode);
16641 gcc_assert (ok);
16643 operand = copy_rtx (operand);
16644 PUT_MODE (operand, Pmode);
16645 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16646 return size;
16649 if (GET_CODE (operand) == CONST_VECTOR)
16651 enum machine_mode imode = int_mode_for_mode (mode);
16652 /* Caution: if we looked through a constant pool memory above,
16653 the operand may actually have a different mode now. That's
16654 ok, since we want to pun this all the way back to an integer. */
16655 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16656 gcc_assert (operand != NULL);
16657 mode = imode;
16660 if (!TARGET_64BIT)
16662 if (mode == DImode)
16663 split_di (&operand, 1, &parts[0], &parts[1]);
16664 else
16666 int i;
16668 if (REG_P (operand))
16670 gcc_assert (reload_completed);
16671 for (i = 0; i < size; i++)
16672 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16674 else if (offsettable_memref_p (operand))
16676 operand = adjust_address (operand, SImode, 0);
16677 parts[0] = operand;
16678 for (i = 1; i < size; i++)
16679 parts[i] = adjust_address (operand, SImode, 4 * i);
16681 else if (GET_CODE (operand) == CONST_DOUBLE)
16683 REAL_VALUE_TYPE r;
16684 long l[4];
16686 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16687 switch (mode)
16689 case TFmode:
16690 real_to_target (l, &r, mode);
16691 parts[3] = gen_int_mode (l[3], SImode);
16692 parts[2] = gen_int_mode (l[2], SImode);
16693 break;
16694 case XFmode:
16695 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16696 parts[2] = gen_int_mode (l[2], SImode);
16697 break;
16698 case DFmode:
16699 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16700 break;
16701 default:
16702 gcc_unreachable ();
16704 parts[1] = gen_int_mode (l[1], SImode);
16705 parts[0] = gen_int_mode (l[0], SImode);
16707 else
16708 gcc_unreachable ();
16711 else
16713 if (mode == TImode)
16714 split_ti (&operand, 1, &parts[0], &parts[1]);
16715 if (mode == XFmode || mode == TFmode)
16717 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
16718 if (REG_P (operand))
16720 gcc_assert (reload_completed);
16721 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16722 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16724 else if (offsettable_memref_p (operand))
16726 operand = adjust_address (operand, DImode, 0);
16727 parts[0] = operand;
16728 parts[1] = adjust_address (operand, upper_mode, 8);
16730 else if (GET_CODE (operand) == CONST_DOUBLE)
16732 REAL_VALUE_TYPE r;
16733 long l[4];
16735 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16736 real_to_target (l, &r, mode);
16738 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16739 if (HOST_BITS_PER_WIDE_INT >= 64)
16740 parts[0]
16741 = gen_int_mode
16742 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16743 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16744 DImode);
16745 else
16746 parts[0] = immed_double_const (l[0], l[1], DImode);
16748 if (upper_mode == SImode)
16749 parts[1] = gen_int_mode (l[2], SImode);
16750 else if (HOST_BITS_PER_WIDE_INT >= 64)
16751 parts[1]
16752 = gen_int_mode
16753 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16754 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16755 DImode);
16756 else
16757 parts[1] = immed_double_const (l[2], l[3], DImode);
16759 else
16760 gcc_unreachable ();
16764 return size;
16767 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16768 Return false when normal moves are needed; true when all required
16769 insns have been emitted. Operands 2-4 contain the input values
16770 in the correct order; operands 5-7 contain the output values. */
16772 void
16773 ix86_split_long_move (rtx operands[])
16775 rtx part[2][4];
16776 int nparts, i, j;
16777 int push = 0;
16778 int collisions = 0;
16779 enum machine_mode mode = GET_MODE (operands[0]);
16780 bool collisionparts[4];
16782 /* The DFmode expanders may ask us to move a double.
16783 For a 64bit target this is a single move. By hiding the fact
16784 here we simplify the i386.md splitters. */
16785 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16787 /* Optimize constant pool reference to immediates. This is used by
16788 fp moves, that force all constants to memory to allow combining. */
16790 if (MEM_P (operands[1])
16791 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16792 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16793 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16794 if (push_operand (operands[0], VOIDmode))
16796 operands[0] = copy_rtx (operands[0]);
16797 PUT_MODE (operands[0], Pmode);
16799 else
16800 operands[0] = gen_lowpart (DImode, operands[0]);
16801 operands[1] = gen_lowpart (DImode, operands[1]);
16802 emit_move_insn (operands[0], operands[1]);
16803 return;
16806 /* The only non-offsettable memory we handle is push. */
16807 if (push_operand (operands[0], VOIDmode))
16808 push = 1;
16809 else
16810 gcc_assert (!MEM_P (operands[0])
16811 || offsettable_memref_p (operands[0]));
16813 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16814 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16816 /* When emitting push, take care for source operands on the stack. */
16817 if (push && MEM_P (operands[1])
16818 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16820 rtx src_base = XEXP (part[1][nparts - 1], 0);
16822 /* Compensate for the stack decrement by 4. */
16823 if (!TARGET_64BIT && nparts == 3
16824 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16825 src_base = plus_constant (src_base, 4);
16827 /* src_base refers to the stack pointer and is
16828 automatically decreased by emitted push. */
16829 for (i = 0; i < nparts; i++)
16830 part[1][i] = change_address (part[1][i],
16831 GET_MODE (part[1][i]), src_base);
16834 /* We need to do copy in the right order in case an address register
16835 of the source overlaps the destination. */
16836 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16838 rtx tmp;
16840 for (i = 0; i < nparts; i++)
16842 collisionparts[i]
16843 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16844 if (collisionparts[i])
16845 collisions++;
16848 /* Collision in the middle part can be handled by reordering. */
16849 if (collisions == 1 && nparts == 3 && collisionparts [1])
16851 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16852 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16854 else if (collisions == 1
16855 && nparts == 4
16856 && (collisionparts [1] || collisionparts [2]))
16858 if (collisionparts [1])
16860 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16861 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16863 else
16865 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16866 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16870 /* If there are more collisions, we can't handle it by reordering.
16871 Do an lea to the last part and use only one colliding move. */
16872 else if (collisions > 1)
16874 rtx base;
16876 collisions = 1;
16878 base = part[0][nparts - 1];
16880 /* Handle the case when the last part isn't valid for lea.
16881 Happens in 64-bit mode storing the 12-byte XFmode. */
16882 if (GET_MODE (base) != Pmode)
16883 base = gen_rtx_REG (Pmode, REGNO (base));
16885 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16886 part[1][0] = replace_equiv_address (part[1][0], base);
16887 for (i = 1; i < nparts; i++)
16889 tmp = plus_constant (base, UNITS_PER_WORD * i);
16890 part[1][i] = replace_equiv_address (part[1][i], tmp);
16895 if (push)
16897 if (!TARGET_64BIT)
16899 if (nparts == 3)
16901 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16902 emit_insn (gen_addsi3 (stack_pointer_rtx,
16903 stack_pointer_rtx, GEN_INT (-4)));
16904 emit_move_insn (part[0][2], part[1][2]);
16906 else if (nparts == 4)
16908 emit_move_insn (part[0][3], part[1][3]);
16909 emit_move_insn (part[0][2], part[1][2]);
16912 else
16914 /* In 64bit mode we don't have a 32bit push available. If this is a
16915 register, that is OK - we will just use the larger counterpart. We also
16916 retype memory - this comes from the attempt to avoid a REX prefix on
16917 moving the second half of a TFmode value. */
16918 if (GET_MODE (part[1][1]) == SImode)
16920 switch (GET_CODE (part[1][1]))
16922 case MEM:
16923 part[1][1] = adjust_address (part[1][1], DImode, 0);
16924 break;
16926 case REG:
16927 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16928 break;
16930 default:
16931 gcc_unreachable ();
16934 if (GET_MODE (part[1][0]) == SImode)
16935 part[1][0] = part[1][1];
16938 emit_move_insn (part[0][1], part[1][1]);
16939 emit_move_insn (part[0][0], part[1][0]);
16940 return;
16943 /* Choose correct order to not overwrite the source before it is copied. */
16944 if ((REG_P (part[0][0])
16945 && REG_P (part[1][1])
16946 && (REGNO (part[0][0]) == REGNO (part[1][1])
16947 || (nparts == 3
16948 && REGNO (part[0][0]) == REGNO (part[1][2]))
16949 || (nparts == 4
16950 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16951 || (collisions > 0
16952 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16954 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16956 operands[2 + i] = part[0][j];
16957 operands[6 + i] = part[1][j];
16960 else
16962 for (i = 0; i < nparts; i++)
16964 operands[2 + i] = part[0][i];
16965 operands[6 + i] = part[1][i];
16969 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16970 if (optimize_insn_for_size_p ())
16972 for (j = 0; j < nparts - 1; j++)
16973 if (CONST_INT_P (operands[6 + j])
16974 && operands[6 + j] != const0_rtx
16975 && REG_P (operands[2 + j]))
16976 for (i = j; i < nparts - 1; i++)
16977 if (CONST_INT_P (operands[7 + i])
16978 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16979 operands[7 + i] = operands[2 + j];
16982 for (i = 0; i < nparts; i++)
16983 emit_move_insn (operands[2 + i], operands[6 + i]);
16985 return;
16988 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16989 left shift by a constant, either using a single shift or
16990 a sequence of add instructions. */
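/* For instance, a left shift by 2 may come out as two add-to-self
   instructions when, according to the cost tables, two adds are no more
   expensive than a single shift by a constant.  */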
16992 static void
16993 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16995 if (count == 1)
16997 emit_insn ((mode == DImode
16998 ? gen_addsi3
16999 : gen_adddi3) (operand, operand, operand));
17001 else if (!optimize_insn_for_size_p ()
17002 && count * ix86_cost->add <= ix86_cost->shift_const)
17004 int i;
17005 for (i=0; i<count; i++)
17007 emit_insn ((mode == DImode
17008 ? gen_addsi3
17009 : gen_adddi3) (operand, operand, operand));
17012 else
17013 emit_insn ((mode == DImode
17014 ? gen_ashlsi3
17015 : gen_ashldi3) (operand, operand, GEN_INT (count)));
17018 void
17019 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
17021 rtx low[2], high[2];
17022 int count;
17023 const int single_width = mode == DImode ? 32 : 64;
17025 if (CONST_INT_P (operands[2]))
17027 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17028 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17030 if (count >= single_width)
17032 emit_move_insn (high[0], low[1]);
17033 emit_move_insn (low[0], const0_rtx);
17035 if (count > single_width)
17036 ix86_expand_ashl_const (high[0], count - single_width, mode);
17038 else
17040 if (!rtx_equal_p (operands[0], operands[1]))
17041 emit_move_insn (operands[0], operands[1]);
17042 emit_insn ((mode == DImode
17043 ? gen_x86_shld
17044 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17045 ix86_expand_ashl_const (low[0], count, mode);
17047 return;
17050 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17052 if (operands[1] == const1_rtx)
17054 /* Assuming we've chosen QImode-capable registers, then 1 << N
17055 can be done with two 32/64-bit shifts, no branches, no cmoves. */
17056 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17058 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17060 ix86_expand_clear (low[0]);
17061 ix86_expand_clear (high[0]);
17062 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17064 d = gen_lowpart (QImode, low[0]);
17065 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17066 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17067 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17069 d = gen_lowpart (QImode, high[0]);
17070 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17071 s = gen_rtx_NE (QImode, flags, const0_rtx);
17072 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17075 /* Otherwise, we can get the same results by manually performing
17076 a bit extract operation on bit 5/6, and then performing the two
17077 shifts. The two methods of getting 0/1 into low/high are exactly
17078 the same size. Avoiding the shift in the bit extract case helps
17079 pentium4 a bit; no one else seems to care much either way. */
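/* Sketch of the bit-extract variant for a DImode 1 << N on a 32-bit
   target: bit 5 of the count selects the destination word, so
   high = (N >> 5) & 1 and low = high ^ 1, after which both halves are
   shifted left by N (the 32-bit shifts only use the low five bits of
   the count).  */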
17080 else
17082 rtx x;
17084 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17085 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17086 else
17087 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17088 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17090 emit_insn ((mode == DImode
17091 ? gen_lshrsi3
17092 : gen_lshrdi3) (high[0], high[0],
17093 GEN_INT (mode == DImode ? 5 : 6)));
17094 emit_insn ((mode == DImode
17095 ? gen_andsi3
17096 : gen_anddi3) (high[0], high[0], const1_rtx));
17097 emit_move_insn (low[0], high[0]);
17098 emit_insn ((mode == DImode
17099 ? gen_xorsi3
17100 : gen_xordi3) (low[0], low[0], const1_rtx));
17103 emit_insn ((mode == DImode
17104 ? gen_ashlsi3
17105 : gen_ashldi3) (low[0], low[0], operands[2]));
17106 emit_insn ((mode == DImode
17107 ? gen_ashlsi3
17108 : gen_ashldi3) (high[0], high[0], operands[2]));
17109 return;
17112 if (operands[1] == constm1_rtx)
17114 /* For -1 << N, we can avoid the shld instruction, because we
17115 know that we're shifting 0...31/63 ones into a -1. */
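/* E.g. -1 << 5 on a 32-bit target is 0xffffffffffffffe0: the high word
   stays all ones, so only the low word needs shifting; counts of 32 or
   more are fixed up by the adjustment code further below.  */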
17116 emit_move_insn (low[0], constm1_rtx);
17117 if (optimize_insn_for_size_p ())
17118 emit_move_insn (high[0], low[0]);
17119 else
17120 emit_move_insn (high[0], constm1_rtx);
17122 else
17124 if (!rtx_equal_p (operands[0], operands[1]))
17125 emit_move_insn (operands[0], operands[1]);
17127 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17128 emit_insn ((mode == DImode
17129 ? gen_x86_shld
17130 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17133 emit_insn ((mode == DImode
17134 ? gen_ashlsi3
17135 : gen_ashldi3) (low[0], low[0], operands[2]));
17137 if (TARGET_CMOVE && scratch)
17139 ix86_expand_clear (scratch);
17140 emit_insn ((mode == DImode
17141 ? gen_x86_shiftsi_adj_1
17142 : gen_x86_shiftdi_adj_1) (high[0], low[0], operands[2],
17143 scratch));
17145 else
17146 emit_insn ((mode == DImode
17147 ? gen_x86_shiftsi_adj_2
17148 : gen_x86_shiftdi_adj_2) (high[0], low[0], operands[2]));
17151 void
17152 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17154 rtx low[2], high[2];
17155 int count;
17156 const int single_width = mode == DImode ? 32 : 64;
17158 if (CONST_INT_P (operands[2]))
17160 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17161 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17163 if (count == single_width * 2 - 1)
17165 emit_move_insn (high[0], high[1]);
17166 emit_insn ((mode == DImode
17167 ? gen_ashrsi3
17168 : gen_ashrdi3) (high[0], high[0],
17169 GEN_INT (single_width - 1)));
17170 emit_move_insn (low[0], high[0]);
17173 else if (count >= single_width)
17175 emit_move_insn (low[0], high[1]);
17176 emit_move_insn (high[0], low[0]);
17177 emit_insn ((mode == DImode
17178 ? gen_ashrsi3
17179 : gen_ashrdi3) (high[0], high[0],
17180 GEN_INT (single_width - 1)));
17181 if (count > single_width)
17182 emit_insn ((mode == DImode
17183 ? gen_ashrsi3
17184 : gen_ashrdi3) (low[0], low[0],
17185 GEN_INT (count - single_width)));
17187 else
17189 if (!rtx_equal_p (operands[0], operands[1]))
17190 emit_move_insn (operands[0], operands[1]);
17191 emit_insn ((mode == DImode
17192 ? gen_x86_shrd
17193 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17194 emit_insn ((mode == DImode
17195 ? gen_ashrsi3
17196 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17199 else
17201 if (!rtx_equal_p (operands[0], operands[1]))
17202 emit_move_insn (operands[0], operands[1]);
17204 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17206 emit_insn ((mode == DImode
17207 ? gen_x86_shrd
17208 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17209 emit_insn ((mode == DImode
17210 ? gen_ashrsi3
17211 : gen_ashrdi3) (high[0], high[0], operands[2]));
17213 if (TARGET_CMOVE && scratch)
17215 emit_move_insn (scratch, high[0]);
17216 emit_insn ((mode == DImode
17217 ? gen_ashrsi3
17218 : gen_ashrdi3) (scratch, scratch,
17219 GEN_INT (single_width - 1)));
17220 emit_insn ((mode == DImode
17221 ? gen_x86_shiftsi_adj_1
17222 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17223 scratch));
17225 else
17226 emit_insn ((mode == DImode
17227 ? gen_x86_shiftsi_adj_3
17228 : gen_x86_shiftdi_adj_3) (low[0], high[0], operands[2]));
17232 void
17233 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17235 rtx low[2], high[2];
17236 int count;
17237 const int single_width = mode == DImode ? 32 : 64;
17239 if (CONST_INT_P (operands[2]))
17241 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17242 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17244 if (count >= single_width)
17246 emit_move_insn (low[0], high[1]);
17247 ix86_expand_clear (high[0]);
17249 if (count > single_width)
17250 emit_insn ((mode == DImode
17251 ? gen_lshrsi3
17252 : gen_lshrdi3) (low[0], low[0],
17253 GEN_INT (count - single_width)));
17255 else
17257 if (!rtx_equal_p (operands[0], operands[1]))
17258 emit_move_insn (operands[0], operands[1]);
17259 emit_insn ((mode == DImode
17260 ? gen_x86_shrd
17261 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17262 emit_insn ((mode == DImode
17263 ? gen_lshrsi3
17264 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17267 else
17269 if (!rtx_equal_p (operands[0], operands[1]))
17270 emit_move_insn (operands[0], operands[1]);
17272 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17274 emit_insn ((mode == DImode
17275 ? gen_x86_shrd
17276 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17277 emit_insn ((mode == DImode
17278 ? gen_lshrsi3
17279 : gen_lshrdi3) (high[0], high[0], operands[2]));
17281 /* Heh. By reversing the arguments, we can reuse this pattern. */
17282 if (TARGET_CMOVE && scratch)
17284 ix86_expand_clear (scratch);
17285 emit_insn ((mode == DImode
17286 ? gen_x86_shiftsi_adj_1
17287 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17288 scratch));
17290 else
17291 emit_insn ((mode == DImode
17292 ? gen_x86_shiftsi_adj_2
17293 : gen_x86_shiftdi_adj_2) (low[0], high[0], operands[2]));
17297 /* Predict just emitted jump instruction to be taken with probability PROB. */
17298 static void
17299 predict_jump (int prob)
17301 rtx insn = get_last_insn ();
17302 gcc_assert (JUMP_P (insn));
17303 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17306 /* Helper function for the string operations below. Test VARIABLE whether
17307 it is aligned to VALUE bytes. If true, jump to the label. */
17308 static rtx
17309 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17311 rtx label = gen_label_rtx ();
17312 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17313 if (GET_MODE (variable) == DImode)
17314 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17315 else
17316 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17317 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17318 1, label);
17319 if (epilogue)
17320 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17321 else
17322 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17323 return label;
17326 /* Adjust COUNTER by the VALUE. */
17327 static void
17328 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17330 if (GET_MODE (countreg) == DImode)
17331 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17332 else
17333 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17336 /* Zero extend possibly SImode EXP to Pmode register. */
17338 ix86_zero_extend_to_Pmode (rtx exp)
17340 rtx r;
17341 if (GET_MODE (exp) == VOIDmode)
17342 return force_reg (Pmode, exp);
17343 if (GET_MODE (exp) == Pmode)
17344 return copy_to_mode_reg (Pmode, exp);
17345 r = gen_reg_rtx (Pmode);
17346 emit_insn (gen_zero_extendsidi2 (r, exp));
17347 return r;
17350 /* Divide COUNTREG by SCALE. */
17351 static rtx
17352 scale_counter (rtx countreg, int scale)
17354 rtx sc;
17356 if (scale == 1)
17357 return countreg;
17358 if (CONST_INT_P (countreg))
17359 return GEN_INT (INTVAL (countreg) / scale);
17360 gcc_assert (REG_P (countreg));
17362 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17363 GEN_INT (exact_log2 (scale)),
17364 NULL, 1, OPTAB_DIRECT);
17365 return sc;
17368 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17369 DImode for constant loop counts. */
17371 static enum machine_mode
17372 counter_mode (rtx count_exp)
17374 if (GET_MODE (count_exp) != VOIDmode)
17375 return GET_MODE (count_exp);
17376 if (!CONST_INT_P (count_exp))
17377 return Pmode;
17378 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17379 return DImode;
17380 return SImode;
17383 /* When SRCPTR is non-NULL, output a simple loop to move memory pointed
17384 to by SRCPTR to DESTPTR via chunks of MODE unrolled UNROLL times,
17385 overall size is COUNT specified in bytes. When SRCPTR is NULL, output the
17386 equivalent loop to set memory by VALUE (supposed to be in MODE).
17388 The size is rounded down to a whole number of chunks moved at once.
17389 SRCMEM and DESTMEM provide MEMrtx to feed proper aliasing info. */
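/* For example, with MODE == DImode and UNROLL == 4 each iteration moves
   32 bytes, so a COUNT of 100 runs three iterations (96 bytes) and leaves
   the remaining 4 bytes to the caller's epilogue code.  */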
17392 static void
17393 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17394 rtx destptr, rtx srcptr, rtx value,
17395 rtx count, enum machine_mode mode, int unroll,
17396 int expected_size)
17398 rtx out_label, top_label, iter, tmp;
17399 enum machine_mode iter_mode = counter_mode (count);
17400 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17401 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17402 rtx size;
17403 rtx x_addr;
17404 rtx y_addr;
17405 int i;
17407 top_label = gen_label_rtx ();
17408 out_label = gen_label_rtx ();
17409 iter = gen_reg_rtx (iter_mode);
17411 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17412 NULL, 1, OPTAB_DIRECT);
17413 /* Those two should combine. */
17414 if (piece_size == const1_rtx)
17416 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17417 true, out_label);
17418 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17420 emit_move_insn (iter, const0_rtx);
17422 emit_label (top_label);
17424 tmp = convert_modes (Pmode, iter_mode, iter, true);
17425 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17426 destmem = change_address (destmem, mode, x_addr);
17428 if (srcmem)
17430 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17431 srcmem = change_address (srcmem, mode, y_addr);
17433 /* When unrolling for chips that reorder memory reads and writes,
17434 we can save registers by using a single temporary.
17435 Also using 4 temporaries is overkill in 32bit mode. */
17436 if (!TARGET_64BIT && 0)
17438 for (i = 0; i < unroll; i++)
17440 if (i)
17442 destmem =
17443 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17444 srcmem =
17445 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17447 emit_move_insn (destmem, srcmem);
17450 else
17452 rtx tmpreg[4];
17453 gcc_assert (unroll <= 4);
17454 for (i = 0; i < unroll; i++)
17456 tmpreg[i] = gen_reg_rtx (mode);
17457 if (i)
17459 srcmem =
17460 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17462 emit_move_insn (tmpreg[i], srcmem);
17464 for (i = 0; i < unroll; i++)
17466 if (i)
17468 destmem =
17469 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17471 emit_move_insn (destmem, tmpreg[i]);
17475 else
17476 for (i = 0; i < unroll; i++)
17478 if (i)
17479 destmem =
17480 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17481 emit_move_insn (destmem, value);
17484 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17485 true, OPTAB_LIB_WIDEN);
17486 if (tmp != iter)
17487 emit_move_insn (iter, tmp);
17489 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17490 true, top_label);
17491 if (expected_size != -1)
17493 expected_size /= GET_MODE_SIZE (mode) * unroll;
17494 if (expected_size == 0)
17495 predict_jump (0);
17496 else if (expected_size > REG_BR_PROB_BASE)
17497 predict_jump (REG_BR_PROB_BASE - 1);
17498 else
17499 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17501 else
17502 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17503 iter = ix86_zero_extend_to_Pmode (iter);
17504 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17505 true, OPTAB_LIB_WIDEN);
17506 if (tmp != destptr)
17507 emit_move_insn (destptr, tmp);
17508 if (srcptr)
17510 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17511 true, OPTAB_LIB_WIDEN);
17512 if (tmp != srcptr)
17513 emit_move_insn (srcptr, tmp);
17515 emit_label (out_label);
17518 /* Output "rep; mov" instruction.
17519 Arguments have the same meaning as for the previous function. */
17520 static void
17521 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17522 rtx destptr, rtx srcptr,
17523 rtx count,
17524 enum machine_mode mode)
17526 rtx destexp;
17527 rtx srcexp;
17528 rtx countreg;
17530 /* If the size is known, it is shorter to use rep movs. */
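/* E.g. a known 64-byte QImode copy is emitted as "rep movsl" with a count
   of 16 rather than "rep movsb" with a count of 64.  */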
17531 if (mode == QImode && CONST_INT_P (count)
17532 && !(INTVAL (count) & 3))
17533 mode = SImode;
17535 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17536 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17537 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17538 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17539 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17540 if (mode != QImode)
17542 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17543 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17544 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17545 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17546 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17547 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17549 else
17551 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17552 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17554 if (CONST_INT_P (count))
17556 count = GEN_INT (INTVAL (count)
17557 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17558 destmem = shallow_copy_rtx (destmem);
17559 srcmem = shallow_copy_rtx (srcmem);
17560 set_mem_size (destmem, count);
17561 set_mem_size (srcmem, count);
17563 else
17565 if (MEM_SIZE (destmem))
17566 set_mem_size (destmem, NULL_RTX);
17567 if (MEM_SIZE (srcmem))
17568 set_mem_size (srcmem, NULL_RTX);
17570 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17571 destexp, srcexp));
17574 /* Output "rep; stos" instruction.
17575 Arguments have the same meaning as for the previous function.  */
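/* For reference: "rep stos" with element size S stores the value in
AL/AX/EAX/RAX into ECX (RCX) consecutive elements at [EDI], leaving EDI
advanced by count * S; DESTEXP below encodes that final destination
pointer.  */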
17576 static void
17577 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17578 rtx count, enum machine_mode mode,
17579 rtx orig_value)
17581 rtx destexp;
17582 rtx countreg;
17584 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17585 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17586 value = force_reg (mode, gen_lowpart (mode, value));
17587 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17588 if (mode != QImode)
17590 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17591 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17592 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17594 else
17595 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17596 if (orig_value == const0_rtx && CONST_INT_P (count))
17598 count = GEN_INT (INTVAL (count)
17599 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17600 destmem = shallow_copy_rtx (destmem);
17601 set_mem_size (destmem, count);
17603 else if (MEM_SIZE (destmem))
17604 set_mem_size (destmem, NULL_RTX);
17605 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17608 static void
17609 emit_strmov (rtx destmem, rtx srcmem,
17610 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17612 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17613 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17614 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17617 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
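/* As an illustration of the strategy below: for a compile-time COUNT the
residue is copied by a straight-line run of 16/8/4/2/1-byte moves selected
by the low bits of COUNT; for a runtime COUNT, residues that may exceed 8
bytes are handled by a small byte loop, while smaller ones use 4-, 2- and
1-byte moves each guarded by an ix86_expand_aligntest check on COUNT.  */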
17618 static void
17619 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17620 rtx destptr, rtx srcptr, rtx count, int max_size)
17622 rtx src, dest;
17623 if (CONST_INT_P (count))
17625 HOST_WIDE_INT countval = INTVAL (count);
17626 int offset = 0;
17628 if ((countval & 0x10) && max_size > 16)
17630 if (TARGET_64BIT)
17632 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17633 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17635 else
17636 gcc_unreachable ();
17637 offset += 16;
17639 if ((countval & 0x08) && max_size > 8)
17641 if (TARGET_64BIT)
17642 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17643 else
17645 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17646 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17648 offset += 8;
17650 if ((countval & 0x04) && max_size > 4)
17652 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17653 offset += 4;
17655 if ((countval & 0x02) && max_size > 2)
17657 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17658 offset += 2;
17660 if ((countval & 0x01) && max_size > 1)
17662 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17663 offset += 1;
17665 return;
17667 if (max_size > 8)
17669 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17670 count, 1, OPTAB_DIRECT);
17671 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17672 count, QImode, 1, 4);
17673 return;
17676 /* When single-instruction stringops are available, we can cheaply advance the
17677 dest and src pointers. Otherwise we save code size by maintaining an offset
17678 (zero is readily available from the preceding rep operation) and using x86 addressing modes.
17680 if (TARGET_SINGLE_STRINGOP)
17682 if (max_size > 4)
17684 rtx label = ix86_expand_aligntest (count, 4, true);
17685 src = change_address (srcmem, SImode, srcptr);
17686 dest = change_address (destmem, SImode, destptr);
17687 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17688 emit_label (label);
17689 LABEL_NUSES (label) = 1;
17691 if (max_size > 2)
17693 rtx label = ix86_expand_aligntest (count, 2, true);
17694 src = change_address (srcmem, HImode, srcptr);
17695 dest = change_address (destmem, HImode, destptr);
17696 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17697 emit_label (label);
17698 LABEL_NUSES (label) = 1;
17700 if (max_size > 1)
17702 rtx label = ix86_expand_aligntest (count, 1, true);
17703 src = change_address (srcmem, QImode, srcptr);
17704 dest = change_address (destmem, QImode, destptr);
17705 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17706 emit_label (label);
17707 LABEL_NUSES (label) = 1;
17710 else
17712 rtx offset = force_reg (Pmode, const0_rtx);
17713 rtx tmp;
17715 if (max_size > 4)
17717 rtx label = ix86_expand_aligntest (count, 4, true);
17718 src = change_address (srcmem, SImode, srcptr);
17719 dest = change_address (destmem, SImode, destptr);
17720 emit_move_insn (dest, src);
17721 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17722 true, OPTAB_LIB_WIDEN);
17723 if (tmp != offset)
17724 emit_move_insn (offset, tmp);
17725 emit_label (label);
17726 LABEL_NUSES (label) = 1;
17728 if (max_size > 2)
17730 rtx label = ix86_expand_aligntest (count, 2, true);
17731 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17732 src = change_address (srcmem, HImode, tmp);
17733 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17734 dest = change_address (destmem, HImode, tmp);
17735 emit_move_insn (dest, src);
17736 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17737 true, OPTAB_LIB_WIDEN);
17738 if (tmp != offset)
17739 emit_move_insn (offset, tmp);
17740 emit_label (label);
17741 LABEL_NUSES (label) = 1;
17743 if (max_size > 1)
17745 rtx label = ix86_expand_aligntest (count, 1, true);
17746 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17747 src = change_address (srcmem, QImode, tmp);
17748 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17749 dest = change_address (destmem, QImode, tmp);
17750 emit_move_insn (dest, src);
17751 emit_label (label);
17752 LABEL_NUSES (label) = 1;
17757 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17758 static void
17759 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17760 rtx count, int max_size)
17762 count =
17763 expand_simple_binop (counter_mode (count), AND, count,
17764 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17765 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17766 gen_lowpart (QImode, value), count, QImode,
17767 1, max_size / 2);
17770 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17771 static void
17772 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17774 rtx dest;
17776 if (CONST_INT_P (count))
17778 HOST_WIDE_INT countval = INTVAL (count);
17779 int offset = 0;
17781 if ((countval & 0x10) && max_size > 16)
17783 if (TARGET_64BIT)
17785 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17786 emit_insn (gen_strset (destptr, dest, value));
17787 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17788 emit_insn (gen_strset (destptr, dest, value));
17790 else
17791 gcc_unreachable ();
17792 offset += 16;
17794 if ((countval & 0x08) && max_size > 8)
17796 if (TARGET_64BIT)
17798 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17799 emit_insn (gen_strset (destptr, dest, value));
17801 else
17803 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17804 emit_insn (gen_strset (destptr, dest, value));
17805 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17806 emit_insn (gen_strset (destptr, dest, value));
17808 offset += 8;
17810 if ((countval & 0x04) && max_size > 4)
17812 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17813 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17814 offset += 4;
17816 if ((countval & 0x02) && max_size > 2)
17818 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17819 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17820 offset += 2;
17822 if ((countval & 0x01) && max_size > 1)
17824 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17825 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17826 offset += 1;
17828 return;
17830 if (max_size > 32)
17832 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17833 return;
17835 if (max_size > 16)
17837 rtx label = ix86_expand_aligntest (count, 16, true);
17838 if (TARGET_64BIT)
17840 dest = change_address (destmem, DImode, destptr);
17841 emit_insn (gen_strset (destptr, dest, value));
17842 emit_insn (gen_strset (destptr, dest, value));
17844 else
17846 dest = change_address (destmem, SImode, destptr);
17847 emit_insn (gen_strset (destptr, dest, value));
17848 emit_insn (gen_strset (destptr, dest, value));
17849 emit_insn (gen_strset (destptr, dest, value));
17850 emit_insn (gen_strset (destptr, dest, value));
17852 emit_label (label);
17853 LABEL_NUSES (label) = 1;
17855 if (max_size > 8)
17857 rtx label = ix86_expand_aligntest (count, 8, true);
17858 if (TARGET_64BIT)
17860 dest = change_address (destmem, DImode, destptr);
17861 emit_insn (gen_strset (destptr, dest, value));
17863 else
17865 dest = change_address (destmem, SImode, destptr);
17866 emit_insn (gen_strset (destptr, dest, value));
17867 emit_insn (gen_strset (destptr, dest, value));
17869 emit_label (label);
17870 LABEL_NUSES (label) = 1;
17872 if (max_size > 4)
17874 rtx label = ix86_expand_aligntest (count, 4, true);
17875 dest = change_address (destmem, SImode, destptr);
17876 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17877 emit_label (label);
17878 LABEL_NUSES (label) = 1;
17880 if (max_size > 2)
17882 rtx label = ix86_expand_aligntest (count, 2, true);
17883 dest = change_address (destmem, HImode, destptr);
17884 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17885 emit_label (label);
17886 LABEL_NUSES (label) = 1;
17888 if (max_size > 1)
17890 rtx label = ix86_expand_aligntest (count, 1, true);
17891 dest = change_address (destmem, QImode, destptr);
17892 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17893 emit_label (label);
17894 LABEL_NUSES (label) = 1;
17898 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN, to
17899 DESIRED_ALIGNMENT. */
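/* For example, if DEST starts at an odd address and DESIRED_ALIGNMENT is 4,
this emits a conditional 1-byte copy and then a conditional 2-byte copy,
each guarded by an alignment test on DESTPTR, and decrements COUNT
accordingly so the main loop only sees the aligned remainder.  */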
17900 static void
17901 expand_movmem_prologue (rtx destmem, rtx srcmem,
17902 rtx destptr, rtx srcptr, rtx count,
17903 int align, int desired_alignment)
17905 if (align <= 1 && desired_alignment > 1)
17907 rtx label = ix86_expand_aligntest (destptr, 1, false);
17908 srcmem = change_address (srcmem, QImode, srcptr);
17909 destmem = change_address (destmem, QImode, destptr);
17910 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17911 ix86_adjust_counter (count, 1);
17912 emit_label (label);
17913 LABEL_NUSES (label) = 1;
17915 if (align <= 2 && desired_alignment > 2)
17917 rtx label = ix86_expand_aligntest (destptr, 2, false);
17918 srcmem = change_address (srcmem, HImode, srcptr);
17919 destmem = change_address (destmem, HImode, destptr);
17920 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17921 ix86_adjust_counter (count, 2);
17922 emit_label (label);
17923 LABEL_NUSES (label) = 1;
17925 if (align <= 4 && desired_alignment > 4)
17927 rtx label = ix86_expand_aligntest (destptr, 4, false);
17928 srcmem = change_address (srcmem, SImode, srcptr);
17929 destmem = change_address (destmem, SImode, destptr);
17930 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17931 ix86_adjust_counter (count, 4);
17932 emit_label (label);
17933 LABEL_NUSES (label) = 1;
17935 gcc_assert (desired_alignment <= 8);
17938 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17939 ALIGN_BYTES is how many bytes need to be copied. */
17940 static rtx
17941 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17942 int desired_align, int align_bytes)
17944 rtx src = *srcp;
17945 rtx src_size, dst_size;
17946 int off = 0;
17947 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17948 if (src_align_bytes >= 0)
17949 src_align_bytes = desired_align - src_align_bytes;
17950 src_size = MEM_SIZE (src);
17951 dst_size = MEM_SIZE (dst);
17952 if (align_bytes & 1)
17954 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17955 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17956 off = 1;
17957 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17959 if (align_bytes & 2)
17961 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17962 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17963 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17964 set_mem_align (dst, 2 * BITS_PER_UNIT);
17965 if (src_align_bytes >= 0
17966 && (src_align_bytes & 1) == (align_bytes & 1)
17967 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17968 set_mem_align (src, 2 * BITS_PER_UNIT);
17969 off = 2;
17970 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17972 if (align_bytes & 4)
17974 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17975 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17976 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17977 set_mem_align (dst, 4 * BITS_PER_UNIT);
17978 if (src_align_bytes >= 0)
17980 unsigned int src_align = 0;
17981 if ((src_align_bytes & 3) == (align_bytes & 3))
17982 src_align = 4;
17983 else if ((src_align_bytes & 1) == (align_bytes & 1))
17984 src_align = 2;
17985 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17986 set_mem_align (src, src_align * BITS_PER_UNIT);
17988 off = 4;
17989 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17991 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17992 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17993 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17994 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17995 if (src_align_bytes >= 0)
17997 unsigned int src_align = 0;
17998 if ((src_align_bytes & 7) == (align_bytes & 7))
17999 src_align = 8;
18000 else if ((src_align_bytes & 3) == (align_bytes & 3))
18001 src_align = 4;
18002 else if ((src_align_bytes & 1) == (align_bytes & 1))
18003 src_align = 2;
18004 if (src_align > (unsigned int) desired_align)
18005 src_align = desired_align;
18006 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18007 set_mem_align (src, src_align * BITS_PER_UNIT);
18009 if (dst_size)
18010 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18011 if (src_size)
18012 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
18013 *srcp = src;
18014 return dst;
18017 /* Set enough bytes of DEST to align DEST, known to be aligned by ALIGN, to
18018 DESIRED_ALIGNMENT. */
18019 static void
18020 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
18021 int align, int desired_alignment)
18023 if (align <= 1 && desired_alignment > 1)
18025 rtx label = ix86_expand_aligntest (destptr, 1, false);
18026 destmem = change_address (destmem, QImode, destptr);
18027 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
18028 ix86_adjust_counter (count, 1);
18029 emit_label (label);
18030 LABEL_NUSES (label) = 1;
18032 if (align <= 2 && desired_alignment > 2)
18034 rtx label = ix86_expand_aligntest (destptr, 2, false);
18035 destmem = change_address (destmem, HImode, destptr);
18036 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
18037 ix86_adjust_counter (count, 2);
18038 emit_label (label);
18039 LABEL_NUSES (label) = 1;
18041 if (align <= 4 && desired_alignment > 4)
18043 rtx label = ix86_expand_aligntest (destptr, 4, false);
18044 destmem = change_address (destmem, SImode, destptr);
18045 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
18046 ix86_adjust_counter (count, 4);
18047 emit_label (label);
18048 LABEL_NUSES (label) = 1;
18050 gcc_assert (desired_alignment <= 8);
18053 /* Set enough bytes of DST to align DST to DESIRED_ALIGN.
18054 ALIGN_BYTES is how many bytes need to be stored. */
18055 static rtx
18056 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18057 int desired_align, int align_bytes)
18059 int off = 0;
18060 rtx dst_size = MEM_SIZE (dst);
18061 if (align_bytes & 1)
18063 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18064 off = 1;
18065 emit_insn (gen_strset (destreg, dst,
18066 gen_lowpart (QImode, value)));
18068 if (align_bytes & 2)
18070 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18071 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18072 set_mem_align (dst, 2 * BITS_PER_UNIT);
18073 off = 2;
18074 emit_insn (gen_strset (destreg, dst,
18075 gen_lowpart (HImode, value)));
18077 if (align_bytes & 4)
18079 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18080 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18081 set_mem_align (dst, 4 * BITS_PER_UNIT);
18082 off = 4;
18083 emit_insn (gen_strset (destreg, dst,
18084 gen_lowpart (SImode, value)));
18086 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18087 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18088 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18089 if (dst_size)
18090 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18091 return dst;
18094 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
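/* Roughly: when optimizing for size the choice is "rep movs"/"rep stos"
(or a byte loop when the rep prefix cannot be used); a known tiny size
gets a byte-at-a-time loop; otherwise the per-CPU stringop_algs tables in
the cost structure map the expected block size to an algorithm, with
TARGET_INLINE_ALL_STRINGOPS and -minline-stringops-dynamically overriding
the libcall choice.  */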
18095 static enum stringop_alg
18096 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18097 int *dynamic_check)
18099 const struct stringop_algs * algs;
18100 bool optimize_for_speed;
18101 /* Algorithms using the rep prefix want at least edi and ecx;
18102 additionally, memset wants eax and memcpy wants esi. Don't
18103 consider such algorithms if the user has appropriated those
18104 registers for their own purposes. */
18105 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
18106 || (memset
18107 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18109 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18110 || (alg != rep_prefix_1_byte \
18111 && alg != rep_prefix_4_byte \
18112 && alg != rep_prefix_8_byte))
18113 const struct processor_costs *cost;
18115 /* Even if the string operation call is cold, we still might spend a lot
18116 of time processing large blocks. */
18117 if (optimize_function_for_size_p (cfun)
18118 || (optimize_insn_for_size_p ()
18119 && expected_size != -1 && expected_size < 256))
18120 optimize_for_speed = false;
18121 else
18122 optimize_for_speed = true;
18124 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18126 *dynamic_check = -1;
18127 if (memset)
18128 algs = &cost->memset[TARGET_64BIT != 0];
18129 else
18130 algs = &cost->memcpy[TARGET_64BIT != 0];
18131 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18132 return stringop_alg;
18133 /* rep; movq or rep; movl is the smallest variant. */
18134 else if (!optimize_for_speed)
18136 if (!count || (count & 3))
18137 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18138 else
18139 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18141 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.
18143 else if (expected_size != -1 && expected_size < 4)
18144 return loop_1_byte;
18145 else if (expected_size != -1)
18147 unsigned int i;
18148 enum stringop_alg alg = libcall;
18149 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18151 /* We get here if the algorithms that were not libcall-based
18152 were rep-prefix based and we are unable to use rep prefixes
18153 based on global register usage. Break out of the loop and
18154 use the heuristic below. */
18155 if (algs->size[i].max == 0)
18156 break;
18157 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18159 enum stringop_alg candidate = algs->size[i].alg;
18161 if (candidate != libcall && ALG_USABLE_P (candidate))
18162 alg = candidate;
18163 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18164 last non-libcall inline algorithm. */
18165 if (TARGET_INLINE_ALL_STRINGOPS)
18167 /* When the current size is best copied by a libcall,
18168 but we are still forced to inline, run the heuristic below
18169 that will pick code for medium sized blocks. */
18170 if (alg != libcall)
18171 return alg;
18172 break;
18174 else if (ALG_USABLE_P (candidate))
18175 return candidate;
18178 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18180 /* When asked to inline the call anyway, try to pick a meaningful choice.
18181 We look for the maximal size of block that is faster to copy by hand and
18182 take blocks of at most that size, guessing that the average size will
18183 be roughly half of the block.
18185 If this turns out to be bad, we might simply specify the preferred
18186 choice in ix86_costs. */
18187 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18188 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18190 int max = -1;
18191 enum stringop_alg alg;
18192 int i;
18193 bool any_alg_usable_p = true;
18195 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18197 enum stringop_alg candidate = algs->size[i].alg;
18198 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18200 if (candidate != libcall && candidate
18201 && ALG_USABLE_P (candidate))
18202 max = algs->size[i].max;
18204 /* If there aren't any usable algorithms, then recursing on
18205 smaller sizes isn't going to find anything. Just return the
18206 simple byte-at-a-time copy loop. */
18207 if (!any_alg_usable_p)
18209 /* Pick something reasonable. */
18210 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18211 *dynamic_check = 128;
18212 return loop_1_byte;
18214 if (max == -1)
18215 max = 4096;
18216 alg = decide_alg (count, max / 2, memset, dynamic_check);
18217 gcc_assert (*dynamic_check == -1);
18218 gcc_assert (alg != libcall);
18219 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18220 *dynamic_check = max;
18221 return alg;
18223 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18224 #undef ALG_USABLE_P
18227 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18228 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18229 static int
18230 decide_alignment (int align,
18231 enum stringop_alg alg,
18232 int expected_size)
18234 int desired_align = 0;
18235 switch (alg)
18237 case no_stringop:
18238 gcc_unreachable ();
18239 case loop:
18240 case unrolled_loop:
18241 desired_align = GET_MODE_SIZE (Pmode);
18242 break;
18243 case rep_prefix_8_byte:
18244 desired_align = 8;
18245 break;
18246 case rep_prefix_4_byte:
18247 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
18248 copying a whole cacheline at once. */
18249 if (TARGET_PENTIUMPRO)
18250 desired_align = 8;
18251 else
18252 desired_align = 4;
18253 break;
18254 case rep_prefix_1_byte:
18255 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
18256 copying a whole cacheline at once. */
18257 if (TARGET_PENTIUMPRO)
18258 desired_align = 8;
18259 else
18260 desired_align = 1;
18261 break;
18262 case loop_1_byte:
18263 desired_align = 1;
18264 break;
18265 case libcall:
18266 return 0;
18269 if (optimize_size)
18270 desired_align = 1;
18271 if (desired_align < align)
18272 desired_align = align;
18273 if (expected_size != -1 && expected_size < 4)
18274 desired_align = align;
18275 return desired_align;
18278 /* Return the smallest power of 2 greater than VAL. */
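/* E.g. VAL == 3 yields 4 and VAL == 4 yields 8; the result is always
strictly greater than VAL.  */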
18279 static int
18280 smallest_pow2_greater_than (int val)
18282 int ret = 1;
18283 while (ret <= val)
18284 ret <<= 1;
18285 return ret;
18288 /* Expand string move (memcpy) operation. Use i386 string operations when
18289 profitable. expand_setmem contains similar code. The code depends upon
18290 architecture, block size and alignment, but always has the same
18291 overall structure:
18293 1) Prologue guard: Conditional that jumps up to epilogues for small
18294 blocks that can be handled by epilogue alone. This is faster but
18295 also needed for correctness, since the prologue assumes the block is larger
18296 than the desired alignment.
18298 Optional dynamic check for size and libcall for large
18299 blocks is emitted here too, with -minline-stringops-dynamically.
18301 2) Prologue: copy first few bytes in order to get destination aligned
18302 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
18303 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18304 We emit either a jump tree on power of two sized blocks, or a byte loop.
18306 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18307 with specified algorithm.
18309 4) Epilogue: code copying tail of the block that is too small to be
18310 handled by main body (or up to size guarded by prologue guard). */
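/* As a concrete illustration, a runtime-sized copy using the
rep_prefix_4_byte algorithm with a 4-byte desired alignment expands
roughly to:
if (count < epilogue_size_needed) goto epilogue;          -- step 1
copy up to 3 bytes until dest is 4-byte aligned,
adjusting count;                                           -- step 2
rep movsl with ecx = count / 4;                            -- step 3
epilogue: copy the remaining count & 3 bytes.              -- step 4  */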
18313 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18314 rtx expected_align_exp, rtx expected_size_exp)
18316 rtx destreg;
18317 rtx srcreg;
18318 rtx label = NULL;
18319 rtx tmp;
18320 rtx jump_around_label = NULL;
18321 HOST_WIDE_INT align = 1;
18322 unsigned HOST_WIDE_INT count = 0;
18323 HOST_WIDE_INT expected_size = -1;
18324 int size_needed = 0, epilogue_size_needed;
18325 int desired_align = 0, align_bytes = 0;
18326 enum stringop_alg alg;
18327 int dynamic_check;
18328 bool need_zero_guard = false;
18330 if (CONST_INT_P (align_exp))
18331 align = INTVAL (align_exp);
18332 /* i386 can do misaligned access at a reasonably increased cost. */
18333 if (CONST_INT_P (expected_align_exp)
18334 && INTVAL (expected_align_exp) > align)
18335 align = INTVAL (expected_align_exp);
18336 /* ALIGN is the minimum of destination and source alignment, but we care here
18337 just about destination alignment. */
18338 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18339 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18341 if (CONST_INT_P (count_exp))
18342 count = expected_size = INTVAL (count_exp);
18343 if (CONST_INT_P (expected_size_exp) && count == 0)
18344 expected_size = INTVAL (expected_size_exp);
18346 /* Make sure we don't need to care about overflow later on. */
18347 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18348 return 0;
18350 /* Step 0: Decide on preferred algorithm, desired alignment and
18351 size of chunks to be copied by main loop. */
18353 alg = decide_alg (count, expected_size, false, &dynamic_check);
18354 desired_align = decide_alignment (align, alg, expected_size);
18356 if (!TARGET_ALIGN_STRINGOPS)
18357 align = desired_align;
18359 if (alg == libcall)
18360 return 0;
18361 gcc_assert (alg != no_stringop);
18362 if (!count)
18363 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18364 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18365 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18366 switch (alg)
18368 case libcall:
18369 case no_stringop:
18370 gcc_unreachable ();
18371 case loop:
18372 need_zero_guard = true;
18373 size_needed = GET_MODE_SIZE (Pmode);
18374 break;
18375 case unrolled_loop:
18376 need_zero_guard = true;
18377 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18378 break;
18379 case rep_prefix_8_byte:
18380 size_needed = 8;
18381 break;
18382 case rep_prefix_4_byte:
18383 size_needed = 4;
18384 break;
18385 case rep_prefix_1_byte:
18386 size_needed = 1;
18387 break;
18388 case loop_1_byte:
18389 need_zero_guard = true;
18390 size_needed = 1;
18391 break;
18394 epilogue_size_needed = size_needed;
18396 /* Step 1: Prologue guard. */
18398 /* Alignment code needs count to be in register. */
18399 if (CONST_INT_P (count_exp) && desired_align > align)
18401 if (INTVAL (count_exp) > desired_align
18402 && INTVAL (count_exp) > size_needed)
18404 align_bytes
18405 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18406 if (align_bytes <= 0)
18407 align_bytes = 0;
18408 else
18409 align_bytes = desired_align - align_bytes;
18411 if (align_bytes == 0)
18412 count_exp = force_reg (counter_mode (count_exp), count_exp);
18414 gcc_assert (desired_align >= 1 && align >= 1);
18416 /* Ensure that alignment prologue won't copy past end of block. */
18417 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18419 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18420 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18421 Make sure it is a power of 2. */
18422 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18424 if (count)
18426 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18428 /* If main algorithm works on QImode, no epilogue is needed.
18429 For small sizes just don't align anything. */
18430 if (size_needed == 1)
18431 desired_align = align;
18432 else
18433 goto epilogue;
18436 else
18438 label = gen_label_rtx ();
18439 emit_cmp_and_jump_insns (count_exp,
18440 GEN_INT (epilogue_size_needed),
18441 LTU, 0, counter_mode (count_exp), 1, label);
18442 if (expected_size == -1 || expected_size < epilogue_size_needed)
18443 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18444 else
18445 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18449 /* Emit code to decide at runtime whether a library call or inline code should be
18450 used. */
18451 if (dynamic_check != -1)
18453 if (CONST_INT_P (count_exp))
18455 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18457 emit_block_move_via_libcall (dst, src, count_exp, false);
18458 count_exp = const0_rtx;
18459 goto epilogue;
18462 else
18464 rtx hot_label = gen_label_rtx ();
18465 jump_around_label = gen_label_rtx ();
18466 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18467 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18468 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18469 emit_block_move_via_libcall (dst, src, count_exp, false);
18470 emit_jump (jump_around_label);
18471 emit_label (hot_label);
18475 /* Step 2: Alignment prologue. */
18477 if (desired_align > align)
18479 if (align_bytes == 0)
18481 /* Except for the first move in the epilogue, we no longer know
18482 the constant offset in aliasing info. It doesn't seem worth
18483 the pain to maintain it for the first move, so throw away
18484 the info early. */
18485 src = change_address (src, BLKmode, srcreg);
18486 dst = change_address (dst, BLKmode, destreg);
18487 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18488 desired_align);
18490 else
18492 /* If we know how many bytes need to be stored before dst is
18493 sufficiently aligned, maintain aliasing info accurately. */
18494 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18495 desired_align, align_bytes);
18496 count_exp = plus_constant (count_exp, -align_bytes);
18497 count -= align_bytes;
18499 if (need_zero_guard
18500 && (count < (unsigned HOST_WIDE_INT) size_needed
18501 || (align_bytes == 0
18502 && count < ((unsigned HOST_WIDE_INT) size_needed
18503 + desired_align - align))))
18505 /* It is possible that we copied enough so the main loop will not
18506 execute. */
18507 gcc_assert (size_needed > 1);
18508 if (label == NULL_RTX)
18509 label = gen_label_rtx ();
18510 emit_cmp_and_jump_insns (count_exp,
18511 GEN_INT (size_needed),
18512 LTU, 0, counter_mode (count_exp), 1, label);
18513 if (expected_size == -1
18514 || expected_size < (desired_align - align) / 2 + size_needed)
18515 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18516 else
18517 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18520 if (label && size_needed == 1)
18522 emit_label (label);
18523 LABEL_NUSES (label) = 1;
18524 label = NULL;
18525 epilogue_size_needed = 1;
18527 else if (label == NULL_RTX)
18528 epilogue_size_needed = size_needed;
18530 /* Step 3: Main loop. */
18532 switch (alg)
18534 case libcall:
18535 case no_stringop:
18536 gcc_unreachable ();
18537 case loop_1_byte:
18538 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18539 count_exp, QImode, 1, expected_size);
18540 break;
18541 case loop:
18542 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18543 count_exp, Pmode, 1, expected_size);
18544 break;
18545 case unrolled_loop:
18546 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
18547 registers for 4 temporaries anyway. */
18548 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18549 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18550 expected_size);
18551 break;
18552 case rep_prefix_8_byte:
18553 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18554 DImode);
18555 break;
18556 case rep_prefix_4_byte:
18557 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18558 SImode);
18559 break;
18560 case rep_prefix_1_byte:
18561 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18562 QImode);
18563 break;
18565 /* Properly adjust the offsets of the src and dest memory for aliasing. */
18566 if (CONST_INT_P (count_exp))
18568 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18569 (count / size_needed) * size_needed);
18570 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18571 (count / size_needed) * size_needed);
18573 else
18575 src = change_address (src, BLKmode, srcreg);
18576 dst = change_address (dst, BLKmode, destreg);
18579 /* Step 4: Epilogue to copy the remaining bytes. */
18580 epilogue:
18581 if (label)
18583 /* When the main loop is done, COUNT_EXP might hold the original count,
18584 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18585 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18586 bytes. Compensate if needed. */
18588 if (size_needed < epilogue_size_needed)
18590 tmp =
18591 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18592 GEN_INT (size_needed - 1), count_exp, 1,
18593 OPTAB_DIRECT);
18594 if (tmp != count_exp)
18595 emit_move_insn (count_exp, tmp);
18597 emit_label (label);
18598 LABEL_NUSES (label) = 1;
18601 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18602 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18603 epilogue_size_needed);
18604 if (jump_around_label)
18605 emit_label (jump_around_label);
18606 return 1;
18609 /* Helper function for memset. For the QImode value 0xXY produce
18610 0xXYXYXYXY of the width specified by MODE. This is essentially
18611 a * 0x10101010, but we can do slightly better than
18612 synth_mult by unwinding the sequence by hand on CPUs with
18613 slow multiply. */
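/* For example, promoting the QImode value 0xAB to SImode yields 0xABABABAB.
The fallback below builds it by doubling the byte pattern (x |= x << 8;
x |= x << 16; and x |= x << 32 for DImode), while the multiply path uses a
multiply by the promoted constant 1 (0x01010101 for SImode) when the cost
tables say that is cheaper.  */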
18614 static rtx
18615 promote_duplicated_reg (enum machine_mode mode, rtx val)
18617 enum machine_mode valmode = GET_MODE (val);
18618 rtx tmp;
18619 int nops = mode == DImode ? 3 : 2;
18621 gcc_assert (mode == SImode || mode == DImode);
18622 if (val == const0_rtx)
18623 return copy_to_mode_reg (mode, const0_rtx);
18624 if (CONST_INT_P (val))
18626 HOST_WIDE_INT v = INTVAL (val) & 255;
18628 v |= v << 8;
18629 v |= v << 16;
18630 if (mode == DImode)
18631 v |= (v << 16) << 16;
18632 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18635 if (valmode == VOIDmode)
18636 valmode = QImode;
18637 if (valmode != QImode)
18638 val = gen_lowpart (QImode, val);
18639 if (mode == QImode)
18640 return val;
18641 if (!TARGET_PARTIAL_REG_STALL)
18642 nops--;
18643 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18644 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18645 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18646 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18648 rtx reg = convert_modes (mode, QImode, val, true);
18649 tmp = promote_duplicated_reg (mode, const1_rtx);
18650 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18651 OPTAB_DIRECT);
18653 else
18655 rtx reg = convert_modes (mode, QImode, val, true);
18657 if (!TARGET_PARTIAL_REG_STALL)
18658 if (mode == SImode)
18659 emit_insn (gen_movsi_insv_1 (reg, reg));
18660 else
18661 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18662 else
18664 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18665 NULL, 1, OPTAB_DIRECT);
18666 reg =
18667 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18669 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18670 NULL, 1, OPTAB_DIRECT);
18671 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18672 if (mode == SImode)
18673 return reg;
18674 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18675 NULL, 1, OPTAB_DIRECT);
18676 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18677 return reg;
18681 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
18682 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
18683 alignment from ALIGN to DESIRED_ALIGN. */
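/* E.g. a 64-bit memset using the rep_prefix_8_byte algorithm
(SIZE_NEEDED == 8) promotes the byte value to DImode once, so the
alignment prologue and the main loop can share the same promoted
register.  */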
18684 static rtx
18685 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18687 rtx promoted_val;
18689 if (TARGET_64BIT
18690 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18691 promoted_val = promote_duplicated_reg (DImode, val);
18692 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18693 promoted_val = promote_duplicated_reg (SImode, val);
18694 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18695 promoted_val = promote_duplicated_reg (HImode, val);
18696 else
18697 promoted_val = val;
18699 return promoted_val;
18702 /* Expand string set (memset) operation. Use i386 string operations when
18703 profitable. See expand_movmem comment for explanation of individual
18704 steps performed. */
18706 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18707 rtx expected_align_exp, rtx expected_size_exp)
18709 rtx destreg;
18710 rtx label = NULL;
18711 rtx tmp;
18712 rtx jump_around_label = NULL;
18713 HOST_WIDE_INT align = 1;
18714 unsigned HOST_WIDE_INT count = 0;
18715 HOST_WIDE_INT expected_size = -1;
18716 int size_needed = 0, epilogue_size_needed;
18717 int desired_align = 0, align_bytes = 0;
18718 enum stringop_alg alg;
18719 rtx promoted_val = NULL;
18720 bool force_loopy_epilogue = false;
18721 int dynamic_check;
18722 bool need_zero_guard = false;
18724 if (CONST_INT_P (align_exp))
18725 align = INTVAL (align_exp);
18726 /* i386 can do misaligned access at a reasonably increased cost. */
18727 if (CONST_INT_P (expected_align_exp)
18728 && INTVAL (expected_align_exp) > align)
18729 align = INTVAL (expected_align_exp);
18730 if (CONST_INT_P (count_exp))
18731 count = expected_size = INTVAL (count_exp);
18732 if (CONST_INT_P (expected_size_exp) && count == 0)
18733 expected_size = INTVAL (expected_size_exp);
18735 /* Make sure we don't need to care about overflow later on. */
18736 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18737 return 0;
18739 /* Step 0: Decide on preferred algorithm, desired alignment and
18740 size of chunks to be copied by main loop. */
18742 alg = decide_alg (count, expected_size, true, &dynamic_check);
18743 desired_align = decide_alignment (align, alg, expected_size);
18745 if (!TARGET_ALIGN_STRINGOPS)
18746 align = desired_align;
18748 if (alg == libcall)
18749 return 0;
18750 gcc_assert (alg != no_stringop);
18751 if (!count)
18752 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18753 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18754 switch (alg)
18756 case libcall:
18757 case no_stringop:
18758 gcc_unreachable ();
18759 case loop:
18760 need_zero_guard = true;
18761 size_needed = GET_MODE_SIZE (Pmode);
18762 break;
18763 case unrolled_loop:
18764 need_zero_guard = true;
18765 size_needed = GET_MODE_SIZE (Pmode) * 4;
18766 break;
18767 case rep_prefix_8_byte:
18768 size_needed = 8;
18769 break;
18770 case rep_prefix_4_byte:
18771 size_needed = 4;
18772 break;
18773 case rep_prefix_1_byte:
18774 size_needed = 1;
18775 break;
18776 case loop_1_byte:
18777 need_zero_guard = true;
18778 size_needed = 1;
18779 break;
18781 epilogue_size_needed = size_needed;
18783 /* Step 1: Prologue guard. */
18785 /* Alignment code needs count to be in register. */
18786 if (CONST_INT_P (count_exp) && desired_align > align)
18788 if (INTVAL (count_exp) > desired_align
18789 && INTVAL (count_exp) > size_needed)
18791 align_bytes
18792 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18793 if (align_bytes <= 0)
18794 align_bytes = 0;
18795 else
18796 align_bytes = desired_align - align_bytes;
18798 if (align_bytes == 0)
18800 enum machine_mode mode = SImode;
18801 if (TARGET_64BIT && (count & ~0xffffffff))
18802 mode = DImode;
18803 count_exp = force_reg (mode, count_exp);
18806 /* Do the cheap promotion to allow better CSE across the
18807 main loop and epilogue (i.e. one load of the big constant in
18808 front of all the code). */
18809 if (CONST_INT_P (val_exp))
18810 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18811 desired_align, align);
18812 /* Ensure that alignment prologue won't copy past end of block. */
18813 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18815 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18816 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18817 Make sure it is a power of 2. */
18818 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18820 /* To improve performance of small blocks, we jump around the code that
18821 promotes VAL. This means that if the promoted VAL is not constant,
18822 we might not use it in the epilogue and have to use the byte
18823 loop variant. */
18824 if (epilogue_size_needed > 2 && !promoted_val)
18825 force_loopy_epilogue = true;
18826 if (count)
18828 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18830 /* If main algorithm works on QImode, no epilogue is needed.
18831 For small sizes just don't align anything. */
18832 if (size_needed == 1)
18833 desired_align = align;
18834 else
18835 goto epilogue;
18838 else
18840 label = gen_label_rtx ();
18841 emit_cmp_and_jump_insns (count_exp,
18842 GEN_INT (epilogue_size_needed),
18843 LTU, 0, counter_mode (count_exp), 1, label);
18844 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18845 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18846 else
18847 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18850 if (dynamic_check != -1)
18852 rtx hot_label = gen_label_rtx ();
18853 jump_around_label = gen_label_rtx ();
18854 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18855 LEU, 0, counter_mode (count_exp), 1, hot_label);
18856 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18857 set_storage_via_libcall (dst, count_exp, val_exp, false);
18858 emit_jump (jump_around_label);
18859 emit_label (hot_label);
18862 /* Step 2: Alignment prologue. */
18864 /* Do the expensive promotion once we branched off the small blocks. */
18865 if (!promoted_val)
18866 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18867 desired_align, align);
18868 gcc_assert (desired_align >= 1 && align >= 1);
18870 if (desired_align > align)
18872 if (align_bytes == 0)
18874 /* Except for the first move in the epilogue, we no longer know
18875 the constant offset in aliasing info. It doesn't seem worth
18876 the pain to maintain it for the first move, so throw away
18877 the info early. */
18878 dst = change_address (dst, BLKmode, destreg);
18879 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18880 desired_align);
18882 else
18884 /* If we know how many bytes need to be stored before dst is
18885 sufficiently aligned, maintain aliasing info accurately. */
18886 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18887 desired_align, align_bytes);
18888 count_exp = plus_constant (count_exp, -align_bytes);
18889 count -= align_bytes;
18891 if (need_zero_guard
18892 && (count < (unsigned HOST_WIDE_INT) size_needed
18893 || (align_bytes == 0
18894 && count < ((unsigned HOST_WIDE_INT) size_needed
18895 + desired_align - align))))
18897 /* It is possible that we copied enough so the main loop will not
18898 execute. */
18899 gcc_assert (size_needed > 1);
18900 if (label == NULL_RTX)
18901 label = gen_label_rtx ();
18902 emit_cmp_and_jump_insns (count_exp,
18903 GEN_INT (size_needed),
18904 LTU, 0, counter_mode (count_exp), 1, label);
18905 if (expected_size == -1
18906 || expected_size < (desired_align - align) / 2 + size_needed)
18907 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18908 else
18909 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18912 if (label && size_needed == 1)
18914 emit_label (label);
18915 LABEL_NUSES (label) = 1;
18916 label = NULL;
18917 promoted_val = val_exp;
18918 epilogue_size_needed = 1;
18920 else if (label == NULL_RTX)
18921 epilogue_size_needed = size_needed;
18923 /* Step 3: Main loop. */
18925 switch (alg)
18927 case libcall:
18928 case no_stringop:
18929 gcc_unreachable ();
18930 case loop_1_byte:
18931 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18932 count_exp, QImode, 1, expected_size);
18933 break;
18934 case loop:
18935 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18936 count_exp, Pmode, 1, expected_size);
18937 break;
18938 case unrolled_loop:
18939 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18940 count_exp, Pmode, 4, expected_size);
18941 break;
18942 case rep_prefix_8_byte:
18943 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18944 DImode, val_exp);
18945 break;
18946 case rep_prefix_4_byte:
18947 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18948 SImode, val_exp);
18949 break;
18950 case rep_prefix_1_byte:
18951 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18952 QImode, val_exp);
18953 break;
18955 /* Properly adjust the offset of the dest memory for aliasing. */
18956 if (CONST_INT_P (count_exp))
18957 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18958 (count / size_needed) * size_needed);
18959 else
18960 dst = change_address (dst, BLKmode, destreg);
18962 /* Step 4: Epilogue to copy the remaining bytes. */
18964 if (label)
18966 /* When the main loop is done, COUNT_EXP might hold the original count,
18967 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18968 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18969 bytes. Compensate if needed. */
18971 if (size_needed < epilogue_size_needed)
18973 tmp =
18974 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18975 GEN_INT (size_needed - 1), count_exp, 1,
18976 OPTAB_DIRECT);
18977 if (tmp != count_exp)
18978 emit_move_insn (count_exp, tmp);
18980 emit_label (label);
18981 LABEL_NUSES (label) = 1;
18983 epilogue:
18984 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18986 if (force_loopy_epilogue)
18987 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18988 epilogue_size_needed);
18989 else
18990 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18991 epilogue_size_needed);
18993 if (jump_around_label)
18994 emit_label (jump_around_label);
18995 return 1;
18998 /* Expand the appropriate insns for doing strlen if not just doing
18999 repnz; scasb
19001 out = result, initialized with the start address
19002 align_rtx = alignment of the address.
19003 scratch = scratch register, initialized with the start address when
19004 not aligned, otherwise undefined
19006 This is just the body. It needs the initializations mentioned above and
19007 some address computing at the end. These things are done in i386.md. */
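/* The emitted body first checks up to three leading bytes one at a time
until OUT is 4-byte aligned, then scans the string a word at a time using
the zero-byte test below, and finally adjusts OUT so that it points at the
terminating zero byte.  */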
19009 static void
19010 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
19012 int align;
19013 rtx tmp;
19014 rtx align_2_label = NULL_RTX;
19015 rtx align_3_label = NULL_RTX;
19016 rtx align_4_label = gen_label_rtx ();
19017 rtx end_0_label = gen_label_rtx ();
19018 rtx mem;
19019 rtx tmpreg = gen_reg_rtx (SImode);
19020 rtx scratch = gen_reg_rtx (SImode);
19021 rtx cmp;
19023 align = 0;
19024 if (CONST_INT_P (align_rtx))
19025 align = INTVAL (align_rtx);
19027 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
19029 /* Is there a known alignment and is it less than 4? */
19030 if (align < 4)
19032 rtx scratch1 = gen_reg_rtx (Pmode);
19033 emit_move_insn (scratch1, out);
19034 /* Is there a known alignment and is it not 2? */
19035 if (align != 2)
19037 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
19038 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
19040 /* Leave just the 3 lower bits. */
19041 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
19042 NULL_RTX, 0, OPTAB_WIDEN);
19044 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19045 Pmode, 1, align_4_label);
19046 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
19047 Pmode, 1, align_2_label);
19048 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
19049 Pmode, 1, align_3_label);
19051 else
19053 /* Since the alignment is 2, we have to check 2 or 0 bytes;
19054 check if it is aligned to a 4-byte boundary. */
19056 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19057 NULL_RTX, 0, OPTAB_WIDEN);
19059 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19060 Pmode, 1, align_4_label);
19063 mem = change_address (src, QImode, out);
19065 /* Now compare the bytes. */
19067 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
19068 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19069 QImode, 1, end_0_label);
19071 /* Increment the address. */
19072 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19074 /* Not needed with an alignment of 2 */
19075 if (align != 2)
19077 emit_label (align_2_label);
19079 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19080 end_0_label);
19082 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19084 emit_label (align_3_label);
19087 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19088 end_0_label);
19090 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19093 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
19094 align this loop; it only makes programs bigger and does not help to
19095 speed them up. */
19096 emit_label (align_4_label);
19098 mem = change_address (src, SImode, out);
19099 emit_move_insn (scratch, mem);
19100 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
19102 /* This formula yields a nonzero result iff one of the bytes is zero.
19103 This saves three branches inside the loop and many cycles. */
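/* The value computed below is (x - 0x01010101) & ~x & 0x80808080. For
instance, with x = 0x12345600 (low byte zero), x - 0x01010101 = 0x113354ff
and ~x = 0xedcba9ff, so the result keeps the 0x80 bit of the zero byte and
equals 0x00000080; when no byte of x is zero, every 0x80 bit is cleared
and the result is 0.  */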
19105 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19106 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19107 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19108 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19109 gen_int_mode (0x80808080, SImode)));
19110 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
19111 align_4_label);
19113 if (TARGET_CMOVE)
19115 rtx reg = gen_reg_rtx (SImode);
19116 rtx reg2 = gen_reg_rtx (Pmode);
19117 emit_move_insn (reg, tmpreg);
19118 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19120 /* If zero is not in the first two bytes, move two bytes forward. */
19121 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19122 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19123 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19124 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19125 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19126 reg,
19127 tmpreg)));
19128 /* Emit lea manually to avoid clobbering of flags. */
19129 emit_insn (gen_rtx_SET (SImode, reg2,
19130 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19132 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19133 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19134 emit_insn (gen_rtx_SET (VOIDmode, out,
19135 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19136 reg2,
19137 out)));
19139 else
19141 rtx end_2_label = gen_label_rtx ();
19142 /* Is zero in the first two bytes? */
19144 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19145 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19146 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19147 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19148 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19149 pc_rtx);
19150 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19151 JUMP_LABEL (tmp) = end_2_label;
19153 /* Not in the first two. Move two bytes forward. */
19154 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19155 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19157 emit_label (end_2_label);
19161 /* Avoid branch in fixing the byte. */
19162 tmpreg = gen_lowpart (QImode, tmpreg);
19163 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19164 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19165 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19166 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19168 emit_label (end_0_label);
19171 /* Expand strlen. */
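/* Roughly, two strategies are used below: with TARGET_UNROLL_STRLEN the
unrolled SImode scan above is emitted and the length is the returned end
pointer minus the start address; otherwise "repnz scasb" scans with the
count register preloaded with -1, and the length is recovered as
~count - 1.  */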
19174 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19176 rtx addr, scratch1, scratch2, scratch3, scratch4;
19178 /* The generic case of the strlen expander is long. Avoid expanding it
19179 unless TARGET_INLINE_ALL_STRINGOPS. */
19181 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19182 && !TARGET_INLINE_ALL_STRINGOPS
19183 && !optimize_insn_for_size_p ()
19184 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19185 return 0;
19187 addr = force_reg (Pmode, XEXP (src, 0));
19188 scratch1 = gen_reg_rtx (Pmode);
19190 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19191 && !optimize_insn_for_size_p ())
19193 /* Well, it seems that some optimizers do not combine a call like
19194 foo(strlen(bar), strlen(bar));
19195 when the move and the subtraction are done here. They do calculate
19196 the length just once when these instructions are done inside
19197 output_strlen_unroll(). But I think that since &bar[strlen(bar)] is
19198 often used and I use one fewer register for the lifetime of
19199 output_strlen_unroll(), this is better. */
19201 emit_move_insn (out, addr);
19203 ix86_expand_strlensi_unroll_1 (out, src, align);
19205 /* strlensi_unroll_1 returns the address of the zero at the end of
19206 the string, like memchr(), so compute the length by subtracting
19207 the start address. */
19208 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19210 else
19212 rtx unspec;
19214 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19215 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19216 return false;
19218 scratch2 = gen_reg_rtx (Pmode);
19219 scratch3 = gen_reg_rtx (Pmode);
19220 scratch4 = force_reg (Pmode, constm1_rtx);
19222 emit_move_insn (scratch3, addr);
19223 eoschar = force_reg (QImode, eoschar);
19225 src = replace_equiv_address_nv (src, scratch3);
19227 /* If .md starts supporting :P, this can be done in .md. */
19228 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19229 scratch4), UNSPEC_SCAS);
19230 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19231 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19232 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
19234 return 1;
19237 /* For a given symbol (function), construct code to compute the address of its
19238 PLT entry in the large x86-64 PIC model. */
19240 construct_plt_address (rtx symbol)
19242 rtx tmp = gen_reg_rtx (Pmode);
19243 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19245 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19246 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19248 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19249 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19250 return tmp;
19253 void
19254 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19255 rtx callarg2,
19256 rtx pop, int sibcall)
19258 rtx use = NULL, call;
19260 if (pop == const0_rtx)
19261 pop = NULL;
19262 gcc_assert (!TARGET_64BIT || !pop);
19264 if (TARGET_MACHO && !TARGET_64BIT)
19266 #if TARGET_MACHO
19267 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19268 fnaddr = machopic_indirect_call_target (fnaddr);
19269 #endif
19271 else
19273 /* Static functions and indirect calls don't need the pic register. */
19274 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19275 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19276 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19277 use_reg (&use, pic_offset_table_rtx);
19280 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19282 rtx al = gen_rtx_REG (QImode, AX_REG);
19283 emit_move_insn (al, callarg2);
19284 use_reg (&use, al);
19287 if (ix86_cmodel == CM_LARGE_PIC
19288 && MEM_P (fnaddr)
19289 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19290 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19291 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19292 else if (sibcall
19293 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19294 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19296 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19297 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19300 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19301 if (retval)
19302 call = gen_rtx_SET (VOIDmode, retval, call);
19303 if (pop)
19305 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19306 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19307 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19309 if (TARGET_64BIT
19310 && ix86_cfun_abi () == MS_ABI
19311 && (!callarg2 || INTVAL (callarg2) != -2))
19313 /* We need to represent that the XMM6-XMM15, SI and DI registers are
19314 clobbered by SYSV calls. */
19315 static int clobbered_registers[] = {
19316 XMM6_REG, XMM7_REG, XMM8_REG,
19317 XMM9_REG, XMM10_REG, XMM11_REG,
19318 XMM12_REG, XMM13_REG, XMM14_REG,
19319 XMM15_REG, SI_REG, DI_REG
19321 unsigned int i;
19322 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19323 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19324 UNSPEC_MS_TO_SYSV_CALL);
19326 vec[0] = call;
19327 vec[1] = unspec;
19328 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19329 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19330 ? TImode : DImode,
19331 gen_rtx_REG
19332 (SSE_REGNO_P (clobbered_registers[i])
19333 ? TImode : DImode,
19334 clobbered_registers[i]));
19336 call = gen_rtx_PARALLEL (VOIDmode,
19337 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19338 + 2, vec));
19341 call = emit_call_insn (call);
19342 if (use)
19343 CALL_INSN_FUNCTION_USAGE (call) = use;
19347 /* Clear stack slot assignments remembered from previous functions.
19348 This is called from INIT_EXPANDERS once before RTL is emitted for each
19349 function. */
19351 static struct machine_function *
19352 ix86_init_machine_status (void)
19354 struct machine_function *f;
19356 f = GGC_CNEW (struct machine_function);
19357 f->use_fast_prologue_epilogue_nregs = -1;
19358 f->tls_descriptor_call_expanded_p = 0;
19359 f->call_abi = ix86_abi;
19361 return f;
19364 /* Return a MEM corresponding to a stack slot with mode MODE.
19365 Allocate a new slot if necessary.
19367 The RTL for a function can have several slots available: N is
19368 which slot to use. */
19371 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19373 struct stack_local_entry *s;
19375 gcc_assert (n < MAX_386_STACK_LOCALS);
19377 /* Virtual slot is valid only before vregs are instantiated. */
19378 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19380 for (s = ix86_stack_locals; s; s = s->next)
19381 if (s->mode == mode && s->n == n)
19382 return copy_rtx (s->rtl);
19384 s = (struct stack_local_entry *)
19385 ggc_alloc (sizeof (struct stack_local_entry));
19386 s->n = n;
19387 s->mode = mode;
19388 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19390 s->next = ix86_stack_locals;
19391 ix86_stack_locals = s;
19392 return s->rtl;
19395 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19397 static GTY(()) rtx ix86_tls_symbol;
19399 ix86_tls_get_addr (void)
19402 if (!ix86_tls_symbol)
19404 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19405 (TARGET_ANY_GNU_TLS
19406 && !TARGET_64BIT)
19407 ? "___tls_get_addr"
19408 : "__tls_get_addr");
19411 return ix86_tls_symbol;
19414 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19416 static GTY(()) rtx ix86_tls_module_base_symbol;
19418 ix86_tls_module_base (void)
19421 if (!ix86_tls_module_base_symbol)
19423 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19424 "_TLS_MODULE_BASE_");
19425 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19426 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19429 return ix86_tls_module_base_symbol;
19432 /* Calculate the length of the memory address in the instruction
19433 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19436 memory_address_length (rtx addr)
19438 struct ix86_address parts;
19439 rtx base, index, disp;
19440 int len;
19441 int ok;
19443 if (GET_CODE (addr) == PRE_DEC
19444 || GET_CODE (addr) == POST_INC
19445 || GET_CODE (addr) == PRE_MODIFY
19446 || GET_CODE (addr) == POST_MODIFY)
19447 return 0;
19449 ok = ix86_decompose_address (addr, &parts);
19450 gcc_assert (ok);
19452 if (parts.base && GET_CODE (parts.base) == SUBREG)
19453 parts.base = SUBREG_REG (parts.base);
19454 if (parts.index && GET_CODE (parts.index) == SUBREG)
19455 parts.index = SUBREG_REG (parts.index);
19457 base = parts.base;
19458 index = parts.index;
19459 disp = parts.disp;
19460 len = 0;
19462 /* Rule of thumb:
19463 - esp as the base always wants an index,
19464 - ebp as the base always wants a displacement,
19465 - r12 as the base always wants an index,
19466 - r13 as the base always wants a displacement. */
19468 /* Register Indirect. */
19469 if (base && !index && !disp)
19471 /* esp (for its index) and ebp (for its displacement) need
19472 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19473 code. */
19474 if (REG_P (addr)
19475 && (addr == arg_pointer_rtx
19476 || addr == frame_pointer_rtx
19477 || REGNO (addr) == SP_REG
19478 || REGNO (addr) == BP_REG
19479 || REGNO (addr) == R12_REG
19480 || REGNO (addr) == R13_REG))
19481 len = 1;
19484 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
19485 is not disp32, but disp32(%rip), so for disp32
19486 a SIB byte is needed, unless print_operand_address
19487 optimizes it into disp32(%rip) or (%rip) is implied
19488 by UNSPEC. */
19489 else if (disp && !base && !index)
19491 len = 4;
19492 if (TARGET_64BIT)
19494 rtx symbol = disp;
19496 if (GET_CODE (disp) == CONST)
19497 symbol = XEXP (disp, 0);
19498 if (GET_CODE (symbol) == PLUS
19499 && CONST_INT_P (XEXP (symbol, 1)))
19500 symbol = XEXP (symbol, 0);
19502 if (GET_CODE (symbol) != LABEL_REF
19503 && (GET_CODE (symbol) != SYMBOL_REF
19504 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19505 && (GET_CODE (symbol) != UNSPEC
19506 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19507 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19508 len += 1;
19512 else
19514 /* Find the length of the displacement constant. */
19515 if (disp)
19517 if (base && satisfies_constraint_K (disp))
19518 len = 1;
19519 else
19520 len = 4;
19522 /* ebp always wants a displacement. Similarly r13. */
19523 else if (base && REG_P (base)
19524 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19525 len = 1;
19527 /* An index requires the two-byte modrm form.... */
19528 if (index
19529 /* ...like esp (or r12), which always wants an index. */
19530 || base == arg_pointer_rtx
19531 || base == frame_pointer_rtx
19532 || (base && REG_P (base)
19533 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
19534 len += 1;
19537 switch (parts.seg)
19539 case SEG_FS:
19540 case SEG_GS:
19541 len += 1;
19542 break;
19543 default:
19544 break;
19547 return len;
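/* Illustrative examples of the lengths computed above (excluding the modrm,
   opcode and prefix bytes, as documented):
       (%eax)           -> 0
       (%esp)           -> 1   SIB byte
       8(%ebp)          -> 1   disp8
       16(%eax,%ebx,4)  -> 2   disp8 + SIB byte
       disp32 alone     -> 4   (plus 1 in 64-bit code when a SIB byte is
                                needed instead of %rip-relative addressing)
       %fs:8(%ebx)      -> 2   disp8 + segment override  */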
19550 /* Compute the default value for the "length_immediate" attribute. When SHORTFORM
19551 is set, expect that the insn has an 8-bit immediate alternative. */
19553 ix86_attr_length_immediate_default (rtx insn, int shortform)
19555 int len = 0;
19556 int i;
19557 extract_insn_cached (insn);
19558 for (i = recog_data.n_operands - 1; i >= 0; --i)
19559 if (CONSTANT_P (recog_data.operand[i]))
19561 enum attr_mode mode = get_attr_mode (insn);
19563 gcc_assert (!len);
19564 if (shortform && CONST_INT_P (recog_data.operand[i]))
19566 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19567 switch (mode)
19569 case MODE_QI:
19570 len = 1;
19571 continue;
19572 case MODE_HI:
19573 ival = trunc_int_for_mode (ival, HImode);
19574 break;
19575 case MODE_SI:
19576 ival = trunc_int_for_mode (ival, SImode);
19577 break;
19578 default:
19579 break;
19581 if (IN_RANGE (ival, -128, 127))
19583 len = 1;
19584 continue;
19587 switch (mode)
19589 case MODE_QI:
19590 len = 1;
19591 break;
19592 case MODE_HI:
19593 len = 2;
19594 break;
19595 case MODE_SI:
19596 len = 4;
19597 break;
19598 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19599 case MODE_DI:
19600 len = 4;
19601 break;
19602 default:
19603 fatal_insn ("unknown insn mode", insn);
19606 return len;
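/* Illustrative example: with SHORTFORM set, an SImode insn with immediate
   100 counts as 1 byte (imm8) while immediate 100000 counts as 4 bytes
   (imm32); DImode immediates also count as 4 bytes, being encoded as 32-bit
   sign-extended values.  */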
19608 /* Compute default value for "length_address" attribute. */
19610 ix86_attr_length_address_default (rtx insn)
19612 int i;
19614 if (get_attr_type (insn) == TYPE_LEA)
19616 rtx set = PATTERN (insn), addr;
19618 if (GET_CODE (set) == PARALLEL)
19619 set = XVECEXP (set, 0, 0);
19621 gcc_assert (GET_CODE (set) == SET);
19623 addr = SET_SRC (set);
19624 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19626 if (GET_CODE (addr) == ZERO_EXTEND)
19627 addr = XEXP (addr, 0);
19628 if (GET_CODE (addr) == SUBREG)
19629 addr = SUBREG_REG (addr);
19632 return memory_address_length (addr);
19635 extract_insn_cached (insn);
19636 for (i = recog_data.n_operands - 1; i >= 0; --i)
19637 if (MEM_P (recog_data.operand[i]))
19639 constrain_operands_cached (reload_completed);
19640 if (which_alternative != -1)
19642 const char *constraints = recog_data.constraints[i];
19643 int alt = which_alternative;
19645 while (*constraints == '=' || *constraints == '+')
19646 constraints++;
19647 while (alt-- > 0)
19648 while (*constraints++ != ',')
19650 /* Skip ignored operands. */
19651 if (*constraints == 'X')
19652 continue;
19654 return memory_address_length (XEXP (recog_data.operand[i], 0));
19656 return 0;
19659 /* Compute the default value for the "length_vex" attribute. It includes
19660 the 2- or 3-byte VEX prefix and 1 opcode byte. */
19663 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19664 int has_vex_w)
19666 int i;
19668 /* Only the 0f opcode can use the 2-byte VEX prefix; the VEX W bit requires
19669 a 3-byte VEX prefix. */
19670 if (!has_0f_opcode || has_vex_w)
19671 return 3 + 1;
19673 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
19674 if (!TARGET_64BIT)
19675 return 2 + 1;
19677 extract_insn_cached (insn);
19679 for (i = recog_data.n_operands - 1; i >= 0; --i)
19680 if (REG_P (recog_data.operand[i]))
19682 /* REX.W bit uses 3 byte VEX prefix. */
19683 if (GET_MODE (recog_data.operand[i]) == DImode
19684 && GENERAL_REG_P (recog_data.operand[i]))
19685 return 3 + 1;
19687 else
19689 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19690 if (MEM_P (recog_data.operand[i])
19691 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19692 return 3 + 1;
19695 return 2 + 1;
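/* Illustrative example: a VEX-encoded 0f-opcode insn such as vaddps using
   only %xmm0-%xmm7 in 32-bit code counts as 2 (VEX prefix) + 1 (opcode) = 3
   bytes, while one needing REX.W (a DImode general register operand) or
   REX.X/REX.B (an extended register in a memory operand) counts as
   3 + 1 = 4 bytes.  */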
19698 /* Return the maximum number of instructions a cpu can issue. */
19700 static int
19701 ix86_issue_rate (void)
19703 switch (ix86_tune)
19705 case PROCESSOR_PENTIUM:
19706 case PROCESSOR_ATOM:
19707 case PROCESSOR_K6:
19708 return 2;
19710 case PROCESSOR_PENTIUMPRO:
19711 case PROCESSOR_PENTIUM4:
19712 case PROCESSOR_ATHLON:
19713 case PROCESSOR_K8:
19714 case PROCESSOR_AMDFAM10:
19715 case PROCESSOR_NOCONA:
19716 case PROCESSOR_GENERIC32:
19717 case PROCESSOR_GENERIC64:
19718 return 3;
19720 case PROCESSOR_CORE2:
19721 return 4;
19723 default:
19724 return 1;
19728 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags set
19729 by DEP_INSN and nothing else set by DEP_INSN. */
19731 static int
19732 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19734 rtx set, set2;
19736 /* Simplify the test for uninteresting insns. */
19737 if (insn_type != TYPE_SETCC
19738 && insn_type != TYPE_ICMOV
19739 && insn_type != TYPE_FCMOV
19740 && insn_type != TYPE_IBR)
19741 return 0;
19743 if ((set = single_set (dep_insn)) != 0)
19745 set = SET_DEST (set);
19746 set2 = NULL_RTX;
19748 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19749 && XVECLEN (PATTERN (dep_insn), 0) == 2
19750 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19751 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19753 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19754 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19756 else
19757 return 0;
19759 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19760 return 0;
19762 /* This test is true if the dependent insn reads the flags but
19763 not any other potentially set register. */
19764 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19765 return 0;
19767 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19768 return 0;
19770 return 1;
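/* Illustrative example: when DEP_INSN is a compare (its only destination is
   the flags register) and INSN is a setcc or conditional jump that reads
   those flags and nothing else DEP_INSN sets, this returns true;
   ix86_adjust_cost below then lets the pair issue together on Pentium
   (cost 0).  */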
19773 /* Return true iff USE_INSN has a memory address with operands set by
19774 SET_INSN. */
19776 bool
19777 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19779 int i;
19780 extract_insn_cached (use_insn);
19781 for (i = recog_data.n_operands - 1; i >= 0; --i)
19782 if (MEM_P (recog_data.operand[i]))
19784 rtx addr = XEXP (recog_data.operand[i], 0);
19785 return modified_in_p (addr, set_insn) != 0;
19787 return false;
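/* Illustrative example: with SET_INSN "addl $4, %ebx" and USE_INSN
   "movl (%ebx), %eax", the address %ebx of the memory operand is modified by
   SET_INSN, so this returns true (an address generation interlock on
   Pentium).  */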
19790 static int
19791 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19793 enum attr_type insn_type, dep_insn_type;
19794 enum attr_memory memory;
19795 rtx set, set2;
19796 int dep_insn_code_number;
19798 /* Anti and output dependencies have zero cost on all CPUs. */
19799 if (REG_NOTE_KIND (link) != 0)
19800 return 0;
19802 dep_insn_code_number = recog_memoized (dep_insn);
19804 /* If we can't recognize the insns, we can't really do anything. */
19805 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19806 return cost;
19808 insn_type = get_attr_type (insn);
19809 dep_insn_type = get_attr_type (dep_insn);
19811 switch (ix86_tune)
19813 case PROCESSOR_PENTIUM:
19814 /* Address Generation Interlock adds a cycle of latency. */
19815 if (insn_type == TYPE_LEA)
19817 rtx addr = PATTERN (insn);
19819 if (GET_CODE (addr) == PARALLEL)
19820 addr = XVECEXP (addr, 0, 0);
19822 gcc_assert (GET_CODE (addr) == SET);
19824 addr = SET_SRC (addr);
19825 if (modified_in_p (addr, dep_insn))
19826 cost += 1;
19828 else if (ix86_agi_dependent (dep_insn, insn))
19829 cost += 1;
19831 /* ??? Compares pair with jump/setcc. */
19832 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19833 cost = 0;
19835 /* Floating point stores require the value to be ready one cycle earlier. */
19836 if (insn_type == TYPE_FMOV
19837 && get_attr_memory (insn) == MEMORY_STORE
19838 && !ix86_agi_dependent (dep_insn, insn))
19839 cost += 1;
19840 break;
19842 case PROCESSOR_PENTIUMPRO:
19843 memory = get_attr_memory (insn);
19845 /* INT->FP conversion is expensive. */
19846 if (get_attr_fp_int_src (dep_insn))
19847 cost += 5;
19849 /* There is one cycle extra latency between an FP op and a store. */
19850 if (insn_type == TYPE_FMOV
19851 && (set = single_set (dep_insn)) != NULL_RTX
19852 && (set2 = single_set (insn)) != NULL_RTX
19853 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19854 && MEM_P (SET_DEST (set2)))
19855 cost += 1;
19857 /* Show the ability of the reorder buffer to hide the latency of a load
19858 by executing it in parallel with the previous instruction when the
19859 previous instruction is not needed to compute the address. */
19860 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19861 && !ix86_agi_dependent (dep_insn, insn))
19863 /* Claim moves to take one cycle, as the core can issue one load
19864 at a time and the next load can start a cycle later. */
19865 if (dep_insn_type == TYPE_IMOV
19866 || dep_insn_type == TYPE_FMOV)
19867 cost = 1;
19868 else if (cost > 1)
19869 cost--;
19871 break;
19873 case PROCESSOR_K6:
19874 memory = get_attr_memory (insn);
19876 /* The esp dependency is resolved before the instruction is really
19877 finished. */
19878 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19879 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19880 return 1;
19882 /* INT->FP conversion is expensive. */
19883 if (get_attr_fp_int_src (dep_insn))
19884 cost += 5;
19886 /* Show the ability of the reorder buffer to hide the latency of a load
19887 by executing it in parallel with the previous instruction when the
19888 previous instruction is not needed to compute the address. */
19889 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19890 && !ix86_agi_dependent (dep_insn, insn))
19892 /* Claim moves to take one cycle, as the core can issue one load
19893 at a time and the next load can start a cycle later. */
19894 if (dep_insn_type == TYPE_IMOV
19895 || dep_insn_type == TYPE_FMOV)
19896 cost = 1;
19897 else if (cost > 2)
19898 cost -= 2;
19899 else
19900 cost = 1;
19902 break;
19904 case PROCESSOR_ATHLON:
19905 case PROCESSOR_K8:
19906 case PROCESSOR_AMDFAM10:
19907 case PROCESSOR_ATOM:
19908 case PROCESSOR_GENERIC32:
19909 case PROCESSOR_GENERIC64:
19910 memory = get_attr_memory (insn);
19912 /* Show the ability of the reorder buffer to hide the latency of a load
19913 by executing it in parallel with the previous instruction when the
19914 previous instruction is not needed to compute the address. */
19915 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19916 && !ix86_agi_dependent (dep_insn, insn))
19918 enum attr_unit unit = get_attr_unit (insn);
19919 int loadcost = 3;
19921 /* Because of the difference between the length of integer and
19922 floating unit pipeline preparation stages, the memory operands
19923 for floating point are cheaper.
19925 ??? For Athlon the difference is most probably 2. */
19926 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19927 loadcost = 3;
19928 else
19929 loadcost = TARGET_ATHLON ? 2 : 0;
19931 if (cost >= loadcost)
19932 cost -= loadcost;
19933 else
19934 cost = 0;
19937 default:
19938 break;
19941 return cost;
19944 /* How many alternative schedules to try. This should be as wide as the
19945 scheduling freedom in the DFA, but no wider. Making this value too
19946 large results in extra work for the scheduler. */
19948 static int
19949 ia32_multipass_dfa_lookahead (void)
19951 switch (ix86_tune)
19953 case PROCESSOR_PENTIUM:
19954 return 2;
19956 case PROCESSOR_PENTIUMPRO:
19957 case PROCESSOR_K6:
19958 return 1;
19960 default:
19961 return 0;
19966 /* Compute the alignment given to a constant that is being placed in memory.
19967 EXP is the constant and ALIGN is the alignment that the object would
19968 ordinarily have.
19969 The value of this function is used instead of that alignment to align
19970 the object. */
19973 ix86_constant_alignment (tree exp, int align)
19975 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19976 || TREE_CODE (exp) == INTEGER_CST)
19978 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19979 return 64;
19980 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19981 return 128;
19983 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19984 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19985 return BITS_PER_WORD;
19987 return align;
19990 /* Compute the alignment for a static variable.
19991 TYPE is the data type, and ALIGN is the alignment that
19992 the object would ordinarily have. The value of this function is used
19993 instead of that alignment to align the object. */
19996 ix86_data_alignment (tree type, int align)
19998 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
20000 if (AGGREGATE_TYPE_P (type)
20001 && TYPE_SIZE (type)
20002 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20003 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
20004 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
20005 && align < max_align)
20006 align = max_align;
20008 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
20009 to a 16-byte boundary. */
20010 if (TARGET_64BIT)
20012 if (AGGREGATE_TYPE_P (type)
20013 && TYPE_SIZE (type)
20014 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20015 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
20016 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20017 return 128;
20020 if (TREE_CODE (type) == ARRAY_TYPE)
20022 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20023 return 64;
20024 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20025 return 128;
20027 else if (TREE_CODE (type) == COMPLEX_TYPE)
20030 if (TYPE_MODE (type) == DCmode && align < 64)
20031 return 64;
20032 if ((TYPE_MODE (type) == XCmode
20033 || TYPE_MODE (type) == TCmode) && align < 128)
20034 return 128;
20036 else if ((TREE_CODE (type) == RECORD_TYPE
20037 || TREE_CODE (type) == UNION_TYPE
20038 || TREE_CODE (type) == QUAL_UNION_TYPE)
20039 && TYPE_FIELDS (type))
20041 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20042 return 64;
20043 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20044 return 128;
20046 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20047 || TREE_CODE (type) == INTEGER_TYPE)
20049 if (TYPE_MODE (type) == DFmode && align < 64)
20050 return 64;
20051 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20052 return 128;
20055 return align;
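/* Illustrative examples: a static "double" is given at least 64-bit
   alignment by the DFmode check above, and in 64-bit code a static aggregate
   of at least 128 bits (16 bytes) is aligned to 128 bits, matching the
   x86-64 ABI requirement for arrays.  */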
20058 /* Compute the alignment for a local variable or a stack slot. EXP is
20059 the data type or decl itself, MODE is the widest mode available and
20060 ALIGN is the alignment that the object would ordinarily have. The
20061 value of this macro is used instead of that alignment to align the
20062 object. */
20064 unsigned int
20065 ix86_local_alignment (tree exp, enum machine_mode mode,
20066 unsigned int align)
20068 tree type, decl;
20070 if (exp && DECL_P (exp))
20072 type = TREE_TYPE (exp);
20073 decl = exp;
20075 else
20077 type = exp;
20078 decl = NULL;
20081 /* Don't do dynamic stack realignment for long long objects with
20082 -mpreferred-stack-boundary=2. */
20083 if (!TARGET_64BIT
20084 && align == 64
20085 && ix86_preferred_stack_boundary < 64
20086 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20087 && (!type || !TYPE_USER_ALIGN (type))
20088 && (!decl || !DECL_USER_ALIGN (decl)))
20089 align = 32;
20091 /* If TYPE is NULL, we are allocating a stack slot for caller-save
20092 register in MODE. We will return the largest alignment of XF
20093 and DF. */
20094 if (!type)
20096 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20097 align = GET_MODE_ALIGNMENT (DFmode);
20098 return align;
20101 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
20102 to a 16-byte boundary. */
20103 if (TARGET_64BIT)
20105 if (AGGREGATE_TYPE_P (type)
20106 && TYPE_SIZE (type)
20107 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20108 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20109 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20110 return 128;
20112 if (TREE_CODE (type) == ARRAY_TYPE)
20114 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20115 return 64;
20116 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20117 return 128;
20119 else if (TREE_CODE (type) == COMPLEX_TYPE)
20121 if (TYPE_MODE (type) == DCmode && align < 64)
20122 return 64;
20123 if ((TYPE_MODE (type) == XCmode
20124 || TYPE_MODE (type) == TCmode) && align < 128)
20125 return 128;
20127 else if ((TREE_CODE (type) == RECORD_TYPE
20128 || TREE_CODE (type) == UNION_TYPE
20129 || TREE_CODE (type) == QUAL_UNION_TYPE)
20130 && TYPE_FIELDS (type))
20132 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20133 return 64;
20134 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20135 return 128;
20137 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20138 || TREE_CODE (type) == INTEGER_TYPE)
20141 if (TYPE_MODE (type) == DFmode && align < 64)
20142 return 64;
20143 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20144 return 128;
20146 return align;
20149 /* Compute the minimum required alignment for dynamic stack realignment
20150 purposes for a local variable, parameter or a stack slot. EXP is
20151 the data type or decl itself, MODE is its mode and ALIGN is the
20152 alignment that the object would ordinarily have. */
20154 unsigned int
20155 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20156 unsigned int align)
20158 tree type, decl;
20160 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20161 return align;
20163 if (exp && DECL_P (exp))
20165 type = TREE_TYPE (exp);
20166 decl = exp;
20168 else
20170 type = exp;
20171 decl = NULL;
20174 /* Don't do dynamic stack realignment for long long objects with
20175 -mpreferred-stack-boundary=2. */
20176 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20177 && (!type || !TYPE_USER_ALIGN (type))
20178 && (!decl || !DECL_USER_ALIGN (decl)))
20179 return 32;
20181 return align;
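/* Illustrative example: with -m32 -mpreferred-stack-boundary=2, a local
   "long long" (DImode, 64-bit alignment) that is not explicitly user-aligned
   reports a minimum alignment of only 32 bits here, so by itself it does not
   force dynamic stack realignment.  */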
20184 /* Find a location for the static chain incoming to a nested function.
20185 This is a register, unless all free registers are used by arguments. */
20187 static rtx
20188 ix86_static_chain (const_tree fndecl, bool incoming_p)
20190 unsigned regno;
20192 if (!DECL_STATIC_CHAIN (fndecl))
20193 return NULL;
20195 if (TARGET_64BIT)
20197 /* We always use R10 in 64-bit mode. */
20198 regno = R10_REG;
20200 else
20202 tree fntype;
20203 /* By default in 32-bit mode we use ECX to pass the static chain. */
20204 regno = CX_REG;
20206 fntype = TREE_TYPE (fndecl);
20207 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20209 /* Fastcall functions use ecx/edx for arguments, which leaves
20210 us with EAX for the static chain. */
20211 regno = AX_REG;
20213 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
20215 /* Thiscall functions use ecx for arguments, which leaves
20216 us with EAX for the static chain. */
20217 regno = AX_REG;
20219 else if (ix86_function_regparm (fntype, fndecl) == 3)
20221 /* For regparm 3, we have no free call-clobbered registers in
20222 which to store the static chain. In order to implement this,
20223 we have the trampoline push the static chain to the stack.
20224 However, we can't push a value below the return address when
20225 we call the nested function directly, so we have to use an
20226 alternate entry point. For this we use ESI, and have the
20227 alternate entry point push ESI, so that things appear the
20228 same once we're executing the nested function. */
20229 if (incoming_p)
20231 if (fndecl == current_function_decl)
20232 ix86_static_chain_on_stack = true;
20233 return gen_frame_mem (SImode,
20234 plus_constant (arg_pointer_rtx, -8));
20236 regno = SI_REG;
20240 return gen_rtx_REG (Pmode, regno);
20243 /* Emit RTL insns to initialize the variable parts of a trampoline.
20244 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20245 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20246 to be passed to the target function. */
20248 static void
20249 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20251 rtx mem, fnaddr;
20253 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20255 if (!TARGET_64BIT)
20257 rtx disp, chain;
20258 int opcode;
20260 /* Depending on the static chain location, either load a register
20261 with a constant, or push the constant to the stack. All of the
20262 instructions are the same size. */
20263 chain = ix86_static_chain (fndecl, true);
20264 if (REG_P (chain))
20266 if (REGNO (chain) == CX_REG)
20267 opcode = 0xb9;
20268 else if (REGNO (chain) == AX_REG)
20269 opcode = 0xb8;
20270 else
20271 gcc_unreachable ();
20273 else
20274 opcode = 0x68;
20276 mem = adjust_address (m_tramp, QImode, 0);
20277 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20279 mem = adjust_address (m_tramp, SImode, 1);
20280 emit_move_insn (mem, chain_value);
20282 /* Compute offset from the end of the jmp to the target function.
20283 In the case in which the trampoline stores the static chain on
20284 the stack, we need to skip the first insn which pushes the
20285 (call-saved) register static chain; this push is 1 byte. */
20286 disp = expand_binop (SImode, sub_optab, fnaddr,
20287 plus_constant (XEXP (m_tramp, 0),
20288 MEM_P (chain) ? 9 : 10),
20289 NULL_RTX, 1, OPTAB_DIRECT);
20291 mem = adjust_address (m_tramp, QImode, 5);
20292 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20294 mem = adjust_address (m_tramp, SImode, 6);
20295 emit_move_insn (mem, disp);
20297 else
20299 int offset = 0;
20301 /* Load the function address into r11. Try to load the address using
20302 the shorter movl instead of movabs. We may want to support
20303 movq for kernel mode, but kernel does not use trampolines at
20304 the moment. */
20305 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20307 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20309 mem = adjust_address (m_tramp, HImode, offset);
20310 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20312 mem = adjust_address (m_tramp, SImode, offset + 2);
20313 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20314 offset += 6;
20316 else
20318 mem = adjust_address (m_tramp, HImode, offset);
20319 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20321 mem = adjust_address (m_tramp, DImode, offset + 2);
20322 emit_move_insn (mem, fnaddr);
20323 offset += 10;
20326 /* Load static chain using movabs to r10. */
20327 mem = adjust_address (m_tramp, HImode, offset);
20328 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20330 mem = adjust_address (m_tramp, DImode, offset + 2);
20331 emit_move_insn (mem, chain_value);
20332 offset += 10;
20334 /* Jump to r11; the last (unused) byte is a nop, only there to
20335 pad the write out to a single 32-bit store. */
20336 mem = adjust_address (m_tramp, SImode, offset);
20337 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
20338 offset += 4;
20340 gcc_assert (offset <= TRAMPOLINE_SIZE);
20343 #ifdef ENABLE_EXECUTE_STACK
20344 #ifdef CHECK_EXECUTE_STACK_ENABLED
20345 if (CHECK_EXECUTE_STACK_ENABLED)
20346 #endif
20347 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20348 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20349 #endif
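/* Illustrative byte layout of the trampoline written above, assuming the
   usual ECX static chain in 32-bit code:
       b9 <chain32>  e9 <rel32>             movl $chain, %ecx; jmp <target>
   (b8 for an EAX chain, 68 for the push-to-stack case), and in 64-bit code:
       49 bb <addr64>  or  41 bb <addr32>   mov the target address to r11
       49 ba <chain64>                      movabs $chain, %r10
       49 ff e3 90                          jmp *%r11; nop padding  */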
20352 /* The following file contains several enumerations and data structures
20353 built from the definitions in i386-builtin-types.def. */
20355 #include "i386-builtin-types.inc"
20357 /* Table for the ix86 builtin non-function types. */
20358 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20360 /* Retrieve an element from the above table, building some of
20361 the types lazily. */
20363 static tree
20364 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20366 unsigned int index;
20367 tree type, itype;
20369 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
20371 type = ix86_builtin_type_tab[(int) tcode];
20372 if (type != NULL)
20373 return type;
20375 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20376 if (tcode <= IX86_BT_LAST_VECT)
20378 enum machine_mode mode;
20380 index = tcode - IX86_BT_LAST_PRIM - 1;
20381 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20382 mode = ix86_builtin_type_vect_mode[index];
20384 type = build_vector_type_for_mode (itype, mode);
20386 else
20388 int quals;
20390 index = tcode - IX86_BT_LAST_VECT - 1;
20391 if (tcode <= IX86_BT_LAST_PTR)
20392 quals = TYPE_UNQUALIFIED;
20393 else
20394 quals = TYPE_QUAL_CONST;
20396 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20397 if (quals != TYPE_UNQUALIFIED)
20398 itype = build_qualified_type (itype, quals);
20400 type = build_pointer_type (itype);
20403 ix86_builtin_type_tab[(int) tcode] = type;
20404 return type;
20407 /* Table for the ix86 builtin function types. */
20408 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20410 /* Retrieve an element from the above table, building some of
20411 the types lazily. */
20413 static tree
20414 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20416 tree type;
20418 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20420 type = ix86_builtin_func_type_tab[(int) tcode];
20421 if (type != NULL)
20422 return type;
20424 if (tcode <= IX86_BT_LAST_FUNC)
20426 unsigned start = ix86_builtin_func_start[(int) tcode];
20427 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20428 tree rtype, atype, args = void_list_node;
20429 unsigned i;
20431 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20432 for (i = after - 1; i > start; --i)
20434 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20435 args = tree_cons (NULL, atype, args);
20438 type = build_function_type (rtype, args);
20440 else
20442 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20443 enum ix86_builtin_func_type icode;
20445 icode = ix86_builtin_func_alias_base[index];
20446 type = ix86_get_builtin_func_type (icode);
20449 ix86_builtin_func_type_tab[(int) tcode] = type;
20450 return type;
20454 /* Codes for all the SSE/MMX builtins. */
20455 enum ix86_builtins
20457 IX86_BUILTIN_ADDPS,
20458 IX86_BUILTIN_ADDSS,
20459 IX86_BUILTIN_DIVPS,
20460 IX86_BUILTIN_DIVSS,
20461 IX86_BUILTIN_MULPS,
20462 IX86_BUILTIN_MULSS,
20463 IX86_BUILTIN_SUBPS,
20464 IX86_BUILTIN_SUBSS,
20466 IX86_BUILTIN_CMPEQPS,
20467 IX86_BUILTIN_CMPLTPS,
20468 IX86_BUILTIN_CMPLEPS,
20469 IX86_BUILTIN_CMPGTPS,
20470 IX86_BUILTIN_CMPGEPS,
20471 IX86_BUILTIN_CMPNEQPS,
20472 IX86_BUILTIN_CMPNLTPS,
20473 IX86_BUILTIN_CMPNLEPS,
20474 IX86_BUILTIN_CMPNGTPS,
20475 IX86_BUILTIN_CMPNGEPS,
20476 IX86_BUILTIN_CMPORDPS,
20477 IX86_BUILTIN_CMPUNORDPS,
20478 IX86_BUILTIN_CMPEQSS,
20479 IX86_BUILTIN_CMPLTSS,
20480 IX86_BUILTIN_CMPLESS,
20481 IX86_BUILTIN_CMPNEQSS,
20482 IX86_BUILTIN_CMPNLTSS,
20483 IX86_BUILTIN_CMPNLESS,
20484 IX86_BUILTIN_CMPNGTSS,
20485 IX86_BUILTIN_CMPNGESS,
20486 IX86_BUILTIN_CMPORDSS,
20487 IX86_BUILTIN_CMPUNORDSS,
20489 IX86_BUILTIN_COMIEQSS,
20490 IX86_BUILTIN_COMILTSS,
20491 IX86_BUILTIN_COMILESS,
20492 IX86_BUILTIN_COMIGTSS,
20493 IX86_BUILTIN_COMIGESS,
20494 IX86_BUILTIN_COMINEQSS,
20495 IX86_BUILTIN_UCOMIEQSS,
20496 IX86_BUILTIN_UCOMILTSS,
20497 IX86_BUILTIN_UCOMILESS,
20498 IX86_BUILTIN_UCOMIGTSS,
20499 IX86_BUILTIN_UCOMIGESS,
20500 IX86_BUILTIN_UCOMINEQSS,
20502 IX86_BUILTIN_CVTPI2PS,
20503 IX86_BUILTIN_CVTPS2PI,
20504 IX86_BUILTIN_CVTSI2SS,
20505 IX86_BUILTIN_CVTSI642SS,
20506 IX86_BUILTIN_CVTSS2SI,
20507 IX86_BUILTIN_CVTSS2SI64,
20508 IX86_BUILTIN_CVTTPS2PI,
20509 IX86_BUILTIN_CVTTSS2SI,
20510 IX86_BUILTIN_CVTTSS2SI64,
20512 IX86_BUILTIN_MAXPS,
20513 IX86_BUILTIN_MAXSS,
20514 IX86_BUILTIN_MINPS,
20515 IX86_BUILTIN_MINSS,
20517 IX86_BUILTIN_LOADUPS,
20518 IX86_BUILTIN_STOREUPS,
20519 IX86_BUILTIN_MOVSS,
20521 IX86_BUILTIN_MOVHLPS,
20522 IX86_BUILTIN_MOVLHPS,
20523 IX86_BUILTIN_LOADHPS,
20524 IX86_BUILTIN_LOADLPS,
20525 IX86_BUILTIN_STOREHPS,
20526 IX86_BUILTIN_STORELPS,
20528 IX86_BUILTIN_MASKMOVQ,
20529 IX86_BUILTIN_MOVMSKPS,
20530 IX86_BUILTIN_PMOVMSKB,
20532 IX86_BUILTIN_MOVNTPS,
20533 IX86_BUILTIN_MOVNTQ,
20535 IX86_BUILTIN_LOADDQU,
20536 IX86_BUILTIN_STOREDQU,
20538 IX86_BUILTIN_PACKSSWB,
20539 IX86_BUILTIN_PACKSSDW,
20540 IX86_BUILTIN_PACKUSWB,
20542 IX86_BUILTIN_PADDB,
20543 IX86_BUILTIN_PADDW,
20544 IX86_BUILTIN_PADDD,
20545 IX86_BUILTIN_PADDQ,
20546 IX86_BUILTIN_PADDSB,
20547 IX86_BUILTIN_PADDSW,
20548 IX86_BUILTIN_PADDUSB,
20549 IX86_BUILTIN_PADDUSW,
20550 IX86_BUILTIN_PSUBB,
20551 IX86_BUILTIN_PSUBW,
20552 IX86_BUILTIN_PSUBD,
20553 IX86_BUILTIN_PSUBQ,
20554 IX86_BUILTIN_PSUBSB,
20555 IX86_BUILTIN_PSUBSW,
20556 IX86_BUILTIN_PSUBUSB,
20557 IX86_BUILTIN_PSUBUSW,
20559 IX86_BUILTIN_PAND,
20560 IX86_BUILTIN_PANDN,
20561 IX86_BUILTIN_POR,
20562 IX86_BUILTIN_PXOR,
20564 IX86_BUILTIN_PAVGB,
20565 IX86_BUILTIN_PAVGW,
20567 IX86_BUILTIN_PCMPEQB,
20568 IX86_BUILTIN_PCMPEQW,
20569 IX86_BUILTIN_PCMPEQD,
20570 IX86_BUILTIN_PCMPGTB,
20571 IX86_BUILTIN_PCMPGTW,
20572 IX86_BUILTIN_PCMPGTD,
20574 IX86_BUILTIN_PMADDWD,
20576 IX86_BUILTIN_PMAXSW,
20577 IX86_BUILTIN_PMAXUB,
20578 IX86_BUILTIN_PMINSW,
20579 IX86_BUILTIN_PMINUB,
20581 IX86_BUILTIN_PMULHUW,
20582 IX86_BUILTIN_PMULHW,
20583 IX86_BUILTIN_PMULLW,
20585 IX86_BUILTIN_PSADBW,
20586 IX86_BUILTIN_PSHUFW,
20588 IX86_BUILTIN_PSLLW,
20589 IX86_BUILTIN_PSLLD,
20590 IX86_BUILTIN_PSLLQ,
20591 IX86_BUILTIN_PSRAW,
20592 IX86_BUILTIN_PSRAD,
20593 IX86_BUILTIN_PSRLW,
20594 IX86_BUILTIN_PSRLD,
20595 IX86_BUILTIN_PSRLQ,
20596 IX86_BUILTIN_PSLLWI,
20597 IX86_BUILTIN_PSLLDI,
20598 IX86_BUILTIN_PSLLQI,
20599 IX86_BUILTIN_PSRAWI,
20600 IX86_BUILTIN_PSRADI,
20601 IX86_BUILTIN_PSRLWI,
20602 IX86_BUILTIN_PSRLDI,
20603 IX86_BUILTIN_PSRLQI,
20605 IX86_BUILTIN_PUNPCKHBW,
20606 IX86_BUILTIN_PUNPCKHWD,
20607 IX86_BUILTIN_PUNPCKHDQ,
20608 IX86_BUILTIN_PUNPCKLBW,
20609 IX86_BUILTIN_PUNPCKLWD,
20610 IX86_BUILTIN_PUNPCKLDQ,
20612 IX86_BUILTIN_SHUFPS,
20614 IX86_BUILTIN_RCPPS,
20615 IX86_BUILTIN_RCPSS,
20616 IX86_BUILTIN_RSQRTPS,
20617 IX86_BUILTIN_RSQRTPS_NR,
20618 IX86_BUILTIN_RSQRTSS,
20619 IX86_BUILTIN_RSQRTF,
20620 IX86_BUILTIN_SQRTPS,
20621 IX86_BUILTIN_SQRTPS_NR,
20622 IX86_BUILTIN_SQRTSS,
20624 IX86_BUILTIN_UNPCKHPS,
20625 IX86_BUILTIN_UNPCKLPS,
20627 IX86_BUILTIN_ANDPS,
20628 IX86_BUILTIN_ANDNPS,
20629 IX86_BUILTIN_ORPS,
20630 IX86_BUILTIN_XORPS,
20632 IX86_BUILTIN_EMMS,
20633 IX86_BUILTIN_LDMXCSR,
20634 IX86_BUILTIN_STMXCSR,
20635 IX86_BUILTIN_SFENCE,
20637 /* 3DNow! Original */
20638 IX86_BUILTIN_FEMMS,
20639 IX86_BUILTIN_PAVGUSB,
20640 IX86_BUILTIN_PF2ID,
20641 IX86_BUILTIN_PFACC,
20642 IX86_BUILTIN_PFADD,
20643 IX86_BUILTIN_PFCMPEQ,
20644 IX86_BUILTIN_PFCMPGE,
20645 IX86_BUILTIN_PFCMPGT,
20646 IX86_BUILTIN_PFMAX,
20647 IX86_BUILTIN_PFMIN,
20648 IX86_BUILTIN_PFMUL,
20649 IX86_BUILTIN_PFRCP,
20650 IX86_BUILTIN_PFRCPIT1,
20651 IX86_BUILTIN_PFRCPIT2,
20652 IX86_BUILTIN_PFRSQIT1,
20653 IX86_BUILTIN_PFRSQRT,
20654 IX86_BUILTIN_PFSUB,
20655 IX86_BUILTIN_PFSUBR,
20656 IX86_BUILTIN_PI2FD,
20657 IX86_BUILTIN_PMULHRW,
20659 /* 3DNow! Athlon Extensions */
20660 IX86_BUILTIN_PF2IW,
20661 IX86_BUILTIN_PFNACC,
20662 IX86_BUILTIN_PFPNACC,
20663 IX86_BUILTIN_PI2FW,
20664 IX86_BUILTIN_PSWAPDSI,
20665 IX86_BUILTIN_PSWAPDSF,
20667 /* SSE2 */
20668 IX86_BUILTIN_ADDPD,
20669 IX86_BUILTIN_ADDSD,
20670 IX86_BUILTIN_DIVPD,
20671 IX86_BUILTIN_DIVSD,
20672 IX86_BUILTIN_MULPD,
20673 IX86_BUILTIN_MULSD,
20674 IX86_BUILTIN_SUBPD,
20675 IX86_BUILTIN_SUBSD,
20677 IX86_BUILTIN_CMPEQPD,
20678 IX86_BUILTIN_CMPLTPD,
20679 IX86_BUILTIN_CMPLEPD,
20680 IX86_BUILTIN_CMPGTPD,
20681 IX86_BUILTIN_CMPGEPD,
20682 IX86_BUILTIN_CMPNEQPD,
20683 IX86_BUILTIN_CMPNLTPD,
20684 IX86_BUILTIN_CMPNLEPD,
20685 IX86_BUILTIN_CMPNGTPD,
20686 IX86_BUILTIN_CMPNGEPD,
20687 IX86_BUILTIN_CMPORDPD,
20688 IX86_BUILTIN_CMPUNORDPD,
20689 IX86_BUILTIN_CMPEQSD,
20690 IX86_BUILTIN_CMPLTSD,
20691 IX86_BUILTIN_CMPLESD,
20692 IX86_BUILTIN_CMPNEQSD,
20693 IX86_BUILTIN_CMPNLTSD,
20694 IX86_BUILTIN_CMPNLESD,
20695 IX86_BUILTIN_CMPORDSD,
20696 IX86_BUILTIN_CMPUNORDSD,
20698 IX86_BUILTIN_COMIEQSD,
20699 IX86_BUILTIN_COMILTSD,
20700 IX86_BUILTIN_COMILESD,
20701 IX86_BUILTIN_COMIGTSD,
20702 IX86_BUILTIN_COMIGESD,
20703 IX86_BUILTIN_COMINEQSD,
20704 IX86_BUILTIN_UCOMIEQSD,
20705 IX86_BUILTIN_UCOMILTSD,
20706 IX86_BUILTIN_UCOMILESD,
20707 IX86_BUILTIN_UCOMIGTSD,
20708 IX86_BUILTIN_UCOMIGESD,
20709 IX86_BUILTIN_UCOMINEQSD,
20711 IX86_BUILTIN_MAXPD,
20712 IX86_BUILTIN_MAXSD,
20713 IX86_BUILTIN_MINPD,
20714 IX86_BUILTIN_MINSD,
20716 IX86_BUILTIN_ANDPD,
20717 IX86_BUILTIN_ANDNPD,
20718 IX86_BUILTIN_ORPD,
20719 IX86_BUILTIN_XORPD,
20721 IX86_BUILTIN_SQRTPD,
20722 IX86_BUILTIN_SQRTSD,
20724 IX86_BUILTIN_UNPCKHPD,
20725 IX86_BUILTIN_UNPCKLPD,
20727 IX86_BUILTIN_SHUFPD,
20729 IX86_BUILTIN_LOADUPD,
20730 IX86_BUILTIN_STOREUPD,
20731 IX86_BUILTIN_MOVSD,
20733 IX86_BUILTIN_LOADHPD,
20734 IX86_BUILTIN_LOADLPD,
20736 IX86_BUILTIN_CVTDQ2PD,
20737 IX86_BUILTIN_CVTDQ2PS,
20739 IX86_BUILTIN_CVTPD2DQ,
20740 IX86_BUILTIN_CVTPD2PI,
20741 IX86_BUILTIN_CVTPD2PS,
20742 IX86_BUILTIN_CVTTPD2DQ,
20743 IX86_BUILTIN_CVTTPD2PI,
20745 IX86_BUILTIN_CVTPI2PD,
20746 IX86_BUILTIN_CVTSI2SD,
20747 IX86_BUILTIN_CVTSI642SD,
20749 IX86_BUILTIN_CVTSD2SI,
20750 IX86_BUILTIN_CVTSD2SI64,
20751 IX86_BUILTIN_CVTSD2SS,
20752 IX86_BUILTIN_CVTSS2SD,
20753 IX86_BUILTIN_CVTTSD2SI,
20754 IX86_BUILTIN_CVTTSD2SI64,
20756 IX86_BUILTIN_CVTPS2DQ,
20757 IX86_BUILTIN_CVTPS2PD,
20758 IX86_BUILTIN_CVTTPS2DQ,
20760 IX86_BUILTIN_MOVNTI,
20761 IX86_BUILTIN_MOVNTPD,
20762 IX86_BUILTIN_MOVNTDQ,
20764 IX86_BUILTIN_MOVQ128,
20766 /* SSE2 MMX */
20767 IX86_BUILTIN_MASKMOVDQU,
20768 IX86_BUILTIN_MOVMSKPD,
20769 IX86_BUILTIN_PMOVMSKB128,
20771 IX86_BUILTIN_PACKSSWB128,
20772 IX86_BUILTIN_PACKSSDW128,
20773 IX86_BUILTIN_PACKUSWB128,
20775 IX86_BUILTIN_PADDB128,
20776 IX86_BUILTIN_PADDW128,
20777 IX86_BUILTIN_PADDD128,
20778 IX86_BUILTIN_PADDQ128,
20779 IX86_BUILTIN_PADDSB128,
20780 IX86_BUILTIN_PADDSW128,
20781 IX86_BUILTIN_PADDUSB128,
20782 IX86_BUILTIN_PADDUSW128,
20783 IX86_BUILTIN_PSUBB128,
20784 IX86_BUILTIN_PSUBW128,
20785 IX86_BUILTIN_PSUBD128,
20786 IX86_BUILTIN_PSUBQ128,
20787 IX86_BUILTIN_PSUBSB128,
20788 IX86_BUILTIN_PSUBSW128,
20789 IX86_BUILTIN_PSUBUSB128,
20790 IX86_BUILTIN_PSUBUSW128,
20792 IX86_BUILTIN_PAND128,
20793 IX86_BUILTIN_PANDN128,
20794 IX86_BUILTIN_POR128,
20795 IX86_BUILTIN_PXOR128,
20797 IX86_BUILTIN_PAVGB128,
20798 IX86_BUILTIN_PAVGW128,
20800 IX86_BUILTIN_PCMPEQB128,
20801 IX86_BUILTIN_PCMPEQW128,
20802 IX86_BUILTIN_PCMPEQD128,
20803 IX86_BUILTIN_PCMPGTB128,
20804 IX86_BUILTIN_PCMPGTW128,
20805 IX86_BUILTIN_PCMPGTD128,
20807 IX86_BUILTIN_PMADDWD128,
20809 IX86_BUILTIN_PMAXSW128,
20810 IX86_BUILTIN_PMAXUB128,
20811 IX86_BUILTIN_PMINSW128,
20812 IX86_BUILTIN_PMINUB128,
20814 IX86_BUILTIN_PMULUDQ,
20815 IX86_BUILTIN_PMULUDQ128,
20816 IX86_BUILTIN_PMULHUW128,
20817 IX86_BUILTIN_PMULHW128,
20818 IX86_BUILTIN_PMULLW128,
20820 IX86_BUILTIN_PSADBW128,
20821 IX86_BUILTIN_PSHUFHW,
20822 IX86_BUILTIN_PSHUFLW,
20823 IX86_BUILTIN_PSHUFD,
20825 IX86_BUILTIN_PSLLDQI128,
20826 IX86_BUILTIN_PSLLWI128,
20827 IX86_BUILTIN_PSLLDI128,
20828 IX86_BUILTIN_PSLLQI128,
20829 IX86_BUILTIN_PSRAWI128,
20830 IX86_BUILTIN_PSRADI128,
20831 IX86_BUILTIN_PSRLDQI128,
20832 IX86_BUILTIN_PSRLWI128,
20833 IX86_BUILTIN_PSRLDI128,
20834 IX86_BUILTIN_PSRLQI128,
20836 IX86_BUILTIN_PSLLDQ128,
20837 IX86_BUILTIN_PSLLW128,
20838 IX86_BUILTIN_PSLLD128,
20839 IX86_BUILTIN_PSLLQ128,
20840 IX86_BUILTIN_PSRAW128,
20841 IX86_BUILTIN_PSRAD128,
20842 IX86_BUILTIN_PSRLW128,
20843 IX86_BUILTIN_PSRLD128,
20844 IX86_BUILTIN_PSRLQ128,
20846 IX86_BUILTIN_PUNPCKHBW128,
20847 IX86_BUILTIN_PUNPCKHWD128,
20848 IX86_BUILTIN_PUNPCKHDQ128,
20849 IX86_BUILTIN_PUNPCKHQDQ128,
20850 IX86_BUILTIN_PUNPCKLBW128,
20851 IX86_BUILTIN_PUNPCKLWD128,
20852 IX86_BUILTIN_PUNPCKLDQ128,
20853 IX86_BUILTIN_PUNPCKLQDQ128,
20855 IX86_BUILTIN_CLFLUSH,
20856 IX86_BUILTIN_MFENCE,
20857 IX86_BUILTIN_LFENCE,
20859 IX86_BUILTIN_BSRSI,
20860 IX86_BUILTIN_BSRDI,
20861 IX86_BUILTIN_RDPMC,
20862 IX86_BUILTIN_RDTSC,
20863 IX86_BUILTIN_RDTSCP,
20864 IX86_BUILTIN_ROLQI,
20865 IX86_BUILTIN_ROLHI,
20866 IX86_BUILTIN_RORQI,
20867 IX86_BUILTIN_RORHI,
20869 /* SSE3. */
20870 IX86_BUILTIN_ADDSUBPS,
20871 IX86_BUILTIN_HADDPS,
20872 IX86_BUILTIN_HSUBPS,
20873 IX86_BUILTIN_MOVSHDUP,
20874 IX86_BUILTIN_MOVSLDUP,
20875 IX86_BUILTIN_ADDSUBPD,
20876 IX86_BUILTIN_HADDPD,
20877 IX86_BUILTIN_HSUBPD,
20878 IX86_BUILTIN_LDDQU,
20880 IX86_BUILTIN_MONITOR,
20881 IX86_BUILTIN_MWAIT,
20883 /* SSSE3. */
20884 IX86_BUILTIN_PHADDW,
20885 IX86_BUILTIN_PHADDD,
20886 IX86_BUILTIN_PHADDSW,
20887 IX86_BUILTIN_PHSUBW,
20888 IX86_BUILTIN_PHSUBD,
20889 IX86_BUILTIN_PHSUBSW,
20890 IX86_BUILTIN_PMADDUBSW,
20891 IX86_BUILTIN_PMULHRSW,
20892 IX86_BUILTIN_PSHUFB,
20893 IX86_BUILTIN_PSIGNB,
20894 IX86_BUILTIN_PSIGNW,
20895 IX86_BUILTIN_PSIGND,
20896 IX86_BUILTIN_PALIGNR,
20897 IX86_BUILTIN_PABSB,
20898 IX86_BUILTIN_PABSW,
20899 IX86_BUILTIN_PABSD,
20901 IX86_BUILTIN_PHADDW128,
20902 IX86_BUILTIN_PHADDD128,
20903 IX86_BUILTIN_PHADDSW128,
20904 IX86_BUILTIN_PHSUBW128,
20905 IX86_BUILTIN_PHSUBD128,
20906 IX86_BUILTIN_PHSUBSW128,
20907 IX86_BUILTIN_PMADDUBSW128,
20908 IX86_BUILTIN_PMULHRSW128,
20909 IX86_BUILTIN_PSHUFB128,
20910 IX86_BUILTIN_PSIGNB128,
20911 IX86_BUILTIN_PSIGNW128,
20912 IX86_BUILTIN_PSIGND128,
20913 IX86_BUILTIN_PALIGNR128,
20914 IX86_BUILTIN_PABSB128,
20915 IX86_BUILTIN_PABSW128,
20916 IX86_BUILTIN_PABSD128,
20918 /* AMDFAM10 - SSE4A New Instructions. */
20919 IX86_BUILTIN_MOVNTSD,
20920 IX86_BUILTIN_MOVNTSS,
20921 IX86_BUILTIN_EXTRQI,
20922 IX86_BUILTIN_EXTRQ,
20923 IX86_BUILTIN_INSERTQI,
20924 IX86_BUILTIN_INSERTQ,
20926 /* SSE4.1. */
20927 IX86_BUILTIN_BLENDPD,
20928 IX86_BUILTIN_BLENDPS,
20929 IX86_BUILTIN_BLENDVPD,
20930 IX86_BUILTIN_BLENDVPS,
20931 IX86_BUILTIN_PBLENDVB128,
20932 IX86_BUILTIN_PBLENDW128,
20934 IX86_BUILTIN_DPPD,
20935 IX86_BUILTIN_DPPS,
20937 IX86_BUILTIN_INSERTPS128,
20939 IX86_BUILTIN_MOVNTDQA,
20940 IX86_BUILTIN_MPSADBW128,
20941 IX86_BUILTIN_PACKUSDW128,
20942 IX86_BUILTIN_PCMPEQQ,
20943 IX86_BUILTIN_PHMINPOSUW128,
20945 IX86_BUILTIN_PMAXSB128,
20946 IX86_BUILTIN_PMAXSD128,
20947 IX86_BUILTIN_PMAXUD128,
20948 IX86_BUILTIN_PMAXUW128,
20950 IX86_BUILTIN_PMINSB128,
20951 IX86_BUILTIN_PMINSD128,
20952 IX86_BUILTIN_PMINUD128,
20953 IX86_BUILTIN_PMINUW128,
20955 IX86_BUILTIN_PMOVSXBW128,
20956 IX86_BUILTIN_PMOVSXBD128,
20957 IX86_BUILTIN_PMOVSXBQ128,
20958 IX86_BUILTIN_PMOVSXWD128,
20959 IX86_BUILTIN_PMOVSXWQ128,
20960 IX86_BUILTIN_PMOVSXDQ128,
20962 IX86_BUILTIN_PMOVZXBW128,
20963 IX86_BUILTIN_PMOVZXBD128,
20964 IX86_BUILTIN_PMOVZXBQ128,
20965 IX86_BUILTIN_PMOVZXWD128,
20966 IX86_BUILTIN_PMOVZXWQ128,
20967 IX86_BUILTIN_PMOVZXDQ128,
20969 IX86_BUILTIN_PMULDQ128,
20970 IX86_BUILTIN_PMULLD128,
20972 IX86_BUILTIN_ROUNDPD,
20973 IX86_BUILTIN_ROUNDPS,
20974 IX86_BUILTIN_ROUNDSD,
20975 IX86_BUILTIN_ROUNDSS,
20977 IX86_BUILTIN_PTESTZ,
20978 IX86_BUILTIN_PTESTC,
20979 IX86_BUILTIN_PTESTNZC,
20981 IX86_BUILTIN_VEC_INIT_V2SI,
20982 IX86_BUILTIN_VEC_INIT_V4HI,
20983 IX86_BUILTIN_VEC_INIT_V8QI,
20984 IX86_BUILTIN_VEC_EXT_V2DF,
20985 IX86_BUILTIN_VEC_EXT_V2DI,
20986 IX86_BUILTIN_VEC_EXT_V4SF,
20987 IX86_BUILTIN_VEC_EXT_V4SI,
20988 IX86_BUILTIN_VEC_EXT_V8HI,
20989 IX86_BUILTIN_VEC_EXT_V2SI,
20990 IX86_BUILTIN_VEC_EXT_V4HI,
20991 IX86_BUILTIN_VEC_EXT_V16QI,
20992 IX86_BUILTIN_VEC_SET_V2DI,
20993 IX86_BUILTIN_VEC_SET_V4SF,
20994 IX86_BUILTIN_VEC_SET_V4SI,
20995 IX86_BUILTIN_VEC_SET_V8HI,
20996 IX86_BUILTIN_VEC_SET_V4HI,
20997 IX86_BUILTIN_VEC_SET_V16QI,
20999 IX86_BUILTIN_VEC_PACK_SFIX,
21001 /* SSE4.2. */
21002 IX86_BUILTIN_CRC32QI,
21003 IX86_BUILTIN_CRC32HI,
21004 IX86_BUILTIN_CRC32SI,
21005 IX86_BUILTIN_CRC32DI,
21007 IX86_BUILTIN_PCMPESTRI128,
21008 IX86_BUILTIN_PCMPESTRM128,
21009 IX86_BUILTIN_PCMPESTRA128,
21010 IX86_BUILTIN_PCMPESTRC128,
21011 IX86_BUILTIN_PCMPESTRO128,
21012 IX86_BUILTIN_PCMPESTRS128,
21013 IX86_BUILTIN_PCMPESTRZ128,
21014 IX86_BUILTIN_PCMPISTRI128,
21015 IX86_BUILTIN_PCMPISTRM128,
21016 IX86_BUILTIN_PCMPISTRA128,
21017 IX86_BUILTIN_PCMPISTRC128,
21018 IX86_BUILTIN_PCMPISTRO128,
21019 IX86_BUILTIN_PCMPISTRS128,
21020 IX86_BUILTIN_PCMPISTRZ128,
21022 IX86_BUILTIN_PCMPGTQ,
21024 /* AES instructions */
21025 IX86_BUILTIN_AESENC128,
21026 IX86_BUILTIN_AESENCLAST128,
21027 IX86_BUILTIN_AESDEC128,
21028 IX86_BUILTIN_AESDECLAST128,
21029 IX86_BUILTIN_AESIMC128,
21030 IX86_BUILTIN_AESKEYGENASSIST128,
21032 /* PCLMUL instruction */
21033 IX86_BUILTIN_PCLMULQDQ128,
21035 /* AVX */
21036 IX86_BUILTIN_ADDPD256,
21037 IX86_BUILTIN_ADDPS256,
21038 IX86_BUILTIN_ADDSUBPD256,
21039 IX86_BUILTIN_ADDSUBPS256,
21040 IX86_BUILTIN_ANDPD256,
21041 IX86_BUILTIN_ANDPS256,
21042 IX86_BUILTIN_ANDNPD256,
21043 IX86_BUILTIN_ANDNPS256,
21044 IX86_BUILTIN_BLENDPD256,
21045 IX86_BUILTIN_BLENDPS256,
21046 IX86_BUILTIN_BLENDVPD256,
21047 IX86_BUILTIN_BLENDVPS256,
21048 IX86_BUILTIN_DIVPD256,
21049 IX86_BUILTIN_DIVPS256,
21050 IX86_BUILTIN_DPPS256,
21051 IX86_BUILTIN_HADDPD256,
21052 IX86_BUILTIN_HADDPS256,
21053 IX86_BUILTIN_HSUBPD256,
21054 IX86_BUILTIN_HSUBPS256,
21055 IX86_BUILTIN_MAXPD256,
21056 IX86_BUILTIN_MAXPS256,
21057 IX86_BUILTIN_MINPD256,
21058 IX86_BUILTIN_MINPS256,
21059 IX86_BUILTIN_MULPD256,
21060 IX86_BUILTIN_MULPS256,
21061 IX86_BUILTIN_ORPD256,
21062 IX86_BUILTIN_ORPS256,
21063 IX86_BUILTIN_SHUFPD256,
21064 IX86_BUILTIN_SHUFPS256,
21065 IX86_BUILTIN_SUBPD256,
21066 IX86_BUILTIN_SUBPS256,
21067 IX86_BUILTIN_XORPD256,
21068 IX86_BUILTIN_XORPS256,
21069 IX86_BUILTIN_CMPSD,
21070 IX86_BUILTIN_CMPSS,
21071 IX86_BUILTIN_CMPPD,
21072 IX86_BUILTIN_CMPPS,
21073 IX86_BUILTIN_CMPPD256,
21074 IX86_BUILTIN_CMPPS256,
21075 IX86_BUILTIN_CVTDQ2PD256,
21076 IX86_BUILTIN_CVTDQ2PS256,
21077 IX86_BUILTIN_CVTPD2PS256,
21078 IX86_BUILTIN_CVTPS2DQ256,
21079 IX86_BUILTIN_CVTPS2PD256,
21080 IX86_BUILTIN_CVTTPD2DQ256,
21081 IX86_BUILTIN_CVTPD2DQ256,
21082 IX86_BUILTIN_CVTTPS2DQ256,
21083 IX86_BUILTIN_EXTRACTF128PD256,
21084 IX86_BUILTIN_EXTRACTF128PS256,
21085 IX86_BUILTIN_EXTRACTF128SI256,
21086 IX86_BUILTIN_VZEROALL,
21087 IX86_BUILTIN_VZEROUPPER,
21088 IX86_BUILTIN_VPERMILVARPD,
21089 IX86_BUILTIN_VPERMILVARPS,
21090 IX86_BUILTIN_VPERMILVARPD256,
21091 IX86_BUILTIN_VPERMILVARPS256,
21092 IX86_BUILTIN_VPERMILPD,
21093 IX86_BUILTIN_VPERMILPS,
21094 IX86_BUILTIN_VPERMILPD256,
21095 IX86_BUILTIN_VPERMILPS256,
21096 IX86_BUILTIN_VPERMIL2PD,
21097 IX86_BUILTIN_VPERMIL2PS,
21098 IX86_BUILTIN_VPERMIL2PD256,
21099 IX86_BUILTIN_VPERMIL2PS256,
21100 IX86_BUILTIN_VPERM2F128PD256,
21101 IX86_BUILTIN_VPERM2F128PS256,
21102 IX86_BUILTIN_VPERM2F128SI256,
21103 IX86_BUILTIN_VBROADCASTSS,
21104 IX86_BUILTIN_VBROADCASTSD256,
21105 IX86_BUILTIN_VBROADCASTSS256,
21106 IX86_BUILTIN_VBROADCASTPD256,
21107 IX86_BUILTIN_VBROADCASTPS256,
21108 IX86_BUILTIN_VINSERTF128PD256,
21109 IX86_BUILTIN_VINSERTF128PS256,
21110 IX86_BUILTIN_VINSERTF128SI256,
21111 IX86_BUILTIN_LOADUPD256,
21112 IX86_BUILTIN_LOADUPS256,
21113 IX86_BUILTIN_STOREUPD256,
21114 IX86_BUILTIN_STOREUPS256,
21115 IX86_BUILTIN_LDDQU256,
21116 IX86_BUILTIN_MOVNTDQ256,
21117 IX86_BUILTIN_MOVNTPD256,
21118 IX86_BUILTIN_MOVNTPS256,
21119 IX86_BUILTIN_LOADDQU256,
21120 IX86_BUILTIN_STOREDQU256,
21121 IX86_BUILTIN_MASKLOADPD,
21122 IX86_BUILTIN_MASKLOADPS,
21123 IX86_BUILTIN_MASKSTOREPD,
21124 IX86_BUILTIN_MASKSTOREPS,
21125 IX86_BUILTIN_MASKLOADPD256,
21126 IX86_BUILTIN_MASKLOADPS256,
21127 IX86_BUILTIN_MASKSTOREPD256,
21128 IX86_BUILTIN_MASKSTOREPS256,
21129 IX86_BUILTIN_MOVSHDUP256,
21130 IX86_BUILTIN_MOVSLDUP256,
21131 IX86_BUILTIN_MOVDDUP256,
21133 IX86_BUILTIN_SQRTPD256,
21134 IX86_BUILTIN_SQRTPS256,
21135 IX86_BUILTIN_SQRTPS_NR256,
21136 IX86_BUILTIN_RSQRTPS256,
21137 IX86_BUILTIN_RSQRTPS_NR256,
21139 IX86_BUILTIN_RCPPS256,
21141 IX86_BUILTIN_ROUNDPD256,
21142 IX86_BUILTIN_ROUNDPS256,
21144 IX86_BUILTIN_UNPCKHPD256,
21145 IX86_BUILTIN_UNPCKLPD256,
21146 IX86_BUILTIN_UNPCKHPS256,
21147 IX86_BUILTIN_UNPCKLPS256,
21149 IX86_BUILTIN_SI256_SI,
21150 IX86_BUILTIN_PS256_PS,
21151 IX86_BUILTIN_PD256_PD,
21152 IX86_BUILTIN_SI_SI256,
21153 IX86_BUILTIN_PS_PS256,
21154 IX86_BUILTIN_PD_PD256,
21156 IX86_BUILTIN_VTESTZPD,
21157 IX86_BUILTIN_VTESTCPD,
21158 IX86_BUILTIN_VTESTNZCPD,
21159 IX86_BUILTIN_VTESTZPS,
21160 IX86_BUILTIN_VTESTCPS,
21161 IX86_BUILTIN_VTESTNZCPS,
21162 IX86_BUILTIN_VTESTZPD256,
21163 IX86_BUILTIN_VTESTCPD256,
21164 IX86_BUILTIN_VTESTNZCPD256,
21165 IX86_BUILTIN_VTESTZPS256,
21166 IX86_BUILTIN_VTESTCPS256,
21167 IX86_BUILTIN_VTESTNZCPS256,
21168 IX86_BUILTIN_PTESTZ256,
21169 IX86_BUILTIN_PTESTC256,
21170 IX86_BUILTIN_PTESTNZC256,
21172 IX86_BUILTIN_MOVMSKPD256,
21173 IX86_BUILTIN_MOVMSKPS256,
21175 /* TFmode support builtins. */
21176 IX86_BUILTIN_INFQ,
21177 IX86_BUILTIN_HUGE_VALQ,
21178 IX86_BUILTIN_FABSQ,
21179 IX86_BUILTIN_COPYSIGNQ,
21181 /* Vectorizer support builtins. */
21182 IX86_BUILTIN_CPYSGNPS,
21183 IX86_BUILTIN_CPYSGNPD,
21185 IX86_BUILTIN_CVTUDQ2PS,
21187 IX86_BUILTIN_VEC_PERM_V2DF,
21188 IX86_BUILTIN_VEC_PERM_V4SF,
21189 IX86_BUILTIN_VEC_PERM_V2DI,
21190 IX86_BUILTIN_VEC_PERM_V4SI,
21191 IX86_BUILTIN_VEC_PERM_V8HI,
21192 IX86_BUILTIN_VEC_PERM_V16QI,
21193 IX86_BUILTIN_VEC_PERM_V2DI_U,
21194 IX86_BUILTIN_VEC_PERM_V4SI_U,
21195 IX86_BUILTIN_VEC_PERM_V8HI_U,
21196 IX86_BUILTIN_VEC_PERM_V16QI_U,
21197 IX86_BUILTIN_VEC_PERM_V4DF,
21198 IX86_BUILTIN_VEC_PERM_V8SF,
21200 /* FMA4 and XOP instructions. */
21201 IX86_BUILTIN_VFMADDSS,
21202 IX86_BUILTIN_VFMADDSD,
21203 IX86_BUILTIN_VFMADDPS,
21204 IX86_BUILTIN_VFMADDPD,
21205 IX86_BUILTIN_VFMSUBSS,
21206 IX86_BUILTIN_VFMSUBSD,
21207 IX86_BUILTIN_VFMSUBPS,
21208 IX86_BUILTIN_VFMSUBPD,
21209 IX86_BUILTIN_VFMADDSUBPS,
21210 IX86_BUILTIN_VFMADDSUBPD,
21211 IX86_BUILTIN_VFMSUBADDPS,
21212 IX86_BUILTIN_VFMSUBADDPD,
21213 IX86_BUILTIN_VFNMADDSS,
21214 IX86_BUILTIN_VFNMADDSD,
21215 IX86_BUILTIN_VFNMADDPS,
21216 IX86_BUILTIN_VFNMADDPD,
21217 IX86_BUILTIN_VFNMSUBSS,
21218 IX86_BUILTIN_VFNMSUBSD,
21219 IX86_BUILTIN_VFNMSUBPS,
21220 IX86_BUILTIN_VFNMSUBPD,
21221 IX86_BUILTIN_VFMADDPS256,
21222 IX86_BUILTIN_VFMADDPD256,
21223 IX86_BUILTIN_VFMSUBPS256,
21224 IX86_BUILTIN_VFMSUBPD256,
21225 IX86_BUILTIN_VFMADDSUBPS256,
21226 IX86_BUILTIN_VFMADDSUBPD256,
21227 IX86_BUILTIN_VFMSUBADDPS256,
21228 IX86_BUILTIN_VFMSUBADDPD256,
21229 IX86_BUILTIN_VFNMADDPS256,
21230 IX86_BUILTIN_VFNMADDPD256,
21231 IX86_BUILTIN_VFNMSUBPS256,
21232 IX86_BUILTIN_VFNMSUBPD256,
21234 IX86_BUILTIN_VPCMOV,
21235 IX86_BUILTIN_VPCMOV_V2DI,
21236 IX86_BUILTIN_VPCMOV_V4SI,
21237 IX86_BUILTIN_VPCMOV_V8HI,
21238 IX86_BUILTIN_VPCMOV_V16QI,
21239 IX86_BUILTIN_VPCMOV_V4SF,
21240 IX86_BUILTIN_VPCMOV_V2DF,
21241 IX86_BUILTIN_VPCMOV256,
21242 IX86_BUILTIN_VPCMOV_V4DI256,
21243 IX86_BUILTIN_VPCMOV_V8SI256,
21244 IX86_BUILTIN_VPCMOV_V16HI256,
21245 IX86_BUILTIN_VPCMOV_V32QI256,
21246 IX86_BUILTIN_VPCMOV_V8SF256,
21247 IX86_BUILTIN_VPCMOV_V4DF256,
21249 IX86_BUILTIN_VPPERM,
21251 IX86_BUILTIN_VPMACSSWW,
21252 IX86_BUILTIN_VPMACSWW,
21253 IX86_BUILTIN_VPMACSSWD,
21254 IX86_BUILTIN_VPMACSWD,
21255 IX86_BUILTIN_VPMACSSDD,
21256 IX86_BUILTIN_VPMACSDD,
21257 IX86_BUILTIN_VPMACSSDQL,
21258 IX86_BUILTIN_VPMACSSDQH,
21259 IX86_BUILTIN_VPMACSDQL,
21260 IX86_BUILTIN_VPMACSDQH,
21261 IX86_BUILTIN_VPMADCSSWD,
21262 IX86_BUILTIN_VPMADCSWD,
21264 IX86_BUILTIN_VPHADDBW,
21265 IX86_BUILTIN_VPHADDBD,
21266 IX86_BUILTIN_VPHADDBQ,
21267 IX86_BUILTIN_VPHADDWD,
21268 IX86_BUILTIN_VPHADDWQ,
21269 IX86_BUILTIN_VPHADDDQ,
21270 IX86_BUILTIN_VPHADDUBW,
21271 IX86_BUILTIN_VPHADDUBD,
21272 IX86_BUILTIN_VPHADDUBQ,
21273 IX86_BUILTIN_VPHADDUWD,
21274 IX86_BUILTIN_VPHADDUWQ,
21275 IX86_BUILTIN_VPHADDUDQ,
21276 IX86_BUILTIN_VPHSUBBW,
21277 IX86_BUILTIN_VPHSUBWD,
21278 IX86_BUILTIN_VPHSUBDQ,
21280 IX86_BUILTIN_VPROTB,
21281 IX86_BUILTIN_VPROTW,
21282 IX86_BUILTIN_VPROTD,
21283 IX86_BUILTIN_VPROTQ,
21284 IX86_BUILTIN_VPROTB_IMM,
21285 IX86_BUILTIN_VPROTW_IMM,
21286 IX86_BUILTIN_VPROTD_IMM,
21287 IX86_BUILTIN_VPROTQ_IMM,
21289 IX86_BUILTIN_VPSHLB,
21290 IX86_BUILTIN_VPSHLW,
21291 IX86_BUILTIN_VPSHLD,
21292 IX86_BUILTIN_VPSHLQ,
21293 IX86_BUILTIN_VPSHAB,
21294 IX86_BUILTIN_VPSHAW,
21295 IX86_BUILTIN_VPSHAD,
21296 IX86_BUILTIN_VPSHAQ,
21298 IX86_BUILTIN_VFRCZSS,
21299 IX86_BUILTIN_VFRCZSD,
21300 IX86_BUILTIN_VFRCZPS,
21301 IX86_BUILTIN_VFRCZPD,
21302 IX86_BUILTIN_VFRCZPS256,
21303 IX86_BUILTIN_VFRCZPD256,
21305 IX86_BUILTIN_VPCOMEQUB,
21306 IX86_BUILTIN_VPCOMNEUB,
21307 IX86_BUILTIN_VPCOMLTUB,
21308 IX86_BUILTIN_VPCOMLEUB,
21309 IX86_BUILTIN_VPCOMGTUB,
21310 IX86_BUILTIN_VPCOMGEUB,
21311 IX86_BUILTIN_VPCOMFALSEUB,
21312 IX86_BUILTIN_VPCOMTRUEUB,
21314 IX86_BUILTIN_VPCOMEQUW,
21315 IX86_BUILTIN_VPCOMNEUW,
21316 IX86_BUILTIN_VPCOMLTUW,
21317 IX86_BUILTIN_VPCOMLEUW,
21318 IX86_BUILTIN_VPCOMGTUW,
21319 IX86_BUILTIN_VPCOMGEUW,
21320 IX86_BUILTIN_VPCOMFALSEUW,
21321 IX86_BUILTIN_VPCOMTRUEUW,
21323 IX86_BUILTIN_VPCOMEQUD,
21324 IX86_BUILTIN_VPCOMNEUD,
21325 IX86_BUILTIN_VPCOMLTUD,
21326 IX86_BUILTIN_VPCOMLEUD,
21327 IX86_BUILTIN_VPCOMGTUD,
21328 IX86_BUILTIN_VPCOMGEUD,
21329 IX86_BUILTIN_VPCOMFALSEUD,
21330 IX86_BUILTIN_VPCOMTRUEUD,
21332 IX86_BUILTIN_VPCOMEQUQ,
21333 IX86_BUILTIN_VPCOMNEUQ,
21334 IX86_BUILTIN_VPCOMLTUQ,
21335 IX86_BUILTIN_VPCOMLEUQ,
21336 IX86_BUILTIN_VPCOMGTUQ,
21337 IX86_BUILTIN_VPCOMGEUQ,
21338 IX86_BUILTIN_VPCOMFALSEUQ,
21339 IX86_BUILTIN_VPCOMTRUEUQ,
21341 IX86_BUILTIN_VPCOMEQB,
21342 IX86_BUILTIN_VPCOMNEB,
21343 IX86_BUILTIN_VPCOMLTB,
21344 IX86_BUILTIN_VPCOMLEB,
21345 IX86_BUILTIN_VPCOMGTB,
21346 IX86_BUILTIN_VPCOMGEB,
21347 IX86_BUILTIN_VPCOMFALSEB,
21348 IX86_BUILTIN_VPCOMTRUEB,
21350 IX86_BUILTIN_VPCOMEQW,
21351 IX86_BUILTIN_VPCOMNEW,
21352 IX86_BUILTIN_VPCOMLTW,
21353 IX86_BUILTIN_VPCOMLEW,
21354 IX86_BUILTIN_VPCOMGTW,
21355 IX86_BUILTIN_VPCOMGEW,
21356 IX86_BUILTIN_VPCOMFALSEW,
21357 IX86_BUILTIN_VPCOMTRUEW,
21359 IX86_BUILTIN_VPCOMEQD,
21360 IX86_BUILTIN_VPCOMNED,
21361 IX86_BUILTIN_VPCOMLTD,
21362 IX86_BUILTIN_VPCOMLED,
21363 IX86_BUILTIN_VPCOMGTD,
21364 IX86_BUILTIN_VPCOMGED,
21365 IX86_BUILTIN_VPCOMFALSED,
21366 IX86_BUILTIN_VPCOMTRUED,
21368 IX86_BUILTIN_VPCOMEQQ,
21369 IX86_BUILTIN_VPCOMNEQ,
21370 IX86_BUILTIN_VPCOMLTQ,
21371 IX86_BUILTIN_VPCOMLEQ,
21372 IX86_BUILTIN_VPCOMGTQ,
21373 IX86_BUILTIN_VPCOMGEQ,
21374 IX86_BUILTIN_VPCOMFALSEQ,
21375 IX86_BUILTIN_VPCOMTRUEQ,
21377 /* LWP instructions. */
21378 IX86_BUILTIN_LLWPCB,
21379 IX86_BUILTIN_SLWPCB,
21380 IX86_BUILTIN_LWPVAL32,
21381 IX86_BUILTIN_LWPVAL64,
21382 IX86_BUILTIN_LWPINS32,
21383 IX86_BUILTIN_LWPINS64,
21385 IX86_BUILTIN_CLZS,
21387 IX86_BUILTIN_MAX
21390 /* Table for the ix86 builtin decls. */
21391 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
21393 /* Table of all of the builtin functions that are possible with different ISAs
21394 but are waiting to be built until a function is declared to use that
21395 ISA. */
21396 struct builtin_isa {
21397 const char *name; /* function name */
21398 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21399 int isa; /* isa_flags this builtin is defined for */
21400 bool const_p; /* true if the declaration is constant */
21401 bool set_and_not_built_p;
21402 };
21404 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
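/* A minimal sketch of how an ix86_builtins_isa slot is used (the builtin
   name and code here are hypothetical placeholders):

     def_builtin (OPTION_MASK_ISA_SSE4_2, "__builtin_ia32_foo",
                  INT_FTYPE_INT, IX86_BUILTIN_FOO);

   If SSE4.2 is not enabled in ix86_isa_flags and the front end's
   builtin_function hook differs from its ext-scope hook, no decl is built;
   instead name, tcode and the isa mask are recorded here and
   set_and_not_built_p is set.  When the ISA is enabled later (for example
   through a target attribute), ix86_add_new_builtins walks this array and
   builds the pending decls.  */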
21407 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Saves the MASK
21408 of isa_flags to use in the ix86_builtins_isa array. Stores the
21409 function decl in the ix86_builtins array. Returns the function decl or
21410 NULL_TREE if the builtin was not added.
21412 If the front end has a special hook for builtin functions, delay adding
21413 builtin functions that aren't in the current ISA until the ISA is changed
21414 with function specific optimization. Doing so can save about 300K for the
21415 default compiler. When the builtin is expanded, check at that time whether
21416 it is valid.
21418 If the front end doesn't have a special hook, record all builtins, even if
21419 it isn't an instruction set in the current ISA, in case the user uses
21420 function specific options for a different ISA, so that we don't get scope
21421 errors if a builtin is added in the middle of a function scope. */
21423 static inline tree
21424 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21425 enum ix86_builtins code)
21426 {
21427 tree decl = NULL_TREE;
21429 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21430 {
21431 ix86_builtins_isa[(int) code].isa = mask;
21433 if (mask == 0
21434 || (mask & ix86_isa_flags) != 0
21435 || (lang_hooks.builtin_function
21436 == lang_hooks.builtin_function_ext_scope))
21438 {
21439 tree type = ix86_get_builtin_func_type (tcode);
21440 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21441 NULL, NULL_TREE);
21442 ix86_builtins[(int) code] = decl;
21443 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21444 }
21445 else
21446 {
21447 ix86_builtins[(int) code] = NULL_TREE;
21448 ix86_builtins_isa[(int) code].tcode = tcode;
21449 ix86_builtins_isa[(int) code].name = name;
21450 ix86_builtins_isa[(int) code].const_p = false;
21451 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21452 }
21453 }
21455 return decl;
21456 }
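/* The OPTION_MASK_ISA_64BIT check above is a hard gate rather than a
   deferral.  For a 32-bit target, a call along the lines of

     def_builtin (OPTION_MASK_ISA_64BIT, "__builtin_ia32_bsrdi",
                  INT64_FTYPE_INT64, IX86_BUILTIN_BSRDI);

   (cf. the corresponding bdesc_args entry below) skips the body entirely
   and returns NULL_TREE, so the builtin is neither created nor recorded
   for ix86_add_new_builtins to revive later.  */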
21458 /* Like def_builtin, but also marks the function decl "const". */
21460 static inline tree
21461 def_builtin_const (int mask, const char *name,
21462 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21463 {
21464 tree decl = def_builtin (mask, name, tcode, code);
21465 if (decl)
21466 TREE_READONLY (decl) = 1;
21467 else
21468 ix86_builtins_isa[(int) code].const_p = true;
21470 return decl;
21471 }
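/* When the underlying def_builtin call deferred the decl, only const_p is
   recorded here; the TREE_READONLY bit is applied by ix86_add_new_builtins
   once the decl is finally built.  A sketch, using a hypothetical builtin
   and assuming SSE4.2 is not yet enabled:

     def_builtin_const (OPTION_MASK_ISA_SSE4_2, "__builtin_ia32_foo",
                        INT_FTYPE_INT, IX86_BUILTIN_FOO);

   returns NULL_TREE but leaves ix86_builtins_isa[IX86_BUILTIN_FOO].const_p
   set, so the constness is not lost.  */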
21473 /* Add any new builtin functions for a given ISA that may not have been
21474 declared. This saves a bit of space compared to adding all of the
21475 declarations to the tree, even if we didn't use them. */
21477 static void
21478 ix86_add_new_builtins (int isa)
21479 {
21480 int i;
21482 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21483 {
21484 if ((ix86_builtins_isa[i].isa & isa) != 0
21485 && ix86_builtins_isa[i].set_and_not_built_p)
21486 {
21487 tree decl, type;
21489 /* Don't define the builtin again. */
21490 ix86_builtins_isa[i].set_and_not_built_p = false;
21492 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21493 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21494 type, i, BUILT_IN_MD, NULL,
21495 NULL_TREE);
21497 ix86_builtins[i] = decl;
21498 if (ix86_builtins_isa[i].const_p)
21499 TREE_READONLY (decl) = 1;
21500 }
21501 }
21502 }
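/* Rough usage sketch: after the ISA flags have been extended (for instance
   while processing a target("...") attribute or pragma that turns on an
   extra instruction set), a call along the lines of

     ix86_add_new_builtins (ix86_isa_flags);

   materializes every deferred builtin whose isa mask now matches.  Because
   add_builtin_function_ext_scope is used, the new decls are usable even
   when this happens in the middle of a function scope, as described in the
   comment above def_builtin.  */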
21504 /* Bits for builtin_description.flag. */
21506 /* Set when we don't support the comparison natively, and should
21507 swap_comparison in order to support it. */
21508 #define BUILTIN_DESC_SWAP_OPERANDS 1
21510 struct builtin_description
21511 {
21512 const unsigned int mask;
21513 const enum insn_code icode;
21514 const char *const name;
21515 const enum ix86_builtins code;
21516 const enum rtx_code comparison;
21517 const int flag;
21518 };
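/* Reading a table entry: taking the first bdesc_comi element below,

     { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq",
       IX86_BUILTIN_COMIEQSS, UNEQ, 0 }

   mask is the isa_flags requirement, icode the insn pattern to expand to,
   name/code the builtin's name and enum value, comparison the rtx
   comparison code, and flag is table-specific: it can hold the
   BUILTIN_DESC_SWAP_OPERANDS bit defined above, a CC mode for the
   pcmpestr/pcmpistr entries, or an ix86_builtin_func_type for the argument
   tables further down.  */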
21520 static const struct builtin_description bdesc_comi[] =
21521 {
21522 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21523 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21524 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21525 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21526 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21527 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21528 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21529 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21530 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21531 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21532 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21533 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21534 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21535 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21536 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21537 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21538 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21539 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21540 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21541 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21542 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21543 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21544 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21545 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21546 };
21548 static const struct builtin_description bdesc_pcmpestr[] =
21549 {
21550 /* SSE4.2 */
21551 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21552 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21553 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21554 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21555 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21556 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21557 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21558 };
21560 static const struct builtin_description bdesc_pcmpistr[] =
21561 {
21562 /* SSE4.2 */
21563 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21564 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21565 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21566 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21567 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21568 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21569 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
21570 };
21572 /* Special builtins with variable number of arguments. */
21573 static const struct builtin_description bdesc_special_args[] =
21574 {
21575 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21576 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21578 /* MMX */
21579 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21581 /* 3DNow! */
21582 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21584 /* SSE */
21585 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21586 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21587 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21589 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21590 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21591 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21592 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21594 /* SSE or 3DNow!A */
21595 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21596 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21598 /* SSE2 */
21599 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21600 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21601 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21602 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21603 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21604 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21605 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21606 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21607 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21609 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21610 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21612 /* SSE3 */
21613 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21615 /* SSE4.1 */
21616 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21618 /* SSE4A */
21619 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21620 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21622 /* AVX */
21623 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21624 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21626 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21627 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21628 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21629 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21630 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21632 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21633 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21634 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21635 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21636 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21637 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21638 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21640 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21641 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21642 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21644 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21645 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21646 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21647 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21648 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21649 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21650 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21651 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21653 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21654 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21655 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21656 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21657 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
21658 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
21659 };
21662 /* Builtins with variable number of arguments. */
21663 static const struct builtin_description bdesc_args[] =
21664 {
21665 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21666 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21667 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21668 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21669 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21670 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21671 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21673 /* MMX */
21674 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21675 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21676 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21677 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21678 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21679 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21681 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21682 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21683 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21684 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21685 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21686 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21687 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21688 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21690 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21691 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21693 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21694 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21695 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21696 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21698 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21699 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21700 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21701 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21702 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21703 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21705 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21706 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21707 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21708 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21709 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI},
21710 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI},
21712 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21713 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
21714 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21716 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
21718 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21719 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21720 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21721 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21722 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21723 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21725 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21726 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21727 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21728 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21729 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21730 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21732 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21733 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21734 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21735 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21737 /* 3DNow! */
21738 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21739 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21740 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21741 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21743 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21744 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21745 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21746 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21747 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21748 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21749 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21750 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21751 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21752 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21753 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21754 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21755 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21756 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21757 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21759 /* 3DNow!A */
21760 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21761 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21762 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21763 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21764 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21765 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21767 /* SSE */
21768 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
21769 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21770 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21771 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21772 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21773 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21774 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21775 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21776 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21777 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21778 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21779 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21781 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21783 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21784 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21785 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21786 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21787 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21788 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21789 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21790 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21792 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21793 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21794 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21795 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21796 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21797 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21798 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21799 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21800 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21801 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21802 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP},
21803 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21804 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21805 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21806 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21807 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21808 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21809 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21810 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21811 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21812 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21813 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21815 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21816 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21817 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21818 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21820 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21821 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21822 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21823 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21825 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21827 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21828 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21829 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21830 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21831 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21833 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
21834 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
21835 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, V4SF_FTYPE_V4SF_DI },
21837 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
21839 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21840 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21841 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21843 /* SSE MMX or 3Dnow!A */
21844 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21845 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21846 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21848 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21849 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21850 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21851 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21853 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
21854 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
21856 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
21858 /* SSE2 */
21859 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21861 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
21862 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
21863 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
21864 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
21865 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
21866 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
21867 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
21868 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
21869 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
21870 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
21871 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
21872 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
21874 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
21875 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
21876 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
21877 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
21878 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21879 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21881 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21882 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21883 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
21884 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21885 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21887 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
21889 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21890 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21891 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21892 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21894 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21895 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
21896 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21898 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21899 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21900 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21901 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21902 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21903 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21904 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21905 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21907 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21908 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21909 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21910 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21911 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP},
21912 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21913 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21914 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21915 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21916 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21917 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21918 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21919 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21920 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21921 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21922 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21923 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21924 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21925 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21926 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21928 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21929 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21930 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21931 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21933 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21934 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21935 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21936 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21938 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21940 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21941 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21942 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21944 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
21946 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21947 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21948 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21949 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21950 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21951 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21952 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21953 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21955 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21956 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21957 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21958 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21959 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21960 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21961 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21962 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21964 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21965 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN,(int) V8HI_FTYPE_V8HI_V8HI },
21967 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21968 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21969 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21970 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21972 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21973 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21975 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21976 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21977 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21978 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21979 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21980 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21982 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21983 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21984 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21985 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21987 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21988 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21989 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21990 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21991 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21992 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21993 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21994 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21996 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21997 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
21998 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
22000 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22001 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
22003 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
22004 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22006 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
22008 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
22009 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
22010 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
22011 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
22013 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
22014 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22015 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22016 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
22017 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22018 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22019 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
22021 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
22022 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22023 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22024 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
22025 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22026 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22027 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
22029 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22030 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22031 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22032 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22034 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
22035 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
22036 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
22038 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
22040 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
22041 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
22043   { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22045 /* SSE2 MMX */
22046 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
22047 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
22049 /* SSE3 */
22050 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF},
22051 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22053 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22054 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22055 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22056 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22057 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22058 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22060 /* SSSE3 */
22061 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
22062 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
22063 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
22064 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
22065 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
22066 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
22068 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22069 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22070 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22071 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22072 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22073 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22074 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22075 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22076 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22077 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22078 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22079 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22080 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
22081 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
22082 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22083 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22084 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22085 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22086 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22087 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22088 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22089 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22090 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22091 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22093 /* SSSE3. */
22094 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
22095 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
22097 /* SSE4.1 */
22098 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22099 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22100 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
22101 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
22102 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22103 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22104 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22105 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
22106 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
22107 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
22109 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
22110 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
22111 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
22112 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
22113 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
22114 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
22115 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
22116 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
22117 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
22118 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
22119 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
22120 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
22121 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
22123 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
22124 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22125 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22126 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22127 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22128 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22129 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22130 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22131 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22132 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22133 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22134 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22136 /* SSE4.1 */
22137 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22138 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22139 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22140 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22142 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22143 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22144 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22146 /* SSE4.2 */
22147 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22148 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
22149 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
22150 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
22151 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
22153 /* SSE4A */
22154 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
22155 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
22156 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
22157 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22159 /* AES */
22160 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
22161 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22163 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22164 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22165 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22166 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22168 /* PCLMUL */
22169 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
22171 /* AVX */
22172 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22173 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22174 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22175 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22176 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22177 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22178 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22179 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22180 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22181 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22182 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22183 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22184 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22185 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22186 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22187 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22188 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22189 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22190 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22191 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22192 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22193 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22194 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22195 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22196 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22197 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22199 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
22200 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
22201 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
22202 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
22204 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22205 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22206 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
22207 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
22208 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22209 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22210 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22211 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22212 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22213 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22214 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22215 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22216 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22217 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
22218 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
22219 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
22220 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
22221 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
22222 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
22223 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22224 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
22225 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22226 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22227 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22228 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22229 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22230 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
22231 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22232 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22233 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22234 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22235 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22236 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22237 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22239 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22240 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22241 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22243 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22244 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22245 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22246 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22247 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22249 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22251 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22252 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22254 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22255 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22256 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22257 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22259 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22260 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22261 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22262 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22263 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22264 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22266 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22267 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22268 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22269 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22270 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22271 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22272 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22273 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22274 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22275 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22276 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22277 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22278 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22279 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22280 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22282 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22283 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22285   { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
22286 };
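/* The records above are descriptors only; a builtin-initialization routine
   elsewhere in this file (outside this excerpt) is expected to walk
   bdesc_args and register each entry under its ISA mask, name, function
   type and IX86_BUILTIN_* code, presumably through a helper such as
   def_builtin_const.  As a minimal user-level sketch, assuming -msse2, the
   V2DI_FTYPE_V2DI_V2DI descriptor for __builtin_ia32_pxor128 above
   corresponds to a prototype usable as:

     typedef long long __v2di __attribute__ ((__vector_size__ (16)));

     __v2di
     xor_v2di (__v2di a, __v2di b)
     {
       return __builtin_ia32_pxor128 (a, b);
     }

   which the compiler expands through the CODE_FOR_xorv2di3 pattern named
   in that entry.  */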
22288 /* FMA4 and XOP. */
22289 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
22290 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
22291 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
22292 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
22293 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
22294 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
22295 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
22296 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
22297 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
22298 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
22299 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
22300 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
22301 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
22302 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
22303 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
22304 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
22305 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
22306 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
22307 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
22308 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
22309 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
22310 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
22311 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
22312 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
22313 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
22314 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
22315 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
22316 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
22317 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
22318 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
22319 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
22320 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
22321 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
22322 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
22323 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
22324 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
22325 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
22326 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
22327 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
22328 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
22329 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
22330 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
22331 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
22332 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
22333 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
22334 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
22335 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
22336 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
22337 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
22338 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
22339 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
22340 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
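/* These MULTI_ARG_* macros are shorthand aliases for the *_FTYPE_* function
   type codes used in the bdesc_multi_arg table below, so each record can
   state its prototype compactly: MULTI_ARG_3_SF, for instance, stands for
   V4SF_FTYPE_V4SF_V4SF_V4SF, i.e. three V4SF operands in and one V4SF
   result out.  A minimal user-level sketch, assuming -mfma4, of the
   prototype implied by the MULTI_ARG_3_SF entry for
   __builtin_ia32_vfmaddps below:

     typedef float __v4sf __attribute__ ((__vector_size__ (16)));

     __v4sf
     madd_v4sf (__v4sf a, __v4sf b, __v4sf c)
     {
       return __builtin_ia32_vfmaddps (a, b, c);
     }
   */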
22342 static const struct builtin_description bdesc_multi_arg[] =
22343 {
22344 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22345 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22346 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22347 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22348 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22349 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22350 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22351 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22353 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22354 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22355 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22356 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22357 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22358 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22359 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22360 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22362 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22363 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22364 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22365 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22367 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22368 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22369 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22370 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22372 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22373 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22374 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22375 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22377 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22378 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22379 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22380 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22382 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
22383 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
22384 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
22385 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
22386 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi",IX86_BUILTIN_VPCMOV_V16QI,UNKNOWN, (int)MULTI_ARG_3_QI },
22387 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
22388 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
22390 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22391 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22392 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
22393 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
22394 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
22395 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22396 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22398 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
22400 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22401 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22402 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22403 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22404 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22405 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22406 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22407 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22408 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22409 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22410 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22411 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22413 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22414 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
22415 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
22416 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
22417 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
22418 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
22419 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
22420 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
22421 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22422 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
22423 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
22424 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
22425 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22426 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
22427 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
22428 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
22430 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
22431 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
22432 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
22433 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
22434 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
22435 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
22437 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22438 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22439 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22440 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22441 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22442 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22443 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22444 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22445 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22446 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22447 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22448 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22449 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22450 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22451 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22453 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
22454 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22455 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22456 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
22457 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
22458 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
22459 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
22461 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
22462 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22463 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22464 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
22465 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
22466 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
22467 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
22469 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
22470 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22471 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22472 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
22473 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
22474 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
22475 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
22477 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22478 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22479 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22480 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
22481 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
22482 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
22483 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
22485 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
22486 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22487 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22488 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
22489 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
22490 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
22491 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
22493 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
22494 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22495 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22496 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
22497 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
22498 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
22499 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
22501 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
22502 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22503 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22504 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
22505 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
22506 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
22507 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
22509 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22510 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22511 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22512 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
22513 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
22514 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
22515 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
22517 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22518 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22519 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22520 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22521 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22522 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22523 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22524 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22526 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22527 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22528 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22529 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22530 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22531 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22532 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22533 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22535 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
22536 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
22537 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
22538 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
22542 /* Set up all the MMX/SSE builtins, even builtins for instructions that are not
22543    in the current target ISA, so that the user can compile particular modules
22544    with target specific options that differ from the command line
22545    options. */
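/* For illustration only (an assumed example, not part of this file): with
   function specific target options a single translation unit can use
   builtins from an ISA that is not enabled on the command line, e.g.

       __attribute__((target ("xop")))
       __v4si compare_eq (__v4si a, __v4si b)
       {
         return __builtin_ia32_vpcomeqd (a, b);
       }

   so every builtin must be registered here; whether it may actually be
   used is re-checked at expansion time in ix86_expand_builtin.  */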
22546 static void
22547 ix86_init_mmx_sse_builtins (void)
22549 const struct builtin_description * d;
22550 enum ix86_builtin_func_type ftype;
22551 size_t i;
22553 /* Add all special builtins with variable number of operands. */
22554 for (i = 0, d = bdesc_special_args;
22555 i < ARRAY_SIZE (bdesc_special_args);
22556 i++, d++)
22558 if (d->name == 0)
22559 continue;
22561 ftype = (enum ix86_builtin_func_type) d->flag;
22562 def_builtin (d->mask, d->name, ftype, d->code);
22565 /* Add all builtins with variable number of operands. */
22566 for (i = 0, d = bdesc_args;
22567 i < ARRAY_SIZE (bdesc_args);
22568 i++, d++)
22570 if (d->name == 0)
22571 continue;
22573 ftype = (enum ix86_builtin_func_type) d->flag;
22574 def_builtin_const (d->mask, d->name, ftype, d->code);
22577 /* pcmpestr[im] insns. */
22578 for (i = 0, d = bdesc_pcmpestr;
22579 i < ARRAY_SIZE (bdesc_pcmpestr);
22580 i++, d++)
22582 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22583 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
22584 else
22585 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
22586 def_builtin_const (d->mask, d->name, ftype, d->code);
22589 /* pcmpistr[im] insns. */
22590 for (i = 0, d = bdesc_pcmpistr;
22591 i < ARRAY_SIZE (bdesc_pcmpistr);
22592 i++, d++)
22594 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22595 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
22596 else
22597 ftype = INT_FTYPE_V16QI_V16QI_INT;
22598 def_builtin_const (d->mask, d->name, ftype, d->code);
22601 /* comi/ucomi insns. */
22602 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22604 if (d->mask == OPTION_MASK_ISA_SSE2)
22605 ftype = INT_FTYPE_V2DF_V2DF;
22606 else
22607 ftype = INT_FTYPE_V4SF_V4SF;
22608 def_builtin_const (d->mask, d->name, ftype, d->code);
22611 /* SSE */
22612 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
22613 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
22614 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
22615 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
22617 /* SSE or 3DNow!A */
22618 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22619 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
22620 IX86_BUILTIN_MASKMOVQ);
22622 /* SSE2 */
22623 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
22624 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
22626 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
22627 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
22628 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
22629 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
22631 /* SSE3. */
22632 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
22633 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
22634 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
22635 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
22637 /* AES */
22638 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
22639 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
22640 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
22641 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
22642 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
22643 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
22644 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
22645 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
22646 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
22647 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
22648 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
22649 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
22651 /* PCLMUL */
22652 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
22653 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
22655 /* MMX access to the vec_init patterns. */
22656 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
22657 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
22659 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
22660 V4HI_FTYPE_HI_HI_HI_HI,
22661 IX86_BUILTIN_VEC_INIT_V4HI);
22663 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
22664 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
22665 IX86_BUILTIN_VEC_INIT_V8QI);
22667 /* Access to the vec_extract patterns. */
22668 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
22669 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
22670 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
22671 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
22672 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
22673 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
22674 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
22675 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
22676 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
22677 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
22679 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22680 "__builtin_ia32_vec_ext_v4hi",
22681 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
22683 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
22684 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
22686 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
22687 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
22689 /* Access to the vec_set patterns. */
22690 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
22691 "__builtin_ia32_vec_set_v2di",
22692 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
22694 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
22695 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
22697 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
22698 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
22700 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
22701 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
22703 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22704 "__builtin_ia32_vec_set_v4hi",
22705 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
22707 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
22708 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
22710 /* Add the FMA4/XOP multi-arg builtins. */
22711 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22713 if (d->name == 0)
22714 continue;
22716 ftype = (enum ix86_builtin_func_type) d->flag;
22717 def_builtin_const (d->mask, d->name, ftype, d->code);
22721 /* Internal method for ix86_init_builtins. */
22723 static void
22724 ix86_init_builtins_va_builtins_abi (void)
22726 tree ms_va_ref, sysv_va_ref;
22727 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22728 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22729 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22730 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22732 if (!TARGET_64BIT)
22733 return;
22734 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22735 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22736 ms_va_ref = build_reference_type (ms_va_list_type_node);
22737 sysv_va_ref =
22738 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22740 fnvoid_va_end_ms =
22741 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22742 fnvoid_va_start_ms =
22743 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22744 fnvoid_va_end_sysv =
22745 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22746 fnvoid_va_start_sysv =
22747 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22748 NULL_TREE);
22749 fnvoid_va_copy_ms =
22750 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22751 NULL_TREE);
22752 fnvoid_va_copy_sysv =
22753 build_function_type_list (void_type_node, sysv_va_ref,
22754 sysv_va_ref, NULL_TREE);
22756 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22757 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22758 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22759 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
22760 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
22761 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
22762 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
22763 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22764 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
22765 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22766 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
22767 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
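/* Illustrative use of the builtins registered above (a sketch, not code
   from this file): an ms_abi varargs function on x86-64 walks its
   arguments with the MS flavour of va_list, roughly

       __attribute__((ms_abi)) int
       sum (int n, ...)
       {
         __builtin_ms_va_list ap;
         int i, s = 0;
         __builtin_ms_va_start (ap, n);
         for (i = 0; i < n; i++)
           s += __builtin_va_arg (ap, int);
         __builtin_ms_va_end (ap);
         return s;
       }
*/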
22770 static void
22771 ix86_init_builtin_types (void)
22773 tree float128_type_node, float80_type_node;
22775 /* The __float80 type. */
22776 float80_type_node = long_double_type_node;
22777 if (TYPE_MODE (float80_type_node) != XFmode)
22779 /* The __float80 type. */
22780 float80_type_node = make_node (REAL_TYPE);
22782 TYPE_PRECISION (float80_type_node) = 80;
22783 layout_type (float80_type_node);
22785 (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");
22787 /* The __float128 type. */
22788 float128_type_node = make_node (REAL_TYPE);
22789 TYPE_PRECISION (float128_type_node) = 128;
22790 layout_type (float128_type_node);
22791 (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
22793 /* This macro is built by i386-builtin-types.awk. */
22794 DEFINE_BUILTIN_PRIMITIVE_TYPES;
22797 static void
22798 ix86_init_builtins (void)
22800 tree t;
22802 ix86_init_builtin_types ();
22804 /* TFmode support builtins. */
22805 def_builtin_const (0, "__builtin_infq",
22806 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
22807 def_builtin_const (0, "__builtin_huge_valq",
22808 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
22810 /* We will expand them to a normal call if SSE2 isn't available, since
22811    they are used by libgcc. */
22812 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
22813 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
22814 BUILT_IN_MD, "__fabstf2", NULL_TREE);
22815 TREE_READONLY (t) = 1;
22816 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
22818 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
22819 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
22820 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
22821 TREE_READONLY (t) = 1;
22822 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
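/* What the two registrations above give the user, roughly (an assumed
   sketch, not code from this file):

       __float128 a = __builtin_fabsq (x);         // may expand to a __fabstf2 call
       __float128 b = __builtin_copysignq (x, y);  // may expand to a __copysigntf3 call

   i.e. without SSE2 these simply become calls to the libgcc routines
   named in the asm-name arguments above.  */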
22824 ix86_init_mmx_sse_builtins ();
22826 if (TARGET_64BIT)
22827 ix86_init_builtins_va_builtins_abi ();
22830 /* Return the ix86 builtin for CODE. */
22832 static tree
22833 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
22835 if (code >= IX86_BUILTIN_MAX)
22836 return error_mark_node;
22838 return ix86_builtins[code];
22841 /* Errors in the source file can cause expand_expr to return const0_rtx
22842 where we expect a vector. To avoid crashing, use one of the vector
22843 clear instructions. */
22844 static rtx
22845 safe_vector_operand (rtx x, enum machine_mode mode)
22847 if (x == const0_rtx)
22848 x = CONST0_RTX (mode);
22849 return x;
22852 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
22854 static rtx
22855 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
22857 rtx pat;
22858 tree arg0 = CALL_EXPR_ARG (exp, 0);
22859 tree arg1 = CALL_EXPR_ARG (exp, 1);
22860 rtx op0 = expand_normal (arg0);
22861 rtx op1 = expand_normal (arg1);
22862 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22863 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
22864 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
22866 if (VECTOR_MODE_P (mode0))
22867 op0 = safe_vector_operand (op0, mode0);
22868 if (VECTOR_MODE_P (mode1))
22869 op1 = safe_vector_operand (op1, mode1);
22871 if (optimize || !target
22872 || GET_MODE (target) != tmode
22873 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22874 target = gen_reg_rtx (tmode);
22876 if (GET_MODE (op1) == SImode && mode1 == TImode)
22878 rtx x = gen_reg_rtx (V4SImode);
22879 emit_insn (gen_sse2_loadd (x, op1));
22880 op1 = gen_lowpart (TImode, x);
22883 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
22884 op0 = copy_to_mode_reg (mode0, op0);
22885 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
22886 op1 = copy_to_mode_reg (mode1, op1);
22888 pat = GEN_FCN (icode) (target, op0, op1);
22889 if (! pat)
22890 return 0;
22892 emit_insn (pat);
22894 return target;
22897 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
22899 static rtx
22900 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
22901 enum ix86_builtin_func_type m_type,
22902 enum rtx_code sub_code)
22904 rtx pat;
22905 int i;
22906 int nargs;
22907 bool comparison_p = false;
22908 bool tf_p = false;
22909 bool last_arg_constant = false;
22910 int num_memory = 0;
22911 struct {
22912 rtx op;
22913 enum machine_mode mode;
22914 } args[4];
22916 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22918 switch (m_type)
22920 case MULTI_ARG_4_DF2_DI_I:
22921 case MULTI_ARG_4_DF2_DI_I1:
22922 case MULTI_ARG_4_SF2_SI_I:
22923 case MULTI_ARG_4_SF2_SI_I1:
22924 nargs = 4;
22925 last_arg_constant = true;
22926 break;
22928 case MULTI_ARG_3_SF:
22929 case MULTI_ARG_3_DF:
22930 case MULTI_ARG_3_SF2:
22931 case MULTI_ARG_3_DF2:
22932 case MULTI_ARG_3_DI:
22933 case MULTI_ARG_3_SI:
22934 case MULTI_ARG_3_SI_DI:
22935 case MULTI_ARG_3_HI:
22936 case MULTI_ARG_3_HI_SI:
22937 case MULTI_ARG_3_QI:
22938 case MULTI_ARG_3_DI2:
22939 case MULTI_ARG_3_SI2:
22940 case MULTI_ARG_3_HI2:
22941 case MULTI_ARG_3_QI2:
22942 nargs = 3;
22943 break;
22945 case MULTI_ARG_2_SF:
22946 case MULTI_ARG_2_DF:
22947 case MULTI_ARG_2_DI:
22948 case MULTI_ARG_2_SI:
22949 case MULTI_ARG_2_HI:
22950 case MULTI_ARG_2_QI:
22951 nargs = 2;
22952 break;
22954 case MULTI_ARG_2_DI_IMM:
22955 case MULTI_ARG_2_SI_IMM:
22956 case MULTI_ARG_2_HI_IMM:
22957 case MULTI_ARG_2_QI_IMM:
22958 nargs = 2;
22959 last_arg_constant = true;
22960 break;
22962 case MULTI_ARG_1_SF:
22963 case MULTI_ARG_1_DF:
22964 case MULTI_ARG_1_SF2:
22965 case MULTI_ARG_1_DF2:
22966 case MULTI_ARG_1_DI:
22967 case MULTI_ARG_1_SI:
22968 case MULTI_ARG_1_HI:
22969 case MULTI_ARG_1_QI:
22970 case MULTI_ARG_1_SI_DI:
22971 case MULTI_ARG_1_HI_DI:
22972 case MULTI_ARG_1_HI_SI:
22973 case MULTI_ARG_1_QI_DI:
22974 case MULTI_ARG_1_QI_SI:
22975 case MULTI_ARG_1_QI_HI:
22976 nargs = 1;
22977 break;
22979 case MULTI_ARG_2_DI_CMP:
22980 case MULTI_ARG_2_SI_CMP:
22981 case MULTI_ARG_2_HI_CMP:
22982 case MULTI_ARG_2_QI_CMP:
22983 nargs = 2;
22984 comparison_p = true;
22985 break;
22987 case MULTI_ARG_2_SF_TF:
22988 case MULTI_ARG_2_DF_TF:
22989 case MULTI_ARG_2_DI_TF:
22990 case MULTI_ARG_2_SI_TF:
22991 case MULTI_ARG_2_HI_TF:
22992 case MULTI_ARG_2_QI_TF:
22993 nargs = 2;
22994 tf_p = true;
22995 break;
22997 default:
22998 gcc_unreachable ();
23001 if (optimize || !target
23002 || GET_MODE (target) != tmode
23003 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23004 target = gen_reg_rtx (tmode);
23006 gcc_assert (nargs <= 4);
23008 for (i = 0; i < nargs; i++)
23010 tree arg = CALL_EXPR_ARG (exp, i);
23011 rtx op = expand_normal (arg);
23012 int adjust = (comparison_p) ? 1 : 0;
23013 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
23015 if (last_arg_constant && i == nargs-1)
23017 if (!CONST_INT_P (op))
23019 error ("last argument must be an immediate");
23020 return gen_reg_rtx (tmode);
23023 else
23025 if (VECTOR_MODE_P (mode))
23026 op = safe_vector_operand (op, mode);
23028 /* If we aren't optimizing, only allow one memory operand to be
23029 generated. */
23030 if (memory_operand (op, mode))
23031 num_memory++;
23033 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
23035 if (optimize
23036 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
23037 || num_memory > 1)
23038 op = force_reg (mode, op);
23041 args[i].op = op;
23042 args[i].mode = mode;
23045 switch (nargs)
23047 case 1:
23048 pat = GEN_FCN (icode) (target, args[0].op);
23049 break;
23051 case 2:
23052 if (tf_p)
23053 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23054 GEN_INT ((int)sub_code));
23055 else if (! comparison_p)
23056 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23057 else
23059 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23060 args[0].op,
23061 args[1].op);
23063 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23065 break;
23067 case 3:
23068 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23069 break;
23071 case 4:
23072 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
23073 break;
23075 default:
23076 gcc_unreachable ();
23079 if (! pat)
23080 return 0;
23082 emit_insn (pat);
23083 return target;
23086 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23087 insns with vec_merge. */
23089 static rtx
23090 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23091 rtx target)
23093 rtx pat;
23094 tree arg0 = CALL_EXPR_ARG (exp, 0);
23095 rtx op1, op0 = expand_normal (arg0);
23096 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23097 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23099 if (optimize || !target
23100 || GET_MODE (target) != tmode
23101 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23102 target = gen_reg_rtx (tmode);
23104 if (VECTOR_MODE_P (mode0))
23105 op0 = safe_vector_operand (op0, mode0);
23107 if ((optimize && !register_operand (op0, mode0))
23108 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23109 op0 = copy_to_mode_reg (mode0, op0);
23111 op1 = op0;
23112 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23113 op1 = copy_to_mode_reg (mode0, op1);
23115 pat = GEN_FCN (icode) (target, op0, op1);
23116 if (! pat)
23117 return 0;
23118 emit_insn (pat);
23119 return target;
23122 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23124 static rtx
23125 ix86_expand_sse_compare (const struct builtin_description *d,
23126 tree exp, rtx target, bool swap)
23128 rtx pat;
23129 tree arg0 = CALL_EXPR_ARG (exp, 0);
23130 tree arg1 = CALL_EXPR_ARG (exp, 1);
23131 rtx op0 = expand_normal (arg0);
23132 rtx op1 = expand_normal (arg1);
23133 rtx op2;
23134 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23135 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23136 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23137 enum rtx_code comparison = d->comparison;
23139 if (VECTOR_MODE_P (mode0))
23140 op0 = safe_vector_operand (op0, mode0);
23141 if (VECTOR_MODE_P (mode1))
23142 op1 = safe_vector_operand (op1, mode1);
23144 /* Swap operands if we have a comparison that isn't available in
23145 hardware. */
23146 if (swap)
23148 rtx tmp = gen_reg_rtx (mode1);
23149 emit_move_insn (tmp, op1);
23150 op1 = op0;
23151 op0 = tmp;
23154 if (optimize || !target
23155 || GET_MODE (target) != tmode
23156 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23157 target = gen_reg_rtx (tmode);
23159 if ((optimize && !register_operand (op0, mode0))
23160 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23161 op0 = copy_to_mode_reg (mode0, op0);
23162 if ((optimize && !register_operand (op1, mode1))
23163 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23164 op1 = copy_to_mode_reg (mode1, op1);
23166 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23167 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23168 if (! pat)
23169 return 0;
23170 emit_insn (pat);
23171 return target;
23174 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23176 static rtx
23177 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23178 rtx target)
23180 rtx pat;
23181 tree arg0 = CALL_EXPR_ARG (exp, 0);
23182 tree arg1 = CALL_EXPR_ARG (exp, 1);
23183 rtx op0 = expand_normal (arg0);
23184 rtx op1 = expand_normal (arg1);
23185 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23186 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23187 enum rtx_code comparison = d->comparison;
23189 if (VECTOR_MODE_P (mode0))
23190 op0 = safe_vector_operand (op0, mode0);
23191 if (VECTOR_MODE_P (mode1))
23192 op1 = safe_vector_operand (op1, mode1);
23194 /* Swap operands if we have a comparison that isn't available in
23195 hardware. */
23196 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23198 rtx tmp = op1;
23199 op1 = op0;
23200 op0 = tmp;
23203 target = gen_reg_rtx (SImode);
23204 emit_move_insn (target, const0_rtx);
23205 target = gen_rtx_SUBREG (QImode, target, 0);
23207 if ((optimize && !register_operand (op0, mode0))
23208 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23209 op0 = copy_to_mode_reg (mode0, op0);
23210 if ((optimize && !register_operand (op1, mode1))
23211 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23212 op1 = copy_to_mode_reg (mode1, op1);
23214 pat = GEN_FCN (d->icode) (op0, op1);
23215 if (! pat)
23216 return 0;
23217 emit_insn (pat);
23218 emit_insn (gen_rtx_SET (VOIDmode,
23219 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23220 gen_rtx_fmt_ee (comparison, QImode,
23221 SET_DEST (pat),
23222 const0_rtx)));
23224 return SUBREG_REG (target);
23227 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23229 static rtx
23230 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23231 rtx target)
23233 rtx pat;
23234 tree arg0 = CALL_EXPR_ARG (exp, 0);
23235 tree arg1 = CALL_EXPR_ARG (exp, 1);
23236 rtx op0 = expand_normal (arg0);
23237 rtx op1 = expand_normal (arg1);
23238 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23239 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23240 enum rtx_code comparison = d->comparison;
23242 if (VECTOR_MODE_P (mode0))
23243 op0 = safe_vector_operand (op0, mode0);
23244 if (VECTOR_MODE_P (mode1))
23245 op1 = safe_vector_operand (op1, mode1);
23247 target = gen_reg_rtx (SImode);
23248 emit_move_insn (target, const0_rtx);
23249 target = gen_rtx_SUBREG (QImode, target, 0);
23251 if ((optimize && !register_operand (op0, mode0))
23252 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23253 op0 = copy_to_mode_reg (mode0, op0);
23254 if ((optimize && !register_operand (op1, mode1))
23255 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23256 op1 = copy_to_mode_reg (mode1, op1);
23258 pat = GEN_FCN (d->icode) (op0, op1);
23259 if (! pat)
23260 return 0;
23261 emit_insn (pat);
23262 emit_insn (gen_rtx_SET (VOIDmode,
23263 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23264 gen_rtx_fmt_ee (comparison, QImode,
23265 SET_DEST (pat),
23266 const0_rtx)));
23268 return SUBREG_REG (target);
23271 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23273 static rtx
23274 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23275 tree exp, rtx target)
23277 rtx pat;
23278 tree arg0 = CALL_EXPR_ARG (exp, 0);
23279 tree arg1 = CALL_EXPR_ARG (exp, 1);
23280 tree arg2 = CALL_EXPR_ARG (exp, 2);
23281 tree arg3 = CALL_EXPR_ARG (exp, 3);
23282 tree arg4 = CALL_EXPR_ARG (exp, 4);
23283 rtx scratch0, scratch1;
23284 rtx op0 = expand_normal (arg0);
23285 rtx op1 = expand_normal (arg1);
23286 rtx op2 = expand_normal (arg2);
23287 rtx op3 = expand_normal (arg3);
23288 rtx op4 = expand_normal (arg4);
23289 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23291 tmode0 = insn_data[d->icode].operand[0].mode;
23292 tmode1 = insn_data[d->icode].operand[1].mode;
23293 modev2 = insn_data[d->icode].operand[2].mode;
23294 modei3 = insn_data[d->icode].operand[3].mode;
23295 modev4 = insn_data[d->icode].operand[4].mode;
23296 modei5 = insn_data[d->icode].operand[5].mode;
23297 modeimm = insn_data[d->icode].operand[6].mode;
23299 if (VECTOR_MODE_P (modev2))
23300 op0 = safe_vector_operand (op0, modev2);
23301 if (VECTOR_MODE_P (modev4))
23302 op2 = safe_vector_operand (op2, modev4);
23304 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23305 op0 = copy_to_mode_reg (modev2, op0);
23306 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23307 op1 = copy_to_mode_reg (modei3, op1);
23308 if ((optimize && !register_operand (op2, modev4))
23309 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23310 op2 = copy_to_mode_reg (modev4, op2);
23311 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23312 op3 = copy_to_mode_reg (modei5, op3);
23314 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23316 error ("the fifth argument must be a 8-bit immediate");
23317 return const0_rtx;
23320 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23322 if (optimize || !target
23323 || GET_MODE (target) != tmode0
23324 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23325 target = gen_reg_rtx (tmode0);
23327 scratch1 = gen_reg_rtx (tmode1);
23329 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23331 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23333 if (optimize || !target
23334 || GET_MODE (target) != tmode1
23335 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23336 target = gen_reg_rtx (tmode1);
23338 scratch0 = gen_reg_rtx (tmode0);
23340 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23342 else
23344 gcc_assert (d->flag);
23346 scratch0 = gen_reg_rtx (tmode0);
23347 scratch1 = gen_reg_rtx (tmode1);
23349 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23352 if (! pat)
23353 return 0;
23355 emit_insn (pat);
23357 if (d->flag)
23359 target = gen_reg_rtx (SImode);
23360 emit_move_insn (target, const0_rtx);
23361 target = gen_rtx_SUBREG (QImode, target, 0);
23363 emit_insn
23364 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23365 gen_rtx_fmt_ee (EQ, QImode,
23366 gen_rtx_REG ((enum machine_mode) d->flag,
23367 FLAGS_REG),
23368 const0_rtx)));
23369 return SUBREG_REG (target);
23371 else
23372 return target;
23376 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23378 static rtx
23379 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23380 tree exp, rtx target)
23382 rtx pat;
23383 tree arg0 = CALL_EXPR_ARG (exp, 0);
23384 tree arg1 = CALL_EXPR_ARG (exp, 1);
23385 tree arg2 = CALL_EXPR_ARG (exp, 2);
23386 rtx scratch0, scratch1;
23387 rtx op0 = expand_normal (arg0);
23388 rtx op1 = expand_normal (arg1);
23389 rtx op2 = expand_normal (arg2);
23390 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23392 tmode0 = insn_data[d->icode].operand[0].mode;
23393 tmode1 = insn_data[d->icode].operand[1].mode;
23394 modev2 = insn_data[d->icode].operand[2].mode;
23395 modev3 = insn_data[d->icode].operand[3].mode;
23396 modeimm = insn_data[d->icode].operand[4].mode;
23398 if (VECTOR_MODE_P (modev2))
23399 op0 = safe_vector_operand (op0, modev2);
23400 if (VECTOR_MODE_P (modev3))
23401 op1 = safe_vector_operand (op1, modev3);
23403 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23404 op0 = copy_to_mode_reg (modev2, op0);
23405 if ((optimize && !register_operand (op1, modev3))
23406 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23407 op1 = copy_to_mode_reg (modev3, op1);
23409 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23411 error ("the third argument must be a 8-bit immediate");
23412 return const0_rtx;
23415 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23417 if (optimize || !target
23418 || GET_MODE (target) != tmode0
23419 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23420 target = gen_reg_rtx (tmode0);
23422 scratch1 = gen_reg_rtx (tmode1);
23424 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23426 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23428 if (optimize || !target
23429 || GET_MODE (target) != tmode1
23430 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23431 target = gen_reg_rtx (tmode1);
23433 scratch0 = gen_reg_rtx (tmode0);
23435 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23437 else
23439 gcc_assert (d->flag);
23441 scratch0 = gen_reg_rtx (tmode0);
23442 scratch1 = gen_reg_rtx (tmode1);
23444 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23447 if (! pat)
23448 return 0;
23450 emit_insn (pat);
23452 if (d->flag)
23454 target = gen_reg_rtx (SImode);
23455 emit_move_insn (target, const0_rtx);
23456 target = gen_rtx_SUBREG (QImode, target, 0);
23458 emit_insn
23459 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23460 gen_rtx_fmt_ee (EQ, QImode,
23461 gen_rtx_REG ((enum machine_mode) d->flag,
23462 FLAGS_REG),
23463 const0_rtx)));
23464 return SUBREG_REG (target);
23466 else
23467 return target;
23470 /* Subroutine of ix86_expand_builtin to take care of insns with
23471 variable number of operands. */
23473 static rtx
23474 ix86_expand_args_builtin (const struct builtin_description *d,
23475 tree exp, rtx target)
23477 rtx pat, real_target;
23478 unsigned int i, nargs;
23479 unsigned int nargs_constant = 0;
23480 int num_memory = 0;
23481 struct
23483 rtx op;
23484 enum machine_mode mode;
23485 } args[4];
23486 bool last_arg_count = false;
23487 enum insn_code icode = d->icode;
23488 const struct insn_data *insn_p = &insn_data[icode];
23489 enum machine_mode tmode = insn_p->operand[0].mode;
23490 enum machine_mode rmode = VOIDmode;
23491 bool swap = false;
23492 enum rtx_code comparison = d->comparison;
23494 switch ((enum ix86_builtin_func_type) d->flag)
23496 case INT_FTYPE_V8SF_V8SF_PTEST:
23497 case INT_FTYPE_V4DI_V4DI_PTEST:
23498 case INT_FTYPE_V4DF_V4DF_PTEST:
23499 case INT_FTYPE_V4SF_V4SF_PTEST:
23500 case INT_FTYPE_V2DI_V2DI_PTEST:
23501 case INT_FTYPE_V2DF_V2DF_PTEST:
23502 return ix86_expand_sse_ptest (d, exp, target);
23503 case FLOAT128_FTYPE_FLOAT128:
23504 case FLOAT_FTYPE_FLOAT:
23505 case INT_FTYPE_INT:
23506 case UINT64_FTYPE_INT:
23507 case UINT16_FTYPE_UINT16:
23508 case INT64_FTYPE_INT64:
23509 case INT64_FTYPE_V4SF:
23510 case INT64_FTYPE_V2DF:
23511 case INT_FTYPE_V16QI:
23512 case INT_FTYPE_V8QI:
23513 case INT_FTYPE_V8SF:
23514 case INT_FTYPE_V4DF:
23515 case INT_FTYPE_V4SF:
23516 case INT_FTYPE_V2DF:
23517 case V16QI_FTYPE_V16QI:
23518 case V8SI_FTYPE_V8SF:
23519 case V8SI_FTYPE_V4SI:
23520 case V8HI_FTYPE_V8HI:
23521 case V8HI_FTYPE_V16QI:
23522 case V8QI_FTYPE_V8QI:
23523 case V8SF_FTYPE_V8SF:
23524 case V8SF_FTYPE_V8SI:
23525 case V8SF_FTYPE_V4SF:
23526 case V4SI_FTYPE_V4SI:
23527 case V4SI_FTYPE_V16QI:
23528 case V4SI_FTYPE_V4SF:
23529 case V4SI_FTYPE_V8SI:
23530 case V4SI_FTYPE_V8HI:
23531 case V4SI_FTYPE_V4DF:
23532 case V4SI_FTYPE_V2DF:
23533 case V4HI_FTYPE_V4HI:
23534 case V4DF_FTYPE_V4DF:
23535 case V4DF_FTYPE_V4SI:
23536 case V4DF_FTYPE_V4SF:
23537 case V4DF_FTYPE_V2DF:
23538 case V4SF_FTYPE_V4SF:
23539 case V4SF_FTYPE_V4SI:
23540 case V4SF_FTYPE_V8SF:
23541 case V4SF_FTYPE_V4DF:
23542 case V4SF_FTYPE_V2DF:
23543 case V2DI_FTYPE_V2DI:
23544 case V2DI_FTYPE_V16QI:
23545 case V2DI_FTYPE_V8HI:
23546 case V2DI_FTYPE_V4SI:
23547 case V2DF_FTYPE_V2DF:
23548 case V2DF_FTYPE_V4SI:
23549 case V2DF_FTYPE_V4DF:
23550 case V2DF_FTYPE_V4SF:
23551 case V2DF_FTYPE_V2SI:
23552 case V2SI_FTYPE_V2SI:
23553 case V2SI_FTYPE_V4SF:
23554 case V2SI_FTYPE_V2SF:
23555 case V2SI_FTYPE_V2DF:
23556 case V2SF_FTYPE_V2SF:
23557 case V2SF_FTYPE_V2SI:
23558 nargs = 1;
23559 break;
23560 case V4SF_FTYPE_V4SF_VEC_MERGE:
23561 case V2DF_FTYPE_V2DF_VEC_MERGE:
23562 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23563 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23564 case V16QI_FTYPE_V16QI_V16QI:
23565 case V16QI_FTYPE_V8HI_V8HI:
23566 case V8QI_FTYPE_V8QI_V8QI:
23567 case V8QI_FTYPE_V4HI_V4HI:
23568 case V8HI_FTYPE_V8HI_V8HI:
23569 case V8HI_FTYPE_V16QI_V16QI:
23570 case V8HI_FTYPE_V4SI_V4SI:
23571 case V8SF_FTYPE_V8SF_V8SF:
23572 case V8SF_FTYPE_V8SF_V8SI:
23573 case V4SI_FTYPE_V4SI_V4SI:
23574 case V4SI_FTYPE_V8HI_V8HI:
23575 case V4SI_FTYPE_V4SF_V4SF:
23576 case V4SI_FTYPE_V2DF_V2DF:
23577 case V4HI_FTYPE_V4HI_V4HI:
23578 case V4HI_FTYPE_V8QI_V8QI:
23579 case V4HI_FTYPE_V2SI_V2SI:
23580 case V4DF_FTYPE_V4DF_V4DF:
23581 case V4DF_FTYPE_V4DF_V4DI:
23582 case V4SF_FTYPE_V4SF_V4SF:
23583 case V4SF_FTYPE_V4SF_V4SI:
23584 case V4SF_FTYPE_V4SF_V2SI:
23585 case V4SF_FTYPE_V4SF_V2DF:
23586 case V4SF_FTYPE_V4SF_DI:
23587 case V4SF_FTYPE_V4SF_SI:
23588 case V2DI_FTYPE_V2DI_V2DI:
23589 case V2DI_FTYPE_V16QI_V16QI:
23590 case V2DI_FTYPE_V4SI_V4SI:
23591 case V2DI_FTYPE_V2DI_V16QI:
23592 case V2DI_FTYPE_V2DF_V2DF:
23593 case V2SI_FTYPE_V2SI_V2SI:
23594 case V2SI_FTYPE_V4HI_V4HI:
23595 case V2SI_FTYPE_V2SF_V2SF:
23596 case V2DF_FTYPE_V2DF_V2DF:
23597 case V2DF_FTYPE_V2DF_V4SF:
23598 case V2DF_FTYPE_V2DF_V2DI:
23599 case V2DF_FTYPE_V2DF_DI:
23600 case V2DF_FTYPE_V2DF_SI:
23601 case V2SF_FTYPE_V2SF_V2SF:
23602 case V1DI_FTYPE_V1DI_V1DI:
23603 case V1DI_FTYPE_V8QI_V8QI:
23604 case V1DI_FTYPE_V2SI_V2SI:
23605 if (comparison == UNKNOWN)
23606 return ix86_expand_binop_builtin (icode, exp, target);
23607 nargs = 2;
23608 break;
23609 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23610 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23611 gcc_assert (comparison != UNKNOWN);
23612 nargs = 2;
23613 swap = true;
23614 break;
23615 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23616 case V8HI_FTYPE_V8HI_SI_COUNT:
23617 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23618 case V4SI_FTYPE_V4SI_SI_COUNT:
23619 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23620 case V4HI_FTYPE_V4HI_SI_COUNT:
23621 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23622 case V2DI_FTYPE_V2DI_SI_COUNT:
23623 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23624 case V2SI_FTYPE_V2SI_SI_COUNT:
23625 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23626 case V1DI_FTYPE_V1DI_SI_COUNT:
23627 nargs = 2;
23628 last_arg_count = true;
23629 break;
23630 case UINT64_FTYPE_UINT64_UINT64:
23631 case UINT_FTYPE_UINT_UINT:
23632 case UINT_FTYPE_UINT_USHORT:
23633 case UINT_FTYPE_UINT_UCHAR:
23634 case UINT16_FTYPE_UINT16_INT:
23635 case UINT8_FTYPE_UINT8_INT:
23636 nargs = 2;
23637 break;
23638 case V2DI_FTYPE_V2DI_INT_CONVERT:
23639 nargs = 2;
23640 rmode = V1TImode;
23641 nargs_constant = 1;
23642 break;
23643 case V8HI_FTYPE_V8HI_INT:
23644 case V8SF_FTYPE_V8SF_INT:
23645 case V4SI_FTYPE_V4SI_INT:
23646 case V4SI_FTYPE_V8SI_INT:
23647 case V4HI_FTYPE_V4HI_INT:
23648 case V4DF_FTYPE_V4DF_INT:
23649 case V4SF_FTYPE_V4SF_INT:
23650 case V4SF_FTYPE_V8SF_INT:
23651 case V2DI_FTYPE_V2DI_INT:
23652 case V2DF_FTYPE_V2DF_INT:
23653 case V2DF_FTYPE_V4DF_INT:
23654 nargs = 2;
23655 nargs_constant = 1;
23656 break;
23657 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23658 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23659 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23660 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23661 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23662 nargs = 3;
23663 break;
23664 case V16QI_FTYPE_V16QI_V16QI_INT:
23665 case V8HI_FTYPE_V8HI_V8HI_INT:
23666 case V8SI_FTYPE_V8SI_V8SI_INT:
23667 case V8SI_FTYPE_V8SI_V4SI_INT:
23668 case V8SF_FTYPE_V8SF_V8SF_INT:
23669 case V8SF_FTYPE_V8SF_V4SF_INT:
23670 case V4SI_FTYPE_V4SI_V4SI_INT:
23671 case V4DF_FTYPE_V4DF_V4DF_INT:
23672 case V4DF_FTYPE_V4DF_V2DF_INT:
23673 case V4SF_FTYPE_V4SF_V4SF_INT:
23674 case V2DI_FTYPE_V2DI_V2DI_INT:
23675 case V2DF_FTYPE_V2DF_V2DF_INT:
23676 nargs = 3;
23677 nargs_constant = 1;
23678 break;
23679 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23680 nargs = 3;
23681 rmode = V2DImode;
23682 nargs_constant = 1;
23683 break;
23684 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23685 nargs = 3;
23686 rmode = DImode;
23687 nargs_constant = 1;
23688 break;
23689 case V2DI_FTYPE_V2DI_UINT_UINT:
23690 nargs = 3;
23691 nargs_constant = 2;
23692 break;
23693 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23694 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23695 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23696 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23697 nargs = 4;
23698 nargs_constant = 1;
23699 break;
23700 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23701 nargs = 4;
23702 nargs_constant = 2;
23703 break;
23704 default:
23705 gcc_unreachable ();
23708 gcc_assert (nargs <= ARRAY_SIZE (args));
23710 if (comparison != UNKNOWN)
23712 gcc_assert (nargs == 2);
23713 return ix86_expand_sse_compare (d, exp, target, swap);
23716 if (rmode == VOIDmode || rmode == tmode)
23718 if (optimize
23719 || target == 0
23720 || GET_MODE (target) != tmode
23721 || ! (*insn_p->operand[0].predicate) (target, tmode))
23722 target = gen_reg_rtx (tmode);
23723 real_target = target;
23725 else
23727 target = gen_reg_rtx (rmode);
23728 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23731 for (i = 0; i < nargs; i++)
23733 tree arg = CALL_EXPR_ARG (exp, i);
23734 rtx op = expand_normal (arg);
23735 enum machine_mode mode = insn_p->operand[i + 1].mode;
23736 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23738 if (last_arg_count && (i + 1) == nargs)
23740 /* SIMD shift insns take either an 8-bit immediate or a
23741    register as the count, but the builtin functions take an int
23742    as the count. If the count doesn't match, we put it in a register. */
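/* e.g. (a sketch, assuming the usual shift-by-int builtin signature):
   __builtin_ia32_psllwi128 (v, n) with a non-constant N reaches this
   path and N is forced into a register below.  */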
23743 if (!match)
23745 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23746 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23747 op = copy_to_reg (op);
23750 else if ((nargs - i) <= nargs_constant)
23752 if (!match)
23753 switch (icode)
23755 case CODE_FOR_sse4_1_roundpd:
23756 case CODE_FOR_sse4_1_roundps:
23757 case CODE_FOR_sse4_1_roundsd:
23758 case CODE_FOR_sse4_1_roundss:
23759 case CODE_FOR_sse4_1_blendps:
23760 case CODE_FOR_avx_blendpd256:
23761 case CODE_FOR_avx_vpermilv4df:
23762 case CODE_FOR_avx_roundpd256:
23763 case CODE_FOR_avx_roundps256:
23764 error ("the last argument must be a 4-bit immediate");
23765 return const0_rtx;
23767 case CODE_FOR_sse4_1_blendpd:
23768 case CODE_FOR_avx_vpermilv2df:
23769 case CODE_FOR_xop_vpermil2v2df3:
23770 case CODE_FOR_xop_vpermil2v4sf3:
23771 case CODE_FOR_xop_vpermil2v4df3:
23772 case CODE_FOR_xop_vpermil2v8sf3:
23773 error ("the last argument must be a 2-bit immediate");
23774 return const0_rtx;
23776 case CODE_FOR_avx_vextractf128v4df:
23777 case CODE_FOR_avx_vextractf128v8sf:
23778 case CODE_FOR_avx_vextractf128v8si:
23779 case CODE_FOR_avx_vinsertf128v4df:
23780 case CODE_FOR_avx_vinsertf128v8sf:
23781 case CODE_FOR_avx_vinsertf128v8si:
23782 error ("the last argument must be a 1-bit immediate");
23783 return const0_rtx;
23785 case CODE_FOR_avx_cmpsdv2df3:
23786 case CODE_FOR_avx_cmpssv4sf3:
23787 case CODE_FOR_avx_cmppdv2df3:
23788 case CODE_FOR_avx_cmppsv4sf3:
23789 case CODE_FOR_avx_cmppdv4df3:
23790 case CODE_FOR_avx_cmppsv8sf3:
23791 error ("the last argument must be a 5-bit immediate");
23792 return const0_rtx;
23794 default:
23795 switch (nargs_constant)
23797 case 2:
23798 if ((nargs - i) == nargs_constant)
23800 error ("the next to last argument must be an 8-bit immediate");
23801 break;
23803 case 1:
23804 error ("the last argument must be an 8-bit immediate");
23805 break;
23806 default:
23807 gcc_unreachable ();
23809 return const0_rtx;
23812 else
23814 if (VECTOR_MODE_P (mode))
23815 op = safe_vector_operand (op, mode);
23817 /* If we aren't optimizing, only allow one memory operand to
23818 be generated. */
23819 if (memory_operand (op, mode))
23820 num_memory++;
23822 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23824 if (optimize || !match || num_memory > 1)
23825 op = copy_to_mode_reg (mode, op);
23827 else
23829 op = copy_to_reg (op);
23830 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23834 args[i].op = op;
23835 args[i].mode = mode;
23838 switch (nargs)
23840 case 1:
23841 pat = GEN_FCN (icode) (real_target, args[0].op);
23842 break;
23843 case 2:
23844 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23845 break;
23846 case 3:
23847 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23848 args[2].op);
23849 break;
23850 case 4:
23851 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23852 args[2].op, args[3].op);
23853 break;
23854 default:
23855 gcc_unreachable ();
23858 if (! pat)
23859 return 0;
23861 emit_insn (pat);
23862 return target;
23865 /* Subroutine of ix86_expand_builtin to take care of special insns
23866 with variable number of operands. */
23868 static rtx
23869 ix86_expand_special_args_builtin (const struct builtin_description *d,
23870 tree exp, rtx target)
23872 tree arg;
23873 rtx pat, op;
23874 unsigned int i, nargs, arg_adjust, memory;
23875 struct
23877 rtx op;
23878 enum machine_mode mode;
23879 } args[3];
23880 enum insn_code icode = d->icode;
23881 bool last_arg_constant = false;
23882 const struct insn_data *insn_p = &insn_data[icode];
23883 enum machine_mode tmode = insn_p->operand[0].mode;
23884 enum { load, store } klass;
23886 switch ((enum ix86_builtin_func_type) d->flag)
23888 case VOID_FTYPE_VOID:
23889 emit_insn (GEN_FCN (icode) (target));
23890 return 0;
23891 case UINT64_FTYPE_VOID:
23892 nargs = 0;
23893 klass = load;
23894 memory = 0;
23895 break;
23896 case UINT64_FTYPE_PUNSIGNED:
23897 case V2DI_FTYPE_PV2DI:
23898 case V32QI_FTYPE_PCCHAR:
23899 case V16QI_FTYPE_PCCHAR:
23900 case V8SF_FTYPE_PCV4SF:
23901 case V8SF_FTYPE_PCFLOAT:
23902 case V4SF_FTYPE_PCFLOAT:
23903 case V4DF_FTYPE_PCV2DF:
23904 case V4DF_FTYPE_PCDOUBLE:
23905 case V2DF_FTYPE_PCDOUBLE:
23906 case VOID_FTYPE_PVOID:
23907 nargs = 1;
23908 klass = load;
23909 memory = 0;
23910 break;
23911 case VOID_FTYPE_PV2SF_V4SF:
23912 case VOID_FTYPE_PV4DI_V4DI:
23913 case VOID_FTYPE_PV2DI_V2DI:
23914 case VOID_FTYPE_PCHAR_V32QI:
23915 case VOID_FTYPE_PCHAR_V16QI:
23916 case VOID_FTYPE_PFLOAT_V8SF:
23917 case VOID_FTYPE_PFLOAT_V4SF:
23918 case VOID_FTYPE_PDOUBLE_V4DF:
23919 case VOID_FTYPE_PDOUBLE_V2DF:
23920 case VOID_FTYPE_PULONGLONG_ULONGLONG:
23921 case VOID_FTYPE_PINT_INT:
23922 nargs = 1;
23923 klass = store;
23924 /* Reserve memory operand for target. */
23925 memory = ARRAY_SIZE (args);
23926 break;
23927 case V4SF_FTYPE_V4SF_PCV2SF:
23928 case V2DF_FTYPE_V2DF_PCDOUBLE:
23929 nargs = 2;
23930 klass = load;
23931 memory = 1;
23932 break;
23933 case V8SF_FTYPE_PCV8SF_V8SF:
23934 case V4DF_FTYPE_PCV4DF_V4DF:
23935 case V4SF_FTYPE_PCV4SF_V4SF:
23936 case V2DF_FTYPE_PCV2DF_V2DF:
23937 nargs = 2;
23938 klass = load;
23939 memory = 0;
23940 break;
23941 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23942 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23943 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23944 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23945 nargs = 2;
23946 klass = store;
23947 /* Reserve memory operand for target. */
23948 memory = ARRAY_SIZE (args);
23949 break;
23950 case VOID_FTYPE_UINT_UINT_UINT:
23951 case VOID_FTYPE_UINT64_UINT_UINT:
23952 case UCHAR_FTYPE_UINT_UINT_UINT:
23953 case UCHAR_FTYPE_UINT64_UINT_UINT:
23954 nargs = 3;
23955 klass = load;
23956 memory = ARRAY_SIZE (args);
23957 last_arg_constant = true;
23958 break;
23959 default:
23960 gcc_unreachable ();
23963 gcc_assert (nargs <= ARRAY_SIZE (args));
23965 if (klass == store)
23967 arg = CALL_EXPR_ARG (exp, 0);
23968 op = expand_normal (arg);
23969 gcc_assert (target == 0);
23970 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23971 arg_adjust = 1;
23973 else
23975 arg_adjust = 0;
23976 if (optimize
23977 || target == 0
23978 || GET_MODE (target) != tmode
23979 || ! (*insn_p->operand[0].predicate) (target, tmode))
23980 target = gen_reg_rtx (tmode);
23983 for (i = 0; i < nargs; i++)
23985 enum machine_mode mode = insn_p->operand[i + 1].mode;
23986 bool match;
23988 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
23989 op = expand_normal (arg);
23990 match = (*insn_p->operand[i + 1].predicate) (op, mode);
23992 if (last_arg_constant && (i + 1) == nargs)
23994 if (!match)
23996 if (icode == CODE_FOR_lwp_lwpvalsi3
23997 || icode == CODE_FOR_lwp_lwpinssi3
23998 || icode == CODE_FOR_lwp_lwpvaldi3
23999 || icode == CODE_FOR_lwp_lwpinsdi3)
24000 error ("the last argument must be a 32-bit immediate");
24001 else
24002 error ("the last argument must be an 8-bit immediate");
24003 return const0_rtx;
24006 else
24008 if (i == memory)
24010 /* This must be the memory operand. */
24011 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24012 gcc_assert (GET_MODE (op) == mode
24013 || GET_MODE (op) == VOIDmode);
24015 else
24017 /* This must be a register. */
24018 if (VECTOR_MODE_P (mode))
24019 op = safe_vector_operand (op, mode);
24021 gcc_assert (GET_MODE (op) == mode
24022 || GET_MODE (op) == VOIDmode);
24023 op = copy_to_mode_reg (mode, op);
24027 args[i].op = op;
24028 args[i].mode = mode;
24031 switch (nargs)
24033 case 0:
24034 pat = GEN_FCN (icode) (target);
24035 break;
24036 case 1:
24037 pat = GEN_FCN (icode) (target, args[0].op);
24038 break;
24039 case 2:
24040 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24041 break;
24042 case 3:
24043 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
24044 break;
24045 default:
24046 gcc_unreachable ();
24049 if (! pat)
24050 return 0;
24051 emit_insn (pat);
24052 return klass == store ? 0 : target;
24055 /* Return the integer constant in ARG. Constrain it to be in the range
24056 of the subparts of VEC_TYPE; issue an error if not. */
24058 static int
24059 get_element_number (tree vec_type, tree arg)
24061 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24063 if (!host_integerp (arg, 1)
24064 || (elt = tree_low_cst (arg, 1), elt > max))
24066 error ("selector must be an integer constant in the range 0..%wi", max);
24067 return 0;
24070 return elt;
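/* For example (illustrative, not from this file): a call such as
       __builtin_ia32_vec_ext_v4sf (x, 7)
   is rejected here with "selector must be an integer constant in the
   range 0..3", since a V4SF vector has four subparts.  */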
24073 /* A subroutine of ix86_expand_builtin. These builtins are wrappers around
24074 ix86_expand_vector_init. We DO have language-level syntax for this, in
24075 the form of (type){ init-list }. Except that since we can't place emms
24076 instructions from inside the compiler, we can't allow the use of MMX
24077 registers unless the user explicitly asks for it. So we do *not* define
24078 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24079 we have builtins invoked by mmintrin.h that give us license to emit
24080 these sorts of instructions. */
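/* A sketch of such a wrapper (what mmintrin.h provides, not code from
   this file):

       extern __inline __m64
       _mm_set_pi32 (int __i1, int __i0)
       {
         return (__m64) __builtin_ia32_vec_init_v2si (__i1, __i0);
       }
*/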
24082 static rtx
24083 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24085 enum machine_mode tmode = TYPE_MODE (type);
24086 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24087 int i, n_elt = GET_MODE_NUNITS (tmode);
24088 rtvec v = rtvec_alloc (n_elt);
24090 gcc_assert (VECTOR_MODE_P (tmode));
24091 gcc_assert (call_expr_nargs (exp) == n_elt);
24093 for (i = 0; i < n_elt; ++i)
24095 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24096 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24099 if (!target || !register_operand (target, tmode))
24100 target = gen_reg_rtx (tmode);
24102 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24103 return target;
24106 /* A subroutine of ix86_expand_builtin. These builtins are wrappers around
24107 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24108 had a language-level syntax for referencing vector elements. */
24110 static rtx
24111 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24113 enum machine_mode tmode, mode0;
24114 tree arg0, arg1;
24115 int elt;
24116 rtx op0;
24118 arg0 = CALL_EXPR_ARG (exp, 0);
24119 arg1 = CALL_EXPR_ARG (exp, 1);
24121 op0 = expand_normal (arg0);
24122 elt = get_element_number (TREE_TYPE (arg0), arg1);
24124 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24125 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24126 gcc_assert (VECTOR_MODE_P (mode0));
24128 op0 = force_reg (mode0, op0);
24130 if (optimize || !target || !register_operand (target, tmode))
24131 target = gen_reg_rtx (tmode);
24133 ix86_expand_vector_extract (true, target, op0, elt);
24135 return target;
24138 /* A subroutine of ix86_expand_builtin. These builtins are wrappers around
24139 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24140 a language-level syntax for referencing vector elements. */
24142 static rtx
24143 ix86_expand_vec_set_builtin (tree exp)
24145 enum machine_mode tmode, mode1;
24146 tree arg0, arg1, arg2;
24147 int elt;
24148 rtx op0, op1, target;
24150 arg0 = CALL_EXPR_ARG (exp, 0);
24151 arg1 = CALL_EXPR_ARG (exp, 1);
24152 arg2 = CALL_EXPR_ARG (exp, 2);
24154 tmode = TYPE_MODE (TREE_TYPE (arg0));
24155 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24156 gcc_assert (VECTOR_MODE_P (tmode));
24158 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24159 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24160 elt = get_element_number (TREE_TYPE (arg0), arg2);
24162 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24163 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24165 op0 = force_reg (tmode, op0);
24166 op1 = force_reg (mode1, op1);
24168 /* OP0 is the source of these builtin functions and shouldn't be
24169 modified. Create a copy, use it and return it as target. */
24170 target = gen_reg_rtx (tmode);
24171 emit_move_insn (target, op0);
24172 ix86_expand_vector_set (true, target, op1, elt);
24174 return target;
24177 /* Expand an expression EXP that calls a built-in function,
24178 with result going to TARGET if that's convenient
24179 (and in mode MODE if that's convenient).
24180 SUBTARGET may be used as the target for computing one of EXP's operands.
24181 IGNORE is nonzero if the value is to be ignored. */
24183 static rtx
24184 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24185 enum machine_mode mode ATTRIBUTE_UNUSED,
24186 int ignore ATTRIBUTE_UNUSED)
24188 const struct builtin_description *d;
24189 size_t i;
24190 enum insn_code icode;
24191 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24192 tree arg0, arg1, arg2;
24193 rtx op0, op1, op2, pat;
24194 enum machine_mode mode0, mode1, mode2;
24195 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24197 /* Determine whether the builtin function is available under the current ISA.
24198 Originally the builtin was not created if it wasn't applicable to the
24199 current ISA based on the command line switches. With function specific
24200 options, we need to check in the context of the function making the call
24201 whether it is supported. */
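/* Concretely (an assumed example, not from this file): calling
   __builtin_ia32_pcmpistri128 from a function compiled without -msse4.2
   and without a matching target attribute falls into the error below,
   roughly: "__builtin_ia32_pcmpistri128 needs isa option -msse4.2".  */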
24202 if (ix86_builtins_isa[fcode].isa
24203 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24205 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24206 NULL, NULL, false);
24208 if (!opts)
24209 error ("%qE needs unknown isa option", fndecl);
24210 else
24212 gcc_assert (opts != NULL);
24213 error ("%qE needs isa option %s", fndecl, opts);
24214 free (opts);
24216 return const0_rtx;
24219 switch (fcode)
24221 case IX86_BUILTIN_MASKMOVQ:
24222 case IX86_BUILTIN_MASKMOVDQU:
24223 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24224 ? CODE_FOR_mmx_maskmovq
24225 : CODE_FOR_sse2_maskmovdqu);
24226 /* Note the arg order is different from the operand order. */
24227 arg1 = CALL_EXPR_ARG (exp, 0);
24228 arg2 = CALL_EXPR_ARG (exp, 1);
24229 arg0 = CALL_EXPR_ARG (exp, 2);
24230 op0 = expand_normal (arg0);
24231 op1 = expand_normal (arg1);
24232 op2 = expand_normal (arg2);
24233 mode0 = insn_data[icode].operand[0].mode;
24234 mode1 = insn_data[icode].operand[1].mode;
24235 mode2 = insn_data[icode].operand[2].mode;
24237 op0 = force_reg (Pmode, op0);
24238 op0 = gen_rtx_MEM (mode1, op0);
24240 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24241 op0 = copy_to_mode_reg (mode0, op0);
24242 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24243 op1 = copy_to_mode_reg (mode1, op1);
24244 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24245 op2 = copy_to_mode_reg (mode2, op2);
24246 pat = GEN_FCN (icode) (op0, op1, op2);
24247 if (! pat)
24248 return 0;
24249 emit_insn (pat);
24250 return 0;
24252 case IX86_BUILTIN_LDMXCSR:
24253 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24254 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24255 emit_move_insn (target, op0);
24256 emit_insn (gen_sse_ldmxcsr (target));
24257 return 0;
24259 case IX86_BUILTIN_STMXCSR:
24260 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24261 emit_insn (gen_sse_stmxcsr (target));
24262 return copy_to_mode_reg (SImode, target);
24264 case IX86_BUILTIN_CLFLUSH:
24265 arg0 = CALL_EXPR_ARG (exp, 0);
24266 op0 = expand_normal (arg0);
24267 icode = CODE_FOR_sse2_clflush;
24268 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24269 op0 = copy_to_mode_reg (Pmode, op0);
24271 emit_insn (gen_sse2_clflush (op0));
24272 return 0;
24274 case IX86_BUILTIN_MONITOR:
24275 arg0 = CALL_EXPR_ARG (exp, 0);
24276 arg1 = CALL_EXPR_ARG (exp, 1);
24277 arg2 = CALL_EXPR_ARG (exp, 2);
24278 op0 = expand_normal (arg0);
24279 op1 = expand_normal (arg1);
24280 op2 = expand_normal (arg2);
24281 if (!REG_P (op0))
24282 op0 = copy_to_mode_reg (Pmode, op0);
24283 if (!REG_P (op1))
24284 op1 = copy_to_mode_reg (SImode, op1);
24285 if (!REG_P (op2))
24286 op2 = copy_to_mode_reg (SImode, op2);
24287 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24288 return 0;
24290 case IX86_BUILTIN_MWAIT:
24291 arg0 = CALL_EXPR_ARG (exp, 0);
24292 arg1 = CALL_EXPR_ARG (exp, 1);
24293 op0 = expand_normal (arg0);
24294 op1 = expand_normal (arg1);
24295 if (!REG_P (op0))
24296 op0 = copy_to_mode_reg (SImode, op0);
24297 if (!REG_P (op1))
24298 op1 = copy_to_mode_reg (SImode, op1);
24299 emit_insn (gen_sse3_mwait (op0, op1));
24300 return 0;
24302 case IX86_BUILTIN_VEC_INIT_V2SI:
24303 case IX86_BUILTIN_VEC_INIT_V4HI:
24304 case IX86_BUILTIN_VEC_INIT_V8QI:
24305 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24307 case IX86_BUILTIN_VEC_EXT_V2DF:
24308 case IX86_BUILTIN_VEC_EXT_V2DI:
24309 case IX86_BUILTIN_VEC_EXT_V4SF:
24310 case IX86_BUILTIN_VEC_EXT_V4SI:
24311 case IX86_BUILTIN_VEC_EXT_V8HI:
24312 case IX86_BUILTIN_VEC_EXT_V2SI:
24313 case IX86_BUILTIN_VEC_EXT_V4HI:
24314 case IX86_BUILTIN_VEC_EXT_V16QI:
24315 return ix86_expand_vec_ext_builtin (exp, target);
24317 case IX86_BUILTIN_VEC_SET_V2DI:
24318 case IX86_BUILTIN_VEC_SET_V4SF:
24319 case IX86_BUILTIN_VEC_SET_V4SI:
24320 case IX86_BUILTIN_VEC_SET_V8HI:
24321 case IX86_BUILTIN_VEC_SET_V4HI:
24322 case IX86_BUILTIN_VEC_SET_V16QI:
24323 return ix86_expand_vec_set_builtin (exp);
24325 case IX86_BUILTIN_VEC_PERM_V2DF:
24326 case IX86_BUILTIN_VEC_PERM_V4SF:
24327 case IX86_BUILTIN_VEC_PERM_V2DI:
24328 case IX86_BUILTIN_VEC_PERM_V4SI:
24329 case IX86_BUILTIN_VEC_PERM_V8HI:
24330 case IX86_BUILTIN_VEC_PERM_V16QI:
24331 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24332 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24333 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24334 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24335 case IX86_BUILTIN_VEC_PERM_V4DF:
24336 case IX86_BUILTIN_VEC_PERM_V8SF:
24337 return ix86_expand_vec_perm_builtin (exp);
24339 case IX86_BUILTIN_INFQ:
24340 case IX86_BUILTIN_HUGE_VALQ:
24342 REAL_VALUE_TYPE inf;
24343 rtx tmp;
24345 real_inf (&inf);
24346 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24348 tmp = validize_mem (force_const_mem (mode, tmp));
24350 if (target == 0)
24351 target = gen_reg_rtx (mode);
24353 emit_move_insn (target, tmp);
24354 return target;
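/* Illustration: __builtin_infq () takes the path above; a +Inf
   REAL_VALUE_TYPE is wrapped in a CONST_DOUBLE of the builtin's (TFmode)
   result mode, forced into the constant pool, and loaded into the target
   register, since 128-bit floats have no immediate form.  */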
24357 case IX86_BUILTIN_LLWPCB:
24358 arg0 = CALL_EXPR_ARG (exp, 0);
24359 op0 = expand_normal (arg0);
24360 icode = CODE_FOR_lwp_llwpcb;
24361 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24362 op0 = copy_to_mode_reg (Pmode, op0);
24363 emit_insn (gen_lwp_llwpcb (op0));
24364 return 0;
24366 case IX86_BUILTIN_SLWPCB:
24367 icode = CODE_FOR_lwp_slwpcb;
24368 if (!target
24369 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24370 target = gen_reg_rtx (Pmode);
24371 emit_insn (gen_lwp_slwpcb (target));
24372 return target;
24374 default:
24375 break;
24378 for (i = 0, d = bdesc_special_args;
24379 i < ARRAY_SIZE (bdesc_special_args);
24380 i++, d++)
24381 if (d->code == fcode)
24382 return ix86_expand_special_args_builtin (d, exp, target);
24384 for (i = 0, d = bdesc_args;
24385 i < ARRAY_SIZE (bdesc_args);
24386 i++, d++)
24387 if (d->code == fcode)
24388 switch (fcode)
24390 case IX86_BUILTIN_FABSQ:
24391 case IX86_BUILTIN_COPYSIGNQ:
24392 if (!TARGET_SSE2)
24393 /* Emit a normal call if SSE2 isn't available. */
24394 return expand_call (exp, target, ignore);
24395 default:
24396 return ix86_expand_args_builtin (d, exp, target);
24399 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24400 if (d->code == fcode)
24401 return ix86_expand_sse_comi (d, exp, target);
24403 for (i = 0, d = bdesc_pcmpestr;
24404 i < ARRAY_SIZE (bdesc_pcmpestr);
24405 i++, d++)
24406 if (d->code == fcode)
24407 return ix86_expand_sse_pcmpestr (d, exp, target);
24409 for (i = 0, d = bdesc_pcmpistr;
24410 i < ARRAY_SIZE (bdesc_pcmpistr);
24411 i++, d++)
24412 if (d->code == fcode)
24413 return ix86_expand_sse_pcmpistr (d, exp, target);
24415 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24416 if (d->code == fcode)
24417 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24418 (enum ix86_builtin_func_type)
24419 d->flag, d->comparison);
24421 gcc_unreachable ();
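/* Sketch of the dispatch above: builtins not handled by the explicit switch
   are looked up, in order, in bdesc_special_args, bdesc_args, bdesc_comi,
   bdesc_pcmpestr, bdesc_pcmpistr and bdesc_multi_arg; every remaining
   function code must appear in one of these tables, hence the
   gcc_unreachable above.  */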
24424 /* Returns a function decl for a vectorized version of the builtin function
24425 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24426 if it is not available. */
24428 static tree
24429 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
24430 tree type_in)
24432 enum machine_mode in_mode, out_mode;
24433 int in_n, out_n;
24434 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
24436 if (TREE_CODE (type_out) != VECTOR_TYPE
24437 || TREE_CODE (type_in) != VECTOR_TYPE
24438 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
24439 return NULL_TREE;
24441 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24442 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24443 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24444 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24446 switch (fn)
24448 case BUILT_IN_SQRT:
24449 if (out_mode == DFmode && out_n == 2
24450 && in_mode == DFmode && in_n == 2)
24451 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24452 break;
24454 case BUILT_IN_SQRTF:
24455 if (out_mode == SFmode && out_n == 4
24456 && in_mode == SFmode && in_n == 4)
24457 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24458 break;
24460 case BUILT_IN_LRINT:
24461 if (out_mode == SImode && out_n == 4
24462 && in_mode == DFmode && in_n == 2)
24463 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24464 break;
24466 case BUILT_IN_LRINTF:
24467 if (out_mode == SImode && out_n == 4
24468 && in_mode == SFmode && in_n == 4)
24469 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24470 break;
24472 case BUILT_IN_COPYSIGN:
24473 if (out_mode == DFmode && out_n == 2
24474 && in_mode == DFmode && in_n == 2)
24475 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24476 break;
24478 case BUILT_IN_COPYSIGNF:
24479 if (out_mode == SFmode && out_n == 4
24480 && in_mode == SFmode && in_n == 4)
24481 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24482 break;
24484 default:
24488 /* Dispatch to a handler for a vectorization library. */
24489 if (ix86_veclib_handler)
24490 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24491 type_in);
24493 return NULL_TREE;
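/* Worked example (assuming SSE2 and auto-vectorization): a loop calling
   sqrt () on doubles queries BUILT_IN_SQRT with V2DF input and output and
   gets IX86_BUILTIN_SQRTPD back; the sqrtf () case with V4SF maps to
   IX86_BUILTIN_SQRTPS_NR, and anything not listed falls through to the
   vectorization library handler, if one was selected.  */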
24496 /* Handler for an SVML-style interface to
24497 a library with vectorized intrinsics. */
24499 static tree
24500 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24502 char name[20];
24503 tree fntype, new_fndecl, args;
24504 unsigned arity;
24505 const char *bname;
24506 enum machine_mode el_mode, in_mode;
24507 int n, in_n;
24509 /* The SVML is suitable for unsafe math only. */
24510 if (!flag_unsafe_math_optimizations)
24511 return NULL_TREE;
24513 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24514 n = TYPE_VECTOR_SUBPARTS (type_out);
24515 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24516 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24517 if (el_mode != in_mode
24518 || n != in_n)
24519 return NULL_TREE;
24521 switch (fn)
24523 case BUILT_IN_EXP:
24524 case BUILT_IN_LOG:
24525 case BUILT_IN_LOG10:
24526 case BUILT_IN_POW:
24527 case BUILT_IN_TANH:
24528 case BUILT_IN_TAN:
24529 case BUILT_IN_ATAN:
24530 case BUILT_IN_ATAN2:
24531 case BUILT_IN_ATANH:
24532 case BUILT_IN_CBRT:
24533 case BUILT_IN_SINH:
24534 case BUILT_IN_SIN:
24535 case BUILT_IN_ASINH:
24536 case BUILT_IN_ASIN:
24537 case BUILT_IN_COSH:
24538 case BUILT_IN_COS:
24539 case BUILT_IN_ACOSH:
24540 case BUILT_IN_ACOS:
24541 if (el_mode != DFmode || n != 2)
24542 return NULL_TREE;
24543 break;
24545 case BUILT_IN_EXPF:
24546 case BUILT_IN_LOGF:
24547 case BUILT_IN_LOG10F:
24548 case BUILT_IN_POWF:
24549 case BUILT_IN_TANHF:
24550 case BUILT_IN_TANF:
24551 case BUILT_IN_ATANF:
24552 case BUILT_IN_ATAN2F:
24553 case BUILT_IN_ATANHF:
24554 case BUILT_IN_CBRTF:
24555 case BUILT_IN_SINHF:
24556 case BUILT_IN_SINF:
24557 case BUILT_IN_ASINHF:
24558 case BUILT_IN_ASINF:
24559 case BUILT_IN_COSHF:
24560 case BUILT_IN_COSF:
24561 case BUILT_IN_ACOSHF:
24562 case BUILT_IN_ACOSF:
24563 if (el_mode != SFmode || n != 4)
24564 return NULL_TREE;
24565 break;
24567 default:
24568 return NULL_TREE;
24571 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24573 if (fn == BUILT_IN_LOGF)
24574 strcpy (name, "vmlsLn4");
24575 else if (fn == BUILT_IN_LOG)
24576 strcpy (name, "vmldLn2");
24577 else if (n == 4)
24579 sprintf (name, "vmls%s", bname+10);
24580 name[strlen (name)-1] = '4';
24582 else
24583 sprintf (name, "vmld%s2", bname+10);
24585 /* Convert the first letter of the routine name to uppercase. */
24586 name[4] &= ~0x20;
24588 arity = 0;
24589 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24590 args = TREE_CHAIN (args))
24591 arity++;
24593 if (arity == 1)
24594 fntype = build_function_type_list (type_out, type_in, NULL);
24595 else
24596 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24598 /* Build a function declaration for the vectorized function. */
24599 new_fndecl = build_decl (BUILTINS_LOCATION,
24600 FUNCTION_DECL, get_identifier (name), fntype);
24601 TREE_PUBLIC (new_fndecl) = 1;
24602 DECL_EXTERNAL (new_fndecl) = 1;
24603 DECL_IS_NOVOPS (new_fndecl) = 1;
24604 TREE_READONLY (new_fndecl) = 1;
24606 return new_fndecl;
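/* Example of the SVML mangling above (illustrative): for BUILT_IN_SINF with
   four SFmode lanes, bname is "__builtin_sinf", so the code first builds
   "vmlssinf", overwrites the final character with '4' and upper-cases
   name[4], yielding "vmlsSin4"; BUILT_IN_LOGF and BUILT_IN_LOG are
   special-cased to "vmlsLn4" and "vmldLn2".  */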
24609 /* Handler for an ACML-style interface to
24610 a library with vectorized intrinsics. */
24612 static tree
24613 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24615 char name[20] = "__vr.._";
24616 tree fntype, new_fndecl, args;
24617 unsigned arity;
24618 const char *bname;
24619 enum machine_mode el_mode, in_mode;
24620 int n, in_n;
24622 /* The ACML is 64-bit only and suitable for unsafe math only, as
24623 it does not correctly support parts of IEEE with the required
24624 precision, such as denormals. */
24625 if (!TARGET_64BIT
24626 || !flag_unsafe_math_optimizations)
24627 return NULL_TREE;
24629 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24630 n = TYPE_VECTOR_SUBPARTS (type_out);
24631 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24632 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24633 if (el_mode != in_mode
24634 || n != in_n)
24635 return NULL_TREE;
24637 switch (fn)
24639 case BUILT_IN_SIN:
24640 case BUILT_IN_COS:
24641 case BUILT_IN_EXP:
24642 case BUILT_IN_LOG:
24643 case BUILT_IN_LOG2:
24644 case BUILT_IN_LOG10:
24645 name[4] = 'd';
24646 name[5] = '2';
24647 if (el_mode != DFmode
24648 || n != 2)
24649 return NULL_TREE;
24650 break;
24652 case BUILT_IN_SINF:
24653 case BUILT_IN_COSF:
24654 case BUILT_IN_EXPF:
24655 case BUILT_IN_POWF:
24656 case BUILT_IN_LOGF:
24657 case BUILT_IN_LOG2F:
24658 case BUILT_IN_LOG10F:
24659 name[4] = 's';
24660 name[5] = '4';
24661 if (el_mode != SFmode
24662 || n != 4)
24663 return NULL_TREE;
24664 break;
24666 default:
24667 return NULL_TREE;
24670 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24671 sprintf (name + 7, "%s", bname+10);
24673 arity = 0;
24674 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24675 args = TREE_CHAIN (args))
24676 arity++;
24678 if (arity == 1)
24679 fntype = build_function_type_list (type_out, type_in, NULL);
24680 else
24681 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24683 /* Build a function declaration for the vectorized function. */
24684 new_fndecl = build_decl (BUILTINS_LOCATION,
24685 FUNCTION_DECL, get_identifier (name), fntype);
24686 TREE_PUBLIC (new_fndecl) = 1;
24687 DECL_EXTERNAL (new_fndecl) = 1;
24688 DECL_IS_NOVOPS (new_fndecl) = 1;
24689 TREE_READONLY (new_fndecl) = 1;
24691 return new_fndecl;
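/* Example of the ACML naming above (illustrative): for BUILT_IN_SINF with
   four SFmode lanes the template "__vr.._" becomes "__vrs4_" and the
   "__builtin_" prefix is skipped when appending, giving "__vrs4_sinf";
   the two-lane DFmode sin becomes "__vrd2_sin".  */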
24695 /* Returns a decl of a function that implements conversion of an integer vector
24696 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
24697 are the types involved when converting according to CODE.
24698 Return NULL_TREE if it is not available. */
24700 static tree
24701 ix86_vectorize_builtin_conversion (unsigned int code,
24702 tree dest_type, tree src_type)
24704 if (! TARGET_SSE2)
24705 return NULL_TREE;
24707 switch (code)
24709 case FLOAT_EXPR:
24710 switch (TYPE_MODE (src_type))
24712 case V4SImode:
24713 switch (TYPE_MODE (dest_type))
24715 case V4SFmode:
24716 return (TYPE_UNSIGNED (src_type)
24717 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24718 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24719 case V4DFmode:
24720 return (TYPE_UNSIGNED (src_type)
24721 ? NULL_TREE
24722 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
24723 default:
24724 return NULL_TREE;
24726 break;
24727 case V8SImode:
24728 switch (TYPE_MODE (dest_type))
24730 case V8SFmode:
24731 return (TYPE_UNSIGNED (src_type)
24732 ? NULL_TREE
24733 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24734 default:
24735 return NULL_TREE;
24737 break;
24738 default:
24739 return NULL_TREE;
24742 case FIX_TRUNC_EXPR:
24743 switch (TYPE_MODE (dest_type))
24745 case V4SImode:
24746 switch (TYPE_MODE (src_type))
24748 case V4SFmode:
24749 return (TYPE_UNSIGNED (dest_type)
24750 ? NULL_TREE
24751 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
24752 case V4DFmode:
24753 return (TYPE_UNSIGNED (dest_type)
24754 ? NULL_TREE
24755 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
24756 default:
24757 return NULL_TREE;
24759 break;
24761 case V8SImode:
24762 switch (TYPE_MODE (src_type))
24764 case V8SFmode:
24765 return (TYPE_UNSIGNED (dest_type)
24766 ? NULL_TREE
24767 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
24768 default:
24769 return NULL_TREE;
24771 break;
24773 default:
24774 return NULL_TREE;
24777 default:
24778 return NULL_TREE;
24781 return NULL_TREE;
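/* Illustration of the conversion table above: a FLOAT_EXPR from signed
   V4SImode to V4SFmode returns the IX86_BUILTIN_CVTDQ2PS decl, the
   unsigned variant returns IX86_BUILTIN_CVTUDQ2PS, and a FIX_TRUNC_EXPR
   from V4SFmode to signed V4SImode returns IX86_BUILTIN_CVTTPS2DQ;
   combinations not listed get NULL_TREE and are not vectorized this way.  */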
24784 /* Returns a code for a target-specific builtin that implements
24785 reciprocal of the function, or NULL_TREE if not available. */
24787 static tree
24788 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24789 bool sqrt ATTRIBUTE_UNUSED)
24791 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24792 && flag_finite_math_only && !flag_trapping_math
24793 && flag_unsafe_math_optimizations))
24794 return NULL_TREE;
24796 if (md_fn)
24797 /* Machine dependent builtins. */
24798 switch (fn)
24800 /* Vectorized version of sqrt to rsqrt conversion. */
24801 case IX86_BUILTIN_SQRTPS_NR:
24802 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24804 default:
24805 return NULL_TREE;
24807 else
24808 /* Normal builtins. */
24809 switch (fn)
24811 /* Sqrt to rsqrt conversion. */
24812 case BUILT_IN_SQRTF:
24813 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24815 default:
24816 return NULL_TREE;
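/* Illustration: with SSE math and -ffast-math style flags (finite-only,
   non-trapping, unsafe math optimizations), sqrtf is mapped to
   IX86_BUILTIN_RSQRTF and the vectorized IX86_BUILTIN_SQRTPS_NR to
   IX86_BUILTIN_RSQRTPS_NR, letting reciprocal square roots be expanded
   with rsqrt-based sequences instead of a division and a sqrt.  */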
24820 /* Helper for avx_vpermilps256_operand et al. This is also used by
24821 the expansion functions to turn the parallel back into a mask.
24822 The return value is 0 for no match and the imm8+1 for a match. */
24825 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24827 unsigned i, nelt = GET_MODE_NUNITS (mode);
24828 unsigned mask = 0;
24829 unsigned char ipar[8];
24831 if (XVECLEN (par, 0) != (int) nelt)
24832 return 0;
24834 /* Validate that all of the elements are constants, and not totally
24835 out of range. Copy the data into an integral array to make the
24836 subsequent checks easier. */
24837 for (i = 0; i < nelt; ++i)
24839 rtx er = XVECEXP (par, 0, i);
24840 unsigned HOST_WIDE_INT ei;
24842 if (!CONST_INT_P (er))
24843 return 0;
24844 ei = INTVAL (er);
24845 if (ei >= nelt)
24846 return 0;
24847 ipar[i] = ei;
24850 switch (mode)
24852 case V4DFmode:
24853 /* In the 256-bit DFmode case, we can only move elements within
24854 a 128-bit lane. */
24855 for (i = 0; i < 2; ++i)
24857 if (ipar[i] >= 2)
24858 return 0;
24859 mask |= ipar[i] << i;
24861 for (i = 2; i < 4; ++i)
24863 if (ipar[i] < 2)
24864 return 0;
24865 mask |= (ipar[i] - 2) << i;
24867 break;
24869 case V8SFmode:
24870 /* In the 256-bit SFmode case, we have full freedom of movement
24871 within the low 128-bit lane, but the high 128-bit lane must
24872 mirror the exact same pattern. */
24873 for (i = 0; i < 4; ++i)
24874 if (ipar[i] + 4 != ipar[i + 4])
24875 return 0;
24876 nelt = 4;
24877 /* FALLTHRU */
24879 case V2DFmode:
24880 case V4SFmode:
24881 /* In the 128-bit case, we have full freedom in the placement of
24882 the elements from the source operand. */
24883 for (i = 0; i < nelt; ++i)
24884 mask |= ipar[i] << (i * (nelt / 2));
24885 break;
24887 default:
24888 gcc_unreachable ();
24891 /* Make sure success has a non-zero value by adding one. */
24892 return mask + 1;
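/* Worked example for the V4SFmode case above (illustrative): a parallel
   selecting elements {2, 1, 0, 3} builds mask = 2 + (1 << 2) + (0 << 4)
   + (3 << 6) = 0xc6, and the function returns 0xc7 (the imm8 plus one).  */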
24895 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
24896 the expansion functions to turn the parallel back into a mask.
24897 The return value is 0 for no match and the imm8+1 for a match. */
24900 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
24902 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
24903 unsigned mask = 0;
24904 unsigned char ipar[8];
24906 if (XVECLEN (par, 0) != (int) nelt)
24907 return 0;
24909 /* Validate that all of the elements are constants, and not totally
24910 out of range. Copy the data into an integral array to make the
24911 subsequent checks easier. */
24912 for (i = 0; i < nelt; ++i)
24914 rtx er = XVECEXP (par, 0, i);
24915 unsigned HOST_WIDE_INT ei;
24917 if (!CONST_INT_P (er))
24918 return 0;
24919 ei = INTVAL (er);
24920 if (ei >= 2 * nelt)
24921 return 0;
24922 ipar[i] = ei;
24925 /* Validate that each half of the permute selects consecutive elements. */
24926 for (i = 0; i < nelt2 - 1; ++i)
24927 if (ipar[i] + 1 != ipar[i + 1])
24928 return 0;
24929 for (i = nelt2; i < nelt - 1; ++i)
24930 if (ipar[i] + 1 != ipar[i + 1])
24931 return 0;
24933 /* Reconstruct the mask. */
24934 for (i = 0; i < 2; ++i)
24936 unsigned e = ipar[i * nelt2];
24937 if (e % nelt2)
24938 return 0;
24939 e /= nelt2;
24940 mask |= e << (i * 4);
24943 /* Make sure success has a non-zero value by adding one. */
24944 return mask + 1;
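/* Worked example for V8SFmode (illustrative): a parallel selecting
   elements {0,1,2,3, 12,13,14,15} passes the consecutive-halves check,
   the half starts are 0 and 12, so the lane numbers are 0/4 = 0 and
   12/4 = 3, giving mask = 0 | (3 << 4) = 0x30 and a return value of
   0x31.  */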
24948 /* Store OPERAND to the memory after reload is completed. This means
24949 that we can't easily use assign_stack_local. */
24951 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24953 rtx result;
24955 gcc_assert (reload_completed);
24956 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24958 result = gen_rtx_MEM (mode,
24959 gen_rtx_PLUS (Pmode,
24960 stack_pointer_rtx,
24961 GEN_INT (-RED_ZONE_SIZE)));
24962 emit_move_insn (result, operand);
24964 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24966 switch (mode)
24968 case HImode:
24969 case SImode:
24970 operand = gen_lowpart (DImode, operand);
24971 /* FALLTHRU */
24972 case DImode:
24973 emit_insn (
24974 gen_rtx_SET (VOIDmode,
24975 gen_rtx_MEM (DImode,
24976 gen_rtx_PRE_DEC (DImode,
24977 stack_pointer_rtx)),
24978 operand));
24979 break;
24980 default:
24981 gcc_unreachable ();
24983 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24985 else
24987 switch (mode)
24989 case DImode:
24991 rtx operands[2];
24992 split_di (&operand, 1, operands, operands + 1);
24993 emit_insn (
24994 gen_rtx_SET (VOIDmode,
24995 gen_rtx_MEM (SImode,
24996 gen_rtx_PRE_DEC (Pmode,
24997 stack_pointer_rtx)),
24998 operands[1]));
24999 emit_insn (
25000 gen_rtx_SET (VOIDmode,
25001 gen_rtx_MEM (SImode,
25002 gen_rtx_PRE_DEC (Pmode,
25003 stack_pointer_rtx)),
25004 operands[0]));
25006 break;
25007 case HImode:
25008 /* Store HImodes as SImodes. */
25009 operand = gen_lowpart (SImode, operand);
25010 /* FALLTHRU */
25011 case SImode:
25012 emit_insn (
25013 gen_rtx_SET (VOIDmode,
25014 gen_rtx_MEM (GET_MODE (operand),
25015 gen_rtx_PRE_DEC (SImode,
25016 stack_pointer_rtx)),
25017 operand));
25018 break;
25019 default:
25020 gcc_unreachable ();
25022 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25024 return result;
25027 /* Free operand from the memory. */
25028 void
25029 ix86_free_from_memory (enum machine_mode mode)
25031 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
25033 int size;
25035 if (mode == DImode || TARGET_64BIT)
25036 size = 8;
25037 else
25038 size = 4;
25039 /* Use LEA to deallocate stack space. In peephole2 it will be converted
25040 to a pop or add instruction if registers are available. */
25041 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
25042 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25043 GEN_INT (size))));
25047 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
25048 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
25049 same. */
25050 static const enum reg_class *
25051 i386_ira_cover_classes (void)
25053 static const enum reg_class sse_fpmath_classes[] = {
25054 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
25056 static const enum reg_class no_sse_fpmath_classes[] = {
25057 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
25060 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
25063 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25064 QImode must go into class Q_REGS.
25065 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
25066 movdf to do mem-to-mem moves through integer regs. */
25067 enum reg_class
25068 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25070 enum machine_mode mode = GET_MODE (x);
25072 /* We're only allowed to return a subclass of CLASS. Many of the
25073 following checks fail for NO_REGS, so eliminate that early. */
25074 if (regclass == NO_REGS)
25075 return NO_REGS;
25077 /* All classes can load zeros. */
25078 if (x == CONST0_RTX (mode))
25079 return regclass;
25081 /* Force constants into memory if we are loading a (nonzero) constant into
25082 an MMX or SSE register. This is because there are no MMX/SSE instructions
25083 to load from a constant. */
25084 if (CONSTANT_P (x)
25085 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25086 return NO_REGS;
25088 /* Prefer SSE regs only, if we can use them for math. */
25089 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25090 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25092 /* Floating-point constants need more complex checks. */
25093 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25095 /* General regs can load everything. */
25096 if (reg_class_subset_p (regclass, GENERAL_REGS))
25097 return regclass;
25099 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25100 zero above. We only want to wind up preferring 80387 registers if
25101 we plan on doing computation with them. */
25102 if (TARGET_80387
25103 && standard_80387_constant_p (x))
25105 /* Limit class to non-sse. */
25106 if (regclass == FLOAT_SSE_REGS)
25107 return FLOAT_REGS;
25108 if (regclass == FP_TOP_SSE_REGS)
25109 return FP_TOP_REG;
25110 if (regclass == FP_SECOND_SSE_REGS)
25111 return FP_SECOND_REG;
25112 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25113 return regclass;
25116 return NO_REGS;
25119 /* Generally when we see PLUS here, it's the function invariant
25120 (plus soft-fp const_int), which can only be computed into general
25121 regs. */
25122 if (GET_CODE (x) == PLUS)
25123 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25125 /* QImode constants are easy to load, but non-constant QImode data
25126 must go into Q_REGS. */
25127 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25129 if (reg_class_subset_p (regclass, Q_REGS))
25130 return regclass;
25131 if (reg_class_subset_p (Q_REGS, regclass))
25132 return Q_REGS;
25133 return NO_REGS;
25136 return regclass;
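/* Illustration: a nonzero constant headed for any class that may include
   SSE or MMX registers returns NO_REGS above and is forced into memory,
   since there is no MMX/SSE load-from-immediate; with x87 math, a standard
   387 constant such as 1.0 destined for FLOAT_REGS keeps its class and can
   be materialized with fld1.  */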
25139 /* Discourage putting floating-point values in SSE registers unless
25140 SSE math is being used, and likewise for the 387 registers. */
25141 enum reg_class
25142 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25144 enum machine_mode mode = GET_MODE (x);
25146 /* Restrict the output reload class to the register bank that we are doing
25147 math on. If we would like not to return a subset of CLASS, reject this
25148 alternative: if reload cannot do this, it will still use its choice. */
25149 mode = GET_MODE (x);
25150 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25151 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25153 if (X87_FLOAT_MODE_P (mode))
25155 if (regclass == FP_TOP_SSE_REGS)
25156 return FP_TOP_REG;
25157 else if (regclass == FP_SECOND_SSE_REGS)
25158 return FP_SECOND_REG;
25159 else
25160 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25163 return regclass;
25166 static enum reg_class
25167 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25168 enum machine_mode mode,
25169 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25171 /* QImode spills from non-QI registers require
25172 intermediate register on 32bit targets. */
25173 if (!in_p && mode == QImode && !TARGET_64BIT
25174 && (rclass == GENERAL_REGS
25175 || rclass == LEGACY_REGS
25176 || rclass == INDEX_REGS))
25178 int regno;
25180 if (REG_P (x))
25181 regno = REGNO (x);
25182 else
25183 regno = -1;
25185 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25186 regno = true_regnum (x);
25188 /* Return Q_REGS if the operand is in memory. */
25189 if (regno == -1)
25190 return Q_REGS;
25193 return NO_REGS;
25196 /* If we are copying between general and FP registers, we need a memory
25197 location. The same is true for SSE and MMX registers.
25199 To optimize register_move_cost performance, allow inline variant.
25201 The macro can't work reliably when one of the CLASSES is class containing
25202 registers from multiple units (SSE, MMX, integer). We avoid this by never
25203 combining those units in single alternative in the machine description.
25204 Ensure that this constraint holds to avoid unexpected surprises.
25206 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25207 enforce these sanity checks. */
25209 static inline int
25210 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25211 enum machine_mode mode, int strict)
25213 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25214 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25215 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25216 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25217 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25218 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25220 gcc_assert (!strict);
25221 return true;
25224 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25225 return true;
25227 /* ??? This is a lie. We do have moves between mmx/general, and between
25228 mmx/sse2. But by saying we need secondary memory we discourage the
25229 register allocator from using the mmx registers unless needed. */
25230 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25231 return true;
25233 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25235 /* SSE1 doesn't have any direct moves from other classes. */
25236 if (!TARGET_SSE2)
25237 return true;
25239 /* If the target says that inter-unit moves are more expensive
25240 than moving through memory, then don't generate them. */
25241 if (!TARGET_INTER_UNIT_MOVES)
25242 return true;
25244 /* Between SSE and general, we have moves no larger than word size. */
25245 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25246 return true;
25249 return false;
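/* Illustration: copying a DImode value between SSE_REGS and GENERAL_REGS on
   a 32-bit target needs secondary memory even with SSE2 and inter-unit
   moves enabled, because GET_MODE_SIZE (DImode) exceeds UNITS_PER_WORD;
   likewise any MMX <-> non-MMX copy claims to need memory, as the comment
   above explains.  */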
25253 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25254 enum machine_mode mode, int strict)
25256 return inline_secondary_memory_needed (class1, class2, mode, strict);
25259 /* Return true if the registers in CLASS cannot represent the change from
25260 modes FROM to TO. */
25262 bool
25263 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25264 enum reg_class regclass)
25266 if (from == to)
25267 return false;
25269 /* x87 registers can't do subreg at all, as all values are reformatted
25270 to extended precision. */
25271 if (MAYBE_FLOAT_CLASS_P (regclass))
25272 return true;
25274 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25276 /* Vector registers do not support QI or HImode loads. If we don't
25277 disallow a change to these modes, reload will assume it's ok to
25278 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25279 the vec_dupv4hi pattern. */
25280 if (GET_MODE_SIZE (from) < 4)
25281 return true;
25283 /* Vector registers do not support subreg with nonzero offsets, which
25284 are otherwise valid for integer registers. Since we can't see
25285 whether we have a nonzero offset from here, prohibit all
25286 nonparadoxical subregs changing size. */
25287 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25288 return true;
25291 return false;
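/* Illustration: for SSE or MMX classes a change from HImode is rejected
   (GET_MODE_SIZE < 4), so reload cannot drop the subreg in
   (subreg:SI (reg:HI 100) 0), and a size-reducing change such as
   V4SImode -> SImode is rejected as well; x87 classes reject every mode
   change.  */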
25294 /* Return the cost of moving data of mode M between a
25295 register and memory. A value of 2 is the default; this cost is
25296 relative to those in `REGISTER_MOVE_COST'.
25298 This function is used extensively by register_move_cost, which is used to
25299 build tables at startup. Make it inline in this case.
25300 When IN is 2, return the maximum of the in and out move costs.
25302 If moving between registers and memory is more expensive than
25303 between two registers, you should define this macro to express the
25304 relative cost.
25306 Also model the increased cost of moving QImode registers in
25307 non-Q_REGS classes.
25309 static inline int
25310 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25311 int in)
25313 int cost;
25314 if (FLOAT_CLASS_P (regclass))
25316 int index;
25317 switch (mode)
25319 case SFmode:
25320 index = 0;
25321 break;
25322 case DFmode:
25323 index = 1;
25324 break;
25325 case XFmode:
25326 index = 2;
25327 break;
25328 default:
25329 return 100;
25331 if (in == 2)
25332 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25333 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25335 if (SSE_CLASS_P (regclass))
25337 int index;
25338 switch (GET_MODE_SIZE (mode))
25340 case 4:
25341 index = 0;
25342 break;
25343 case 8:
25344 index = 1;
25345 break;
25346 case 16:
25347 index = 2;
25348 break;
25349 default:
25350 return 100;
25352 if (in == 2)
25353 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25354 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25356 if (MMX_CLASS_P (regclass))
25358 int index;
25359 switch (GET_MODE_SIZE (mode))
25361 case 4:
25362 index = 0;
25363 break;
25364 case 8:
25365 index = 1;
25366 break;
25367 default:
25368 return 100;
25370 if (in == 2)
25371 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25372 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25374 switch (GET_MODE_SIZE (mode))
25376 case 1:
25377 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25379 if (!in)
25380 return ix86_cost->int_store[0];
25381 if (TARGET_PARTIAL_REG_DEPENDENCY
25382 && optimize_function_for_speed_p (cfun))
25383 cost = ix86_cost->movzbl_load;
25384 else
25385 cost = ix86_cost->int_load[0];
25386 if (in == 2)
25387 return MAX (cost, ix86_cost->int_store[0]);
25388 return cost;
25390 else
25392 if (in == 2)
25393 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25394 if (in)
25395 return ix86_cost->movzbl_load;
25396 else
25397 return ix86_cost->int_store[0] + 4;
25399 break;
25400 case 2:
25401 if (in == 2)
25402 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25403 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25404 default:
25405 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
25406 if (mode == TFmode)
25407 mode = XFmode;
25408 if (in == 2)
25409 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25410 else if (in)
25411 cost = ix86_cost->int_load[2];
25412 else
25413 cost = ix86_cost->int_store[2];
25414 return (cost * (((int) GET_MODE_SIZE (mode)
25415 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25420 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25422 return inline_memory_move_cost (mode, regclass, in);
25426 /* Return the cost of moving data from a register in class CLASS1 to
25427 one in class CLASS2.
25429 It is not required that the cost always equal 2 when FROM is the same as TO;
25430 on some machines it is expensive to move between registers if they are not
25431 general registers. */
25434 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25435 enum reg_class class2)
25437 /* In case we require secondary memory, compute cost of the store followed
25438 by load. In order to avoid bad register allocation choices, we need
25439 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25441 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25443 int cost = 1;
25445 cost += inline_memory_move_cost (mode, class1, 2);
25446 cost += inline_memory_move_cost (mode, class2, 2);
25448 /* In case of copying from a general purpose register we may emit multiple
25449 stores followed by a single load, causing a memory size mismatch stall.
25450 Count this as an arbitrarily high cost of 20. */
25451 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25452 cost += 20;
25454 /* In the case of FP/MMX moves, the registers actually overlap, and we
25455 have to switch modes in order to treat them differently. */
25456 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25457 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25458 cost += 20;
25460 return cost;
25463 /* Moves between SSE/MMX and integer unit are expensive. */
25464 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25465 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25467 /* ??? By keeping the returned value relatively high, we limit the number
25468 of moves between integer and MMX/SSE registers for all targets.
25469 Additionally, a high value prevents a problem with x86_modes_tieable_p(),
25470 where integer modes in MMX/SSE registers are not tieable
25471 because of missing QImode and HImode moves to, from or between
25472 MMX/SSE registers. */
25473 return MAX (8, ix86_cost->mmxsse_to_integer);
25475 if (MAYBE_FLOAT_CLASS_P (class1))
25476 return ix86_cost->fp_move;
25477 if (MAYBE_SSE_CLASS_P (class1))
25478 return ix86_cost->sse_move;
25479 if (MAYBE_MMX_CLASS_P (class1))
25480 return ix86_cost->mmx_move;
25481 return 2;
25484 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25486 bool
25487 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25489 /* Only the flags registers can hold CCmode values, and they can hold nothing else. */
25490 if (CC_REGNO_P (regno))
25491 return GET_MODE_CLASS (mode) == MODE_CC;
25492 if (GET_MODE_CLASS (mode) == MODE_CC
25493 || GET_MODE_CLASS (mode) == MODE_RANDOM
25494 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25495 return 0;
25496 if (FP_REGNO_P (regno))
25497 return VALID_FP_MODE_P (mode);
25498 if (SSE_REGNO_P (regno))
25500 /* We implement the move patterns for all vector modes into and
25501 out of SSE registers, even when no operation instructions
25502 are available. OImode move is available only when AVX is
25503 enabled. */
25504 return ((TARGET_AVX && mode == OImode)
25505 || VALID_AVX256_REG_MODE (mode)
25506 || VALID_SSE_REG_MODE (mode)
25507 || VALID_SSE2_REG_MODE (mode)
25508 || VALID_MMX_REG_MODE (mode)
25509 || VALID_MMX_REG_MODE_3DNOW (mode));
25511 if (MMX_REGNO_P (regno))
25513 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25514 so if the register is available at all, then we can move data of
25515 the given mode into or out of it. */
25516 return (VALID_MMX_REG_MODE (mode)
25517 || VALID_MMX_REG_MODE_3DNOW (mode));
25520 if (mode == QImode)
25522 /* Take care with QImode values: they can live in non-QI regs,
25523 but then they do cause partial register stalls. */
25524 if (regno <= BX_REG || TARGET_64BIT)
25525 return 1;
25526 if (!TARGET_PARTIAL_REG_STALL)
25527 return 1;
25528 return reload_in_progress || reload_completed;
25530 /* We handle both integer and floats in the general purpose registers. */
25531 else if (VALID_INT_MODE_P (mode))
25532 return 1;
25533 else if (VALID_FP_MODE_P (mode))
25534 return 1;
25535 else if (VALID_DFP_MODE_P (mode))
25536 return 1;
25537 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25538 on to use that value in smaller contexts, this can easily force a
25539 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25540 supporting DImode, allow it. */
25541 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25542 return 1;
25544 return 0;
25547 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25548 tieable integer mode. */
25550 static bool
25551 ix86_tieable_integer_mode_p (enum machine_mode mode)
25553 switch (mode)
25555 case HImode:
25556 case SImode:
25557 return true;
25559 case QImode:
25560 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25562 case DImode:
25563 return TARGET_64BIT;
25565 default:
25566 return false;
25570 /* Return true if MODE1 is accessible in a register that can hold MODE2
25571 without copying. That is, all register classes that can hold MODE2
25572 can also hold MODE1. */
25574 bool
25575 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25577 if (mode1 == mode2)
25578 return true;
25580 if (ix86_tieable_integer_mode_p (mode1)
25581 && ix86_tieable_integer_mode_p (mode2))
25582 return true;
25584 /* MODE2 being XFmode implies fp stack or general regs, which means we
25585 can tie any smaller floating point modes to it. Note that we do not
25586 tie this with TFmode. */
25587 if (mode2 == XFmode)
25588 return mode1 == SFmode || mode1 == DFmode;
25590 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25591 that we can tie it with SFmode. */
25592 if (mode2 == DFmode)
25593 return mode1 == SFmode;
25595 /* If MODE2 is only appropriate for an SSE register, then tie with
25596 any other mode acceptable to SSE registers. */
25597 if (GET_MODE_SIZE (mode2) == 16
25598 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25599 return (GET_MODE_SIZE (mode1) == 16
25600 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25602 /* If MODE2 is appropriate for an MMX register, then tie
25603 with any other mode acceptable to MMX registers. */
25604 if (GET_MODE_SIZE (mode2) == 8
25605 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25606 return (GET_MODE_SIZE (mode1) == 8
25607 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25609 return false;
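/* Illustration: SFmode is tieable with DFmode and with XFmode, SImode with
   HImode via the tieable-integer check, and two 16-byte vector modes such
   as V4SFmode and V2DFmode tie with each other through the SSE test
   above.  */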
25612 /* Compute a (partial) cost for rtx X. Return true if the complete
25613 cost has been computed, and false if subexpressions should be
25614 scanned. In either case, *TOTAL contains the cost result. */
25616 static bool
25617 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25619 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25620 enum machine_mode mode = GET_MODE (x);
25621 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25623 switch (code)
25625 case CONST_INT:
25626 case CONST:
25627 case LABEL_REF:
25628 case SYMBOL_REF:
25629 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25630 *total = 3;
25631 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25632 *total = 2;
25633 else if (flag_pic && SYMBOLIC_CONST (x)
25634 && (!TARGET_64BIT
25635 || (GET_CODE (x) != LABEL_REF
25636 && (GET_CODE (x) != SYMBOL_REF
25637 || !SYMBOL_REF_LOCAL_P (x)))))
25638 *total = 1;
25639 else
25640 *total = 0;
25641 return true;
25643 case CONST_DOUBLE:
25644 if (mode == VOIDmode)
25645 *total = 0;
25646 else
25647 switch (standard_80387_constant_p (x))
25649 case 1: /* 0.0 */
25650 *total = 1;
25651 break;
25652 default: /* Other constants */
25653 *total = 2;
25654 break;
25655 case 0:
25656 case -1:
25657 /* Start with (MEM (SYMBOL_REF)), since that's where
25658 it'll probably end up. Add a penalty for size. */
25659 *total = (COSTS_N_INSNS (1)
25660 + (flag_pic != 0 && !TARGET_64BIT)
25661 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25662 break;
25664 return true;
25666 case ZERO_EXTEND:
25667 /* The zero extension is often completely free on x86_64, so make
25668 it as cheap as possible. */
25669 if (TARGET_64BIT && mode == DImode
25670 && GET_MODE (XEXP (x, 0)) == SImode)
25671 *total = 1;
25672 else if (TARGET_ZERO_EXTEND_WITH_AND)
25673 *total = cost->add;
25674 else
25675 *total = cost->movzx;
25676 return false;
25678 case SIGN_EXTEND:
25679 *total = cost->movsx;
25680 return false;
25682 case ASHIFT:
25683 if (CONST_INT_P (XEXP (x, 1))
25684 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25686 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25687 if (value == 1)
25689 *total = cost->add;
25690 return false;
25692 if ((value == 2 || value == 3)
25693 && cost->lea <= cost->shift_const)
25695 *total = cost->lea;
25696 return false;
25699 /* FALLTHRU */
25701 case ROTATE:
25702 case ASHIFTRT:
25703 case LSHIFTRT:
25704 case ROTATERT:
25705 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25707 if (CONST_INT_P (XEXP (x, 1)))
25709 if (INTVAL (XEXP (x, 1)) > 32)
25710 *total = cost->shift_const + COSTS_N_INSNS (2);
25711 else
25712 *total = cost->shift_const * 2;
25714 else
25716 if (GET_CODE (XEXP (x, 1)) == AND)
25717 *total = cost->shift_var * 2;
25718 else
25719 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25722 else
25724 if (CONST_INT_P (XEXP (x, 1)))
25725 *total = cost->shift_const;
25726 else
25727 *total = cost->shift_var;
25729 return false;
25731 case MULT:
25732 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25734 /* ??? SSE scalar cost should be used here. */
25735 *total = cost->fmul;
25736 return false;
25738 else if (X87_FLOAT_MODE_P (mode))
25740 *total = cost->fmul;
25741 return false;
25743 else if (FLOAT_MODE_P (mode))
25745 /* ??? SSE vector cost should be used here. */
25746 *total = cost->fmul;
25747 return false;
25749 else
25751 rtx op0 = XEXP (x, 0);
25752 rtx op1 = XEXP (x, 1);
25753 int nbits;
25754 if (CONST_INT_P (XEXP (x, 1)))
25756 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25757 for (nbits = 0; value != 0; value &= value - 1)
25758 nbits++;
25760 else
25761 /* This is arbitrary. */
25762 nbits = 7;
25764 /* Compute costs correctly for widening multiplication. */
25765 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25766 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25767 == GET_MODE_SIZE (mode))
25769 int is_mulwiden = 0;
25770 enum machine_mode inner_mode = GET_MODE (op0);
25772 if (GET_CODE (op0) == GET_CODE (op1))
25773 is_mulwiden = 1, op1 = XEXP (op1, 0);
25774 else if (CONST_INT_P (op1))
25776 if (GET_CODE (op0) == SIGN_EXTEND)
25777 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25778 == INTVAL (op1);
25779 else
25780 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25783 if (is_mulwiden)
25784 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25787 *total = (cost->mult_init[MODE_INDEX (mode)]
25788 + nbits * cost->mult_bit
25789 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
25791 return true;
25794 case DIV:
25795 case UDIV:
25796 case MOD:
25797 case UMOD:
25798 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25799 /* ??? SSE cost should be used here. */
25800 *total = cost->fdiv;
25801 else if (X87_FLOAT_MODE_P (mode))
25802 *total = cost->fdiv;
25803 else if (FLOAT_MODE_P (mode))
25804 /* ??? SSE vector cost should be used here. */
25805 *total = cost->fdiv;
25806 else
25807 *total = cost->divide[MODE_INDEX (mode)];
25808 return false;
25810 case PLUS:
25811 if (GET_MODE_CLASS (mode) == MODE_INT
25812 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25814 if (GET_CODE (XEXP (x, 0)) == PLUS
25815 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25816 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25817 && CONSTANT_P (XEXP (x, 1)))
25819 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25820 if (val == 2 || val == 4 || val == 8)
25822 *total = cost->lea;
25823 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25824 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25825 outer_code, speed);
25826 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25827 return true;
25830 else if (GET_CODE (XEXP (x, 0)) == MULT
25831 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25833 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25834 if (val == 2 || val == 4 || val == 8)
25836 *total = cost->lea;
25837 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25838 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25839 return true;
25842 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25844 *total = cost->lea;
25845 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25846 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25847 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25848 return true;
25851 /* FALLTHRU */
25853 case MINUS:
25854 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25856 /* ??? SSE cost should be used here. */
25857 *total = cost->fadd;
25858 return false;
25860 else if (X87_FLOAT_MODE_P (mode))
25862 *total = cost->fadd;
25863 return false;
25865 else if (FLOAT_MODE_P (mode))
25867 /* ??? SSE vector cost should be used here. */
25868 *total = cost->fadd;
25869 return false;
25871 /* FALLTHRU */
25873 case AND:
25874 case IOR:
25875 case XOR:
25876 if (!TARGET_64BIT && mode == DImode)
25878 *total = (cost->add * 2
25879 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25880 << (GET_MODE (XEXP (x, 0)) != DImode))
25881 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25882 << (GET_MODE (XEXP (x, 1)) != DImode)));
25883 return true;
25885 /* FALLTHRU */
25887 case NEG:
25888 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25890 /* ??? SSE cost should be used here. */
25891 *total = cost->fchs;
25892 return false;
25894 else if (X87_FLOAT_MODE_P (mode))
25896 *total = cost->fchs;
25897 return false;
25899 else if (FLOAT_MODE_P (mode))
25901 /* ??? SSE vector cost should be used here. */
25902 *total = cost->fchs;
25903 return false;
25905 /* FALLTHRU */
25907 case NOT:
25908 if (!TARGET_64BIT && mode == DImode)
25909 *total = cost->add * 2;
25910 else
25911 *total = cost->add;
25912 return false;
25914 case COMPARE:
25915 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25916 && XEXP (XEXP (x, 0), 1) == const1_rtx
25917 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25918 && XEXP (x, 1) == const0_rtx)
25920 /* This kind of construct is implemented using test[bwl].
25921 Treat it as if we had an AND. */
25922 *total = (cost->add
25923 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25924 + rtx_cost (const1_rtx, outer_code, speed));
25925 return true;
25927 return false;
25929 case FLOAT_EXTEND:
25930 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25931 *total = 0;
25932 return false;
25934 case ABS:
25935 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25936 /* ??? SSE cost should be used here. */
25937 *total = cost->fabs;
25938 else if (X87_FLOAT_MODE_P (mode))
25939 *total = cost->fabs;
25940 else if (FLOAT_MODE_P (mode))
25941 /* ??? SSE vector cost should be used here. */
25942 *total = cost->fabs;
25943 return false;
25945 case SQRT:
25946 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25947 /* ??? SSE cost should be used here. */
25948 *total = cost->fsqrt;
25949 else if (X87_FLOAT_MODE_P (mode))
25950 *total = cost->fsqrt;
25951 else if (FLOAT_MODE_P (mode))
25952 /* ??? SSE vector cost should be used here. */
25953 *total = cost->fsqrt;
25954 return false;
25956 case UNSPEC:
25957 if (XINT (x, 1) == UNSPEC_TP)
25958 *total = 0;
25959 return false;
25961 case VEC_SELECT:
25962 case VEC_CONCAT:
25963 case VEC_MERGE:
25964 case VEC_DUPLICATE:
25965 /* ??? Assume all of these vector manipulation patterns are
25966 recognizable. In which case they all pretty much have the
25967 same cost. */
25968 *total = COSTS_N_INSNS (1);
25969 return true;
25971 default:
25972 return false;
25976 #if TARGET_MACHO
25978 static int current_machopic_label_num;
25980 /* Given a symbol name and its associated stub, write out the
25981 definition of the stub. */
25983 void
25984 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25986 unsigned int length;
25987 char *binder_name, *symbol_name, lazy_ptr_name[32];
25988 int label = ++current_machopic_label_num;
25990 /* For 64-bit we shouldn't get here. */
25991 gcc_assert (!TARGET_64BIT);
25993 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25994 symb = (*targetm.strip_name_encoding) (symb);
25996 length = strlen (stub);
25997 binder_name = XALLOCAVEC (char, length + 32);
25998 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26000 length = strlen (symb);
26001 symbol_name = XALLOCAVEC (char, length + 32);
26002 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26004 sprintf (lazy_ptr_name, "L%d$lz", label);
26006 if (MACHOPIC_PURE)
26007 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26008 else
26009 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26011 fprintf (file, "%s:\n", stub);
26012 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26014 if (MACHOPIC_PURE)
26016 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26017 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26018 fprintf (file, "\tjmp\t*%%edx\n");
26020 else
26021 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26023 fprintf (file, "%s:\n", binder_name);
26025 if (MACHOPIC_PURE)
26027 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26028 fputs ("\tpushl\t%eax\n", file);
26030 else
26031 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26033 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
26035 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26036 fprintf (file, "%s:\n", lazy_ptr_name);
26037 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26038 fprintf (file, ASM_LONG "%s\n", binder_name);
26040 #endif /* TARGET_MACHO */
26042 /* Order the registers for register allocator. */
26044 void
26045 x86_order_regs_for_local_alloc (void)
26047 int pos = 0;
26048 int i;
26050 /* First allocate the local general purpose registers. */
26051 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26052 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26053 reg_alloc_order [pos++] = i;
26055 /* Global general purpose registers. */
26056 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26057 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26058 reg_alloc_order [pos++] = i;
26060 /* x87 registers come first in case we are doing FP math
26061 using them. */
26062 if (!TARGET_SSE_MATH)
26063 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26064 reg_alloc_order [pos++] = i;
26066 /* SSE registers. */
26067 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26068 reg_alloc_order [pos++] = i;
26069 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26070 reg_alloc_order [pos++] = i;
26072 /* x87 registers. */
26073 if (TARGET_SSE_MATH)
26074 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26075 reg_alloc_order [pos++] = i;
26077 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26078 reg_alloc_order [pos++] = i;
26080 /* Initialize the rest of array as we do not allocate some registers
26081 at all. */
26082 while (pos < FIRST_PSEUDO_REGISTER)
26083 reg_alloc_order [pos++] = 0;
26086 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
26087 struct attribute_spec.handler. */
26088 static tree
26089 ix86_handle_abi_attribute (tree *node, tree name,
26090 tree args ATTRIBUTE_UNUSED,
26091 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26093 if (TREE_CODE (*node) != FUNCTION_TYPE
26094 && TREE_CODE (*node) != METHOD_TYPE
26095 && TREE_CODE (*node) != FIELD_DECL
26096 && TREE_CODE (*node) != TYPE_DECL)
26098 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26099 name);
26100 *no_add_attrs = true;
26101 return NULL_TREE;
26103 if (!TARGET_64BIT)
26105 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26106 name);
26107 *no_add_attrs = true;
26108 return NULL_TREE;
26111 /* Can combine regparm with all attributes but fastcall. */
26112 if (is_attribute_p ("ms_abi", name))
26114 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26116 error ("ms_abi and sysv_abi attributes are not compatible");
26119 return NULL_TREE;
26121 else if (is_attribute_p ("sysv_abi", name))
26123 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26125 error ("ms_abi and sysv_abi attributes are not compatible");
26128 return NULL_TREE;
26131 return NULL_TREE;
26134 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26135 struct attribute_spec.handler. */
26136 static tree
26137 ix86_handle_struct_attribute (tree *node, tree name,
26138 tree args ATTRIBUTE_UNUSED,
26139 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26141 tree *type = NULL;
26142 if (DECL_P (*node))
26144 if (TREE_CODE (*node) == TYPE_DECL)
26145 type = &TREE_TYPE (*node);
26147 else
26148 type = node;
26150 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26151 || TREE_CODE (*type) == UNION_TYPE)))
26153 warning (OPT_Wattributes, "%qE attribute ignored",
26154 name);
26155 *no_add_attrs = true;
26158 else if ((is_attribute_p ("ms_struct", name)
26159 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26160 || ((is_attribute_p ("gcc_struct", name)
26161 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26163 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26164 name);
26165 *no_add_attrs = true;
26168 return NULL_TREE;
26171 static tree
26172 ix86_handle_fndecl_attribute (tree *node, tree name,
26173 tree args ATTRIBUTE_UNUSED,
26174 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26176 if (TREE_CODE (*node) != FUNCTION_DECL)
26178 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26179 name);
26180 *no_add_attrs = true;
26181 return NULL_TREE;
26184 if (TARGET_64BIT)
26186 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26187 name);
26188 return NULL_TREE;
26191 #ifndef HAVE_AS_IX86_SWAP
26192 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26193 #endif
26195 return NULL_TREE;
26198 static bool
26199 ix86_ms_bitfield_layout_p (const_tree record_type)
26201 return (TARGET_MS_BITFIELD_LAYOUT &&
26202 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26203 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26206 /* Returns an expression indicating where the this parameter is
26207 located on entry to the FUNCTION. */
26209 static rtx
26210 x86_this_parameter (tree function)
26212 tree type = TREE_TYPE (function);
26213 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26214 int nregs;
26216 if (TARGET_64BIT)
26218 const int *parm_regs;
26220 if (ix86_function_type_abi (type) == MS_ABI)
26221 parm_regs = x86_64_ms_abi_int_parameter_registers;
26222 else
26223 parm_regs = x86_64_int_parameter_registers;
26224 return gen_rtx_REG (DImode, parm_regs[aggr]);
26227 nregs = ix86_function_regparm (type, function);
26229 if (nregs > 0 && !stdarg_p (type))
26231 int regno;
26233 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26234 regno = aggr ? DX_REG : CX_REG;
26235 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
26237 regno = CX_REG;
26238 if (aggr)
26239 return gen_rtx_MEM (SImode,
26240 plus_constant (stack_pointer_rtx, 4));
26242 else
26244 regno = AX_REG;
26245 if (aggr)
26247 regno = DX_REG;
26248 if (nregs == 1)
26249 return gen_rtx_MEM (SImode,
26250 plus_constant (stack_pointer_rtx, 4));
26253 return gen_rtx_REG (SImode, regno);
26256 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26259 /* Determine whether x86_output_mi_thunk can succeed. */
26261 static bool
26262 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26263 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26264 HOST_WIDE_INT vcall_offset, const_tree function)
26266 /* 64-bit can handle anything. */
26267 if (TARGET_64BIT)
26268 return true;
26270 /* For 32-bit, everything's fine if we have one free register. */
26271 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26272 return true;
26274 /* Need a free register for vcall_offset. */
26275 if (vcall_offset)
26276 return false;
26278 /* Need a free register for GOT references. */
26279 if (flag_pic && !(*targetm.binds_local_p) (function))
26280 return false;
26282 /* Otherwise ok. */
26283 return true;
26286 /* Output the assembler code for a thunk function. THUNK_DECL is the
26287 declaration for the thunk function itself, FUNCTION is the decl for
26288 the target function. DELTA is an immediate constant offset to be
26289 added to THIS. If VCALL_OFFSET is nonzero, the word at
26290 *(*this + vcall_offset) should be added to THIS. */
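/* Illustrative note: for the simple 32-bit case (THIS in a register, no
   vcall offset, non-PIC target) the emitted thunk typically reduces to
       add{l}  $DELTA, %ecx     (or an add to 4(%esp) when THIS lives on the stack)
       jmp     FUNCTION
   where the register is %ecx or %eax depending on the calling convention;
   the vcall variant additionally loads the vtable pointer from *THIS and
   adds the word at VCALL_OFFSET within it before jumping.  */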
26292 static void
26293 x86_output_mi_thunk (FILE *file,
26294 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26295 HOST_WIDE_INT vcall_offset, tree function)
26297 rtx xops[3];
26298 rtx this_param = x86_this_parameter (function);
26299 rtx this_reg, tmp;
26301 /* Make sure unwind info is emitted for the thunk if needed. */
26302 final_start_function (emit_barrier (), file, 1);
26304 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26305 pull it in now and let DELTA benefit. */
26306 if (REG_P (this_param))
26307 this_reg = this_param;
26308 else if (vcall_offset)
26310 /* Put the this parameter into %eax. */
26311 xops[0] = this_param;
26312 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26313 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26315 else
26316 this_reg = NULL_RTX;
26318 /* Adjust the this parameter by a fixed constant. */
26319 if (delta)
26321 xops[0] = GEN_INT (delta);
26322 xops[1] = this_reg ? this_reg : this_param;
26323 if (TARGET_64BIT)
26325 if (!x86_64_general_operand (xops[0], DImode))
26327 tmp = gen_rtx_REG (DImode, R10_REG);
26328 xops[1] = tmp;
26329 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26330 xops[0] = tmp;
26331 xops[1] = this_param;
26333 if (x86_maybe_negate_const_int (&xops[0], DImode))
26334 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26335 else
26336 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26338 else if (x86_maybe_negate_const_int (&xops[0], SImode))
26339 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26340 else
26341 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26344 /* Adjust the this parameter by a value stored in the vtable. */
26345 if (vcall_offset)
26347 if (TARGET_64BIT)
26348 tmp = gen_rtx_REG (DImode, R10_REG);
26349 else
26351 int tmp_regno = CX_REG;
26352 if (lookup_attribute ("fastcall",
26353 TYPE_ATTRIBUTES (TREE_TYPE (function)))
26354 || lookup_attribute ("thiscall",
26355 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26356 tmp_regno = AX_REG;
26357 tmp = gen_rtx_REG (SImode, tmp_regno);
26360 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26361 xops[1] = tmp;
26362 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26364 /* Adjust the this parameter. */
26365 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26366 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26368 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26369 xops[0] = GEN_INT (vcall_offset);
26370 xops[1] = tmp2;
26371 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26372 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26374 xops[1] = this_reg;
26375 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26378 /* If necessary, drop THIS back to its stack slot. */
26379 if (this_reg && this_reg != this_param)
26381 xops[0] = this_reg;
26382 xops[1] = this_param;
26383 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26386 xops[0] = XEXP (DECL_RTL (function), 0);
26387 if (TARGET_64BIT)
26389 if (!flag_pic || (*targetm.binds_local_p) (function))
26390 output_asm_insn ("jmp\t%P0", xops);
26391 /* All thunks should be in the same object as their target,
26392 and thus binds_local_p should be true. */
26393 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26394 gcc_unreachable ();
26395 else
26397 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26398 tmp = gen_rtx_CONST (Pmode, tmp);
26399 tmp = gen_rtx_MEM (QImode, tmp);
26400 xops[0] = tmp;
26401 output_asm_insn ("jmp\t%A0", xops);
26404 else
26406 if (!flag_pic || (*targetm.binds_local_p) (function))
26407 output_asm_insn ("jmp\t%P0", xops);
26408 else
26409 #if TARGET_MACHO
26410 if (TARGET_MACHO)
26412 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26413 tmp = (gen_rtx_SYMBOL_REF
26414 (Pmode,
26415 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26416 tmp = gen_rtx_MEM (QImode, tmp);
26417 xops[0] = tmp;
26418 output_asm_insn ("jmp\t%0", xops);
26420 else
26421 #endif /* TARGET_MACHO */
26423 tmp = gen_rtx_REG (SImode, CX_REG);
26424 output_set_got (tmp, NULL_RTX);
26426 xops[1] = tmp;
26427 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26428 output_asm_insn ("jmp\t{*}%1", xops);
26431 final_end_function ();
26434 static void
26435 x86_file_start (void)
26437 default_file_start ();
26438 #if TARGET_MACHO
26439 darwin_file_start ();
26440 #endif
26441 if (X86_FILE_START_VERSION_DIRECTIVE)
26442 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26443 if (X86_FILE_START_FLTUSED)
26444 fputs ("\t.global\t__fltused\n", asm_out_file);
26445 if (ix86_asm_dialect == ASM_INTEL)
26446 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26450 x86_field_alignment (tree field, int computed)
26452 enum machine_mode mode;
26453 tree type = TREE_TYPE (field);
26455 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26456 return computed;
26457 mode = TYPE_MODE (strip_array_types (type));
26458 if (mode == DFmode || mode == DCmode
26459 || GET_MODE_CLASS (mode) == MODE_INT
26460 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26461 return MIN (32, computed);
26462 return computed;
26465 /* Output assembler code to FILE to increment profiler label # LABELNO
26466 for profiling a function entry. */
26467 void
26468 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26470 if (TARGET_64BIT)
26472 #ifndef NO_PROFILE_COUNTERS
26473 fprintf (file, "\tleaq\t" LPREFIX "P%d(%%rip),%%r11\n", labelno);
26474 #endif
26476 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26477 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26478 else
26479 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26481 else if (flag_pic)
26483 #ifndef NO_PROFILE_COUNTERS
26484 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26485 labelno);
26486 #endif
26487 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26489 else
26491 #ifndef NO_PROFILE_COUNTERS
26492 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26493 labelno);
26494 #endif
26495 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26499 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26500 /* We do not have exact information about insn sizes, but we may quite
26501    safely assume that we know about all one-byte insns and about memory
26502    address sizes.  This is enough to eliminate unnecessary padding in
26503    99% of cases.
26505 static int
26506 min_insn_size (rtx insn)
26508 int l = 0, len;
26510 if (!INSN_P (insn) || !active_insn_p (insn))
26511 return 0;
26513   /* Discard the alignments we have emitted, and jump table data.  */
26514 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26515 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26516 return 0;
26517 if (JUMP_TABLE_DATA_P (insn))
26518 return 0;
26520   /* Important case: calls are always 5 bytes.
26521      It is common to have many calls in a row.  */
26522 if (CALL_P (insn)
26523 && symbolic_reference_mentioned_p (PATTERN (insn))
26524 && !SIBLING_CALL_P (insn))
26525 return 5;
26526 len = get_attr_length (insn);
26527 if (len <= 1)
26528 return 1;
26530 /* For normal instructions we rely on get_attr_length being exact,
26531 with a few exceptions. */
26532 if (!JUMP_P (insn))
26534 enum attr_type type = get_attr_type (insn);
26536 switch (type)
26538 case TYPE_MULTI:
26539 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26540 || asm_noperands (PATTERN (insn)) >= 0)
26541 return 0;
26542 break;
26543 case TYPE_OTHER:
26544 case TYPE_FCMP:
26545 break;
26546 default:
26547 /* Otherwise trust get_attr_length. */
26548 return len;
26551 l = get_attr_length_address (insn);
26552 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26553 l = 4;
26555 if (l)
26556 return 1+l;
26557 else
26558 return 2;
26561 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
26562    16-byte window.  */
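/* Illustrative note: the pass below keeps a sliding window over the insn
   stream using the conservative size estimates from min_insn_size.  Whenever
   a fourth jump or call would fall close enough to the first one that all
   four could share a single 16-byte aligned block (fewer than 16 bytes
   between them), a "pad" insn is emitted before the last jump so that it is
   pushed into the following 16-byte block.  */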
26564 static void
26565 ix86_avoid_jump_mispredicts (void)
26567 rtx insn, start = get_insns ();
26568 int nbytes = 0, njumps = 0;
26569 int isjump = 0;
26571 /* Look for all minimal intervals of instructions containing 4 jumps.
26572 The intervals are bounded by START and INSN. NBYTES is the total
26573 size of instructions in the interval including INSN and not including
26574    START.  When NBYTES is smaller than 16 bytes, it is possible that the
26575    end of START and the end of INSN fall within the same 16-byte page.
26577    The smallest offset in the page at which INSN can start is the case
26578    where START ends at offset 0.  The offset of INSN is then NBYTES - sizeof (INSN).
26579    We add a p2align to the 16-byte window with a max skip of 15 - NBYTES + sizeof (INSN).
26581 for (insn = start; insn; insn = NEXT_INSN (insn))
26583 int min_size;
26585 if (LABEL_P (insn))
26587 int align = label_to_alignment (insn);
26588 int max_skip = label_to_max_skip (insn);
26590 if (max_skip > 15)
26591 max_skip = 15;
26592 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26593 already in the current 16 byte page, because otherwise
26594 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26595 bytes to reach 16 byte boundary. */
26596 if (align <= 0
26597 || (align <= 3 && max_skip != (1 << align) - 1))
26598 max_skip = 0;
26599 if (dump_file)
26600 fprintf (dump_file, "Label %i with max_skip %i\n",
26601 INSN_UID (insn), max_skip);
26602 if (max_skip)
26604 while (nbytes + max_skip >= 16)
26606 start = NEXT_INSN (start);
26607 if ((JUMP_P (start)
26608 && GET_CODE (PATTERN (start)) != ADDR_VEC
26609 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26610 || CALL_P (start))
26611 njumps--, isjump = 1;
26612 else
26613 isjump = 0;
26614 nbytes -= min_insn_size (start);
26617 continue;
26620 min_size = min_insn_size (insn);
26621 nbytes += min_size;
26622 if (dump_file)
26623 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26624 INSN_UID (insn), min_size);
26625 if ((JUMP_P (insn)
26626 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26627 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26628 || CALL_P (insn))
26629 njumps++;
26630 else
26631 continue;
26633 while (njumps > 3)
26635 start = NEXT_INSN (start);
26636 if ((JUMP_P (start)
26637 && GET_CODE (PATTERN (start)) != ADDR_VEC
26638 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26639 || CALL_P (start))
26640 njumps--, isjump = 1;
26641 else
26642 isjump = 0;
26643 nbytes -= min_insn_size (start);
26645 gcc_assert (njumps >= 0);
26646 if (dump_file)
26647 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26648 INSN_UID (start), INSN_UID (insn), nbytes);
26650 if (njumps == 3 && isjump && nbytes < 16)
26652 int padsize = 15 - nbytes + min_insn_size (insn);
26654 if (dump_file)
26655 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26656 INSN_UID (insn), padsize);
26657 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
26661 #endif
26663 /* The AMD Athlon works faster
26664    when RET is neither the destination of a conditional jump nor directly
26665    preceded by another jump instruction.  We avoid the penalty by inserting
26666    NOP just before such RET instructions.  */
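/* Illustrative note: the "NOP" is realized by switching the plain return to
   the return_internal_long pattern, which typically emits the two-byte
   "rep ; ret" sequence, so the ret byte itself is no longer the direct
   target of a branch or the insn immediately following one.  */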
26667 static void
26668 ix86_pad_returns (void)
26670 edge e;
26671 edge_iterator ei;
26673 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26675 basic_block bb = e->src;
26676 rtx ret = BB_END (bb);
26677 rtx prev;
26678 bool replace = false;
26680 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26681 || optimize_bb_for_size_p (bb))
26682 continue;
26683 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26684 if (active_insn_p (prev) || LABEL_P (prev))
26685 break;
26686 if (prev && LABEL_P (prev))
26688 edge e;
26689 edge_iterator ei;
26691 FOR_EACH_EDGE (e, ei, bb->preds)
26692 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26693 && !(e->flags & EDGE_FALLTHRU))
26694 replace = true;
26696 if (!replace)
26698 prev = prev_active_insn (ret);
26699 if (prev
26700 && ((JUMP_P (prev) && any_condjump_p (prev))
26701 || CALL_P (prev)))
26702 replace = true;
26703 	 /* Empty functions get a branch mispredict even when the jump destination
26704 	    is not visible to us.  */
26705 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
26706 replace = true;
26708 if (replace)
26710 emit_jump_insn_before (gen_return_internal_long (), ret);
26711 delete_insn (ret);
26716 /* Implement machine specific optimizations. We implement padding of returns
26717 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
26718 static void
26719 ix86_reorg (void)
26721 if (optimize && optimize_function_for_speed_p (cfun))
26723 if (TARGET_PAD_RETURNS)
26724 ix86_pad_returns ();
26725 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26726 if (TARGET_FOUR_JUMP_LIMIT)
26727 ix86_avoid_jump_mispredicts ();
26728 #endif
26732 /* Return nonzero when a QImode register that must be represented via a REX
26733    prefix is used.  */
26734 bool
26735 x86_extended_QIreg_mentioned_p (rtx insn)
26737 int i;
26738 extract_insn_cached (insn);
26739 for (i = 0; i < recog_data.n_operands; i++)
26740 if (REG_P (recog_data.operand[i])
26741 && REGNO (recog_data.operand[i]) > BX_REG)
26742 return true;
26743 return false;
26746 /* Return nonzero when P points to a register that must be encoded via a REX
26747    prefix.  Called via for_each_rtx.  */
26748 static int
26749 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26751 unsigned int regno;
26752 if (!REG_P (*p))
26753 return 0;
26754 regno = REGNO (*p);
26755 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26758 /* Return true when INSN mentions register that must be encoded using REX
26759 prefix. */
26760 bool
26761 x86_extended_reg_mentioned_p (rtx insn)
26763 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26764 extended_reg_mentioned_1, NULL);
26767 /* If profitable, negate (without causing overflow) integer constant
26768 of mode MODE at location LOC. Return true in this case. */
26769 bool
26770 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
26772 HOST_WIDE_INT val;
26774 if (!CONST_INT_P (*loc))
26775 return false;
26777 switch (mode)
26779 case DImode:
26780 /* DImode x86_64 constants must fit in 32 bits. */
26781 gcc_assert (x86_64_immediate_operand (*loc, mode));
26783 mode = SImode;
26784 break;
26786 case SImode:
26787 case HImode:
26788 case QImode:
26789 break;
26791 default:
26792 gcc_unreachable ();
26795 /* Avoid overflows. */
26796 if (mode_signbit_p (mode, *loc))
26797 return false;
26799 val = INTVAL (*loc);
26801 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
26802 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
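  /* Illustrative note: the add/sub forms with a sign-extended 8-bit immediate
     cover -128..127, while 128 needs the 4-byte immediate encoding.  Negating
     128 to -128 and letting the caller flip add to sub (or vice versa) thus
     saves three bytes; for other negative values the swap is done purely so
     the output reads as a subtraction of a positive constant.  */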
26803 if ((val < 0 && val != -128)
26804 || val == 128)
26806 *loc = GEN_INT (-val);
26807 return true;
26810 return false;
26813 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26814 optabs would emit if we didn't have TFmode patterns. */
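/* Illustrative note: a nonnegative input is converted directly with the
   signed int->FP path.  An input with the sign bit set is first halved as
   (in >> 1) | (in & 1), where the OR keeps the discarded low bit so the
   halved value rounds to odd, then converted and doubled via f0 + f0,
   which yields the correctly rounded unsigned conversion.  */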
26816 void
26817 x86_emit_floatuns (rtx operands[2])
26819 rtx neglab, donelab, i0, i1, f0, in, out;
26820 enum machine_mode mode, inmode;
26822 inmode = GET_MODE (operands[1]);
26823 gcc_assert (inmode == SImode || inmode == DImode);
26825 out = operands[0];
26826 in = force_reg (inmode, operands[1]);
26827 mode = GET_MODE (out);
26828 neglab = gen_label_rtx ();
26829 donelab = gen_label_rtx ();
26830 f0 = gen_reg_rtx (mode);
26832 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26834 expand_float (out, in, 0);
26836 emit_jump_insn (gen_jump (donelab));
26837 emit_barrier ();
26839 emit_label (neglab);
26841 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26842 1, OPTAB_DIRECT);
26843 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26844 1, OPTAB_DIRECT);
26845 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26847 expand_float (f0, i0, 0);
26849 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26851 emit_label (donelab);
26854 /* AVX does not support 32-byte integer vector operations,
26855 thus the longest vector we are faced with is V16QImode. */
26856 #define MAX_VECT_LEN 16
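/* Illustrative note: the structure below describes one requested constant
   permutation.  NELT is the number of elements of VMODE; perm[i] selects,
   for result element i, one of the 2*NELT elements of the concatenated
   operands (indices NELT and above refer to op1).  When testing_p is set
   the expanders only report whether the permutation can be matched and do
   not emit any insns.  */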
26858 struct expand_vec_perm_d
26860 rtx target, op0, op1;
26861 unsigned char perm[MAX_VECT_LEN];
26862 enum machine_mode vmode;
26863 unsigned char nelt;
26864 bool testing_p;
26867 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
26868 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
26870 /* Get a vector mode of the same size as the original but with elements
26871 twice as wide. This is only guaranteed to apply to integral vectors. */
26873 static inline enum machine_mode
26874 get_mode_wider_vector (enum machine_mode o)
26876 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
26877 enum machine_mode n = GET_MODE_WIDER_MODE (o);
26878 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
26879 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
26880 return n;
26883 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26884 with all elements equal to VAR. Return true if successful. */
26886 static bool
26887 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26888 rtx target, rtx val)
26890 bool ok;
26892 switch (mode)
26894 case V2SImode:
26895 case V2SFmode:
26896 if (!mmx_ok)
26897 return false;
26898 /* FALLTHRU */
26900 case V4DFmode:
26901 case V4DImode:
26902 case V8SFmode:
26903 case V8SImode:
26904 case V2DFmode:
26905 case V2DImode:
26906 case V4SFmode:
26907 case V4SImode:
26909 rtx insn, dup;
26911 /* First attempt to recognize VAL as-is. */
26912 dup = gen_rtx_VEC_DUPLICATE (mode, val);
26913 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
26914 if (recog_memoized (insn) < 0)
26916 rtx seq;
26917 /* If that fails, force VAL into a register. */
26919 start_sequence ();
26920 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
26921 seq = get_insns ();
26922 end_sequence ();
26923 if (seq)
26924 emit_insn_before (seq, insn);
26926 ok = recog_memoized (insn) >= 0;
26927 gcc_assert (ok);
26930 return true;
26932 case V4HImode:
26933 if (!mmx_ok)
26934 return false;
26935 if (TARGET_SSE || TARGET_3DNOW_A)
26937 rtx x;
26939 val = gen_lowpart (SImode, val);
26940 x = gen_rtx_TRUNCATE (HImode, val);
26941 x = gen_rtx_VEC_DUPLICATE (mode, x);
26942 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26943 return true;
26945 goto widen;
26947 case V8QImode:
26948 if (!mmx_ok)
26949 return false;
26950 goto widen;
26952 case V8HImode:
26953 if (TARGET_SSE2)
26955 struct expand_vec_perm_d dperm;
26956 rtx tmp1, tmp2;
26958 permute:
26959 memset (&dperm, 0, sizeof (dperm));
26960 dperm.target = target;
26961 dperm.vmode = mode;
26962 dperm.nelt = GET_MODE_NUNITS (mode);
26963 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
26965 /* Extend to SImode using a paradoxical SUBREG. */
26966 tmp1 = gen_reg_rtx (SImode);
26967 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26969 /* Insert the SImode value as low element of a V4SImode vector. */
26970 tmp2 = gen_lowpart (V4SImode, dperm.op0);
26971 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
26973 ok = (expand_vec_perm_1 (&dperm)
26974 || expand_vec_perm_broadcast_1 (&dperm));
26975 gcc_assert (ok);
26976 return ok;
26978 goto widen;
26980 case V16QImode:
26981 if (TARGET_SSE2)
26982 goto permute;
26983 goto widen;
26985 widen:
26986 /* Replicate the value once into the next wider mode and recurse. */
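      /* Illustrative note: e.g. broadcasting the QImode value 0x12 into
         V8QImode first forms the HImode value 0x1212 via (v << 8) | v and
         then recurses, broadcasting that HImode value into V4HImode using
         the cases above.  */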
26988 enum machine_mode smode, wsmode, wvmode;
26989 rtx x;
26991 smode = GET_MODE_INNER (mode);
26992 wvmode = get_mode_wider_vector (mode);
26993 wsmode = GET_MODE_INNER (wvmode);
26995 val = convert_modes (wsmode, smode, val, true);
26996 x = expand_simple_binop (wsmode, ASHIFT, val,
26997 GEN_INT (GET_MODE_BITSIZE (smode)),
26998 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26999 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27001 x = gen_lowpart (wvmode, target);
27002 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
27003 gcc_assert (ok);
27004 return ok;
27007 case V16HImode:
27008 case V32QImode:
27010 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
27011 rtx x = gen_reg_rtx (hvmode);
27013 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
27014 gcc_assert (ok);
27016 x = gen_rtx_VEC_CONCAT (mode, x, x);
27017 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27019 return true;
27021 default:
27022 return false;
27026 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27027 whose ONE_VAR element is VAR, and other elements are zero. Return true
27028 if successful. */
27030 static bool
27031 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27032 rtx target, rtx var, int one_var)
27034 enum machine_mode vsimode;
27035 rtx new_target;
27036 rtx x, tmp;
27037 bool use_vector_set = false;
27039 switch (mode)
27041 case V2DImode:
27042 /* For SSE4.1, we normally use vector set. But if the second
27043 element is zero and inter-unit moves are OK, we use movq
27044 instead. */
27045 use_vector_set = (TARGET_64BIT
27046 && TARGET_SSE4_1
27047 && !(TARGET_INTER_UNIT_MOVES
27048 && one_var == 0));
27049 break;
27050 case V16QImode:
27051 case V4SImode:
27052 case V4SFmode:
27053 use_vector_set = TARGET_SSE4_1;
27054 break;
27055 case V8HImode:
27056 use_vector_set = TARGET_SSE2;
27057 break;
27058 case V4HImode:
27059 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27060 break;
27061 case V32QImode:
27062 case V16HImode:
27063 case V8SImode:
27064 case V8SFmode:
27065 case V4DFmode:
27066 use_vector_set = TARGET_AVX;
27067 break;
27068 case V4DImode:
27069 /* Use ix86_expand_vector_set in 64bit mode only. */
27070 use_vector_set = TARGET_AVX && TARGET_64BIT;
27071 break;
27072 default:
27073 break;
27076 if (use_vector_set)
27078 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27079 var = force_reg (GET_MODE_INNER (mode), var);
27080 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27081 return true;
27084 switch (mode)
27086 case V2SFmode:
27087 case V2SImode:
27088 if (!mmx_ok)
27089 return false;
27090 /* FALLTHRU */
27092 case V2DFmode:
27093 case V2DImode:
27094 if (one_var != 0)
27095 return false;
27096 var = force_reg (GET_MODE_INNER (mode), var);
27097 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27098 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27099 return true;
27101 case V4SFmode:
27102 case V4SImode:
27103 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27104 new_target = gen_reg_rtx (mode);
27105 else
27106 new_target = target;
27107 var = force_reg (GET_MODE_INNER (mode), var);
27108 x = gen_rtx_VEC_DUPLICATE (mode, var);
27109 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27110 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27111 if (one_var != 0)
27113 /* We need to shuffle the value to the correct position, so
27114 create a new pseudo to store the intermediate result. */
27116 /* With SSE2, we can use the integer shuffle insns. */
27117 if (mode != V4SFmode && TARGET_SSE2)
27119 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27120 const1_rtx,
27121 GEN_INT (one_var == 1 ? 0 : 1),
27122 GEN_INT (one_var == 2 ? 0 : 1),
27123 GEN_INT (one_var == 3 ? 0 : 1)));
27124 if (target != new_target)
27125 emit_move_insn (target, new_target);
27126 return true;
27129 /* Otherwise convert the intermediate result to V4SFmode and
27130 use the SSE1 shuffle instructions. */
27131 if (mode != V4SFmode)
27133 tmp = gen_reg_rtx (V4SFmode);
27134 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27136 else
27137 tmp = new_target;
27139 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27140 const1_rtx,
27141 GEN_INT (one_var == 1 ? 0 : 1),
27142 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27143 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27145 if (mode != V4SFmode)
27146 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27147 else if (tmp != target)
27148 emit_move_insn (target, tmp);
27150 else if (target != new_target)
27151 emit_move_insn (target, new_target);
27152 return true;
27154 case V8HImode:
27155 case V16QImode:
27156 vsimode = V4SImode;
27157 goto widen;
27158 case V4HImode:
27159 case V8QImode:
27160 if (!mmx_ok)
27161 return false;
27162 vsimode = V2SImode;
27163 goto widen;
27164 widen:
27165 if (one_var != 0)
27166 return false;
27168 /* Zero extend the variable element to SImode and recurse. */
27169 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27171 x = gen_reg_rtx (vsimode);
27172 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27173 var, one_var))
27174 gcc_unreachable ();
27176 emit_move_insn (target, gen_lowpart (mode, x));
27177 return true;
27179 default:
27180 return false;
27184 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27185 consisting of the values in VALS. It is known that all elements
27186 except ONE_VAR are constants. Return true if successful. */
27188 static bool
27189 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27190 rtx target, rtx vals, int one_var)
27192 rtx var = XVECEXP (vals, 0, one_var);
27193 enum machine_mode wmode;
27194 rtx const_vec, x;
27196 const_vec = copy_rtx (vals);
27197 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27198 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27200 switch (mode)
27202 case V2DFmode:
27203 case V2DImode:
27204 case V2SFmode:
27205 case V2SImode:
27206 /* For the two element vectors, it's just as easy to use
27207 the general case. */
27208 return false;
27210 case V4DImode:
27211 /* Use ix86_expand_vector_set in 64bit mode only. */
27212 if (!TARGET_64BIT)
27213 return false;
27214 case V4DFmode:
27215 case V8SFmode:
27216 case V8SImode:
27217 case V16HImode:
27218 case V32QImode:
27219 case V4SFmode:
27220 case V4SImode:
27221 case V8HImode:
27222 case V4HImode:
27223 break;
27225 case V16QImode:
27226 if (TARGET_SSE4_1)
27227 break;
27228 wmode = V8HImode;
27229 goto widen;
27230 case V8QImode:
27231 wmode = V4HImode;
27232 goto widen;
27233 widen:
27234 /* There's no way to set one QImode entry easily. Combine
27235 the variable value with its adjacent constant value, and
27236 promote to an HImode set. */
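      /* Illustrative note: e.g. for a V8QImode vector whose only variable
         element is at index 3, the variable byte is zero-extended to HImode,
         shifted left by 8, combined with the low byte of the constant
         element at index 2, and the resulting HImode value is inserted as
         element 1 of the matching V4HImode vector.  */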
27237 x = XVECEXP (vals, 0, one_var ^ 1);
27238 if (one_var & 1)
27240 var = convert_modes (HImode, QImode, var, true);
27241 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27242 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27243 x = GEN_INT (INTVAL (x) & 0xff);
27245 else
27247 var = convert_modes (HImode, QImode, var, true);
27248 x = gen_int_mode (INTVAL (x) << 8, HImode);
27250 if (x != const0_rtx)
27251 var = expand_simple_binop (HImode, IOR, var, x, var,
27252 1, OPTAB_LIB_WIDEN);
27254 x = gen_reg_rtx (wmode);
27255 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27256 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27258 emit_move_insn (target, gen_lowpart (mode, x));
27259 return true;
27261 default:
27262 return false;
27265 emit_move_insn (target, const_vec);
27266 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27267 return true;
27270 /* A subroutine of ix86_expand_vector_init_general. Use vector
27271 concatenate to handle the most general case: all values variable,
27272 and none identical. */
27274 static void
27275 ix86_expand_vector_init_concat (enum machine_mode mode,
27276 rtx target, rtx *ops, int n)
27278 enum machine_mode cmode, hmode = VOIDmode;
27279 rtx first[8], second[4];
27280 rtvec v;
27281 int i, j;
27283 switch (n)
27285 case 2:
27286 switch (mode)
27288 case V8SImode:
27289 cmode = V4SImode;
27290 break;
27291 case V8SFmode:
27292 cmode = V4SFmode;
27293 break;
27294 case V4DImode:
27295 cmode = V2DImode;
27296 break;
27297 case V4DFmode:
27298 cmode = V2DFmode;
27299 break;
27300 case V4SImode:
27301 cmode = V2SImode;
27302 break;
27303 case V4SFmode:
27304 cmode = V2SFmode;
27305 break;
27306 case V2DImode:
27307 cmode = DImode;
27308 break;
27309 case V2SImode:
27310 cmode = SImode;
27311 break;
27312 case V2DFmode:
27313 cmode = DFmode;
27314 break;
27315 case V2SFmode:
27316 cmode = SFmode;
27317 break;
27318 default:
27319 gcc_unreachable ();
27322 if (!register_operand (ops[1], cmode))
27323 ops[1] = force_reg (cmode, ops[1]);
27324 if (!register_operand (ops[0], cmode))
27325 ops[0] = force_reg (cmode, ops[0]);
27326 emit_insn (gen_rtx_SET (VOIDmode, target,
27327 gen_rtx_VEC_CONCAT (mode, ops[0],
27328 ops[1])));
27329 break;
27331 case 4:
27332 switch (mode)
27334 case V4DImode:
27335 cmode = V2DImode;
27336 break;
27337 case V4DFmode:
27338 cmode = V2DFmode;
27339 break;
27340 case V4SImode:
27341 cmode = V2SImode;
27342 break;
27343 case V4SFmode:
27344 cmode = V2SFmode;
27345 break;
27346 default:
27347 gcc_unreachable ();
27349 goto half;
27351 case 8:
27352 switch (mode)
27354 case V8SImode:
27355 cmode = V2SImode;
27356 hmode = V4SImode;
27357 break;
27358 case V8SFmode:
27359 cmode = V2SFmode;
27360 hmode = V4SFmode;
27361 break;
27362 default:
27363 gcc_unreachable ();
27365 goto half;
27367 half:
27368 /* FIXME: We process inputs backward to help RA. PR 36222. */
27369 i = n - 1;
27370 j = (n >> 1) - 1;
27371 for (; i > 0; i -= 2, j--)
27373 first[j] = gen_reg_rtx (cmode);
27374 v = gen_rtvec (2, ops[i - 1], ops[i]);
27375 ix86_expand_vector_init (false, first[j],
27376 gen_rtx_PARALLEL (cmode, v));
27379 n >>= 1;
27380 if (n > 2)
27382 gcc_assert (hmode != VOIDmode);
27383 for (i = j = 0; i < n; i += 2, j++)
27385 second[j] = gen_reg_rtx (hmode);
27386 ix86_expand_vector_init_concat (hmode, second [j],
27387 &first [i], 2);
27389 n >>= 1;
27390 ix86_expand_vector_init_concat (mode, target, second, n);
27392 else
27393 ix86_expand_vector_init_concat (mode, target, first, n);
27394 break;
27396 default:
27397 gcc_unreachable ();
27401 /* A subroutine of ix86_expand_vector_init_general. Use vector
27402 interleave to handle the most general case: all values variable,
27403 and none identical. */
27405 static void
27406 ix86_expand_vector_init_interleave (enum machine_mode mode,
27407 rtx target, rtx *ops, int n)
27409 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27410 int i, j;
27411 rtx op0, op1;
27412 rtx (*gen_load_even) (rtx, rtx, rtx);
27413 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27414 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27416 switch (mode)
27418 case V8HImode:
27419 gen_load_even = gen_vec_setv8hi;
27420 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27421 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27422 inner_mode = HImode;
27423 first_imode = V4SImode;
27424 second_imode = V2DImode;
27425 third_imode = VOIDmode;
27426 break;
27427 case V16QImode:
27428 gen_load_even = gen_vec_setv16qi;
27429 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27430 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27431 inner_mode = QImode;
27432 first_imode = V8HImode;
27433 second_imode = V4SImode;
27434 third_imode = V2DImode;
27435 break;
27436 default:
27437 gcc_unreachable ();
27440 for (i = 0; i < n; i++)
27442       /* Extend the odd element to SImode using a paradoxical SUBREG.  */
27443 op0 = gen_reg_rtx (SImode);
27444 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27446 /* Insert the SImode value as low element of V4SImode vector. */
27447 op1 = gen_reg_rtx (V4SImode);
27448 op0 = gen_rtx_VEC_MERGE (V4SImode,
27449 gen_rtx_VEC_DUPLICATE (V4SImode,
27450 op0),
27451 CONST0_RTX (V4SImode),
27452 const1_rtx);
27453 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27455       /* Cast the V4SImode vector back to a vector in the original mode.  */
27456 op0 = gen_reg_rtx (mode);
27457 emit_move_insn (op0, gen_lowpart (mode, op1));
27459       /* Load even elements into the second position.  */
27460 emit_insn ((*gen_load_even) (op0,
27461 force_reg (inner_mode,
27462 ops [i + i + 1]),
27463 const1_rtx));
27465 /* Cast vector to FIRST_IMODE vector. */
27466 ops[i] = gen_reg_rtx (first_imode);
27467 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27470 /* Interleave low FIRST_IMODE vectors. */
27471 for (i = j = 0; i < n; i += 2, j++)
27473 op0 = gen_reg_rtx (first_imode);
27474 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27476 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27477 ops[j] = gen_reg_rtx (second_imode);
27478 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27481 /* Interleave low SECOND_IMODE vectors. */
27482 switch (second_imode)
27484 case V4SImode:
27485 for (i = j = 0; i < n / 2; i += 2, j++)
27487 op0 = gen_reg_rtx (second_imode);
27488 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27489 ops[i + 1]));
27491 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27492 vector. */
27493 ops[j] = gen_reg_rtx (third_imode);
27494 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27496 second_imode = V2DImode;
27497 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27498 /* FALLTHRU */
27500 case V2DImode:
27501 op0 = gen_reg_rtx (second_imode);
27502 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27503 ops[1]));
27505 	  /* Cast the SECOND_IMODE vector back to a vector in the original
27506 	     mode.  */
27507 emit_insn (gen_rtx_SET (VOIDmode, target,
27508 gen_lowpart (mode, op0)));
27509 break;
27511 default:
27512 gcc_unreachable ();
27516 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27517 all values variable, and none identical. */
27519 static void
27520 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27521 rtx target, rtx vals)
27523 rtx ops[32], op0, op1;
27524 enum machine_mode half_mode = VOIDmode;
27525 int n, i;
27527 switch (mode)
27529 case V2SFmode:
27530 case V2SImode:
27531 if (!mmx_ok && !TARGET_SSE)
27532 break;
27533 /* FALLTHRU */
27535 case V8SFmode:
27536 case V8SImode:
27537 case V4DFmode:
27538 case V4DImode:
27539 case V4SFmode:
27540 case V4SImode:
27541 case V2DFmode:
27542 case V2DImode:
27543 n = GET_MODE_NUNITS (mode);
27544 for (i = 0; i < n; i++)
27545 ops[i] = XVECEXP (vals, 0, i);
27546 ix86_expand_vector_init_concat (mode, target, ops, n);
27547 return;
27549 case V32QImode:
27550 half_mode = V16QImode;
27551 goto half;
27553 case V16HImode:
27554 half_mode = V8HImode;
27555 goto half;
27557 half:
27558 n = GET_MODE_NUNITS (mode);
27559 for (i = 0; i < n; i++)
27560 ops[i] = XVECEXP (vals, 0, i);
27561 op0 = gen_reg_rtx (half_mode);
27562 op1 = gen_reg_rtx (half_mode);
27563 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27564 n >> 2);
27565 ix86_expand_vector_init_interleave (half_mode, op1,
27566 &ops [n >> 1], n >> 2);
27567 emit_insn (gen_rtx_SET (VOIDmode, target,
27568 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27569 return;
27571 case V16QImode:
27572 if (!TARGET_SSE4_1)
27573 break;
27574 /* FALLTHRU */
27576 case V8HImode:
27577 if (!TARGET_SSE2)
27578 break;
27580 /* Don't use ix86_expand_vector_init_interleave if we can't
27581 move from GPR to SSE register directly. */
27582 if (!TARGET_INTER_UNIT_MOVES)
27583 break;
27585 n = GET_MODE_NUNITS (mode);
27586 for (i = 0; i < n; i++)
27587 ops[i] = XVECEXP (vals, 0, i);
27588 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27589 return;
27591 case V4HImode:
27592 case V8QImode:
27593 break;
27595 default:
27596 gcc_unreachable ();
27600 int i, j, n_elts, n_words, n_elt_per_word;
27601 enum machine_mode inner_mode;
27602 rtx words[4], shift;
27604 inner_mode = GET_MODE_INNER (mode);
27605 n_elts = GET_MODE_NUNITS (mode);
27606 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27607 n_elt_per_word = n_elts / n_words;
27608 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27610 for (i = 0; i < n_words; ++i)
27612 rtx word = NULL_RTX;
27614 for (j = 0; j < n_elt_per_word; ++j)
27616 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27617 elt = convert_modes (word_mode, inner_mode, elt, true);
27619 if (j == 0)
27620 word = elt;
27621 else
27623 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27624 word, 1, OPTAB_LIB_WIDEN);
27625 word = expand_simple_binop (word_mode, IOR, word, elt,
27626 word, 1, OPTAB_LIB_WIDEN);
27630 words[i] = word;
27633 if (n_words == 1)
27634 emit_move_insn (target, gen_lowpart (mode, words[0]));
27635 else if (n_words == 2)
27637 rtx tmp = gen_reg_rtx (mode);
27638 emit_clobber (tmp);
27639 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27640 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27641 emit_move_insn (target, tmp);
27643 else if (n_words == 4)
27645 rtx tmp = gen_reg_rtx (V4SImode);
27646 gcc_assert (word_mode == SImode);
27647 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27648 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27649 emit_move_insn (target, gen_lowpart (mode, tmp));
27651 else
27652 gcc_unreachable ();
27656 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27657 instructions unless MMX_OK is true. */
27659 void
27660 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27662 enum machine_mode mode = GET_MODE (target);
27663 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27664 int n_elts = GET_MODE_NUNITS (mode);
27665 int n_var = 0, one_var = -1;
27666 bool all_same = true, all_const_zero = true;
27667 int i;
27668 rtx x;
27670 for (i = 0; i < n_elts; ++i)
27672 x = XVECEXP (vals, 0, i);
27673 if (!(CONST_INT_P (x)
27674 || GET_CODE (x) == CONST_DOUBLE
27675 || GET_CODE (x) == CONST_FIXED))
27676 n_var++, one_var = i;
27677 else if (x != CONST0_RTX (inner_mode))
27678 all_const_zero = false;
27679 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27680 all_same = false;
27683 /* Constants are best loaded from the constant pool. */
27684 if (n_var == 0)
27686 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27687 return;
27690 /* If all values are identical, broadcast the value. */
27691 if (all_same
27692 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27693 XVECEXP (vals, 0, 0)))
27694 return;
27696 /* Values where only one field is non-constant are best loaded from
27697 the pool and overwritten via move later. */
27698 if (n_var == 1)
27700 if (all_const_zero
27701 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27702 XVECEXP (vals, 0, one_var),
27703 one_var))
27704 return;
27706 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27707 return;
27710 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27713 void
27714 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27716 enum machine_mode mode = GET_MODE (target);
27717 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27718 enum machine_mode half_mode;
27719 bool use_vec_merge = false;
27720 rtx tmp;
27721 static rtx (*gen_extract[6][2]) (rtx, rtx)
27723 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27724 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27725 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27726 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27727 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27728 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27730 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27732 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27733 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27734 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27735 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27736 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27737 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27739 int i, j, n;
27741 switch (mode)
27743 case V2SFmode:
27744 case V2SImode:
27745 if (mmx_ok)
27747 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27748 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27749 if (elt == 0)
27750 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27751 else
27752 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27753 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27754 return;
27756 break;
27758 case V2DImode:
27759 use_vec_merge = TARGET_SSE4_1;
27760 if (use_vec_merge)
27761 break;
27763 case V2DFmode:
27765 rtx op0, op1;
27767 /* For the two element vectors, we implement a VEC_CONCAT with
27768 the extraction of the other element. */
27770 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27771 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27773 if (elt == 0)
27774 op0 = val, op1 = tmp;
27775 else
27776 op0 = tmp, op1 = val;
27778 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27779 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27781 return;
27783 case V4SFmode:
27784 use_vec_merge = TARGET_SSE4_1;
27785 if (use_vec_merge)
27786 break;
27788 switch (elt)
27790 case 0:
27791 use_vec_merge = true;
27792 break;
27794 case 1:
27795 /* tmp = target = A B C D */
27796 tmp = copy_to_reg (target);
27797 /* target = A A B B */
27798 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27799 /* target = X A B B */
27800 ix86_expand_vector_set (false, target, val, 0);
27801 /* target = A X C D */
27802 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27803 const1_rtx, const0_rtx,
27804 GEN_INT (2+4), GEN_INT (3+4)));
27805 return;
27807 case 2:
27808 /* tmp = target = A B C D */
27809 tmp = copy_to_reg (target);
27810 /* tmp = X B C D */
27811 ix86_expand_vector_set (false, tmp, val, 0);
27812 /* target = A B X D */
27813 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27814 const0_rtx, const1_rtx,
27815 GEN_INT (0+4), GEN_INT (3+4)));
27816 return;
27818 case 3:
27819 /* tmp = target = A B C D */
27820 tmp = copy_to_reg (target);
27821 /* tmp = X B C D */
27822 ix86_expand_vector_set (false, tmp, val, 0);
27823 /* target = A B X D */
27824 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27825 const0_rtx, const1_rtx,
27826 GEN_INT (2+4), GEN_INT (0+4)));
27827 return;
27829 default:
27830 gcc_unreachable ();
27832 break;
27834 case V4SImode:
27835 use_vec_merge = TARGET_SSE4_1;
27836 if (use_vec_merge)
27837 break;
27839 /* Element 0 handled by vec_merge below. */
27840 if (elt == 0)
27842 use_vec_merge = true;
27843 break;
27846 if (TARGET_SSE2)
27848 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27849 store into element 0, then shuffle them back. */
27851 rtx order[4];
27853 order[0] = GEN_INT (elt);
27854 order[1] = const1_rtx;
27855 order[2] = const2_rtx;
27856 order[3] = GEN_INT (3);
27857 order[elt] = const0_rtx;
27859 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27860 order[1], order[2], order[3]));
27862 ix86_expand_vector_set (false, target, val, 0);
27864 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27865 order[1], order[2], order[3]));
27867 else
27869 /* For SSE1, we have to reuse the V4SF code. */
27870 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27871 gen_lowpart (SFmode, val), elt);
27873 return;
27875 case V8HImode:
27876 use_vec_merge = TARGET_SSE2;
27877 break;
27878 case V4HImode:
27879 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27880 break;
27882 case V16QImode:
27883 use_vec_merge = TARGET_SSE4_1;
27884 break;
27886 case V8QImode:
27887 break;
27889 case V32QImode:
27890 half_mode = V16QImode;
27891 j = 0;
27892 n = 16;
27893 goto half;
27895 case V16HImode:
27896 half_mode = V8HImode;
27897 j = 1;
27898 n = 8;
27899 goto half;
27901 case V8SImode:
27902 half_mode = V4SImode;
27903 j = 2;
27904 n = 4;
27905 goto half;
27907 case V4DImode:
27908 half_mode = V2DImode;
27909 j = 3;
27910 n = 2;
27911 goto half;
27913 case V8SFmode:
27914 half_mode = V4SFmode;
27915 j = 4;
27916 n = 4;
27917 goto half;
27919 case V4DFmode:
27920 half_mode = V2DFmode;
27921 j = 5;
27922 n = 2;
27923 goto half;
27925 half:
27926 /* Compute offset. */
27927 i = elt / n;
27928 elt %= n;
27930 gcc_assert (i <= 1);
27932 /* Extract the half. */
27933 tmp = gen_reg_rtx (half_mode);
27934 emit_insn ((*gen_extract[j][i]) (tmp, target));
27936 /* Put val in tmp at elt. */
27937 ix86_expand_vector_set (false, tmp, val, elt);
27939 /* Put it back. */
27940 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27941 return;
27943 default:
27944 break;
27947 if (use_vec_merge)
27949 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27950 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27951 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27953 else
27955 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27957 emit_move_insn (mem, target);
27959 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27960 emit_move_insn (tmp, val);
27962 emit_move_insn (target, mem);
27966 void
27967 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27969 enum machine_mode mode = GET_MODE (vec);
27970 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27971 bool use_vec_extr = false;
27972 rtx tmp;
27974 switch (mode)
27976 case V2SImode:
27977 case V2SFmode:
27978 if (!mmx_ok)
27979 break;
27980 /* FALLTHRU */
27982 case V2DFmode:
27983 case V2DImode:
27984 use_vec_extr = true;
27985 break;
27987 case V4SFmode:
27988 use_vec_extr = TARGET_SSE4_1;
27989 if (use_vec_extr)
27990 break;
27992 switch (elt)
27994 case 0:
27995 tmp = vec;
27996 break;
27998 case 1:
27999 case 3:
28000 tmp = gen_reg_rtx (mode);
28001 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28002 GEN_INT (elt), GEN_INT (elt),
28003 GEN_INT (elt+4), GEN_INT (elt+4)));
28004 break;
28006 case 2:
28007 tmp = gen_reg_rtx (mode);
28008 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
28009 break;
28011 default:
28012 gcc_unreachable ();
28014 vec = tmp;
28015 use_vec_extr = true;
28016 elt = 0;
28017 break;
28019 case V4SImode:
28020 use_vec_extr = TARGET_SSE4_1;
28021 if (use_vec_extr)
28022 break;
28024 if (TARGET_SSE2)
28026 switch (elt)
28028 case 0:
28029 tmp = vec;
28030 break;
28032 case 1:
28033 case 3:
28034 tmp = gen_reg_rtx (mode);
28035 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28036 GEN_INT (elt), GEN_INT (elt),
28037 GEN_INT (elt), GEN_INT (elt)));
28038 break;
28040 case 2:
28041 tmp = gen_reg_rtx (mode);
28042 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
28043 break;
28045 default:
28046 gcc_unreachable ();
28048 vec = tmp;
28049 use_vec_extr = true;
28050 elt = 0;
28052 else
28054 /* For SSE1, we have to reuse the V4SF code. */
28055 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28056 gen_lowpart (V4SFmode, vec), elt);
28057 return;
28059 break;
28061 case V8HImode:
28062 use_vec_extr = TARGET_SSE2;
28063 break;
28064 case V4HImode:
28065 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28066 break;
28068 case V16QImode:
28069 use_vec_extr = TARGET_SSE4_1;
28070 break;
28072 case V8QImode:
28073 /* ??? Could extract the appropriate HImode element and shift. */
28074 default:
28075 break;
28078 if (use_vec_extr)
28080 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28081 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28083 /* Let the rtl optimizers know about the zero extension performed. */
28084 if (inner_mode == QImode || inner_mode == HImode)
28086 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28087 target = gen_lowpart (SImode, target);
28090 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28092 else
28094 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28096 emit_move_insn (mem, vec);
28098 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28099 emit_move_insn (target, tmp);
28103 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28104 pattern to reduce; DEST is the destination; IN is the input vector. */
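/* Illustrative note: for IN = {a, b, c, d} (element 0 listed first) the
   movhlps produces TMP1 = {c, d, c, d}, so element 0 of TMP2 = FN (TMP1, IN)
   is FN (a, c) and element 1 is FN (b, d).  The shufps then broadcasts
   element 1 of TMP2 into TMP3, and the final FN leaves the full reduction
   FN (FN (a, c), FN (b, d)) in element 0 of DEST, assuming FN is a
   commutative operation such as min, max or plus.  */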
28106 void
28107 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28109 rtx tmp1, tmp2, tmp3;
28111 tmp1 = gen_reg_rtx (V4SFmode);
28112 tmp2 = gen_reg_rtx (V4SFmode);
28113 tmp3 = gen_reg_rtx (V4SFmode);
28115 emit_insn (gen_sse_movhlps (tmp1, in, in));
28116 emit_insn (fn (tmp2, tmp1, in));
28118 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28119 const1_rtx, const1_rtx,
28120 GEN_INT (1+4), GEN_INT (1+4)));
28121 emit_insn (fn (dest, tmp2, tmp3));
28124 /* Target hook for scalar_mode_supported_p. */
28125 static bool
28126 ix86_scalar_mode_supported_p (enum machine_mode mode)
28128 if (DECIMAL_FLOAT_MODE_P (mode))
28129 return default_decimal_float_supported_p ();
28130 else if (mode == TFmode)
28131 return true;
28132 else
28133 return default_scalar_mode_supported_p (mode);
28136 /* Implements target hook vector_mode_supported_p. */
28137 static bool
28138 ix86_vector_mode_supported_p (enum machine_mode mode)
28140 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28141 return true;
28142 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28143 return true;
28144 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28145 return true;
28146 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28147 return true;
28148 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28149 return true;
28150 return false;
28153 /* Target hook for c_mode_for_suffix. */
28154 static enum machine_mode
28155 ix86_c_mode_for_suffix (char suffix)
28157 if (suffix == 'q')
28158 return TFmode;
28159 if (suffix == 'w')
28160 return XFmode;
28162 return VOIDmode;
28165 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28167 We do this in the new i386 backend to maintain source compatibility
28168 with the old cc0-based compiler. */
28170 static tree
28171 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28172 tree inputs ATTRIBUTE_UNUSED,
28173 tree clobbers)
28175 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28176 clobbers);
28177 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28178 clobbers);
28179 return clobbers;
28182 /* Implements the target hook targetm.asm.encode_section_info.  This is not
28183    used by NetWare.  */
28185 static void ATTRIBUTE_UNUSED
28186 ix86_encode_section_info (tree decl, rtx rtl, int first)
28188 default_encode_section_info (decl, rtl, first);
28190 if (TREE_CODE (decl) == VAR_DECL
28191 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28192 && ix86_in_large_data_p (decl))
28193 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28196 /* Worker function for REVERSE_CONDITION. */
28198 enum rtx_code
28199 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28201 return (mode != CCFPmode && mode != CCFPUmode
28202 ? reverse_condition (code)
28203 : reverse_condition_maybe_unordered (code));
28206 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28207 to OPERANDS[0]. */
28209 const char *
28210 output_387_reg_move (rtx insn, rtx *operands)
28212 if (REG_P (operands[0]))
28214 if (REG_P (operands[1])
28215 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28217 if (REGNO (operands[0]) == FIRST_STACK_REG)
28218 return output_387_ffreep (operands, 0);
28219 return "fstp\t%y0";
28221 if (STACK_TOP_P (operands[0]))
28222 return "fld%Z1\t%y1";
28223 return "fst\t%y0";
28225 else if (MEM_P (operands[0]))
28227 gcc_assert (REG_P (operands[1]));
28228 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28229 return "fstp%Z0\t%y0";
28230 else
28232 /* There is no non-popping store to memory for XFmode.
28233 So if we need one, follow the store with a load. */
28234 if (GET_MODE (operands[0]) == XFmode)
28235 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28236 else
28237 return "fst%Z0\t%y0";
28240 else
28241 gcc_unreachable();
28244 /* Output code to perform a conditional jump to LABEL, if C2 flag in
28245 FP status register is set. */
28247 void
28248 ix86_emit_fp_unordered_jump (rtx label)
28250 rtx reg = gen_reg_rtx (HImode);
28251 rtx temp;
28253 emit_insn (gen_x86_fnstsw_1 (reg));
28255 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28257 emit_insn (gen_x86_sahf_1 (reg));
28259 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28260 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28262 else
28264 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28266 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28267 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28270 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28271 gen_rtx_LABEL_REF (VOIDmode, label),
28272 pc_rtx);
28273 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28275 emit_jump_insn (temp);
28276 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28279 /* Output code to perform a log1p XFmode calculation. */
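/* Illustrative note: when |op1| is below 1 - sqrt(2)/2 (about 0.292893),
   the range in which the x87 fyl2xp1 instruction is accurate, the result is
   computed as ln(2) * log2(op1 + 1) via fyl2xp1; otherwise 1 is added
   explicitly and fyl2x is used.  The fldln2 constant supplies the ln(2)
   factor so the result is a natural logarithm.  */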
28281 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28283 rtx label1 = gen_label_rtx ();
28284 rtx label2 = gen_label_rtx ();
28286 rtx tmp = gen_reg_rtx (XFmode);
28287 rtx tmp2 = gen_reg_rtx (XFmode);
28288 rtx test;
28290 emit_insn (gen_absxf2 (tmp, op1));
28291 test = gen_rtx_GE (VOIDmode, tmp,
28292 CONST_DOUBLE_FROM_REAL_VALUE (
28293 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28294 XFmode));
28295 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28297 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28298 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28299 emit_jump (label2);
28301 emit_label (label1);
28302 emit_move_insn (tmp, CONST1_RTX (XFmode));
28303 emit_insn (gen_addxf3 (tmp, op1, tmp));
28304 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28305 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28307 emit_label (label2);
28310 /* Output code to perform a Newton-Raphson approximation of a single precision
28311    floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm].  */
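/* Illustrative note: with x0 = rcp(b), an approximation of 1/b accurate to
   roughly 12 bits, one Newton-Raphson step for the reciprocal is
   x1 = x0 * (2 - b * x0), which approximately doubles the number of correct
   bits.  The sequence below computes a / b as (a * x0) * (2 - b * x0),
   i.e. e0 * x1.  */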
28313 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28315 rtx x0, x1, e0, e1, two;
28317 x0 = gen_reg_rtx (mode);
28318 e0 = gen_reg_rtx (mode);
28319 e1 = gen_reg_rtx (mode);
28320 x1 = gen_reg_rtx (mode);
28322 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28324 if (VECTOR_MODE_P (mode))
28325 two = ix86_build_const_vector (SFmode, true, two);
28327 two = force_reg (mode, two);
28329 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28331 /* x0 = rcp(b) estimate */
28332 emit_insn (gen_rtx_SET (VOIDmode, x0,
28333 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28334 UNSPEC_RCP)));
28335 /* e0 = x0 * a */
28336 emit_insn (gen_rtx_SET (VOIDmode, e0,
28337 gen_rtx_MULT (mode, x0, a)));
28338 /* e1 = x0 * b */
28339 emit_insn (gen_rtx_SET (VOIDmode, e1,
28340 gen_rtx_MULT (mode, x0, b)));
28341 /* x1 = 2. - e1 */
28342 emit_insn (gen_rtx_SET (VOIDmode, x1,
28343 gen_rtx_MINUS (mode, two, e1)));
28344 /* res = e0 * x1 */
28345 emit_insn (gen_rtx_SET (VOIDmode, res,
28346 gen_rtx_MULT (mode, e0, x1)));
28349 /* Output code to perform a Newton-Raphson approximation of a
28350    single precision floating point [reciprocal] square root.  */
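/* Illustrative note: with x0 = rsqrt(a) as the hardware estimate, one
   Newton-Raphson step for 1/sqrt(a) is x1 = 0.5 * x0 * (3 - a * x0 * x0).
   The code below rewrites this as -0.5 * x0 * (a * x0 * x0 - 3), and for
   the sqrt case uses sqrt(a) = a * rsqrt(a), giving
   -0.5 * (a * x0) * (a * x0 * x0 - 3); e0..e3 hold the intermediate
   products.  */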
28352 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28353 bool recip)
28355 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28356 REAL_VALUE_TYPE r;
28358 x0 = gen_reg_rtx (mode);
28359 e0 = gen_reg_rtx (mode);
28360 e1 = gen_reg_rtx (mode);
28361 e2 = gen_reg_rtx (mode);
28362 e3 = gen_reg_rtx (mode);
28364 real_from_integer (&r, VOIDmode, -3, -1, 0);
28365 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28367 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28368 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28370 if (VECTOR_MODE_P (mode))
28372 mthree = ix86_build_const_vector (SFmode, true, mthree);
28373 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28376 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28377 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28379 /* x0 = rsqrt(a) estimate */
28380 emit_insn (gen_rtx_SET (VOIDmode, x0,
28381 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28382 UNSPEC_RSQRT)));
28384 /* If a == 0.0, mask out the infinite rsqrt estimate to prevent NaN for sqrt (0.0). */
28385 if (!recip)
28387 rtx zero, mask;
28389 zero = gen_reg_rtx (mode);
28390 mask = gen_reg_rtx (mode);
28392 zero = force_reg (mode, CONST0_RTX(mode));
28393 emit_insn (gen_rtx_SET (VOIDmode, mask,
28394 gen_rtx_NE (mode, zero, a)));
28396 emit_insn (gen_rtx_SET (VOIDmode, x0,
28397 gen_rtx_AND (mode, x0, mask)));
28400 /* e0 = x0 * a */
28401 emit_insn (gen_rtx_SET (VOIDmode, e0,
28402 gen_rtx_MULT (mode, x0, a)));
28403 /* e1 = e0 * x0 */
28404 emit_insn (gen_rtx_SET (VOIDmode, e1,
28405 gen_rtx_MULT (mode, e0, x0)));
28407 /* e2 = e1 - 3. */
28408 mthree = force_reg (mode, mthree);
28409 emit_insn (gen_rtx_SET (VOIDmode, e2,
28410 gen_rtx_PLUS (mode, e1, mthree)));
28412 mhalf = force_reg (mode, mhalf);
28413 if (recip)
28414 /* e3 = -.5 * x0 */
28415 emit_insn (gen_rtx_SET (VOIDmode, e3,
28416 gen_rtx_MULT (mode, x0, mhalf)));
28417 else
28418 /* e3 = -.5 * e0 */
28419 emit_insn (gen_rtx_SET (VOIDmode, e3,
28420 gen_rtx_MULT (mode, e0, mhalf)));
28421 /* ret = e2 * e3 */
28422 emit_insn (gen_rtx_SET (VOIDmode, res,
28423 gen_rtx_MULT (mode, e2, e3)));
28426 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28428 static void ATTRIBUTE_UNUSED
28429 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28430 tree decl)
28432 /* With Binutils 2.15, the "@unwind" marker must be specified on
28433 every occurrence of the ".eh_frame" section, not just the first
28434 one. */
28435 if (TARGET_64BIT
28436 && strcmp (name, ".eh_frame") == 0)
28438 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28439 flags & SECTION_WRITE ? "aw" : "a");
28440 return;
28442 default_elf_asm_named_section (name, flags, decl);
28445 /* Return the mangling of TYPE if it is an extended fundamental type. */
28447 static const char *
28448 ix86_mangle_type (const_tree type)
28450 type = TYPE_MAIN_VARIANT (type);
28452 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28453 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28454 return NULL;
28456 switch (TYPE_MODE (type))
28458 case TFmode:
28459 /* __float128 is "g". */
28460 return "g";
28461 case XFmode:
28462 /* "long double" or __float80 is "e". */
28463 return "e";
28464 default:
28465 return NULL;
28469 /* For 32-bit code we can save PIC register setup by using
28470 __stack_chk_fail_local hidden function instead of calling
28471 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28472 register, so it is better to call __stack_chk_fail directly. */
28474 static tree
28475 ix86_stack_protect_fail (void)
28477 return TARGET_64BIT
28478 ? default_external_stack_protect_fail ()
28479 : default_hidden_stack_protect_fail ();
28482 /* Select a format to encode pointers in exception handling data. CODE
28483 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28484 true if the symbol may be affected by dynamic relocations.
28486 ??? All x86 object file formats are capable of representing this.
28487 After all, the relocation needed is the same as for the call insn.
28488 Whether or not a particular assembler allows us to enter such, I
28489 guess we'll have to see. */
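/* For example, 32-bit PIC code gets DW_EH_PE_pcrel | DW_EH_PE_sdata4
(0x1b), with DW_EH_PE_indirect added (0x9b) when the symbol may be
dynamically relocated; non-PIC small-model code uses DW_EH_PE_udata4.  */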
28490 int
28491 asm_preferred_eh_data_format (int code, int global)
28493 if (flag_pic)
28495 int type = DW_EH_PE_sdata8;
28496 if (!TARGET_64BIT
28497 || ix86_cmodel == CM_SMALL_PIC
28498 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28499 type = DW_EH_PE_sdata4;
28500 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28502 if (ix86_cmodel == CM_SMALL
28503 || (ix86_cmodel == CM_MEDIUM && code))
28504 return DW_EH_PE_udata4;
28505 return DW_EH_PE_absptr;
28508 /* Expand copysign from SIGN to the positive value ABS_VALUE
28509 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
28510 the sign-bit. */
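/* I.e. RESULT = ABS_VALUE | (SIGN & sign-bit-mask): ABS_VALUE is assumed
to have a clear sign bit, so OR-ing in the extracted sign bit of SIGN
implements copysign.  A caller-supplied MASK is the fabs mask (sign bit
clear), hence the NOT applied to it below.  */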
28511 static void
28512 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28514 enum machine_mode mode = GET_MODE (sign);
28515 rtx sgn = gen_reg_rtx (mode);
28516 if (mask == NULL_RTX)
28518 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28519 if (!VECTOR_MODE_P (mode))
28521 /* We need to generate a scalar mode mask in this case. */
28522 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28523 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28524 mask = gen_reg_rtx (mode);
28525 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28528 else
28529 mask = gen_rtx_NOT (mode, mask);
28530 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28531 gen_rtx_AND (mode, mask, sign)));
28532 emit_insn (gen_rtx_SET (VOIDmode, result,
28533 gen_rtx_IOR (mode, abs_value, sgn)));
28536 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28537 mask for masking out the sign-bit is stored in *SMASK, if that is
28538 non-null. */
28539 static rtx
28540 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28542 enum machine_mode mode = GET_MODE (op0);
28543 rtx xa, mask;
28545 xa = gen_reg_rtx (mode);
28546 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28547 if (!VECTOR_MODE_P (mode))
28549 /* We need to generate a scalar mode mask in this case. */
28550 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28551 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28552 mask = gen_reg_rtx (mode);
28553 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28555 emit_insn (gen_rtx_SET (VOIDmode, xa,
28556 gen_rtx_AND (mode, op0, mask)));
28558 if (smask)
28559 *smask = mask;
28561 return xa;
28564 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28565 swapping the operands if SWAP_OPERANDS is true. The expanded
28566 code is a forward jump to a newly created label in case the
28567 comparison is true. The generated label rtx is returned. */
28568 static rtx
28569 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28570 bool swap_operands)
28572 rtx label, tmp;
28574 if (swap_operands)
28576 tmp = op0;
28577 op0 = op1;
28578 op1 = tmp;
28581 label = gen_label_rtx ();
28582 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28583 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28584 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28585 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28586 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28587 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28588 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28589 JUMP_LABEL (tmp) = label;
28591 return label;
28594 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28595 using comparison code CODE. Operands are swapped for the comparison if
28596 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28597 static rtx
28598 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28599 bool swap_operands)
28601 enum machine_mode mode = GET_MODE (op0);
28602 rtx mask = gen_reg_rtx (mode);
28604 if (swap_operands)
28606 rtx tmp = op0;
28607 op0 = op1;
28608 op1 = tmp;
28611 if (mode == DFmode)
28612 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28613 gen_rtx_fmt_ee (code, mode, op0, op1)));
28614 else
28615 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28616 gen_rtx_fmt_ee (code, mode, op0, op1)));
28618 return mask;
28621 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28622 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
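/* Adding and then subtracting this constant rounds a nonnegative value
smaller than it to an integer, because the addition pushes all fraction
bits out of the significand; e.g. for DFmode, 3.7 + 2**52 rounds to
2**52 + 4.0 under the default round-to-nearest mode, and subtracting
2**52 leaves 4.0.  The expanders below rely on this trick.  */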
28623 static rtx
28624 ix86_gen_TWO52 (enum machine_mode mode)
28626 REAL_VALUE_TYPE TWO52r;
28627 rtx TWO52;
28629 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28630 TWO52 = const_double_from_real_value (TWO52r, mode);
28631 TWO52 = force_reg (mode, TWO52);
28633 return TWO52;
28636 /* Expand SSE sequence for computing lround from OP1 storing
28637 into OP0. */
28638 void
28639 ix86_expand_lround (rtx op0, rtx op1)
28641 /* C code for the stuff we're doing below:
28642 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28643 return (long)tmp;
28645 enum machine_mode mode = GET_MODE (op1);
28646 const struct real_format *fmt;
28647 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28648 rtx adj;
28650 /* load nextafter (0.5, 0.0) */
28651 fmt = REAL_MODE_FORMAT (mode);
28652 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28653 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
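/* fmt->p is the precision (53 for DFmode, 24 for SFmode), so pred_half is
0.5 - 2**(-p-1), the largest representable value strictly below 0.5.
Adding it instead of exactly 0.5 keeps arguments just below 0.5 from
rounding up to 1 in the addition that follows.  */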
28655 /* adj = copysign (0.5, op1) */
28656 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28657 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28659 /* adj = op1 + adj */
28660 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28662 /* op0 = (imode)adj */
28663 expand_fix (op0, adj, 0);
28666 /* Expand SSE2 sequence for computing lfloor or lceil from OP1 storing
28667 into OP0. */
28668 void
28669 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28671 /* C code for the stuff we're doing below (for do_floor):
28672 xi = (long)op1;
28673 xi -= (double)xi > op1 ? 1 : 0;
28674 return xi;
28676 enum machine_mode fmode = GET_MODE (op1);
28677 enum machine_mode imode = GET_MODE (op0);
28678 rtx ireg, freg, label, tmp;
28680 /* reg = (long)op1 */
28681 ireg = gen_reg_rtx (imode);
28682 expand_fix (ireg, op1, 0);
28684 /* freg = (double)reg */
28685 freg = gen_reg_rtx (fmode);
28686 expand_float (freg, ireg, 0);
28688 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28689 label = ix86_expand_sse_compare_and_jump (UNLE,
28690 freg, op1, !do_floor);
28691 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28692 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28693 emit_move_insn (ireg, tmp);
28695 emit_label (label);
28696 LABEL_NUSES (label) = 1;
28698 emit_move_insn (op0, ireg);
28701 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28702 result in OPERAND0. */
28703 void
28704 ix86_expand_rint (rtx operand0, rtx operand1)
28706 /* C code for the stuff we're doing below:
28707 xa = fabs (operand1);
28708 if (!isless (xa, 2**52))
28709 return operand1;
28710 xa = xa + 2**52 - 2**52;
28711 return copysign (xa, operand1);
28713 enum machine_mode mode = GET_MODE (operand0);
28714 rtx res, xa, label, TWO52, mask;
28716 res = gen_reg_rtx (mode);
28717 emit_move_insn (res, operand1);
28719 /* xa = abs (operand1) */
28720 xa = ix86_expand_sse_fabs (res, &mask);
28722 /* if (!isless (xa, TWO52)) goto label; */
28723 TWO52 = ix86_gen_TWO52 (mode);
28724 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28726 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28727 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28729 ix86_sse_copysign_to_positive (res, xa, res, mask);
28731 emit_label (label);
28732 LABEL_NUSES (label) = 1;
28734 emit_move_insn (operand0, res);
28737 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28738 into OPERAND0. */
28739 void
28740 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28742 /* C code for the stuff we expand below.
28743 double xa = fabs (x), x2;
28744 if (!isless (xa, TWO52))
28745 return x;
28746 xa = xa + TWO52 - TWO52;
28747 x2 = copysign (xa, x);
28748 Compensate. Floor:
28749 if (x2 > x)
28750 x2 -= 1;
28751 Compensate. Ceil:
28752 if (x2 < x)
28753 x2 -= -1;
28754 return x2;
28756 enum machine_mode mode = GET_MODE (operand0);
28757 rtx xa, TWO52, tmp, label, one, res, mask;
28759 TWO52 = ix86_gen_TWO52 (mode);
28761 /* Temporary for holding the result, initialized to the input
28762 operand to ease control flow. */
28763 res = gen_reg_rtx (mode);
28764 emit_move_insn (res, operand1);
28766 /* xa = abs (operand1) */
28767 xa = ix86_expand_sse_fabs (res, &mask);
28769 /* if (!isless (xa, TWO52)) goto label; */
28770 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28772 /* xa = xa + TWO52 - TWO52; */
28773 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28774 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28776 /* xa = copysign (xa, operand1) */
28777 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28779 /* generate 1.0 or -1.0 */
28780 one = force_reg (mode,
28781 const_double_from_real_value (do_floor
28782 ? dconst1 : dconstm1, mode));
28784 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28785 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28786 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28787 gen_rtx_AND (mode, one, tmp)));
28788 /* We always need to subtract here to preserve signed zero. */
28789 tmp = expand_simple_binop (mode, MINUS,
28790 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28791 emit_move_insn (res, tmp);
28793 emit_label (label);
28794 LABEL_NUSES (label) = 1;
28796 emit_move_insn (operand0, res);
28799 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28800 into OPERAND0. */
28801 void
28802 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28804 /* C code for the stuff we expand below.
28805 double xa = fabs (x), x2;
28806 if (!isless (xa, TWO52))
28807 return x;
28808 x2 = (double)(long)x;
28809 Compensate. Floor:
28810 if (x2 > x)
28811 x2 -= 1;
28812 Compensate. Ceil:
28813 if (x2 < x)
28814 x2 += 1;
28815 if (HONOR_SIGNED_ZEROS (mode))
28816 return copysign (x2, x);
28817 return x2;
28819 enum machine_mode mode = GET_MODE (operand0);
28820 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28822 TWO52 = ix86_gen_TWO52 (mode);
28824 /* Temporary for holding the result, initialized to the input
28825 operand to ease control flow. */
28826 res = gen_reg_rtx (mode);
28827 emit_move_insn (res, operand1);
28829 /* xa = abs (operand1) */
28830 xa = ix86_expand_sse_fabs (res, &mask);
28832 /* if (!isless (xa, TWO52)) goto label; */
28833 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28835 /* xa = (double)(long)x */
28836 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28837 expand_fix (xi, res, 0);
28838 expand_float (xa, xi, 0);
28840 /* generate 1.0 */
28841 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28843 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28844 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28845 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28846 gen_rtx_AND (mode, one, tmp)));
28847 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28848 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28849 emit_move_insn (res, tmp);
28851 if (HONOR_SIGNED_ZEROS (mode))
28852 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28854 emit_label (label);
28855 LABEL_NUSES (label) = 1;
28857 emit_move_insn (operand0, res);
28860 /* Expand SSE sequence for computing round from OPERAND1 storing
28861 into OPERAND0. Sequence that works without relying on DImode truncation
28862 via cvttsd2siq that is only available on 64bit targets. */
28863 void
28864 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28866 /* C code for the stuff we expand below.
28867 double xa = fabs (x), xa2, x2;
28868 if (!isless (xa, TWO52))
28869 return x;
28870 Using the absolute value and copying back sign makes
28871 -0.0 -> -0.0 correct.
28872 xa2 = xa + TWO52 - TWO52;
28873 Compensate.
28874 dxa = xa2 - xa;
28875 if (dxa <= -0.5)
28876 xa2 += 1;
28877 else if (dxa > 0.5)
28878 xa2 -= 1;
28879 x2 = copysign (xa2, x);
28880 return x2;
28882 enum machine_mode mode = GET_MODE (operand0);
28883 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28885 TWO52 = ix86_gen_TWO52 (mode);
28887 /* Temporary for holding the result, initialized to the input
28888 operand to ease control flow. */
28889 res = gen_reg_rtx (mode);
28890 emit_move_insn (res, operand1);
28892 /* xa = abs (operand1) */
28893 xa = ix86_expand_sse_fabs (res, &mask);
28895 /* if (!isless (xa, TWO52)) goto label; */
28896 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28898 /* xa2 = xa + TWO52 - TWO52; */
28899 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28900 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28902 /* dxa = xa2 - xa; */
28903 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28905 /* generate 0.5, 1.0 and -0.5 */
28906 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28907 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28908 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28909 0, OPTAB_DIRECT);
28911 /* Compensate. */
28912 tmp = gen_reg_rtx (mode);
28913 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28914 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28915 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28916 gen_rtx_AND (mode, one, tmp)));
28917 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28918 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28919 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28920 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28921 gen_rtx_AND (mode, one, tmp)));
28922 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28924 /* res = copysign (xa2, operand1) */
28925 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28927 emit_label (label);
28928 LABEL_NUSES (label) = 1;
28930 emit_move_insn (operand0, res);
28933 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28934 into OPERAND0. */
28935 void
28936 ix86_expand_trunc (rtx operand0, rtx operand1)
28938 /* C code for SSE variant we expand below.
28939 double xa = fabs (x), x2;
28940 if (!isless (xa, TWO52))
28941 return x;
28942 x2 = (double)(long)x;
28943 if (HONOR_SIGNED_ZEROS (mode))
28944 return copysign (x2, x);
28945 return x2;
28947 enum machine_mode mode = GET_MODE (operand0);
28948 rtx xa, xi, TWO52, label, res, mask;
28950 TWO52 = ix86_gen_TWO52 (mode);
28952 /* Temporary for holding the result, initialized to the input
28953 operand to ease control flow. */
28954 res = gen_reg_rtx (mode);
28955 emit_move_insn (res, operand1);
28957 /* xa = abs (operand1) */
28958 xa = ix86_expand_sse_fabs (res, &mask);
28960 /* if (!isless (xa, TWO52)) goto label; */
28961 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28963 /* x = (double)(long)x */
28964 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28965 expand_fix (xi, res, 0);
28966 expand_float (res, xi, 0);
28968 if (HONOR_SIGNED_ZEROS (mode))
28969 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28971 emit_label (label);
28972 LABEL_NUSES (label) = 1;
28974 emit_move_insn (operand0, res);
28977 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28978 into OPERAND0. */
28979 void
28980 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28982 enum machine_mode mode = GET_MODE (operand0);
28983 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28985 /* C code for SSE variant we expand below.
28986 double xa = fabs (x), x2;
28987 if (!isless (xa, TWO52))
28988 return x;
28989 xa2 = xa + TWO52 - TWO52;
28990 Compensate:
28991 if (xa2 > xa)
28992 xa2 -= 1.0;
28993 x2 = copysign (xa2, x);
28994 return x2;
28997 TWO52 = ix86_gen_TWO52 (mode);
28999 /* Temporary for holding the result, initialized to the input
29000 operand to ease control flow. */
29001 res = gen_reg_rtx (mode);
29002 emit_move_insn (res, operand1);
29004 /* xa = abs (operand1) */
29005 xa = ix86_expand_sse_fabs (res, &smask);
29007 /* if (!isless (xa, TWO52)) goto label; */
29008 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29010 /* res = xa + TWO52 - TWO52; */
29011 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29012 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29013 emit_move_insn (res, tmp);
29015 /* generate 1.0 */
29016 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29018 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29019 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29020 emit_insn (gen_rtx_SET (VOIDmode, mask,
29021 gen_rtx_AND (mode, mask, one)));
29022 tmp = expand_simple_binop (mode, MINUS,
29023 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29024 emit_move_insn (res, tmp);
29026 /* res = copysign (res, operand1) */
29027 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29029 emit_label (label);
29030 LABEL_NUSES (label) = 1;
29032 emit_move_insn (operand0, res);
29035 /* Expand SSE sequence for computing round from OPERAND1 storing
29036 into OPERAND0. */
29037 void
29038 ix86_expand_round (rtx operand0, rtx operand1)
29040 /* C code for the stuff we're doing below:
29041 double xa = fabs (x);
29042 if (!isless (xa, TWO52))
29043 return x;
29044 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29045 return copysign (xa, x);
29047 enum machine_mode mode = GET_MODE (operand0);
29048 rtx res, TWO52, xa, label, xi, half, mask;
29049 const struct real_format *fmt;
29050 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29052 /* Temporary for holding the result, initialized to the input
29053 operand to ease control flow. */
29054 res = gen_reg_rtx (mode);
29055 emit_move_insn (res, operand1);
29057 TWO52 = ix86_gen_TWO52 (mode);
29058 xa = ix86_expand_sse_fabs (res, &mask);
29059 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29061 /* load nextafter (0.5, 0.0) */
29062 fmt = REAL_MODE_FORMAT (mode);
29063 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29064 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29066 /* xa = xa + 0.5 */
29067 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29068 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29070 /* xa = (double)(int64_t)xa */
29071 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29072 expand_fix (xi, xa, 0);
29073 expand_float (xa, xi, 0);
29075 /* res = copysign (xa, operand1) */
29076 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29078 emit_label (label);
29079 LABEL_NUSES (label) = 1;
29081 emit_move_insn (operand0, res);
29085 /* Table of valid machine attributes. */
29086 static const struct attribute_spec ix86_attribute_table[] =
29088 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29089 /* Stdcall attribute says callee is responsible for popping arguments
29090 if they are not variable. */
29091 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29092 /* Fastcall attribute says callee is responsible for popping arguments
29093 if they are not variable. */
29094 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29095 /* Thiscall attribute says callee is responsible for popping arguments
29096 if they are not variable. */
29097 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29098 /* Cdecl attribute says the callee is a normal C declaration */
29099 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29100 /* Regparm attribute specifies how many integer arguments are to be
29101 passed in registers. */
29102 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29103 /* Sseregparm attribute says we are using x86_64 calling conventions
29104 for FP arguments. */
29105 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29106 /* force_align_arg_pointer says this function realigns the stack at entry. */
29107 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29108 false, true, true, ix86_handle_cconv_attribute },
29109 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29110 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29111 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29112 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29113 #endif
29114 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29115 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29116 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29117 SUBTARGET_ATTRIBUTE_TABLE,
29118 #endif
29119 /* ms_abi and sysv_abi calling convention function attributes. */
29120 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29121 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29122 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29123 /* End element. */
29124 { NULL, 0, 0, false, false, false, NULL }
29127 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29128 static int
29129 ix86_builtin_vectorization_cost (bool runtime_test)
29131 /* If the branch of the runtime test is taken, i.e. the vectorized
29132 version is skipped, this incurs a misprediction cost (because the
29133 vectorized version is expected to be the fall-through). So we subtract
29134 the latency of a mispredicted branch from the costs that are incurred
29135 when the vectorized version is executed.
29137 TODO: The values in individual target tables have to be tuned or new
29138 fields may be needed. E.g., on K8, the default branch path is the
29139 not-taken path. If the taken path is predicted correctly, the minimum
29140 penalty of going down the taken-path is 1 cycle. If the taken-path is
29141 not predicted correctly, then the minimum penalty is 10 cycles. */
29143 if (runtime_test)
29145 return (-(ix86_cost->cond_taken_branch_cost));
29147 else
29148 return 0;
29151 /* Implement targetm.vectorize.builtin_vec_perm. */
29153 static tree
29154 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29156 tree itype = TREE_TYPE (vec_type);
29157 bool u = TYPE_UNSIGNED (itype);
29158 enum machine_mode vmode = TYPE_MODE (vec_type);
29159 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29160 bool ok = TARGET_SSE2;
29162 switch (vmode)
29164 case V4DFmode:
29165 ok = TARGET_AVX;
29166 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29167 goto get_di;
29168 case V2DFmode:
29169 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29170 get_di:
29171 itype = ix86_get_builtin_type (IX86_BT_DI);
29172 break;
29174 case V8SFmode:
29175 ok = TARGET_AVX;
29176 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29177 goto get_si;
29178 case V4SFmode:
29179 ok = TARGET_SSE;
29180 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29181 get_si:
29182 itype = ix86_get_builtin_type (IX86_BT_SI);
29183 break;
29185 case V2DImode:
29186 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29187 break;
29188 case V4SImode:
29189 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29190 break;
29191 case V8HImode:
29192 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29193 break;
29194 case V16QImode:
29195 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29196 break;
29197 default:
29198 ok = false;
29199 break;
29202 if (!ok)
29203 return NULL_TREE;
29205 *mask_type = itype;
29206 return ix86_builtins[(int) fcode];
29209 /* Return a vector mode with twice as many elements as VMODE. */
29210 /* ??? Consider moving this to a table generated by genmodes.c. */
29212 static enum machine_mode
29213 doublesize_vector_mode (enum machine_mode vmode)
29215 switch (vmode)
29217 case V2SFmode: return V4SFmode;
29218 case V1DImode: return V2DImode;
29219 case V2SImode: return V4SImode;
29220 case V4HImode: return V8HImode;
29221 case V8QImode: return V16QImode;
29223 case V2DFmode: return V4DFmode;
29224 case V4SFmode: return V8SFmode;
29225 case V2DImode: return V4DImode;
29226 case V4SImode: return V8SImode;
29227 case V8HImode: return V16HImode;
29228 case V16QImode: return V32QImode;
29230 case V4DFmode: return V8DFmode;
29231 case V8SFmode: return V16SFmode;
29232 case V4DImode: return V8DImode;
29233 case V8SImode: return V16SImode;
29234 case V16HImode: return V32HImode;
29235 case V32QImode: return V64QImode;
29237 default:
29238 gcc_unreachable ();
29242 /* Construct (set target (vec_select op0 (parallel perm))) and
29243 return true if that's a valid instruction in the active ISA. */
29245 static bool
29246 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29248 rtx rperm[MAX_VECT_LEN], x;
29249 unsigned i;
29251 for (i = 0; i < nelt; ++i)
29252 rperm[i] = GEN_INT (perm[i]);
29254 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29255 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29256 x = gen_rtx_SET (VOIDmode, target, x);
29258 x = emit_insn (x);
29259 if (recog_memoized (x) < 0)
29261 remove_insn (x);
29262 return false;
29264 return true;
29267 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29269 static bool
29270 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29271 const unsigned char *perm, unsigned nelt)
29273 enum machine_mode v2mode;
29274 rtx x;
29276 v2mode = doublesize_vector_mode (GET_MODE (op0));
29277 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29278 return expand_vselect (target, x, perm, nelt);
29281 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29282 in terms of blendp[sd] / pblendw / pblendvb. */
29284 static bool
29285 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29287 enum machine_mode vmode = d->vmode;
29288 unsigned i, mask, nelt = d->nelt;
29289 rtx target, op0, op1, x;
29291 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29292 return false;
29293 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29294 return false;
29296 /* This is a blend, not a permute. Elements must stay in their
29297 respective lanes. */
29298 for (i = 0; i < nelt; ++i)
29300 unsigned e = d->perm[i];
29301 if (!(e == i || e == i + nelt))
29302 return false;
29305 if (d->testing_p)
29306 return true;
29308 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29309 decision should be extracted elsewhere, so that we only try that
29310 sequence once all budget==3 options have been tried. */
29312 /* For bytes, see if bytes move in pairs so we can use pblendw with
29313 an immediate argument, rather than pblendvb with a vector argument. */
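/* Since this is a blend, perm[i] is either i or i + 16; the pair test
below holds exactly when both bytes of each word come from the same
operand, e.g. { 0, 1, 18, 19, 4, 5, ... } is fine for pblendw while
{ 0, 17, ... } needs pblendvb.  */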
29314 if (vmode == V16QImode)
29316 bool pblendw_ok = true;
29317 for (i = 0; i < 16 && pblendw_ok; i += 2)
29318 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29320 if (!pblendw_ok)
29322 rtx rperm[16], vperm;
29324 for (i = 0; i < nelt; ++i)
29325 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29327 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29328 vperm = force_reg (V16QImode, vperm);
29330 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29331 return true;
29335 target = d->target;
29336 op0 = d->op0;
29337 op1 = d->op1;
29338 mask = 0;
29340 switch (vmode)
29342 case V4DFmode:
29343 case V8SFmode:
29344 case V2DFmode:
29345 case V4SFmode:
29346 case V8HImode:
29347 for (i = 0; i < nelt; ++i)
29348 mask |= (d->perm[i] >= nelt) << i;
29349 break;
29351 case V2DImode:
29352 for (i = 0; i < 2; ++i)
29353 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29354 goto do_subreg;
29356 case V4SImode:
29357 for (i = 0; i < 4; ++i)
29358 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29359 goto do_subreg;
29361 case V16QImode:
29362 for (i = 0; i < 8; ++i)
29363 mask |= (d->perm[i * 2] >= 16) << i;
29365 do_subreg:
29366 vmode = V8HImode;
29367 target = gen_lowpart (vmode, target);
29368 op0 = gen_lowpart (vmode, op0);
29369 op1 = gen_lowpart (vmode, op1);
29370 break;
29372 default:
29373 gcc_unreachable ();
29376 /* This matches five different patterns with the different modes. */
29377 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29378 x = gen_rtx_SET (VOIDmode, target, x);
29379 emit_insn (x);
29381 return true;
29384 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29385 in terms of the variable form of vpermilps.
29387 Note that we will have already failed the immediate input vpermilps,
29388 which requires that the high and low part shuffle be identical; the
29389 variable form doesn't require that. */
29391 static bool
29392 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29394 rtx rperm[8], vperm;
29395 unsigned i;
29397 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29398 return false;
29400 /* We can only permute within the 128-bit lane. */
29401 for (i = 0; i < 8; ++i)
29403 unsigned e = d->perm[i];
29404 if (i < 4 ? e >= 4 : e < 4)
29405 return false;
29408 if (d->testing_p)
29409 return true;
29411 for (i = 0; i < 8; ++i)
29413 unsigned e = d->perm[i];
29415 /* Within each 128-bit lane, the elements of op0 are numbered
29416 from 0 and the elements of op1 are numbered from 4. */
29417 if (e >= 8 + 4)
29418 e -= 8;
29419 else if (e >= 4)
29420 e -= 4;
29422 rperm[i] = GEN_INT (e);
29425 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29426 vperm = force_reg (V8SImode, vperm);
29427 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29429 return true;
29432 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29433 in terms of pshufb or vpperm. */
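/* The element permutation is widened to a byte selector: e.g. a V4SImode
permutation { 2, 0, 3, 1 } becomes the 16-byte mask
{ 8,9,10,11, 0,1,2,3, 12,13,14,15, 4,5,6,7 } fed to pshufb (one operand)
or vpperm (two operands, with indices 16-31 selecting from op1).  */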
29435 static bool
29436 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29438 unsigned i, nelt, eltsz;
29439 rtx rperm[16], vperm, target, op0, op1;
29441 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29442 return false;
29443 if (GET_MODE_SIZE (d->vmode) != 16)
29444 return false;
29446 if (d->testing_p)
29447 return true;
29449 nelt = d->nelt;
29450 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29452 for (i = 0; i < nelt; ++i)
29454 unsigned j, e = d->perm[i];
29455 for (j = 0; j < eltsz; ++j)
29456 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29459 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29460 vperm = force_reg (V16QImode, vperm);
29462 target = gen_lowpart (V16QImode, d->target);
29463 op0 = gen_lowpart (V16QImode, d->op0);
29464 if (d->op0 == d->op1)
29465 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29466 else
29468 op1 = gen_lowpart (V16QImode, d->op1);
29469 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29472 return true;
29475 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29476 in a single instruction. */
29478 static bool
29479 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29481 unsigned i, nelt = d->nelt;
29482 unsigned char perm2[MAX_VECT_LEN];
29484 /* Check plain VEC_SELECT first, because AVX has instructions that could
29485 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29486 input where SEL+CONCAT may not. */
29487 if (d->op0 == d->op1)
29489 int mask = nelt - 1;
29491 for (i = 0; i < nelt; i++)
29492 perm2[i] = d->perm[i] & mask;
29494 if (expand_vselect (d->target, d->op0, perm2, nelt))
29495 return true;
29497 /* There are plenty of patterns in sse.md that are written for
29498 SEL+CONCAT and are not replicated for a single op. Perhaps
29499 that should be changed, to avoid the nastiness here. */
29501 /* Recognize interleave style patterns, which means incrementing
29502 every other permutation operand. */
29503 for (i = 0; i < nelt; i += 2)
29505 perm2[i] = d->perm[i] & mask;
29506 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29508 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29509 return true;
29511 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29512 if (nelt >= 4)
29514 for (i = 0; i < nelt; i += 4)
29516 perm2[i + 0] = d->perm[i + 0] & mask;
29517 perm2[i + 1] = d->perm[i + 1] & mask;
29518 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29519 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29522 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29523 return true;
29527 /* Finally, try the fully general two operand permute. */
29528 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29529 return true;
29531 /* Recognize interleave style patterns with reversed operands. */
29532 if (d->op0 != d->op1)
29534 for (i = 0; i < nelt; ++i)
29536 unsigned e = d->perm[i];
29537 if (e >= nelt)
29538 e -= nelt;
29539 else
29540 e += nelt;
29541 perm2[i] = e;
29544 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29545 return true;
29548 /* Try the SSE4.1 blend variable merge instructions. */
29549 if (expand_vec_perm_blend (d))
29550 return true;
29552 /* Try one of the AVX vpermil variable permutations. */
29553 if (expand_vec_perm_vpermil (d))
29554 return true;
29556 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29557 if (expand_vec_perm_pshufb (d))
29558 return true;
29560 return false;
29563 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29564 in terms of a pair of pshuflw + pshufhw instructions. */
29566 static bool
29567 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29569 unsigned char perm2[MAX_VECT_LEN];
29570 unsigned i;
29571 bool ok;
29573 if (d->vmode != V8HImode || d->op0 != d->op1)
29574 return false;
29576 /* The two permutations only operate in 64-bit lanes. */
29577 for (i = 0; i < 4; ++i)
29578 if (d->perm[i] >= 4)
29579 return false;
29580 for (i = 4; i < 8; ++i)
29581 if (d->perm[i] < 4)
29582 return false;
29584 if (d->testing_p)
29585 return true;
29587 /* Emit the pshuflw. */
29588 memcpy (perm2, d->perm, 4);
29589 for (i = 4; i < 8; ++i)
29590 perm2[i] = i;
29591 ok = expand_vselect (d->target, d->op0, perm2, 8);
29592 gcc_assert (ok);
29594 /* Emit the pshufhw. */
29595 memcpy (perm2 + 4, d->perm + 4, 4);
29596 for (i = 0; i < 4; ++i)
29597 perm2[i] = i;
29598 ok = expand_vselect (d->target, d->target, perm2, 8);
29599 gcc_assert (ok);
29601 return true;
29604 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29605 the permutation using the SSSE3 palignr instruction. This succeeds
29606 when all of the elements in PERM fit within one vector and we merely
29607 need to shift them down so that a single vector permutation has a
29608 chance to succeed. */
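/* For example, a V4SImode permutation { 2, 3, 4, 5 } has min == 2, so the
palignr below shifts the op1:op0 concatenation down by 8 bytes, leaving
{ op0[2], op0[3], op1[0], op1[1] }; the residual permutation { 0, 1, 2, 3 }
is then in order and nothing more needs to be emitted.  */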
29610 static bool
29611 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29613 unsigned i, nelt = d->nelt;
29614 unsigned min, max;
29615 bool in_order, ok;
29616 rtx shift;
29618 /* Even with AVX, palignr only operates on 128-bit vectors. */
29619 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29620 return false;
29622 min = nelt, max = 0;
29623 for (i = 0; i < nelt; ++i)
29625 unsigned e = d->perm[i];
29626 if (e < min)
29627 min = e;
29628 if (e > max)
29629 max = e;
29631 if (min == 0 || max - min >= nelt)
29632 return false;
29634 /* Given that we have SSSE3, we know we'll be able to implement the
29635 single operand permutation after the palignr with pshufb. */
29636 if (d->testing_p)
29637 return true;
29639 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29640 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29641 gen_lowpart (TImode, d->op1),
29642 gen_lowpart (TImode, d->op0), shift));
29644 d->op0 = d->op1 = d->target;
29646 in_order = true;
29647 for (i = 0; i < nelt; ++i)
29649 unsigned e = d->perm[i] - min;
29650 if (e != i)
29651 in_order = false;
29652 d->perm[i] = e;
29655 /* Test for the degenerate case where the alignment by itself
29656 produces the desired permutation. */
29657 if (in_order)
29658 return true;
29660 ok = expand_vec_perm_1 (d);
29661 gcc_assert (ok);
29663 return ok;
29666 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29667 a two vector permutation into a single vector permutation by using
29668 an interleave operation to merge the vectors. */
29670 static bool
29671 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29673 struct expand_vec_perm_d dremap, dfinal;
29674 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29675 unsigned contents, h1, h2, h3, h4;
29676 unsigned char remap[2 * MAX_VECT_LEN];
29677 rtx seq;
29678 bool ok;
29680 if (d->op0 == d->op1)
29681 return false;
29683 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29684 lanes. We can use similar techniques with the vperm2f128 instruction,
29685 but it requires slightly different logic. */
29686 if (GET_MODE_SIZE (d->vmode) != 16)
29687 return false;
29689 /* Examine from whence the elements come. */
29690 contents = 0;
29691 for (i = 0; i < nelt; ++i)
29692 contents |= 1u << d->perm[i];
29694 /* Split the two input vectors into 4 halves. */
29695 h1 = (1u << nelt2) - 1;
29696 h2 = h1 << nelt2;
29697 h3 = h2 << nelt2;
29698 h4 = h3 << nelt2;
29700 memset (remap, 0xff, sizeof (remap));
29701 dremap = *d;
29703 /* If all elements come from the low halves, use interleave low; likewise
29704 use interleave high when they all come from the high halves. If the elements
29705 are from mismatched halves, we can use shufps for V4SF/V4SI or do a DImode shuffle. */
29706 if ((contents & (h1 | h3)) == contents)
29708 for (i = 0; i < nelt2; ++i)
29710 remap[i] = i * 2;
29711 remap[i + nelt] = i * 2 + 1;
29712 dremap.perm[i * 2] = i;
29713 dremap.perm[i * 2 + 1] = i + nelt;
29716 else if ((contents & (h2 | h4)) == contents)
29718 for (i = 0; i < nelt2; ++i)
29720 remap[i + nelt2] = i * 2;
29721 remap[i + nelt + nelt2] = i * 2 + 1;
29722 dremap.perm[i * 2] = i + nelt2;
29723 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29726 else if ((contents & (h1 | h4)) == contents)
29728 for (i = 0; i < nelt2; ++i)
29730 remap[i] = i;
29731 remap[i + nelt + nelt2] = i + nelt2;
29732 dremap.perm[i] = i;
29733 dremap.perm[i + nelt2] = i + nelt + nelt2;
29735 if (nelt != 4)
29737 dremap.vmode = V2DImode;
29738 dremap.nelt = 2;
29739 dremap.perm[0] = 0;
29740 dremap.perm[1] = 3;
29743 else if ((contents & (h2 | h3)) == contents)
29745 for (i = 0; i < nelt2; ++i)
29747 remap[i + nelt2] = i;
29748 remap[i + nelt] = i + nelt2;
29749 dremap.perm[i] = i + nelt2;
29750 dremap.perm[i + nelt2] = i + nelt;
29752 if (nelt != 4)
29754 dremap.vmode = V2DImode;
29755 dremap.nelt = 2;
29756 dremap.perm[0] = 1;
29757 dremap.perm[1] = 2;
29760 else
29761 return false;
29763 /* Use the remapping array set up above to move the elements from their
29764 swizzled locations into their final destinations. */
29765 dfinal = *d;
29766 for (i = 0; i < nelt; ++i)
29768 unsigned e = remap[d->perm[i]];
29769 gcc_assert (e < nelt);
29770 dfinal.perm[i] = e;
29772 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29773 dfinal.op1 = dfinal.op0;
29774 dremap.target = dfinal.op0;
29776 /* Test if the final remap can be done with a single insn. For V4SFmode or
29777 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29778 start_sequence ();
29779 ok = expand_vec_perm_1 (&dfinal);
29780 seq = get_insns ();
29781 end_sequence ();
29783 if (!ok)
29784 return false;
29786 if (dremap.vmode != dfinal.vmode)
29788 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29789 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29790 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29793 ok = expand_vec_perm_1 (&dremap);
29794 gcc_assert (ok);
29796 emit_insn (seq);
29797 return true;
29800 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29801 permutation with two pshufb insns and an ior. We should have already
29802 failed all two instruction sequences. */
29804 static bool
29805 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29807 rtx rperm[2][16], vperm, l, h, op, m128;
29808 unsigned int i, nelt, eltsz;
29810 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29811 return false;
29812 gcc_assert (d->op0 != d->op1);
29814 nelt = d->nelt;
29815 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29817 /* Generate two permutation masks. If the required element is within
29818 the given vector it is shuffled into the proper lane. If the required
29819 element is in the other vector, force a zero into the lane by setting
29820 bit 7 in the permutation mask. */
29821 m128 = GEN_INT (-128);
29822 for (i = 0; i < nelt; ++i)
29824 unsigned j, e = d->perm[i];
29825 unsigned which = (e >= nelt);
29826 if (e >= nelt)
29827 e -= nelt;
29829 for (j = 0; j < eltsz; ++j)
29831 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29832 rperm[1-which][i*eltsz + j] = m128;
29836 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
29837 vperm = force_reg (V16QImode, vperm);
29839 l = gen_reg_rtx (V16QImode);
29840 op = gen_lowpart (V16QImode, d->op0);
29841 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
29843 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
29844 vperm = force_reg (V16QImode, vperm);
29846 h = gen_reg_rtx (V16QImode);
29847 op = gen_lowpart (V16QImode, d->op1);
29848 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
29850 op = gen_lowpart (V16QImode, d->target);
29851 emit_insn (gen_iorv16qi3 (op, l, h));
29853 return true;
29856 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
29857 and extract-odd permutations. */
29859 static bool
29860 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
29862 rtx t1, t2, t3, t4;
29864 switch (d->vmode)
29866 case V4DFmode:
29867 t1 = gen_reg_rtx (V4DFmode);
29868 t2 = gen_reg_rtx (V4DFmode);
29870 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
29871 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
29872 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
29874 /* Now an unpck[lh]pd will produce the result required. */
29875 if (odd)
29876 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
29877 else
29878 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
29879 emit_insn (t3);
29880 break;
29882 case V8SFmode:
29884 static const unsigned char perm1[8] = { 0, 2, 1, 3, 5, 6, 5, 7 };
29885 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
29886 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
29888 t1 = gen_reg_rtx (V8SFmode);
29889 t2 = gen_reg_rtx (V8SFmode);
29890 t3 = gen_reg_rtx (V8SFmode);
29891 t4 = gen_reg_rtx (V8SFmode);
29893 /* Shuffle within the 128-bit lanes to produce:
29894 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
29895 expand_vselect (t1, d->op0, perm1, 8);
29896 expand_vselect (t2, d->op1, perm1, 8);
29898 /* Shuffle the lanes around to produce:
29899 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
29900 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
29901 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
29903 /* Now a vpermil2p will produce the result required. */
29904 /* ??? The vpermil2p requires a vector constant. Another option
29905 is a unpck[lh]ps to merge the two vectors to produce
29906 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
29907 vpermilps to get the elements into the final order. */
29908 d->op0 = t3;
29909 d->op1 = t4;
29910 memcpy (d->perm, odd ? permo: perme, 8);
29911 expand_vec_perm_vpermil (d);
29913 break;
29915 case V2DFmode:
29916 case V4SFmode:
29917 case V2DImode:
29918 case V4SImode:
29919 /* These are always directly implementable by expand_vec_perm_1. */
29920 gcc_unreachable ();
29922 case V8HImode:
29923 if (TARGET_SSSE3)
29924 return expand_vec_perm_pshufb2 (d);
29925 else
29927 /* We need 2*log2(N)-1 operations to achieve odd/even
29928 with interleave. */
29929 t1 = gen_reg_rtx (V8HImode);
29930 t2 = gen_reg_rtx (V8HImode);
29931 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
29932 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
29933 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
29934 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
29935 if (odd)
29936 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
29937 else
29938 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
29939 emit_insn (t3);
29941 break;
29943 case V16QImode:
29944 if (TARGET_SSSE3)
29945 return expand_vec_perm_pshufb2 (d);
29946 else
29948 t1 = gen_reg_rtx (V16QImode);
29949 t2 = gen_reg_rtx (V16QImode);
29950 t3 = gen_reg_rtx (V16QImode);
29951 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
29952 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
29953 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
29954 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
29955 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
29956 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
29957 if (odd)
29958 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
29959 else
29960 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
29961 emit_insn (t3);
29963 break;
29965 default:
29966 gcc_unreachable ();
29969 return true;
29972 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29973 extract-even and extract-odd permutations. */
29975 static bool
29976 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
29978 unsigned i, odd, nelt = d->nelt;
29980 odd = d->perm[0];
29981 if (odd != 0 && odd != 1)
29982 return false;
29984 for (i = 1; i < nelt; ++i)
29985 if (d->perm[i] != 2 * i + odd)
29986 return false;
29988 return expand_vec_perm_even_odd_1 (d, odd);
29991 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
29992 permutations. We assume that expand_vec_perm_1 has already failed. */
29994 static bool
29995 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
29997 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
29998 enum machine_mode vmode = d->vmode;
29999 unsigned char perm2[4];
30000 rtx op0 = d->op0;
30001 bool ok;
30003 switch (vmode)
30005 case V4DFmode:
30006 case V8SFmode:
30007 /* These are special-cased in sse.md so that we can optionally
30008 use the vbroadcast instruction. They expand to two insns
30009 if the input happens to be in a register. */
30010 gcc_unreachable ();
30012 case V2DFmode:
30013 case V2DImode:
30014 case V4SFmode:
30015 case V4SImode:
30016 /* These are always implementable using standard shuffle patterns. */
30017 gcc_unreachable ();
30019 case V8HImode:
30020 case V16QImode:
30021 /* These can be implemented via interleave. We save one insn by
30022 stopping once we have promoted to V4SImode and then use pshufd. */
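/* For example, broadcasting element 3 of a V8HImode vector: one
vec_interleave_low with itself yields { a0 a0 a1 a1 a2 a2 a3 a3 },
which viewed as V4SImode has the a3 pair in element 3, so a single
pshufd { 3, 3, 3, 3 } finishes the broadcast.  */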
30025 optab otab = vec_interleave_low_optab;
30027 if (elt >= nelt2)
30029 otab = vec_interleave_high_optab;
30030 elt -= nelt2;
30032 nelt2 /= 2;
30034 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
30035 vmode = get_mode_wider_vector (vmode);
30036 op0 = gen_lowpart (vmode, op0);
30038 while (vmode != V4SImode);
30040 memset (perm2, elt, 4);
30041 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
30042 gcc_assert (ok);
30043 return true;
30045 default:
30046 gcc_unreachable ();
30050 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30051 broadcast permutations. */
30053 static bool
30054 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
30056 unsigned i, elt, nelt = d->nelt;
30058 if (d->op0 != d->op1)
30059 return false;
30061 elt = d->perm[0];
30062 for (i = 1; i < nelt; ++i)
30063 if (d->perm[i] != elt)
30064 return false;
30066 return expand_vec_perm_broadcast_1 (d);
30069 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
30070 With all of the interface bits taken care of, perform the expansion
30071 in D and return true on success. */
30073 static bool
30074 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
30076 /* Try a single instruction expansion. */
30077 if (expand_vec_perm_1 (d))
30078 return true;
30080 /* Try sequences of two instructions. */
30082 if (expand_vec_perm_pshuflw_pshufhw (d))
30083 return true;
30085 if (expand_vec_perm_palignr (d))
30086 return true;
30088 if (expand_vec_perm_interleave2 (d))
30089 return true;
30091 if (expand_vec_perm_broadcast (d))
30092 return true;
30094 /* Try sequences of three instructions. */
30096 if (expand_vec_perm_pshufb2 (d))
30097 return true;
30099 /* ??? Look for narrow permutations whose element orderings would
30100 allow the promotion to a wider mode. */
30102 /* ??? Look for sequences of interleave or a wider permute that place
30103 the data into the correct lanes for a half-vector shuffle like
30104 pshuf[lh]w or vpermilps. */
30106 /* ??? Look for sequences of interleave that produce the desired results.
30107 The combinatorics of punpck[lh] get pretty ugly... */
30109 if (expand_vec_perm_even_odd (d))
30110 return true;
30112 return false;
30115 /* Extract the values from the vector CST into the permutation array in D.
30116 Return 0 on error, 1 if all values from the permutation come from the
30117 first vector, 2 if all values from the second vector, and 3 otherwise. */
30119 static int
30120 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
30122 tree list = TREE_VECTOR_CST_ELTS (cst);
30123 unsigned i, nelt = d->nelt;
30124 int ret = 0;
30126 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
30128 unsigned HOST_WIDE_INT e;
30130 if (!host_integerp (TREE_VALUE (list), 1))
30131 return 0;
30132 e = tree_low_cst (TREE_VALUE (list), 1);
30133 if (e >= 2 * nelt)
30134 return 0;
30136 ret |= (e < nelt ? 1 : 2);
30137 d->perm[i] = e;
30139 gcc_assert (list == NULL);
30141 /* For all elements from second vector, fold the elements to first. */
30142 if (ret == 2)
30143 for (i = 0; i < nelt; ++i)
30144 d->perm[i] -= nelt;
30146 return ret;
30149 static rtx
30150 ix86_expand_vec_perm_builtin (tree exp)
30152 struct expand_vec_perm_d d;
30153 tree arg0, arg1, arg2;
30155 arg0 = CALL_EXPR_ARG (exp, 0);
30156 arg1 = CALL_EXPR_ARG (exp, 1);
30157 arg2 = CALL_EXPR_ARG (exp, 2);
30159 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30160 d.nelt = GET_MODE_NUNITS (d.vmode);
30161 d.testing_p = false;
30162 gcc_assert (VECTOR_MODE_P (d.vmode));
30164 if (TREE_CODE (arg2) != VECTOR_CST)
30166 error_at (EXPR_LOCATION (exp),
30167 "vector permutation requires vector constant");
30168 goto exit_error;
30171 switch (extract_vec_perm_cst (&d, arg2))
30173 default:
30174 gcc_unreachable();
30176 case 0:
30177 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30178 goto exit_error;
30180 case 3:
30181 if (!operand_equal_p (arg0, arg1, 0))
30183 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30184 d.op0 = force_reg (d.vmode, d.op0);
30185 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30186 d.op1 = force_reg (d.vmode, d.op1);
30187 break;
30190 /* The elements of PERM do not suggest that only the first operand
30191 is used, but both operands are identical. Allow easier matching
30192 of the permutation by folding the permutation into the single
30193 input vector. */
30195 unsigned i, nelt = d.nelt;
30196 for (i = 0; i < nelt; ++i)
30197 if (d.perm[i] >= nelt)
30198 d.perm[i] -= nelt;
30200 /* FALLTHRU */
30202 case 1:
30203 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30204 d.op0 = force_reg (d.vmode, d.op0);
30205 d.op1 = d.op0;
30206 break;
30208 case 2:
30209 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30210 d.op0 = force_reg (d.vmode, d.op0);
30211 d.op1 = d.op0;
30212 break;
30215 d.target = gen_reg_rtx (d.vmode);
30216 if (ix86_expand_vec_perm_builtin_1 (&d))
30217 return d.target;
30219 /* For compiler generated permutations, we should never get here, because
30220 the compiler should also be checking the ok hook. But since this is a
30221 builtin that the user has access to, don't abort. */
30222 switch (d.nelt)
30224 case 2:
30225 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30226 break;
30227 case 4:
30228 sorry ("vector permutation (%d %d %d %d)",
30229 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30230 break;
30231 case 8:
30232 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30233 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30234 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30235 break;
30236 case 16:
30237 sorry ("vector permutation "
30238 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30239 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30240 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30241 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30242 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30243 break;
30244 default:
30245 gcc_unreachable ();
30247 exit_error:
30248 return CONST0_RTX (d.vmode);
30251 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
30253 static bool
30254 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
30256 struct expand_vec_perm_d d;
30257 int vec_mask;
30258 bool ret, one_vec;
30260 d.vmode = TYPE_MODE (vec_type);
30261 d.nelt = GET_MODE_NUNITS (d.vmode);
30262 d.testing_p = true;
30264 /* Given sufficient ISA support we can just return true here
30265 for selected vector modes. */
30266 if (GET_MODE_SIZE (d.vmode) == 16)
30268 /* All implementable with a single vpperm insn. */
30269 if (TARGET_XOP)
30270 return true;
30271 /* All implementable with 2 pshufb + 1 ior. */
30272 if (TARGET_SSSE3)
30273 return true;
30274 /* All implementable with shufpd or unpck[lh]pd. */
30275 if (d.nelt == 2)
30276 return true;
30279 vec_mask = extract_vec_perm_cst (&d, mask);
30281 /* This hook cannot be called in response to something that the
30282    user does (unlike the builtin expander), so we should never see
30283    an error generated from the extract.  */
30284 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30285 one_vec = (vec_mask != 3);
30287 /* Implementable with shufps or pshufd. */
30288 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30289 return true;
30291 /* Otherwise we have to go through the motions and see if we can
30292 figure out how to generate the requested permutation. */
30293 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30294 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30295 if (!one_vec)
30296 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30298 start_sequence ();
30299 ret = ix86_expand_vec_perm_builtin_1 (&d);
30300 end_sequence ();
30302 return ret;
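/* Illustrative examples of the vec_mask classification used above, for
   V4SImode (NELT == 4); the selector constants are hypothetical:

     {0, 4, 1, 5}  -> elements taken from both operands  -> vec_mask == 3
     {2, 0, 3, 1}  -> elements taken from OP0 only        -> vec_mask == 1
     {6, 4, 7, 5}  -> elements taken from OP1 only        -> vec_mask == 2

   ONE_VEC is true for the last two, which enables the single-operand
   fast paths such as pshufd.  */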
30305 void
30306 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
30308 struct expand_vec_perm_d d;
30309 unsigned i, nelt;
30311 d.target = targ;
30312 d.op0 = op0;
30313 d.op1 = op1;
30314 d.vmode = GET_MODE (targ);
30315 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
30316 d.testing_p = false;
30318 for (i = 0; i < nelt; ++i)
30319 d.perm[i] = i * 2 + odd;
30321 /* We'll either be able to implement the permutation directly... */
30322 if (expand_vec_perm_1 (&d))
30323 return;
30325 /* ... or we use the special-case patterns. */
30326 expand_vec_perm_even_odd_1 (&d, odd);
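/* A worked example of the permutation built above: for V8HImode
   (NELT == 8) and ODD == 1 the selector is

     { 1, 3, 5, 7, 9, 11, 13, 15 }

   i.e. the odd-numbered elements of the concatenated OP0/OP1 pair; with
   ODD == 0 it is { 0, 2, 4, 6, 8, 10, 12, 14 }, the even elements.  */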
30329 /* This function returns the calling-ABI-specific va_list type node,
30330    i.e. the va_list type appropriate for FNDECL. */
30332 tree
30333 ix86_fn_abi_va_list (tree fndecl)
30335 if (!TARGET_64BIT)
30336 return va_list_type_node;
30337 gcc_assert (fndecl != NULL_TREE);
30339 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
30340 return ms_va_list_type_node;
30341 else
30342 return sysv_va_list_type_node;
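/* As a hedged illustration of the hook above: on a 64-bit SysV target a
   function carrying the ms_abi attribute is given the MS flavour of
   va_list, so (assuming the usual attribute spelling) a declaration like

     void __attribute__((ms_abi)) f (int n, ...);

   sees __builtin_ms_va_list as its va_list type, while an unattributed
   function keeps __builtin_sysv_va_list.  */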
30345 /* Returns the canonical va_list type specified by TYPE.  If there
30346    is no valid TYPE provided, it returns NULL_TREE. */
30348 tree
30349 ix86_canonical_va_list_type (tree type)
30351 tree wtype, htype;
30353 /* Resolve references and pointers to va_list type. */
30354 if (INDIRECT_REF_P (type))
30355 type = TREE_TYPE (type);
30356 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
30357 type = TREE_TYPE (type);
30359 if (TARGET_64BIT)
30361 wtype = va_list_type_node;
30362 gcc_assert (wtype != NULL_TREE);
30363 htype = type;
30364 if (TREE_CODE (wtype) == ARRAY_TYPE)
30366 /* If va_list is an array type, the argument may have decayed
30367 to a pointer type, e.g. by being passed to another function.
30368 In that case, unwrap both types so that we can compare the
30369 underlying records. */
30370 if (TREE_CODE (htype) == ARRAY_TYPE
30371 || POINTER_TYPE_P (htype))
30373 wtype = TREE_TYPE (wtype);
30374 htype = TREE_TYPE (htype);
30377 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30378 return va_list_type_node;
30379 wtype = sysv_va_list_type_node;
30380 gcc_assert (wtype != NULL_TREE);
30381 htype = type;
30382 if (TREE_CODE (wtype) == ARRAY_TYPE)
30384 /* If va_list is an array type, the argument may have decayed
30385 to a pointer type, e.g. by being passed to another function.
30386 In that case, unwrap both types so that we can compare the
30387 underlying records. */
30388 if (TREE_CODE (htype) == ARRAY_TYPE
30389 || POINTER_TYPE_P (htype))
30391 wtype = TREE_TYPE (wtype);
30392 htype = TREE_TYPE (htype);
30395 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30396 return sysv_va_list_type_node;
30397 wtype = ms_va_list_type_node;
30398 gcc_assert (wtype != NULL_TREE);
30399 htype = type;
30400 if (TREE_CODE (wtype) == ARRAY_TYPE)
30402 /* If va_list is an array type, the argument may have decayed
30403 to a pointer type, e.g. by being passed to another function.
30404 In that case, unwrap both types so that we can compare the
30405 underlying records. */
30406 if (TREE_CODE (htype) == ARRAY_TYPE
30407 || POINTER_TYPE_P (htype))
30409 wtype = TREE_TYPE (wtype);
30410 htype = TREE_TYPE (htype);
30413 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30414 return ms_va_list_type_node;
30415 return NULL_TREE;
30417 return std_canonical_va_list_type (type);
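/* A small sketch of the decay case handled above: on 64-bit SysV,
   va_list is a one-element array of a record, so in

     void g (va_list ap);

   the parameter AP arrives as a pointer to that record.  Unwrapping
   both WTYPE and HTYPE one level lets the TYPE_MAIN_VARIANT comparison
   still recognise it as the SysV va_list.  */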
30420 /* Iterate through the target-specific builtin types for va_list.
30421    IDX is the iterator; *PTREE is set to the type node of the
30422    va_list variant, and *PNAME to its builtin name.
30423    Returns zero if there is no element for this index; otherwise
30424    IDX should be increased upon the next call.
30425    Note that the base builtin name __builtin_va_list is not
30426    enumerated here.  Used from c_common_nodes_and_builtins. */
30428 int
30429 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
30431 if (!TARGET_64BIT)
30432 return 0;
30433 switch (idx) {
30434 case 0:
30435 *ptree = ms_va_list_type_node;
30436 *pname = "__builtin_ms_va_list";
30437 break;
30438 case 1:
30439 *ptree = sysv_va_list_type_node;
30440 *pname = "__builtin_sysv_va_list";
30441 break;
30442 default:
30443 return 0;
30445 return 1;
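/* A sketch of the consumer side (the exact plumbing lives in
   c_common_nodes_and_builtins; the calling convention shown below is
   only an assumption):

     int i;
     const char *name;
     tree type;

     for (i = 0; ix86_enum_va_list (i, &name, &type); ++i)
       ... record NAME and TYPE as an extra builtin typedef ...

   The loop terminates as soon as the enumerator returns zero.  */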
30448 /* Initialize the GCC target structure. */
30449 #undef TARGET_RETURN_IN_MEMORY
30450 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
30452 #undef TARGET_LEGITIMIZE_ADDRESS
30453 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
30455 #undef TARGET_ATTRIBUTE_TABLE
30456 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
30457 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30458 # undef TARGET_MERGE_DECL_ATTRIBUTES
30459 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
30460 #endif
30462 #undef TARGET_COMP_TYPE_ATTRIBUTES
30463 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
30465 #undef TARGET_INIT_BUILTINS
30466 #define TARGET_INIT_BUILTINS ix86_init_builtins
30467 #undef TARGET_BUILTIN_DECL
30468 #define TARGET_BUILTIN_DECL ix86_builtin_decl
30469 #undef TARGET_EXPAND_BUILTIN
30470 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
30472 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
30473 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
30474 ix86_builtin_vectorized_function
30476 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
30477 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
30479 #undef TARGET_BUILTIN_RECIPROCAL
30480 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
30482 #undef TARGET_ASM_FUNCTION_EPILOGUE
30483 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
30485 #undef TARGET_ENCODE_SECTION_INFO
30486 #ifndef SUBTARGET_ENCODE_SECTION_INFO
30487 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
30488 #else
30489 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
30490 #endif
30492 #undef TARGET_ASM_OPEN_PAREN
30493 #define TARGET_ASM_OPEN_PAREN ""
30494 #undef TARGET_ASM_CLOSE_PAREN
30495 #define TARGET_ASM_CLOSE_PAREN ""
30497 #undef TARGET_ASM_BYTE_OP
30498 #define TARGET_ASM_BYTE_OP ASM_BYTE
30500 #undef TARGET_ASM_ALIGNED_HI_OP
30501 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
30502 #undef TARGET_ASM_ALIGNED_SI_OP
30503 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
30504 #ifdef ASM_QUAD
30505 #undef TARGET_ASM_ALIGNED_DI_OP
30506 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
30507 #endif
30509 #undef TARGET_ASM_UNALIGNED_HI_OP
30510 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
30511 #undef TARGET_ASM_UNALIGNED_SI_OP
30512 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
30513 #undef TARGET_ASM_UNALIGNED_DI_OP
30514 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
30516 #undef TARGET_SCHED_ADJUST_COST
30517 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
30518 #undef TARGET_SCHED_ISSUE_RATE
30519 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
30520 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
30521 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
30522 ia32_multipass_dfa_lookahead
30524 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
30525 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
30527 #ifdef HAVE_AS_TLS
30528 #undef TARGET_HAVE_TLS
30529 #define TARGET_HAVE_TLS true
30530 #endif
30531 #undef TARGET_CANNOT_FORCE_CONST_MEM
30532 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
30533 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
30534 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
30536 #undef TARGET_DELEGITIMIZE_ADDRESS
30537 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
30539 #undef TARGET_MS_BITFIELD_LAYOUT_P
30540 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
30542 #if TARGET_MACHO
30543 #undef TARGET_BINDS_LOCAL_P
30544 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
30545 #endif
30546 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30547 #undef TARGET_BINDS_LOCAL_P
30548 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
30549 #endif
30551 #undef TARGET_ASM_OUTPUT_MI_THUNK
30552 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
30553 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
30554 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
30556 #undef TARGET_ASM_FILE_START
30557 #define TARGET_ASM_FILE_START x86_file_start
30559 #undef TARGET_DEFAULT_TARGET_FLAGS
30560 #define TARGET_DEFAULT_TARGET_FLAGS \
30561 (TARGET_DEFAULT \
30562 | TARGET_SUBTARGET_DEFAULT \
30563 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
30564 | MASK_FUSED_MADD)
30566 #undef TARGET_HANDLE_OPTION
30567 #define TARGET_HANDLE_OPTION ix86_handle_option
30569 #undef TARGET_RTX_COSTS
30570 #define TARGET_RTX_COSTS ix86_rtx_costs
30571 #undef TARGET_ADDRESS_COST
30572 #define TARGET_ADDRESS_COST ix86_address_cost
30574 #undef TARGET_FIXED_CONDITION_CODE_REGS
30575 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
30576 #undef TARGET_CC_MODES_COMPATIBLE
30577 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
30579 #undef TARGET_MACHINE_DEPENDENT_REORG
30580 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
30582 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
30583 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
30585 #undef TARGET_BUILD_BUILTIN_VA_LIST
30586 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
30588 #undef TARGET_FN_ABI_VA_LIST
30589 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
30591 #undef TARGET_CANONICAL_VA_LIST_TYPE
30592 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
30594 #undef TARGET_EXPAND_BUILTIN_VA_START
30595 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
30597 #undef TARGET_MD_ASM_CLOBBERS
30598 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
30600 #undef TARGET_PROMOTE_PROTOTYPES
30601 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
30602 #undef TARGET_STRUCT_VALUE_RTX
30603 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
30604 #undef TARGET_SETUP_INCOMING_VARARGS
30605 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
30606 #undef TARGET_MUST_PASS_IN_STACK
30607 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
30608 #undef TARGET_PASS_BY_REFERENCE
30609 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
30610 #undef TARGET_INTERNAL_ARG_POINTER
30611 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
30612 #undef TARGET_UPDATE_STACK_BOUNDARY
30613 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
30614 #undef TARGET_GET_DRAP_RTX
30615 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
30616 #undef TARGET_STRICT_ARGUMENT_NAMING
30617 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
30618 #undef TARGET_STATIC_CHAIN
30619 #define TARGET_STATIC_CHAIN ix86_static_chain
30620 #undef TARGET_TRAMPOLINE_INIT
30621 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
30623 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
30624 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
30626 #undef TARGET_SCALAR_MODE_SUPPORTED_P
30627 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
30629 #undef TARGET_VECTOR_MODE_SUPPORTED_P
30630 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
30632 #undef TARGET_C_MODE_FOR_SUFFIX
30633 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
30635 #ifdef HAVE_AS_TLS
30636 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
30637 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
30638 #endif
30640 #ifdef SUBTARGET_INSERT_ATTRIBUTES
30641 #undef TARGET_INSERT_ATTRIBUTES
30642 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
30643 #endif
30645 #undef TARGET_MANGLE_TYPE
30646 #define TARGET_MANGLE_TYPE ix86_mangle_type
30648 #undef TARGET_STACK_PROTECT_FAIL
30649 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
30651 #undef TARGET_FUNCTION_VALUE
30652 #define TARGET_FUNCTION_VALUE ix86_function_value
30654 #undef TARGET_SECONDARY_RELOAD
30655 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
30657 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
30658 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
30659 ix86_builtin_vectorization_cost
30660 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
30661 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
30662 ix86_vectorize_builtin_vec_perm
30663 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
30664 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
30665 ix86_vectorize_builtin_vec_perm_ok
30667 #undef TARGET_SET_CURRENT_FUNCTION
30668 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
30670 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
30671 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
30673 #undef TARGET_OPTION_SAVE
30674 #define TARGET_OPTION_SAVE ix86_function_specific_save
30676 #undef TARGET_OPTION_RESTORE
30677 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
30679 #undef TARGET_OPTION_PRINT
30680 #define TARGET_OPTION_PRINT ix86_function_specific_print
30682 #undef TARGET_CAN_INLINE_P
30683 #define TARGET_CAN_INLINE_P ix86_can_inline_p
30685 #undef TARGET_EXPAND_TO_RTL_HOOK
30686 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
30688 #undef TARGET_LEGITIMATE_ADDRESS_P
30689 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
30691 #undef TARGET_IRA_COVER_CLASSES
30692 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
30694 #undef TARGET_FRAME_POINTER_REQUIRED
30695 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
30697 #undef TARGET_CAN_ELIMINATE
30698 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
30700 #undef TARGET_ASM_CODE_END
30701 #define TARGET_ASM_CODE_END ix86_code_end
30703 struct gcc_target targetm = TARGET_INITIALIZER;
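/* TARGET_INITIALIZER collects the overrides defined above, so
   target-independent code reaches them through targetm; an illustrative
   (not exhaustive) call site would be

     if (targetm.calls.return_in_memory (type, fntype))
       ...

   which ends up in ix86_return_in_memory via the
   TARGET_RETURN_IN_MEMORY definition above.  */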
30705 #include "gt-i386.h"