1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "gimple.h"
51 #include "dwarf2.h"
52 #include "df.h"
53 #include "tm-constrs.h"
54 #include "params.h"
55 #include "cselib.h"
57 static rtx legitimize_dllimport_symbol (rtx, bool);
59 #ifndef CHECK_STACK_LIMIT
60 #define CHECK_STACK_LIMIT (-1)
61 #endif
63 /* Return index of given mode in mult and division cost tables. */
64 #define MODE_INDEX(mode) \
65 ((mode) == QImode ? 0 \
66 : (mode) == HImode ? 1 \
67 : (mode) == SImode ? 2 \
68 : (mode) == DImode ? 3 \
69 : 4)
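/* Editor's illustrative sketch (not part of the original file): MODE_INDEX picks
   the per-mode entry out of the mult_init[] and divide[] arrays of struct
   processor_costs (declared in i386.h).  A multiply ends up priced roughly as

     total = ix86_cost->mult_init[MODE_INDEX (mode)]
             + nbits * ix86_cost->mult_bit;

   where nbits is the number of set bits in a constant multiplier; treat the
   exact expression as an approximation of what ix86_rtx_costs does.  */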
71 /* Processor costs (relative to an add) */
72 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
73 #define COSTS_N_BYTES(N) ((N) * 2)
75 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
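/* Editor's illustrative note (not part of the original file): the two
   stringop_algs initializers near the end of every cost table below describe
   the memcpy and memset expansion strategies; within each pair the first
   entry appears to apply to 32bit and the second to 64bit code (hence
   DUMMY_STRINGOP_ALGS where a table is only relevant for one of them).  Each
   {max, alg} element means "use ALG for blocks up to MAX bytes", with
   max == -1 terminating the list, e.g.

     {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}, ...}

   reads: unknown size -> libcall, up to 256 bytes -> rep movsl, anything
   larger -> libcall.  */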
77 const
78 struct processor_costs ix86_size_cost = {/* costs for tuning for size */
79 COSTS_N_BYTES (2), /* cost of an add instruction */
80 COSTS_N_BYTES (3), /* cost of a lea instruction */
81 COSTS_N_BYTES (2), /* variable shift costs */
82 COSTS_N_BYTES (3), /* constant shift costs */
83 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
84 COSTS_N_BYTES (3), /* HI */
85 COSTS_N_BYTES (3), /* SI */
86 COSTS_N_BYTES (3), /* DI */
87 COSTS_N_BYTES (5)}, /* other */
88 0, /* cost of multiply per each bit set */
89 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
90 COSTS_N_BYTES (3), /* HI */
91 COSTS_N_BYTES (3), /* SI */
92 COSTS_N_BYTES (3), /* DI */
93 COSTS_N_BYTES (5)}, /* other */
94 COSTS_N_BYTES (3), /* cost of movsx */
95 COSTS_N_BYTES (3), /* cost of movzx */
96 0, /* "large" insn */
97 2, /* MOVE_RATIO */
98 2, /* cost for loading QImode using movzbl */
99 {2, 2, 2}, /* cost of loading integer registers
100 in QImode, HImode and SImode.
101 Relative to reg-reg move (2). */
102 {2, 2, 2}, /* cost of storing integer registers */
103 2, /* cost of reg,reg fld/fst */
104 {2, 2, 2}, /* cost of loading fp registers
105 in SFmode, DFmode and XFmode */
106 {2, 2, 2}, /* cost of storing fp registers
107 in SFmode, DFmode and XFmode */
108 3, /* cost of moving MMX register */
109 {3, 3}, /* cost of loading MMX registers
110 in SImode and DImode */
111 {3, 3}, /* cost of storing MMX registers
112 in SImode and DImode */
113 3, /* cost of moving SSE register */
114 {3, 3, 3}, /* cost of loading SSE registers
115 in SImode, DImode and TImode */
116 {3, 3, 3}, /* cost of storing SSE registers
117 in SImode, DImode and TImode */
118 3, /* MMX or SSE register to integer */
119 0, /* size of l1 cache */
120 0, /* size of l2 cache */
121 0, /* size of prefetch block */
122 0, /* number of parallel prefetches */
123 2, /* Branch cost */
124 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
125 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
126 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
127 COSTS_N_BYTES (2), /* cost of FABS instruction. */
128 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
129 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
130 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
131 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
132 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
133 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
134 1, /* scalar_stmt_cost. */
135 1, /* scalar load_cost. */
136 1, /* scalar_store_cost. */
137 1, /* vec_stmt_cost. */
138 1, /* vec_to_scalar_cost. */
139 1, /* scalar_to_vec_cost. */
140 1, /* vec_align_load_cost. */
141 1, /* vec_unalign_load_cost. */
142 1, /* vec_store_cost. */
143 1, /* cond_taken_branch_cost. */
144 1, /* cond_not_taken_branch_cost. */
145 };
147 /* Processor costs (relative to an add) */
148 static const
149 struct processor_costs i386_cost = { /* 386 specific costs */
150 COSTS_N_INSNS (1), /* cost of an add instruction */
151 COSTS_N_INSNS (1), /* cost of a lea instruction */
152 COSTS_N_INSNS (3), /* variable shift costs */
153 COSTS_N_INSNS (2), /* constant shift costs */
154 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
155 COSTS_N_INSNS (6), /* HI */
156 COSTS_N_INSNS (6), /* SI */
157 COSTS_N_INSNS (6), /* DI */
158 COSTS_N_INSNS (6)}, /* other */
159 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
160 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
161 COSTS_N_INSNS (23), /* HI */
162 COSTS_N_INSNS (23), /* SI */
163 COSTS_N_INSNS (23), /* DI */
164 COSTS_N_INSNS (23)}, /* other */
165 COSTS_N_INSNS (3), /* cost of movsx */
166 COSTS_N_INSNS (2), /* cost of movzx */
167 15, /* "large" insn */
168 3, /* MOVE_RATIO */
169 4, /* cost for loading QImode using movzbl */
170 {2, 4, 2}, /* cost of loading integer registers
171 in QImode, HImode and SImode.
172 Relative to reg-reg move (2). */
173 {2, 4, 2}, /* cost of storing integer registers */
174 2, /* cost of reg,reg fld/fst */
175 {8, 8, 8}, /* cost of loading fp registers
176 in SFmode, DFmode and XFmode */
177 {8, 8, 8}, /* cost of storing fp registers
178 in SFmode, DFmode and XFmode */
179 2, /* cost of moving MMX register */
180 {4, 8}, /* cost of loading MMX registers
181 in SImode and DImode */
182 {4, 8}, /* cost of storing MMX registers
183 in SImode and DImode */
184 2, /* cost of moving SSE register */
185 {4, 8, 16}, /* cost of loading SSE registers
186 in SImode, DImode and TImode */
187 {4, 8, 16}, /* cost of storing SSE registers
188 in SImode, DImode and TImode */
189 3, /* MMX or SSE register to integer */
190 0, /* size of l1 cache */
191 0, /* size of l2 cache */
192 0, /* size of prefetch block */
193 0, /* number of parallel prefetches */
194 1, /* Branch cost */
195 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
196 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
197 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
198 COSTS_N_INSNS (22), /* cost of FABS instruction. */
199 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
200 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
201 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
202 DUMMY_STRINGOP_ALGS},
203 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
204 DUMMY_STRINGOP_ALGS},
205 1, /* scalar_stmt_cost. */
206 1, /* scalar load_cost. */
207 1, /* scalar_store_cost. */
208 1, /* vec_stmt_cost. */
209 1, /* vec_to_scalar_cost. */
210 1, /* scalar_to_vec_cost. */
211 1, /* vec_align_load_cost. */
212 2, /* vec_unalign_load_cost. */
213 1, /* vec_store_cost. */
214 3, /* cond_taken_branch_cost. */
215 1, /* cond_not_taken_branch_cost. */
216 };
218 static const
219 struct processor_costs i486_cost = { /* 486 specific costs */
220 COSTS_N_INSNS (1), /* cost of an add instruction */
221 COSTS_N_INSNS (1), /* cost of a lea instruction */
222 COSTS_N_INSNS (3), /* variable shift costs */
223 COSTS_N_INSNS (2), /* constant shift costs */
224 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
225 COSTS_N_INSNS (12), /* HI */
226 COSTS_N_INSNS (12), /* SI */
227 COSTS_N_INSNS (12), /* DI */
228 COSTS_N_INSNS (12)}, /* other */
229 1, /* cost of multiply per each bit set */
230 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
231 COSTS_N_INSNS (40), /* HI */
232 COSTS_N_INSNS (40), /* SI */
233 COSTS_N_INSNS (40), /* DI */
234 COSTS_N_INSNS (40)}, /* other */
235 COSTS_N_INSNS (3), /* cost of movsx */
236 COSTS_N_INSNS (2), /* cost of movzx */
237 15, /* "large" insn */
238 3, /* MOVE_RATIO */
239 4, /* cost for loading QImode using movzbl */
240 {2, 4, 2}, /* cost of loading integer registers
241 in QImode, HImode and SImode.
242 Relative to reg-reg move (2). */
243 {2, 4, 2}, /* cost of storing integer registers */
244 2, /* cost of reg,reg fld/fst */
245 {8, 8, 8}, /* cost of loading fp registers
246 in SFmode, DFmode and XFmode */
247 {8, 8, 8}, /* cost of storing fp registers
248 in SFmode, DFmode and XFmode */
249 2, /* cost of moving MMX register */
250 {4, 8}, /* cost of loading MMX registers
251 in SImode and DImode */
252 {4, 8}, /* cost of storing MMX registers
253 in SImode and DImode */
254 2, /* cost of moving SSE register */
255 {4, 8, 16}, /* cost of loading SSE registers
256 in SImode, DImode and TImode */
257 {4, 8, 16}, /* cost of storing SSE registers
258 in SImode, DImode and TImode */
259 3, /* MMX or SSE register to integer */
260 4, /* size of l1 cache. 486 has 8kB cache
261 shared for code and data, so 4kB is
262 not really precise. */
263 4, /* size of l2 cache */
264 0, /* size of prefetch block */
265 0, /* number of parallel prefetches */
266 1, /* Branch cost */
267 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
268 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
269 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
270 COSTS_N_INSNS (3), /* cost of FABS instruction. */
271 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
272 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
273 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
274 DUMMY_STRINGOP_ALGS},
275 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
276 DUMMY_STRINGOP_ALGS},
277 1, /* scalar_stmt_cost. */
278 1, /* scalar load_cost. */
279 1, /* scalar_store_cost. */
280 1, /* vec_stmt_cost. */
281 1, /* vec_to_scalar_cost. */
282 1, /* scalar_to_vec_cost. */
283 1, /* vec_align_load_cost. */
284 2, /* vec_unalign_load_cost. */
285 1, /* vec_store_cost. */
286 3, /* cond_taken_branch_cost. */
287 1, /* cond_not_taken_branch_cost. */
288 };
290 static const
291 struct processor_costs pentium_cost = {
292 COSTS_N_INSNS (1), /* cost of an add instruction */
293 COSTS_N_INSNS (1), /* cost of a lea instruction */
294 COSTS_N_INSNS (4), /* variable shift costs */
295 COSTS_N_INSNS (1), /* constant shift costs */
296 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
297 COSTS_N_INSNS (11), /* HI */
298 COSTS_N_INSNS (11), /* SI */
299 COSTS_N_INSNS (11), /* DI */
300 COSTS_N_INSNS (11)}, /* other */
301 0, /* cost of multiply per each bit set */
302 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
303 COSTS_N_INSNS (25), /* HI */
304 COSTS_N_INSNS (25), /* SI */
305 COSTS_N_INSNS (25), /* DI */
306 COSTS_N_INSNS (25)}, /* other */
307 COSTS_N_INSNS (3), /* cost of movsx */
308 COSTS_N_INSNS (2), /* cost of movzx */
309 8, /* "large" insn */
310 6, /* MOVE_RATIO */
311 6, /* cost for loading QImode using movzbl */
312 {2, 4, 2}, /* cost of loading integer registers
313 in QImode, HImode and SImode.
314 Relative to reg-reg move (2). */
315 {2, 4, 2}, /* cost of storing integer registers */
316 2, /* cost of reg,reg fld/fst */
317 {2, 2, 6}, /* cost of loading fp registers
318 in SFmode, DFmode and XFmode */
319 {4, 4, 6}, /* cost of storing fp registers
320 in SFmode, DFmode and XFmode */
321 8, /* cost of moving MMX register */
322 {8, 8}, /* cost of loading MMX registers
323 in SImode and DImode */
324 {8, 8}, /* cost of storing MMX registers
325 in SImode and DImode */
326 2, /* cost of moving SSE register */
327 {4, 8, 16}, /* cost of loading SSE registers
328 in SImode, DImode and TImode */
329 {4, 8, 16}, /* cost of storing SSE registers
330 in SImode, DImode and TImode */
331 3, /* MMX or SSE register to integer */
332 8, /* size of l1 cache. */
333 8, /* size of l2 cache */
334 0, /* size of prefetch block */
335 0, /* number of parallel prefetches */
336 2, /* Branch cost */
337 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
338 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
339 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
340 COSTS_N_INSNS (1), /* cost of FABS instruction. */
341 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
342 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
343 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
344 DUMMY_STRINGOP_ALGS},
345 {{libcall, {{-1, rep_prefix_4_byte}}},
346 DUMMY_STRINGOP_ALGS},
347 1, /* scalar_stmt_cost. */
348 1, /* scalar load_cost. */
349 1, /* scalar_store_cost. */
350 1, /* vec_stmt_cost. */
351 1, /* vec_to_scalar_cost. */
352 1, /* scalar_to_vec_cost. */
353 1, /* vec_align_load_cost. */
354 2, /* vec_unalign_load_cost. */
355 1, /* vec_store_cost. */
356 3, /* cond_taken_branch_cost. */
357 1, /* cond_not_taken_branch_cost. */
358 };
360 static const
361 struct processor_costs pentiumpro_cost = {
362 COSTS_N_INSNS (1), /* cost of an add instruction */
363 COSTS_N_INSNS (1), /* cost of a lea instruction */
364 COSTS_N_INSNS (1), /* variable shift costs */
365 COSTS_N_INSNS (1), /* constant shift costs */
366 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
367 COSTS_N_INSNS (4), /* HI */
368 COSTS_N_INSNS (4), /* SI */
369 COSTS_N_INSNS (4), /* DI */
370 COSTS_N_INSNS (4)}, /* other */
371 0, /* cost of multiply per each bit set */
372 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
373 COSTS_N_INSNS (17), /* HI */
374 COSTS_N_INSNS (17), /* SI */
375 COSTS_N_INSNS (17), /* DI */
376 COSTS_N_INSNS (17)}, /* other */
377 COSTS_N_INSNS (1), /* cost of movsx */
378 COSTS_N_INSNS (1), /* cost of movzx */
379 8, /* "large" insn */
380 6, /* MOVE_RATIO */
381 2, /* cost for loading QImode using movzbl */
382 {4, 4, 4}, /* cost of loading integer registers
383 in QImode, HImode and SImode.
384 Relative to reg-reg move (2). */
385 {2, 2, 2}, /* cost of storing integer registers */
386 2, /* cost of reg,reg fld/fst */
387 {2, 2, 6}, /* cost of loading fp registers
388 in SFmode, DFmode and XFmode */
389 {4, 4, 6}, /* cost of storing fp registers
390 in SFmode, DFmode and XFmode */
391 2, /* cost of moving MMX register */
392 {2, 2}, /* cost of loading MMX registers
393 in SImode and DImode */
394 {2, 2}, /* cost of storing MMX registers
395 in SImode and DImode */
396 2, /* cost of moving SSE register */
397 {2, 2, 8}, /* cost of loading SSE registers
398 in SImode, DImode and TImode */
399 {2, 2, 8}, /* cost of storing SSE registers
400 in SImode, DImode and TImode */
401 3, /* MMX or SSE register to integer */
402 8, /* size of l1 cache. */
403 256, /* size of l2 cache */
404 32, /* size of prefetch block */
405 6, /* number of parallel prefetches */
406 2, /* Branch cost */
407 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
408 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
409 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
410 COSTS_N_INSNS (2), /* cost of FABS instruction. */
411 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
412 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
413 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we ensure
414 the alignment). For small blocks an inline loop is still a noticeable win; for bigger
415 blocks either rep movsl or rep movsb is the way to go. Rep movsb apparently has a
416 more expensive startup time in the CPU, but after 4K the difference is down in the noise.
417 */
418 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
419 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
420 DUMMY_STRINGOP_ALGS},
421 {{rep_prefix_4_byte, {{1024, unrolled_loop},
422 {8192, rep_prefix_4_byte}, {-1, libcall}}},
423 DUMMY_STRINGOP_ALGS},
424 1, /* scalar_stmt_cost. */
425 1, /* scalar load_cost. */
426 1, /* scalar_store_cost. */
427 1, /* vec_stmt_cost. */
428 1, /* vec_to_scalar_cost. */
429 1, /* scalar_to_vec_cost. */
430 1, /* vec_align_load_cost. */
431 2, /* vec_unalign_load_cost. */
432 1, /* vec_store_cost. */
433 3, /* cond_taken_branch_cost. */
434 1, /* cond_not_taken_branch_cost. */
435 };
437 static const
438 struct processor_costs geode_cost = {
439 COSTS_N_INSNS (1), /* cost of an add instruction */
440 COSTS_N_INSNS (1), /* cost of a lea instruction */
441 COSTS_N_INSNS (2), /* variable shift costs */
442 COSTS_N_INSNS (1), /* constant shift costs */
443 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
444 COSTS_N_INSNS (4), /* HI */
445 COSTS_N_INSNS (7), /* SI */
446 COSTS_N_INSNS (7), /* DI */
447 COSTS_N_INSNS (7)}, /* other */
448 0, /* cost of multiply per each bit set */
449 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
450 COSTS_N_INSNS (23), /* HI */
451 COSTS_N_INSNS (39), /* SI */
452 COSTS_N_INSNS (39), /* DI */
453 COSTS_N_INSNS (39)}, /* other */
454 COSTS_N_INSNS (1), /* cost of movsx */
455 COSTS_N_INSNS (1), /* cost of movzx */
456 8, /* "large" insn */
457 4, /* MOVE_RATIO */
458 1, /* cost for loading QImode using movzbl */
459 {1, 1, 1}, /* cost of loading integer registers
460 in QImode, HImode and SImode.
461 Relative to reg-reg move (2). */
462 {1, 1, 1}, /* cost of storing integer registers */
463 1, /* cost of reg,reg fld/fst */
464 {1, 1, 1}, /* cost of loading fp registers
465 in SFmode, DFmode and XFmode */
466 {4, 6, 6}, /* cost of storing fp registers
467 in SFmode, DFmode and XFmode */
469 1, /* cost of moving MMX register */
470 {1, 1}, /* cost of loading MMX registers
471 in SImode and DImode */
472 {1, 1}, /* cost of storing MMX registers
473 in SImode and DImode */
474 1, /* cost of moving SSE register */
475 {1, 1, 1}, /* cost of loading SSE registers
476 in SImode, DImode and TImode */
477 {1, 1, 1}, /* cost of storing SSE registers
478 in SImode, DImode and TImode */
479 1, /* MMX or SSE register to integer */
480 64, /* size of l1 cache. */
481 128, /* size of l2 cache. */
482 32, /* size of prefetch block */
483 1, /* number of parallel prefetches */
484 1, /* Branch cost */
485 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
486 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
487 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
488 COSTS_N_INSNS (1), /* cost of FABS instruction. */
489 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
490 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
491 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
492 DUMMY_STRINGOP_ALGS},
493 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
494 DUMMY_STRINGOP_ALGS},
495 1, /* scalar_stmt_cost. */
496 1, /* scalar load_cost. */
497 1, /* scalar_store_cost. */
498 1, /* vec_stmt_cost. */
499 1, /* vec_to_scalar_cost. */
500 1, /* scalar_to_vec_cost. */
501 1, /* vec_align_load_cost. */
502 2, /* vec_unalign_load_cost. */
503 1, /* vec_store_cost. */
504 3, /* cond_taken_branch_cost. */
505 1, /* cond_not_taken_branch_cost. */
506 };
508 static const
509 struct processor_costs k6_cost = {
510 COSTS_N_INSNS (1), /* cost of an add instruction */
511 COSTS_N_INSNS (2), /* cost of a lea instruction */
512 COSTS_N_INSNS (1), /* variable shift costs */
513 COSTS_N_INSNS (1), /* constant shift costs */
514 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
515 COSTS_N_INSNS (3), /* HI */
516 COSTS_N_INSNS (3), /* SI */
517 COSTS_N_INSNS (3), /* DI */
518 COSTS_N_INSNS (3)}, /* other */
519 0, /* cost of multiply per each bit set */
520 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
521 COSTS_N_INSNS (18), /* HI */
522 COSTS_N_INSNS (18), /* SI */
523 COSTS_N_INSNS (18), /* DI */
524 COSTS_N_INSNS (18)}, /* other */
525 COSTS_N_INSNS (2), /* cost of movsx */
526 COSTS_N_INSNS (2), /* cost of movzx */
527 8, /* "large" insn */
528 4, /* MOVE_RATIO */
529 3, /* cost for loading QImode using movzbl */
530 {4, 5, 4}, /* cost of loading integer registers
531 in QImode, HImode and SImode.
532 Relative to reg-reg move (2). */
533 {2, 3, 2}, /* cost of storing integer registers */
534 4, /* cost of reg,reg fld/fst */
535 {6, 6, 6}, /* cost of loading fp registers
536 in SFmode, DFmode and XFmode */
537 {4, 4, 4}, /* cost of storing fp registers
538 in SFmode, DFmode and XFmode */
539 2, /* cost of moving MMX register */
540 {2, 2}, /* cost of loading MMX registers
541 in SImode and DImode */
542 {2, 2}, /* cost of storing MMX registers
543 in SImode and DImode */
544 2, /* cost of moving SSE register */
545 {2, 2, 8}, /* cost of loading SSE registers
546 in SImode, DImode and TImode */
547 {2, 2, 8}, /* cost of storing SSE registers
548 in SImode, DImode and TImode */
549 6, /* MMX or SSE register to integer */
550 32, /* size of l1 cache. */
551 32, /* size of l2 cache. Some models
552 have integrated l2 cache, but
553 optimizing for k6 is not important
554 enough to worry about that. */
555 32, /* size of prefetch block */
556 1, /* number of parallel prefetches */
557 1, /* Branch cost */
558 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
559 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
560 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
561 COSTS_N_INSNS (2), /* cost of FABS instruction. */
562 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
563 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
564 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
565 DUMMY_STRINGOP_ALGS},
566 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
567 DUMMY_STRINGOP_ALGS},
568 1, /* scalar_stmt_cost. */
569 1, /* scalar load_cost. */
570 1, /* scalar_store_cost. */
571 1, /* vec_stmt_cost. */
572 1, /* vec_to_scalar_cost. */
573 1, /* scalar_to_vec_cost. */
574 1, /* vec_align_load_cost. */
575 2, /* vec_unalign_load_cost. */
576 1, /* vec_store_cost. */
577 3, /* cond_taken_branch_cost. */
578 1, /* cond_not_taken_branch_cost. */
579 };
581 static const
582 struct processor_costs athlon_cost = {
583 COSTS_N_INSNS (1), /* cost of an add instruction */
584 COSTS_N_INSNS (2), /* cost of a lea instruction */
585 COSTS_N_INSNS (1), /* variable shift costs */
586 COSTS_N_INSNS (1), /* constant shift costs */
587 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
588 COSTS_N_INSNS (5), /* HI */
589 COSTS_N_INSNS (5), /* SI */
590 COSTS_N_INSNS (5), /* DI */
591 COSTS_N_INSNS (5)}, /* other */
592 0, /* cost of multiply per each bit set */
593 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
594 COSTS_N_INSNS (26), /* HI */
595 COSTS_N_INSNS (42), /* SI */
596 COSTS_N_INSNS (74), /* DI */
597 COSTS_N_INSNS (74)}, /* other */
598 COSTS_N_INSNS (1), /* cost of movsx */
599 COSTS_N_INSNS (1), /* cost of movzx */
600 8, /* "large" insn */
601 9, /* MOVE_RATIO */
602 4, /* cost for loading QImode using movzbl */
603 {3, 4, 3}, /* cost of loading integer registers
604 in QImode, HImode and SImode.
605 Relative to reg-reg move (2). */
606 {3, 4, 3}, /* cost of storing integer registers */
607 4, /* cost of reg,reg fld/fst */
608 {4, 4, 12}, /* cost of loading fp registers
609 in SFmode, DFmode and XFmode */
610 {6, 6, 8}, /* cost of storing fp registers
611 in SFmode, DFmode and XFmode */
612 2, /* cost of moving MMX register */
613 {4, 4}, /* cost of loading MMX registers
614 in SImode and DImode */
615 {4, 4}, /* cost of storing MMX registers
616 in SImode and DImode */
617 2, /* cost of moving SSE register */
618 {4, 4, 6}, /* cost of loading SSE registers
619 in SImode, DImode and TImode */
620 {4, 4, 5}, /* cost of storing SSE registers
621 in SImode, DImode and TImode */
622 5, /* MMX or SSE register to integer */
623 64, /* size of l1 cache. */
624 256, /* size of l2 cache. */
625 64, /* size of prefetch block */
626 6, /* number of parallel prefetches */
627 5, /* Branch cost */
628 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
629 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
630 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
631 COSTS_N_INSNS (2), /* cost of FABS instruction. */
632 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
633 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
634 /* For some reason, Athlon deals better with REP prefix (relative to loops)
635 compared to K8. Alignment becomes important after 8 bytes for memcpy and
636 128 bytes for memset. */
637 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
638 DUMMY_STRINGOP_ALGS},
639 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
640 DUMMY_STRINGOP_ALGS},
641 1, /* scalar_stmt_cost. */
642 1, /* scalar load_cost. */
643 1, /* scalar_store_cost. */
644 1, /* vec_stmt_cost. */
645 1, /* vec_to_scalar_cost. */
646 1, /* scalar_to_vec_cost. */
647 1, /* vec_align_load_cost. */
648 2, /* vec_unalign_load_cost. */
649 1, /* vec_store_cost. */
650 3, /* cond_taken_branch_cost. */
651 1, /* cond_not_taken_branch_cost. */
652 };
654 static const
655 struct processor_costs k8_cost = {
656 COSTS_N_INSNS (1), /* cost of an add instruction */
657 COSTS_N_INSNS (2), /* cost of a lea instruction */
658 COSTS_N_INSNS (1), /* variable shift costs */
659 COSTS_N_INSNS (1), /* constant shift costs */
660 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
661 COSTS_N_INSNS (4), /* HI */
662 COSTS_N_INSNS (3), /* SI */
663 COSTS_N_INSNS (4), /* DI */
664 COSTS_N_INSNS (5)}, /* other */
665 0, /* cost of multiply per each bit set */
666 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
667 COSTS_N_INSNS (26), /* HI */
668 COSTS_N_INSNS (42), /* SI */
669 COSTS_N_INSNS (74), /* DI */
670 COSTS_N_INSNS (74)}, /* other */
671 COSTS_N_INSNS (1), /* cost of movsx */
672 COSTS_N_INSNS (1), /* cost of movzx */
673 8, /* "large" insn */
674 9, /* MOVE_RATIO */
675 4, /* cost for loading QImode using movzbl */
676 {3, 4, 3}, /* cost of loading integer registers
677 in QImode, HImode and SImode.
678 Relative to reg-reg move (2). */
679 {3, 4, 3}, /* cost of storing integer registers */
680 4, /* cost of reg,reg fld/fst */
681 {4, 4, 12}, /* cost of loading fp registers
682 in SFmode, DFmode and XFmode */
683 {6, 6, 8}, /* cost of storing fp registers
684 in SFmode, DFmode and XFmode */
685 2, /* cost of moving MMX register */
686 {3, 3}, /* cost of loading MMX registers
687 in SImode and DImode */
688 {4, 4}, /* cost of storing MMX registers
689 in SImode and DImode */
690 2, /* cost of moving SSE register */
691 {4, 3, 6}, /* cost of loading SSE registers
692 in SImode, DImode and TImode */
693 {4, 4, 5}, /* cost of storing SSE registers
694 in SImode, DImode and TImode */
695 5, /* MMX or SSE register to integer */
696 64, /* size of l1 cache. */
697 512, /* size of l2 cache. */
698 64, /* size of prefetch block */
699 /* New AMD processors never drop prefetches; if they cannot be performed
700 immediately, they are queued. We set number of simultaneous prefetches
701 to a large constant to reflect this (it probably is not a good idea not
702 to limit number of prefetches at all, as their execution also takes some
703 time). */
704 100, /* number of parallel prefetches */
705 3, /* Branch cost */
706 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
707 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
708 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
709 COSTS_N_INSNS (2), /* cost of FABS instruction. */
710 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
711 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
712 /* K8 has an optimized REP instruction for medium sized blocks, but for very small
713 blocks it is better to use a loop. For large blocks, a libcall can do
714 nontemporal accesses and beat inline copying considerably. */
715 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
716 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
717 {{libcall, {{8, loop}, {24, unrolled_loop},
718 {2048, rep_prefix_4_byte}, {-1, libcall}}},
719 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
720 4, /* scalar_stmt_cost. */
721 2, /* scalar load_cost. */
722 2, /* scalar_store_cost. */
723 5, /* vec_stmt_cost. */
724 0, /* vec_to_scalar_cost. */
725 2, /* scalar_to_vec_cost. */
726 2, /* vec_align_load_cost. */
727 3, /* vec_unalign_load_cost. */
728 3, /* vec_store_cost. */
729 3, /* cond_taken_branch_cost. */
730 2, /* cond_not_taken_branch_cost. */
731 };
733 struct processor_costs amdfam10_cost = {
734 COSTS_N_INSNS (1), /* cost of an add instruction */
735 COSTS_N_INSNS (2), /* cost of a lea instruction */
736 COSTS_N_INSNS (1), /* variable shift costs */
737 COSTS_N_INSNS (1), /* constant shift costs */
738 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
739 COSTS_N_INSNS (4), /* HI */
740 COSTS_N_INSNS (3), /* SI */
741 COSTS_N_INSNS (4), /* DI */
742 COSTS_N_INSNS (5)}, /* other */
743 0, /* cost of multiply per each bit set */
744 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
745 COSTS_N_INSNS (35), /* HI */
746 COSTS_N_INSNS (51), /* SI */
747 COSTS_N_INSNS (83), /* DI */
748 COSTS_N_INSNS (83)}, /* other */
749 COSTS_N_INSNS (1), /* cost of movsx */
750 COSTS_N_INSNS (1), /* cost of movzx */
751 8, /* "large" insn */
752 9, /* MOVE_RATIO */
753 4, /* cost for loading QImode using movzbl */
754 {3, 4, 3}, /* cost of loading integer registers
755 in QImode, HImode and SImode.
756 Relative to reg-reg move (2). */
757 {3, 4, 3}, /* cost of storing integer registers */
758 4, /* cost of reg,reg fld/fst */
759 {4, 4, 12}, /* cost of loading fp registers
760 in SFmode, DFmode and XFmode */
761 {6, 6, 8}, /* cost of storing fp registers
762 in SFmode, DFmode and XFmode */
763 2, /* cost of moving MMX register */
764 {3, 3}, /* cost of loading MMX registers
765 in SImode and DImode */
766 {4, 4}, /* cost of storing MMX registers
767 in SImode and DImode */
768 2, /* cost of moving SSE register */
769 {4, 4, 3}, /* cost of loading SSE registers
770 in SImode, DImode and TImode */
771 {4, 4, 5}, /* cost of storing SSE registers
772 in SImode, DImode and TImode */
773 3, /* MMX or SSE register to integer */
774 /* On K8
775 MOVD reg64, xmmreg Double FSTORE 4
776 MOVD reg32, xmmreg Double FSTORE 4
777 On AMDFAM10
778 MOVD reg64, xmmreg Double FADD 3
779 1/1 1/1
780 MOVD reg32, xmmreg Double FADD 3
781 1/1 1/1 */
782 64, /* size of l1 cache. */
783 512, /* size of l2 cache. */
784 64, /* size of prefetch block */
785 /* New AMD processors never drop prefetches; if they cannot be performed
786 immediately, they are queued. We set number of simultaneous prefetches
787 to a large constant to reflect this (it probably is not a good idea not
788 to limit number of prefetches at all, as their execution also takes some
789 time). */
790 100, /* number of parallel prefetches */
791 2, /* Branch cost */
792 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
793 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
794 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
795 COSTS_N_INSNS (2), /* cost of FABS instruction. */
796 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
797 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
799 /* AMDFAM10 has an optimized REP instruction for medium sized blocks, but for
800 very small blocks it is better to use a loop. For large blocks, a libcall can
801 do nontemporal accesses and beat inline copying considerably. */
802 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
803 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
804 {{libcall, {{8, loop}, {24, unrolled_loop},
805 {2048, rep_prefix_4_byte}, {-1, libcall}}},
806 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
807 4, /* scalar_stmt_cost. */
808 2, /* scalar load_cost. */
809 2, /* scalar_store_cost. */
810 6, /* vec_stmt_cost. */
811 0, /* vec_to_scalar_cost. */
812 2, /* scalar_to_vec_cost. */
813 2, /* vec_align_load_cost. */
814 2, /* vec_unalign_load_cost. */
815 2, /* vec_store_cost. */
816 2, /* cond_taken_branch_cost. */
817 1, /* cond_not_taken_branch_cost. */
818 };
820 static const
821 struct processor_costs pentium4_cost = {
822 COSTS_N_INSNS (1), /* cost of an add instruction */
823 COSTS_N_INSNS (3), /* cost of a lea instruction */
824 COSTS_N_INSNS (4), /* variable shift costs */
825 COSTS_N_INSNS (4), /* constant shift costs */
826 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
827 COSTS_N_INSNS (15), /* HI */
828 COSTS_N_INSNS (15), /* SI */
829 COSTS_N_INSNS (15), /* DI */
830 COSTS_N_INSNS (15)}, /* other */
831 0, /* cost of multiply per each bit set */
832 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
833 COSTS_N_INSNS (56), /* HI */
834 COSTS_N_INSNS (56), /* SI */
835 COSTS_N_INSNS (56), /* DI */
836 COSTS_N_INSNS (56)}, /* other */
837 COSTS_N_INSNS (1), /* cost of movsx */
838 COSTS_N_INSNS (1), /* cost of movzx */
839 16, /* "large" insn */
840 6, /* MOVE_RATIO */
841 2, /* cost for loading QImode using movzbl */
842 {4, 5, 4}, /* cost of loading integer registers
843 in QImode, HImode and SImode.
844 Relative to reg-reg move (2). */
845 {2, 3, 2}, /* cost of storing integer registers */
846 2, /* cost of reg,reg fld/fst */
847 {2, 2, 6}, /* cost of loading fp registers
848 in SFmode, DFmode and XFmode */
849 {4, 4, 6}, /* cost of storing fp registers
850 in SFmode, DFmode and XFmode */
851 2, /* cost of moving MMX register */
852 {2, 2}, /* cost of loading MMX registers
853 in SImode and DImode */
854 {2, 2}, /* cost of storing MMX registers
855 in SImode and DImode */
856 12, /* cost of moving SSE register */
857 {12, 12, 12}, /* cost of loading SSE registers
858 in SImode, DImode and TImode */
859 {2, 2, 8}, /* cost of storing SSE registers
860 in SImode, DImode and TImode */
861 10, /* MMX or SSE register to integer */
862 8, /* size of l1 cache. */
863 256, /* size of l2 cache. */
864 64, /* size of prefetch block */
865 6, /* number of parallel prefetches */
866 2, /* Branch cost */
867 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
868 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
869 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
870 COSTS_N_INSNS (2), /* cost of FABS instruction. */
871 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
872 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
873 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
874 DUMMY_STRINGOP_ALGS},
875 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
876 {-1, libcall}}},
877 DUMMY_STRINGOP_ALGS},
878 1, /* scalar_stmt_cost. */
879 1, /* scalar load_cost. */
880 1, /* scalar_store_cost. */
881 1, /* vec_stmt_cost. */
882 1, /* vec_to_scalar_cost. */
883 1, /* scalar_to_vec_cost. */
884 1, /* vec_align_load_cost. */
885 2, /* vec_unalign_load_cost. */
886 1, /* vec_store_cost. */
887 3, /* cond_taken_branch_cost. */
888 1, /* cond_not_taken_branch_cost. */
889 };
891 static const
892 struct processor_costs nocona_cost = {
893 COSTS_N_INSNS (1), /* cost of an add instruction */
894 COSTS_N_INSNS (1), /* cost of a lea instruction */
895 COSTS_N_INSNS (1), /* variable shift costs */
896 COSTS_N_INSNS (1), /* constant shift costs */
897 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
898 COSTS_N_INSNS (10), /* HI */
899 COSTS_N_INSNS (10), /* SI */
900 COSTS_N_INSNS (10), /* DI */
901 COSTS_N_INSNS (10)}, /* other */
902 0, /* cost of multiply per each bit set */
903 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
904 COSTS_N_INSNS (66), /* HI */
905 COSTS_N_INSNS (66), /* SI */
906 COSTS_N_INSNS (66), /* DI */
907 COSTS_N_INSNS (66)}, /* other */
908 COSTS_N_INSNS (1), /* cost of movsx */
909 COSTS_N_INSNS (1), /* cost of movzx */
910 16, /* "large" insn */
911 17, /* MOVE_RATIO */
912 4, /* cost for loading QImode using movzbl */
913 {4, 4, 4}, /* cost of loading integer registers
914 in QImode, HImode and SImode.
915 Relative to reg-reg move (2). */
916 {4, 4, 4}, /* cost of storing integer registers */
917 3, /* cost of reg,reg fld/fst */
918 {12, 12, 12}, /* cost of loading fp registers
919 in SFmode, DFmode and XFmode */
920 {4, 4, 4}, /* cost of storing fp registers
921 in SFmode, DFmode and XFmode */
922 6, /* cost of moving MMX register */
923 {12, 12}, /* cost of loading MMX registers
924 in SImode and DImode */
925 {12, 12}, /* cost of storing MMX registers
926 in SImode and DImode */
927 6, /* cost of moving SSE register */
928 {12, 12, 12}, /* cost of loading SSE registers
929 in SImode, DImode and TImode */
930 {12, 12, 12}, /* cost of storing SSE registers
931 in SImode, DImode and TImode */
932 8, /* MMX or SSE register to integer */
933 8, /* size of l1 cache. */
934 1024, /* size of l2 cache. */
935 128, /* size of prefetch block */
936 8, /* number of parallel prefetches */
937 1, /* Branch cost */
938 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
939 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
940 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
941 COSTS_N_INSNS (3), /* cost of FABS instruction. */
942 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
943 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
944 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
945 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
946 {100000, unrolled_loop}, {-1, libcall}}}},
947 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
948 {-1, libcall}}},
949 {libcall, {{24, loop}, {64, unrolled_loop},
950 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
951 1, /* scalar_stmt_cost. */
952 1, /* scalar load_cost. */
953 1, /* scalar_store_cost. */
954 1, /* vec_stmt_cost. */
955 1, /* vec_to_scalar_cost. */
956 1, /* scalar_to_vec_cost. */
957 1, /* vec_align_load_cost. */
958 2, /* vec_unalign_load_cost. */
959 1, /* vec_store_cost. */
960 3, /* cond_taken_branch_cost. */
961 1, /* cond_not_taken_branch_cost. */
962 };
964 static const
965 struct processor_costs core2_cost = {
966 COSTS_N_INSNS (1), /* cost of an add instruction */
967 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
968 COSTS_N_INSNS (1), /* variable shift costs */
969 COSTS_N_INSNS (1), /* constant shift costs */
970 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
971 COSTS_N_INSNS (3), /* HI */
972 COSTS_N_INSNS (3), /* SI */
973 COSTS_N_INSNS (3), /* DI */
974 COSTS_N_INSNS (3)}, /* other */
975 0, /* cost of multiply per each bit set */
976 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
977 COSTS_N_INSNS (22), /* HI */
978 COSTS_N_INSNS (22), /* SI */
979 COSTS_N_INSNS (22), /* DI */
980 COSTS_N_INSNS (22)}, /* other */
981 COSTS_N_INSNS (1), /* cost of movsx */
982 COSTS_N_INSNS (1), /* cost of movzx */
983 8, /* "large" insn */
984 16, /* MOVE_RATIO */
985 2, /* cost for loading QImode using movzbl */
986 {6, 6, 6}, /* cost of loading integer registers
987 in QImode, HImode and SImode.
988 Relative to reg-reg move (2). */
989 {4, 4, 4}, /* cost of storing integer registers */
990 2, /* cost of reg,reg fld/fst */
991 {6, 6, 6}, /* cost of loading fp registers
992 in SFmode, DFmode and XFmode */
993 {4, 4, 4}, /* cost of storing fp registers
994 in SFmode, DFmode and XFmode */
995 2, /* cost of moving MMX register */
996 {6, 6}, /* cost of loading MMX registers
997 in SImode and DImode */
998 {4, 4}, /* cost of storing MMX registers
999 in SImode and DImode */
1000 2, /* cost of moving SSE register */
1001 {6, 6, 6}, /* cost of loading SSE registers
1002 in SImode, DImode and TImode */
1003 {4, 4, 4}, /* cost of storing SSE registers
1004 in SImode, DImode and TImode */
1005 2, /* MMX or SSE register to integer */
1006 32, /* size of l1 cache. */
1007 2048, /* size of l2 cache. */
1008 128, /* size of prefetch block */
1009 8, /* number of parallel prefetches */
1010 3, /* Branch cost */
1011 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
1012 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
1013 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
1014 COSTS_N_INSNS (1), /* cost of FABS instruction. */
1015 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
1016 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
1017 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1018 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1019 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1020 {{libcall, {{8, loop}, {15, unrolled_loop},
1021 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1022 {libcall, {{24, loop}, {32, unrolled_loop},
1023 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1024 1, /* scalar_stmt_cost. */
1025 1, /* scalar load_cost. */
1026 1, /* scalar_store_cost. */
1027 1, /* vec_stmt_cost. */
1028 1, /* vec_to_scalar_cost. */
1029 1, /* scalar_to_vec_cost. */
1030 1, /* vec_align_load_cost. */
1031 2, /* vec_unalign_load_cost. */
1032 1, /* vec_store_cost. */
1033 3, /* cond_taken_branch_cost. */
1034 1, /* cond_not_taken_branch_cost. */
1035 };
1037 static const
1038 struct processor_costs atom_cost = {
1039 COSTS_N_INSNS (1), /* cost of an add instruction */
1040 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1041 COSTS_N_INSNS (1), /* variable shift costs */
1042 COSTS_N_INSNS (1), /* constant shift costs */
1043 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1044 COSTS_N_INSNS (4), /* HI */
1045 COSTS_N_INSNS (3), /* SI */
1046 COSTS_N_INSNS (4), /* DI */
1047 COSTS_N_INSNS (2)}, /* other */
1048 0, /* cost of multiply per each bit set */
1049 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1050 COSTS_N_INSNS (26), /* HI */
1051 COSTS_N_INSNS (42), /* SI */
1052 COSTS_N_INSNS (74), /* DI */
1053 COSTS_N_INSNS (74)}, /* other */
1054 COSTS_N_INSNS (1), /* cost of movsx */
1055 COSTS_N_INSNS (1), /* cost of movzx */
1056 8, /* "large" insn */
1057 17, /* MOVE_RATIO */
1058 2, /* cost for loading QImode using movzbl */
1059 {4, 4, 4}, /* cost of loading integer registers
1060 in QImode, HImode and SImode.
1061 Relative to reg-reg move (2). */
1062 {4, 4, 4}, /* cost of storing integer registers */
1063 4, /* cost of reg,reg fld/fst */
1064 {12, 12, 12}, /* cost of loading fp registers
1065 in SFmode, DFmode and XFmode */
1066 {6, 6, 8}, /* cost of storing fp registers
1067 in SFmode, DFmode and XFmode */
1068 2, /* cost of moving MMX register */
1069 {8, 8}, /* cost of loading MMX registers
1070 in SImode and DImode */
1071 {8, 8}, /* cost of storing MMX registers
1072 in SImode and DImode */
1073 2, /* cost of moving SSE register */
1074 {8, 8, 8}, /* cost of loading SSE registers
1075 in SImode, DImode and TImode */
1076 {8, 8, 8}, /* cost of storing SSE registers
1077 in SImode, DImode and TImode */
1078 5, /* MMX or SSE register to integer */
1079 32, /* size of l1 cache. */
1080 256, /* size of l2 cache. */
1081 64, /* size of prefetch block */
1082 6, /* number of parallel prefetches */
1083 3, /* Branch cost */
1084 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1085 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1086 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1087 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1088 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1089 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1090 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1091 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1092 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1093 {{libcall, {{8, loop}, {15, unrolled_loop},
1094 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1095 {libcall, {{24, loop}, {32, unrolled_loop},
1096 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1097 1, /* scalar_stmt_cost. */
1098 1, /* scalar load_cost. */
1099 1, /* scalar_store_cost. */
1100 1, /* vec_stmt_cost. */
1101 1, /* vec_to_scalar_cost. */
1102 1, /* scalar_to_vec_cost. */
1103 1, /* vec_align_load_cost. */
1104 2, /* vec_unalign_load_cost. */
1105 1, /* vec_store_cost. */
1106 3, /* cond_taken_branch_cost. */
1107 1, /* cond_not_taken_branch_cost. */
1108 };
1110 /* Generic64 should produce code tuned for Nocona and K8. */
1111 static const
1112 struct processor_costs generic64_cost = {
1113 COSTS_N_INSNS (1), /* cost of an add instruction */
1114 /* On all chips taken into consideration lea is 2 cycles or more. With
1115 this cost, however, our current implementation of synth_mult results in
1116 the use of unnecessary temporary registers, causing regressions on several
1117 SPECfp benchmarks. */
1118 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1119 COSTS_N_INSNS (1), /* variable shift costs */
1120 COSTS_N_INSNS (1), /* constant shift costs */
1121 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1122 COSTS_N_INSNS (4), /* HI */
1123 COSTS_N_INSNS (3), /* SI */
1124 COSTS_N_INSNS (4), /* DI */
1125 COSTS_N_INSNS (2)}, /* other */
1126 0, /* cost of multiply per each bit set */
1127 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1128 COSTS_N_INSNS (26), /* HI */
1129 COSTS_N_INSNS (42), /* SI */
1130 COSTS_N_INSNS (74), /* DI */
1131 COSTS_N_INSNS (74)}, /* other */
1132 COSTS_N_INSNS (1), /* cost of movsx */
1133 COSTS_N_INSNS (1), /* cost of movzx */
1134 8, /* "large" insn */
1135 17, /* MOVE_RATIO */
1136 4, /* cost for loading QImode using movzbl */
1137 {4, 4, 4}, /* cost of loading integer registers
1138 in QImode, HImode and SImode.
1139 Relative to reg-reg move (2). */
1140 {4, 4, 4}, /* cost of storing integer registers */
1141 4, /* cost of reg,reg fld/fst */
1142 {12, 12, 12}, /* cost of loading fp registers
1143 in SFmode, DFmode and XFmode */
1144 {6, 6, 8}, /* cost of storing fp registers
1145 in SFmode, DFmode and XFmode */
1146 2, /* cost of moving MMX register */
1147 {8, 8}, /* cost of loading MMX registers
1148 in SImode and DImode */
1149 {8, 8}, /* cost of storing MMX registers
1150 in SImode and DImode */
1151 2, /* cost of moving SSE register */
1152 {8, 8, 8}, /* cost of loading SSE registers
1153 in SImode, DImode and TImode */
1154 {8, 8, 8}, /* cost of storing SSE registers
1155 in SImode, DImode and TImode */
1156 5, /* MMX or SSE register to integer */
1157 32, /* size of l1 cache. */
1158 512, /* size of l2 cache. */
1159 64, /* size of prefetch block */
1160 6, /* number of parallel prefetches */
1161 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this value
1162 is increased to a perhaps more appropriate value of 5. */
1163 3, /* Branch cost */
1164 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1165 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1166 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1167 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1168 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1169 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1170 {DUMMY_STRINGOP_ALGS,
1171 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1172 {DUMMY_STRINGOP_ALGS,
1173 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1174 1, /* scalar_stmt_cost. */
1175 1, /* scalar load_cost. */
1176 1, /* scalar_store_cost. */
1177 1, /* vec_stmt_cost. */
1178 1, /* vec_to_scalar_cost. */
1179 1, /* scalar_to_vec_cost. */
1180 1, /* vec_align_load_cost. */
1181 2, /* vec_unalign_load_cost. */
1182 1, /* vec_store_cost. */
1183 3, /* cond_taken_branch_cost. */
1184 1, /* cond_not_taken_branch_cost. */
1185 };
1187 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
1188 static const
1189 struct processor_costs generic32_cost = {
1190 COSTS_N_INSNS (1), /* cost of an add instruction */
1191 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1192 COSTS_N_INSNS (1), /* variable shift costs */
1193 COSTS_N_INSNS (1), /* constant shift costs */
1194 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1195 COSTS_N_INSNS (4), /* HI */
1196 COSTS_N_INSNS (3), /* SI */
1197 COSTS_N_INSNS (4), /* DI */
1198 COSTS_N_INSNS (2)}, /* other */
1199 0, /* cost of multiply per each bit set */
1200 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1201 COSTS_N_INSNS (26), /* HI */
1202 COSTS_N_INSNS (42), /* SI */
1203 COSTS_N_INSNS (74), /* DI */
1204 COSTS_N_INSNS (74)}, /* other */
1205 COSTS_N_INSNS (1), /* cost of movsx */
1206 COSTS_N_INSNS (1), /* cost of movzx */
1207 8, /* "large" insn */
1208 17, /* MOVE_RATIO */
1209 4, /* cost for loading QImode using movzbl */
1210 {4, 4, 4}, /* cost of loading integer registers
1211 in QImode, HImode and SImode.
1212 Relative to reg-reg move (2). */
1213 {4, 4, 4}, /* cost of storing integer registers */
1214 4, /* cost of reg,reg fld/fst */
1215 {12, 12, 12}, /* cost of loading fp registers
1216 in SFmode, DFmode and XFmode */
1217 {6, 6, 8}, /* cost of storing fp registers
1218 in SFmode, DFmode and XFmode */
1219 2, /* cost of moving MMX register */
1220 {8, 8}, /* cost of loading MMX registers
1221 in SImode and DImode */
1222 {8, 8}, /* cost of storing MMX registers
1223 in SImode and DImode */
1224 2, /* cost of moving SSE register */
1225 {8, 8, 8}, /* cost of loading SSE registers
1226 in SImode, DImode and TImode */
1227 {8, 8, 8}, /* cost of storing SSE registers
1228 in SImode, DImode and TImode */
1229 5, /* MMX or SSE register to integer */
1230 32, /* size of l1 cache. */
1231 256, /* size of l2 cache. */
1232 64, /* size of prefetch block */
1233 6, /* number of parallel prefetches */
1234 3, /* Branch cost */
1235 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1236 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1237 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1238 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1239 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1240 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1241 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1242 DUMMY_STRINGOP_ALGS},
1243 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1244 DUMMY_STRINGOP_ALGS},
1245 1, /* scalar_stmt_cost. */
1246 1, /* scalar load_cost. */
1247 1, /* scalar_store_cost. */
1248 1, /* vec_stmt_cost. */
1249 1, /* vec_to_scalar_cost. */
1250 1, /* scalar_to_vec_cost. */
1251 1, /* vec_align_load_cost. */
1252 2, /* vec_unalign_load_cost. */
1253 1, /* vec_store_cost. */
1254 3, /* cond_taken_branch_cost. */
1255 1, /* cond_not_taken_branch_cost. */
1256 };
1258 const struct processor_costs *ix86_cost = &pentium_cost;
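/* Editor's illustrative note (not part of the original file): &pentium_cost is
   only the build-time default; option processing later repoints ix86_cost
   at the table matching the selected tuning target, roughly

     ix86_cost = optimize_size ? &ix86_size_cost
                               : processor_target_table[ix86_tune].cost;

   so the cost hooks consult the table for the -mtune processor (or the
   size-oriented table when optimizing for size).  */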
1260 /* Processor feature/optimization bitmasks. */
1261 #define m_386 (1<<PROCESSOR_I386)
1262 #define m_486 (1<<PROCESSOR_I486)
1263 #define m_PENT (1<<PROCESSOR_PENTIUM)
1264 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1265 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1266 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1267 #define m_CORE2 (1<<PROCESSOR_CORE2)
1268 #define m_ATOM (1<<PROCESSOR_ATOM)
1270 #define m_GEODE (1<<PROCESSOR_GEODE)
1271 #define m_K6 (1<<PROCESSOR_K6)
1272 #define m_K6_GEODE (m_K6 | m_GEODE)
1273 #define m_K8 (1<<PROCESSOR_K8)
1274 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1275 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1276 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1277 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)
1279 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1280 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1282 /* Generic instruction choice should be common subset of supported CPUs
1283 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1284 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
1286 /* Feature tests against the various tunings. */
1287 unsigned char ix86_tune_features[X86_TUNE_LAST];
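/* Editor's illustrative note (not part of the original file): entries of this
   array are normally read through TARGET_* convenience macros in i386.h,
   along the lines of

     #define TARGET_USE_LEAVE  ix86_tune_features[X86_TUNE_USE_LEAVE]

   so the rest of the backend can test a single tuning flag directly.  */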
1289 /* Feature tests against the various tunings used to create ix86_tune_features
1290 based on the processor mask. */
1291 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1292 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1293 negatively, so enabling for Generic64 seems like good code size
1294 tradeoff. We can't enable it for 32bit generic because it does not
1295 work well with PPro base chips. */
1296 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1298 /* X86_TUNE_PUSH_MEMORY */
1299 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1300 | m_NOCONA | m_CORE2 | m_GENERIC,
1302 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1303 m_486 | m_PENT,
1305 /* X86_TUNE_UNROLL_STRLEN */
1306 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1307 | m_CORE2 | m_GENERIC,
1309 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1310 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1312 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1313 on simulation result. But after P4 was made, no performance benefit
1314 was observed with branch hints. It also increases the code size.
1315 As a result, icc never generates branch hints. */
1316 0,
1318 /* X86_TUNE_DOUBLE_WITH_ADD */
1319 ~m_386,
1321 /* X86_TUNE_USE_SAHF */
1322 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
1323 | m_NOCONA | m_CORE2 | m_GENERIC,
1325 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1326 partial dependencies. */
1327 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1328 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1330 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1331 register stalls on Generic32 compilation setting as well. However
1332 in current implementation the partial register stalls are not eliminated
1333 very well - they can be introduced via subregs synthesized by combine
1334 and can happen in caller/callee saving sequences. Because this option
1335 pays back little on PPro based chips and is in conflict with partial reg
1336 dependencies used by Athlon/P4 based chips, it is better to leave it off
1337 for generic32 for now. */
1338 m_PPRO,
1340 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1341 m_CORE2 | m_GENERIC,
1343 /* X86_TUNE_USE_HIMODE_FIOP */
1344 m_386 | m_486 | m_K6_GEODE,
1346 /* X86_TUNE_USE_SIMODE_FIOP */
1347 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1349 /* X86_TUNE_USE_MOV0 */
1350 m_K6,
1352 /* X86_TUNE_USE_CLTD */
1353 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1355 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1356 m_PENT4,
1358 /* X86_TUNE_SPLIT_LONG_MOVES */
1359 m_PPRO,
1361 /* X86_TUNE_READ_MODIFY_WRITE */
1362 ~m_PENT,
1364 /* X86_TUNE_READ_MODIFY */
1365 ~(m_PENT | m_PPRO),
1367 /* X86_TUNE_PROMOTE_QIMODE */
1368 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1369 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1371 /* X86_TUNE_FAST_PREFIX */
1372 ~(m_PENT | m_486 | m_386),
1374 /* X86_TUNE_SINGLE_STRINGOP */
1375 m_386 | m_PENT4 | m_NOCONA,
1377 /* X86_TUNE_QIMODE_MATH */
1378 ~0,
1380 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1381 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1382 might be considered for Generic32 if our scheme for avoiding partial
1383 stalls was more effective. */
1384 ~m_PPRO,
1386 /* X86_TUNE_PROMOTE_QI_REGS */
1387 0,
1389 /* X86_TUNE_PROMOTE_HI_REGS */
1390 m_PPRO,
1392 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1393 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1394 | m_CORE2 | m_GENERIC,
1396 /* X86_TUNE_ADD_ESP_8 */
1397 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1398 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1400 /* X86_TUNE_SUB_ESP_4 */
1401 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1402 | m_GENERIC,
1404 /* X86_TUNE_SUB_ESP_8 */
1405 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1406 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1408 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1409 for DFmode copies */
1410 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1411 | m_GENERIC | m_GEODE),
1413 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1414 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1416 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1417 conflict here between PPro/Pentium4 based chips that treat 128bit
1418 SSE registers as single units versus K8 based chips that divide SSE
1419 registers into two 64bit halves. This knob promotes all store destinations
1420 to be 128bit to allow register renaming on 128bit SSE units, but usually
1421 results in one extra microop on 64bit SSE units. Experimental results
1422 show that disabling this option on P4 brings over a 20% SPECfp regression,
1423 while enabling it on K8 brings roughly a 2.4% regression that can be partly
1424 masked by careful scheduling of moves. */
1425 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1426 | m_AMDFAM10,
1428 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
1429 m_AMDFAM10,
1431 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1432 are resolved on SSE register parts instead of whole registers, so we may
1433 maintain just the lower part of scalar values in the proper format, leaving
1434 the upper part undefined. */
1435 m_ATHLON_K8,
1437 /* X86_TUNE_SSE_TYPELESS_STORES */
1438 m_AMD_MULTIPLE,
1440 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1441 m_PPRO | m_PENT4 | m_NOCONA,
1443 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1444 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1446 /* X86_TUNE_PROLOGUE_USING_MOVE */
1447 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1449 /* X86_TUNE_EPILOGUE_USING_MOVE */
1450 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1452 /* X86_TUNE_SHIFT1 */
1453 ~m_486,
1455 /* X86_TUNE_USE_FFREEP */
1456 m_AMD_MULTIPLE,
1458 /* X86_TUNE_INTER_UNIT_MOVES */
1459 ~(m_AMD_MULTIPLE | m_ATOM | m_GENERIC),
1461 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1462 ~(m_AMDFAM10),
1464 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1465 than 4 branch instructions in the 16 byte window. */
1466 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1467 | m_GENERIC,
1469 /* X86_TUNE_SCHEDULE */
1470 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1471 | m_GENERIC,
1473 /* X86_TUNE_USE_BT */
1474 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1476 /* X86_TUNE_USE_INCDEC */
1477 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1479 /* X86_TUNE_PAD_RETURNS */
1480 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1482 /* X86_TUNE_EXT_80387_CONSTANTS */
1483 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1484 | m_CORE2 | m_GENERIC,
1486 /* X86_TUNE_SHORTEN_X87_SSE */
1487 ~m_K8,
1489 /* X86_TUNE_AVOID_VECTOR_DECODE */
1490 m_K8 | m_GENERIC64,
1492 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode
1493 and SImode multiply, but 386 and 486 do HImode multiply faster. */
1494 ~(m_386 | m_486),
1496 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1497 vector path on AMD machines. */
1498 m_K8 | m_GENERIC64 | m_AMDFAM10,
1500 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1501 machines. */
1502 m_K8 | m_GENERIC64 | m_AMDFAM10,
1504 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
1505 than a MOV. */
1506 m_PENT,
1508 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1509 but one byte longer. */
1510 m_PENT,
1512 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1513 operand that cannot be represented using a modRM byte. The XOR
1514 replacement is long decoded, so this split helps here as well. */
1515 m_K6,
1517 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
1518 from FP to FP. */
1519 m_AMDFAM10 | m_GENERIC,
1521 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1522 from integer to FP. */
1523 m_AMDFAM10,
1525 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1526 with a subsequent conditional jump instruction into a single
1527 compare-and-branch uop. */
1528 m_CORE2,
1530 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1531 will impact LEA instruction selection. */
1532 m_ATOM,
1535 /* Feature tests against the various architecture variations. */
1536 unsigned char ix86_arch_features[X86_ARCH_LAST];
1538 /* Feature tests against the various architecture variations, used to create
1539 ix86_arch_features based on the processor mask. */
1540 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1541 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1542 ~(m_386 | m_486 | m_PENT | m_K6),
1544 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1545 ~m_386,
1547 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1548 ~(m_386 | m_486),
1550 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1551 ~m_386,
1553 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1554 ~m_386,
1557 static const unsigned int x86_accumulate_outgoing_args
1558 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1559 | m_GENERIC;
1561 static const unsigned int x86_arch_always_fancy_math_387
1562 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1563 | m_NOCONA | m_CORE2 | m_GENERIC;
1565 static enum stringop_alg stringop_alg = no_stringop;
1567 /* In case the average insn count for a single function invocation is
1568 lower than this constant, emit fast (but longer) prologue and
1569 epilogue code. */
1570 #define FAST_PROLOGUE_INSN_COUNT 20
1572 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1573 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1574 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1575 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1577 /* Array of the smallest class containing reg number REGNO, indexed by
1578 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1580 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1582 /* ax, dx, cx, bx */
1583 AREG, DREG, CREG, BREG,
1584 /* si, di, bp, sp */
1585 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1586 /* FP registers */
1587 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1588 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1589 /* arg pointer */
1590 NON_Q_REGS,
1591 /* flags, fpsr, fpcr, frame */
1592 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1593 /* SSE registers */
1594 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1595 SSE_REGS, SSE_REGS,
1596 /* MMX registers */
1597 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1598 MMX_REGS, MMX_REGS,
1599 /* REX registers */
1600 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1601 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1602 /* SSE REX registers */
1603 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1604 SSE_REGS, SSE_REGS,
1607 /* The "default" register map used in 32bit mode. */
1609 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1611 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1612 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1613 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1614 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1615 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1616 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1617 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1620 /* The "default" register map used in 64bit mode. */
1622 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1624 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1625 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1626 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1627 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1628 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1629 8,9,10,11,12,13,14,15, /* extended integer registers */
1630 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1633 /* Define the register numbers to be used in Dwarf debugging information.
1634 The SVR4 reference port C compiler uses the following register numbers
1635 in its Dwarf output code:
1636 0 for %eax (gcc regno = 0)
1637 1 for %ecx (gcc regno = 2)
1638 2 for %edx (gcc regno = 1)
1639 3 for %ebx (gcc regno = 3)
1640 4 for %esp (gcc regno = 7)
1641 5 for %ebp (gcc regno = 6)
1642 6 for %esi (gcc regno = 4)
1643 7 for %edi (gcc regno = 5)
1644 The following three DWARF register numbers are never generated by
1645 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1646 believes these numbers have these meanings.
1647 8 for %eip (no gcc equivalent)
1648 9 for %eflags (gcc regno = 17)
1649 10 for %trapno (no gcc equivalent)
1650 It is not at all clear how we should number the FP stack registers
1651 for the x86 architecture. If the version of SDB on x86/svr4 were
1652 a bit less brain dead with respect to floating-point then we would
1653 have a precedent to follow with respect to DWARF register numbers
1654 for x86 FP registers, but the SDB on x86/svr4 is so completely
1655 broken with respect to FP registers that it is hardly worth thinking
1656 of it as something to strive for compatibility with.
1657 The version of x86/svr4 SDB I have at the moment does (partially)
1658 seem to believe that DWARF register number 11 is associated with
1659 the x86 register %st(0), but that's about all. Higher DWARF
1660 register numbers don't seem to be associated with anything in
1661 particular, and even for DWARF regno 11, SDB only seems to under-
1662 stand that it should say that a variable lives in %st(0) (when
1663 asked via an `=' command) if we said it was in DWARF regno 11,
1664 but SDB still prints garbage when asked for the value of the
1665 variable in question (via a `/' command).
1666 (Also note that the labels SDB prints for various FP stack regs
1667 when doing an `x' command are all wrong.)
1668 Note that these problems generally don't affect the native SVR4
1669 C compiler because it doesn't allow the use of -O with -g and
1670 because when it is *not* optimizing, it allocates a memory
1671 location for each floating-point variable, and the memory
1672 location is what gets described in the DWARF AT_location
1673 attribute for the variable in question.
1674 Regardless of the severe mental illness of the x86/svr4 SDB, we
1675 do something sensible here and we use the following DWARF
1676 register numbers. Note that these are all stack-top-relative
1677 numbers.
1678 11 for %st(0) (gcc regno = 8)
1679 12 for %st(1) (gcc regno = 9)
1680 13 for %st(2) (gcc regno = 10)
1681 14 for %st(3) (gcc regno = 11)
1682 15 for %st(4) (gcc regno = 12)
1683 16 for %st(5) (gcc regno = 13)
1684 17 for %st(6) (gcc regno = 14)
1685 18 for %st(7) (gcc regno = 15)
1687 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1689 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1690 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1691 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1692 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1693 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1694 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1695 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1698 /* Test and compare insns in i386.md store the information needed to
1699 generate branch and scc insns here. */
1701 rtx ix86_compare_op0 = NULL_RTX;
1702 rtx ix86_compare_op1 = NULL_RTX;
1704 /* Define parameter passing and return registers. */
1706 static int const x86_64_int_parameter_registers[6] =
1708 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1711 static int const x86_64_ms_abi_int_parameter_registers[4] =
1713 CX_REG, DX_REG, R8_REG, R9_REG
1716 static int const x86_64_int_return_registers[4] =
1718 AX_REG, DX_REG, DI_REG, SI_REG
1721 /* Define the structure for the machine field in struct function. */
1723 struct GTY(()) stack_local_entry {
1724 unsigned short mode;
1725 unsigned short n;
1726 rtx rtl;
1727 struct stack_local_entry *next;
1730 /* Structure describing stack frame layout.
1731 Stack grows downward:
1733 [arguments]
1734 <- ARG_POINTER
1735 saved pc
1737 saved frame pointer if frame_pointer_needed
1738 <- HARD_FRAME_POINTER
1739 [saved regs]
1741 [padding0]
1743 [saved SSE regs]
1745 [padding1] \
1747 [va_arg registers] (
1748 > to_allocate <- FRAME_POINTER
1749 [frame] (
1751 [padding2] /
1753 struct ix86_frame
1755 int padding0;
1756 int nsseregs;
1757 int nregs;
1758 int padding1;
1759 int va_arg_size;
1760 HOST_WIDE_INT frame;
1761 int padding2;
1762 int outgoing_arguments_size;
1763 int red_zone_size;
1765 HOST_WIDE_INT to_allocate;
1766 /* The offsets relative to ARG_POINTER. */
1767 HOST_WIDE_INT frame_pointer_offset;
1768 HOST_WIDE_INT hard_frame_pointer_offset;
1769 HOST_WIDE_INT stack_pointer_offset;
1771 /* When save_regs_using_mov is set, emit prologue using
1772 move instead of push instructions. */
1773 bool save_regs_using_mov;
1776 /* Code model option. */
1777 enum cmodel ix86_cmodel;
1778 /* Asm dialect. */
1779 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1780 /* TLS dialects. */
1781 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1783 /* Which unit we are generating floating point math for. */
1784 enum fpmath_unit ix86_fpmath;
1786 /* Which cpu are we scheduling for. */
1787 enum attr_cpu ix86_schedule;
1789 /* Which cpu are we optimizing for. */
1790 enum processor_type ix86_tune;
1792 /* Which instruction set architecture to use. */
1793 enum processor_type ix86_arch;
1795 /* True if the SSE prefetch instruction is not a NOOP. */
1796 int x86_prefetch_sse;
1798 /* ix86_regparm_string as a number */
1799 static int ix86_regparm;
1801 /* -mstackrealign option */
1802 extern int ix86_force_align_arg_pointer;
1803 static const char ix86_force_align_arg_pointer_string[]
1804 = "force_align_arg_pointer";
1806 static rtx (*ix86_gen_leave) (void);
1807 static rtx (*ix86_gen_pop1) (rtx);
1808 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1809 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1810 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1811 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1812 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1813 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1815 /* Preferred alignment for stack boundary in bits. */
1816 unsigned int ix86_preferred_stack_boundary;
1818 /* Alignment for incoming stack boundary in bits specified at
1819 command line. */
1820 static unsigned int ix86_user_incoming_stack_boundary;
1822 /* Default alignment for incoming stack boundary in bits. */
1823 static unsigned int ix86_default_incoming_stack_boundary;
1825 /* Alignment for incoming stack boundary in bits. */
1826 unsigned int ix86_incoming_stack_boundary;
1828 /* The ABI used by the target. */
1829 enum calling_abi ix86_abi;
1831 /* Values 1-5: see jump.c */
1832 int ix86_branch_cost;
1834 /* Calling abi specific va_list type nodes. */
1835 static GTY(()) tree sysv_va_list_type_node;
1836 static GTY(()) tree ms_va_list_type_node;
1838 /* Variables which are this size or smaller are put in the data/bss
1839 or ldata/lbss sections. */
1841 int ix86_section_threshold = 65536;
1843 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1844 char internal_label_prefix[16];
1845 int internal_label_prefix_len;
1847 /* Fence to use after loop using movnt. */
1848 tree x86_mfence;
1850 /* Register class used for passing a given 64bit part of the argument.
1851 These represent classes as documented by the PS ABI, with the exception
1852 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
1853 uses SF or DFmode moves instead of DImode ones to avoid reformatting penalties.
1855 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1856 whenever possible (the upper half contains only padding); an illustrative note follows the enum below. */
1857 enum x86_64_reg_class
1859 X86_64_NO_CLASS,
1860 X86_64_INTEGER_CLASS,
1861 X86_64_INTEGERSI_CLASS,
1862 X86_64_SSE_CLASS,
1863 X86_64_SSESF_CLASS,
1864 X86_64_SSEDF_CLASS,
1865 X86_64_SSEUP_CLASS,
1866 X86_64_X87_CLASS,
1867 X86_64_X87UP_CLASS,
1868 X86_64_COMPLEX_X87_CLASS,
1869 X86_64_MEMORY_CLASS
1872 #define MAX_CLASSES 4
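/* Editorial illustration (not part of the original source): under the
   classification described above, a hypothetical type

       struct example { double d; int i; };

   passed by value would be expected to have its first eightbyte classified
   as X86_64_SSEDF_CLASS (the double travels as a DFmode SSE move) and its
   second as X86_64_INTEGERSI_CLASS (only the low 32 bits are meaningful,
   so the cheaper SImode move suffices).  */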
1874 /* Table of constants used by fldpi, fldln2, etc.... */
1875 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1876 static bool ext_80387_constants_init = 0;
1879 static struct machine_function * ix86_init_machine_status (void);
1880 static rtx ix86_function_value (const_tree, const_tree, bool);
1881 static rtx ix86_static_chain (const_tree, bool);
1882 static int ix86_function_regparm (const_tree, const_tree);
1883 static void ix86_compute_frame_layout (struct ix86_frame *);
1884 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1885 rtx, rtx, int);
1886 static void ix86_add_new_builtins (int);
1887 static rtx ix86_expand_vec_perm_builtin (tree);
1889 enum ix86_function_specific_strings
1891 IX86_FUNCTION_SPECIFIC_ARCH,
1892 IX86_FUNCTION_SPECIFIC_TUNE,
1893 IX86_FUNCTION_SPECIFIC_FPMATH,
1894 IX86_FUNCTION_SPECIFIC_MAX
1897 static char *ix86_target_string (int, int, const char *, const char *,
1898 const char *, bool);
1899 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1900 static void ix86_function_specific_save (struct cl_target_option *);
1901 static void ix86_function_specific_restore (struct cl_target_option *);
1902 static void ix86_function_specific_print (FILE *, int,
1903 struct cl_target_option *);
1904 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1905 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1906 static bool ix86_can_inline_p (tree, tree);
1907 static void ix86_set_current_function (tree);
1908 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
1910 static enum calling_abi ix86_function_abi (const_tree);
1913 /* The svr4 ABI for the i386 says that records and unions are returned
1914 in memory. */
1915 #ifndef DEFAULT_PCC_STRUCT_RETURN
1916 #define DEFAULT_PCC_STRUCT_RETURN 1
1917 #endif
1919 /* Whether -mtune= or -march= were specified */
1920 static int ix86_tune_defaulted;
1921 static int ix86_arch_specified;
1923 /* Bit flags that specify the ISA we are compiling for. */
1924 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1926 /* A mask of ix86_isa_flags that includes bit X if X
1927 was set or cleared on the command line. */
1928 static int ix86_isa_flags_explicit;
1930 /* Define a set of ISAs which are available when a given ISA is
1931 enabled. MMX and SSE ISAs are handled separately. */
1933 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1934 #define OPTION_MASK_ISA_3DNOW_SET \
1935 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1937 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1938 #define OPTION_MASK_ISA_SSE2_SET \
1939 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1940 #define OPTION_MASK_ISA_SSE3_SET \
1941 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1942 #define OPTION_MASK_ISA_SSSE3_SET \
1943 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1944 #define OPTION_MASK_ISA_SSE4_1_SET \
1945 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1946 #define OPTION_MASK_ISA_SSE4_2_SET \
1947 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1948 #define OPTION_MASK_ISA_AVX_SET \
1949 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1950 #define OPTION_MASK_ISA_FMA_SET \
1951 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1953 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1954 as -msse4.2. */
1955 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1957 #define OPTION_MASK_ISA_SSE4A_SET \
1958 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1959 #define OPTION_MASK_ISA_FMA4_SET \
1960 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
1961 | OPTION_MASK_ISA_AVX_SET)
1962 #define OPTION_MASK_ISA_XOP_SET \
1963 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
1964 #define OPTION_MASK_ISA_LWP_SET \
1965 OPTION_MASK_ISA_LWP
1967 /* AES and PCLMUL need SSE2 because they use xmm registers */
1968 #define OPTION_MASK_ISA_AES_SET \
1969 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1970 #define OPTION_MASK_ISA_PCLMUL_SET \
1971 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1973 #define OPTION_MASK_ISA_ABM_SET \
1974 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1976 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1977 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1978 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1979 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
1980 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
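/* Editorial note (illustrative sketch, not part of the original source):
   the _SET masks above encode ISA prerequisites transitively, so an option
   handler only ever needs to OR in a single mask.  Expanding
   OPTION_MASK_ISA_XOP_SET through the definitions above gives
   XOP | FMA4 | SSE4A | AVX | SSE4_2 | SSE4_1 | SSSE3 | SSE3 | SSE2 | SSE,
   which is why the OPT_mxop "on" path in ix86_handle_option below needs
   nothing more than:

       ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
       ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;  */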
1982 /* Define a set of ISAs which aren't available when a given ISA is
1983 disabled. MMX and SSE ISAs are handled separately. */
1985 #define OPTION_MASK_ISA_MMX_UNSET \
1986 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1987 #define OPTION_MASK_ISA_3DNOW_UNSET \
1988 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1989 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1991 #define OPTION_MASK_ISA_SSE_UNSET \
1992 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1993 #define OPTION_MASK_ISA_SSE2_UNSET \
1994 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
1995 #define OPTION_MASK_ISA_SSE3_UNSET \
1996 (OPTION_MASK_ISA_SSE3 \
1997 | OPTION_MASK_ISA_SSSE3_UNSET \
1998 | OPTION_MASK_ISA_SSE4A_UNSET )
1999 #define OPTION_MASK_ISA_SSSE3_UNSET \
2000 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2001 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2002 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2003 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2004 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2005 #define OPTION_MASK_ISA_AVX_UNSET \
2006 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2007 | OPTION_MASK_ISA_FMA4_UNSET)
2008 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2010 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
2011 as -mno-sse4.1. */
2012 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2014 #define OPTION_MASK_ISA_SSE4A_UNSET \
2015 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2017 #define OPTION_MASK_ISA_FMA4_UNSET \
2018 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2019 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2020 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2022 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2023 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2024 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2025 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2026 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2027 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2028 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2029 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
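/* Editorial note (illustrative sketch, not part of the original source):
   the _UNSET masks run the dependency chain in the opposite direction, so
   turning an ISA off also turns off everything built on top of it.  The
   OPT_msse2 "off" path below does
       ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
   which clears SSE2 together with SSE3, SSSE3, SSE4.1, SSE4.2, SSE4A, AVX,
   FMA, FMA4 and XOP, while leaving plain SSE and MMX alone.  */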
2031 /* Vectorization library interface and handlers. */
2032 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2033 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2034 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2036 /* Processor target table, indexed by processor number */
2037 struct ptt
2039 const struct processor_costs *cost; /* Processor costs */
2040 const int align_loop; /* Default alignments. */
2041 const int align_loop_max_skip;
2042 const int align_jump;
2043 const int align_jump_max_skip;
2044 const int align_func;
2047 static const struct ptt processor_target_table[PROCESSOR_max] =
2049 {&i386_cost, 4, 3, 4, 3, 4},
2050 {&i486_cost, 16, 15, 16, 15, 16},
2051 {&pentium_cost, 16, 7, 16, 7, 16},
2052 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2053 {&geode_cost, 0, 0, 0, 0, 0},
2054 {&k6_cost, 32, 7, 32, 7, 32},
2055 {&athlon_cost, 16, 7, 16, 7, 16},
2056 {&pentium4_cost, 0, 0, 0, 0, 0},
2057 {&k8_cost, 16, 7, 16, 7, 16},
2058 {&nocona_cost, 0, 0, 0, 0, 0},
2059 {&core2_cost, 16, 10, 16, 10, 16},
2060 {&generic32_cost, 16, 7, 16, 7, 16},
2061 {&generic64_cost, 16, 10, 16, 10, 16},
2062 {&amdfam10_cost, 32, 24, 32, 7, 32},
2063 {&atom_cost, 16, 7, 16, 7, 16}
2066 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2068 "generic",
2069 "i386",
2070 "i486",
2071 "pentium",
2072 "pentium-mmx",
2073 "pentiumpro",
2074 "pentium2",
2075 "pentium3",
2076 "pentium4",
2077 "pentium-m",
2078 "prescott",
2079 "nocona",
2080 "core2",
2081 "atom",
2082 "geode",
2083 "k6",
2084 "k6-2",
2085 "k6-3",
2086 "athlon",
2087 "athlon-4",
2088 "k8",
2089 "amdfam10"
2092 /* Implement TARGET_HANDLE_OPTION. */
2094 static bool
2095 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2097 switch (code)
2099 case OPT_mmmx:
2100 if (value)
2102 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2103 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2105 else
2107 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2108 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2110 return true;
2112 case OPT_m3dnow:
2113 if (value)
2115 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2116 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2118 else
2120 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2121 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2123 return true;
2125 case OPT_m3dnowa:
2126 return false;
2128 case OPT_msse:
2129 if (value)
2131 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2132 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2134 else
2136 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2137 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2139 return true;
2141 case OPT_msse2:
2142 if (value)
2144 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2145 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2147 else
2149 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2150 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2152 return true;
2154 case OPT_msse3:
2155 if (value)
2157 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2158 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2160 else
2162 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2163 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2165 return true;
2167 case OPT_mssse3:
2168 if (value)
2170 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2171 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2173 else
2175 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2176 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2178 return true;
2180 case OPT_msse4_1:
2181 if (value)
2183 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2184 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2186 else
2188 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2189 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2191 return true;
2193 case OPT_msse4_2:
2194 if (value)
2196 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2197 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2199 else
2201 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2202 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2204 return true;
2206 case OPT_mavx:
2207 if (value)
2209 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2210 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2212 else
2214 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2215 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2217 return true;
2219 case OPT_mfma:
2220 if (value)
2222 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2223 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2225 else
2227 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2228 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2230 return true;
2232 case OPT_msse4:
2233 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2234 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2235 return true;
2237 case OPT_mno_sse4:
2238 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2239 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2240 return true;
2242 case OPT_msse4a:
2243 if (value)
2245 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2246 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2248 else
2250 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2251 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2253 return true;
2255 case OPT_mfma4:
2256 if (value)
2258 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2259 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2261 else
2263 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2264 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2266 return true;
2268 case OPT_mxop:
2269 if (value)
2271 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2272 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2274 else
2276 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2277 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2279 return true;
2281 case OPT_mlwp:
2282 if (value)
2284 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2285 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2287 else
2289 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2290 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2292 return true;
2294 case OPT_mabm:
2295 if (value)
2297 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2298 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2300 else
2302 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2303 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2305 return true;
2307 case OPT_mpopcnt:
2308 if (value)
2310 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2311 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2313 else
2315 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2316 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2318 return true;
2320 case OPT_msahf:
2321 if (value)
2323 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2324 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2326 else
2328 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2329 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2331 return true;
2333 case OPT_mcx16:
2334 if (value)
2336 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2337 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2339 else
2341 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2342 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2344 return true;
2346 case OPT_mmovbe:
2347 if (value)
2349 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2350 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2352 else
2354 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2355 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2357 return true;
2359 case OPT_mcrc32:
2360 if (value)
2362 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2363 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2365 else
2367 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2368 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2370 return true;
2372 case OPT_maes:
2373 if (value)
2375 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2376 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2378 else
2380 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2381 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2383 return true;
2385 case OPT_mpclmul:
2386 if (value)
2388 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2389 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2391 else
2393 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2394 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2396 return true;
2398 default:
2399 return true;
2403 /* Return a string that documents the current -m options. The caller is
2404 responsible for freeing the string. */
2406 static char *
2407 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2408 const char *fpmath, bool add_nl_p)
2410 struct ix86_target_opts
2412 const char *option; /* option string */
2413 int mask; /* isa mask options */
2416 /* This table is ordered so that options like -msse4.2 that imply
2417 other options are matched before the options they imply. */
2418 static struct ix86_target_opts isa_opts[] =
2420 { "-m64", OPTION_MASK_ISA_64BIT },
2421 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2422 { "-mxop", OPTION_MASK_ISA_XOP },
2423 { "-mlwp", OPTION_MASK_ISA_LWP },
2424 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2425 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2426 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2427 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2428 { "-msse3", OPTION_MASK_ISA_SSE3 },
2429 { "-msse2", OPTION_MASK_ISA_SSE2 },
2430 { "-msse", OPTION_MASK_ISA_SSE },
2431 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2432 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2433 { "-mmmx", OPTION_MASK_ISA_MMX },
2434 { "-mabm", OPTION_MASK_ISA_ABM },
2435 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2436 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2437 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2438 { "-maes", OPTION_MASK_ISA_AES },
2439 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2442 /* Flag options. */
2443 static struct ix86_target_opts flag_opts[] =
2445 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2446 { "-m80387", MASK_80387 },
2447 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2448 { "-malign-double", MASK_ALIGN_DOUBLE },
2449 { "-mcld", MASK_CLD },
2450 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2451 { "-mieee-fp", MASK_IEEE_FP },
2452 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2453 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2454 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2455 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2456 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2457 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2458 { "-mno-red-zone", MASK_NO_RED_ZONE },
2459 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2460 { "-mrecip", MASK_RECIP },
2461 { "-mrtd", MASK_RTD },
2462 { "-msseregparm", MASK_SSEREGPARM },
2463 { "-mstack-arg-probe", MASK_STACK_PROBE },
2464 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2467 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
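/* Editorial note: the "+ 6" leaves room for the -march=, -mtune= and
   -mfpmath= entries added below plus the two "(other ...)" catch-all
   entries, with one slot to spare.  */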
2469 char isa_other[40];
2470 char target_other[40];
2471 unsigned num = 0;
2472 unsigned i, j;
2473 char *ret;
2474 char *ptr;
2475 size_t len;
2476 size_t line_len;
2477 size_t sep_len;
2479 memset (opts, '\0', sizeof (opts));
2481 /* Add -march= option. */
2482 if (arch)
2484 opts[num][0] = "-march=";
2485 opts[num++][1] = arch;
2488 /* Add -mtune= option. */
2489 if (tune)
2491 opts[num][0] = "-mtune=";
2492 opts[num++][1] = tune;
2495 /* Pick out the options in isa options. */
2496 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2498 if ((isa & isa_opts[i].mask) != 0)
2500 opts[num++][0] = isa_opts[i].option;
2501 isa &= ~ isa_opts[i].mask;
2505 if (isa && add_nl_p)
2507 opts[num++][0] = isa_other;
2508 sprintf (isa_other, "(other isa: 0x%x)", isa);
2511 /* Add flag options. */
2512 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2514 if ((flags & flag_opts[i].mask) != 0)
2516 opts[num++][0] = flag_opts[i].option;
2517 flags &= ~ flag_opts[i].mask;
2521 if (flags && add_nl_p)
2523 opts[num++][0] = target_other;
2524 sprintf (target_other, "(other flags: 0x%x)", flags);
2527 /* Add -fpmath= option. */
2528 if (fpmath)
2530 opts[num][0] = "-mfpmath=";
2531 opts[num++][1] = fpmath;
2534 /* Any options? */
2535 if (num == 0)
2536 return NULL;
2538 gcc_assert (num < ARRAY_SIZE (opts));
2540 /* Size the string. */
2541 len = 0;
2542 sep_len = (add_nl_p) ? 3 : 1;
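/* Editorial note: a separator is either a single space or, when line
   wrapping is requested, space + backslash + newline (three characters);
   sizing with the larger value keeps the buffer safe either way.  */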
2543 for (i = 0; i < num; i++)
2545 len += sep_len;
2546 for (j = 0; j < 2; j++)
2547 if (opts[i][j])
2548 len += strlen (opts[i][j]);
2551 /* Build the string. */
2552 ret = ptr = (char *) xmalloc (len);
2553 line_len = 0;
2555 for (i = 0; i < num; i++)
2557 size_t len2[2];
2559 for (j = 0; j < 2; j++)
2560 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2562 if (i != 0)
2564 *ptr++ = ' ';
2565 line_len++;
2567 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2569 *ptr++ = '\\';
2570 *ptr++ = '\n';
2571 line_len = 0;
2575 for (j = 0; j < 2; j++)
2576 if (opts[i][j])
2578 memcpy (ptr, opts[i][j], len2[j]);
2579 ptr += len2[j];
2580 line_len += len2[j];
2584 *ptr = '\0';
2585 gcc_assert (ret + len >= ptr);
2587 return ret;
2590 /* Function that is callable from the debugger to print the current
2591 options. */
2592 void
2593 ix86_debug_options (void)
2595 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2596 ix86_arch_string, ix86_tune_string,
2597 ix86_fpmath_string, true);
2599 if (opts)
2601 fprintf (stderr, "%s\n\n", opts);
2602 free (opts);
2604 else
2605 fputs ("<no options>\n\n", stderr);
2607 return;
2610 /* Sometimes certain combinations of command options do not make
2611 sense on a particular target machine. You can define a macro
2612 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2613 defined, is executed once just after all the command options have
2614 been parsed.
2616 Don't use this macro to turn on various extra optimizations for
2617 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2619 void
2620 override_options (bool main_args_p)
2622 int i;
2623 unsigned int ix86_arch_mask, ix86_tune_mask;
2624 const char *prefix;
2625 const char *suffix;
2626 const char *sw;
2628 /* Comes from final.c -- no real reason to change it. */
2629 #define MAX_CODE_ALIGN 16
2631 enum pta_flags
2633 PTA_SSE = 1 << 0,
2634 PTA_SSE2 = 1 << 1,
2635 PTA_SSE3 = 1 << 2,
2636 PTA_MMX = 1 << 3,
2637 PTA_PREFETCH_SSE = 1 << 4,
2638 PTA_3DNOW = 1 << 5,
2639 PTA_3DNOW_A = 1 << 6,
2640 PTA_64BIT = 1 << 7,
2641 PTA_SSSE3 = 1 << 8,
2642 PTA_CX16 = 1 << 9,
2643 PTA_POPCNT = 1 << 10,
2644 PTA_ABM = 1 << 11,
2645 PTA_SSE4A = 1 << 12,
2646 PTA_NO_SAHF = 1 << 13,
2647 PTA_SSE4_1 = 1 << 14,
2648 PTA_SSE4_2 = 1 << 15,
2649 PTA_AES = 1 << 16,
2650 PTA_PCLMUL = 1 << 17,
2651 PTA_AVX = 1 << 18,
2652 PTA_FMA = 1 << 19,
2653 PTA_MOVBE = 1 << 20,
2654 PTA_FMA4 = 1 << 21,
2655 PTA_XOP = 1 << 22,
2656 PTA_LWP = 1 << 23
2659 static struct pta
2661 const char *const name; /* processor name or nickname. */
2662 const enum processor_type processor;
2663 const enum attr_cpu schedule;
2664 const unsigned /*enum pta_flags*/ flags;
2666 const processor_alias_table[] =
2668 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2669 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2670 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2671 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2672 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2673 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2674 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2675 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2676 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2677 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2678 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2679 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2680 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2681 PTA_MMX | PTA_SSE},
2682 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2683 PTA_MMX | PTA_SSE},
2684 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2685 PTA_MMX | PTA_SSE | PTA_SSE2},
2686 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2687 PTA_MMX |PTA_SSE | PTA_SSE2},
2688 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2689 PTA_MMX | PTA_SSE | PTA_SSE2},
2690 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2691 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2692 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2693 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2694 | PTA_CX16 | PTA_NO_SAHF},
2695 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2696 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2697 | PTA_SSSE3 | PTA_CX16},
2698 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2699 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2700 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2701 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2702 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2703 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2704 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2705 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2706 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2707 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2708 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2709 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2710 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2711 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2712 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2713 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2714 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2715 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2716 {"x86-64", PROCESSOR_K8, CPU_K8,
2717 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2718 {"k8", PROCESSOR_K8, CPU_K8,
2719 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2720 | PTA_SSE2 | PTA_NO_SAHF},
2721 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2722 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2723 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2724 {"opteron", PROCESSOR_K8, CPU_K8,
2725 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2726 | PTA_SSE2 | PTA_NO_SAHF},
2727 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2728 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2729 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2730 {"athlon64", PROCESSOR_K8, CPU_K8,
2731 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2732 | PTA_SSE2 | PTA_NO_SAHF},
2733 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2734 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2735 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2736 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2737 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2738 | PTA_SSE2 | PTA_NO_SAHF},
2739 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2740 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2741 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2742 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2743 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2744 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2745 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2746 0 /* flags are only used for -march switch. */ },
2747 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2748 PTA_64BIT /* flags are only used for -march switch. */ },
2751 int const pta_size = ARRAY_SIZE (processor_alias_table);
2753 /* Set up prefix/suffix so the error messages refer to either the command
2754 line argument, or the attribute(target). */
2755 if (main_args_p)
2757 prefix = "-m";
2758 suffix = "";
2759 sw = "switch";
2761 else
2763 prefix = "option(\"";
2764 suffix = "\")";
2765 sw = "attribute";
2768 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2769 SUBTARGET_OVERRIDE_OPTIONS;
2770 #endif
2772 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2773 SUBSUBTARGET_OVERRIDE_OPTIONS;
2774 #endif
2776 /* -fPIC is the default for x86_64. */
2777 if (TARGET_MACHO && TARGET_64BIT)
2778 flag_pic = 2;
2780 /* Set the default values for switches whose default depends on TARGET_64BIT
2781 in case they weren't overwritten by command line options. */
2782 if (TARGET_64BIT)
2784 /* Mach-O doesn't support omitting the frame pointer for now. */
2785 if (flag_omit_frame_pointer == 2)
2786 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2787 if (flag_asynchronous_unwind_tables == 2)
2788 flag_asynchronous_unwind_tables = 1;
2789 if (flag_pcc_struct_return == 2)
2790 flag_pcc_struct_return = 0;
2792 else
2794 if (flag_omit_frame_pointer == 2)
2795 flag_omit_frame_pointer = 0;
2796 if (flag_asynchronous_unwind_tables == 2)
2797 flag_asynchronous_unwind_tables = 0;
2798 if (flag_pcc_struct_return == 2)
2799 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2802 /* Need to check -mtune=generic first. */
2803 if (ix86_tune_string)
2805 if (!strcmp (ix86_tune_string, "generic")
2806 || !strcmp (ix86_tune_string, "i686")
2807 /* As special support for cross compilers we read -mtune=native
2808 as -mtune=generic. With native compilers we won't see the
2809 -mtune=native, as it was changed by the driver. */
2810 || !strcmp (ix86_tune_string, "native"))
2812 if (TARGET_64BIT)
2813 ix86_tune_string = "generic64";
2814 else
2815 ix86_tune_string = "generic32";
2817 /* If this call is for setting the option attribute, allow the
2818 generic32/generic64 that was previously set. */
2819 else if (!main_args_p
2820 && (!strcmp (ix86_tune_string, "generic32")
2821 || !strcmp (ix86_tune_string, "generic64")))
2823 else if (!strncmp (ix86_tune_string, "generic", 7))
2824 error ("bad value (%s) for %stune=%s %s",
2825 ix86_tune_string, prefix, suffix, sw);
2827 else
2829 if (ix86_arch_string)
2830 ix86_tune_string = ix86_arch_string;
2831 if (!ix86_tune_string)
2833 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2834 ix86_tune_defaulted = 1;
2837 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2838 need to use a sensible tune option. */
2839 if (!strcmp (ix86_tune_string, "generic")
2840 || !strcmp (ix86_tune_string, "x86-64")
2841 || !strcmp (ix86_tune_string, "i686"))
2843 if (TARGET_64BIT)
2844 ix86_tune_string = "generic64";
2845 else
2846 ix86_tune_string = "generic32";
2849 if (ix86_stringop_string)
2851 if (!strcmp (ix86_stringop_string, "rep_byte"))
2852 stringop_alg = rep_prefix_1_byte;
2853 else if (!strcmp (ix86_stringop_string, "libcall"))
2854 stringop_alg = libcall;
2855 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2856 stringop_alg = rep_prefix_4_byte;
2857 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2858 && TARGET_64BIT)
2859 /* rep; movq isn't available in 32-bit code. */
2860 stringop_alg = rep_prefix_8_byte;
2861 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2862 stringop_alg = loop_1_byte;
2863 else if (!strcmp (ix86_stringop_string, "loop"))
2864 stringop_alg = loop;
2865 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2866 stringop_alg = unrolled_loop;
2867 else
2868 error ("bad value (%s) for %sstringop-strategy=%s %s",
2869 ix86_stringop_string, prefix, suffix, sw);
2871 if (!strcmp (ix86_tune_string, "x86-64"))
2872 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2873 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2874 prefix, suffix, prefix, suffix, prefix, suffix);
2876 if (!ix86_arch_string)
2877 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
2878 else
2879 ix86_arch_specified = 1;
2881 if (!strcmp (ix86_arch_string, "generic"))
2882 error ("generic CPU can be used only for %stune=%s %s",
2883 prefix, suffix, sw);
2884 if (!strncmp (ix86_arch_string, "generic", 7))
2885 error ("bad value (%s) for %sarch=%s %s",
2886 ix86_arch_string, prefix, suffix, sw);
2888 /* Validate -mabi= value. */
2889 if (ix86_abi_string)
2891 if (strcmp (ix86_abi_string, "sysv") == 0)
2892 ix86_abi = SYSV_ABI;
2893 else if (strcmp (ix86_abi_string, "ms") == 0)
2894 ix86_abi = MS_ABI;
2895 else
2896 error ("unknown ABI (%s) for %sabi=%s %s",
2897 ix86_abi_string, prefix, suffix, sw);
2899 else
2900 ix86_abi = DEFAULT_ABI;
2902 if (ix86_cmodel_string != 0)
2904 if (!strcmp (ix86_cmodel_string, "small"))
2905 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2906 else if (!strcmp (ix86_cmodel_string, "medium"))
2907 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2908 else if (!strcmp (ix86_cmodel_string, "large"))
2909 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2910 else if (flag_pic)
2911 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2912 else if (!strcmp (ix86_cmodel_string, "32"))
2913 ix86_cmodel = CM_32;
2914 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2915 ix86_cmodel = CM_KERNEL;
2916 else
2917 error ("bad value (%s) for %scmodel=%s %s",
2918 ix86_cmodel_string, prefix, suffix, sw);
2920 else
2922 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2923 use of rip-relative addressing. This eliminates fixups that
2924 would otherwise be needed if this object is to be placed in a
2925 DLL, and is essentially just as efficient as direct addressing. */
2926 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2927 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2928 else if (TARGET_64BIT)
2929 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2930 else
2931 ix86_cmodel = CM_32;
2933 if (ix86_asm_string != 0)
2935 if (! TARGET_MACHO
2936 && !strcmp (ix86_asm_string, "intel"))
2937 ix86_asm_dialect = ASM_INTEL;
2938 else if (!strcmp (ix86_asm_string, "att"))
2939 ix86_asm_dialect = ASM_ATT;
2940 else
2941 error ("bad value (%s) for %sasm=%s %s",
2942 ix86_asm_string, prefix, suffix, sw);
2944 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2945 error ("code model %qs not supported in the %s bit mode",
2946 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2947 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2948 sorry ("%i-bit mode not compiled in",
2949 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2951 for (i = 0; i < pta_size; i++)
2952 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2954 ix86_schedule = processor_alias_table[i].schedule;
2955 ix86_arch = processor_alias_table[i].processor;
2956 /* Default cpu tuning to the architecture. */
2957 ix86_tune = ix86_arch;
2959 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2960 error ("CPU you selected does not support x86-64 "
2961 "instruction set");
2963 if (processor_alias_table[i].flags & PTA_MMX
2964 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2965 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2966 if (processor_alias_table[i].flags & PTA_3DNOW
2967 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2968 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2969 if (processor_alias_table[i].flags & PTA_3DNOW_A
2970 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2971 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2972 if (processor_alias_table[i].flags & PTA_SSE
2973 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2974 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2975 if (processor_alias_table[i].flags & PTA_SSE2
2976 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2977 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2978 if (processor_alias_table[i].flags & PTA_SSE3
2979 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2980 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2981 if (processor_alias_table[i].flags & PTA_SSSE3
2982 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2983 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2984 if (processor_alias_table[i].flags & PTA_SSE4_1
2985 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2986 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2987 if (processor_alias_table[i].flags & PTA_SSE4_2
2988 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2989 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2990 if (processor_alias_table[i].flags & PTA_AVX
2991 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2992 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2993 if (processor_alias_table[i].flags & PTA_FMA
2994 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2995 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2996 if (processor_alias_table[i].flags & PTA_SSE4A
2997 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
2998 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
2999 if (processor_alias_table[i].flags & PTA_FMA4
3000 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3001 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3002 if (processor_alias_table[i].flags & PTA_XOP
3003 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3004 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3005 if (processor_alias_table[i].flags & PTA_LWP
3006 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3007 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3008 if (processor_alias_table[i].flags & PTA_ABM
3009 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3010 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3011 if (processor_alias_table[i].flags & PTA_CX16
3012 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3013 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3014 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3015 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3016 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3017 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3018 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3019 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3020 if (processor_alias_table[i].flags & PTA_MOVBE
3021 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3022 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3023 if (processor_alias_table[i].flags & PTA_AES
3024 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3025 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3026 if (processor_alias_table[i].flags & PTA_PCLMUL
3027 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3028 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3029 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3030 x86_prefetch_sse = true;
3032 break;
3035 if (i == pta_size)
3036 error ("bad value (%s) for %sarch=%s %s",
3037 ix86_arch_string, prefix, suffix, sw);
3039 ix86_arch_mask = 1u << ix86_arch;
3040 for (i = 0; i < X86_ARCH_LAST; ++i)
3041 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3043 for (i = 0; i < pta_size; i++)
3044 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3046 ix86_schedule = processor_alias_table[i].schedule;
3047 ix86_tune = processor_alias_table[i].processor;
3048 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3050 if (ix86_tune_defaulted)
3052 ix86_tune_string = "x86-64";
3053 for (i = 0; i < pta_size; i++)
3054 if (! strcmp (ix86_tune_string,
3055 processor_alias_table[i].name))
3056 break;
3057 ix86_schedule = processor_alias_table[i].schedule;
3058 ix86_tune = processor_alias_table[i].processor;
3060 else
3061 error ("CPU you selected does not support x86-64 "
3062 "instruction set");
3064 /* Intel CPUs have always interpreted SSE prefetch instructions as
3065 NOPs; so, we can enable SSE prefetch instructions even when
3066 -mtune (rather than -march) points us to a processor that has them.
3067 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3068 higher processors. */
3069 if (TARGET_CMOVE
3070 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3071 x86_prefetch_sse = true;
3072 break;
3074 if (i == pta_size)
3075 error ("bad value (%s) for %stune=%s %s",
3076 ix86_tune_string, prefix, suffix, sw);
3078 ix86_tune_mask = 1u << ix86_tune;
3079 for (i = 0; i < X86_TUNE_LAST; ++i)
3080 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
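/* Editorial illustration (not part of the original source): this turns the
   processor-mask table near the top of the file into plain 0/1 flags for
   the CPU selected by -mtune.  With -mtune=core2, for instance,
   ix86_tune_mask is (1u << PROCESSOR_CORE2), i.e. m_CORE2, so
   X86_TUNE_PAD_RETURNS (whose mask includes m_CORE2) becomes 1 while
   X86_TUNE_USE_MOV0 (m_K6 only) becomes 0.  */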
3082 if (optimize_size)
3083 ix86_cost = &ix86_size_cost;
3084 else
3085 ix86_cost = processor_target_table[ix86_tune].cost;
3087 /* Arrange to set up i386_stack_locals for all functions. */
3088 init_machine_status = ix86_init_machine_status;
3090 /* Validate -mregparm= value. */
3091 if (ix86_regparm_string)
3093 if (TARGET_64BIT)
3094 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3095 i = atoi (ix86_regparm_string);
3096 if (i < 0 || i > REGPARM_MAX)
3097 error ("%sregparm=%d%s is not between 0 and %d",
3098 prefix, i, suffix, REGPARM_MAX);
3099 else
3100 ix86_regparm = i;
3102 if (TARGET_64BIT)
3103 ix86_regparm = REGPARM_MAX;
3105 /* If the user has provided any of the -malign-* options,
3106 warn and use that value only if -falign-* is not set.
3107 Remove this code in GCC 3.2 or later. */
3108 if (ix86_align_loops_string)
3110 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3111 prefix, suffix, suffix);
3112 if (align_loops == 0)
3114 i = atoi (ix86_align_loops_string);
3115 if (i < 0 || i > MAX_CODE_ALIGN)
3116 error ("%salign-loops=%d%s is not between 0 and %d",
3117 prefix, i, suffix, MAX_CODE_ALIGN);
3118 else
3119 align_loops = 1 << i;
3123 if (ix86_align_jumps_string)
3125 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3126 prefix, suffix, suffix);
3127 if (align_jumps == 0)
3129 i = atoi (ix86_align_jumps_string);
3130 if (i < 0 || i > MAX_CODE_ALIGN)
3131 error ("%salign-loops=%d%s is not between 0 and %d",
3132 prefix, i, suffix, MAX_CODE_ALIGN);
3133 else
3134 align_jumps = 1 << i;
3138 if (ix86_align_funcs_string)
3140 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3141 prefix, suffix, suffix);
3142 if (align_functions == 0)
3144 i = atoi (ix86_align_funcs_string);
3145 if (i < 0 || i > MAX_CODE_ALIGN)
3146 error ("%salign-loops=%d%s is not between 0 and %d",
3147 prefix, i, suffix, MAX_CODE_ALIGN);
3148 else
3149 align_functions = 1 << i;
3153 /* Default align_* from the processor table. */
3154 if (align_loops == 0)
3156 align_loops = processor_target_table[ix86_tune].align_loop;
3157 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3159 if (align_jumps == 0)
3161 align_jumps = processor_target_table[ix86_tune].align_jump;
3162 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3164 if (align_functions == 0)
3166 align_functions = processor_target_table[ix86_tune].align_func;
3169 /* Validate -mbranch-cost= value, or provide default. */
3170 ix86_branch_cost = ix86_cost->branch_cost;
3171 if (ix86_branch_cost_string)
3173 i = atoi (ix86_branch_cost_string);
3174 if (i < 0 || i > 5)
3175 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3176 else
3177 ix86_branch_cost = i;
3179 if (ix86_section_threshold_string)
3181 i = atoi (ix86_section_threshold_string);
3182 if (i < 0)
3183 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3184 else
3185 ix86_section_threshold = i;
3188 if (ix86_tls_dialect_string)
3190 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3191 ix86_tls_dialect = TLS_DIALECT_GNU;
3192 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3193 ix86_tls_dialect = TLS_DIALECT_GNU2;
3194 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
3195 ix86_tls_dialect = TLS_DIALECT_SUN;
3196 else
3197 error ("bad value (%s) for %stls-dialect=%s %s",
3198 ix86_tls_dialect_string, prefix, suffix, sw);
3201 if (ix87_precision_string)
3203 i = atoi (ix87_precision_string);
3204 if (i != 32 && i != 64 && i != 80)
3205 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3208 if (TARGET_64BIT)
3210 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3212 /* Enable by default the SSE and MMX builtins. Do allow the user to
3213 explicitly disable any of these. In particular, disabling SSE and
3214 MMX for kernel code is extremely useful. */
3215 if (!ix86_arch_specified)
3216 ix86_isa_flags
3217 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3218 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3220 if (TARGET_RTD)
3221 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3223 else
3225 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3227 if (!ix86_arch_specified)
3228 ix86_isa_flags
3229 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3231 /* The i386 ABI does not specify a red zone. It still makes sense to use it
3232 when the programmer takes care to keep the stack from being destroyed. */
3233 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3234 target_flags |= MASK_NO_RED_ZONE;
3237 /* Keep nonleaf frame pointers. */
3238 if (flag_omit_frame_pointer)
3239 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3240 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3241 flag_omit_frame_pointer = 1;
3243 /* If we're doing fast math, we don't care about comparison order
3244 wrt NaNs. This lets us use a shorter comparison sequence. */
3245 if (flag_finite_math_only)
3246 target_flags &= ~MASK_IEEE_FP;
3248 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3249 since the insns won't need emulation. */
3250 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3251 target_flags &= ~MASK_NO_FANCY_MATH_387;
3253 /* Likewise, if the target doesn't have a 387, or we've specified
3254 software floating point, don't use 387 inline intrinsics. */
3255 if (!TARGET_80387)
3256 target_flags |= MASK_NO_FANCY_MATH_387;
3258 /* Turn on MMX builtins for -msse. */
3259 if (TARGET_SSE)
3261 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3262 x86_prefetch_sse = true;
3265 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3266 if (TARGET_SSE4_2 || TARGET_ABM)
3267 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3269 /* Validate -mpreferred-stack-boundary= value or default it to
3270 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3271 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3272 if (ix86_preferred_stack_boundary_string)
3274 i = atoi (ix86_preferred_stack_boundary_string);
3275 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3276 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3277 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3278 else
3279 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
3282 /* Set the default value for -mstackrealign. */
3283 if (ix86_force_align_arg_pointer == -1)
3284 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3286 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3288 /* Validate -mincoming-stack-boundary= value or default it to
3289 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3290 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3291 if (ix86_incoming_stack_boundary_string)
3293 i = atoi (ix86_incoming_stack_boundary_string);
3294 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3295 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3296 i, TARGET_64BIT ? 4 : 2);
3297 else
3299 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3300 ix86_incoming_stack_boundary
3301 = ix86_user_incoming_stack_boundary;
3305 /* Accept -msseregparm only if at least SSE support is enabled. */
3306 if (TARGET_SSEREGPARM
3307 && ! TARGET_SSE)
3308 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3310 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3311 if (ix86_fpmath_string != 0)
3313 if (! strcmp (ix86_fpmath_string, "387"))
3314 ix86_fpmath = FPMATH_387;
3315 else if (! strcmp (ix86_fpmath_string, "sse"))
3317 if (!TARGET_SSE)
3319 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3320 ix86_fpmath = FPMATH_387;
3322 else
3323 ix86_fpmath = FPMATH_SSE;
3325 else if (! strcmp (ix86_fpmath_string, "387,sse")
3326 || ! strcmp (ix86_fpmath_string, "387+sse")
3327 || ! strcmp (ix86_fpmath_string, "sse,387")
3328 || ! strcmp (ix86_fpmath_string, "sse+387")
3329 || ! strcmp (ix86_fpmath_string, "both"))
3331 if (!TARGET_SSE)
3333 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3334 ix86_fpmath = FPMATH_387;
3336 else if (!TARGET_80387)
3338 warning (0, "387 instruction set disabled, using SSE arithmetic");
3339 ix86_fpmath = FPMATH_SSE;
3341 else
3342 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3344 else
3345 error ("bad value (%s) for %sfpmath=%s %s",
3346 ix86_fpmath_string, prefix, suffix, sw);
3349 /* If the i387 is disabled, then do not return values in it. */
3350 if (!TARGET_80387)
3351 target_flags &= ~MASK_FLOAT_RETURNS;
3353 /* Use external vectorized library in vectorizing intrinsics. */
3354 if (ix86_veclibabi_string)
3356 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3357 ix86_veclib_handler = ix86_veclibabi_svml;
3358 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3359 ix86_veclib_handler = ix86_veclibabi_acml;
3360 else
3361 error ("unknown vectorization library ABI type (%s) for "
3362 "%sveclibabi=%s %s", ix86_veclibabi_string,
3363 prefix, suffix, sw);
3366 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3367 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3368 && !optimize_size)
3369 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3371 /* ??? Unwind info is not correct around the CFG unless either a frame
3372 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3373 unwind info generation to be aware of the CFG and propagating states
3374 around edges. */
3375 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3376 || flag_exceptions || flag_non_call_exceptions)
3377 && flag_omit_frame_pointer
3378 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3380 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3381 warning (0, "unwind tables currently require either a frame pointer "
3382 "or %saccumulate-outgoing-args%s for correctness",
3383 prefix, suffix);
3384 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3387 /* If stack probes are required, the space used for large function
3388 arguments on the stack must also be probed, so enable
3389 -maccumulate-outgoing-args so this happens in the prologue. */
3390 if (TARGET_STACK_PROBE
3391 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3393 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3394 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3395 "for correctness", prefix, suffix);
3396 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3399 /* For sane SSE instruction set generation we need fcomi instruction.
3400 It is safe to enable all CMOVE instructions. */
3401 if (TARGET_SSE)
3402 TARGET_CMOVE = 1;
3404 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3406 char *p;
3407 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3408 p = strchr (internal_label_prefix, 'X');
3409 internal_label_prefix_len = p - internal_label_prefix;
3410 *p = '\0';
3413 /* When the scheduling description is not available, disable the scheduler
3414 pass so it won't slow down the compilation and make x87 code slower. */
3415 if (!TARGET_SCHEDULE)
3416 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3418 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3419 set_param_value ("simultaneous-prefetches",
3420 ix86_cost->simultaneous_prefetches);
3421 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3422 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3423 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3424 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3425 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3426 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3428 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3429 can be optimized to ap = __builtin_next_arg (0). */
3430 if (!TARGET_64BIT)
3431 targetm.expand_builtin_va_start = NULL;
3433 if (TARGET_64BIT)
3435 ix86_gen_leave = gen_leave_rex64;
3436 ix86_gen_pop1 = gen_popdi1;
3437 ix86_gen_add3 = gen_adddi3;
3438 ix86_gen_sub3 = gen_subdi3;
3439 ix86_gen_sub3_carry = gen_subdi3_carry;
3440 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3441 ix86_gen_monitor = gen_sse3_monitor64;
3442 ix86_gen_andsp = gen_anddi3;
3444 else
3446 ix86_gen_leave = gen_leave;
3447 ix86_gen_pop1 = gen_popsi1;
3448 ix86_gen_add3 = gen_addsi3;
3449 ix86_gen_sub3 = gen_subsi3;
3450 ix86_gen_sub3_carry = gen_subsi3_carry;
3451 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3452 ix86_gen_monitor = gen_sse3_monitor;
3453 ix86_gen_andsp = gen_andsi3;
3456 #ifdef USE_IX86_CLD
3457 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3458 if (!TARGET_64BIT)
3459 target_flags |= MASK_CLD & ~target_flags_explicit;
3460 #endif
3462 /* Save the initial options in case the user uses function-specific options. */
3463 if (main_args_p)
3464 target_option_default_node = target_option_current_node
3465 = build_target_option_node ();
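/* Illustrative note on the option handling above (assuming BITS_PER_UNIT
   is 8; the file name is hypothetical): a 32-bit command line such as

     gcc -m32 -mpreferred-stack-boundary=4 -mincoming-stack-boundary=2 foo.c

   passes the range checks and yields
     ix86_preferred_stack_boundary     = (1 << 4) * 8 = 128 bits (16 bytes)
     ix86_user_incoming_stack_boundary = (1 << 2) * 8 =  32 bits  (4 bytes).  */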
3468 /* Update register usage after having seen the compiler flags. */
3470 void
3471 ix86_conditional_register_usage (void)
3473 int i;
3474 unsigned int j;
3476 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3478 if (fixed_regs[i] > 1)
3479 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3480 if (call_used_regs[i] > 1)
3481 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3484 /* The PIC register, if it exists, is fixed. */
3485 j = PIC_OFFSET_TABLE_REGNUM;
3486 if (j != INVALID_REGNUM)
3487 fixed_regs[j] = call_used_regs[j] = 1;
3489 /* The MS_ABI changes the set of call-used registers. */
3490 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3492 call_used_regs[SI_REG] = 0;
3493 call_used_regs[DI_REG] = 0;
3494 call_used_regs[XMM6_REG] = 0;
3495 call_used_regs[XMM7_REG] = 0;
3496 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3497 call_used_regs[i] = 0;
3500 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3501 other call-clobbered regs for 64-bit. */
3502 if (TARGET_64BIT)
3504 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3506 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3507 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3508 && call_used_regs[i])
3509 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3512 /* If MMX is disabled, squash the registers. */
3513 if (! TARGET_MMX)
3514 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3515 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3516 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3518 /* If SSE is disabled, squash the registers. */
3519 if (! TARGET_SSE)
3520 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3521 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3522 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3524 /* If the FPU is disabled, squash the registers. */
3525 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3526 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3527 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3528 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3530 /* If 32-bit, squash the 64-bit registers. */
3531 if (! TARGET_64BIT)
3533 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3534 reg_names[i] = "";
3535 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3536 reg_names[i] = "";
3541 /* Save the current options */
3543 static void
3544 ix86_function_specific_save (struct cl_target_option *ptr)
3546 ptr->arch = ix86_arch;
3547 ptr->schedule = ix86_schedule;
3548 ptr->tune = ix86_tune;
3549 ptr->fpmath = ix86_fpmath;
3550 ptr->branch_cost = ix86_branch_cost;
3551 ptr->tune_defaulted = ix86_tune_defaulted;
3552 ptr->arch_specified = ix86_arch_specified;
3553 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3554 ptr->target_flags_explicit = target_flags_explicit;
3556 /* The fields are char but the variables are not; make sure the
3557 values fit in the fields. */
3558 gcc_assert (ptr->arch == ix86_arch);
3559 gcc_assert (ptr->schedule == ix86_schedule);
3560 gcc_assert (ptr->tune == ix86_tune);
3561 gcc_assert (ptr->fpmath == ix86_fpmath);
3562 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3565 /* Restore the current options */
3567 static void
3568 ix86_function_specific_restore (struct cl_target_option *ptr)
3570 enum processor_type old_tune = ix86_tune;
3571 enum processor_type old_arch = ix86_arch;
3572 unsigned int ix86_arch_mask, ix86_tune_mask;
3573 int i;
3575 ix86_arch = (enum processor_type) ptr->arch;
3576 ix86_schedule = (enum attr_cpu) ptr->schedule;
3577 ix86_tune = (enum processor_type) ptr->tune;
3578 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3579 ix86_branch_cost = ptr->branch_cost;
3580 ix86_tune_defaulted = ptr->tune_defaulted;
3581 ix86_arch_specified = ptr->arch_specified;
3582 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3583 target_flags_explicit = ptr->target_flags_explicit;
3585 /* Recreate the arch feature tests if the arch changed */
3586 if (old_arch != ix86_arch)
3588 ix86_arch_mask = 1u << ix86_arch;
3589 for (i = 0; i < X86_ARCH_LAST; ++i)
3590 ix86_arch_features[i]
3591 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3594 /* Recreate the tune optimization tests */
3595 if (old_tune != ix86_tune)
3597 ix86_tune_mask = 1u << ix86_tune;
3598 for (i = 0; i < X86_TUNE_LAST; ++i)
3599 ix86_tune_features[i]
3600 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3604 /* Print the current options */
3606 static void
3607 ix86_function_specific_print (FILE *file, int indent,
3608 struct cl_target_option *ptr)
3610 char *target_string
3611 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3612 NULL, NULL, NULL, false);
3614 fprintf (file, "%*sarch = %d (%s)\n",
3615 indent, "",
3616 ptr->arch,
3617 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3618 ? cpu_names[ptr->arch]
3619 : "<unknown>"));
3621 fprintf (file, "%*stune = %d (%s)\n",
3622 indent, "",
3623 ptr->tune,
3624 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3625 ? cpu_names[ptr->tune]
3626 : "<unknown>"));
3628 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3629 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3630 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3631 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3633 if (target_string)
3635 fprintf (file, "%*s%s\n", indent, "", target_string);
3636 free (target_string);
3641 /* Inner function to process the attribute((target(...))): take an argument
3642 and set the current options from it. If we have a list, recursively go
3643 over the list. */
3645 static bool
3646 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3648 char *next_optstr;
3649 bool ret = true;
3651 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3652 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3653 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3654 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3656 enum ix86_opt_type
3658 ix86_opt_unknown,
3659 ix86_opt_yes,
3660 ix86_opt_no,
3661 ix86_opt_str,
3662 ix86_opt_isa
3665 static const struct
3667 const char *string;
3668 size_t len;
3669 enum ix86_opt_type type;
3670 int opt;
3671 int mask;
3672 } attrs[] = {
3673 /* isa options */
3674 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3675 IX86_ATTR_ISA ("abm", OPT_mabm),
3676 IX86_ATTR_ISA ("aes", OPT_maes),
3677 IX86_ATTR_ISA ("avx", OPT_mavx),
3678 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3679 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3680 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3681 IX86_ATTR_ISA ("sse", OPT_msse),
3682 IX86_ATTR_ISA ("sse2", OPT_msse2),
3683 IX86_ATTR_ISA ("sse3", OPT_msse3),
3684 IX86_ATTR_ISA ("sse4", OPT_msse4),
3685 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3686 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3687 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3688 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3689 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3690 IX86_ATTR_ISA ("xop", OPT_mxop),
3691 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3693 /* string options */
3694 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3695 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3696 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3698 /* flag options */
3699 IX86_ATTR_YES ("cld",
3700 OPT_mcld,
3701 MASK_CLD),
3703 IX86_ATTR_NO ("fancy-math-387",
3704 OPT_mfancy_math_387,
3705 MASK_NO_FANCY_MATH_387),
3707 IX86_ATTR_YES ("ieee-fp",
3708 OPT_mieee_fp,
3709 MASK_IEEE_FP),
3711 IX86_ATTR_YES ("inline-all-stringops",
3712 OPT_minline_all_stringops,
3713 MASK_INLINE_ALL_STRINGOPS),
3715 IX86_ATTR_YES ("inline-stringops-dynamically",
3716 OPT_minline_stringops_dynamically,
3717 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3719 IX86_ATTR_NO ("align-stringops",
3720 OPT_mno_align_stringops,
3721 MASK_NO_ALIGN_STRINGOPS),
3723 IX86_ATTR_YES ("recip",
3724 OPT_mrecip,
3725 MASK_RECIP),
3729 /* If this is a list, recurse to get the options. */
3730 if (TREE_CODE (args) == TREE_LIST)
3732 bool ret = true;
3734 for (; args; args = TREE_CHAIN (args))
3735 if (TREE_VALUE (args)
3736 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3737 ret = false;
3739 return ret;
3742 else if (TREE_CODE (args) != STRING_CST)
3743 gcc_unreachable ();
3745 /* Handle multiple arguments separated by commas. */
3746 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3748 while (next_optstr && *next_optstr != '\0')
3750 char *p = next_optstr;
3751 char *orig_p = p;
3752 char *comma = strchr (next_optstr, ',');
3753 const char *opt_string;
3754 size_t len, opt_len;
3755 int opt;
3756 bool opt_set_p;
3757 char ch;
3758 unsigned i;
3759 enum ix86_opt_type type = ix86_opt_unknown;
3760 int mask = 0;
3762 if (comma)
3764 *comma = '\0';
3765 len = comma - next_optstr;
3766 next_optstr = comma + 1;
3768 else
3770 len = strlen (p);
3771 next_optstr = NULL;
3774 /* Recognize no-xxx. */
3775 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3777 opt_set_p = false;
3778 p += 3;
3779 len -= 3;
3781 else
3782 opt_set_p = true;
3784 /* Find the option. */
3785 ch = *p;
3786 opt = N_OPTS;
3787 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3789 type = attrs[i].type;
3790 opt_len = attrs[i].len;
3791 if (ch == attrs[i].string[0]
3792 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3793 && memcmp (p, attrs[i].string, opt_len) == 0)
3795 opt = attrs[i].opt;
3796 mask = attrs[i].mask;
3797 opt_string = attrs[i].string;
3798 break;
3802 /* Process the option. */
3803 if (opt == N_OPTS)
3805 error ("attribute(target(\"%s\")) is unknown", orig_p);
3806 ret = false;
3809 else if (type == ix86_opt_isa)
3810 ix86_handle_option (opt, p, opt_set_p);
3812 else if (type == ix86_opt_yes || type == ix86_opt_no)
3814 if (type == ix86_opt_no)
3815 opt_set_p = !opt_set_p;
3817 if (opt_set_p)
3818 target_flags |= mask;
3819 else
3820 target_flags &= ~mask;
3823 else if (type == ix86_opt_str)
3825 if (p_strings[opt])
3827 error ("option(\"%s\") was already specified", opt_string);
3828 ret = false;
3830 else
3831 p_strings[opt] = xstrdup (p + opt_len);
3834 else
3835 gcc_unreachable ();
3838 return ret;
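/* Illustrative example of the strings the parser above accepts; the
   function names are hypothetical:

     int foo (int *p) __attribute__ ((target ("sse4.2,arch=core2")));
     int bar (int *p) __attribute__ ((target ("no-ieee-fp,fpmath=sse")));

   Each comma-separated entry is looked up in attrs[]; a "no-" prefix
   inverts the ix86_opt_yes/ix86_opt_no flags, and "arch=", "tune=" and
   "fpmath=" values are saved in p_strings[] for later processing.  */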
3841 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3843 tree
3844 ix86_valid_target_attribute_tree (tree args)
3846 const char *orig_arch_string = ix86_arch_string;
3847 const char *orig_tune_string = ix86_tune_string;
3848 const char *orig_fpmath_string = ix86_fpmath_string;
3849 int orig_tune_defaulted = ix86_tune_defaulted;
3850 int orig_arch_specified = ix86_arch_specified;
3851 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3852 tree t = NULL_TREE;
3853 int i;
3854 struct cl_target_option *def
3855 = TREE_TARGET_OPTION (target_option_default_node);
3857 /* Process each of the options on the chain. */
3858 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3859 return NULL_TREE;
3861 /* If the changed options are different from the default, rerun override_options,
3862 and then save the options away. The string options are attribute options,
3863 and will be undone when we copy the save structure. */
3864 if (ix86_isa_flags != def->ix86_isa_flags
3865 || target_flags != def->target_flags
3866 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3867 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3868 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3870 /* If we are using the default tune= or arch=, undo the string assigned,
3871 and use the default. */
3872 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3873 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3874 else if (!orig_arch_specified)
3875 ix86_arch_string = NULL;
3877 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3878 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3879 else if (orig_tune_defaulted)
3880 ix86_tune_string = NULL;
3882 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3883 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3884 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3885 else if (!TARGET_64BIT && TARGET_SSE)
3886 ix86_fpmath_string = "sse,387";
3888 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3889 override_options (false);
3891 /* Add any builtin functions with the new isa if any. */
3892 ix86_add_new_builtins (ix86_isa_flags);
3894 /* Save the current options unless we are validating options for
3895 #pragma. */
3896 t = build_target_option_node ();
3898 ix86_arch_string = orig_arch_string;
3899 ix86_tune_string = orig_tune_string;
3900 ix86_fpmath_string = orig_fpmath_string;
3902 /* Free up memory allocated to hold the strings */
3903 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3904 if (option_strings[i])
3905 free (option_strings[i]);
3908 return t;
3911 /* Hook to validate attribute((target("string"))). */
3913 static bool
3914 ix86_valid_target_attribute_p (tree fndecl,
3915 tree ARG_UNUSED (name),
3916 tree args,
3917 int ARG_UNUSED (flags))
3919 struct cl_target_option cur_target;
3920 bool ret = true;
3921 tree old_optimize = build_optimization_node ();
3922 tree new_target, new_optimize;
3923 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3925 /* If the function changed the optimization levels as well as setting target
3926 options, start with the optimizations specified. */
3927 if (func_optimize && func_optimize != old_optimize)
3928 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3930 /* The target attributes may also change some optimization flags, so update
3931 the optimization options if necessary. */
3932 cl_target_option_save (&cur_target);
3933 new_target = ix86_valid_target_attribute_tree (args);
3934 new_optimize = build_optimization_node ();
3936 if (!new_target)
3937 ret = false;
3939 else if (fndecl)
3941 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3943 if (old_optimize != new_optimize)
3944 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3947 cl_target_option_restore (&cur_target);
3949 if (old_optimize != new_optimize)
3950 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3952 return ret;
3956 /* Hook to determine if one function can safely inline another. */
3958 static bool
3959 ix86_can_inline_p (tree caller, tree callee)
3961 bool ret = false;
3962 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3963 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3965 /* If callee has no option attributes, then it is ok to inline. */
3966 if (!callee_tree)
3967 ret = true;
3969 /* If caller has no option attributes, but callee does then it is not ok to
3970 inline. */
3971 else if (!caller_tree)
3972 ret = false;
3974 else
3976 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3977 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3979 /* Callee's ISA options should be a subset of the caller's, i.e. an SSE4
3980 function can inline an SSE2 function but an SSE2 function can't inline
3981 an SSE4 function. */
3982 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3983 != callee_opts->ix86_isa_flags)
3984 ret = false;
3986 /* See if we have the same non-isa options. */
3987 else if (caller_opts->target_flags != callee_opts->target_flags)
3988 ret = false;
3990 /* See if arch, tune, etc. are the same. */
3991 else if (caller_opts->arch != callee_opts->arch)
3992 ret = false;
3994 else if (caller_opts->tune != callee_opts->tune)
3995 ret = false;
3997 else if (caller_opts->fpmath != callee_opts->fpmath)
3998 ret = false;
4000 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4001 ret = false;
4003 else
4004 ret = true;
4007 return ret;
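/* Illustrative example of the subset rule above (hypothetical code):

     static int fast (int x) __attribute__ ((target ("sse4.2")));
     int plain (int x);    // compiled with only -msse2 in effect

   fast cannot be inlined into plain because the callee's ISA flags are
   not a subset of the caller's, while inlining in the other direction
   is allowed when the remaining options also match.  */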
4011 /* Remember the last target of ix86_set_current_function. */
4012 static GTY(()) tree ix86_previous_fndecl;
4014 /* Establish appropriate back-end context for processing the function
4015 FNDECL. The argument might be NULL to indicate processing at top
4016 level, outside of any function scope. */
4017 static void
4018 ix86_set_current_function (tree fndecl)
4020 /* Only change the context if the function changes. This hook is called
4021 several times in the course of compiling a function, and we don't want to
4022 slow things down too much or call target_reinit when it isn't safe. */
4023 if (fndecl && fndecl != ix86_previous_fndecl)
4025 tree old_tree = (ix86_previous_fndecl
4026 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4027 : NULL_TREE);
4029 tree new_tree = (fndecl
4030 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4031 : NULL_TREE);
4033 ix86_previous_fndecl = fndecl;
4034 if (old_tree == new_tree)
4037 else if (new_tree)
4039 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4040 target_reinit ();
4043 else if (old_tree)
4045 struct cl_target_option *def
4046 = TREE_TARGET_OPTION (target_option_current_node);
4048 cl_target_option_restore (def);
4049 target_reinit ();
4055 /* Return true if this goes in large data/bss. */
4057 static bool
4058 ix86_in_large_data_p (tree exp)
4060 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4061 return false;
4063 /* Functions are never large data. */
4064 if (TREE_CODE (exp) == FUNCTION_DECL)
4065 return false;
4067 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4069 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4070 if (strcmp (section, ".ldata") == 0
4071 || strcmp (section, ".lbss") == 0)
4072 return true;
4073 return false;
4075 else
4077 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4079 /* If this is an incomplete type with size 0, then we can't put it
4080 in data because it might be too big when completed. */
4081 if (!size || size > ix86_section_threshold)
4082 return true;
4085 return false;
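/* Illustrative example (hypothetical object, default section threshold):
   under -mcmodel=medium a definition such as

     static char big_buffer[1 << 20];   // 1 MB, above the threshold

   is treated as large data by the test above and is placed in .lbss
   (or .ldata when initialized), while small objects keep using the
   ordinary .data/.bss sections.  */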
4088 /* Switch to the appropriate section for output of DECL.
4089 DECL is either a `VAR_DECL' node or a constant of some sort.
4090 RELOC indicates whether forming the initial value of DECL requires
4091 link-time relocations. */
4093 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4094 ATTRIBUTE_UNUSED;
4096 static section *
4097 x86_64_elf_select_section (tree decl, int reloc,
4098 unsigned HOST_WIDE_INT align)
4100 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4101 && ix86_in_large_data_p (decl))
4103 const char *sname = NULL;
4104 unsigned int flags = SECTION_WRITE;
4105 switch (categorize_decl_for_section (decl, reloc))
4107 case SECCAT_DATA:
4108 sname = ".ldata";
4109 break;
4110 case SECCAT_DATA_REL:
4111 sname = ".ldata.rel";
4112 break;
4113 case SECCAT_DATA_REL_LOCAL:
4114 sname = ".ldata.rel.local";
4115 break;
4116 case SECCAT_DATA_REL_RO:
4117 sname = ".ldata.rel.ro";
4118 break;
4119 case SECCAT_DATA_REL_RO_LOCAL:
4120 sname = ".ldata.rel.ro.local";
4121 break;
4122 case SECCAT_BSS:
4123 sname = ".lbss";
4124 flags |= SECTION_BSS;
4125 break;
4126 case SECCAT_RODATA:
4127 case SECCAT_RODATA_MERGE_STR:
4128 case SECCAT_RODATA_MERGE_STR_INIT:
4129 case SECCAT_RODATA_MERGE_CONST:
4130 sname = ".lrodata";
4131 flags = 0;
4132 break;
4133 case SECCAT_SRODATA:
4134 case SECCAT_SDATA:
4135 case SECCAT_SBSS:
4136 gcc_unreachable ();
4137 case SECCAT_TEXT:
4138 case SECCAT_TDATA:
4139 case SECCAT_TBSS:
4140 /* We don't split these for the medium model. Place them into
4141 default sections and hope for the best. */
4142 break;
4143 case SECCAT_EMUTLS_VAR:
4144 case SECCAT_EMUTLS_TMPL:
4145 gcc_unreachable ();
4147 if (sname)
4149 /* We might get called with string constants, but get_named_section
4150 doesn't like them as they are not DECLs. Also, we need to set
4151 flags in that case. */
4152 if (!DECL_P (decl))
4153 return get_section (sname, flags, NULL);
4154 return get_named_section (decl, sname, reloc);
4157 return default_elf_select_section (decl, reloc, align);
4160 /* Build up a unique section name, expressed as a
4161 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4162 RELOC indicates whether the initial value of EXP requires
4163 link-time relocations. */
4165 static void ATTRIBUTE_UNUSED
4166 x86_64_elf_unique_section (tree decl, int reloc)
4168 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4169 && ix86_in_large_data_p (decl))
4171 const char *prefix = NULL;
4172 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4173 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4175 switch (categorize_decl_for_section (decl, reloc))
4177 case SECCAT_DATA:
4178 case SECCAT_DATA_REL:
4179 case SECCAT_DATA_REL_LOCAL:
4180 case SECCAT_DATA_REL_RO:
4181 case SECCAT_DATA_REL_RO_LOCAL:
4182 prefix = one_only ? ".ld" : ".ldata";
4183 break;
4184 case SECCAT_BSS:
4185 prefix = one_only ? ".lb" : ".lbss";
4186 break;
4187 case SECCAT_RODATA:
4188 case SECCAT_RODATA_MERGE_STR:
4189 case SECCAT_RODATA_MERGE_STR_INIT:
4190 case SECCAT_RODATA_MERGE_CONST:
4191 prefix = one_only ? ".lr" : ".lrodata";
4192 break;
4193 case SECCAT_SRODATA:
4194 case SECCAT_SDATA:
4195 case SECCAT_SBSS:
4196 gcc_unreachable ();
4197 case SECCAT_TEXT:
4198 case SECCAT_TDATA:
4199 case SECCAT_TBSS:
4200 /* We don't split these for the medium model. Place them into
4201 default sections and hope for the best. */
4202 break;
4203 case SECCAT_EMUTLS_VAR:
4204 prefix = targetm.emutls.var_section;
4205 break;
4206 case SECCAT_EMUTLS_TMPL:
4207 prefix = targetm.emutls.tmpl_section;
4208 break;
4210 if (prefix)
4212 const char *name, *linkonce;
4213 char *string;
4215 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4216 name = targetm.strip_name_encoding (name);
4218 /* If we're using one_only, then there needs to be a .gnu.linkonce
4219 prefix to the section name. */
4220 linkonce = one_only ? ".gnu.linkonce" : "";
4222 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4224 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4225 return;
4228 default_unique_section (decl, reloc);
4231 #ifdef COMMON_ASM_OP
4232 /* This says how to output assembler code to declare an
4233 uninitialized external linkage data object.
4235 For medium model x86-64 we need to use the .largecomm directive for
4236 large objects. */
4237 void
4238 x86_elf_aligned_common (FILE *file,
4239 const char *name, unsigned HOST_WIDE_INT size,
4240 int align)
4242 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4243 && size > (unsigned int)ix86_section_threshold)
4244 fputs (".largecomm\t", file);
4245 else
4246 fputs (COMMON_ASM_OP, file);
4247 assemble_name (file, name);
4248 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4249 size, align / BITS_PER_UNIT);
4251 #endif
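/* Illustrative output of x86_elf_aligned_common (hypothetical symbol,
   assuming COMMON_ASM_OP expands to "\t.comm\t"): a 128 KB common object
   under -mcmodel=medium with a 256-bit alignment request is emitted as

     .largecomm	big_common,131072,32

   where the last operand is the alignment in bytes (align / BITS_PER_UNIT);
   smaller objects use the ordinary .comm directive instead.  */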
4253 /* Utility function for targets to use in implementing
4254 ASM_OUTPUT_ALIGNED_BSS. */
4256 void
4257 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4258 const char *name, unsigned HOST_WIDE_INT size,
4259 int align)
4261 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4262 && size > (unsigned int)ix86_section_threshold)
4263 switch_to_section (get_named_section (decl, ".lbss", 0));
4264 else
4265 switch_to_section (bss_section);
4266 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4267 #ifdef ASM_DECLARE_OBJECT_NAME
4268 last_assemble_variable_decl = decl;
4269 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4270 #else
4271 /* The standard thing is just to output a label for the object. */
4272 ASM_OUTPUT_LABEL (file, name);
4273 #endif /* ASM_DECLARE_OBJECT_NAME */
4274 ASM_OUTPUT_SKIP (file, size ? size : 1);
4277 void
4278 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4280 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4281 make the problem with not enough registers even worse. */
4282 #ifdef INSN_SCHEDULING
4283 if (level > 1)
4284 flag_schedule_insns = 0;
4285 #endif
4287 if (TARGET_MACHO)
4288 /* The Darwin libraries never set errno, so we might as well
4289 avoid calling them when that's the only reason we would. */
4290 flag_errno_math = 0;
4292 /* The default values of these switches depend on TARGET_64BIT,
4293 which is not known at this point. Mark these values with 2 and
4294 let the user override them. If no command line option
4295 specifies them, we will set the defaults in override_options. */
4296 if (optimize >= 1)
4297 flag_omit_frame_pointer = 2;
4298 flag_pcc_struct_return = 2;
4299 flag_asynchronous_unwind_tables = 2;
4300 flag_vect_cost_model = 1;
4301 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4302 SUBTARGET_OPTIMIZATION_OPTIONS;
4303 #endif
4306 /* Decide whether we can make a sibling call to a function. DECL is the
4307 declaration of the function being targeted by the call and EXP is the
4308 CALL_EXPR representing the call. */
4310 static bool
4311 ix86_function_ok_for_sibcall (tree decl, tree exp)
4313 tree type, decl_or_type;
4314 rtx a, b;
4316 /* If we are generating position-independent code, we cannot sibcall
4317 optimize any indirect call, or a direct call to a global function,
4318 as the PLT requires %ebx be live. */
4319 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4320 return false;
4322 /* If we need to align the outgoing stack, then sibcalling would
4323 unalign the stack, which may break the called function. */
4324 if (ix86_minimum_incoming_stack_boundary (true)
4325 < PREFERRED_STACK_BOUNDARY)
4326 return false;
4328 if (decl)
4330 decl_or_type = decl;
4331 type = TREE_TYPE (decl);
4333 else
4335 /* We're looking at the CALL_EXPR, we need the type of the function. */
4336 type = CALL_EXPR_FN (exp); /* pointer expression */
4337 type = TREE_TYPE (type); /* pointer type */
4338 type = TREE_TYPE (type); /* function type */
4339 decl_or_type = type;
4342 /* Check that the return value locations are the same. For example,
4343 if we are returning floats on the 80387 register stack, we cannot
4344 make a sibcall from a function that doesn't return a float to a
4345 function that does or, conversely, from a function that does return
4346 a float to a function that doesn't; the necessary stack adjustment
4347 would not be executed. This is also the place we notice
4348 differences in the return value ABI. Note that it is ok for one
4349 of the functions to have void return type as long as the return
4350 value of the other is passed in a register. */
4351 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4352 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4353 cfun->decl, false);
4354 if (STACK_REG_P (a) || STACK_REG_P (b))
4356 if (!rtx_equal_p (a, b))
4357 return false;
4359 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4361 else if (!rtx_equal_p (a, b))
4362 return false;
4364 if (TARGET_64BIT)
4366 /* The SYSV ABI has more call-clobbered registers;
4367 disallow sibcalls from MS to SYSV. */
4368 if (cfun->machine->call_abi == MS_ABI
4369 && ix86_function_type_abi (type) == SYSV_ABI)
4370 return false;
4372 else
4374 /* If this call is indirect, we'll need to be able to use a
4375 call-clobbered register for the address of the target function.
4376 Make sure that all such registers are not used for passing
4377 parameters. Note that DLLIMPORT functions are indirect. */
4378 if (!decl
4379 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4381 if (ix86_function_regparm (type, NULL) >= 3)
4383 /* ??? Need to count the actual number of registers to be used,
4384 not the possible number of registers. Fix later. */
4385 return false;
4390 /* Otherwise okay. That also includes certain types of indirect calls. */
4391 return true;
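/* Illustrative example of the PIC restriction above (hypothetical code,
   compiled with -m32 -fPIC):

     extern int helper (int);
     int wrap (int x) { return helper (x); }

   The tail call to helper is not turned into a sibcall because the PLT
   call needs %ebx live; without -fPIC, or when helper binds locally,
   the sibcall is allowed provided the other checks pass.  */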
4394 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
4395 calling convention attributes;
4396 arguments as in struct attribute_spec.handler. */
4398 static tree
4399 ix86_handle_cconv_attribute (tree *node, tree name,
4400 tree args,
4401 int flags ATTRIBUTE_UNUSED,
4402 bool *no_add_attrs)
4404 if (TREE_CODE (*node) != FUNCTION_TYPE
4405 && TREE_CODE (*node) != METHOD_TYPE
4406 && TREE_CODE (*node) != FIELD_DECL
4407 && TREE_CODE (*node) != TYPE_DECL)
4409 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4410 name);
4411 *no_add_attrs = true;
4412 return NULL_TREE;
4415 /* Can combine regparm with all attributes but fastcall. */
4416 if (is_attribute_p ("regparm", name))
4418 tree cst;
4420 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4422 error ("fastcall and regparm attributes are not compatible");
4425 cst = TREE_VALUE (args);
4426 if (TREE_CODE (cst) != INTEGER_CST)
4428 warning (OPT_Wattributes,
4429 "%qE attribute requires an integer constant argument",
4430 name);
4431 *no_add_attrs = true;
4433 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4435 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4436 name, REGPARM_MAX);
4437 *no_add_attrs = true;
4440 return NULL_TREE;
4443 if (TARGET_64BIT)
4445 /* Do not warn when emulating the MS ABI. */
4446 if (TREE_CODE (*node) != FUNCTION_TYPE
4447 || ix86_function_type_abi (*node) != MS_ABI)
4448 warning (OPT_Wattributes, "%qE attribute ignored",
4449 name);
4450 *no_add_attrs = true;
4451 return NULL_TREE;
4454 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4455 if (is_attribute_p ("fastcall", name))
4457 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4459 error ("fastcall and cdecl attributes are not compatible");
4461 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4463 error ("fastcall and stdcall attributes are not compatible");
4465 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4467 error ("fastcall and regparm attributes are not compatible");
4471 /* Can combine stdcall with fastcall (redundant), regparm and
4472 sseregparm. */
4473 else if (is_attribute_p ("stdcall", name))
4475 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4477 error ("stdcall and cdecl attributes are not compatible");
4479 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4481 error ("stdcall and fastcall attributes are not compatible");
4485 /* Can combine cdecl with regparm and sseregparm. */
4486 else if (is_attribute_p ("cdecl", name))
4488 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4490 error ("stdcall and cdecl attributes are not compatible");
4492 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4494 error ("fastcall and cdecl attributes are not compatible");
4498 /* Can combine sseregparm with all attributes. */
4500 return NULL_TREE;
4503 /* Return 0 if the attributes for two types are incompatible, 1 if they
4504 are compatible, and 2 if they are nearly compatible (which causes a
4505 warning to be generated). */
4507 static int
4508 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4510 /* Check for mismatch of non-default calling convention. */
4511 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4513 if (TREE_CODE (type1) != FUNCTION_TYPE
4514 && TREE_CODE (type1) != METHOD_TYPE)
4515 return 1;
4517 /* Check for mismatched fastcall/regparm types. */
4518 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4519 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4520 || (ix86_function_regparm (type1, NULL)
4521 != ix86_function_regparm (type2, NULL)))
4522 return 0;
4524 /* Check for mismatched sseregparm types. */
4525 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4526 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4527 return 0;
4529 /* Check for mismatched return types (cdecl vs stdcall). */
4530 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4531 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4532 return 0;
4534 return 1;
4537 /* Return the regparm value for a function with the indicated TYPE and DECL.
4538 DECL may be NULL when calling function indirectly
4539 or considering a libcall. */
4541 static int
4542 ix86_function_regparm (const_tree type, const_tree decl)
4544 tree attr;
4545 int regparm;
4547 if (TARGET_64BIT)
4548 return (ix86_function_type_abi (type) == SYSV_ABI
4549 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4551 regparm = ix86_regparm;
4552 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4553 if (attr)
4555 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4556 return regparm;
4559 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4560 return 2;
4562 /* Use register calling convention for local functions when possible. */
4563 if (decl
4564 && TREE_CODE (decl) == FUNCTION_DECL
4565 && optimize
4566 && !profile_flag)
4568 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4569 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4570 if (i && i->local)
4572 int local_regparm, globals = 0, regno;
4574 /* Make sure no regparm register is taken by a
4575 fixed register variable. */
4576 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4577 if (fixed_regs[local_regparm])
4578 break;
4580 /* We don't want to use regparm(3) for nested functions as
4581 these use a static chain pointer in the third argument. */
4582 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4583 local_regparm = 2;
4585 /* Each fixed register usage increases register pressure,
4586 so fewer registers should be used for argument passing.
4587 This functionality can be overridden by an explicit
4588 regparm value. */
4589 for (regno = 0; regno <= DI_REG; regno++)
4590 if (fixed_regs[regno])
4591 globals++;
4593 local_regparm
4594 = globals < local_regparm ? local_regparm - globals : 0;
4596 if (local_regparm > regparm)
4597 regparm = local_regparm;
4601 return regparm;
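/* Illustrative example (hypothetical declaration, 32-bit only): for

     int __attribute__ ((regparm (3))) sum3 (int a, int b, int c);

   ix86_function_regparm returns 3, so the arguments travel in %eax,
   %edx and %ecx instead of on the stack; the fastcall attribute
   instead fixes the count at 2.  */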
4604 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4605 DFmode (2) arguments in SSE registers for a function with the
4606 indicated TYPE and DECL. DECL may be NULL when calling function
4607 indirectly or considering a libcall. Otherwise return 0. */
4609 static int
4610 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4612 gcc_assert (!TARGET_64BIT);
4614 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4615 by the sseregparm attribute. */
4616 if (TARGET_SSEREGPARM
4617 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4619 if (!TARGET_SSE)
4621 if (warn)
4623 if (decl)
4624 error ("Calling %qD with attribute sseregparm without "
4625 "SSE/SSE2 enabled", decl);
4626 else
4627 error ("Calling %qT with attribute sseregparm without "
4628 "SSE/SSE2 enabled", type);
4630 return 0;
4633 return 2;
4636 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4637 (and DFmode for SSE2) arguments in SSE registers. */
4638 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4640 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4641 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4642 if (i && i->local)
4643 return TARGET_SSE2 ? 2 : 1;
4646 return 0;
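/* Illustrative example (hypothetical declaration, 32-bit with SSE2): for

     double __attribute__ ((sseregparm)) scale (double x, double y);

   ix86_function_sseregparm returns 2, so X and Y are passed in SSE
   registers rather than on the stack; if SSE is disabled the same
   attribute is diagnosed by the error above.  */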
4649 /* Return true if EAX is live at the start of the function. Used by
4650 ix86_expand_prologue to determine if we need special help before
4651 calling allocate_stack_worker. */
4653 static bool
4654 ix86_eax_live_at_start_p (void)
4656 /* Cheat. Don't bother working forward from ix86_function_regparm
4657 to the function type to whether an actual argument is located in
4658 eax. Instead just look at cfg info, which is still close enough
4659 to correct at this point. This gives false positives for broken
4660 functions that might use uninitialized data that happens to be
4661 allocated in eax, but who cares? */
4662 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4665 /* Value is the number of bytes of arguments automatically
4666 popped when returning from a subroutine call.
4667 FUNDECL is the declaration node of the function (as a tree),
4668 FUNTYPE is the data type of the function (as a tree),
4669 or for a library call it is an identifier node for the subroutine name.
4670 SIZE is the number of bytes of arguments passed on the stack.
4672 On the 80386, the RTD insn may be used to pop them if the number
4673 of args is fixed, but if the number is variable then the caller
4674 must pop them all. RTD can't be used for library calls now
4675 because the library is compiled with the Unix compiler.
4676 Use of RTD is a selectable option, since it is incompatible with
4677 standard Unix calling sequences. If the option is not selected,
4678 the caller must always pop the args.
4680 The attribute stdcall is equivalent to RTD on a per module basis. */
4683 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4685 int rtd;
4687 /* None of the 64-bit ABIs pop arguments. */
4688 if (TARGET_64BIT)
4689 return 0;
4691 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4693 /* Cdecl functions override -mrtd, and never pop the stack. */
4694 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4696 /* Stdcall and fastcall functions will pop the stack if they do not
4697 take variable args. */
4698 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4699 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
4700 rtd = 1;
4702 if (rtd && ! stdarg_p (funtype))
4703 return size;
4706 /* Lose any fake structure return argument if it is passed on the stack. */
4707 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4708 && !KEEP_AGGREGATE_RETURN_POINTER)
4710 int nregs = ix86_function_regparm (funtype, fundecl);
4711 if (nregs == 0)
4712 return GET_MODE_SIZE (Pmode);
4715 return 0;
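/* Illustrative example (hypothetical declaration, 32-bit): for

     int __attribute__ ((stdcall)) cb (int a, int b);

   ix86_return_pops_args returns 8, so the callee removes its two
   arguments with "ret $8"; a plain cdecl function returns 0 and leaves
   the cleanup to the caller.  */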
4718 /* Argument support functions. */
4720 /* Return true when register may be used to pass function parameters. */
4721 bool
4722 ix86_function_arg_regno_p (int regno)
4724 int i;
4725 const int *parm_regs;
4727 if (!TARGET_64BIT)
4729 if (TARGET_MACHO)
4730 return (regno < REGPARM_MAX
4731 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4732 else
4733 return (regno < REGPARM_MAX
4734 || (TARGET_MMX && MMX_REGNO_P (regno)
4735 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4736 || (TARGET_SSE && SSE_REGNO_P (regno)
4737 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4740 if (TARGET_MACHO)
4742 if (SSE_REGNO_P (regno) && TARGET_SSE)
4743 return true;
4745 else
4747 if (TARGET_SSE && SSE_REGNO_P (regno)
4748 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4749 return true;
4752 /* TODO: The function should depend on the current function ABI, but
4753 builtins.c would then need updating. Therefore we use the
4754 default ABI. */
4756 /* RAX is used as hidden argument to va_arg functions. */
4757 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4758 return true;
4760 if (ix86_abi == MS_ABI)
4761 parm_regs = x86_64_ms_abi_int_parameter_registers;
4762 else
4763 parm_regs = x86_64_int_parameter_registers;
4764 for (i = 0; i < (ix86_abi == MS_ABI
4765 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4766 if (regno == parm_regs[i])
4767 return true;
4768 return false;
4771 /* Return if we do not know how to pass TYPE solely in registers. */
4773 static bool
4774 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4776 if (must_pass_in_stack_var_size_or_pad (mode, type))
4777 return true;
4779 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4780 The layout_type routine is crafty and tries to trick us into passing
4781 currently unsupported vector types on the stack by using TImode. */
4782 return (!TARGET_64BIT && mode == TImode
4783 && type && TREE_CODE (type) != VECTOR_TYPE);
4786 /* Return the size, in bytes, of the area reserved for arguments passed
4787 in registers for the function represented by FNDECL, depending on the
4788 ABI format used. */
4790 ix86_reg_parm_stack_space (const_tree fndecl)
4792 enum calling_abi call_abi = SYSV_ABI;
4793 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4794 call_abi = ix86_function_abi (fndecl);
4795 else
4796 call_abi = ix86_function_type_abi (fndecl);
4797 if (call_abi == MS_ABI)
4798 return 32;
4799 return 0;
4802 /* Returns value SYSV_ABI, MS_ABI dependent on fntype, specifying the
4803 call abi used. */
4804 enum calling_abi
4805 ix86_function_type_abi (const_tree fntype)
4807 if (TARGET_64BIT && fntype != NULL)
4809 enum calling_abi abi = ix86_abi;
4810 if (abi == SYSV_ABI)
4812 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4813 abi = MS_ABI;
4815 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4816 abi = SYSV_ABI;
4817 return abi;
4819 return ix86_abi;
4822 static bool
4823 ix86_function_ms_hook_prologue (const_tree fntype)
4825 if (!TARGET_64BIT)
4827 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4829 if (decl_function_context (fntype) != NULL_TREE)
4831 error_at (DECL_SOURCE_LOCATION (fntype),
4832 "ms_hook_prologue is not compatible with nested function");
4835 return true;
4838 return false;
4841 static enum calling_abi
4842 ix86_function_abi (const_tree fndecl)
4844 if (! fndecl)
4845 return ix86_abi;
4846 return ix86_function_type_abi (TREE_TYPE (fndecl));
4849 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
4850 call abi used. */
4851 enum calling_abi
4852 ix86_cfun_abi (void)
4854 if (! cfun || ! TARGET_64BIT)
4855 return ix86_abi;
4856 return cfun->machine->call_abi;
4859 /* regclass.c */
4860 extern void init_regs (void);
4862 /* Implementation of the call ABI switching target hook. The call register
4863 sets specific to FNDECL are set up. See also CONDITIONAL_REGISTER_USAGE
4864 for more details. */
4865 void
4866 ix86_call_abi_override (const_tree fndecl)
4868 if (fndecl == NULL_TREE)
4869 cfun->machine->call_abi = ix86_abi;
4870 else
4871 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4874 /* The MS and SYSV ABIs have different sets of call-used registers. Avoid
4875 expensive re-initialization of init_regs each time we switch function
4876 context, since this is needed only during RTL expansion. */
4877 static void
4878 ix86_maybe_switch_abi (void)
4880 if (TARGET_64BIT &&
4881 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4882 reinit_regs ();
4885 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4886 for a call to a function whose data type is FNTYPE.
4887 For a library call, FNTYPE is 0. */
4889 void
4890 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4891 tree fntype, /* tree ptr for function decl */
4892 rtx libname, /* SYMBOL_REF of library name or 0 */
4893 tree fndecl)
4895 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4896 memset (cum, 0, sizeof (*cum));
4898 if (fndecl)
4899 cum->call_abi = ix86_function_abi (fndecl);
4900 else
4901 cum->call_abi = ix86_function_type_abi (fntype);
4902 /* Set up the number of registers to use for passing arguments. */
4904 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4905 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4906 "or subtarget optimization implying it");
4907 cum->nregs = ix86_regparm;
4908 if (TARGET_64BIT)
4910 if (cum->call_abi != ix86_abi)
4911 cum->nregs = (ix86_abi != SYSV_ABI
4912 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4914 if (TARGET_SSE)
4916 cum->sse_nregs = SSE_REGPARM_MAX;
4917 if (TARGET_64BIT)
4919 if (cum->call_abi != ix86_abi)
4920 cum->sse_nregs = (ix86_abi != SYSV_ABI
4921 ? X86_64_SSE_REGPARM_MAX
4922 : X86_64_MS_SSE_REGPARM_MAX);
4925 if (TARGET_MMX)
4926 cum->mmx_nregs = MMX_REGPARM_MAX;
4927 cum->warn_avx = true;
4928 cum->warn_sse = true;
4929 cum->warn_mmx = true;
4931 /* Because the type might mismatch between caller and callee, we need to
4932 use the actual type of the function for local calls.
4933 FIXME: cgraph_analyze can be told to actually record if a function uses
4934 va_start, so for local functions maybe_vaarg can be made more aggressive,
4935 helping K&R code.
4936 FIXME: once the type system is fixed, we won't need this code anymore. */
4937 if (i && i->local)
4938 fntype = TREE_TYPE (fndecl);
4939 cum->maybe_vaarg = (fntype
4940 ? (!prototype_p (fntype) || stdarg_p (fntype))
4941 : !libname);
4943 if (!TARGET_64BIT)
4945 /* If there are variable arguments, then we won't pass anything
4946 in registers in 32-bit mode. */
4947 if (stdarg_p (fntype))
4949 cum->nregs = 0;
4950 cum->sse_nregs = 0;
4951 cum->mmx_nregs = 0;
4952 cum->warn_avx = 0;
4953 cum->warn_sse = 0;
4954 cum->warn_mmx = 0;
4955 return;
4958 /* Use ecx and edx registers if function has fastcall attribute,
4959 else look for regparm information. */
4960 if (fntype)
4962 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
4964 cum->nregs = 2;
4965 cum->fastcall = 1;
4967 else
4968 cum->nregs = ix86_function_regparm (fntype, fndecl);
4971 /* Set up the number of SSE registers used for passing SFmode
4972 and DFmode arguments. Warn for mismatching ABI. */
4973 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
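/* Illustrative example (hypothetical declaration, 32-bit): for a call to

     void __attribute__ ((fastcall)) f (int a, int b, int c);

   the initialization above sets cum->nregs = 2 and cum->fastcall = 1,
   so A and B are passed in %ecx and %edx while C goes on the stack.  */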
4977 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
4978 But in the case of vector types, it is some vector mode.
4980 When we have only some of our vector isa extensions enabled, then there
4981 are some modes for which vector_mode_supported_p is false. For these
4982 modes, the generic vector support in gcc will choose some non-vector mode
4983 in order to implement the type. By computing the natural mode, we'll
4984 select the proper ABI location for the operand and not depend on whatever
4985 the middle-end decides to do with these vector types.
4987 The middle-end can't deal with vector types larger than 16 bytes. In this
4988 case, we return the original mode and warn about the ABI change if CUM
4989 isn't NULL. */
4991 static enum machine_mode
4992 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
4994 enum machine_mode mode = TYPE_MODE (type);
4996 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
4998 HOST_WIDE_INT size = int_size_in_bytes (type);
4999 if ((size == 8 || size == 16 || size == 32)
5000 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5001 && TYPE_VECTOR_SUBPARTS (type) > 1)
5003 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5005 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5006 mode = MIN_MODE_VECTOR_FLOAT;
5007 else
5008 mode = MIN_MODE_VECTOR_INT;
5010 /* Get the mode which has this inner mode and number of units. */
5011 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5012 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5013 && GET_MODE_INNER (mode) == innermode)
5015 if (size == 32 && !TARGET_AVX)
5017 static bool warnedavx;
5019 if (cum
5020 && !warnedavx
5021 && cum->warn_avx)
5023 warnedavx = true;
5024 warning (0, "AVX vector argument without AVX "
5025 "enabled changes the ABI");
5027 return TYPE_MODE (type);
5029 else
5030 return mode;
5033 gcc_unreachable ();
5037 return mode;
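/* Illustrative example (hypothetical typedef):

     typedef float v4sf __attribute__ ((vector_size (16)));

   type_natural_mode maps v4sf to V4SFmode even when the middle-end
   would otherwise fall back to a non-vector mode, so the ABI treats it
   as a 16-byte SSE vector; a 32-byte vector without -mavx keeps its
   original mode and triggers the one-time warning above.  */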
5040 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5041 this may not agree with the mode that the type system has chosen for the
5042 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5043 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5045 static rtx
5046 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5047 unsigned int regno)
5049 rtx tmp;
5051 if (orig_mode != BLKmode)
5052 tmp = gen_rtx_REG (orig_mode, regno);
5053 else
5055 tmp = gen_rtx_REG (mode, regno);
5056 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5057 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5060 return tmp;
5063 /* x86-64 register passing implementation. See x86-64 ABI for details. Goal
5064 of this code is to classify each 8bytes of incoming argument by the register
5065 class and assign registers accordingly. */
5067 /* Return the union class of CLASS1 and CLASS2.
5068 See the x86-64 PS ABI for details. */
5070 static enum x86_64_reg_class
5071 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5073 /* Rule #1: If both classes are equal, this is the resulting class. */
5074 if (class1 == class2)
5075 return class1;
5077 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5078 the other class. */
5079 if (class1 == X86_64_NO_CLASS)
5080 return class2;
5081 if (class2 == X86_64_NO_CLASS)
5082 return class1;
5084 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5085 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5086 return X86_64_MEMORY_CLASS;
5088 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5089 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5090 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5091 return X86_64_INTEGERSI_CLASS;
5092 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5093 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5094 return X86_64_INTEGER_CLASS;
5096 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5097 MEMORY is used. */
5098 if (class1 == X86_64_X87_CLASS
5099 || class1 == X86_64_X87UP_CLASS
5100 || class1 == X86_64_COMPLEX_X87_CLASS
5101 || class2 == X86_64_X87_CLASS
5102 || class2 == X86_64_X87UP_CLASS
5103 || class2 == X86_64_COMPLEX_X87_CLASS)
5104 return X86_64_MEMORY_CLASS;
5106 /* Rule #6: Otherwise class SSE is used. */
5107 return X86_64_SSE_CLASS;
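/* A few illustrative merges under the rules above: INTEGERSI merged with
   SSESF gives INTEGERSI (rule #4), INTEGER merged with SSE gives INTEGER
   (rule #4), and X87 merged with SSE gives MEMORY (rule #5).  */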
5110 /* Classify the argument of type TYPE and mode MODE.
5111 CLASSES will be filled by the register class used to pass each word
5112 of the operand. The number of words is returned. In case the parameter
5113 should be passed in memory, 0 is returned. As a special case for zero
5114 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5116 BIT_OFFSET is used internally for handling records and specifies the
5117 offset of the current piece in bits modulo 256 to avoid overflow cases.
5119 See the x86-64 PS ABI for details. */
5122 static int
5123 classify_argument (enum machine_mode mode, const_tree type,
5124 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5126 HOST_WIDE_INT bytes =
5127 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5128 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5130 /* Variable sized entities are always passed/returned in memory. */
5131 if (bytes < 0)
5132 return 0;
5134 if (mode != VOIDmode
5135 && targetm.calls.must_pass_in_stack (mode, type))
5136 return 0;
5138 if (type && AGGREGATE_TYPE_P (type))
5140 int i;
5141 tree field;
5142 enum x86_64_reg_class subclasses[MAX_CLASSES];
5144 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5145 if (bytes > 32)
5146 return 0;
5148 for (i = 0; i < words; i++)
5149 classes[i] = X86_64_NO_CLASS;
5151 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
5152 signal the memory class, so handle this as a special case. */
5153 if (!words)
5155 classes[0] = X86_64_NO_CLASS;
5156 return 1;
5159 /* Classify each field of record and merge classes. */
5160 switch (TREE_CODE (type))
5162 case RECORD_TYPE:
5163 /* And now merge the fields of the structure. */
5164 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5166 if (TREE_CODE (field) == FIELD_DECL)
5168 int num;
5170 if (TREE_TYPE (field) == error_mark_node)
5171 continue;
5173 /* Bitfields are always classified as integer. Handle them
5174 early, since later code would consider them to be
5175 misaligned integers. */
5176 if (DECL_BIT_FIELD (field))
5178 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5179 i < ((int_bit_position (field) + (bit_offset % 64))
5180 + tree_low_cst (DECL_SIZE (field), 0)
5181 + 63) / 8 / 8; i++)
5182 classes[i] =
5183 merge_classes (X86_64_INTEGER_CLASS,
5184 classes[i]);
5186 else
5188 int pos;
5190 type = TREE_TYPE (field);
5192 /* Flexible array member is ignored. */
5193 if (TYPE_MODE (type) == BLKmode
5194 && TREE_CODE (type) == ARRAY_TYPE
5195 && TYPE_SIZE (type) == NULL_TREE
5196 && TYPE_DOMAIN (type) != NULL_TREE
5197 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5198 == NULL_TREE))
5200 static bool warned;
5202 if (!warned && warn_psabi)
5204 warned = true;
5205 inform (input_location,
5206 "The ABI of passing struct with"
5207 " a flexible array member has"
5208 " changed in GCC 4.4");
5210 continue;
5212 num = classify_argument (TYPE_MODE (type), type,
5213 subclasses,
5214 (int_bit_position (field)
5215 + bit_offset) % 256);
5216 if (!num)
5217 return 0;
5218 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5219 for (i = 0; i < num && (i + pos) < words; i++)
5220 classes[i + pos] =
5221 merge_classes (subclasses[i], classes[i + pos]);
5225 break;
5227 case ARRAY_TYPE:
5228 /* Arrays are handled as small records. */
5230 int num;
5231 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5232 TREE_TYPE (type), subclasses, bit_offset);
5233 if (!num)
5234 return 0;
5236 /* The partial classes are now full classes. */
5237 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5238 subclasses[0] = X86_64_SSE_CLASS;
5239 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5240 && !((bit_offset % 64) == 0 && bytes == 4))
5241 subclasses[0] = X86_64_INTEGER_CLASS;
5243 for (i = 0; i < words; i++)
5244 classes[i] = subclasses[i % num];
5246 break;
5248 case UNION_TYPE:
5249 case QUAL_UNION_TYPE:
5250 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
5252 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5254 if (TREE_CODE (field) == FIELD_DECL)
5256 int num;
5258 if (TREE_TYPE (field) == error_mark_node)
5259 continue;
5261 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5262 TREE_TYPE (field), subclasses,
5263 bit_offset);
5264 if (!num)
5265 return 0;
5266 for (i = 0; i < num; i++)
5267 classes[i] = merge_classes (subclasses[i], classes[i]);
5270 break;
5272 default:
5273 gcc_unreachable ();
5276 if (words > 2)
5278 /* When the size exceeds 16 bytes, if the first class isn't
5279 X86_64_SSE_CLASS or any of the remaining classes isn't
5280 X86_64_SSEUP_CLASS, everything should be passed in
5281 memory. */
5282 if (classes[0] != X86_64_SSE_CLASS)
5283 return 0;
5285 for (i = 1; i < words; i++)
5286 if (classes[i] != X86_64_SSEUP_CLASS)
5287 return 0;
5290 /* Final merger cleanup. */
5291 for (i = 0; i < words; i++)
5293 /* If one class is MEMORY, everything should be passed in
5294 memory. */
5295 if (classes[i] == X86_64_MEMORY_CLASS)
5296 return 0;
5298 /* The X86_64_SSEUP_CLASS should be always preceded by
5299 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5300 if (classes[i] == X86_64_SSEUP_CLASS
5301 && classes[i - 1] != X86_64_SSE_CLASS
5302 && classes[i - 1] != X86_64_SSEUP_CLASS)
5304 /* The first one should never be X86_64_SSEUP_CLASS. */
5305 gcc_assert (i != 0);
5306 classes[i] = X86_64_SSE_CLASS;
5309 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5310 everything should be passed in memory. */
5311 if (classes[i] == X86_64_X87UP_CLASS
5312 && (classes[i - 1] != X86_64_X87_CLASS))
5314 static bool warned;
5316 /* The first one should never be X86_64_X87UP_CLASS. */
5317 gcc_assert (i != 0);
5318 if (!warned && warn_psabi)
5320 warned = true;
5321 inform (input_location,
5322 "The ABI of passing union with long double"
5323 " has changed in GCC 4.4");
5325 return 0;
5328 return words;
5331 /* Compute the alignment needed. We align all types to their natural
5332 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
5333 if (mode != VOIDmode && mode != BLKmode)
5335 int mode_alignment = GET_MODE_BITSIZE (mode);
5337 if (mode == XFmode)
5338 mode_alignment = 128;
5339 else if (mode == XCmode)
5340 mode_alignment = 256;
5341 if (COMPLEX_MODE_P (mode))
5342 mode_alignment /= 2;
5343 /* Misaligned fields are always returned in memory. */
5344 if (bit_offset % mode_alignment)
5345 return 0;
5348 /* For V1xx modes, just use the base mode. */
5349 if (VECTOR_MODE_P (mode) && mode != V1DImode
5350 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5351 mode = GET_MODE_INNER (mode);
5353 /* Classification of atomic types. */
5354 switch (mode)
5356 case SDmode:
5357 case DDmode:
5358 classes[0] = X86_64_SSE_CLASS;
5359 return 1;
5360 case TDmode:
5361 classes[0] = X86_64_SSE_CLASS;
5362 classes[1] = X86_64_SSEUP_CLASS;
5363 return 2;
5364 case DImode:
5365 case SImode:
5366 case HImode:
5367 case QImode:
5368 case CSImode:
5369 case CHImode:
5370 case CQImode:
5372 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
5374 if (size <= 32)
5376 classes[0] = X86_64_INTEGERSI_CLASS;
5377 return 1;
5379 else if (size <= 64)
5381 classes[0] = X86_64_INTEGER_CLASS;
5382 return 1;
5384 else if (size <= 64+32)
5386 classes[0] = X86_64_INTEGER_CLASS;
5387 classes[1] = X86_64_INTEGERSI_CLASS;
5388 return 2;
5390 else if (size <= 64+64)
5392 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5393 return 2;
5395 else
5396 gcc_unreachable ();
5398 case CDImode:
5399 case TImode:
5400 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5401 return 2;
5402 case COImode:
5403 case OImode:
5404 /* OImode shouldn't be used directly. */
5405 gcc_unreachable ();
5406 case CTImode:
5407 return 0;
5408 case SFmode:
5409 if (!(bit_offset % 64))
5410 classes[0] = X86_64_SSESF_CLASS;
5411 else
5412 classes[0] = X86_64_SSE_CLASS;
5413 return 1;
5414 case DFmode:
5415 classes[0] = X86_64_SSEDF_CLASS;
5416 return 1;
5417 case XFmode:
5418 classes[0] = X86_64_X87_CLASS;
5419 classes[1] = X86_64_X87UP_CLASS;
5420 return 2;
5421 case TFmode:
5422 classes[0] = X86_64_SSE_CLASS;
5423 classes[1] = X86_64_SSEUP_CLASS;
5424 return 2;
5425 case SCmode:
5426 classes[0] = X86_64_SSE_CLASS;
5427 if (!(bit_offset % 64))
5428 return 1;
5429 else
5431 static bool warned;
5433 if (!warned && warn_psabi)
5435 warned = true;
5436 inform (input_location,
5437 "The ABI of passing structure with complex float"
5438 " member has changed in GCC 4.4");
5440 classes[1] = X86_64_SSESF_CLASS;
5441 return 2;
5443 case DCmode:
5444 classes[0] = X86_64_SSEDF_CLASS;
5445 classes[1] = X86_64_SSEDF_CLASS;
5446 return 2;
5447 case XCmode:
5448 classes[0] = X86_64_COMPLEX_X87_CLASS;
5449 return 1;
5450 case TCmode:
5451 /* This mode is larger than 16 bytes. */
5452 return 0;
5453 case V8SFmode:
5454 case V8SImode:
5455 case V32QImode:
5456 case V16HImode:
5457 case V4DFmode:
5458 case V4DImode:
5459 classes[0] = X86_64_SSE_CLASS;
5460 classes[1] = X86_64_SSEUP_CLASS;
5461 classes[2] = X86_64_SSEUP_CLASS;
5462 classes[3] = X86_64_SSEUP_CLASS;
5463 return 4;
5464 case V4SFmode:
5465 case V4SImode:
5466 case V16QImode:
5467 case V8HImode:
5468 case V2DFmode:
5469 case V2DImode:
5470 classes[0] = X86_64_SSE_CLASS;
5471 classes[1] = X86_64_SSEUP_CLASS;
5472 return 2;
5473 case V1DImode:
5474 case V2SFmode:
5475 case V2SImode:
5476 case V4HImode:
5477 case V8QImode:
5478 classes[0] = X86_64_SSE_CLASS;
5479 return 1;
5480 case BLKmode:
5481 case VOIDmode:
5482 return 0;
5483 default:
5484 gcc_assert (VECTOR_MODE_P (mode));
5486 if (bytes > 16)
5487 return 0;
5489 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5491 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5492 classes[0] = X86_64_INTEGERSI_CLASS;
5493 else
5494 classes[0] = X86_64_INTEGER_CLASS;
5495 classes[1] = X86_64_INTEGER_CLASS;
5496 return 1 + (bytes > 8);
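/* Worked example (illustrative): for struct { double d; int i; } the double
   classifies as X86_64_SSEDF_CLASS and the int as X86_64_INTEGERSI_CLASS,
   so this function returns 2 with classes[] = { SSEDF, INTEGERSI } and the
   struct ends up split between one SSE and one integer register.  */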
5500 /* Examine the argument and set the number of registers required in each
5501 class. Return 0 iff the parameter should be passed in memory. */
5502 static int
5503 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5504 int *int_nregs, int *sse_nregs)
5506 enum x86_64_reg_class regclass[MAX_CLASSES];
5507 int n = classify_argument (mode, type, regclass, 0);
5509 *int_nregs = 0;
5510 *sse_nregs = 0;
5511 if (!n)
5512 return 0;
5513 for (n--; n >= 0; n--)
5514 switch (regclass[n])
5516 case X86_64_INTEGER_CLASS:
5517 case X86_64_INTEGERSI_CLASS:
5518 (*int_nregs)++;
5519 break;
5520 case X86_64_SSE_CLASS:
5521 case X86_64_SSESF_CLASS:
5522 case X86_64_SSEDF_CLASS:
5523 (*sse_nregs)++;
5524 break;
5525 case X86_64_NO_CLASS:
5526 case X86_64_SSEUP_CLASS:
5527 break;
5528 case X86_64_X87_CLASS:
5529 case X86_64_X87UP_CLASS:
5530 if (!in_return)
5531 return 0;
5532 break;
5533 case X86_64_COMPLEX_X87_CLASS:
5534 return in_return ? 2 : 0;
5535 case X86_64_MEMORY_CLASS:
5536 gcc_unreachable ();
5538 return 1;
5541 /* Construct container for the argument used by GCC interface. See
5542 FUNCTION_ARG for the detailed description. */
5544 static rtx
5545 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5546 const_tree type, int in_return, int nintregs, int nsseregs,
5547 const int *intreg, int sse_regno)
5549 /* The following variables hold the static issued_error state. */
5550 static bool issued_sse_arg_error;
5551 static bool issued_sse_ret_error;
5552 static bool issued_x87_ret_error;
5554 enum machine_mode tmpmode;
5555 int bytes =
5556 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5557 enum x86_64_reg_class regclass[MAX_CLASSES];
5558 int n;
5559 int i;
5560 int nexps = 0;
5561 int needed_sseregs, needed_intregs;
5562 rtx exp[MAX_CLASSES];
5563 rtx ret;
5565 n = classify_argument (mode, type, regclass, 0);
5566 if (!n)
5567 return NULL;
5568 if (!examine_argument (mode, type, in_return, &needed_intregs,
5569 &needed_sseregs))
5570 return NULL;
5571 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5572 return NULL;
5574 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5575 some less clueful developer tries to use floating-point anyway. */
5576 if (needed_sseregs && !TARGET_SSE)
5578 if (in_return)
5580 if (!issued_sse_ret_error)
5582 error ("SSE register return with SSE disabled");
5583 issued_sse_ret_error = true;
5586 else if (!issued_sse_arg_error)
5588 error ("SSE register argument with SSE disabled");
5589 issued_sse_arg_error = true;
5591 return NULL;
5594 /* Likewise, error if the ABI requires us to return values in the
5595 x87 registers and the user specified -mno-80387. */
5596 if (!TARGET_80387 && in_return)
5597 for (i = 0; i < n; i++)
5598 if (regclass[i] == X86_64_X87_CLASS
5599 || regclass[i] == X86_64_X87UP_CLASS
5600 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5602 if (!issued_x87_ret_error)
5604 error ("x87 register return with x87 disabled");
5605 issued_x87_ret_error = true;
5607 return NULL;
5610 /* First construct simple cases. Avoid SCmode, since we want to use
5611 single register to pass this type. */
5612 if (n == 1 && mode != SCmode)
5613 switch (regclass[0])
5615 case X86_64_INTEGER_CLASS:
5616 case X86_64_INTEGERSI_CLASS:
5617 return gen_rtx_REG (mode, intreg[0]);
5618 case X86_64_SSE_CLASS:
5619 case X86_64_SSESF_CLASS:
5620 case X86_64_SSEDF_CLASS:
5621 if (mode != BLKmode)
5622 return gen_reg_or_parallel (mode, orig_mode,
5623 SSE_REGNO (sse_regno));
5624 break;
5625 case X86_64_X87_CLASS:
5626 case X86_64_COMPLEX_X87_CLASS:
5627 return gen_rtx_REG (mode, FIRST_STACK_REG);
5628 case X86_64_NO_CLASS:
5629 /* Zero sized array, struct or class. */
5630 return NULL;
5631 default:
5632 gcc_unreachable ();
5634 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5635 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5636 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5637 if (n == 4
5638 && regclass[0] == X86_64_SSE_CLASS
5639 && regclass[1] == X86_64_SSEUP_CLASS
5640 && regclass[2] == X86_64_SSEUP_CLASS
5641 && regclass[3] == X86_64_SSEUP_CLASS
5642 && mode != BLKmode)
5643 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5645 if (n == 2
5646 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5647 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5648 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5649 && regclass[1] == X86_64_INTEGER_CLASS
5650 && (mode == CDImode || mode == TImode || mode == TFmode)
5651 && intreg[0] + 1 == intreg[1])
5652 return gen_rtx_REG (mode, intreg[0]);
5654 /* Otherwise figure out the entries of the PARALLEL. */
5655 for (i = 0; i < n; i++)
5657 int pos;
5659 switch (regclass[i])
5661 case X86_64_NO_CLASS:
5662 break;
5663 case X86_64_INTEGER_CLASS:
5664 case X86_64_INTEGERSI_CLASS:
5665 /* Merge TImodes on aligned occasions here too. */
5666 if (i * 8 + 8 > bytes)
5667 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5668 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5669 tmpmode = SImode;
5670 else
5671 tmpmode = DImode;
5672 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
5673 if (tmpmode == BLKmode)
5674 tmpmode = DImode;
5675 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5676 gen_rtx_REG (tmpmode, *intreg),
5677 GEN_INT (i*8));
5678 intreg++;
5679 break;
5680 case X86_64_SSESF_CLASS:
5681 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5682 gen_rtx_REG (SFmode,
5683 SSE_REGNO (sse_regno)),
5684 GEN_INT (i*8));
5685 sse_regno++;
5686 break;
5687 case X86_64_SSEDF_CLASS:
5688 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5689 gen_rtx_REG (DFmode,
5690 SSE_REGNO (sse_regno)),
5691 GEN_INT (i*8));
5692 sse_regno++;
5693 break;
5694 case X86_64_SSE_CLASS:
5695 pos = i;
5696 switch (n)
5698 case 1:
5699 tmpmode = DImode;
5700 break;
5701 case 2:
5702 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5704 tmpmode = TImode;
5705 i++;
5707 else
5708 tmpmode = DImode;
5709 break;
5710 case 4:
5711 gcc_assert (i == 0
5712 && regclass[1] == X86_64_SSEUP_CLASS
5713 && regclass[2] == X86_64_SSEUP_CLASS
5714 && regclass[3] == X86_64_SSEUP_CLASS);
5715 tmpmode = OImode;
5716 i += 3;
5717 break;
5718 default:
5719 gcc_unreachable ();
5721 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5722 gen_rtx_REG (tmpmode,
5723 SSE_REGNO (sse_regno)),
5724 GEN_INT (pos*8));
5725 sse_regno++;
5726 break;
5727 default:
5728 gcc_unreachable ();
5732 /* Empty aligned struct, union or class. */
5733 if (nexps == 0)
5734 return NULL;
5736 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5737 for (i = 0; i < nexps; i++)
5738 XVECEXP (ret, 0, i) = exp [i];
5739 return ret;
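/* Continuing the illustrative struct { double d; int i; } example, the
   PARALLEL built here for a first argument would look roughly like
     (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                (expr_list (reg:SI di) (const_int 8))])
   i.e. the first eightbyte lives in an SSE register at offset 0 and the
   second in an integer register at offset 8.  */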
5742 /* Update the data in CUM to advance over an argument of mode MODE
5743 and data type TYPE. (TYPE is null for libcalls where that information
5744 may not be available.) */
5746 static void
5747 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5748 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5750 switch (mode)
5752 default:
5753 break;
5755 case BLKmode:
5756 if (bytes < 0)
5757 break;
5758 /* FALLTHRU */
5760 case DImode:
5761 case SImode:
5762 case HImode:
5763 case QImode:
5764 cum->words += words;
5765 cum->nregs -= words;
5766 cum->regno += words;
5768 if (cum->nregs <= 0)
5770 cum->nregs = 0;
5771 cum->regno = 0;
5773 break;
5775 case OImode:
5776 /* OImode shouldn't be used directly. */
5777 gcc_unreachable ();
5779 case DFmode:
5780 if (cum->float_in_sse < 2)
5781 break;
5782 case SFmode:
5783 if (cum->float_in_sse < 1)
5784 break;
5785 /* FALLTHRU */
5787 case V8SFmode:
5788 case V8SImode:
5789 case V32QImode:
5790 case V16HImode:
5791 case V4DFmode:
5792 case V4DImode:
5793 case TImode:
5794 case V16QImode:
5795 case V8HImode:
5796 case V4SImode:
5797 case V2DImode:
5798 case V4SFmode:
5799 case V2DFmode:
5800 if (!type || !AGGREGATE_TYPE_P (type))
5802 cum->sse_words += words;
5803 cum->sse_nregs -= 1;
5804 cum->sse_regno += 1;
5805 if (cum->sse_nregs <= 0)
5807 cum->sse_nregs = 0;
5808 cum->sse_regno = 0;
5811 break;
5813 case V8QImode:
5814 case V4HImode:
5815 case V2SImode:
5816 case V2SFmode:
5817 case V1DImode:
5818 if (!type || !AGGREGATE_TYPE_P (type))
5820 cum->mmx_words += words;
5821 cum->mmx_nregs -= 1;
5822 cum->mmx_regno += 1;
5823 if (cum->mmx_nregs <= 0)
5825 cum->mmx_nregs = 0;
5826 cum->mmx_regno = 0;
5829 break;
5833 static void
5834 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5835 tree type, HOST_WIDE_INT words, int named)
5837 int int_nregs, sse_nregs;
5839 /* Unnamed 256bit vector mode parameters are passed on stack. */
5840 if (!named && VALID_AVX256_REG_MODE (mode))
5841 return;
5843 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5844 cum->words += words;
5845 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5847 cum->nregs -= int_nregs;
5848 cum->sse_nregs -= sse_nregs;
5849 cum->regno += int_nregs;
5850 cum->sse_regno += sse_nregs;
5852 else
5853 cum->words += words;
5856 static void
5857 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5858 HOST_WIDE_INT words)
5860 /* Otherwise, this should be passed indirect. */
5861 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5863 cum->words += words;
5864 if (cum->nregs > 0)
5866 cum->nregs -= 1;
5867 cum->regno += 1;
5871 void
5872 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5873 tree type, int named)
5875 HOST_WIDE_INT bytes, words;
5877 if (mode == BLKmode)
5878 bytes = int_size_in_bytes (type);
5879 else
5880 bytes = GET_MODE_SIZE (mode);
5881 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5883 if (type)
5884 mode = type_natural_mode (type, NULL);
5886 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5887 function_arg_advance_ms_64 (cum, bytes, words);
5888 else if (TARGET_64BIT)
5889 function_arg_advance_64 (cum, mode, type, words, named);
5890 else
5891 function_arg_advance_32 (cum, mode, type, bytes, words);
5894 /* Define where to put the arguments to a function.
5895 Value is zero to push the argument on the stack,
5896 or a hard register in which to store the argument.
5898 MODE is the argument's machine mode.
5899 TYPE is the data type of the argument (as a tree).
5900 This is null for libcalls where that information may
5901 not be available.
5902 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5903 the preceding args and about the function being called.
5904 NAMED is nonzero if this argument is a named parameter
5905 (otherwise it is an extra parameter matching an ellipsis). */
5907 static rtx
5908 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5909 enum machine_mode orig_mode, tree type,
5910 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5912 static bool warnedsse, warnedmmx;
5914 /* Avoid the AL settings for the Unix64 ABI. */
5915 if (mode == VOIDmode)
5916 return constm1_rtx;
5918 switch (mode)
5920 default:
5921 break;
5923 case BLKmode:
5924 if (bytes < 0)
5925 break;
5926 /* FALLTHRU */
5927 case DImode:
5928 case SImode:
5929 case HImode:
5930 case QImode:
5931 if (words <= cum->nregs)
5933 int regno = cum->regno;
5935 /* Fastcall allocates the first two DWORD (SImode) or
5936 smaller arguments to ECX and EDX if it isn't an
5937 aggregate type. */
5938 if (cum->fastcall)
5940 if (mode == BLKmode
5941 || mode == DImode
5942 || (type && AGGREGATE_TYPE_P (type)))
5943 break;
5945 /* ECX not EAX is the first allocated register. */
5946 if (regno == AX_REG)
5947 regno = CX_REG;
5949 return gen_rtx_REG (mode, regno);
5951 break;
5953 case DFmode:
5954 if (cum->float_in_sse < 2)
5955 break;
5956 case SFmode:
5957 if (cum->float_in_sse < 1)
5958 break;
5959 /* FALLTHRU */
5960 case TImode:
5961 /* In 32bit, we pass TImode in xmm registers. */
5962 case V16QImode:
5963 case V8HImode:
5964 case V4SImode:
5965 case V2DImode:
5966 case V4SFmode:
5967 case V2DFmode:
5968 if (!type || !AGGREGATE_TYPE_P (type))
5970 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
5972 warnedsse = true;
5973 warning (0, "SSE vector argument without SSE enabled "
5974 "changes the ABI");
5976 if (cum->sse_nregs)
5977 return gen_reg_or_parallel (mode, orig_mode,
5978 cum->sse_regno + FIRST_SSE_REG);
5980 break;
5982 case OImode:
5983 /* OImode shouldn't be used directly. */
5984 gcc_unreachable ();
5986 case V8SFmode:
5987 case V8SImode:
5988 case V32QImode:
5989 case V16HImode:
5990 case V4DFmode:
5991 case V4DImode:
5992 if (!type || !AGGREGATE_TYPE_P (type))
5994 if (cum->sse_nregs)
5995 return gen_reg_or_parallel (mode, orig_mode,
5996 cum->sse_regno + FIRST_SSE_REG);
5998 break;
6000 case V8QImode:
6001 case V4HImode:
6002 case V2SImode:
6003 case V2SFmode:
6004 case V1DImode:
6005 if (!type || !AGGREGATE_TYPE_P (type))
6007 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6009 warnedmmx = true;
6010 warning (0, "MMX vector argument without MMX enabled "
6011 "changes the ABI");
6013 if (cum->mmx_nregs)
6014 return gen_reg_or_parallel (mode, orig_mode,
6015 cum->mmx_regno + FIRST_MMX_REG);
6017 break;
6020 return NULL_RTX;
6023 static rtx
6024 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6025 enum machine_mode orig_mode, tree type, int named)
6027 /* Handle a hidden AL argument containing number of registers
6028 for varargs x86-64 functions. */
6029 if (mode == VOIDmode)
6030 return GEN_INT (cum->maybe_vaarg
6031 ? (cum->sse_nregs < 0
6032 ? (cum->call_abi == ix86_abi
6033 ? SSE_REGPARM_MAX
6034 : (ix86_abi != SYSV_ABI
6035 ? X86_64_SSE_REGPARM_MAX
6036 : X86_64_MS_SSE_REGPARM_MAX))
6037 : cum->sse_regno)
6038 : -1);
6040 switch (mode)
6042 default:
6043 break;
6045 case V8SFmode:
6046 case V8SImode:
6047 case V32QImode:
6048 case V16HImode:
6049 case V4DFmode:
6050 case V4DImode:
6051 /* Unnamed 256bit vector mode parameters are passed on stack. */
6052 if (!named)
6053 return NULL;
6054 break;
6057 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6058 cum->sse_nregs,
6059 &x86_64_int_parameter_registers [cum->regno],
6060 cum->sse_regno);
6063 static rtx
6064 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6065 enum machine_mode orig_mode, int named,
6066 HOST_WIDE_INT bytes)
6068 unsigned int regno;
6070 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6071 We use the value -2 to specify that the current function call is MS ABI. */
6072 if (mode == VOIDmode)
6073 return GEN_INT (-2);
6075 /* If we've run out of registers, it goes on the stack. */
6076 if (cum->nregs == 0)
6077 return NULL_RTX;
6079 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6081 /* Only floating point modes are passed in anything but integer regs. */
6082 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6084 if (named)
6085 regno = cum->regno + FIRST_SSE_REG;
6086 else
6088 rtx t1, t2;
6090 /* Unnamed floating parameters are passed in both the
6091 SSE and integer registers. */
6092 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6093 t2 = gen_rtx_REG (mode, regno);
6094 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6095 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6096 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6099 /* Handle aggregate types passed in registers. */
6100 if (orig_mode == BLKmode)
6102 if (bytes > 0 && bytes <= 8)
6103 mode = (bytes > 4 ? DImode : SImode);
6104 if (mode == BLKmode)
6105 mode = DImode;
6108 return gen_reg_or_parallel (mode, orig_mode, regno);
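/* For reference (illustrative summary): under the Microsoft x64 convention
   handled here, the first four arguments go in rcx, rdx, r8 and r9 (or
   xmm0-xmm3 for named SFmode/DFmode values), and anything whose size is not
   1, 2, 4 or 8 bytes is passed by reference; see ix86_pass_by_reference
   below.  */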
6112 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6113 tree type, int named)
6115 enum machine_mode mode = omode;
6116 HOST_WIDE_INT bytes, words;
6118 if (mode == BLKmode)
6119 bytes = int_size_in_bytes (type);
6120 else
6121 bytes = GET_MODE_SIZE (mode);
6122 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6124 /* To simplify the code below, represent vector types with a vector mode
6125 even if MMX/SSE are not active. */
6126 if (type && TREE_CODE (type) == VECTOR_TYPE)
6127 mode = type_natural_mode (type, cum);
6129 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6130 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6131 else if (TARGET_64BIT)
6132 return function_arg_64 (cum, mode, omode, type, named);
6133 else
6134 return function_arg_32 (cum, mode, omode, type, bytes, words);
6137 /* A C expression that indicates when an argument must be passed by
6138 reference. If nonzero for an argument, a copy of that argument is
6139 made in memory and a pointer to the argument is passed instead of
6140 the argument itself. The pointer is passed in whatever way is
6141 appropriate for passing a pointer to that type. */
6143 static bool
6144 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6145 enum machine_mode mode ATTRIBUTE_UNUSED,
6146 const_tree type, bool named ATTRIBUTE_UNUSED)
6148 /* See Windows x64 Software Convention. */
6149 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6151 int msize = (int) GET_MODE_SIZE (mode);
6152 if (type)
6154 /* Arrays are passed by reference. */
6155 if (TREE_CODE (type) == ARRAY_TYPE)
6156 return true;
6158 if (AGGREGATE_TYPE_P (type))
6160 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6161 are passed by reference. */
6162 msize = int_size_in_bytes (type);
6166 /* __m128 is passed by reference. */
6167 switch (msize) {
6168 case 1: case 2: case 4: case 8:
6169 break;
6170 default:
6171 return true;
6174 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6175 return 1;
6177 return 0;
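/* Illustrative consequences of the checks above: on the MS ABI an __m128
   argument (16 bytes) and a 3-byte struct are both passed by reference,
   while an 8-byte struct is passed by value in a register.  */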
6180 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
6181 ABI. */
6182 static bool
6183 contains_aligned_value_p (tree type)
6185 enum machine_mode mode = TYPE_MODE (type);
6186 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6187 || mode == TDmode
6188 || mode == TFmode
6189 || mode == TCmode)
6190 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6191 return true;
6192 if (TYPE_ALIGN (type) < 128)
6193 return false;
6195 if (AGGREGATE_TYPE_P (type))
6197 /* Walk the aggregates recursively. */
6198 switch (TREE_CODE (type))
6200 case RECORD_TYPE:
6201 case UNION_TYPE:
6202 case QUAL_UNION_TYPE:
6204 tree field;
6206 /* Walk all the structure fields. */
6207 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6209 if (TREE_CODE (field) == FIELD_DECL
6210 && contains_aligned_value_p (TREE_TYPE (field)))
6211 return true;
6213 break;
6216 case ARRAY_TYPE:
6217 /* Just for use if some language passes arrays by value. */
6218 if (contains_aligned_value_p (TREE_TYPE (type)))
6219 return true;
6220 break;
6222 default:
6223 gcc_unreachable ();
6226 return false;
6229 /* Gives the alignment boundary, in bits, of an argument with the
6230 specified mode and type. */
6233 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6235 int align;
6236 if (type)
6238 /* Since the canonical type is used for the call, convert TYPE to
6239 its canonical type if needed. */
6240 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6241 type = TYPE_CANONICAL (type);
6242 align = TYPE_ALIGN (type);
6244 else
6245 align = GET_MODE_ALIGNMENT (mode);
6246 if (align < PARM_BOUNDARY)
6247 align = PARM_BOUNDARY;
6248 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6249 natural boundaries. */
6250 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6252 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6253 make an exception for SSE modes since these require 128bit
6254 alignment.
6256 The handling here differs from field_alignment. ICC aligns MMX
6257 arguments to 4 byte boundaries, while structure fields are aligned
6258 to 8 byte boundaries. */
6259 if (!type)
6261 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6262 align = PARM_BOUNDARY;
6264 else
6266 if (!contains_aligned_value_p (type))
6267 align = PARM_BOUNDARY;
6270 if (align > BIGGEST_ALIGNMENT)
6271 align = BIGGEST_ALIGNMENT;
6272 return align;
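/* Example of the effect (illustrative): in 32-bit mode an __m128 or
   _Decimal128 argument keeps its 128-bit alignment on the stack, while an
   ordinary double argument is only aligned to the 32-bit PARM_BOUNDARY.  */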
6275 /* Return true if N is a possible register number of function value. */
6277 bool
6278 ix86_function_value_regno_p (int regno)
6280 switch (regno)
6282 case 0:
6283 return true;
6285 case FIRST_FLOAT_REG:
6286 /* TODO: The function should depend on current function ABI but
6287 builtins.c would need updating then. Therefore we use the
6288 default ABI. */
6289 if (TARGET_64BIT && ix86_abi == MS_ABI)
6290 return false;
6291 return TARGET_FLOAT_RETURNS_IN_80387;
6293 case FIRST_SSE_REG:
6294 return TARGET_SSE;
6296 case FIRST_MMX_REG:
6297 if (TARGET_MACHO || TARGET_64BIT)
6298 return false;
6299 return TARGET_MMX;
6302 return false;
6305 /* Define how to find the value returned by a function.
6306 VALTYPE is the data type of the value (as a tree).
6307 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6308 otherwise, FUNC is 0. */
6310 static rtx
6311 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6312 const_tree fntype, const_tree fn)
6314 unsigned int regno;
6316 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6317 we normally prevent this case when mmx is not available. However
6318 some ABIs may require the result to be returned like DImode. */
6319 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6320 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6322 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6323 we prevent this case when sse is not available. However some ABIs
6324 may require the result to be returned like integer TImode. */
6325 else if (mode == TImode
6326 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6327 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6329 /* 32-byte vector modes in %ymm0. */
6330 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6331 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6333 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6334 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6335 regno = FIRST_FLOAT_REG;
6336 else
6337 /* Most things go in %eax. */
6338 regno = AX_REG;
6340 /* Override FP return register with %xmm0 for local functions when
6341 SSE math is enabled or for functions with sseregparm attribute. */
6342 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6344 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6345 if ((sse_level >= 1 && mode == SFmode)
6346 || (sse_level == 2 && mode == DFmode))
6347 regno = FIRST_SSE_REG;
6350 /* OImode shouldn't be used directly. */
6351 gcc_assert (mode != OImode);
6353 return gen_rtx_REG (orig_mode, regno);
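/* Summary of the register choice above (illustrative): most values return
   in %eax, x87 floats in %st(0), 8-byte vectors in %mm0, 16- and 32-byte
   vectors in %xmm0/%ymm0, and SFmode/DFmode values move to %xmm0 for local
   functions using SSE math or functions with the sseregparm attribute.  */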
6356 static rtx
6357 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6358 const_tree valtype)
6360 rtx ret;
6362 /* Handle libcalls, which don't provide a type node. */
6363 if (valtype == NULL)
6365 switch (mode)
6367 case SFmode:
6368 case SCmode:
6369 case DFmode:
6370 case DCmode:
6371 case TFmode:
6372 case SDmode:
6373 case DDmode:
6374 case TDmode:
6375 return gen_rtx_REG (mode, FIRST_SSE_REG);
6376 case XFmode:
6377 case XCmode:
6378 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6379 case TCmode:
6380 return NULL;
6381 default:
6382 return gen_rtx_REG (mode, AX_REG);
6386 ret = construct_container (mode, orig_mode, valtype, 1,
6387 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6388 x86_64_int_return_registers, 0);
6390 /* For zero sized structures, construct_container returns NULL, but we
6391 need to keep the rest of the compiler happy by returning a meaningful value. */
6392 if (!ret)
6393 ret = gen_rtx_REG (orig_mode, AX_REG);
6395 return ret;
6398 static rtx
6399 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6401 unsigned int regno = AX_REG;
6403 if (TARGET_SSE)
6405 switch (GET_MODE_SIZE (mode))
6407 case 16:
6408 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6409 && !COMPLEX_MODE_P (mode))
6410 regno = FIRST_SSE_REG;
6411 break;
6412 case 8:
6413 case 4:
6414 if (mode == SFmode || mode == DFmode)
6415 regno = FIRST_SSE_REG;
6416 break;
6417 default:
6418 break;
6421 return gen_rtx_REG (orig_mode, regno);
6424 static rtx
6425 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6426 enum machine_mode orig_mode, enum machine_mode mode)
6428 const_tree fn, fntype;
6430 fn = NULL_TREE;
6431 if (fntype_or_decl && DECL_P (fntype_or_decl))
6432 fn = fntype_or_decl;
6433 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6435 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6436 return function_value_ms_64 (orig_mode, mode);
6437 else if (TARGET_64BIT)
6438 return function_value_64 (orig_mode, mode, valtype);
6439 else
6440 return function_value_32 (orig_mode, mode, fntype, fn);
6443 static rtx
6444 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6445 bool outgoing ATTRIBUTE_UNUSED)
6447 enum machine_mode mode, orig_mode;
6449 orig_mode = TYPE_MODE (valtype);
6450 mode = type_natural_mode (valtype, NULL);
6451 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6455 ix86_libcall_value (enum machine_mode mode)
6457 return ix86_function_value_1 (NULL, NULL, mode, mode);
6460 /* Return true iff type is returned in memory. */
6462 static int ATTRIBUTE_UNUSED
6463 return_in_memory_32 (const_tree type, enum machine_mode mode)
6465 HOST_WIDE_INT size;
6467 if (mode == BLKmode)
6468 return 1;
6470 size = int_size_in_bytes (type);
6472 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6473 return 0;
6475 if (VECTOR_MODE_P (mode) || mode == TImode)
6477 /* User-created vectors small enough to fit in EAX. */
6478 if (size < 8)
6479 return 0;
6481 /* MMX/3dNow values are returned in MM0,
6482 except when it doesn't exist. */
6483 if (size == 8)
6484 return (TARGET_MMX ? 0 : 1);
6486 /* SSE values are returned in XMM0, except when it doesn't exist. */
6487 if (size == 16)
6488 return (TARGET_SSE ? 0 : 1);
6490 /* AVX values are returned in YMM0, except when it doesn't exist. */
6491 if (size == 32)
6492 return TARGET_AVX ? 0 : 1;
6495 if (mode == XFmode)
6496 return 0;
6498 if (size > 12)
6499 return 1;
6501 /* OImode shouldn't be used directly. */
6502 gcc_assert (mode != OImode);
6504 return 0;
6507 static int ATTRIBUTE_UNUSED
6508 return_in_memory_64 (const_tree type, enum machine_mode mode)
6510 int needed_intregs, needed_sseregs;
6511 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6514 static int ATTRIBUTE_UNUSED
6515 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6517 HOST_WIDE_INT size = int_size_in_bytes (type);
6519 /* __m128 is returned in xmm0. */
6520 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6521 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6522 return 0;
6524 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6525 return (size != 1 && size != 2 && size != 4 && size != 8);
6528 static bool
6529 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6531 #ifdef SUBTARGET_RETURN_IN_MEMORY
6532 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6533 #else
6534 const enum machine_mode mode = type_natural_mode (type, NULL);
6536 if (TARGET_64BIT)
6538 if (ix86_function_type_abi (fntype) == MS_ABI)
6539 return return_in_memory_ms_64 (type, mode);
6540 else
6541 return return_in_memory_64 (type, mode);
6543 else
6544 return return_in_memory_32 (type, mode);
6545 #endif
6548 /* Return true iff TYPE is returned in memory. This version is used
6549 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6550 but differs notably in that when MMX is available, 8-byte vectors
6551 are returned in memory, rather than in MMX registers. */
6553 bool
6554 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6556 int size;
6557 enum machine_mode mode = type_natural_mode (type, NULL);
6559 if (TARGET_64BIT)
6560 return return_in_memory_64 (type, mode);
6562 if (mode == BLKmode)
6563 return 1;
6565 size = int_size_in_bytes (type);
6567 if (VECTOR_MODE_P (mode))
6569 /* Return in memory only if MMX registers *are* available. This
6570 seems backwards, but it is consistent with the existing
6571 Solaris x86 ABI. */
6572 if (size == 8)
6573 return TARGET_MMX;
6574 if (size == 16)
6575 return !TARGET_SSE;
6577 else if (mode == TImode)
6578 return !TARGET_SSE;
6579 else if (mode == XFmode)
6580 return 0;
6582 return size > 12;
6585 /* When returning SSE vector types, we have a choice of either
6586 (1) being ABI incompatible with a -march switch, or
6587 (2) generating an error.
6588 Given no good solution, I think the safest thing is one warning.
6589 The user won't be able to use -Werror, but....
6591 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6592 called in response to actually generating a caller or callee that
6593 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6594 via aggregate_value_p for general type probing from tree-ssa. */
6596 static rtx
6597 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6599 static bool warnedsse, warnedmmx;
6601 if (!TARGET_64BIT && type)
6603 /* Look at the return type of the function, not the function type. */
6604 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6606 if (!TARGET_SSE && !warnedsse)
6608 if (mode == TImode
6609 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6611 warnedsse = true;
6612 warning (0, "SSE vector return without SSE enabled "
6613 "changes the ABI");
6617 if (!TARGET_MMX && !warnedmmx)
6619 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6621 warnedmmx = true;
6622 warning (0, "MMX vector return without MMX enabled "
6623 "changes the ABI");
6628 return NULL;
6632 /* Create the va_list data type. */
6634 /* Returns the calling convention specific va_list data type.
6635 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6637 static tree
6638 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6640 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6642 /* For i386 we use plain pointer to argument area. */
6643 if (!TARGET_64BIT || abi == MS_ABI)
6644 return build_pointer_type (char_type_node);
6646 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6647 type_decl = build_decl (BUILTINS_LOCATION,
6648 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6650 f_gpr = build_decl (BUILTINS_LOCATION,
6651 FIELD_DECL, get_identifier ("gp_offset"),
6652 unsigned_type_node);
6653 f_fpr = build_decl (BUILTINS_LOCATION,
6654 FIELD_DECL, get_identifier ("fp_offset"),
6655 unsigned_type_node);
6656 f_ovf = build_decl (BUILTINS_LOCATION,
6657 FIELD_DECL, get_identifier ("overflow_arg_area"),
6658 ptr_type_node);
6659 f_sav = build_decl (BUILTINS_LOCATION,
6660 FIELD_DECL, get_identifier ("reg_save_area"),
6661 ptr_type_node);
6663 va_list_gpr_counter_field = f_gpr;
6664 va_list_fpr_counter_field = f_fpr;
6666 DECL_FIELD_CONTEXT (f_gpr) = record;
6667 DECL_FIELD_CONTEXT (f_fpr) = record;
6668 DECL_FIELD_CONTEXT (f_ovf) = record;
6669 DECL_FIELD_CONTEXT (f_sav) = record;
6671 TREE_CHAIN (record) = type_decl;
6672 TYPE_NAME (record) = type_decl;
6673 TYPE_FIELDS (record) = f_gpr;
6674 TREE_CHAIN (f_gpr) = f_fpr;
6675 TREE_CHAIN (f_fpr) = f_ovf;
6676 TREE_CHAIN (f_ovf) = f_sav;
6678 layout_type (record);
6680 /* The correct type is an array type of one element. */
6681 return build_array_type (record, build_index_type (size_zero_node));
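/* The record built above matches the SysV x86-64 va_list layout; roughly
   (illustrative C):

     typedef struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __builtin_va_list[1];  */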
6684 /* Setup the builtin va_list data type and for 64-bit the additional
6685 calling convention specific va_list data types. */
6687 static tree
6688 ix86_build_builtin_va_list (void)
6690 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6692 /* Initialize abi specific va_list builtin types. */
6693 if (TARGET_64BIT)
6695 tree t;
6696 if (ix86_abi == MS_ABI)
6698 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6699 if (TREE_CODE (t) != RECORD_TYPE)
6700 t = build_variant_type_copy (t);
6701 sysv_va_list_type_node = t;
6703 else
6705 t = ret;
6706 if (TREE_CODE (t) != RECORD_TYPE)
6707 t = build_variant_type_copy (t);
6708 sysv_va_list_type_node = t;
6710 if (ix86_abi != MS_ABI)
6712 t = ix86_build_builtin_va_list_abi (MS_ABI);
6713 if (TREE_CODE (t) != RECORD_TYPE)
6714 t = build_variant_type_copy (t);
6715 ms_va_list_type_node = t;
6717 else
6719 t = ret;
6720 if (TREE_CODE (t) != RECORD_TYPE)
6721 t = build_variant_type_copy (t);
6722 ms_va_list_type_node = t;
6726 return ret;
6729 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6731 static void
6732 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6734 rtx save_area, mem;
6735 rtx label;
6736 rtx label_ref;
6737 rtx tmp_reg;
6738 rtx nsse_reg;
6739 alias_set_type set;
6740 int i;
6741 int regparm = ix86_regparm;
6743 if (cum->call_abi != ix86_abi)
6744 regparm = (ix86_abi != SYSV_ABI
6745 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6747 /* GPR size of varargs save area. */
6748 if (cfun->va_list_gpr_size)
6749 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6750 else
6751 ix86_varargs_gpr_size = 0;
6753 /* FPR size of varargs save area. We don't need it if we don't pass
6754 anything in SSE registers. */
6755 if (cum->sse_nregs && cfun->va_list_fpr_size)
6756 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6757 else
6758 ix86_varargs_fpr_size = 0;
6760 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6761 return;
6763 save_area = frame_pointer_rtx;
6764 set = get_varargs_alias_set ();
6766 for (i = cum->regno;
6767 i < regparm
6768 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6769 i++)
6771 mem = gen_rtx_MEM (Pmode,
6772 plus_constant (save_area, i * UNITS_PER_WORD));
6773 MEM_NOTRAP_P (mem) = 1;
6774 set_mem_alias_set (mem, set);
6775 emit_move_insn (mem, gen_rtx_REG (Pmode,
6776 x86_64_int_parameter_registers[i]));
6779 if (ix86_varargs_fpr_size)
6781 /* Now emit code to save SSE registers. The AX parameter contains number
6782 of SSE parameter registers used to call this function. We use
6783 sse_prologue_save insn template that produces computed jump across
6784 SSE saves. We need some preparation work to get this working. */
6786 label = gen_label_rtx ();
6787 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6789 /* Compute the address to jump to:
6790 label - eax*4 + nnamed_sse_arguments*4, or
6791 label - eax*5 + nnamed_sse_arguments*5 for AVX. */
6792 tmp_reg = gen_reg_rtx (Pmode);
6793 nsse_reg = gen_reg_rtx (Pmode);
6794 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6795 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6796 gen_rtx_MULT (Pmode, nsse_reg,
6797 GEN_INT (4))));
6799 /* vmovaps is one byte longer than movaps. */
6800 if (TARGET_AVX)
6801 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6802 gen_rtx_PLUS (Pmode, tmp_reg,
6803 nsse_reg)));
6805 if (cum->sse_regno)
6806 emit_move_insn
6807 (nsse_reg,
6808 gen_rtx_CONST (DImode,
6809 gen_rtx_PLUS (DImode,
6810 label_ref,
6811 GEN_INT (cum->sse_regno
6812 * (TARGET_AVX ? 5 : 4)))));
6813 else
6814 emit_move_insn (nsse_reg, label_ref);
6815 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6817 /* Compute the address of the memory block we save into. We always use a
6818 pointer pointing 127 bytes after the first byte to store - this is needed
6819 to keep the instruction size limited to 4 bytes (5 bytes for AVX) with a
6820 one byte displacement. */
6821 tmp_reg = gen_reg_rtx (Pmode);
6822 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6823 plus_constant (save_area,
6824 ix86_varargs_gpr_size + 127)));
6825 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6826 MEM_NOTRAP_P (mem) = 1;
6827 set_mem_alias_set (mem, set);
6828 set_mem_align (mem, BITS_PER_WORD);
6830 /* And finally do the dirty job! */
6831 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6832 GEN_INT (cum->sse_regno), label));
6836 static void
6837 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6839 alias_set_type set = get_varargs_alias_set ();
6840 int i;
6842 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6844 rtx reg, mem;
6846 mem = gen_rtx_MEM (Pmode,
6847 plus_constant (virtual_incoming_args_rtx,
6848 i * UNITS_PER_WORD));
6849 MEM_NOTRAP_P (mem) = 1;
6850 set_mem_alias_set (mem, set);
6852 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6853 emit_move_insn (mem, reg);
6857 static void
6858 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6859 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6860 int no_rtl)
6862 CUMULATIVE_ARGS next_cum;
6863 tree fntype;
6865 /* This argument doesn't appear to be used anymore. Which is good,
6866 because the old code here didn't suppress rtl generation. */
6867 gcc_assert (!no_rtl);
6869 if (!TARGET_64BIT)
6870 return;
6872 fntype = TREE_TYPE (current_function_decl);
6874 /* For varargs, we do not want to skip the dummy va_dcl argument.
6875 For stdargs, we do want to skip the last named argument. */
6876 next_cum = *cum;
6877 if (stdarg_p (fntype))
6878 function_arg_advance (&next_cum, mode, type, 1);
6880 if (cum->call_abi == MS_ABI)
6881 setup_incoming_varargs_ms_64 (&next_cum);
6882 else
6883 setup_incoming_varargs_64 (&next_cum);
6886 /* Checks if TYPE is of kind va_list char *. */
6888 static bool
6889 is_va_list_char_pointer (tree type)
6891 tree canonic;
6893 /* For 32-bit it is always true. */
6894 if (!TARGET_64BIT)
6895 return true;
6896 canonic = ix86_canonical_va_list_type (type);
6897 return (canonic == ms_va_list_type_node
6898 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6901 /* Implement va_start. */
6903 static void
6904 ix86_va_start (tree valist, rtx nextarg)
6906 HOST_WIDE_INT words, n_gpr, n_fpr;
6907 tree f_gpr, f_fpr, f_ovf, f_sav;
6908 tree gpr, fpr, ovf, sav, t;
6909 tree type;
6911 /* Only 64bit target needs something special. */
6912 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6914 std_expand_builtin_va_start (valist, nextarg);
6915 return;
6918 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6919 f_fpr = TREE_CHAIN (f_gpr);
6920 f_ovf = TREE_CHAIN (f_fpr);
6921 f_sav = TREE_CHAIN (f_ovf);
6923 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6924 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6925 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6926 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6927 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6929 /* Count number of gp and fp argument registers used. */
6930 words = crtl->args.info.words;
6931 n_gpr = crtl->args.info.regno;
6932 n_fpr = crtl->args.info.sse_regno;
6934 if (cfun->va_list_gpr_size)
6936 type = TREE_TYPE (gpr);
6937 t = build2 (MODIFY_EXPR, type,
6938 gpr, build_int_cst (type, n_gpr * 8));
6939 TREE_SIDE_EFFECTS (t) = 1;
6940 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6943 if (TARGET_SSE && cfun->va_list_fpr_size)
6945 type = TREE_TYPE (fpr);
6946 t = build2 (MODIFY_EXPR, type, fpr,
6947 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6948 TREE_SIDE_EFFECTS (t) = 1;
6949 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6952 /* Find the overflow area. */
6953 type = TREE_TYPE (ovf);
6954 t = make_tree (type, crtl->args.internal_arg_pointer);
6955 if (words != 0)
6956 t = build2 (POINTER_PLUS_EXPR, type, t,
6957 size_int (words * UNITS_PER_WORD));
6958 t = build2 (MODIFY_EXPR, type, ovf, t);
6959 TREE_SIDE_EFFECTS (t) = 1;
6960 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6962 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6964 /* Find the register save area.
6965 The function prologue saves it right above the stack frame. */
6966 type = TREE_TYPE (sav);
6967 t = make_tree (type, frame_pointer_rtx);
6968 if (!ix86_varargs_gpr_size)
6969 t = build2 (POINTER_PLUS_EXPR, type, t,
6970 size_int (-8 * X86_64_REGPARM_MAX));
6971 t = build2 (MODIFY_EXPR, type, sav, t);
6972 TREE_SIDE_EFFECTS (t) = 1;
6973 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
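/* Illustrative result of the expansion above: gp_offset starts at
   8 * <named integer registers used>, fp_offset at
   8 * X86_64_REGPARM_MAX + 16 * <named SSE registers used>,
   overflow_arg_area points just past the named stack arguments, and
   reg_save_area points at the register save block laid out by the
   prologue.  */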
6977 /* Implement va_arg. */
6979 static tree
6980 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6981 gimple_seq *post_p)
6983 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
6984 tree f_gpr, f_fpr, f_ovf, f_sav;
6985 tree gpr, fpr, ovf, sav, t;
6986 int size, rsize;
6987 tree lab_false, lab_over = NULL_TREE;
6988 tree addr, t2;
6989 rtx container;
6990 int indirect_p = 0;
6991 tree ptrtype;
6992 enum machine_mode nat_mode;
6993 int arg_boundary;
6995 /* Only 64bit target needs something special. */
6996 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6997 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6999 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7000 f_fpr = TREE_CHAIN (f_gpr);
7001 f_ovf = TREE_CHAIN (f_fpr);
7002 f_sav = TREE_CHAIN (f_ovf);
7004 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7005 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7006 valist = build_va_arg_indirect_ref (valist);
7007 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7008 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7009 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7011 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7012 if (indirect_p)
7013 type = build_pointer_type (type);
7014 size = int_size_in_bytes (type);
7015 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7017 nat_mode = type_natural_mode (type, NULL);
7018 switch (nat_mode)
7020 case V8SFmode:
7021 case V8SImode:
7022 case V32QImode:
7023 case V16HImode:
7024 case V4DFmode:
7025 case V4DImode:
7026 /* Unnamed 256bit vector mode parameters are passed on stack. */
7027 if (ix86_cfun_abi () == SYSV_ABI)
7029 container = NULL;
7030 break;
7033 default:
7034 container = construct_container (nat_mode, TYPE_MODE (type),
7035 type, 0, X86_64_REGPARM_MAX,
7036 X86_64_SSE_REGPARM_MAX, intreg,
7038 break;
7041 /* Pull the value out of the saved registers. */
7043 addr = create_tmp_var (ptr_type_node, "addr");
7045 if (container)
7047 int needed_intregs, needed_sseregs;
7048 bool need_temp;
7049 tree int_addr, sse_addr;
7051 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7052 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7054 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7056 need_temp = (!REG_P (container)
7057 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7058 || TYPE_ALIGN (type) > 128));
7060 /* In case we are passing a structure, verify that it is a consecutive block
7061 in the register save area. If not, we need to do moves. */
7062 if (!need_temp && !REG_P (container))
7064 /* Verify that all registers are strictly consecutive. */
7065 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7067 int i;
7069 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7071 rtx slot = XVECEXP (container, 0, i);
7072 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7073 || INTVAL (XEXP (slot, 1)) != i * 16)
7074 need_temp = 1;
7077 else
7079 int i;
7081 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7083 rtx slot = XVECEXP (container, 0, i);
7084 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7085 || INTVAL (XEXP (slot, 1)) != i * 8)
7086 need_temp = 1;
7090 if (!need_temp)
7092 int_addr = addr;
7093 sse_addr = addr;
7095 else
7097 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7098 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7101 /* First ensure that we fit completely in registers. */
7102 if (needed_intregs)
7104 t = build_int_cst (TREE_TYPE (gpr),
7105 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7106 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7107 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7108 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7109 gimplify_and_add (t, pre_p);
7111 if (needed_sseregs)
7113 t = build_int_cst (TREE_TYPE (fpr),
7114 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7115 + X86_64_REGPARM_MAX * 8);
7116 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7117 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7118 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7119 gimplify_and_add (t, pre_p);
7122 /* Compute index to start of area used for integer regs. */
7123 if (needed_intregs)
7125 /* int_addr = gpr + sav; */
7126 t = fold_convert (sizetype, gpr);
7127 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7128 gimplify_assign (int_addr, t, pre_p);
7130 if (needed_sseregs)
7132 /* sse_addr = fpr + sav; */
7133 t = fold_convert (sizetype, fpr);
7134 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7135 gimplify_assign (sse_addr, t, pre_p);
7137 if (need_temp)
7139 int i;
7140 tree temp = create_tmp_var (type, "va_arg_tmp");
7142 /* addr = &temp; */
7143 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7144 gimplify_assign (addr, t, pre_p);
7146 for (i = 0; i < XVECLEN (container, 0); i++)
7148 rtx slot = XVECEXP (container, 0, i);
7149 rtx reg = XEXP (slot, 0);
7150 enum machine_mode mode = GET_MODE (reg);
7151 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7152 tree addr_type = build_pointer_type (piece_type);
7153 tree daddr_type = build_pointer_type_for_mode (piece_type,
7154 ptr_mode, true);
7155 tree src_addr, src;
7156 int src_offset;
7157 tree dest_addr, dest;
7159 if (SSE_REGNO_P (REGNO (reg)))
7161 src_addr = sse_addr;
7162 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7164 else
7166 src_addr = int_addr;
7167 src_offset = REGNO (reg) * 8;
7169 src_addr = fold_convert (addr_type, src_addr);
7170 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7171 size_int (src_offset));
7172 src = build_va_arg_indirect_ref (src_addr);
7174 dest_addr = fold_convert (daddr_type, addr);
7175 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7176 size_int (INTVAL (XEXP (slot, 1))));
7177 dest = build_va_arg_indirect_ref (dest_addr);
7179 gimplify_assign (dest, src, pre_p);
7183 if (needed_intregs)
7185 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7186 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7187 gimplify_assign (gpr, t, pre_p);
7190 if (needed_sseregs)
7192 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7193 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7194 gimplify_assign (fpr, t, pre_p);
7197 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7199 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7202 /* ... otherwise out of the overflow area. */
7204 /* When we align a parameter on the stack for the caller, if the parameter
7205 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
7206 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We match the callee
7207 here with the caller. */
7208 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7209 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7210 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7212 /* Care for on-stack alignment if needed. */
7213 if (arg_boundary <= 64
7214 || integer_zerop (TYPE_SIZE (type)))
7215 t = ovf;
7216 else
7218 HOST_WIDE_INT align = arg_boundary / 8;
7219 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7220 size_int (align - 1));
7221 t = fold_convert (sizetype, t);
7222 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7223 size_int (-align));
7224 t = fold_convert (TREE_TYPE (ovf), t);
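/* Worked example (illustrative numbers): with arg_boundary of 256 bits,
   align is 32 bytes, so an overflow pointer of 0x1044 rounds up to
   (0x1044 + 31) & -32 == 0x1060. */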
7226 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7227 gimplify_assign (addr, t, pre_p);
7229 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7230 size_int (rsize * UNITS_PER_WORD));
7231 gimplify_assign (unshare_expr (ovf), t, pre_p);
7233 if (container)
7234 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7236 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7237 addr = fold_convert (ptrtype, addr);
7239 if (indirect_p)
7240 addr = build_va_arg_indirect_ref (addr);
7241 return build_va_arg_indirect_ref (addr);
7244 /* Return nonzero if OPNUM's MEM should be matched
7245 in movabs* patterns. */
7248 ix86_check_movabs (rtx insn, int opnum)
7250 rtx set, mem;
7252 set = PATTERN (insn);
7253 if (GET_CODE (set) == PARALLEL)
7254 set = XVECEXP (set, 0, 0);
7255 gcc_assert (GET_CODE (set) == SET);
7256 mem = XEXP (set, opnum);
7257 while (GET_CODE (mem) == SUBREG)
7258 mem = SUBREG_REG (mem);
7259 gcc_assert (MEM_P (mem));
7260 return (volatile_ok || !MEM_VOLATILE_P (mem));
7263 /* Initialize the table of extra 80387 mathematical constants. */
7265 static void
7266 init_ext_80387_constants (void)
7268 static const char * cst[5] =
7270 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7271 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7272 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7273 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7274 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7276 int i;
7278 for (i = 0; i < 5; i++)
7280 real_from_string (&ext_80387_constants_table[i], cst[i]);
7281 /* Ensure each constant is rounded to XFmode precision. */
7282 real_convert (&ext_80387_constants_table[i],
7283 XFmode, &ext_80387_constants_table[i]);
7286 ext_80387_constants_init = 1;
7289 /* Return true if the constant is something that can be loaded with
7290 a special instruction. */
7293 standard_80387_constant_p (rtx x)
7295 enum machine_mode mode = GET_MODE (x);
7297 REAL_VALUE_TYPE r;
7299 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7300 return -1;
7302 if (x == CONST0_RTX (mode))
7303 return 1;
7304 if (x == CONST1_RTX (mode))
7305 return 2;
7307 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7309 /* For XFmode constants, try to find a special 80387 instruction when
7310 optimizing for size or on those CPUs that benefit from them. */
7311 if (mode == XFmode
7312 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7314 int i;
7316 if (! ext_80387_constants_init)
7317 init_ext_80387_constants ();
7319 for (i = 0; i < 5; i++)
7320 if (real_identical (&r, &ext_80387_constants_table[i]))
7321 return i + 3;
7324 /* Load of the constant -0.0 or -1.0 will be split as
7325 fldz;fchs or fld1;fchs sequence. */
7326 if (real_isnegzero (&r))
7327 return 8;
7328 if (real_identical (&r, &dconstm1))
7329 return 9;
7331 return 0;
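/* Summary of the return values above, as consumed by
   standard_80387_constant_opcode below: 0 = not a special constant,
   1 = fldz, 2 = fld1, 3 = fldlg2, 4 = fldln2, 5 = fldl2e, 6 = fldl2t,
   7 = fldpi, 8 = fldz;fchs (-0.0), 9 = fld1;fchs (-1.0). */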
7334 /* Return the opcode of the special instruction to be used to load
7335 the constant X. */
7337 const char *
7338 standard_80387_constant_opcode (rtx x)
7340 switch (standard_80387_constant_p (x))
7342 case 1:
7343 return "fldz";
7344 case 2:
7345 return "fld1";
7346 case 3:
7347 return "fldlg2";
7348 case 4:
7349 return "fldln2";
7350 case 5:
7351 return "fldl2e";
7352 case 6:
7353 return "fldl2t";
7354 case 7:
7355 return "fldpi";
7356 case 8:
7357 case 9:
7358 return "#";
7359 default:
7360 gcc_unreachable ();
7364 /* Return the CONST_DOUBLE representing the 80387 constant that is
7365 loaded by the specified special instruction. The argument IDX
7366 matches the return value from standard_80387_constant_p. */
7369 standard_80387_constant_rtx (int idx)
7371 int i;
7373 if (! ext_80387_constants_init)
7374 init_ext_80387_constants ();
7376 switch (idx)
7378 case 3:
7379 case 4:
7380 case 5:
7381 case 6:
7382 case 7:
7383 i = idx - 3;
7384 break;
7386 default:
7387 gcc_unreachable ();
7390 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7391 XFmode);
7394 /* Return 1 if X is all 0s and 2 if X is all 1s
7395 in a supported SSE vector mode. */
7398 standard_sse_constant_p (rtx x)
7400 enum machine_mode mode = GET_MODE (x);
7402 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7403 return 1;
7404 if (vector_all_ones_operand (x, mode))
7405 switch (mode)
7407 case V16QImode:
7408 case V8HImode:
7409 case V4SImode:
7410 case V2DImode:
7411 if (TARGET_SSE2)
7412 return 2;
7413 default:
7414 break;
7417 return 0;
7420 /* Return the opcode of the special instruction to be used to load
7421 the constant X. */
7423 const char *
7424 standard_sse_constant_opcode (rtx insn, rtx x)
7426 switch (standard_sse_constant_p (x))
7428 case 1:
7429 switch (get_attr_mode (insn))
7431 case MODE_V4SF:
7432 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7433 case MODE_V2DF:
7434 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7435 case MODE_TI:
7436 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7437 case MODE_V8SF:
7438 return "vxorps\t%x0, %x0, %x0";
7439 case MODE_V4DF:
7440 return "vxorpd\t%x0, %x0, %x0";
7441 case MODE_OI:
7442 return "vpxor\t%x0, %x0, %x0";
7443 default:
7444 break;
7446 case 2:
7447 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7448 default:
7449 break;
7451 gcc_unreachable ();
7454 /* Returns 1 if OP contains a symbol reference */
7457 symbolic_reference_mentioned_p (rtx op)
7459 const char *fmt;
7460 int i;
7462 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7463 return 1;
7465 fmt = GET_RTX_FORMAT (GET_CODE (op));
7466 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7468 if (fmt[i] == 'E')
7470 int j;
7472 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7473 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7474 return 1;
7477 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7478 return 1;
7481 return 0;
7484 /* Return 1 if it is appropriate to emit `ret' instructions in the
7485 body of a function. Do this only if the epilogue is simple, needing a
7486 couple of insns. Prior to reloading, we can't tell how many registers
7487 must be saved, so return 0 then. Return 0 if there is no frame
7488 marker to de-allocate. */
7491 ix86_can_use_return_insn_p (void)
7493 struct ix86_frame frame;
7495 if (! reload_completed || frame_pointer_needed)
7496 return 0;
7498 /* Don't allow more than 32k pop, since that's all we can do
7499 with one instruction. */
7500 if (crtl->args.pops_args
7501 && crtl->args.size >= 32768)
7502 return 0;
7504 ix86_compute_frame_layout (&frame);
7505 return frame.to_allocate == 0 && frame.padding0 == 0
7506 && (frame.nregs + frame.nsseregs) == 0;
7509 /* Value should be nonzero if functions must have frame pointers.
7510 Zero means the frame pointer need not be set up (and parms may
7511 be accessed via the stack pointer) in functions that seem suitable. */
7513 static bool
7514 ix86_frame_pointer_required (void)
7516 /* If we accessed previous frames, then the generated code expects
7517 to be able to access the saved ebp value in our frame. */
7518 if (cfun->machine->accesses_prev_frame)
7519 return true;
7521 /* Several x86 OSes need a frame pointer for other reasons,
7522 usually pertaining to setjmp. */
7523 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7524 return true;
7526 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7527 the frame pointer by default. Turn it back on now if we've not
7528 got a leaf function. */
7529 if (TARGET_OMIT_LEAF_FRAME_POINTER
7530 && (!current_function_is_leaf
7531 || ix86_current_function_calls_tls_descriptor))
7532 return true;
7534 if (crtl->profile)
7535 return true;
7537 return false;
7540 /* Record that the current function accesses previous call frames. */
7542 void
7543 ix86_setup_frame_addresses (void)
7545 cfun->machine->accesses_prev_frame = 1;
7548 #ifndef USE_HIDDEN_LINKONCE
7549 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7550 # define USE_HIDDEN_LINKONCE 1
7551 # else
7552 # define USE_HIDDEN_LINKONCE 0
7553 # endif
7554 #endif
7556 static int pic_labels_used;
7558 /* Fills in the label name that should be used for a pc thunk for
7559 the given register. */
7561 static void
7562 get_pc_thunk_name (char name[32], unsigned int regno)
7564 gcc_assert (!TARGET_64BIT);
7566 if (USE_HIDDEN_LINKONCE)
7567 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7568 else
7569 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7573 /* This function generates, for -fpic, thunks that load a register with
7574 the return address of the caller and then return. */
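/* A sketch of the thunk emitted below for, e.g., %ebx with
   USE_HIDDEN_LINKONCE (illustrative, not verbatim compiler output):

	__i686.get_pc_thunk.bx:
		movl	(%esp), %ebx
		ret
*/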
7576 void
7577 ix86_file_end (void)
7579 rtx xops[2];
7580 int regno;
7582 for (regno = 0; regno < 8; ++regno)
7584 char name[32];
7586 if (! ((pic_labels_used >> regno) & 1))
7587 continue;
7589 get_pc_thunk_name (name, regno);
7591 #if TARGET_MACHO
7592 if (TARGET_MACHO)
7594 switch_to_section (darwin_sections[text_coal_section]);
7595 fputs ("\t.weak_definition\t", asm_out_file);
7596 assemble_name (asm_out_file, name);
7597 fputs ("\n\t.private_extern\t", asm_out_file);
7598 assemble_name (asm_out_file, name);
7599 fputs ("\n", asm_out_file);
7600 ASM_OUTPUT_LABEL (asm_out_file, name);
7602 else
7603 #endif
7604 if (USE_HIDDEN_LINKONCE)
7606 tree decl;
7608 decl = build_decl (BUILTINS_LOCATION,
7609 FUNCTION_DECL, get_identifier (name),
7610 error_mark_node);
7611 TREE_PUBLIC (decl) = 1;
7612 TREE_STATIC (decl) = 1;
7613 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7615 (*targetm.asm_out.unique_section) (decl, 0);
7616 switch_to_section (get_named_section (decl, NULL, 0));
7618 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7619 fputs ("\t.hidden\t", asm_out_file);
7620 assemble_name (asm_out_file, name);
7621 putc ('\n', asm_out_file);
7622 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7624 else
7626 switch_to_section (text_section);
7627 ASM_OUTPUT_LABEL (asm_out_file, name);
7630 xops[0] = gen_rtx_REG (Pmode, regno);
7631 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7632 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7633 output_asm_insn ("ret", xops);
7636 if (NEED_INDICATE_EXEC_STACK)
7637 file_end_indicate_exec_stack ();
7640 /* Emit code for the SET_GOT patterns. */
7642 const char *
7643 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7645 rtx xops[3];
7647 xops[0] = dest;
7649 if (TARGET_VXWORKS_RTP && flag_pic)
7651 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7652 xops[2] = gen_rtx_MEM (Pmode,
7653 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7654 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7656 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7657 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7658 an unadorned address. */
7659 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7660 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7661 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7662 return "";
7665 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7667 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7669 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7671 if (!flag_pic)
7672 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7673 else
7674 output_asm_insn ("call\t%a2", xops);
7676 #if TARGET_MACHO
7677 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7678 is what will be referenced by the Mach-O PIC subsystem. */
7679 if (!label)
7680 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7681 #endif
7683 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7684 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7686 if (flag_pic)
7687 output_asm_insn ("pop%z0\t%0", xops);
7689 else
7691 char name[32];
7692 get_pc_thunk_name (name, REGNO (dest));
7693 pic_labels_used |= 1 << REGNO (dest);
7695 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7696 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7697 output_asm_insn ("call\t%X2", xops);
7698 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7699 is what will be referenced by the Mach-O PIC subsystem. */
7700 #if TARGET_MACHO
7701 if (!label)
7702 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7703 else
7704 targetm.asm_out.internal_label (asm_out_file, "L",
7705 CODE_LABEL_NUMBER (label));
7706 #endif
7709 if (TARGET_MACHO)
7710 return "";
7712 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7713 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7714 else
7715 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7717 return "";
7720 /* Generate a "push" pattern for input ARG. */
7722 static rtx
7723 gen_push (rtx arg)
7725 if (ix86_cfa_state->reg == stack_pointer_rtx)
7726 ix86_cfa_state->offset += UNITS_PER_WORD;
7728 return gen_rtx_SET (VOIDmode,
7729 gen_rtx_MEM (Pmode,
7730 gen_rtx_PRE_DEC (Pmode,
7731 stack_pointer_rtx)),
7732 arg);
7735 /* Return >= 0 if there is an unused call-clobbered register available
7736 for the entire function. */
7738 static unsigned int
7739 ix86_select_alt_pic_regnum (void)
7741 if (current_function_is_leaf && !crtl->profile
7742 && !ix86_current_function_calls_tls_descriptor)
7744 int i, drap;
7745 /* Can't use the same register for both PIC and DRAP. */
7746 if (crtl->drap_reg)
7747 drap = REGNO (crtl->drap_reg);
7748 else
7749 drap = -1;
7750 for (i = 2; i >= 0; --i)
7751 if (i != drap && !df_regs_ever_live_p (i))
7752 return i;
7755 return INVALID_REGNUM;
7758 /* Return 1 if we need to save REGNO. */
7759 static int
7760 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7762 if (pic_offset_table_rtx
7763 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7764 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7765 || crtl->profile
7766 || crtl->calls_eh_return
7767 || crtl->uses_const_pool))
7769 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7770 return 0;
7771 return 1;
7774 if (crtl->calls_eh_return && maybe_eh_return)
7776 unsigned i;
7777 for (i = 0; ; i++)
7779 unsigned test = EH_RETURN_DATA_REGNO (i);
7780 if (test == INVALID_REGNUM)
7781 break;
7782 if (test == regno)
7783 return 1;
7787 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7788 return 1;
7790 return (df_regs_ever_live_p (regno)
7791 && !call_used_regs[regno]
7792 && !fixed_regs[regno]
7793 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7796 /* Return the number of saved general purpose registers. */
7798 static int
7799 ix86_nsaved_regs (void)
7801 int nregs = 0;
7802 int regno;
7804 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7805 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7806 nregs ++;
7807 return nregs;
7810 /* Return the number of saved SSE registers. */
7812 static int
7813 ix86_nsaved_sseregs (void)
7815 int nregs = 0;
7816 int regno;
7818 if (ix86_cfun_abi () != MS_ABI)
7819 return 0;
7820 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7821 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7822 nregs ++;
7823 return nregs;
7826 /* Given FROM and TO register numbers, say whether this elimination is
7827 allowed. If stack alignment is needed, we can only replace argument
7828 pointer with hard frame pointer, or replace frame pointer with stack
7829 pointer. Otherwise, frame pointer elimination is automatically
7830 handled and all other eliminations are valid. */
7832 static bool
7833 ix86_can_eliminate (const int from, const int to)
7835 if (stack_realign_fp)
7836 return ((from == ARG_POINTER_REGNUM
7837 && to == HARD_FRAME_POINTER_REGNUM)
7838 || (from == FRAME_POINTER_REGNUM
7839 && to == STACK_POINTER_REGNUM));
7840 else
7841 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
7844 /* Return the offset between two registers, one to be eliminated, and the other
7845 its replacement, at the start of a routine. */
7847 HOST_WIDE_INT
7848 ix86_initial_elimination_offset (int from, int to)
7850 struct ix86_frame frame;
7851 ix86_compute_frame_layout (&frame);
7853 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7854 return frame.hard_frame_pointer_offset;
7855 else if (from == FRAME_POINTER_REGNUM
7856 && to == HARD_FRAME_POINTER_REGNUM)
7857 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7858 else
7860 gcc_assert (to == STACK_POINTER_REGNUM);
7862 if (from == ARG_POINTER_REGNUM)
7863 return frame.stack_pointer_offset;
7865 gcc_assert (from == FRAME_POINTER_REGNUM);
7866 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7870 /* In a dynamically-aligned function, we can't know the offset from
7871 stack pointer to frame pointer, so we must ensure that setjmp
7872 eliminates fp against the hard fp (%ebp) rather than trying to
7873 index from %esp up to the top of the frame across a gap that is
7874 of unknown (at compile-time) size. */
7875 static rtx
7876 ix86_builtin_setjmp_frame_value (void)
7878 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7881 /* Fill structure ix86_frame about frame of currently computed function. */
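/* A rough sketch of the layout computed below, from higher to lower
   addresses (derived from the offset arithmetic that follows; the
   padding fields may be zero):

	return address
	[pushed static chain]
	[saved frame pointer]		<- hard_frame_pointer_offset
	general register save area
	padding0, SSE register save area
	va_arg register save area
	padding1			<- frame_pointer_offset
	local variables (get_frame_size ())
	outgoing arguments, padding2	<- stack_pointer_offset  */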
7883 static void
7884 ix86_compute_frame_layout (struct ix86_frame *frame)
7886 unsigned int stack_alignment_needed;
7887 HOST_WIDE_INT offset;
7888 unsigned int preferred_alignment;
7889 HOST_WIDE_INT size = get_frame_size ();
7891 frame->nregs = ix86_nsaved_regs ();
7892 frame->nsseregs = ix86_nsaved_sseregs ();
7894 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7895 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7897 /* The MS ABI seems to require stack alignment to always be 16, except for
7898 function prologues. */
7899 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
7901 preferred_alignment = 16;
7902 stack_alignment_needed = 16;
7903 crtl->preferred_stack_boundary = 128;
7904 crtl->stack_alignment_needed = 128;
7907 gcc_assert (!size || stack_alignment_needed);
7908 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
7909 gcc_assert (preferred_alignment <= stack_alignment_needed);
7911 /* During reload iterations the number of registers saved can change.
7912 Recompute the value as needed. Do not recompute when the number of registers
7913 didn't change, as reload makes multiple calls to this function and does not
7914 expect the decision to change within a single iteration. */
7915 if (!optimize_function_for_size_p (cfun)
7916 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
7918 int count = frame->nregs;
7920 cfun->machine->use_fast_prologue_epilogue_nregs = count;
7921 /* The fast prologue uses move instead of push to save registers. This
7922 is significantly longer, but also executes faster as modern hardware
7923 can execute the moves in parallel, but can't do that for push/pop.
7925 Be careful about choosing what prologue to emit: When function takes
7926 many instructions to execute we may use slow version as well as in
7927 case function is known to be outside hot spot (this is known with
7928 feedback only). Weight the size of function by number of registers
7929 to save as it is cheap to use one or two push instructions but very
7930 slow to use many of them. */
7931 if (count)
7932 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
7933 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
7934 || (flag_branch_probabilities
7935 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
7936 cfun->machine->use_fast_prologue_epilogue = false;
7937 else
7938 cfun->machine->use_fast_prologue_epilogue
7939 = !expensive_function_p (count);
7941 if (TARGET_PROLOGUE_USING_MOVE
7942 && cfun->machine->use_fast_prologue_epilogue)
7943 frame->save_regs_using_mov = true;
7944 else
7945 frame->save_regs_using_mov = false;
7947 /* Skip return address. */
7948 offset = UNITS_PER_WORD;
7950 /* Skip pushed static chain. */
7951 if (ix86_static_chain_on_stack)
7952 offset += UNITS_PER_WORD;
7954 /* Skip saved base pointer. */
7955 if (frame_pointer_needed)
7956 offset += UNITS_PER_WORD;
7958 frame->hard_frame_pointer_offset = offset;
7960 /* Align the offset because the realigned frame starts from
7961 here. */
7962 if (stack_realign_fp)
7963 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
7965 /* Register save area */
7966 offset += frame->nregs * UNITS_PER_WORD;
7968 /* Align SSE reg save area. */
7969 if (frame->nsseregs)
7970 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
7971 else
7972 frame->padding0 = 0;
7974 /* SSE register save area. */
7975 offset += frame->padding0 + frame->nsseregs * 16;
7977 /* Va-arg area */
7978 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
7979 offset += frame->va_arg_size;
7981 /* Align start of frame for local function. */
7982 frame->padding1 = ((offset + stack_alignment_needed - 1)
7983 & -stack_alignment_needed) - offset;
7985 offset += frame->padding1;
7987 /* Frame pointer points here. */
7988 frame->frame_pointer_offset = offset;
7990 offset += size;
7992 /* Add outgoing arguments area. Can be skipped if we eliminated
7993 all the function calls as dead code.
7994 Skipping is however impossible when function calls alloca. Alloca
7995 expander assumes that last crtl->outgoing_args_size
7996 of stack frame are unused. */
7997 if (ACCUMULATE_OUTGOING_ARGS
7998 && (!current_function_is_leaf || cfun->calls_alloca
7999 || ix86_current_function_calls_tls_descriptor))
8001 offset += crtl->outgoing_args_size;
8002 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8004 else
8005 frame->outgoing_arguments_size = 0;
8007 /* Align stack boundary. Only needed if we're calling another function
8008 or using alloca. */
8009 if (!current_function_is_leaf || cfun->calls_alloca
8010 || ix86_current_function_calls_tls_descriptor)
8011 frame->padding2 = ((offset + preferred_alignment - 1)
8012 & -preferred_alignment) - offset;
8013 else
8014 frame->padding2 = 0;
8016 offset += frame->padding2;
8018 /* We've reached end of stack frame. */
8019 frame->stack_pointer_offset = offset;
8021 /* Size prologue needs to allocate. */
8022 frame->to_allocate =
8023 (size + frame->padding1 + frame->padding2
8024 + frame->outgoing_arguments_size + frame->va_arg_size);
8026 if ((!frame->to_allocate && frame->nregs <= 1)
8027 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8028 frame->save_regs_using_mov = false;
8030 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8031 && current_function_sp_is_unchanging
8032 && current_function_is_leaf
8033 && !ix86_current_function_calls_tls_descriptor)
8035 frame->red_zone_size = frame->to_allocate;
8036 if (frame->save_regs_using_mov)
8037 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8038 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8039 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8041 else
8042 frame->red_zone_size = 0;
8043 frame->to_allocate -= frame->red_zone_size;
8044 frame->stack_pointer_offset -= frame->red_zone_size;
8047 /* Emit code to save registers in the prologue. */
8049 static void
8050 ix86_emit_save_regs (void)
8052 unsigned int regno;
8053 rtx insn;
8055 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8056 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8058 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8059 RTX_FRAME_RELATED_P (insn) = 1;
8063 /* Emit code to save registers using MOV insns. The first register
8064 is stored at POINTER + OFFSET. */
8065 static void
8066 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8068 unsigned int regno;
8069 rtx insn;
8071 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8072 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8074 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8075 Pmode, offset),
8076 gen_rtx_REG (Pmode, regno));
8077 RTX_FRAME_RELATED_P (insn) = 1;
8078 offset += UNITS_PER_WORD;
8082 /* Emit code to save SSE registers using MOV insns. The first register
8083 is stored at POINTER + OFFSET. */
8084 static void
8085 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8087 unsigned int regno;
8088 rtx insn;
8089 rtx mem;
8091 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8092 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8094 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8095 set_mem_align (mem, 128);
8096 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8097 RTX_FRAME_RELATED_P (insn) = 1;
8098 offset += 16;
8102 static GTY(()) rtx queued_cfa_restores;
8104 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next stack
8105 manipulation insn. Don't add it if the previously
8106 saved value will be left untouched within stack red-zone till return,
8107 as unwinders can find the same value in the register and
8108 on the stack. */
8110 static void
8111 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8113 if (TARGET_RED_ZONE
8114 && !TARGET_64BIT_MS_ABI
8115 && red_offset + RED_ZONE_SIZE >= 0
8116 && crtl->args.pops_args < 65536)
8117 return;
8119 if (insn)
8121 add_reg_note (insn, REG_CFA_RESTORE, reg);
8122 RTX_FRAME_RELATED_P (insn) = 1;
8124 else
8125 queued_cfa_restores
8126 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8129 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
8131 static void
8132 ix86_add_queued_cfa_restore_notes (rtx insn)
8134 rtx last;
8135 if (!queued_cfa_restores)
8136 return;
8137 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8139 XEXP (last, 1) = REG_NOTES (insn);
8140 REG_NOTES (insn) = queued_cfa_restores;
8141 queued_cfa_restores = NULL_RTX;
8142 RTX_FRAME_RELATED_P (insn) = 1;
8145 /* Expand prologue or epilogue stack adjustment.
8146 The pattern exists to put a dependency on all ebp-based memory accesses.
8147 STYLE should be negative if instructions should be marked as frame related,
8148 zero if %r11 register is live and cannot be freely used and positive
8149 otherwise. */
8151 static void
8152 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8153 int style, bool set_cfa)
8155 rtx insn;
8157 if (! TARGET_64BIT)
8158 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8159 else if (x86_64_immediate_operand (offset, DImode))
8160 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8161 else
8163 rtx r11;
8164 /* r11 is used by indirect sibcall return as well, set before the
8165 epilogue and used after the epilogue. ATM indirect sibcall
8166 shouldn't be used together with huge frame sizes in one
8167 function because of the frame_size check in sibcall.c. */
8168 gcc_assert (style);
8169 r11 = gen_rtx_REG (DImode, R11_REG);
8170 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8171 if (style < 0)
8172 RTX_FRAME_RELATED_P (insn) = 1;
8173 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8174 offset));
8177 if (style >= 0)
8178 ix86_add_queued_cfa_restore_notes (insn);
8180 if (set_cfa)
8182 rtx r;
8184 gcc_assert (ix86_cfa_state->reg == src);
8185 ix86_cfa_state->offset += INTVAL (offset);
8186 ix86_cfa_state->reg = dest;
8188 r = gen_rtx_PLUS (Pmode, src, offset);
8189 r = gen_rtx_SET (VOIDmode, dest, r);
8190 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8191 RTX_FRAME_RELATED_P (insn) = 1;
8193 else if (style < 0)
8194 RTX_FRAME_RELATED_P (insn) = 1;
8197 /* Find an available register to be used as dynamic realign argument
8198 pointer register. Such a register will be written in the prologue and
8199 used at the beginning of the body, so it must not be
8200 1. parameter passing register.
8201 2. GOT pointer.
8202 We reuse static-chain register if it is available. Otherwise, we
8203 use DI for i386 and R13 for x86-64. We chose R13 since it has
8204 shorter encoding.
8206 Return: the regno of chosen register. */
8208 static unsigned int
8209 find_drap_reg (void)
8211 tree decl = cfun->decl;
8213 if (TARGET_64BIT)
8215 /* Use R13 for nested functions or functions that need a static chain.
8216 Since a function with a tail call may use any caller-saved
8217 registers in the epilogue, DRAP must not use a caller-saved
8218 register in such a case. */
8219 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8220 return R13_REG;
8222 return R10_REG;
8224 else
8226 /* Use DI for nested functions or functions that need a static chain.
8227 Since a function with a tail call may use any caller-saved
8228 registers in the epilogue, DRAP must not use a caller-saved
8229 register in such a case. */
8230 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8231 return DI_REG;
8233 /* Reuse static chain register if it isn't used for parameter
8234 passing. */
8235 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8236 && !lookup_attribute ("fastcall",
8237 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8238 return CX_REG;
8239 else
8240 return DI_REG;
8244 /* Return minimum incoming stack alignment. */
8246 static unsigned int
8247 ix86_minimum_incoming_stack_boundary (bool sibcall)
8249 unsigned int incoming_stack_boundary;
8251 /* Prefer the one specified at command line. */
8252 if (ix86_user_incoming_stack_boundary)
8253 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8254 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
8255 if -mstackrealign is used, this isn't a sibcall check, and the
8256 estimated stack alignment is 128 bits. */
8257 else if (!sibcall
8258 && !TARGET_64BIT
8259 && ix86_force_align_arg_pointer
8260 && crtl->stack_alignment_estimated == 128)
8261 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8262 else
8263 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8265 /* Incoming stack alignment can be changed on individual functions
8266 via force_align_arg_pointer attribute. We use the smallest
8267 incoming stack boundary. */
8268 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8269 && lookup_attribute (ix86_force_align_arg_pointer_string,
8270 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8271 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8273 /* The incoming stack frame has to be aligned at least at
8274 parm_stack_boundary. */
8275 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8276 incoming_stack_boundary = crtl->parm_stack_boundary;
8278 /* Stack at entrance of main is aligned by runtime. We use the
8279 smallest incoming stack boundary. */
8280 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8281 && DECL_NAME (current_function_decl)
8282 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8283 && DECL_FILE_SCOPE_P (current_function_decl))
8284 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8286 return incoming_stack_boundary;
8289 /* Update incoming stack boundary and estimated stack alignment. */
8291 static void
8292 ix86_update_stack_boundary (void)
8294 ix86_incoming_stack_boundary
8295 = ix86_minimum_incoming_stack_boundary (false);
8297 /* x86_64 vararg needs 16byte stack alignment for register save
8298 area. */
8299 if (TARGET_64BIT
8300 && cfun->stdarg
8301 && crtl->stack_alignment_estimated < 128)
8302 crtl->stack_alignment_estimated = 128;
8305 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8306 needed or an rtx for DRAP otherwise. */
8308 static rtx
8309 ix86_get_drap_rtx (void)
8311 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8312 crtl->need_drap = true;
8314 if (stack_realign_drap)
8316 /* Assign DRAP to vDRAP and returns vDRAP */
8317 unsigned int regno = find_drap_reg ();
8318 rtx drap_vreg;
8319 rtx arg_ptr;
8320 rtx seq, insn;
8322 arg_ptr = gen_rtx_REG (Pmode, regno);
8323 crtl->drap_reg = arg_ptr;
8325 start_sequence ();
8326 drap_vreg = copy_to_reg (arg_ptr);
8327 seq = get_insns ();
8328 end_sequence ();
8330 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8331 RTX_FRAME_RELATED_P (insn) = 1;
8332 return drap_vreg;
8334 else
8335 return NULL;
8338 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8340 static rtx
8341 ix86_internal_arg_pointer (void)
8343 return virtual_incoming_args_rtx;
8346 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8347 to be generated in correct form. */
8348 static void
8349 ix86_finalize_stack_realign_flags (void)
8351 /* Check if stack realign is really needed after reload, and
8352 stores result in cfun */
8353 unsigned int incoming_stack_boundary
8354 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8355 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8356 unsigned int stack_realign = (incoming_stack_boundary
8357 < (current_function_is_leaf
8358 ? crtl->max_used_stack_slot_alignment
8359 : crtl->stack_alignment_needed));
8361 if (crtl->stack_realign_finalized)
8363 /* After stack_realign_needed is finalized, we can no longer
8364 change it. */
8365 gcc_assert (crtl->stack_realign_needed == stack_realign);
8367 else
8369 crtl->stack_realign_needed = stack_realign;
8370 crtl->stack_realign_finalized = true;
8374 /* Expand the prologue into a bunch of separate insns. */
8376 void
8377 ix86_expand_prologue (void)
8379 rtx insn;
8380 bool pic_reg_used;
8381 struct ix86_frame frame;
8382 HOST_WIDE_INT allocate;
8383 int gen_frame_pointer = frame_pointer_needed;
8385 ix86_finalize_stack_realign_flags ();
8387 /* DRAP should not coexist with stack_realign_fp */
8388 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8390 /* Initialize CFA state for before the prologue. */
8391 ix86_cfa_state->reg = stack_pointer_rtx;
8392 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8394 ix86_compute_frame_layout (&frame);
8396 if (ix86_function_ms_hook_prologue (current_function_decl))
8398 rtx push, mov;
8400 /* Make sure the function starts with
8401 8b ff movl.s %edi,%edi
8402 55 push %ebp
8403 8b ec movl.s %esp,%ebp
8405 This matches the hookable function prologue in Win32 API
8406 functions in Microsoft Windows XP Service Pack 2 and newer.
8407 Wine uses this to enable Windows apps to hook the Win32 API
8408 functions provided by Wine. */
8409 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8410 gen_rtx_REG (SImode, DI_REG)));
8411 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8412 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8413 stack_pointer_rtx));
8415 if (frame_pointer_needed && !(crtl->drap_reg
8416 && crtl->stack_realign_needed))
8418 /* The push %ebp and movl.s %esp, %ebp already set up
8419 the frame pointer. No need to do this again. */
8420 gen_frame_pointer = 0;
8421 RTX_FRAME_RELATED_P (push) = 1;
8422 RTX_FRAME_RELATED_P (mov) = 1;
8423 if (ix86_cfa_state->reg == stack_pointer_rtx)
8424 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8426 else
8427 /* If the frame pointer is not needed, pop %ebp again. This
8428 could be optimized for cases where ebp needs to be backed up
8429 for some other reason. If stack realignment is needed, pop
8430 the base pointer again, align the stack, and later regenerate
8431 the frame pointer setup. The frame pointer generated by the
8432 hook prologue is not aligned, so it can't be used. */
8433 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8436 /* The first insn of a function that accepts its static chain on the
8437 stack is to push the register that would be filled in by a direct
8438 call. This insn will be skipped by the trampoline. */
8439 if (ix86_static_chain_on_stack)
8441 rtx t;
8443 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8444 emit_insn (gen_blockage ());
8446 /* We don't want to interpret this push insn as a register save,
8447 only as a stack adjustment. The real copy of the register as
8448 a save will be done later, if needed. */
8449 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8450 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8451 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8452 RTX_FRAME_RELATED_P (insn) = 1;
8455 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8456 DRAP is needed and stack realignment is really needed after reload. */
8457 if (crtl->drap_reg && crtl->stack_realign_needed)
8459 rtx x, y;
8460 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8461 int param_ptr_offset = UNITS_PER_WORD;
8463 if (ix86_static_chain_on_stack)
8464 param_ptr_offset += UNITS_PER_WORD;
8465 if (!call_used_regs[REGNO (crtl->drap_reg)])
8466 param_ptr_offset += UNITS_PER_WORD;
8468 gcc_assert (stack_realign_drap);
8470 /* Grab the argument pointer. */
8471 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8472 y = crtl->drap_reg;
8474 /* Only need to push parameter pointer reg if it is caller
8475 saved reg */
8476 if (!call_used_regs[REGNO (crtl->drap_reg)])
8478 /* Push arg pointer reg */
8479 insn = emit_insn (gen_push (y));
8480 RTX_FRAME_RELATED_P (insn) = 1;
8483 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8484 RTX_FRAME_RELATED_P (insn) = 1;
8485 ix86_cfa_state->reg = crtl->drap_reg;
8487 /* Align the stack. */
8488 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8489 stack_pointer_rtx,
8490 GEN_INT (-align_bytes)));
8491 RTX_FRAME_RELATED_P (insn) = 1;
8493 /* Replicate the return address on the stack so that return
8494 address can be reached via (argp - 1) slot. This is needed
8495 to implement macro RETURN_ADDR_RTX and intrinsic function
8496 expand_builtin_return_addr etc. */
8497 x = crtl->drap_reg;
8498 x = gen_frame_mem (Pmode,
8499 plus_constant (x, -UNITS_PER_WORD));
8500 insn = emit_insn (gen_push (x));
8501 RTX_FRAME_RELATED_P (insn) = 1;
8504 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8505 slower on all targets. Also sdb doesn't like it. */
8507 if (gen_frame_pointer)
8509 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8510 RTX_FRAME_RELATED_P (insn) = 1;
8512 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8513 RTX_FRAME_RELATED_P (insn) = 1;
8515 if (ix86_cfa_state->reg == stack_pointer_rtx)
8516 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8519 if (stack_realign_fp)
8521 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8522 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8524 /* Align the stack. */
8525 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8526 stack_pointer_rtx,
8527 GEN_INT (-align_bytes)));
8528 RTX_FRAME_RELATED_P (insn) = 1;
8531 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8533 if (!frame.save_regs_using_mov)
8534 ix86_emit_save_regs ();
8535 else
8536 allocate += frame.nregs * UNITS_PER_WORD;
8538 /* When using red zone we may start register saving before allocating
8539 the stack frame saving one cycle of the prologue. However I will
8540 avoid doing this if I am going to have to probe the stack since
8541 at least on x86_64 the stack probe can turn into a call that clobbers
8542 a red zone location */
8543 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8544 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8545 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8546 && !crtl->stack_realign_needed)
8547 ? hard_frame_pointer_rtx
8548 : stack_pointer_rtx,
8549 -frame.nregs * UNITS_PER_WORD);
8551 if (allocate == 0)
8553 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8554 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8555 GEN_INT (-allocate), -1,
8556 ix86_cfa_state->reg == stack_pointer_rtx);
8557 else
8559 /* Only valid for Win32. */
8560 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8561 bool eax_live;
8562 rtx t;
8564 gcc_assert (!TARGET_64BIT || cfun->machine->call_abi == MS_ABI);
8566 if (cfun->machine->call_abi == MS_ABI)
8567 eax_live = false;
8568 else
8569 eax_live = ix86_eax_live_at_start_p ();
8571 if (eax_live)
8573 emit_insn (gen_push (eax));
8574 allocate -= UNITS_PER_WORD;
8577 emit_move_insn (eax, GEN_INT (allocate));
8579 if (TARGET_64BIT)
8580 insn = gen_allocate_stack_worker_64 (eax, eax);
8581 else
8582 insn = gen_allocate_stack_worker_32 (eax, eax);
8583 insn = emit_insn (insn);
8585 if (ix86_cfa_state->reg == stack_pointer_rtx)
8587 ix86_cfa_state->offset += allocate;
8588 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8589 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8590 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8591 RTX_FRAME_RELATED_P (insn) = 1;
8594 if (eax_live)
8596 if (frame_pointer_needed)
8597 t = plus_constant (hard_frame_pointer_rtx,
8598 allocate
8599 - frame.to_allocate
8600 - frame.nregs * UNITS_PER_WORD);
8601 else
8602 t = plus_constant (stack_pointer_rtx, allocate);
8603 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8607 if (frame.save_regs_using_mov
8608 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8609 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8611 if (!frame_pointer_needed
8612 || !(frame.to_allocate + frame.padding0)
8613 || crtl->stack_realign_needed)
8614 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8615 frame.to_allocate
8616 + frame.nsseregs * 16 + frame.padding0);
8617 else
8618 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8619 -frame.nregs * UNITS_PER_WORD);
8621 if (!frame_pointer_needed
8622 || !(frame.to_allocate + frame.padding0)
8623 || crtl->stack_realign_needed)
8624 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8625 frame.to_allocate);
8626 else
8627 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8628 - frame.nregs * UNITS_PER_WORD
8629 - frame.nsseregs * 16
8630 - frame.padding0);
8632 pic_reg_used = false;
8633 if (pic_offset_table_rtx
8634 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8635 || crtl->profile))
8637 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8639 if (alt_pic_reg_used != INVALID_REGNUM)
8640 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8642 pic_reg_used = true;
8645 if (pic_reg_used)
8647 if (TARGET_64BIT)
8649 if (ix86_cmodel == CM_LARGE_PIC)
8651 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8652 rtx label = gen_label_rtx ();
8653 emit_label (label);
8654 LABEL_PRESERVE_P (label) = 1;
8655 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8656 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8657 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8658 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8659 pic_offset_table_rtx, tmp_reg));
8661 else
8662 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8664 else
8665 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8668 /* In the pic_reg_used case, make sure that the got load isn't deleted
8669 when mcount needs it. Blockage to avoid call movement across mcount
8670 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8671 note. */
8672 if (crtl->profile && pic_reg_used)
8673 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8675 if (crtl->drap_reg && !crtl->stack_realign_needed)
8677 /* vDRAP is set up, but after reload it turns out stack realignment
8678 isn't necessary; here we emit prologue code to set up DRAP
8679 without the stack realignment adjustment. */
8680 rtx x;
8681 int drap_bp_offset = UNITS_PER_WORD * 2;
8683 if (ix86_static_chain_on_stack)
8684 drap_bp_offset += UNITS_PER_WORD;
8685 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8686 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8689 /* Prevent instructions from being scheduled into register save push
8690 sequence when access to the redzone area is done through frame pointer.
8691 The offset between the frame pointer and the stack pointer is calculated
8692 relative to the value of the stack pointer at the end of the function
8693 prologue, and moving instructions that access redzone area via frame
8694 pointer inside push sequence violates this assumption. */
8695 if (frame_pointer_needed && frame.red_zone_size)
8696 emit_insn (gen_memory_blockage ());
8698 /* Emit cld instruction if stringops are used in the function. */
8699 if (TARGET_CLD && ix86_current_function_needs_cld)
8700 emit_insn (gen_cld ());
8703 /* Emit code to restore REG using a POP insn. */
8705 static void
8706 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8708 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8710 if (ix86_cfa_state->reg == crtl->drap_reg
8711 && REGNO (reg) == REGNO (crtl->drap_reg))
8713 /* Previously we'd represented the CFA as an expression
8714 like *(%ebp - 8). We've just popped that value from
8715 the stack, which means we need to reset the CFA to
8716 the drap register. This will remain until we restore
8717 the stack pointer. */
8718 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8719 RTX_FRAME_RELATED_P (insn) = 1;
8720 return;
8723 if (ix86_cfa_state->reg == stack_pointer_rtx)
8725 ix86_cfa_state->offset -= UNITS_PER_WORD;
8726 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8727 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8728 RTX_FRAME_RELATED_P (insn) = 1;
8731 /* When the frame pointer is the CFA, and we pop it, we are
8732 swapping back to the stack pointer as the CFA. This happens
8733 for stack frames that don't allocate other data, so we assume
8734 the stack pointer is now pointing at the return address, i.e.
8735 the function entry state, which makes the offset be 1 word. */
8736 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8737 && reg == hard_frame_pointer_rtx)
8739 ix86_cfa_state->reg = stack_pointer_rtx;
8740 ix86_cfa_state->offset -= UNITS_PER_WORD;
8742 add_reg_note (insn, REG_CFA_DEF_CFA,
8743 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8744 GEN_INT (ix86_cfa_state->offset)));
8745 RTX_FRAME_RELATED_P (insn) = 1;
8748 ix86_add_cfa_restore_note (insn, reg, red_offset);
8751 /* Emit code to restore saved registers using POP insns. */
8753 static void
8754 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8756 int regno;
8758 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8759 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8761 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8762 red_offset);
8763 red_offset += UNITS_PER_WORD;
8767 /* Emit code and notes for the LEAVE instruction. */
8769 static void
8770 ix86_emit_leave (HOST_WIDE_INT red_offset)
8772 rtx insn = emit_insn (ix86_gen_leave ());
8774 ix86_add_queued_cfa_restore_notes (insn);
8776 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8778 ix86_cfa_state->reg = stack_pointer_rtx;
8779 ix86_cfa_state->offset -= UNITS_PER_WORD;
8781 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8782 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8783 RTX_FRAME_RELATED_P (insn) = 1;
8784 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
8788 /* Emit code to restore saved registers using MOV insns. First register
8789 is restored from POINTER + OFFSET. */
8790 static void
8791 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8792 HOST_WIDE_INT red_offset,
8793 int maybe_eh_return)
8795 unsigned int regno;
8796 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8797 rtx insn;
8799 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8800 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8802 rtx reg = gen_rtx_REG (Pmode, regno);
8804 /* Ensure that adjust_address won't be forced to produce pointer
8805 out of range allowed by x86-64 instruction set. */
8806 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8808 rtx r11;
8810 r11 = gen_rtx_REG (DImode, R11_REG);
8811 emit_move_insn (r11, GEN_INT (offset));
8812 emit_insn (gen_adddi3 (r11, r11, pointer));
8813 base_address = gen_rtx_MEM (Pmode, r11);
8814 offset = 0;
8816 insn = emit_move_insn (reg,
8817 adjust_address (base_address, Pmode, offset));
8818 offset += UNITS_PER_WORD;
8820 if (ix86_cfa_state->reg == crtl->drap_reg
8821 && regno == REGNO (crtl->drap_reg))
8823 /* Previously we'd represented the CFA as an expression
8824 like *(%ebp - 8). We've just popped that value from
8825 the stack, which means we need to reset the CFA to
8826 the drap register. This will remain until we restore
8827 the stack pointer. */
8828 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8829 RTX_FRAME_RELATED_P (insn) = 1;
8831 else
8832 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8834 red_offset += UNITS_PER_WORD;
8838 /* Emit code to restore saved registers using MOV insns. First register
8839 is restored from POINTER + OFFSET. */
8840 static void
8841 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8842 HOST_WIDE_INT red_offset,
8843 int maybe_eh_return)
8845 int regno;
8846 rtx base_address = gen_rtx_MEM (TImode, pointer);
8847 rtx mem;
8849 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8850 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8852 rtx reg = gen_rtx_REG (TImode, regno);
8854 /* Ensure that adjust_address won't be forced to produce pointer
8855 out of range allowed by x86-64 instruction set. */
8856 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8858 rtx r11;
8860 r11 = gen_rtx_REG (DImode, R11_REG);
8861 emit_move_insn (r11, GEN_INT (offset));
8862 emit_insn (gen_adddi3 (r11, r11, pointer));
8863 base_address = gen_rtx_MEM (TImode, r11);
8864 offset = 0;
8866 mem = adjust_address (base_address, TImode, offset);
8867 set_mem_align (mem, 128);
8868 emit_move_insn (reg, mem);
8869 offset += 16;
8871 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8873 red_offset += 16;
8877 /* Restore function stack, frame, and registers. */
8879 void
8880 ix86_expand_epilogue (int style)
8882 int sp_valid;
8883 struct ix86_frame frame;
8884 HOST_WIDE_INT offset, red_offset;
8885 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
8886 bool using_drap;
8888 ix86_finalize_stack_realign_flags ();
8890 /* When stack is realigned, SP must be valid. */
8891 sp_valid = (!frame_pointer_needed
8892 || current_function_sp_is_unchanging
8893 || stack_realign_fp);
8895 ix86_compute_frame_layout (&frame);
8897 /* See the comment about red zone and frame
8898 pointer usage in ix86_expand_prologue. */
8899 if (frame_pointer_needed && frame.red_zone_size)
8900 emit_insn (gen_memory_blockage ());
8902 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
8903 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
8905 /* Calculate start of saved registers relative to ebp. Special care
8906 must be taken for the normal return case of a function using
8907 eh_return: the eax and edx registers are marked as saved, but not
8908 restored along this path. */
8909 offset = frame.nregs;
8910 if (crtl->calls_eh_return && style != 2)
8911 offset -= 2;
8912 offset *= -UNITS_PER_WORD;
8913 offset -= frame.nsseregs * 16 + frame.padding0;
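/* Illustrative example (made-up numbers): with 3 general registers
   saved, no eh_return path, no SSE save area and UNITS_PER_WORD == 4,
   OFFSET becomes -12, i.e. the register save block starts 12 bytes
   below where the hard frame pointer points. */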
8915 /* Calculate start of saved registers relative to esp on entry of the
8916 function. When realigning stack, this needs to be the most negative
8917 value possible at runtime. */
8918 red_offset = offset;
8919 if (using_drap)
8920 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
8921 + UNITS_PER_WORD;
8922 else if (stack_realign_fp)
8923 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
8924 - UNITS_PER_WORD;
8925 if (ix86_static_chain_on_stack)
8926 red_offset -= UNITS_PER_WORD;
8927 if (frame_pointer_needed)
8928 red_offset -= UNITS_PER_WORD;
8930 /* If we're only restoring one register and sp is not valid, then
8931 use a move instruction to restore the register, since it's
8932 less work than reloading sp and popping the register.
8934 The default code results in a stack adjustment using an add/lea instruction,
8935 while this code results in a LEAVE instruction (or discrete equivalent),
8936 so it is profitable in some other cases as well. Especially when there
8937 are no registers to restore. We also use this code when TARGET_USE_LEAVE
8938 and there is exactly one register to pop. This heuristic may need some
8939 tuning in future. */
8940 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
8941 || (TARGET_EPILOGUE_USING_MOVE
8942 && cfun->machine->use_fast_prologue_epilogue
8943 && ((frame.nregs + frame.nsseregs) > 1
8944 || (frame.to_allocate + frame.padding0) != 0))
8945 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
8946 && (frame.to_allocate + frame.padding0) != 0)
8947 || (frame_pointer_needed && TARGET_USE_LEAVE
8948 && cfun->machine->use_fast_prologue_epilogue
8949 && (frame.nregs + frame.nsseregs) == 1)
8950 || crtl->calls_eh_return)
8952 /* Restore registers. We can use ebp or esp to address the memory
8953 locations. If both are available, default to ebp, since offsets
8954 are known to be small. Only exception is esp pointing directly
8955 to the end of block of saved registers, where we may simplify
8956 addressing mode.
8958 If we are realigning stack with bp and sp, regs restore can't
8959 be addressed by bp. sp must be used instead. */
8961 if (!frame_pointer_needed
8962 || (sp_valid && !(frame.to_allocate + frame.padding0))
8963 || stack_realign_fp)
8965 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8966 frame.to_allocate, red_offset,
8967 style == 2);
8968 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
8969 frame.to_allocate
8970 + frame.nsseregs * 16
8971 + frame.padding0,
8972 red_offset
8973 + frame.nsseregs * 16
8974 + frame.padding0, style == 2);
8976 else
8978 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
8979 offset, red_offset,
8980 style == 2);
8981 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
8982 offset
8983 + frame.nsseregs * 16
8984 + frame.padding0,
8985 red_offset
8986 + frame.nsseregs * 16
8987 + frame.padding0, style == 2);
8990 red_offset -= offset;
8992 /* eh_return epilogues need %ecx added to the stack pointer. */
8993 if (style == 2)
8995 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
8997 /* Stack align doesn't work with eh_return. */
8998 gcc_assert (!crtl->stack_realign_needed);
8999 /* Neither do regparm nested functions. */
9000 gcc_assert (!ix86_static_chain_on_stack);
9002 if (frame_pointer_needed)
9004 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9005 tmp = plus_constant (tmp, UNITS_PER_WORD);
9006 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9008 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9009 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9011 /* Note that we use SA as a temporary CFA, as the return
9012 address is at the proper place relative to it. We
9013 pretend this happens at the FP restore insn because
9014 prior to this insn the FP would be stored at the wrong
9015 offset relative to SA, and after this insn we have no
9016 other reasonable register to use for the CFA. We don't
9017 bother resetting the CFA to the SP for the duration of
9018 the return insn. */
9019 add_reg_note (tmp, REG_CFA_DEF_CFA,
9020 plus_constant (sa, UNITS_PER_WORD));
9021 ix86_add_queued_cfa_restore_notes (tmp);
9022 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9023 RTX_FRAME_RELATED_P (tmp) = 1;
9024 ix86_cfa_state->reg = sa;
9025 ix86_cfa_state->offset = UNITS_PER_WORD;
9027 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9028 const0_rtx, style, false);
9030 else
9032 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9033 tmp = plus_constant (tmp, (frame.to_allocate
9034 + frame.nregs * UNITS_PER_WORD
9035 + frame.nsseregs * 16
9036 + frame.padding0));
9037 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9038 ix86_add_queued_cfa_restore_notes (tmp);
9040 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9041 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9043 ix86_cfa_state->offset = UNITS_PER_WORD;
9044 add_reg_note (tmp, REG_CFA_DEF_CFA,
9045 plus_constant (stack_pointer_rtx,
9046 UNITS_PER_WORD));
9047 RTX_FRAME_RELATED_P (tmp) = 1;
9051 else if (!frame_pointer_needed)
9052 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9053 GEN_INT (frame.to_allocate
9054 + frame.nregs * UNITS_PER_WORD
9055 + frame.nsseregs * 16
9056 + frame.padding0),
9057 style, !using_drap);
9058 /* If not an i386, mov & pop is faster than "leave". */
9059 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9060 || !cfun->machine->use_fast_prologue_epilogue)
9061 ix86_emit_leave (red_offset);
9062 else
9064 pro_epilogue_adjust_stack (stack_pointer_rtx,
9065 hard_frame_pointer_rtx,
9066 const0_rtx, style, !using_drap);
9068 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9071 else
9073 /* The first step is to deallocate the stack frame so that we can
9074 pop the registers.
9076 If we realign the stack with the frame pointer, then the stack pointer
9077 can't be recovered via lea $offset(%bp), %sp, because
9078 there is a padding area between bp and sp for the realignment;
9079 "add $to_allocate, %sp" must be used instead. */
9080 if (!sp_valid)
9082 gcc_assert (frame_pointer_needed);
9083 gcc_assert (!stack_realign_fp);
9084 pro_epilogue_adjust_stack (stack_pointer_rtx,
9085 hard_frame_pointer_rtx,
9086 GEN_INT (offset), style, false);
9087 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9088 0, red_offset,
9089 style == 2);
9090 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9091 GEN_INT (frame.nsseregs * 16
9092 + frame.padding0),
9093 style, false);
9095 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9097 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9098 frame.to_allocate, red_offset,
9099 style == 2);
9100 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9101 GEN_INT (frame.to_allocate
9102 + frame.nsseregs * 16
9103 + frame.padding0), style,
9104 !using_drap && !frame_pointer_needed);
9107 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9108 + frame.padding0);
9109 red_offset -= offset;
9111 if (frame_pointer_needed)
9113 /* Leave results in shorter dependency chains on CPUs that are
9114 able to grok it fast. */
9115 if (TARGET_USE_LEAVE)
9116 ix86_emit_leave (red_offset);
9117 else
9119 /* If stack realignment really happened, restoring the stack
9120 pointer from the hard frame pointer is a must, if not using
9121 leave. */
9122 if (stack_realign_fp)
9123 pro_epilogue_adjust_stack (stack_pointer_rtx,
9124 hard_frame_pointer_rtx,
9125 const0_rtx, style, !using_drap);
9126 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9127 red_offset);
9132 if (using_drap)
9134 int param_ptr_offset = UNITS_PER_WORD;
9135 rtx insn;
9137 gcc_assert (stack_realign_drap);
9139 if (ix86_static_chain_on_stack)
9140 param_ptr_offset += UNITS_PER_WORD;
9141 if (!call_used_regs[REGNO (crtl->drap_reg)])
9142 param_ptr_offset += UNITS_PER_WORD;
9144 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9145 crtl->drap_reg,
9146 GEN_INT (-param_ptr_offset)));
9148 ix86_cfa_state->reg = stack_pointer_rtx;
9149 ix86_cfa_state->offset = param_ptr_offset;
9151 add_reg_note (insn, REG_CFA_DEF_CFA,
9152 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9153 GEN_INT (ix86_cfa_state->offset)));
9154 RTX_FRAME_RELATED_P (insn) = 1;
9156 if (!call_used_regs[REGNO (crtl->drap_reg)])
9157 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9160 /* Remove the saved static chain from the stack. The use of ECX is
9161 merely as a scratch register, not as the actual static chain. */
9162 if (ix86_static_chain_on_stack)
9164 rtx r, insn;
9166 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9167 ix86_cfa_state->offset += UNITS_PER_WORD;
9169 r = gen_rtx_REG (Pmode, CX_REG);
9170 insn = emit_insn (ix86_gen_pop1 (r));
9172 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9173 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9174 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9175 RTX_FRAME_RELATED_P (insn) = 1;
9178 /* Sibcall epilogues don't want a return instruction. */
9179 if (style == 0)
9181 *ix86_cfa_state = cfa_state_save;
9182 return;
9185 if (crtl->args.pops_args && crtl->args.size)
9187 rtx popc = GEN_INT (crtl->args.pops_args);
9189 /* The i386 can only pop 64K bytes. If asked to pop more, pop the return
9190 address, do an explicit add, and jump indirectly to the caller. */
9192 if (crtl->args.pops_args >= 65536)
9194 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9195 rtx insn;
9197 /* There is no "pascal" calling convention in any 64bit ABI. */
9198 gcc_assert (!TARGET_64BIT);
9200 insn = emit_insn (gen_popsi1 (ecx));
9201 ix86_cfa_state->offset -= UNITS_PER_WORD;
9203 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9204 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9205 add_reg_note (insn, REG_CFA_REGISTER,
9206 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9207 RTX_FRAME_RELATED_P (insn) = 1;
9209 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9210 popc, -1, true);
9211 emit_jump_insn (gen_return_indirect_internal (ecx));
9213 else
9214 emit_jump_insn (gen_return_pop_internal (popc));
9216 else
9217 emit_jump_insn (gen_return_internal ());
9219 /* Restore the state back to the state from the prologue,
9220 so that it's correct for the next epilogue. */
9221 *ix86_cfa_state = cfa_state_save;
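/* A minimal standalone sketch of the return selection performed above; the
   names below are hypothetical and are not part of GCC.  "ret $imm16"
   carries only a 16-bit immediate, so popping 64K or more argument bytes
   needs the pop/add/indirect-jump sequence instead.  */
#if 0
enum example_return_kind
{
  EXAMPLE_RET,           /* plain "ret" */
  EXAMPLE_RET_POP,       /* "ret $imm16" */
  EXAMPLE_RET_INDIRECT   /* pop %ecx; add $n, %esp; jmp *%ecx */
};

static enum example_return_kind
example_choose_return (unsigned long pops_args, unsigned long args_size)
{
  if (pops_args == 0 || args_size == 0)
    return EXAMPLE_RET;
  if (pops_args >= 65536)
    return EXAMPLE_RET_INDIRECT;
  return EXAMPLE_RET_POP;
}
#endif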
9224 /* Reset from the function's potential modifications. */
9226 static void
9227 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9228 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9230 if (pic_offset_table_rtx)
9231 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9232 #if TARGET_MACHO
9233 /* Mach-O doesn't support labels at the end of objects, so if
9234 it looks like we might want one, insert a NOP. */
9236 rtx insn = get_last_insn ();
9237 while (insn
9238 && NOTE_P (insn)
9239 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9240 insn = PREV_INSN (insn);
9241 if (insn
9242 && (LABEL_P (insn)
9243 || (NOTE_P (insn)
9244 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9245 fputs ("\tnop\n", file);
9247 #endif
9251 /* Extract the parts of an RTL expression that is a valid memory address
9252 for an instruction. Return 0 if the structure of the address is
9253 grossly off. Return -1 if the address contains an ASHIFT, so it is not
9254 strictly valid, but is still used for computing the length of an lea instruction. */
9257 ix86_decompose_address (rtx addr, struct ix86_address *out)
9259 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9260 rtx base_reg, index_reg;
9261 HOST_WIDE_INT scale = 1;
9262 rtx scale_rtx = NULL_RTX;
9263 int retval = 1;
9264 enum ix86_address_seg seg = SEG_DEFAULT;
9266 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9267 base = addr;
9268 else if (GET_CODE (addr) == PLUS)
9270 rtx addends[4], op;
9271 int n = 0, i;
9273 op = addr;
9276 if (n >= 4)
9277 return 0;
9278 addends[n++] = XEXP (op, 1);
9279 op = XEXP (op, 0);
9281 while (GET_CODE (op) == PLUS);
9282 if (n >= 4)
9283 return 0;
9284 addends[n] = op;
9286 for (i = n; i >= 0; --i)
9288 op = addends[i];
9289 switch (GET_CODE (op))
9291 case MULT:
9292 if (index)
9293 return 0;
9294 index = XEXP (op, 0);
9295 scale_rtx = XEXP (op, 1);
9296 break;
9298 case UNSPEC:
9299 if (XINT (op, 1) == UNSPEC_TP
9300 && TARGET_TLS_DIRECT_SEG_REFS
9301 && seg == SEG_DEFAULT)
9302 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9303 else
9304 return 0;
9305 break;
9307 case REG:
9308 case SUBREG:
9309 if (!base)
9310 base = op;
9311 else if (!index)
9312 index = op;
9313 else
9314 return 0;
9315 break;
9317 case CONST:
9318 case CONST_INT:
9319 case SYMBOL_REF:
9320 case LABEL_REF:
9321 if (disp)
9322 return 0;
9323 disp = op;
9324 break;
9326 default:
9327 return 0;
9331 else if (GET_CODE (addr) == MULT)
9333 index = XEXP (addr, 0); /* index*scale */
9334 scale_rtx = XEXP (addr, 1);
9336 else if (GET_CODE (addr) == ASHIFT)
9338 rtx tmp;
9340 /* We're called for lea too, which implements ashift on occasion. */
9341 index = XEXP (addr, 0);
9342 tmp = XEXP (addr, 1);
9343 if (!CONST_INT_P (tmp))
9344 return 0;
9345 scale = INTVAL (tmp);
9346 if ((unsigned HOST_WIDE_INT) scale > 3)
9347 return 0;
9348 scale = 1 << scale;
9349 retval = -1;
9351 else
9352 disp = addr; /* displacement */
9354 /* Extract the integral value of scale. */
9355 if (scale_rtx)
9357 if (!CONST_INT_P (scale_rtx))
9358 return 0;
9359 scale = INTVAL (scale_rtx);
9362 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9363 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9365 /* Avoid useless 0 displacement. */
9366 if (disp == const0_rtx && (base || index))
9367 disp = NULL_RTX;
9369 /* Allow the arg pointer and stack pointer as an index if there is no scaling. */
9370 if (base_reg && index_reg && scale == 1
9371 && (index_reg == arg_pointer_rtx
9372 || index_reg == frame_pointer_rtx
9373 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9375 rtx tmp;
9376 tmp = base, base = index, index = tmp;
9377 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9380 /* Special case: %ebp cannot be encoded as a base without a displacement.
9381 Similarly %r13. */
9382 if (!disp
9383 && base_reg
9384 && (base_reg == hard_frame_pointer_rtx
9385 || base_reg == frame_pointer_rtx
9386 || base_reg == arg_pointer_rtx
9387 || (REG_P (base_reg)
9388 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9389 || REGNO (base_reg) == R13_REG))))
9390 disp = const0_rtx;
9392 /* Special case: on K6, [%esi] forces the instruction to be vector decoded.
9393 Avoid this by transforming to [%esi+0].
9394 Reload calls address legitimization without cfun defined, so we need
9395 to test cfun for being non-NULL. */
9396 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9397 && base_reg && !index_reg && !disp
9398 && REG_P (base_reg)
9399 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9400 disp = const0_rtx;
9402 /* Special case: encode reg+reg instead of reg*2. */
9403 if (!base && index && scale == 2)
9404 base = index, base_reg = index_reg, scale = 1;
9406 /* Special case: scaling cannot be encoded without base or displacement. */
9407 if (!base && !disp && index && scale != 1)
9408 disp = const0_rtx;
9410 out->base = base;
9411 out->index = index;
9412 out->disp = disp;
9413 out->scale = scale;
9414 out->seg = seg;
9416 return retval;
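/* A minimal standalone sketch (hypothetical names, not GCC code) of the
   address form the decomposition above recovers: an x86 memory operand is
   base + index * scale + disp, with scale restricted to 1, 2, 4 or 8.  */
#if 0
struct example_address
{
  long base;   /* value of the base register, or 0 if absent */
  long index;  /* value of the index register, or 0 if absent */
  long scale;  /* 1, 2, 4 or 8 */
  long disp;   /* constant displacement */
};

static long
example_effective_address (const struct example_address *a)
{
  return a->base + a->index * a->scale + a->disp;
}
#endif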
9419 /* Return cost of the memory address x.
9420 For i386, it is better to use a complex address than let gcc copy
9421 the address into a reg and make a new pseudo. But not if the address
9422 requires two regs - that would mean more pseudos with longer
9423 lifetimes. */
9424 static int
9425 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9427 struct ix86_address parts;
9428 int cost = 1;
9429 int ok = ix86_decompose_address (x, &parts);
9431 gcc_assert (ok);
9433 if (parts.base && GET_CODE (parts.base) == SUBREG)
9434 parts.base = SUBREG_REG (parts.base);
9435 if (parts.index && GET_CODE (parts.index) == SUBREG)
9436 parts.index = SUBREG_REG (parts.index);
9438 /* Attempt to minimize number of registers in the address. */
9439 if ((parts.base
9440 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9441 || (parts.index
9442 && (!REG_P (parts.index)
9443 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9444 cost++;
9446 if (parts.base
9447 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9448 && parts.index
9449 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9450 && parts.base != parts.index)
9451 cost++;
9453 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
9454 since its predecode logic can't detect the length of such instructions
9455 and decoding degenerates to the vector decoder. Increase the cost of such
9456 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
9457 to split such addresses or even refuse them entirely.
9459 The following addressing modes are affected:
9460 [base+scale*index]
9461 [scale*index+disp]
9462 [base+index]
9464 The first and last cases may be avoidable by explicitly coding the zero into
9465 the memory address, but I don't have an AMD-K6 machine handy to check this
9466 theory. */
9468 if (TARGET_K6
9469 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9470 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9471 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9472 cost += 10;
9474 return cost;
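/* A standalone sketch of the cost heuristic above, with hypothetical names:
   the base cost is 1, one more unit is added when a pseudo (or non-register)
   part is involved, another when both base and index are pseudos, and a
   large penalty for the K6-unfriendly forms.  */
#if 0
static int
example_address_cost (int base_is_pseudo, int index_is_pseudo,
                      int k6_affected_form)
{
  int cost = 1;

  if (base_is_pseudo || index_is_pseudo)
    cost++;
  if (base_is_pseudo && index_is_pseudo)
    cost++;
  if (k6_affected_form)
    cost += 10;
  return cost;
}
#endif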
9477 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
9478 this is used to form addresses to local data when -fPIC is in
9479 use. */
9481 static bool
9482 darwin_local_data_pic (rtx disp)
9484 return (GET_CODE (disp) == UNSPEC
9485 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9488 /* Determine if a given RTX is a valid constant. We already know this
9489 satisfies CONSTANT_P. */
9491 bool
9492 legitimate_constant_p (rtx x)
9494 switch (GET_CODE (x))
9496 case CONST:
9497 x = XEXP (x, 0);
9499 if (GET_CODE (x) == PLUS)
9501 if (!CONST_INT_P (XEXP (x, 1)))
9502 return false;
9503 x = XEXP (x, 0);
9506 if (TARGET_MACHO && darwin_local_data_pic (x))
9507 return true;
9509 /* Only some unspecs are valid as "constants". */
9510 if (GET_CODE (x) == UNSPEC)
9511 switch (XINT (x, 1))
9513 case UNSPEC_GOT:
9514 case UNSPEC_GOTOFF:
9515 case UNSPEC_PLTOFF:
9516 return TARGET_64BIT;
9517 case UNSPEC_TPOFF:
9518 case UNSPEC_NTPOFF:
9519 x = XVECEXP (x, 0, 0);
9520 return (GET_CODE (x) == SYMBOL_REF
9521 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9522 case UNSPEC_DTPOFF:
9523 x = XVECEXP (x, 0, 0);
9524 return (GET_CODE (x) == SYMBOL_REF
9525 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9526 default:
9527 return false;
9530 /* We must have drilled down to a symbol. */
9531 if (GET_CODE (x) == LABEL_REF)
9532 return true;
9533 if (GET_CODE (x) != SYMBOL_REF)
9534 return false;
9535 /* FALLTHRU */
9537 case SYMBOL_REF:
9538 /* TLS symbols are never valid. */
9539 if (SYMBOL_REF_TLS_MODEL (x))
9540 return false;
9542 /* DLLIMPORT symbols are never valid. */
9543 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9544 && SYMBOL_REF_DLLIMPORT_P (x))
9545 return false;
9546 break;
9548 case CONST_DOUBLE:
9549 if (GET_MODE (x) == TImode
9550 && x != CONST0_RTX (TImode)
9551 && !TARGET_64BIT)
9552 return false;
9553 break;
9555 case CONST_VECTOR:
9556 if (!standard_sse_constant_p (x))
9557 return false;
9559 default:
9560 break;
9563 /* Otherwise we handle everything else in the move patterns. */
9564 return true;
9567 /* Determine if it's legal to put X into the constant pool. This
9568 is not possible for the address of thread-local symbols, which
9569 is checked above. */
9571 static bool
9572 ix86_cannot_force_const_mem (rtx x)
9574 /* We can always put integral constants and vectors in memory. */
9575 switch (GET_CODE (x))
9577 case CONST_INT:
9578 case CONST_DOUBLE:
9579 case CONST_VECTOR:
9580 return false;
9582 default:
9583 break;
9585 return !legitimate_constant_p (x);
9589 /* Nonzero if the constant value X is a legitimate general operand
9590 when generating PIC code. It is given that flag_pic is on and
9591 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9593 bool
9594 legitimate_pic_operand_p (rtx x)
9596 rtx inner;
9598 switch (GET_CODE (x))
9600 case CONST:
9601 inner = XEXP (x, 0);
9602 if (GET_CODE (inner) == PLUS
9603 && CONST_INT_P (XEXP (inner, 1)))
9604 inner = XEXP (inner, 0);
9606 /* Only some unspecs are valid as "constants". */
9607 if (GET_CODE (inner) == UNSPEC)
9608 switch (XINT (inner, 1))
9610 case UNSPEC_GOT:
9611 case UNSPEC_GOTOFF:
9612 case UNSPEC_PLTOFF:
9613 return TARGET_64BIT;
9614 case UNSPEC_TPOFF:
9615 x = XVECEXP (inner, 0, 0);
9616 return (GET_CODE (x) == SYMBOL_REF
9617 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9618 case UNSPEC_MACHOPIC_OFFSET:
9619 return legitimate_pic_address_disp_p (x);
9620 default:
9621 return false;
9623 /* FALLTHRU */
9625 case SYMBOL_REF:
9626 case LABEL_REF:
9627 return legitimate_pic_address_disp_p (x);
9629 default:
9630 return true;
9634 /* Determine if a given CONST RTX is a valid memory displacement
9635 in PIC mode. */
9638 legitimate_pic_address_disp_p (rtx disp)
9640 bool saw_plus;
9642 /* In 64bit mode we can allow direct addresses of symbols and labels
9643 when they are not dynamic symbols. */
9644 if (TARGET_64BIT)
9646 rtx op0 = disp, op1;
9648 switch (GET_CODE (disp))
9650 case LABEL_REF:
9651 return true;
9653 case CONST:
9654 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9655 break;
9656 op0 = XEXP (XEXP (disp, 0), 0);
9657 op1 = XEXP (XEXP (disp, 0), 1);
9658 if (!CONST_INT_P (op1)
9659 || INTVAL (op1) >= 16*1024*1024
9660 || INTVAL (op1) < -16*1024*1024)
9661 break;
9662 if (GET_CODE (op0) == LABEL_REF)
9663 return true;
9664 if (GET_CODE (op0) != SYMBOL_REF)
9665 break;
9666 /* FALLTHRU */
9668 case SYMBOL_REF:
9669 /* TLS references should always be enclosed in UNSPEC. */
9670 if (SYMBOL_REF_TLS_MODEL (op0))
9671 return false;
9672 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9673 && ix86_cmodel != CM_LARGE_PIC)
9674 return true;
9675 break;
9677 default:
9678 break;
9681 if (GET_CODE (disp) != CONST)
9682 return 0;
9683 disp = XEXP (disp, 0);
9685 if (TARGET_64BIT)
9687 /* It is unsafe to allow PLUS expressions; this limits the allowed distance
9688 of GOT tables. We should not need these anyway. */
9689 if (GET_CODE (disp) != UNSPEC
9690 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9691 && XINT (disp, 1) != UNSPEC_GOTOFF
9692 && XINT (disp, 1) != UNSPEC_PLTOFF))
9693 return 0;
9695 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9696 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9697 return 0;
9698 return 1;
9701 saw_plus = false;
9702 if (GET_CODE (disp) == PLUS)
9704 if (!CONST_INT_P (XEXP (disp, 1)))
9705 return 0;
9706 disp = XEXP (disp, 0);
9707 saw_plus = true;
9710 if (TARGET_MACHO && darwin_local_data_pic (disp))
9711 return 1;
9713 if (GET_CODE (disp) != UNSPEC)
9714 return 0;
9716 switch (XINT (disp, 1))
9718 case UNSPEC_GOT:
9719 if (saw_plus)
9720 return false;
9721 /* We need to check for both symbols and labels because VxWorks loads
9722 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
9723 details. */
9724 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9725 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9726 case UNSPEC_GOTOFF:
9727 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
9728 While the ABI also specifies a 32bit relocation, we don't produce it in the
9729 small PIC model at all. */
9730 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9731 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9732 && !TARGET_64BIT)
9733 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9734 return false;
9735 case UNSPEC_GOTTPOFF:
9736 case UNSPEC_GOTNTPOFF:
9737 case UNSPEC_INDNTPOFF:
9738 if (saw_plus)
9739 return false;
9740 disp = XVECEXP (disp, 0, 0);
9741 return (GET_CODE (disp) == SYMBOL_REF
9742 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9743 case UNSPEC_NTPOFF:
9744 disp = XVECEXP (disp, 0, 0);
9745 return (GET_CODE (disp) == SYMBOL_REF
9746 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9747 case UNSPEC_DTPOFF:
9748 disp = XVECEXP (disp, 0, 0);
9749 return (GET_CODE (disp) == SYMBOL_REF
9750 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9753 return 0;
9756 /* Recognizes RTL expressions that are valid memory addresses for an
9757 instruction. The MODE argument is the machine mode for the MEM
9758 expression that wants to use this address.
9760 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
9761 convert common non-canonical forms to canonical form so that they will
9762 be recognized. */
9764 static bool
9765 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9766 rtx addr, bool strict)
9768 struct ix86_address parts;
9769 rtx base, index, disp;
9770 HOST_WIDE_INT scale;
9772 if (ix86_decompose_address (addr, &parts) <= 0)
9773 /* Decomposition failed. */
9774 return false;
9776 base = parts.base;
9777 index = parts.index;
9778 disp = parts.disp;
9779 scale = parts.scale;
9781 /* Validate base register.
9783 Don't allow SUBREG's that span more than a word here. It can lead to spill
9784 failures when the base is one word out of a two word structure, which is
9785 represented internally as a DImode int. */
9787 if (base)
9789 rtx reg;
9791 if (REG_P (base))
9792 reg = base;
9793 else if (GET_CODE (base) == SUBREG
9794 && REG_P (SUBREG_REG (base))
9795 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9796 <= UNITS_PER_WORD)
9797 reg = SUBREG_REG (base);
9798 else
9799 /* Base is not a register. */
9800 return false;
9802 if (GET_MODE (base) != Pmode)
9803 /* Base is not in Pmode. */
9804 return false;
9806 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9807 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9808 /* Base is not valid. */
9809 return false;
9812 /* Validate index register.
9814 Don't allow SUBREG's that span more than a word here -- same as above. */
9816 if (index)
9818 rtx reg;
9820 if (REG_P (index))
9821 reg = index;
9822 else if (GET_CODE (index) == SUBREG
9823 && REG_P (SUBREG_REG (index))
9824 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9825 <= UNITS_PER_WORD)
9826 reg = SUBREG_REG (index);
9827 else
9828 /* Index is not a register. */
9829 return false;
9831 if (GET_MODE (index) != Pmode)
9832 /* Index is not in Pmode. */
9833 return false;
9835 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9836 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9837 /* Index is not valid. */
9838 return false;
9841 /* Validate scale factor. */
9842 if (scale != 1)
9844 if (!index)
9845 /* Scale without index. */
9846 return false;
9848 if (scale != 2 && scale != 4 && scale != 8)
9849 /* Scale is not a valid multiplier. */
9850 return false;
9853 /* Validate displacement. */
9854 if (disp)
9856 if (GET_CODE (disp) == CONST
9857 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9858 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9859 switch (XINT (XEXP (disp, 0), 1))
9861 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
9862 used. While the ABI also specifies 32bit relocations, we don't produce
9863 them at all and use IP-relative addressing instead. */
9864 case UNSPEC_GOT:
9865 case UNSPEC_GOTOFF:
9866 gcc_assert (flag_pic);
9867 if (!TARGET_64BIT)
9868 goto is_legitimate_pic;
9870 /* 64bit address unspec. */
9871 return false;
9873 case UNSPEC_GOTPCREL:
9874 gcc_assert (flag_pic);
9875 goto is_legitimate_pic;
9877 case UNSPEC_GOTTPOFF:
9878 case UNSPEC_GOTNTPOFF:
9879 case UNSPEC_INDNTPOFF:
9880 case UNSPEC_NTPOFF:
9881 case UNSPEC_DTPOFF:
9882 break;
9884 default:
9885 /* Invalid address unspec. */
9886 return false;
9889 else if (SYMBOLIC_CONST (disp)
9890 && (flag_pic
9891 || (TARGET_MACHO
9892 #if TARGET_MACHO
9893 && MACHOPIC_INDIRECT
9894 && !machopic_operand_p (disp)
9895 #endif
9899 is_legitimate_pic:
9900 if (TARGET_64BIT && (index || base))
9902 /* foo@dtpoff(%rX) is ok. */
9903 if (GET_CODE (disp) != CONST
9904 || GET_CODE (XEXP (disp, 0)) != PLUS
9905 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
9906 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
9907 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
9908 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
9909 /* Non-constant pic memory reference. */
9910 return false;
9912 else if (! legitimate_pic_address_disp_p (disp))
9913 /* Displacement is an invalid pic construct. */
9914 return false;
9916 /* This code used to verify that a symbolic pic displacement
9917 includes the pic_offset_table_rtx register.
9919 While this is a good idea, unfortunately these constructs may
9920 be created by the "adds using lea" optimization for incorrect
9921 code like:
9923 int a;
9924 int foo(int i)
9926 return *(&a+i);
9929 This code is nonsensical, but results in addressing the
9930 GOT table with a pic_offset_table_rtx base. We can't
9931 just refuse it easily, since it gets matched by the
9932 "addsi3" pattern, which later gets split to an lea when
9933 the output register differs from the input. While this
9934 could be handled by a separate addsi pattern for this case
9935 that never results in an lea, disabling this test seems to
9936 be the easier and correct fix for the crash. */
9938 else if (GET_CODE (disp) != LABEL_REF
9939 && !CONST_INT_P (disp)
9940 && (GET_CODE (disp) != CONST
9941 || !legitimate_constant_p (disp))
9942 && (GET_CODE (disp) != SYMBOL_REF
9943 || !legitimate_constant_p (disp)))
9944 /* Displacement is not constant. */
9945 return false;
9946 else if (TARGET_64BIT
9947 && !x86_64_immediate_operand (disp, VOIDmode))
9948 /* Displacement is out of range. */
9949 return false;
9952 /* Everything looks valid. */
9953 return true;
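/* A minimal sketch (illustration only, hypothetical names) of the scale
   checks above: scale 1 is always fine, any other scale requires an index
   register and must be one of 2, 4 or 8.  */
#if 0
static int
example_scale_ok (int have_index, long scale)
{
  if (scale == 1)
    return 1;
  if (!have_index)
    return 0;
  return scale == 2 || scale == 4 || scale == 8;
}
#endif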
9956 /* Determine if a given RTX is a valid constant address. */
9958 bool
9959 constant_address_p (rtx x)
9961 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
9964 /* Return a unique alias set for the GOT. */
9966 static alias_set_type
9967 ix86_GOT_alias_set (void)
9969 static alias_set_type set = -1;
9970 if (set == -1)
9971 set = new_alias_set ();
9972 return set;
9975 /* Return a legitimate reference for ORIG (an address) using the
9976 register REG. If REG is 0, a new pseudo is generated.
9978 There are two types of references that must be handled:
9980 1. Global data references must load the address from the GOT, via
9981 the PIC reg. An insn is emitted to do this load, and the reg is
9982 returned.
9984 2. Static data references, constant pool addresses, and code labels
9985 compute the address as an offset from the GOT, whose base is in
9986 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
9987 differentiate them from global data objects. The returned
9988 address is the PIC reg + an unspec constant.
9990 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
9991 reg also appears in the address. */
9993 static rtx
9994 legitimize_pic_address (rtx orig, rtx reg)
9996 rtx addr = orig;
9997 rtx new_rtx = orig;
9998 rtx base;
10000 #if TARGET_MACHO
10001 if (TARGET_MACHO && !TARGET_64BIT)
10003 if (reg == 0)
10004 reg = gen_reg_rtx (Pmode);
10005 /* Use the generic Mach-O PIC machinery. */
10006 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10008 #endif
10010 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10011 new_rtx = addr;
10012 else if (TARGET_64BIT
10013 && ix86_cmodel != CM_SMALL_PIC
10014 && gotoff_operand (addr, Pmode))
10016 rtx tmpreg;
10017 /* This symbol may be referenced via a displacement from the PIC
10018 base address (@GOTOFF). */
10020 if (reload_in_progress)
10021 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10022 if (GET_CODE (addr) == CONST)
10023 addr = XEXP (addr, 0);
10024 if (GET_CODE (addr) == PLUS)
10026 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10027 UNSPEC_GOTOFF);
10028 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10030 else
10031 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10032 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10033 if (!reg)
10034 tmpreg = gen_reg_rtx (Pmode);
10035 else
10036 tmpreg = reg;
10037 emit_move_insn (tmpreg, new_rtx);
10039 if (reg != 0)
10041 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10042 tmpreg, 1, OPTAB_DIRECT);
10043 new_rtx = reg;
10045 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10047 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10049 /* This symbol may be referenced via a displacement from the PIC
10050 base address (@GOTOFF). */
10052 if (reload_in_progress)
10053 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10054 if (GET_CODE (addr) == CONST)
10055 addr = XEXP (addr, 0);
10056 if (GET_CODE (addr) == PLUS)
10058 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10059 UNSPEC_GOTOFF);
10060 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10062 else
10063 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10064 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10065 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10067 if (reg != 0)
10069 emit_move_insn (reg, new_rtx);
10070 new_rtx = reg;
10073 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10074 /* We can't use @GOTOFF for text labels on VxWorks;
10075 see gotoff_operand. */
10076 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10078 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10080 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10081 return legitimize_dllimport_symbol (addr, true);
10082 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10083 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10084 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10086 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10087 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10091 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10093 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10094 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10095 new_rtx = gen_const_mem (Pmode, new_rtx);
10096 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10098 if (reg == 0)
10099 reg = gen_reg_rtx (Pmode);
10100 /* Use gen_movsi directly; otherwise the address is loaded
10101 into a register for CSE. We don't want to CSE these addresses;
10102 instead we CSE addresses from the GOT table, so skip this. */
10103 emit_insn (gen_movsi (reg, new_rtx));
10104 new_rtx = reg;
10106 else
10108 /* This symbol must be referenced via a load from the
10109 Global Offset Table (@GOT). */
10111 if (reload_in_progress)
10112 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10113 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10114 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10115 if (TARGET_64BIT)
10116 new_rtx = force_reg (Pmode, new_rtx);
10117 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10118 new_rtx = gen_const_mem (Pmode, new_rtx);
10119 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10121 if (reg == 0)
10122 reg = gen_reg_rtx (Pmode);
10123 emit_move_insn (reg, new_rtx);
10124 new_rtx = reg;
10127 else
10129 if (CONST_INT_P (addr)
10130 && !x86_64_immediate_operand (addr, VOIDmode))
10132 if (reg)
10134 emit_move_insn (reg, addr);
10135 new_rtx = reg;
10137 else
10138 new_rtx = force_reg (Pmode, addr);
10140 else if (GET_CODE (addr) == CONST)
10142 addr = XEXP (addr, 0);
10144 /* We must match stuff we generate before. Assume the only
10145 unspecs that can get here are ours. Not that we could do
10146 anything with them anyway.... */
10147 if (GET_CODE (addr) == UNSPEC
10148 || (GET_CODE (addr) == PLUS
10149 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10150 return orig;
10151 gcc_assert (GET_CODE (addr) == PLUS);
10153 if (GET_CODE (addr) == PLUS)
10155 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10157 /* Check first to see if this is a constant offset from a @GOTOFF
10158 symbol reference. */
10159 if (gotoff_operand (op0, Pmode)
10160 && CONST_INT_P (op1))
10162 if (!TARGET_64BIT)
10164 if (reload_in_progress)
10165 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10166 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10167 UNSPEC_GOTOFF);
10168 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10169 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10170 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10172 if (reg != 0)
10174 emit_move_insn (reg, new_rtx);
10175 new_rtx = reg;
10178 else
10180 if (INTVAL (op1) < -16*1024*1024
10181 || INTVAL (op1) >= 16*1024*1024)
10183 if (!x86_64_immediate_operand (op1, Pmode))
10184 op1 = force_reg (Pmode, op1);
10185 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10189 else
10191 base = legitimize_pic_address (XEXP (addr, 0), reg);
10192 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10193 base == reg ? NULL_RTX : reg);
10195 if (CONST_INT_P (new_rtx))
10196 new_rtx = plus_constant (base, INTVAL (new_rtx));
10197 else
10199 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10201 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10202 new_rtx = XEXP (new_rtx, 1);
10204 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10209 return new_rtx;
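/* A minimal sketch (hypothetical names, not GCC code) of the two reference
   kinds described in the comment before this function: local symbols become
   the PIC register plus a @GOTOFF offset, global symbols are loaded through
   their @GOT slot.  */
#if 0
enum example_pic_kind
{
  EXAMPLE_PIC_GOTOFF,  /* address = pic_reg + sym@GOTOFF */
  EXAMPLE_PIC_GOT      /* address = *(pic_reg + sym@GOT) */
};

static enum example_pic_kind
example_classify_pic_reference (int symbol_is_local)
{
  return symbol_is_local ? EXAMPLE_PIC_GOTOFF : EXAMPLE_PIC_GOT;
}
#endif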
10212 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10214 static rtx
10215 get_thread_pointer (int to_reg)
10217 rtx tp, reg, insn;
10219 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10220 if (!to_reg)
10221 return tp;
10223 reg = gen_reg_rtx (Pmode);
10224 insn = gen_rtx_SET (VOIDmode, reg, tp);
10225 insn = emit_insn (insn);
10227 return reg;
10230 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10231 false if we expect this to be used for a memory address and true if
10232 we expect to load the address into a register. */
10234 static rtx
10235 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10237 rtx dest, base, off, pic, tp;
10238 int type;
10240 switch (model)
10242 case TLS_MODEL_GLOBAL_DYNAMIC:
10243 dest = gen_reg_rtx (Pmode);
10244 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10246 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10248 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10250 start_sequence ();
10251 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10252 insns = get_insns ();
10253 end_sequence ();
10255 RTL_CONST_CALL_P (insns) = 1;
10256 emit_libcall_block (insns, dest, rax, x);
10258 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10259 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10260 else
10261 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10263 if (TARGET_GNU2_TLS)
10265 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10267 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10269 break;
10271 case TLS_MODEL_LOCAL_DYNAMIC:
10272 base = gen_reg_rtx (Pmode);
10273 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10275 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10277 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10279 start_sequence ();
10280 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10281 insns = get_insns ();
10282 end_sequence ();
10284 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10285 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10286 RTL_CONST_CALL_P (insns) = 1;
10287 emit_libcall_block (insns, base, rax, note);
10289 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10290 emit_insn (gen_tls_local_dynamic_base_64 (base));
10291 else
10292 emit_insn (gen_tls_local_dynamic_base_32 (base));
10294 if (TARGET_GNU2_TLS)
10296 rtx x = ix86_tls_module_base ();
10298 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10299 gen_rtx_MINUS (Pmode, x, tp));
10302 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10303 off = gen_rtx_CONST (Pmode, off);
10305 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10307 if (TARGET_GNU2_TLS)
10309 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10311 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10314 break;
10316 case TLS_MODEL_INITIAL_EXEC:
10317 if (TARGET_64BIT)
10319 pic = NULL;
10320 type = UNSPEC_GOTNTPOFF;
10322 else if (flag_pic)
10324 if (reload_in_progress)
10325 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10326 pic = pic_offset_table_rtx;
10327 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10329 else if (!TARGET_ANY_GNU_TLS)
10331 pic = gen_reg_rtx (Pmode);
10332 emit_insn (gen_set_got (pic));
10333 type = UNSPEC_GOTTPOFF;
10335 else
10337 pic = NULL;
10338 type = UNSPEC_INDNTPOFF;
10341 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10342 off = gen_rtx_CONST (Pmode, off);
10343 if (pic)
10344 off = gen_rtx_PLUS (Pmode, pic, off);
10345 off = gen_const_mem (Pmode, off);
10346 set_mem_alias_set (off, ix86_GOT_alias_set ());
10348 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10350 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10351 off = force_reg (Pmode, off);
10352 return gen_rtx_PLUS (Pmode, base, off);
10354 else
10356 base = get_thread_pointer (true);
10357 dest = gen_reg_rtx (Pmode);
10358 emit_insn (gen_subsi3 (dest, base, off));
10360 break;
10362 case TLS_MODEL_LOCAL_EXEC:
10363 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10364 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10365 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10366 off = gen_rtx_CONST (Pmode, off);
10368 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10370 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10371 return gen_rtx_PLUS (Pmode, base, off);
10373 else
10375 base = get_thread_pointer (true);
10376 dest = gen_reg_rtx (Pmode);
10377 emit_insn (gen_subsi3 (dest, base, off));
10379 break;
10381 default:
10382 gcc_unreachable ();
10385 return dest;
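/* A minimal sketch (hypothetical names) of the local-exec case above: the
   variable's address is simply the thread pointer plus a link-time constant
   offset (@TPOFF / @NTPOFF).  */
#if 0
static char *
example_local_exec_address (char *thread_pointer, long tpoff)
{
  return thread_pointer + tpoff;
}
#endif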
10388 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10389 to symbol DECL. */
10391 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10392 htab_t dllimport_map;
10394 static tree
10395 get_dllimport_decl (tree decl)
10397 struct tree_map *h, in;
10398 void **loc;
10399 const char *name;
10400 const char *prefix;
10401 size_t namelen, prefixlen;
10402 char *imp_name;
10403 tree to;
10404 rtx rtl;
10406 if (!dllimport_map)
10407 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10409 in.hash = htab_hash_pointer (decl);
10410 in.base.from = decl;
10411 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10412 h = (struct tree_map *) *loc;
10413 if (h)
10414 return h->to;
10416 *loc = h = GGC_NEW (struct tree_map);
10417 h->hash = in.hash;
10418 h->base.from = decl;
10419 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10420 VAR_DECL, NULL, ptr_type_node);
10421 DECL_ARTIFICIAL (to) = 1;
10422 DECL_IGNORED_P (to) = 1;
10423 DECL_EXTERNAL (to) = 1;
10424 TREE_READONLY (to) = 1;
10426 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10427 name = targetm.strip_name_encoding (name);
10428 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10429 ? "*__imp_" : "*__imp__";
10430 namelen = strlen (name);
10431 prefixlen = strlen (prefix);
10432 imp_name = (char *) alloca (namelen + prefixlen + 1);
10433 memcpy (imp_name, prefix, prefixlen);
10434 memcpy (imp_name + prefixlen, name, namelen + 1);
10436 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10437 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10438 SET_SYMBOL_REF_DECL (rtl, to);
10439 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10441 rtl = gen_const_mem (Pmode, rtl);
10442 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10444 SET_DECL_RTL (to, rtl);
10445 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10447 return to;
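/* A standalone sketch (hypothetical names, not GCC code) of the name
   mangling above: the dllimport pointer is the stripped assembler name with
   a "*__imp_" prefix, using an extra underscore on targets that prepend one
   to user labels.  */
#if 0
#include <stdio.h>

static void
example_imp_name (char *buf, size_t len, const char *name,
                  int user_labels_are_prefixed)
{
  snprintf (buf, len, "%s%s",
            user_labels_are_prefixed ? "*__imp__" : "*__imp_", name);
}
#endif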
10450 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10451 true if we require the result to be a register. */
10453 static rtx
10454 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10456 tree imp_decl;
10457 rtx x;
10459 gcc_assert (SYMBOL_REF_DECL (symbol));
10460 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10462 x = DECL_RTL (imp_decl);
10463 if (want_reg)
10464 x = force_reg (Pmode, x);
10465 return x;
10468 /* Try machine-dependent ways of modifying an illegitimate address
10469 to be legitimate. If we find one, return the new, valid address.
10470 This macro is used in only one place: `memory_address' in explow.c.
10472 OLDX is the address as it was before break_out_memory_refs was called.
10473 In some cases it is useful to look at this to decide what needs to be done.
10475 It is always safe for this macro to do nothing. It exists to recognize
10476 opportunities to optimize the output.
10478 For the 80386, we handle X+REG by loading X into a register R and
10479 using R+REG. R will go in a general reg and indexing will be used.
10480 However, if REG is a broken-out memory address or multiplication,
10481 nothing needs to be done because REG can certainly go in a general reg.
10483 When -fpic is used, special handling is needed for symbolic references.
10484 See comments by legitimize_pic_address in i386.c for details. */
10486 static rtx
10487 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10488 enum machine_mode mode)
10490 int changed = 0;
10491 unsigned log;
10493 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10494 if (log)
10495 return legitimize_tls_address (x, (enum tls_model) log, false);
10496 if (GET_CODE (x) == CONST
10497 && GET_CODE (XEXP (x, 0)) == PLUS
10498 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10499 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10501 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10502 (enum tls_model) log, false);
10503 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10506 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10508 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10509 return legitimize_dllimport_symbol (x, true);
10510 if (GET_CODE (x) == CONST
10511 && GET_CODE (XEXP (x, 0)) == PLUS
10512 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10513 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10515 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10516 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10520 if (flag_pic && SYMBOLIC_CONST (x))
10521 return legitimize_pic_address (x, 0);
10523 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10524 if (GET_CODE (x) == ASHIFT
10525 && CONST_INT_P (XEXP (x, 1))
10526 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10528 changed = 1;
10529 log = INTVAL (XEXP (x, 1));
10530 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10531 GEN_INT (1 << log));
10534 if (GET_CODE (x) == PLUS)
10536 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10538 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10539 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10540 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10542 changed = 1;
10543 log = INTVAL (XEXP (XEXP (x, 0), 1));
10544 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10545 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10546 GEN_INT (1 << log));
10549 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10550 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10551 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10553 changed = 1;
10554 log = INTVAL (XEXP (XEXP (x, 1), 1));
10555 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10556 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10557 GEN_INT (1 << log));
10560 /* Put multiply first if it isn't already. */
10561 if (GET_CODE (XEXP (x, 1)) == MULT)
10563 rtx tmp = XEXP (x, 0);
10564 XEXP (x, 0) = XEXP (x, 1);
10565 XEXP (x, 1) = tmp;
10566 changed = 1;
10569 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10570 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10571 created by virtual register instantiation, register elimination, and
10572 similar optimizations. */
10573 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10575 changed = 1;
10576 x = gen_rtx_PLUS (Pmode,
10577 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10578 XEXP (XEXP (x, 1), 0)),
10579 XEXP (XEXP (x, 1), 1));
10582 /* Canonicalize
10583 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10584 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10585 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10586 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10587 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10588 && CONSTANT_P (XEXP (x, 1)))
10590 rtx constant;
10591 rtx other = NULL_RTX;
10593 if (CONST_INT_P (XEXP (x, 1)))
10595 constant = XEXP (x, 1);
10596 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10598 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10600 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10601 other = XEXP (x, 1);
10603 else
10604 constant = 0;
10606 if (constant)
10608 changed = 1;
10609 x = gen_rtx_PLUS (Pmode,
10610 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10611 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10612 plus_constant (other, INTVAL (constant)));
10616 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10617 return x;
10619 if (GET_CODE (XEXP (x, 0)) == MULT)
10621 changed = 1;
10622 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10625 if (GET_CODE (XEXP (x, 1)) == MULT)
10627 changed = 1;
10628 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10631 if (changed
10632 && REG_P (XEXP (x, 1))
10633 && REG_P (XEXP (x, 0)))
10634 return x;
10636 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10638 changed = 1;
10639 x = legitimize_pic_address (x, 0);
10642 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10643 return x;
10645 if (REG_P (XEXP (x, 0)))
10647 rtx temp = gen_reg_rtx (Pmode);
10648 rtx val = force_operand (XEXP (x, 1), temp);
10649 if (val != temp)
10650 emit_move_insn (temp, val);
10652 XEXP (x, 1) = temp;
10653 return x;
10656 else if (REG_P (XEXP (x, 1)))
10658 rtx temp = gen_reg_rtx (Pmode);
10659 rtx val = force_operand (XEXP (x, 0), temp);
10660 if (val != temp)
10661 emit_move_insn (temp, val);
10663 XEXP (x, 0) = temp;
10664 return x;
10668 return x;
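/* A minimal sketch (illustration only) of the canonicalization above: an
   address shift by 0..3 becomes a multiply by 1, 2, 4 or 8, the only scale
   factors the addressing mode can encode.  */
#if 0
static long
example_canonicalize_shift (long value, int shift_count)
{
  if (shift_count >= 0 && shift_count < 4)
    return value * (1L << shift_count);  /* rewritten as a MULT */
  return value << shift_count;           /* left as an ASHIFT */
}
#endif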
10671 /* Print an integer constant expression in assembler syntax. Addition
10672 and subtraction are the only arithmetic that may appear in these
10673 expressions. FILE is the stdio stream to write to, X is the rtx, and
10674 CODE is the operand print code from the output string. */
10676 static void
10677 output_pic_addr_const (FILE *file, rtx x, int code)
10679 char buf[256];
10681 switch (GET_CODE (x))
10683 case PC:
10684 gcc_assert (flag_pic);
10685 putc ('.', file);
10686 break;
10688 case SYMBOL_REF:
10689 if (! TARGET_MACHO || TARGET_64BIT)
10690 output_addr_const (file, x);
10691 else
10693 const char *name = XSTR (x, 0);
10695 /* Mark the decl as referenced so that cgraph will
10696 output the function. */
10697 if (SYMBOL_REF_DECL (x))
10698 mark_decl_referenced (SYMBOL_REF_DECL (x));
10700 #if TARGET_MACHO
10701 if (MACHOPIC_INDIRECT
10702 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10703 name = machopic_indirection_name (x, /*stub_p=*/true);
10704 #endif
10705 assemble_name (file, name);
10707 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10708 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10709 fputs ("@PLT", file);
10710 break;
10712 case LABEL_REF:
10713 x = XEXP (x, 0);
10714 /* FALLTHRU */
10715 case CODE_LABEL:
10716 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10717 assemble_name (asm_out_file, buf);
10718 break;
10720 case CONST_INT:
10721 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10722 break;
10724 case CONST:
10725 /* This used to output parentheses around the expression,
10726 but that does not work on the 386 (either ATT or BSD assembler). */
10727 output_pic_addr_const (file, XEXP (x, 0), code);
10728 break;
10730 case CONST_DOUBLE:
10731 if (GET_MODE (x) == VOIDmode)
10733 /* We can use %d if the number is <32 bits and positive. */
10734 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10735 fprintf (file, "0x%lx%08lx",
10736 (unsigned long) CONST_DOUBLE_HIGH (x),
10737 (unsigned long) CONST_DOUBLE_LOW (x));
10738 else
10739 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10741 else
10742 /* We can't handle floating point constants;
10743 PRINT_OPERAND must handle them. */
10744 output_operand_lossage ("floating constant misused");
10745 break;
10747 case PLUS:
10748 /* Some assemblers need integer constants to appear first. */
10749 if (CONST_INT_P (XEXP (x, 0)))
10751 output_pic_addr_const (file, XEXP (x, 0), code);
10752 putc ('+', file);
10753 output_pic_addr_const (file, XEXP (x, 1), code);
10755 else
10757 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10758 output_pic_addr_const (file, XEXP (x, 1), code);
10759 putc ('+', file);
10760 output_pic_addr_const (file, XEXP (x, 0), code);
10762 break;
10764 case MINUS:
10765 if (!TARGET_MACHO)
10766 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10767 output_pic_addr_const (file, XEXP (x, 0), code);
10768 putc ('-', file);
10769 output_pic_addr_const (file, XEXP (x, 1), code);
10770 if (!TARGET_MACHO)
10771 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10772 break;
10774 case UNSPEC:
10775 gcc_assert (XVECLEN (x, 0) == 1);
10776 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10777 switch (XINT (x, 1))
10779 case UNSPEC_GOT:
10780 fputs ("@GOT", file);
10781 break;
10782 case UNSPEC_GOTOFF:
10783 fputs ("@GOTOFF", file);
10784 break;
10785 case UNSPEC_PLTOFF:
10786 fputs ("@PLTOFF", file);
10787 break;
10788 case UNSPEC_GOTPCREL:
10789 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10790 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10791 break;
10792 case UNSPEC_GOTTPOFF:
10793 /* FIXME: This might be @TPOFF in Sun ld too. */
10794 fputs ("@GOTTPOFF", file);
10795 break;
10796 case UNSPEC_TPOFF:
10797 fputs ("@TPOFF", file);
10798 break;
10799 case UNSPEC_NTPOFF:
10800 if (TARGET_64BIT)
10801 fputs ("@TPOFF", file);
10802 else
10803 fputs ("@NTPOFF", file);
10804 break;
10805 case UNSPEC_DTPOFF:
10806 fputs ("@DTPOFF", file);
10807 break;
10808 case UNSPEC_GOTNTPOFF:
10809 if (TARGET_64BIT)
10810 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10811 "@GOTTPOFF(%rip)": "@GOTTPOFF[rip]", file);
10812 else
10813 fputs ("@GOTNTPOFF", file);
10814 break;
10815 case UNSPEC_INDNTPOFF:
10816 fputs ("@INDNTPOFF", file);
10817 break;
10818 #if TARGET_MACHO
10819 case UNSPEC_MACHOPIC_OFFSET:
10820 putc ('-', file);
10821 machopic_output_function_base_name (file);
10822 break;
10823 #endif
10824 default:
10825 output_operand_lossage ("invalid UNSPEC as operand");
10826 break;
10828 break;
10830 default:
10831 output_operand_lossage ("invalid expression as operand");
10835 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10836 We need to emit DTP-relative relocations. */
10838 static void ATTRIBUTE_UNUSED
10839 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10841 fputs (ASM_LONG, file);
10842 output_addr_const (file, x);
10843 fputs ("@DTPOFF", file);
10844 switch (size)
10846 case 4:
10847 break;
10848 case 8:
10849 fputs (", 0", file);
10850 break;
10851 default:
10852 gcc_unreachable ();
10856 /* Return true if X is a representation of the PIC register. This copes
10857 with calls from ix86_find_base_term, where the register might have
10858 been replaced by a cselib value. */
10860 static bool
10861 ix86_pic_register_p (rtx x)
10863 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
10864 return (pic_offset_table_rtx
10865 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10866 else
10867 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10870 /* In the name of slightly smaller debug output, and to cater to
10871 general assembler lossage, recognize PIC+GOTOFF and turn it back
10872 into a direct symbol reference.
10874 On Darwin, this is necessary to avoid a crash, because Darwin
10875 has a different PIC label for each routine but the DWARF debugging
10876 information is not associated with any particular routine, so it's
10877 necessary to remove references to the PIC label from RTL stored by
10878 the DWARF output code. */
10880 static rtx
10881 ix86_delegitimize_address (rtx x)
10883 rtx orig_x = delegitimize_mem_from_attrs (x);
10884 /* reg_addend is NULL or a multiple of some register. */
10885 rtx reg_addend = NULL_RTX;
10886 /* const_addend is NULL or a const_int. */
10887 rtx const_addend = NULL_RTX;
10888 /* This is the result, or NULL. */
10889 rtx result = NULL_RTX;
10891 x = orig_x;
10893 if (MEM_P (x))
10894 x = XEXP (x, 0);
10896 if (TARGET_64BIT)
10898 if (GET_CODE (x) != CONST
10899 || GET_CODE (XEXP (x, 0)) != UNSPEC
10900 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
10901 || !MEM_P (orig_x))
10902 return orig_x;
10903 return XVECEXP (XEXP (x, 0), 0, 0);
10906 if (GET_CODE (x) != PLUS
10907 || GET_CODE (XEXP (x, 1)) != CONST)
10908 return orig_x;
10910 if (ix86_pic_register_p (XEXP (x, 0)))
10911 /* %ebx + GOT/GOTOFF */
10913 else if (GET_CODE (XEXP (x, 0)) == PLUS)
10915 /* %ebx + %reg * scale + GOT/GOTOFF */
10916 reg_addend = XEXP (x, 0);
10917 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
10918 reg_addend = XEXP (reg_addend, 1);
10919 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
10920 reg_addend = XEXP (reg_addend, 0);
10921 else
10922 return orig_x;
10923 if (!REG_P (reg_addend)
10924 && GET_CODE (reg_addend) != MULT
10925 && GET_CODE (reg_addend) != ASHIFT)
10926 return orig_x;
10928 else
10929 return orig_x;
10931 x = XEXP (XEXP (x, 1), 0);
10932 if (GET_CODE (x) == PLUS
10933 && CONST_INT_P (XEXP (x, 1)))
10935 const_addend = XEXP (x, 1);
10936 x = XEXP (x, 0);
10939 if (GET_CODE (x) == UNSPEC
10940 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
10941 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
10942 result = XVECEXP (x, 0, 0);
10944 if (TARGET_MACHO && darwin_local_data_pic (x)
10945 && !MEM_P (orig_x))
10946 result = XVECEXP (x, 0, 0);
10948 if (! result)
10949 return orig_x;
10951 if (const_addend)
10952 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
10953 if (reg_addend)
10954 result = gen_rtx_PLUS (Pmode, reg_addend, result);
10955 return result;
10958 /* If X is a machine specific address (i.e. a symbol or label being
10959 referenced as a displacement from the GOT implemented using an
10960 UNSPEC), then return the base term. Otherwise return X. */
10963 ix86_find_base_term (rtx x)
10965 rtx term;
10967 if (TARGET_64BIT)
10969 if (GET_CODE (x) != CONST)
10970 return x;
10971 term = XEXP (x, 0);
10972 if (GET_CODE (term) == PLUS
10973 && (CONST_INT_P (XEXP (term, 1))
10974 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
10975 term = XEXP (term, 0);
10976 if (GET_CODE (term) != UNSPEC
10977 || XINT (term, 1) != UNSPEC_GOTPCREL)
10978 return x;
10980 return XVECEXP (term, 0, 0);
10983 return ix86_delegitimize_address (x);
10986 static void
10987 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
10988 int fp, FILE *file)
10990 const char *suffix;
10992 if (mode == CCFPmode || mode == CCFPUmode)
10994 code = ix86_fp_compare_code_to_integer (code);
10995 mode = CCmode;
10997 if (reverse)
10998 code = reverse_condition (code);
11000 switch (code)
11002 case EQ:
11003 switch (mode)
11005 case CCAmode:
11006 suffix = "a";
11007 break;
11009 case CCCmode:
11010 suffix = "c";
11011 break;
11013 case CCOmode:
11014 suffix = "o";
11015 break;
11017 case CCSmode:
11018 suffix = "s";
11019 break;
11021 default:
11022 suffix = "e";
11024 break;
11025 case NE:
11026 switch (mode)
11028 case CCAmode:
11029 suffix = "na";
11030 break;
11032 case CCCmode:
11033 suffix = "nc";
11034 break;
11036 case CCOmode:
11037 suffix = "no";
11038 break;
11040 case CCSmode:
11041 suffix = "ns";
11042 break;
11044 default:
11045 suffix = "ne";
11047 break;
11048 case GT:
11049 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11050 suffix = "g";
11051 break;
11052 case GTU:
11053 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11054 Those same assemblers have the same but opposite lossage on cmov. */
11055 if (mode == CCmode)
11056 suffix = fp ? "nbe" : "a";
11057 else if (mode == CCCmode)
11058 suffix = "b";
11059 else
11060 gcc_unreachable ();
11061 break;
11062 case LT:
11063 switch (mode)
11065 case CCNOmode:
11066 case CCGOCmode:
11067 suffix = "s";
11068 break;
11070 case CCmode:
11071 case CCGCmode:
11072 suffix = "l";
11073 break;
11075 default:
11076 gcc_unreachable ();
11078 break;
11079 case LTU:
11080 gcc_assert (mode == CCmode || mode == CCCmode);
11081 suffix = "b";
11082 break;
11083 case GE:
11084 switch (mode)
11086 case CCNOmode:
11087 case CCGOCmode:
11088 suffix = "ns";
11089 break;
11091 case CCmode:
11092 case CCGCmode:
11093 suffix = "ge";
11094 break;
11096 default:
11097 gcc_unreachable ();
11099 break;
11100 case GEU:
11101 /* ??? As above. */
11102 gcc_assert (mode == CCmode || mode == CCCmode);
11103 suffix = fp ? "nb" : "ae";
11104 break;
11105 case LE:
11106 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11107 suffix = "le";
11108 break;
11109 case LEU:
11110 /* ??? As above. */
11111 if (mode == CCmode)
11112 suffix = "be";
11113 else if (mode == CCCmode)
11114 suffix = fp ? "nb" : "ae";
11115 else
11116 gcc_unreachable ();
11117 break;
11118 case UNORDERED:
11119 suffix = fp ? "u" : "p";
11120 break;
11121 case ORDERED:
11122 suffix = fp ? "nu" : "np";
11123 break;
11124 default:
11125 gcc_unreachable ();
11127 fputs (suffix, file);
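/* A standalone sketch (hypothetical names) of the suffix selection above for
   the plain CCmode signed comparisons: EQ..GT map to the familiar
   e/ne/l/ge/le/g condition-code suffixes.  */
#if 0
enum example_cmp { EX_EQ, EX_NE, EX_LT, EX_GE, EX_LE, EX_GT };

static const char *
example_signed_suffix (enum example_cmp code)
{
  static const char *const suffixes[] = { "e", "ne", "l", "ge", "le", "g" };
  return suffixes[code];
}
#endif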
11130 /* Print the name of register X to FILE based on its machine mode and number.
11131 If CODE is 'w', pretend the mode is HImode.
11132 If CODE is 'b', pretend the mode is QImode.
11133 If CODE is 'k', pretend the mode is SImode.
11134 If CODE is 'q', pretend the mode is DImode.
11135 If CODE is 'x', pretend the mode is V4SFmode.
11136 If CODE is 't', pretend the mode is V8SFmode.
11137 If CODE is 'h', pretend the reg is the 'high' byte register.
 11138    If CODE is 'y', print "st(0)" instead of "st" if the reg is a stack operand.
11139 If CODE is 'd', duplicate the operand for AVX instruction.
11142 void
11143 print_reg (rtx x, int code, FILE *file)
11145 const char *reg;
11146 bool duplicated = code == 'd' && TARGET_AVX;
11148 gcc_assert (x == pc_rtx
11149 || (REGNO (x) != ARG_POINTER_REGNUM
11150 && REGNO (x) != FRAME_POINTER_REGNUM
11151 && REGNO (x) != FLAGS_REG
11152 && REGNO (x) != FPSR_REG
11153 && REGNO (x) != FPCR_REG));
11155 if (ASSEMBLER_DIALECT == ASM_ATT)
11156 putc ('%', file);
11158 if (x == pc_rtx)
11160 gcc_assert (TARGET_64BIT);
11161 fputs ("rip", file);
11162 return;
11165 if (code == 'w' || MMX_REG_P (x))
11166 code = 2;
11167 else if (code == 'b')
11168 code = 1;
11169 else if (code == 'k')
11170 code = 4;
11171 else if (code == 'q')
11172 code = 8;
11173 else if (code == 'y')
11174 code = 3;
11175 else if (code == 'h')
11176 code = 0;
11177 else if (code == 'x')
11178 code = 16;
11179 else if (code == 't')
11180 code = 32;
11181 else
11182 code = GET_MODE_SIZE (GET_MODE (x));
 11184   /* Irritatingly, the AMD extended registers use a different naming convention
 11185      from the normal registers.  */
11186 if (REX_INT_REG_P (x))
11188 gcc_assert (TARGET_64BIT);
11189 switch (code)
11191 case 0:
11192 error ("extended registers have no high halves");
11193 break;
11194 case 1:
11195 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11196 break;
11197 case 2:
11198 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11199 break;
11200 case 4:
11201 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11202 break;
11203 case 8:
11204 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11205 break;
11206 default:
11207 error ("unsupported operand size for extended register");
11208 break;
11210 return;
11213 reg = NULL;
11214 switch (code)
11216 case 3:
11217 if (STACK_TOP_P (x))
11219 reg = "st(0)";
11220 break;
11222 /* FALLTHRU */
11223 case 8:
11224 case 4:
11225 case 12:
11226 if (! ANY_FP_REG_P (x))
11227 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11228 /* FALLTHRU */
11229 case 16:
11230 case 2:
11231 normal:
11232 reg = hi_reg_name[REGNO (x)];
11233 break;
11234 case 1:
11235 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11236 goto normal;
11237 reg = qi_reg_name[REGNO (x)];
11238 break;
11239 case 0:
11240 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11241 goto normal;
11242 reg = qi_high_reg_name[REGNO (x)];
11243 break;
11244 case 32:
11245 if (SSE_REG_P (x))
11247 gcc_assert (!duplicated);
11248 putc ('y', file);
11249 fputs (hi_reg_name[REGNO (x)] + 1, file);
11250 return;
11252 break;
11253 default:
11254 gcc_unreachable ();
11257 fputs (reg, file);
11258 if (duplicated)
11260 if (ASSEMBLER_DIALECT == ASM_ATT)
11261 fprintf (file, ", %%%s", reg);
11262 else
11263 fprintf (file, ", %s", reg);
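/* A rough example: for hard register %ax in the ATT dialect the code
   letters select "%al" ('b'), "%ax" ('w'), "%eax" ('k') and, on 64-bit
   targets, "%rax" ('q'); the REX registers print as "%r8b", "%r8w",
   "%r8d" and "%r8" instead.  */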
11267 /* Locate some local-dynamic symbol still in use by this function
11268 so that we can print its name in some tls_local_dynamic_base
11269 pattern. */
11271 static int
11272 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11274 rtx x = *px;
11276 if (GET_CODE (x) == SYMBOL_REF
11277 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11279 cfun->machine->some_ld_name = XSTR (x, 0);
11280 return 1;
11283 return 0;
11286 static const char *
11287 get_some_local_dynamic_name (void)
11289 rtx insn;
11291 if (cfun->machine->some_ld_name)
11292 return cfun->machine->some_ld_name;
11294 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11295 if (INSN_P (insn)
11296 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11297 return cfun->machine->some_ld_name;
11299 return NULL;
11302 /* Meaning of CODE:
11303 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11304 C -- print opcode suffix for set/cmov insn.
11305 c -- like C, but print reversed condition
11306 E,e -- likewise, but for compare-and-branch fused insn.
11307 F,f -- likewise, but for floating-point.
11308 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11309 otherwise nothing
11310 R -- print the prefix for register names.
11311 z -- print the opcode suffix for the size of the current operand.
11312 Z -- likewise, with special suffixes for x87 instructions.
11313 * -- print a star (in certain assembler syntax)
11314 A -- print an absolute memory reference.
11315 w -- print the operand as if it's a "word" (HImode) even if it isn't.
 11316    s -- print a shift double count, followed by the assembler's argument
 11317         delimiter.
11318 b -- print the QImode name of the register for the indicated operand.
11319 %b0 would print %al if operands[0] is reg 0.
11320 w -- likewise, print the HImode name of the register.
11321 k -- likewise, print the SImode name of the register.
11322 q -- likewise, print the DImode name of the register.
11323 x -- likewise, print the V4SFmode name of the register.
11324 t -- likewise, print the V8SFmode name of the register.
11325 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11326 y -- print "st(0)" instead of "st" as a register.
11327 d -- print duplicated register operand for AVX instruction.
11328 D -- print condition for SSE cmp instruction.
11329 P -- if PIC, print an @PLT suffix.
11330 X -- don't print any sort of PIC '@' suffix for a symbol.
11331 & -- print some in-use local-dynamic symbol name.
11332 H -- print a memory address offset by 8; used for sse high-parts
11333 Y -- print condition for XOP pcom* instruction.
11334 + -- print a branch hint as 'cs' or 'ds' prefix
11335 ; -- print a semicolon (after prefixes due to bug in older gas).
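/* As an example of how these codes combine, the two-dialect template
      "add%z0\t{%1, %0|%0, %1}"
   prints "addl" for SImode operands and emits the immediate last in ATT
   syntax but first in Intel syntax, while "%b0", "%w0" and "%k0" force
   the byte, word and doubleword names of the same register.  */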
11338 void
11339 print_operand (FILE *file, rtx x, int code)
11341 if (code)
11343 switch (code)
11345 case '*':
11346 if (ASSEMBLER_DIALECT == ASM_ATT)
11347 putc ('*', file);
11348 return;
11350 case '&':
11352 const char *name = get_some_local_dynamic_name ();
11353 if (name == NULL)
11354 output_operand_lossage ("'%%&' used without any "
11355 "local dynamic TLS references");
11356 else
11357 assemble_name (file, name);
11358 return;
11361 case 'A':
11362 switch (ASSEMBLER_DIALECT)
11364 case ASM_ATT:
11365 putc ('*', file);
11366 break;
11368 case ASM_INTEL:
11369 /* Intel syntax. For absolute addresses, registers should not
11370 be surrounded by braces. */
11371 if (!REG_P (x))
11373 putc ('[', file);
11374 PRINT_OPERAND (file, x, 0);
11375 putc (']', file);
11376 return;
11378 break;
11380 default:
11381 gcc_unreachable ();
11384 PRINT_OPERAND (file, x, 0);
11385 return;
11388 case 'L':
11389 if (ASSEMBLER_DIALECT == ASM_ATT)
11390 putc ('l', file);
11391 return;
11393 case 'W':
11394 if (ASSEMBLER_DIALECT == ASM_ATT)
11395 putc ('w', file);
11396 return;
11398 case 'B':
11399 if (ASSEMBLER_DIALECT == ASM_ATT)
11400 putc ('b', file);
11401 return;
11403 case 'Q':
11404 if (ASSEMBLER_DIALECT == ASM_ATT)
11405 putc ('l', file);
11406 return;
11408 case 'S':
11409 if (ASSEMBLER_DIALECT == ASM_ATT)
11410 putc ('s', file);
11411 return;
11413 case 'T':
11414 if (ASSEMBLER_DIALECT == ASM_ATT)
11415 putc ('t', file);
11416 return;
11418 case 'z':
11419 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
 11421        /* Opcodes don't get size suffixes when using Intel syntax.  */
11422 if (ASSEMBLER_DIALECT == ASM_INTEL)
11423 return;
11425 switch (GET_MODE_SIZE (GET_MODE (x)))
11427 case 1:
11428 putc ('b', file);
11429 return;
11431 case 2:
11432 putc ('w', file);
11433 return;
11435 case 4:
11436 putc ('l', file);
11437 return;
11439 case 8:
11440 putc ('q', file);
11441 return;
11443 default:
11444 output_operand_lossage
11445 ("invalid operand size for operand code '%c'", code);
11446 return;
11450 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11451 warning
11452 (0, "non-integer operand used with operand code '%c'", code);
11453 /* FALLTHRU */
11455 case 'Z':
 11456      /* 387 opcodes don't get size suffixes when using Intel syntax.  */
11457 if (ASSEMBLER_DIALECT == ASM_INTEL)
11458 return;
11460 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11462 switch (GET_MODE_SIZE (GET_MODE (x)))
11464 case 2:
11465 #ifdef HAVE_AS_IX86_FILDS
11466 putc ('s', file);
11467 #endif
11468 return;
11470 case 4:
11471 putc ('l', file);
11472 return;
11474 case 8:
11475 #ifdef HAVE_AS_IX86_FILDQ
11476 putc ('q', file);
11477 #else
11478 fputs ("ll", file);
11479 #endif
11480 return;
11482 default:
11483 break;
11486 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11488 /* 387 opcodes don't get size suffixes
11489 if the operands are registers. */
11490 if (STACK_REG_P (x))
11491 return;
11493 switch (GET_MODE_SIZE (GET_MODE (x)))
11495 case 4:
11496 putc ('s', file);
11497 return;
11499 case 8:
11500 putc ('l', file);
11501 return;
11503 case 12:
11504 case 16:
11505 putc ('t', file);
11506 return;
11508 default:
11509 break;
11512 else
11514 output_operand_lossage
11515 ("invalid operand type used with operand code '%c'", code);
11516 return;
11519 output_operand_lossage
11520 ("invalid operand size for operand code '%c'", code);
11521 return;
11523 case 'd':
11524 case 'b':
11525 case 'w':
11526 case 'k':
11527 case 'q':
11528 case 'h':
11529 case 't':
11530 case 'y':
11531 case 'x':
11532 case 'X':
11533 case 'P':
11534 break;
11536 case 's':
11537 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11539 PRINT_OPERAND (file, x, 0);
11540 fputs (", ", file);
11542 return;
11544 case 'D':
 11545      /* A little bit of brain damage here: the SSE compare instructions
 11546         use completely different names for the comparisons than the
 11547         fp conditional moves do.  */
11548 if (TARGET_AVX)
11550 switch (GET_CODE (x))
11552 case EQ:
11553 fputs ("eq", file);
11554 break;
11555 case UNEQ:
11556 fputs ("eq_us", file);
11557 break;
11558 case LT:
11559 fputs ("lt", file);
11560 break;
11561 case UNLT:
11562 fputs ("nge", file);
11563 break;
11564 case LE:
11565 fputs ("le", file);
11566 break;
11567 case UNLE:
11568 fputs ("ngt", file);
11569 break;
11570 case UNORDERED:
11571 fputs ("unord", file);
11572 break;
11573 case NE:
11574 fputs ("neq", file);
11575 break;
11576 case LTGT:
11577 fputs ("neq_oq", file);
11578 break;
11579 case GE:
11580 fputs ("ge", file);
11581 break;
11582 case UNGE:
11583 fputs ("nlt", file);
11584 break;
11585 case GT:
11586 fputs ("gt", file);
11587 break;
11588 case UNGT:
11589 fputs ("nle", file);
11590 break;
11591 case ORDERED:
11592 fputs ("ord", file);
11593 break;
11594 default:
11595 output_operand_lossage ("operand is not a condition code, "
11596 "invalid operand code 'D'");
11597 return;
11600 else
11602 switch (GET_CODE (x))
11604 case EQ:
11605 case UNEQ:
11606 fputs ("eq", file);
11607 break;
11608 case LT:
11609 case UNLT:
11610 fputs ("lt", file);
11611 break;
11612 case LE:
11613 case UNLE:
11614 fputs ("le", file);
11615 break;
11616 case UNORDERED:
11617 fputs ("unord", file);
11618 break;
11619 case NE:
11620 case LTGT:
11621 fputs ("neq", file);
11622 break;
11623 case UNGE:
11624 case GE:
11625 fputs ("nlt", file);
11626 break;
11627 case UNGT:
11628 case GT:
11629 fputs ("nle", file);
11630 break;
11631 case ORDERED:
11632 fputs ("ord", file);
11633 break;
11634 default:
11635 output_operand_lossage ("operand is not a condition code, "
11636 "invalid operand code 'D'");
11637 return;
11640 return;
11641 case 'O':
11642 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11643 if (ASSEMBLER_DIALECT == ASM_ATT)
11645 switch (GET_MODE (x))
11647 case HImode: putc ('w', file); break;
11648 case SImode:
11649 case SFmode: putc ('l', file); break;
11650 case DImode:
11651 case DFmode: putc ('q', file); break;
11652 default: gcc_unreachable ();
11654 putc ('.', file);
11656 #endif
11657 return;
11658 case 'C':
11659 if (!COMPARISON_P (x))
11661 output_operand_lossage ("operand is neither a constant nor a "
11662 "condition code, invalid operand code "
11663 "'C'");
11664 return;
11666 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11667 return;
11668 case 'F':
11669 if (!COMPARISON_P (x))
11671 output_operand_lossage ("operand is neither a constant nor a "
11672 "condition code, invalid operand code "
11673 "'F'");
11674 return;
11676 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11677 if (ASSEMBLER_DIALECT == ASM_ATT)
11678 putc ('.', file);
11679 #endif
11680 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11681 return;
11683 /* Like above, but reverse condition */
11684 case 'c':
11685 /* Check to see if argument to %c is really a constant
11686 and not a condition code which needs to be reversed. */
11687 if (!COMPARISON_P (x))
11689 output_operand_lossage ("operand is neither a constant nor a "
11690 "condition code, invalid operand "
11691 "code 'c'");
11692 return;
11694 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11695 return;
11696 case 'f':
11697 if (!COMPARISON_P (x))
11699 output_operand_lossage ("operand is neither a constant nor a "
11700 "condition code, invalid operand "
11701 "code 'f'");
11702 return;
11704 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11705 if (ASSEMBLER_DIALECT == ASM_ATT)
11706 putc ('.', file);
11707 #endif
11708 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11709 return;
11711 case 'E':
11712 put_condition_code (GET_CODE (x), CCmode, 0, 0, file);
11713 return;
11715 case 'e':
11716 put_condition_code (GET_CODE (x), CCmode, 1, 0, file);
11717 return;
11719 case 'H':
11720 /* It doesn't actually matter what mode we use here, as we're
11721 only going to use this for printing. */
11722 x = adjust_address_nv (x, DImode, 8);
11723 break;
11725 case '+':
11727 rtx x;
11729 if (!optimize
11730 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11731 return;
11733 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11734 if (x)
11736 int pred_val = INTVAL (XEXP (x, 0));
11738 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11739 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11741 int taken = pred_val > REG_BR_PROB_BASE / 2;
11742 int cputaken = final_forward_branch_p (current_output_insn) == 0;
 11744              /* Emit hints only in the case the default branch prediction
 11745                 heuristics would fail.  */
11746 if (taken != cputaken)
11748 /* We use 3e (DS) prefix for taken branches and
11749 2e (CS) prefix for not taken branches. */
11750 if (taken)
11751 fputs ("ds ; ", file);
11752 else
11753 fputs ("cs ; ", file);
11757 return;
11760 case 'Y':
11761 switch (GET_CODE (x))
11763 case NE:
11764 fputs ("neq", file);
11765 break;
11766 case EQ:
11767 fputs ("eq", file);
11768 break;
11769 case GE:
11770 case GEU:
11771 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11772 break;
11773 case GT:
11774 case GTU:
11775 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11776 break;
11777 case LE:
11778 case LEU:
11779 fputs ("le", file);
11780 break;
11781 case LT:
11782 case LTU:
11783 fputs ("lt", file);
11784 break;
11785 case UNORDERED:
11786 fputs ("unord", file);
11787 break;
11788 case ORDERED:
11789 fputs ("ord", file);
11790 break;
11791 case UNEQ:
11792 fputs ("ueq", file);
11793 break;
11794 case UNGE:
11795 fputs ("nlt", file);
11796 break;
11797 case UNGT:
11798 fputs ("nle", file);
11799 break;
11800 case UNLE:
11801 fputs ("ule", file);
11802 break;
11803 case UNLT:
11804 fputs ("ult", file);
11805 break;
11806 case LTGT:
11807 fputs ("une", file);
11808 break;
11809 default:
11810 output_operand_lossage ("operand is not a condition code, "
11811 "invalid operand code 'Y'");
11812 return;
11814 return;
11816 case ';':
11817 #if TARGET_MACHO
11818 fputs (" ; ", file);
11819 #else
11820 putc (' ', file);
11821 #endif
11822 return;
11824 default:
11825 output_operand_lossage ("invalid operand code '%c'", code);
11829 if (REG_P (x))
11830 print_reg (x, code, file);
11832 else if (MEM_P (x))
11834 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11835 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11836 && GET_MODE (x) != BLKmode)
11838 const char * size;
11839 switch (GET_MODE_SIZE (GET_MODE (x)))
11841 case 1: size = "BYTE"; break;
11842 case 2: size = "WORD"; break;
11843 case 4: size = "DWORD"; break;
11844 case 8: size = "QWORD"; break;
11845 case 12: size = "TBYTE"; break;
11846 case 16:
11847 if (GET_MODE (x) == XFmode)
11848 size = "TBYTE";
11849 else
11850 size = "XMMWORD";
11851 break;
11852 case 32: size = "YMMWORD"; break;
11853 default:
11854 gcc_unreachable ();
11857 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11858 if (code == 'b')
11859 size = "BYTE";
11860 else if (code == 'w')
11861 size = "WORD";
11862 else if (code == 'k')
11863 size = "DWORD";
11865 fputs (size, file);
11866 fputs (" PTR ", file);
11869 x = XEXP (x, 0);
11870 /* Avoid (%rip) for call operands. */
11871 if (CONSTANT_ADDRESS_P (x) && code == 'P'
11872 && !CONST_INT_P (x))
11873 output_addr_const (file, x);
11874 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
11875 output_operand_lossage ("invalid constraints for operand");
11876 else
11877 output_address (x);
11880 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
11882 REAL_VALUE_TYPE r;
11883 long l;
11885 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11886 REAL_VALUE_TO_TARGET_SINGLE (r, l);
11888 if (ASSEMBLER_DIALECT == ASM_ATT)
11889 putc ('$', file);
11890 fprintf (file, "0x%08lx", (long unsigned int) l);
11893 /* These float cases don't actually occur as immediate operands. */
11894 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
11896 char dstr[30];
11898 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11899 fputs (dstr, file);
11902 else if (GET_CODE (x) == CONST_DOUBLE
11903 && GET_MODE (x) == XFmode)
11905 char dstr[30];
11907 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11908 fputs (dstr, file);
11911 else
11913 /* We have patterns that allow zero sets of memory, for instance.
11914 In 64-bit mode, we should probably support all 8-byte vectors,
11915 since we can in fact encode that into an immediate. */
11916 if (GET_CODE (x) == CONST_VECTOR)
11918 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
11919 x = const0_rtx;
11922 if (code != 'P')
11924 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
11926 if (ASSEMBLER_DIALECT == ASM_ATT)
11927 putc ('$', file);
11929 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
11930 || GET_CODE (x) == LABEL_REF)
11932 if (ASSEMBLER_DIALECT == ASM_ATT)
11933 putc ('$', file);
11934 else
11935 fputs ("OFFSET FLAT:", file);
11938 if (CONST_INT_P (x))
11939 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
11940 else if (flag_pic)
11941 output_pic_addr_const (file, x, code);
11942 else
11943 output_addr_const (file, x);
11947 /* Print a memory operand whose address is ADDR. */
11949 void
11950 print_operand_address (FILE *file, rtx addr)
11952 struct ix86_address parts;
11953 rtx base, index, disp;
11954 int scale;
11955 int ok = ix86_decompose_address (addr, &parts);
11957 gcc_assert (ok);
11959 base = parts.base;
11960 index = parts.index;
11961 disp = parts.disp;
11962 scale = parts.scale;
11964 switch (parts.seg)
11966 case SEG_DEFAULT:
11967 break;
11968 case SEG_FS:
11969 case SEG_GS:
11970 if (ASSEMBLER_DIALECT == ASM_ATT)
11971 putc ('%', file);
11972 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
11973 break;
11974 default:
11975 gcc_unreachable ();
 11978   /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode.  */
11979 if (TARGET_64BIT && !base && !index)
11981 rtx symbol = disp;
11983 if (GET_CODE (disp) == CONST
11984 && GET_CODE (XEXP (disp, 0)) == PLUS
11985 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11986 symbol = XEXP (XEXP (disp, 0), 0);
11988 if (GET_CODE (symbol) == LABEL_REF
11989 || (GET_CODE (symbol) == SYMBOL_REF
11990 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
11991 base = pc_rtx;
11993 if (!base && !index)
 11995       /* A displacement-only address requires special attention.  */
11997 if (CONST_INT_P (disp))
11999 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12000 fputs ("ds:", file);
12001 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12003 else if (flag_pic)
12004 output_pic_addr_const (file, disp, 0);
12005 else
12006 output_addr_const (file, disp);
12008 else
12010 if (ASSEMBLER_DIALECT == ASM_ATT)
12012 if (disp)
12014 if (flag_pic)
12015 output_pic_addr_const (file, disp, 0);
12016 else if (GET_CODE (disp) == LABEL_REF)
12017 output_asm_label (disp);
12018 else
12019 output_addr_const (file, disp);
12022 putc ('(', file);
12023 if (base)
12024 print_reg (base, 0, file);
12025 if (index)
12027 putc (',', file);
12028 print_reg (index, 0, file);
12029 if (scale != 1)
12030 fprintf (file, ",%d", scale);
12032 putc (')', file);
12034 else
12036 rtx offset = NULL_RTX;
12038 if (disp)
12040 /* Pull out the offset of a symbol; print any symbol itself. */
12041 if (GET_CODE (disp) == CONST
12042 && GET_CODE (XEXP (disp, 0)) == PLUS
12043 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12045 offset = XEXP (XEXP (disp, 0), 1);
12046 disp = gen_rtx_CONST (VOIDmode,
12047 XEXP (XEXP (disp, 0), 0));
12050 if (flag_pic)
12051 output_pic_addr_const (file, disp, 0);
12052 else if (GET_CODE (disp) == LABEL_REF)
12053 output_asm_label (disp);
12054 else if (CONST_INT_P (disp))
12055 offset = disp;
12056 else
12057 output_addr_const (file, disp);
12060 putc ('[', file);
12061 if (base)
12063 print_reg (base, 0, file);
12064 if (offset)
12066 if (INTVAL (offset) >= 0)
12067 putc ('+', file);
12068 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12071 else if (offset)
12072 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12073 else
12074 putc ('0', file);
12076 if (index)
12078 putc ('+', file);
12079 print_reg (index, 0, file);
12080 if (scale != 1)
12081 fprintf (file, "*%d", scale);
12083 putc (']', file);
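/* For example, the address (plus (plus (reg %rbp) (mult (reg %rax)
   (const_int 4))) (const_int -8)) is printed as "-8(%rbp,%rax,4)" by the
   ATT branch above and as "[rbp-8+rax*4]" by the Intel branch.  */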
12088 bool
12089 output_addr_const_extra (FILE *file, rtx x)
12091 rtx op;
12093 if (GET_CODE (x) != UNSPEC)
12094 return false;
12096 op = XVECEXP (x, 0, 0);
12097 switch (XINT (x, 1))
12099 case UNSPEC_GOTTPOFF:
12100 output_addr_const (file, op);
12101 /* FIXME: This might be @TPOFF in Sun ld. */
12102 fputs ("@GOTTPOFF", file);
12103 break;
12104 case UNSPEC_TPOFF:
12105 output_addr_const (file, op);
12106 fputs ("@TPOFF", file);
12107 break;
12108 case UNSPEC_NTPOFF:
12109 output_addr_const (file, op);
12110 if (TARGET_64BIT)
12111 fputs ("@TPOFF", file);
12112 else
12113 fputs ("@NTPOFF", file);
12114 break;
12115 case UNSPEC_DTPOFF:
12116 output_addr_const (file, op);
12117 fputs ("@DTPOFF", file);
12118 break;
12119 case UNSPEC_GOTNTPOFF:
12120 output_addr_const (file, op);
12121 if (TARGET_64BIT)
12122 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12123 "@GOTTPOFF(%rip)" : "@GOTTPOFF[rip]", file);
12124 else
12125 fputs ("@GOTNTPOFF", file);
12126 break;
12127 case UNSPEC_INDNTPOFF:
12128 output_addr_const (file, op);
12129 fputs ("@INDNTPOFF", file);
12130 break;
12131 #if TARGET_MACHO
12132 case UNSPEC_MACHOPIC_OFFSET:
12133 output_addr_const (file, op);
12134 putc ('-', file);
12135 machopic_output_function_base_name (file);
12136 break;
12137 #endif
12139 default:
12140 return false;
12143 return true;
12146 /* Split one or more DImode RTL references into pairs of SImode
12147 references. The RTL can be REG, offsettable MEM, integer constant, or
12148 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12149 split and "num" is its length. lo_half and hi_half are output arrays
12150 that parallel "operands". */
12152 void
12153 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12155 while (num--)
12157 rtx op = operands[num];
 12159       /* simplify_subreg refuses to split volatile memory addresses,
 12160          but we still have to handle them.  */
12161 if (MEM_P (op))
12163 lo_half[num] = adjust_address (op, SImode, 0);
12164 hi_half[num] = adjust_address (op, SImode, 4);
12166 else
12168 lo_half[num] = simplify_gen_subreg (SImode, op,
12169 GET_MODE (op) == VOIDmode
12170 ? DImode : GET_MODE (op), 0);
12171 hi_half[num] = simplify_gen_subreg (SImode, op,
12172 GET_MODE (op) == VOIDmode
12173 ? DImode : GET_MODE (op), 4);
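/* A minimal usage sketch, as one might write in a post-reload splitter
   (hypothetical operands assumed):

     rtx lo[2], hi[2];
     split_di (operands, 2, lo, hi);
     emit_move_insn (lo[0], lo[1]);
     emit_move_insn (hi[0], hi[1]);

   which turns a single DImode move into two SImode moves.  */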
12177 /* Split one or more TImode RTL references into pairs of DImode
12178 references. The RTL can be REG, offsettable MEM, integer constant, or
12179 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12180 split and "num" is its length. lo_half and hi_half are output arrays
12181 that parallel "operands". */
12183 void
12184 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12186 while (num--)
12188 rtx op = operands[num];
 12190       /* simplify_subreg refuses to split volatile memory addresses, but we
 12191          still have to handle them.  */
12192 if (MEM_P (op))
12194 lo_half[num] = adjust_address (op, DImode, 0);
12195 hi_half[num] = adjust_address (op, DImode, 8);
12197 else
12199 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12200 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12205 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12206 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12207 is the expression of the binary operation. The output may either be
12208 emitted here, or returned to the caller, like all output_* functions.
12210 There is no guarantee that the operands are the same mode, as they
12211 might be within FLOAT or FLOAT_EXTEND expressions. */
12213 #ifndef SYSV386_COMPAT
12214 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12215 wants to fix the assemblers because that causes incompatibility
12216 with gcc. No-one wants to fix gcc because that causes
12217 incompatibility with assemblers... You can use the option of
12218 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12219 #define SYSV386_COMPAT 1
12220 #endif
12222 const char *
12223 output_387_binary_op (rtx insn, rtx *operands)
12225 static char buf[40];
12226 const char *p;
12227 const char *ssep;
12228 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12230 #ifdef ENABLE_CHECKING
12231 /* Even if we do not want to check the inputs, this documents input
12232 constraints. Which helps in understanding the following code. */
12233 if (STACK_REG_P (operands[0])
12234 && ((REG_P (operands[1])
12235 && REGNO (operands[0]) == REGNO (operands[1])
12236 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12237 || (REG_P (operands[2])
12238 && REGNO (operands[0]) == REGNO (operands[2])
12239 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12240 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12241 ; /* ok */
12242 else
12243 gcc_assert (is_sse);
12244 #endif
12246 switch (GET_CODE (operands[3]))
12248 case PLUS:
12249 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12250 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12251 p = "fiadd";
12252 else
12253 p = "fadd";
12254 ssep = "vadd";
12255 break;
12257 case MINUS:
12258 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12259 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12260 p = "fisub";
12261 else
12262 p = "fsub";
12263 ssep = "vsub";
12264 break;
12266 case MULT:
12267 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12268 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12269 p = "fimul";
12270 else
12271 p = "fmul";
12272 ssep = "vmul";
12273 break;
12275 case DIV:
12276 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12277 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12278 p = "fidiv";
12279 else
12280 p = "fdiv";
12281 ssep = "vdiv";
12282 break;
12284 default:
12285 gcc_unreachable ();
12288 if (is_sse)
12290 if (TARGET_AVX)
12292 strcpy (buf, ssep);
12293 if (GET_MODE (operands[0]) == SFmode)
12294 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12295 else
12296 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12298 else
12300 strcpy (buf, ssep + 1);
12301 if (GET_MODE (operands[0]) == SFmode)
12302 strcat (buf, "ss\t{%2, %0|%0, %2}");
12303 else
12304 strcat (buf, "sd\t{%2, %0|%0, %2}");
12306 return buf;
12308 strcpy (buf, p);
12310 switch (GET_CODE (operands[3]))
12312 case MULT:
12313 case PLUS:
12314 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12316 rtx temp = operands[2];
12317 operands[2] = operands[1];
12318 operands[1] = temp;
 12321      /* We now know operands[0] == operands[1].  */
12323 if (MEM_P (operands[2]))
12325 p = "%Z2\t%2";
12326 break;
12329 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12331 if (STACK_TOP_P (operands[0]))
12332 /* How is it that we are storing to a dead operand[2]?
12333 Well, presumably operands[1] is dead too. We can't
12334 store the result to st(0) as st(0) gets popped on this
12335 instruction. Instead store to operands[2] (which I
12336 think has to be st(1)). st(1) will be popped later.
12337 gcc <= 2.8.1 didn't have this check and generated
12338 assembly code that the Unixware assembler rejected. */
12339 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12340 else
12341 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12342 break;
12345 if (STACK_TOP_P (operands[0]))
12346 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12347 else
12348 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12349 break;
12351 case MINUS:
12352 case DIV:
12353 if (MEM_P (operands[1]))
12355 p = "r%Z1\t%1";
12356 break;
12359 if (MEM_P (operands[2]))
12361 p = "%Z2\t%2";
12362 break;
12365 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12367 #if SYSV386_COMPAT
12368 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12369 derived assemblers, confusingly reverse the direction of
12370 the operation for fsub{r} and fdiv{r} when the
12371 destination register is not st(0). The Intel assembler
12372 doesn't have this brain damage. Read !SYSV386_COMPAT to
12373 figure out what the hardware really does. */
12374 if (STACK_TOP_P (operands[0]))
12375 p = "{p\t%0, %2|rp\t%2, %0}";
12376 else
12377 p = "{rp\t%2, %0|p\t%0, %2}";
12378 #else
12379 if (STACK_TOP_P (operands[0]))
12380 /* As above for fmul/fadd, we can't store to st(0). */
12381 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12382 else
12383 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12384 #endif
12385 break;
12388 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12390 #if SYSV386_COMPAT
12391 if (STACK_TOP_P (operands[0]))
12392 p = "{rp\t%0, %1|p\t%1, %0}";
12393 else
12394 p = "{p\t%1, %0|rp\t%0, %1}";
12395 #else
12396 if (STACK_TOP_P (operands[0]))
12397 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12398 else
12399 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12400 #endif
12401 break;
12404 if (STACK_TOP_P (operands[0]))
12406 if (STACK_TOP_P (operands[1]))
12407 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12408 else
12409 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12410 break;
12412 else if (STACK_TOP_P (operands[1]))
12414 #if SYSV386_COMPAT
12415 p = "{\t%1, %0|r\t%0, %1}";
12416 #else
12417 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12418 #endif
12420 else
12422 #if SYSV386_COMPAT
12423 p = "{r\t%2, %0|\t%0, %2}";
12424 #else
12425 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12426 #endif
12428 break;
12430 default:
12431 gcc_unreachable ();
12434 strcat (buf, p);
12435 return buf;
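/* Two representative results: for (plus:SF (reg:SF st0) (mem:SF ...)) on
   the 387 the string returned is "fadd%Z2\t%2", while for an SSE register
   destination without AVX it is "addss\t{%2, %0|%0, %2}".  */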
12438 /* Return needed mode for entity in optimize_mode_switching pass. */
12441 ix86_mode_needed (int entity, rtx insn)
12443 enum attr_i387_cw mode;
 12445   /* The mode UNINITIALIZED is used to store the control word after a
 12446      function call or ASM pattern.  The mode ANY specifies that the function
 12447      has no requirements on the control word and makes no changes in the
 12448      bits we are interested in.  */
12450 if (CALL_P (insn)
12451 || (NONJUMP_INSN_P (insn)
12452 && (asm_noperands (PATTERN (insn)) >= 0
12453 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12454 return I387_CW_UNINITIALIZED;
12456 if (recog_memoized (insn) < 0)
12457 return I387_CW_ANY;
12459 mode = get_attr_i387_cw (insn);
12461 switch (entity)
12463 case I387_TRUNC:
12464 if (mode == I387_CW_TRUNC)
12465 return mode;
12466 break;
12468 case I387_FLOOR:
12469 if (mode == I387_CW_FLOOR)
12470 return mode;
12471 break;
12473 case I387_CEIL:
12474 if (mode == I387_CW_CEIL)
12475 return mode;
12476 break;
12478 case I387_MASK_PM:
12479 if (mode == I387_CW_MASK_PM)
12480 return mode;
12481 break;
12483 default:
12484 gcc_unreachable ();
12487 return I387_CW_ANY;
12490 /* Output code to initialize control word copies used by trunc?f?i and
12491 rounding patterns. CURRENT_MODE is set to current control word,
12492 while NEW_MODE is set to new control word. */
12494 void
12495 emit_i387_cw_initialization (int mode)
12497 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12498 rtx new_mode;
12500 enum ix86_stack_slot slot;
12502 rtx reg = gen_reg_rtx (HImode);
12504 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12505 emit_move_insn (reg, copy_rtx (stored_mode));
12507 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12508 || optimize_function_for_size_p (cfun))
12510 switch (mode)
12512 case I387_CW_TRUNC:
12513 /* round toward zero (truncate) */
12514 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12515 slot = SLOT_CW_TRUNC;
12516 break;
12518 case I387_CW_FLOOR:
12519 /* round down toward -oo */
12520 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12521 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12522 slot = SLOT_CW_FLOOR;
12523 break;
12525 case I387_CW_CEIL:
12526 /* round up toward +oo */
12527 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12528 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12529 slot = SLOT_CW_CEIL;
12530 break;
12532 case I387_CW_MASK_PM:
12533 /* mask precision exception for nearbyint() */
12534 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12535 slot = SLOT_CW_MASK_PM;
12536 break;
12538 default:
12539 gcc_unreachable ();
12542 else
12544 switch (mode)
12546 case I387_CW_TRUNC:
12547 /* round toward zero (truncate) */
12548 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12549 slot = SLOT_CW_TRUNC;
12550 break;
12552 case I387_CW_FLOOR:
12553 /* round down toward -oo */
12554 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12555 slot = SLOT_CW_FLOOR;
12556 break;
12558 case I387_CW_CEIL:
12559 /* round up toward +oo */
12560 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12561 slot = SLOT_CW_CEIL;
12562 break;
12564 case I387_CW_MASK_PM:
12565 /* mask precision exception for nearbyint() */
12566 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12567 slot = SLOT_CW_MASK_PM;
12568 break;
12570 default:
12571 gcc_unreachable ();
12575 gcc_assert (slot < MAX_386_STACK_LOCALS);
12577 new_mode = assign_386_stack_local (HImode, slot);
12578 emit_move_insn (new_mode, reg);
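/* A rough sketch of the arithmetic above: the rounding-control field
   occupies bits 10-11 of the x87 control word, so

     cw | 0x0c00                  rounds toward zero (truncate),
     (cw & ~0x0c00) | 0x0400      rounds toward -inf (floor),
     (cw & ~0x0c00) | 0x0800      rounds toward +inf (ceil),

   and ORing in 0x0020 merely masks the precision exception.  */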
12581 /* Output code for INSN to convert a float to a signed int. OPERANDS
12582 are the insn operands. The output may be [HSD]Imode and the input
12583 operand may be [SDX]Fmode. */
12585 const char *
12586 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12588 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12589 int dimode_p = GET_MODE (operands[0]) == DImode;
12590 int round_mode = get_attr_i387_cw (insn);
12592 /* Jump through a hoop or two for DImode, since the hardware has no
12593 non-popping instruction. We used to do this a different way, but
12594 that was somewhat fragile and broke with post-reload splitters. */
12595 if ((dimode_p || fisttp) && !stack_top_dies)
12596 output_asm_insn ("fld\t%y1", operands);
12598 gcc_assert (STACK_TOP_P (operands[1]));
12599 gcc_assert (MEM_P (operands[0]));
12600 gcc_assert (GET_MODE (operands[1]) != TFmode);
12602 if (fisttp)
12603 output_asm_insn ("fisttp%Z0\t%0", operands);
12604 else
12606 if (round_mode != I387_CW_ANY)
12607 output_asm_insn ("fldcw\t%3", operands);
12608 if (stack_top_dies || dimode_p)
12609 output_asm_insn ("fistp%Z0\t%0", operands);
12610 else
12611 output_asm_insn ("fist%Z0\t%0", operands);
12612 if (round_mode != I387_CW_ANY)
12613 output_asm_insn ("fldcw\t%2", operands);
12616 return "";
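/* E.g. for a DImode store through the truncating control word, with the
   stack top still live, the sequence emitted above is roughly

     fld    %st(0)
     fldcw  %3        (switch to round-toward-zero)
     fistpq %0
     fldcw  %2        (restore the saved control word)

   assuming the assembler supports the fistpq mnemonic.  */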
12619 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12620 have the values zero or one, indicates the ffreep insn's operand
12621 from the OPERANDS array. */
12623 static const char *
12624 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12626 if (TARGET_USE_FFREEP)
12627 #ifdef HAVE_AS_IX86_FFREEP
12628 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12629 #else
12631 static char retval[32];
12632 int regno = REGNO (operands[opno]);
12634 gcc_assert (FP_REGNO_P (regno));
12636 regno -= FIRST_STACK_REG;
12638 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12639 return retval;
12641 #endif
12643 return opno ? "fstp\t%y1" : "fstp\t%y0";
12647 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12648 should be used. UNORDERED_P is true when fucom should be used. */
12650 const char *
12651 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12653 int stack_top_dies;
12654 rtx cmp_op0, cmp_op1;
12655 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12657 if (eflags_p)
12659 cmp_op0 = operands[0];
12660 cmp_op1 = operands[1];
12662 else
12664 cmp_op0 = operands[1];
12665 cmp_op1 = operands[2];
12668 if (is_sse)
12670 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12671 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12672 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12673 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12675 if (GET_MODE (operands[0]) == SFmode)
12676 if (unordered_p)
12677 return &ucomiss[TARGET_AVX ? 0 : 1];
12678 else
12679 return &comiss[TARGET_AVX ? 0 : 1];
12680 else
12681 if (unordered_p)
12682 return &ucomisd[TARGET_AVX ? 0 : 1];
12683 else
12684 return &comisd[TARGET_AVX ? 0 : 1];
12687 gcc_assert (STACK_TOP_P (cmp_op0));
12689 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12691 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12693 if (stack_top_dies)
12695 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12696 return output_387_ffreep (operands, 1);
12698 else
12699 return "ftst\n\tfnstsw\t%0";
12702 if (STACK_REG_P (cmp_op1)
12703 && stack_top_dies
12704 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12705 && REGNO (cmp_op1) != FIRST_STACK_REG)
 12707       /* If the top of the 387 stack dies, and the other operand
 12708          is also a stack register that dies, then this must be a
 12709          `fcompp' float compare.  */
12711 if (eflags_p)
12713 /* There is no double popping fcomi variant. Fortunately,
12714 eflags is immune from the fstp's cc clobbering. */
12715 if (unordered_p)
12716 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12717 else
12718 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12719 return output_387_ffreep (operands, 0);
12721 else
12723 if (unordered_p)
12724 return "fucompp\n\tfnstsw\t%0";
12725 else
12726 return "fcompp\n\tfnstsw\t%0";
12729 else
12731 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12733 static const char * const alt[16] =
12735 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12736 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12737 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12738 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12740 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12741 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12742 NULL,
12743 NULL,
12745 "fcomi\t{%y1, %0|%0, %y1}",
12746 "fcomip\t{%y1, %0|%0, %y1}",
12747 "fucomi\t{%y1, %0|%0, %y1}",
12748 "fucomip\t{%y1, %0|%0, %y1}",
12750 NULL,
12751 NULL,
12752 NULL,
12753 NULL
12756 int mask;
12757 const char *ret;
12759 mask = eflags_p << 3;
12760 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12761 mask |= unordered_p << 1;
12762 mask |= stack_top_dies;
12764 gcc_assert (mask < 16);
12765 ret = alt[mask];
12766 gcc_assert (ret);
12768 return ret;
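/* Worked example of the mask: eflags_p = 1, a floating-point cmp_op1,
   unordered_p = 1 and a dying stack top give
   mask = (1 << 3) | (0 << 2) | (1 << 1) | 1 = 11, selecting
   "fucomip\t{%y1, %0|%0, %y1}" from the table above.  */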
12772 void
12773 ix86_output_addr_vec_elt (FILE *file, int value)
12775 const char *directive = ASM_LONG;
12777 #ifdef ASM_QUAD
12778 if (TARGET_64BIT)
12779 directive = ASM_QUAD;
12780 #else
12781 gcc_assert (!TARGET_64BIT);
12782 #endif
12784 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
12787 void
12788 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12790 const char *directive = ASM_LONG;
12792 #ifdef ASM_QUAD
12793 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12794 directive = ASM_QUAD;
12795 #else
12796 gcc_assert (!TARGET_64BIT);
12797 #endif
12798 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12799 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12800 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
12801 directive, value, rel);
12802 else if (HAVE_AS_GOTOFF_IN_DATA)
12803 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
12804 #if TARGET_MACHO
12805 else if (TARGET_MACHO)
12807 fprintf (file, ASM_LONG LPREFIX "%d-", value);
12808 machopic_output_function_base_name (file);
12809 putc ('\n', file);
12811 #endif
12812 else
12813 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
12814 GOT_SYMBOL_NAME, value);
12817 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
12818 for the target. */
12820 void
12821 ix86_expand_clear (rtx dest)
12823 rtx tmp;
12825 /* We play register width games, which are only valid after reload. */
12826 gcc_assert (reload_completed);
12828 /* Avoid HImode and its attendant prefix byte. */
12829 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12830 dest = gen_rtx_REG (SImode, REGNO (dest));
12831 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12833 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12834 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
12836 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12837 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
12840 emit_insn (tmp);
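/* E.g. clearing %eax normally emits "xorl %eax, %eax" together with the
   flags clobber; the "movl $0, %eax" form is used only when
   TARGET_USE_MOV0 is set and the insn is not being optimized for speed.  */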
12843 /* X is an unchanging MEM. If it is a constant pool reference, return
12844 the constant pool rtx, else NULL. */
12847 maybe_get_pool_constant (rtx x)
12849 x = ix86_delegitimize_address (XEXP (x, 0));
12851 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12852 return get_pool_constant (x);
12854 return NULL_RTX;
12857 void
12858 ix86_expand_move (enum machine_mode mode, rtx operands[])
12860 rtx op0, op1;
12861 enum tls_model model;
12863 op0 = operands[0];
12864 op1 = operands[1];
12866 if (GET_CODE (op1) == SYMBOL_REF)
12868 model = SYMBOL_REF_TLS_MODEL (op1);
12869 if (model)
12871 op1 = legitimize_tls_address (op1, model, true);
12872 op1 = force_operand (op1, op0);
12873 if (op1 == op0)
12874 return;
12876 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12877 && SYMBOL_REF_DLLIMPORT_P (op1))
12878 op1 = legitimize_dllimport_symbol (op1, false);
12880 else if (GET_CODE (op1) == CONST
12881 && GET_CODE (XEXP (op1, 0)) == PLUS
12882 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
12884 rtx addend = XEXP (XEXP (op1, 0), 1);
12885 rtx symbol = XEXP (XEXP (op1, 0), 0);
12886 rtx tmp = NULL;
12888 model = SYMBOL_REF_TLS_MODEL (symbol);
12889 if (model)
12890 tmp = legitimize_tls_address (symbol, model, true);
12891 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12892 && SYMBOL_REF_DLLIMPORT_P (symbol))
12893 tmp = legitimize_dllimport_symbol (symbol, true);
12895 if (tmp)
12897 tmp = force_operand (tmp, NULL);
12898 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
12899 op0, 1, OPTAB_DIRECT);
12900 if (tmp == op0)
12901 return;
12905 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
12907 if (TARGET_MACHO && !TARGET_64BIT)
12909 #if TARGET_MACHO
12910 if (MACHOPIC_PURE)
12912 rtx temp = ((reload_in_progress
12913 || ((op0 && REG_P (op0))
12914 && mode == Pmode))
12915 ? op0 : gen_reg_rtx (Pmode));
12916 op1 = machopic_indirect_data_reference (op1, temp);
12917 op1 = machopic_legitimize_pic_address (op1, mode,
12918 temp == op1 ? 0 : temp);
12920 else if (MACHOPIC_INDIRECT)
12921 op1 = machopic_indirect_data_reference (op1, 0);
12922 if (op0 == op1)
12923 return;
12924 #endif
12926 else
12928 if (MEM_P (op0))
12929 op1 = force_reg (Pmode, op1);
12930 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
12932 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
12933 op1 = legitimize_pic_address (op1, reg);
12934 if (op0 == op1)
12935 return;
12939 else
12941 if (MEM_P (op0)
12942 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
12943 || !push_operand (op0, mode))
12944 && MEM_P (op1))
12945 op1 = force_reg (mode, op1);
12947 if (push_operand (op0, mode)
12948 && ! general_no_elim_operand (op1, mode))
12949 op1 = copy_to_mode_reg (mode, op1);
 12951       /* Force large constants in 64-bit compilation into a register
 12952          so that they get CSEed.  */
12953 if (can_create_pseudo_p ()
12954 && (mode == DImode) && TARGET_64BIT
12955 && immediate_operand (op1, mode)
12956 && !x86_64_zext_immediate_operand (op1, VOIDmode)
12957 && !register_operand (op0, mode)
12958 && optimize)
12959 op1 = copy_to_mode_reg (mode, op1);
12961 if (can_create_pseudo_p ()
12962 && FLOAT_MODE_P (mode)
12963 && GET_CODE (op1) == CONST_DOUBLE)
 12965       /* If we are loading a floating point constant to a register,
 12966          force the value to memory now, since we'll get better code
 12967          out of the back end.  */
12969 op1 = validize_mem (force_const_mem (mode, op1));
12970 if (!register_operand (op0, mode))
12972 rtx temp = gen_reg_rtx (mode);
12973 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
12974 emit_move_insn (op0, temp);
12975 return;
12980 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12983 void
12984 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
12986 rtx op0 = operands[0], op1 = operands[1];
12987 unsigned int align = GET_MODE_ALIGNMENT (mode);
 12989   /* Force constants other than zero into memory.  We do not know how
 12990      the instructions used to build constants modify the upper 64 bits
 12991      of the register; once we have that information we may be able
 12992      to handle some of them more efficiently.  */
12993 if (can_create_pseudo_p ()
12994 && register_operand (op0, mode)
12995 && (CONSTANT_P (op1)
12996 || (GET_CODE (op1) == SUBREG
12997 && CONSTANT_P (SUBREG_REG (op1))))
12998 && !standard_sse_constant_p (op1))
12999 op1 = validize_mem (force_const_mem (mode, op1));
 13001   /* We need to check memory alignment for SSE mode since an attribute
 13002      can make operands unaligned.  */
13003 if (can_create_pseudo_p ()
13004 && SSE_REG_MODE_P (mode)
13005 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13006 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13008 rtx tmp[2];
13010 /* ix86_expand_vector_move_misalign() does not like constants ... */
13011 if (CONSTANT_P (op1)
13012 || (GET_CODE (op1) == SUBREG
13013 && CONSTANT_P (SUBREG_REG (op1))))
13014 op1 = validize_mem (force_const_mem (mode, op1));
13016 /* ... nor both arguments in memory. */
13017 if (!register_operand (op0, mode)
13018 && !register_operand (op1, mode))
13019 op1 = force_reg (mode, op1);
13021 tmp[0] = op0; tmp[1] = op1;
13022 ix86_expand_vector_move_misalign (mode, tmp);
13023 return;
13026 /* Make operand1 a register if it isn't already. */
13027 if (can_create_pseudo_p ()
13028 && !register_operand (op0, mode)
13029 && !register_operand (op1, mode))
13031 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13032 return;
13035 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13038 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13039 straight to ix86_expand_vector_move. */
13040 /* Code generation for scalar reg-reg moves of single and double precision data:
13041 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13042 movaps reg, reg
13043 else
13044 movss reg, reg
13045 if (x86_sse_partial_reg_dependency == true)
13046 movapd reg, reg
13047 else
13048 movsd reg, reg
13050 Code generation for scalar loads of double precision data:
13051 if (x86_sse_split_regs == true)
13052 movlpd mem, reg (gas syntax)
13053 else
13054 movsd mem, reg
13056 Code generation for unaligned packed loads of single precision data
13057 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13058 if (x86_sse_unaligned_move_optimal)
13059 movups mem, reg
13061 if (x86_sse_partial_reg_dependency == true)
13063 xorps reg, reg
13064 movlps mem, reg
13065 movhps mem+8, reg
13067 else
13069 movlps mem, reg
13070 movhps mem+8, reg
13073 Code generation for unaligned packed loads of double precision data
13074 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13075 if (x86_sse_unaligned_move_optimal)
13076 movupd mem, reg
13078 if (x86_sse_split_regs == true)
13080 movlpd mem, reg
13081 movhpd mem+8, reg
13083 else
13085 movsd mem, reg
13086 movhpd mem+8, reg
13090 void
13091 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13093 rtx op0, op1, m;
13095 op0 = operands[0];
13096 op1 = operands[1];
13098 if (TARGET_AVX)
13100 switch (GET_MODE_CLASS (mode))
13102 case MODE_VECTOR_INT:
13103 case MODE_INT:
13104 switch (GET_MODE_SIZE (mode))
13106 case 16:
13107 op0 = gen_lowpart (V16QImode, op0);
13108 op1 = gen_lowpart (V16QImode, op1);
13109 emit_insn (gen_avx_movdqu (op0, op1));
13110 break;
13111 case 32:
13112 op0 = gen_lowpart (V32QImode, op0);
13113 op1 = gen_lowpart (V32QImode, op1);
13114 emit_insn (gen_avx_movdqu256 (op0, op1));
13115 break;
13116 default:
13117 gcc_unreachable ();
13119 break;
13120 case MODE_VECTOR_FLOAT:
13121 op0 = gen_lowpart (mode, op0);
13122 op1 = gen_lowpart (mode, op1);
13124 switch (mode)
13126 case V4SFmode:
13127 emit_insn (gen_avx_movups (op0, op1));
13128 break;
13129 case V8SFmode:
13130 emit_insn (gen_avx_movups256 (op0, op1));
13131 break;
13132 case V2DFmode:
13133 emit_insn (gen_avx_movupd (op0, op1));
13134 break;
13135 case V4DFmode:
13136 emit_insn (gen_avx_movupd256 (op0, op1));
13137 break;
13138 default:
13139 gcc_unreachable ();
13141 break;
13143 default:
13144 gcc_unreachable ();
13147 return;
13150 if (MEM_P (op1))
13152 /* If we're optimizing for size, movups is the smallest. */
13153 if (optimize_insn_for_size_p ())
13155 op0 = gen_lowpart (V4SFmode, op0);
13156 op1 = gen_lowpart (V4SFmode, op1);
13157 emit_insn (gen_sse_movups (op0, op1));
13158 return;
13161 /* ??? If we have typed data, then it would appear that using
13162 movdqu is the only way to get unaligned data loaded with
13163 integer type. */
13164 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13166 op0 = gen_lowpart (V16QImode, op0);
13167 op1 = gen_lowpart (V16QImode, op1);
13168 emit_insn (gen_sse2_movdqu (op0, op1));
13169 return;
13172 if (TARGET_SSE2 && mode == V2DFmode)
13174 rtx zero;
13176 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13178 op0 = gen_lowpart (V2DFmode, op0);
13179 op1 = gen_lowpart (V2DFmode, op1);
13180 emit_insn (gen_sse2_movupd (op0, op1));
13181 return;
13184 /* When SSE registers are split into halves, we can avoid
13185 writing to the top half twice. */
13186 if (TARGET_SSE_SPLIT_REGS)
13188 emit_clobber (op0);
13189 zero = op0;
13191 else
13193 /* ??? Not sure about the best option for the Intel chips.
13194 The following would seem to satisfy; the register is
13195 entirely cleared, breaking the dependency chain. We
13196 then store to the upper half, with a dependency depth
13197 of one. A rumor has it that Intel recommends two movsd
13198 followed by an unpacklpd, but this is unconfirmed. And
13199 given that the dependency depth of the unpacklpd would
13200 still be one, I'm not sure why this would be better. */
13201 zero = CONST0_RTX (V2DFmode);
13204 m = adjust_address (op1, DFmode, 0);
13205 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13206 m = adjust_address (op1, DFmode, 8);
13207 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13209 else
13211 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13213 op0 = gen_lowpart (V4SFmode, op0);
13214 op1 = gen_lowpart (V4SFmode, op1);
13215 emit_insn (gen_sse_movups (op0, op1));
13216 return;
13219 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13220 emit_move_insn (op0, CONST0_RTX (mode));
13221 else
13222 emit_clobber (op0);
13224 if (mode != V4SFmode)
13225 op0 = gen_lowpart (V4SFmode, op0);
13226 m = adjust_address (op1, V2SFmode, 0);
13227 emit_insn (gen_sse_loadlps (op0, op0, m));
13228 m = adjust_address (op1, V2SFmode, 8);
13229 emit_insn (gen_sse_loadhps (op0, op0, m));
13232 else if (MEM_P (op0))
13234 /* If we're optimizing for size, movups is the smallest. */
13235 if (optimize_insn_for_size_p ())
13237 op0 = gen_lowpart (V4SFmode, op0);
13238 op1 = gen_lowpart (V4SFmode, op1);
13239 emit_insn (gen_sse_movups (op0, op1));
13240 return;
 13243       /* ??? Similar to above, only less clear because of the
 13244          "typeless stores" issue.  */
13245 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13246 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13248 op0 = gen_lowpart (V16QImode, op0);
13249 op1 = gen_lowpart (V16QImode, op1);
13250 emit_insn (gen_sse2_movdqu (op0, op1));
13251 return;
13254 if (TARGET_SSE2 && mode == V2DFmode)
13256 m = adjust_address (op0, DFmode, 0);
13257 emit_insn (gen_sse2_storelpd (m, op1));
13258 m = adjust_address (op0, DFmode, 8);
13259 emit_insn (gen_sse2_storehpd (m, op1));
13261 else
13263 if (mode != V4SFmode)
13264 op1 = gen_lowpart (V4SFmode, op1);
13265 m = adjust_address (op0, V2SFmode, 0);
13266 emit_insn (gen_sse_storelps (m, op1));
13267 m = adjust_address (op0, V2SFmode, 8);
13268 emit_insn (gen_sse_storehps (m, op1));
13271 else
13272 gcc_unreachable ();
13275 /* Expand a push in MODE. This is some mode for which we do not support
13276 proper push instructions, at least from the registers that we expect
13277 the value to live in. */
13279 void
13280 ix86_expand_push (enum machine_mode mode, rtx x)
13282 rtx tmp;
13284 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13285 GEN_INT (-GET_MODE_SIZE (mode)),
13286 stack_pointer_rtx, 1, OPTAB_DIRECT);
13287 if (tmp != stack_pointer_rtx)
13288 emit_move_insn (stack_pointer_rtx, tmp);
13290 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
 13292   /* When we push an operand onto the stack, it has to be aligned at least
 13293      at the function argument boundary.  However, since we don't have
 13294      the argument type, we can't determine the actual argument
 13295      boundary.  */
13296 emit_move_insn (tmp, x);
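/* On x86-64 a TImode push, for instance, expands to roughly
     sub $16, %rsp
   (or an equivalent lea) followed by an ordinary move of the value into
   (%rsp), since there is no push instruction for such operands.  */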
13299 /* Helper function of ix86_fixup_binary_operands to canonicalize
13300 operand order. Returns true if the operands should be swapped. */
13302 static bool
13303 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13304 rtx operands[])
13306 rtx dst = operands[0];
13307 rtx src1 = operands[1];
13308 rtx src2 = operands[2];
13310 /* If the operation is not commutative, we can't do anything. */
13311 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13312 return false;
13314 /* Highest priority is that src1 should match dst. */
13315 if (rtx_equal_p (dst, src1))
13316 return false;
13317 if (rtx_equal_p (dst, src2))
13318 return true;
13320 /* Next highest priority is that immediate constants come second. */
13321 if (immediate_operand (src2, mode))
13322 return false;
13323 if (immediate_operand (src1, mode))
13324 return true;
13326 /* Lowest priority is that memory references should come second. */
13327 if (MEM_P (src2))
13328 return false;
13329 if (MEM_P (src1))
13330 return true;
13332 return false;
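/* For instance, given the commutative operation
     (set (reg 60) (plus (mem ...) (reg 60)))
   the operands are swapped so that src1 matches the destination and the
   memory reference ends up second, matching the two-address insn forms
   in which only the second source may be a memory operand.  */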
13336 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13337 destination to use for the operation. If different from the true
13338 destination in operands[0], a copy operation will be required. */
13341 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13342 rtx operands[])
13344 rtx dst = operands[0];
13345 rtx src1 = operands[1];
13346 rtx src2 = operands[2];
13348 /* Canonicalize operand order. */
13349 if (ix86_swap_binary_operands_p (code, mode, operands))
13351 rtx temp;
13353 /* It is invalid to swap operands of different modes. */
13354 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13356 temp = src1;
13357 src1 = src2;
13358 src2 = temp;
13361 /* Both source operands cannot be in memory. */
13362 if (MEM_P (src1) && MEM_P (src2))
13364 /* Optimization: Only read from memory once. */
13365 if (rtx_equal_p (src1, src2))
13367 src2 = force_reg (mode, src2);
13368 src1 = src2;
13370 else
13371 src2 = force_reg (mode, src2);
13374 /* If the destination is memory, and we do not have matching source
13375 operands, do things in registers. */
13376 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13377 dst = gen_reg_rtx (mode);
13379 /* Source 1 cannot be a constant. */
13380 if (CONSTANT_P (src1))
13381 src1 = force_reg (mode, src1);
13383 /* Source 1 cannot be a non-matching memory. */
13384 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13385 src1 = force_reg (mode, src1);
13387 /* In order for the multiply-add patterns to get matched, we need
13388 to aid combine by forcing all operands into registers to start. */
13389 if (optimize && TARGET_FMA4)
13391 if (MEM_P (src2))
13392 src2 = force_reg (GET_MODE (src2), src2);
13393 else if (MEM_P (src1))
13394 src1 = force_reg (GET_MODE (src1), src1);
13397 operands[1] = src1;
13398 operands[2] = src2;
13399 return dst;
13402 /* Similarly, but assume that the destination has already been
13403 set up properly. */
13405 void
13406 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13407 enum machine_mode mode, rtx operands[])
13409 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13410 gcc_assert (dst == operands[0]);
13413 /* Attempt to expand a binary operator. Make the expansion closer to the
13414 actual machine than just general_operand, which will allow 3 separate
13415 memory references (one output, two input) in a single insn. */
13417 void
13418 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13419 rtx operands[])
13421 rtx src1, src2, dst, op, clob;
13423 dst = ix86_fixup_binary_operands (code, mode, operands);
13424 src1 = operands[1];
13425 src2 = operands[2];
13427 /* Emit the instruction. */
13429 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13430 if (reload_in_progress)
13432 /* Reload doesn't know about the flags register, and doesn't know that
13433 it doesn't want to clobber it. We can only do this with PLUS. */
13434 gcc_assert (code == PLUS);
13435 emit_insn (op);
13437 else
13439 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13440 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13443 /* Fix up the destination if needed. */
13444 if (dst != operands[0])
13445 emit_move_insn (operands[0], dst);
13448 /* Return TRUE or FALSE depending on whether the binary operator meets the
13449 appropriate constraints. */
13451 int
13452 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13453 rtx operands[3])
13455 rtx dst = operands[0];
13456 rtx src1 = operands[1];
13457 rtx src2 = operands[2];
13459 /* Both source operands cannot be in memory. */
13460 if (MEM_P (src1) && MEM_P (src2))
13461 return 0;
13463 /* Canonicalize operand order for commutative operators. */
13464 if (ix86_swap_binary_operands_p (code, mode, operands))
13466 rtx temp = src1;
13467 src1 = src2;
13468 src2 = temp;
13471 /* If the destination is memory, we must have a matching source operand. */
13472 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13473 return 0;
13475 /* Source 1 cannot be a constant. */
13476 if (CONSTANT_P (src1))
13477 return 0;
13479 /* Source 1 cannot be a non-matching memory. */
13480 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13481 return 0;
13483 return 1;
13486 /* Attempt to expand a unary operator. Make the expansion closer to the
13487 actual machine than just general_operand, which will allow 2 separate
13488 memory references (one output, one input) in a single insn. */
13490 void
13491 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13492 rtx operands[])
13494 int matching_memory;
13495 rtx src, dst, op, clob;
13497 dst = operands[0];
13498 src = operands[1];
13500 /* If the destination is memory, and we do not have matching source
13501 operands, do things in registers. */
13502 matching_memory = 0;
13503 if (MEM_P (dst))
13505 if (rtx_equal_p (dst, src))
13506 matching_memory = 1;
13507 else
13508 dst = gen_reg_rtx (mode);
13511 /* When source operand is memory, destination must match. */
13512 if (MEM_P (src) && !matching_memory)
13513 src = force_reg (mode, src);
13515 /* Emit the instruction. */
13517 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13518 if (reload_in_progress || code == NOT)
13520 /* Reload doesn't know about the flags register, and doesn't know that
13521 it doesn't want to clobber it. */
13522 gcc_assert (code == NOT);
13523 emit_insn (op);
13525 else
13527 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13528 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13531 /* Fix up the destination if needed. */
13532 if (dst != operands[0])
13533 emit_move_insn (operands[0], dst);
13536 #define LEA_SEARCH_THRESHOLD 12
13538 /* Search backward for a non-agu definition of register number REGNO1
13539 or register number REGNO2 in INSN's basic block until we either
13540 1. pass LEA_SEARCH_THRESHOLD instructions, or
13541 2. reach the BB boundary, or
13542 3. reach an agu definition.
13543 Returns the distance between the non-agu definition point and INSN.
13544 If no definition point is found, returns -1. */
13546 static int
13547 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13548 rtx insn)
13550 basic_block bb = BLOCK_FOR_INSN (insn);
13551 int distance = 0;
13552 df_ref *def_rec;
13553 enum attr_type insn_type;
13555 if (insn != BB_HEAD (bb))
13557 rtx prev = PREV_INSN (insn);
13558 while (prev && distance < LEA_SEARCH_THRESHOLD)
13560 if (INSN_P (prev))
13562 distance++;
13563 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13564 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13565 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13566 && (regno1 == DF_REF_REGNO (*def_rec)
13567 || regno2 == DF_REF_REGNO (*def_rec)))
13569 insn_type = get_attr_type (prev);
13570 if (insn_type != TYPE_LEA)
13571 goto done;
13574 if (prev == BB_HEAD (bb))
13575 break;
13576 prev = PREV_INSN (prev);
13580 if (distance < LEA_SEARCH_THRESHOLD)
13582 edge e;
13583 edge_iterator ei;
13584 bool simple_loop = false;
13586 FOR_EACH_EDGE (e, ei, bb->preds)
13587 if (e->src == bb)
13589 simple_loop = true;
13590 break;
13593 if (simple_loop)
13595 rtx prev = BB_END (bb);
13596 while (prev
13597 && prev != insn
13598 && distance < LEA_SEARCH_THRESHOLD)
13600 if (INSN_P (prev))
13602 distance++;
13603 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13604 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13605 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13606 && (regno1 == DF_REF_REGNO (*def_rec)
13607 || regno2 == DF_REF_REGNO (*def_rec)))
13609 insn_type = get_attr_type (prev);
13610 if (insn_type != TYPE_LEA)
13611 goto done;
13614 prev = PREV_INSN (prev);
13619 distance = -1;
13621 done:
13622 /* get_attr_type may modify recog data. We want to make sure
13623 that recog data is valid for instruction INSN, on which
13624 distance_non_agu_define is called. INSN is unchanged here. */
13625 extract_insn_cached (insn);
13626 return distance;
13629 /* Return the distance between INSN and the next insn that uses
13630 register number REGNO0 in a memory address.  Return -1 if no such
13631 use is found within LEA_SEARCH_THRESHOLD or if REGNO0 is set. */
13633 static int
13634 distance_agu_use (unsigned int regno0, rtx insn)
13636 basic_block bb = BLOCK_FOR_INSN (insn);
13637 int distance = 0;
13638 df_ref *def_rec;
13639 df_ref *use_rec;
13641 if (insn != BB_END (bb))
13643 rtx next = NEXT_INSN (insn);
13644 while (next && distance < LEA_SEARCH_THRESHOLD)
13646 if (INSN_P (next))
13648 distance++;
13650 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13651 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13652 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13653 && regno0 == DF_REF_REGNO (*use_rec))
13655 /* Return DISTANCE if OP0 is used in memory
13656 address in NEXT. */
13657 return distance;
13660 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13661 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13662 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13663 && regno0 == DF_REF_REGNO (*def_rec))
13665 /* Return -1 if OP0 is set in NEXT. */
13666 return -1;
13669 if (next == BB_END (bb))
13670 break;
13671 next = NEXT_INSN (next);
13675 if (distance < LEA_SEARCH_THRESHOLD)
13677 edge e;
13678 edge_iterator ei;
13679 bool simple_loop = false;
13681 FOR_EACH_EDGE (e, ei, bb->succs)
13682 if (e->dest == bb)
13684 simple_loop = true;
13685 break;
13688 if (simple_loop)
13690 rtx next = BB_HEAD (bb);
13691 while (next
13692 && next != insn
13693 && distance < LEA_SEARCH_THRESHOLD)
13695 if (INSN_P (next))
13697 distance++;
13699 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13700 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13701 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13702 && regno0 == DF_REF_REGNO (*use_rec))
13704 /* Return DISTANCE if OP0 is used in memory
13705 address in NEXT. */
13706 return distance;
13709 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13710 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13711 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13712 && regno0 == DF_REF_REGNO (*def_rec))
13714 /* Return -1 if OP0 is set in NEXT. */
13715 return -1;
13719 next = NEXT_INSN (next);
13724 return -1;
13727 /* Define this macro to tune LEA priority vs ADD; it takes effect when
13728 there is a choice between LEA and ADD.
13729 Negative value: ADD is preferred over LEA
13730 Zero: Neutral
13731 Positive value: LEA is preferred over ADD */
13732 #define IX86_LEA_PRIORITY 2
13734 /* Return true if it is ok to optimize an ADD operation to LEA
13735 operation to avoid flag register consumption.  For processors
13736 like ATOM, if the destination register of the LEA holds an actual
13737 address which will be used soon, LEA is better; otherwise ADD
13738 is better. */
13740 bool
13741 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13742 rtx insn, rtx operands[])
13744 unsigned int regno0 = true_regnum (operands[0]);
13745 unsigned int regno1 = true_regnum (operands[1]);
13746 unsigned int regno2;
13748 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
13749 return regno0 != regno1;
13751 regno2 = true_regnum (operands[2]);
13753 /* If a = b + c and (a != b && a != c), we must use the lea form. */
13754 if (regno0 != regno1 && regno0 != regno2)
13755 return true;
13756 else
13758 int dist_define, dist_use;
13759 dist_define = distance_non_agu_define (regno1, regno2, insn);
13760 if (dist_define <= 0)
13761 return true;
13763 /* If this insn has both backward non-agu dependence and forward
13764 agu dependence, the one with the shorter distance takes effect. */
13765 dist_use = distance_agu_use (regno0, insn);
13766 if (dist_use <= 0
13767 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
13768 return false;
13770 return true;
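/* Illustrative summary of the heuristic above (assuming both distances are
   found): with IX86_LEA_PRIORITY == 2, the LEA form is kept whenever the
   next AGU use of the result is at most two instructions farther away than
   the nearest non-AGU definition of the sources; otherwise the
   flag-clobbering ADD is preferred.  */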
13774 /* Return true if destination reg of SET_BODY is shift count of
13775 USE_BODY. */
13777 static bool
13778 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
13780 rtx set_dest;
13781 rtx shift_rtx;
13782 int i;
13784 /* Retrieve destination of SET_BODY. */
13785 switch (GET_CODE (set_body))
13787 case SET:
13788 set_dest = SET_DEST (set_body);
13789 if (!set_dest || !REG_P (set_dest))
13790 return false;
13791 break;
13792 case PARALLEL:
13793 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
13794 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
13795 use_body))
13796 return true;
13797 default:
13798 return false;
13799 break;
13802 /* Retrieve shift count of USE_BODY. */
13803 switch (GET_CODE (use_body))
13805 case SET:
13806 shift_rtx = XEXP (use_body, 1);
13807 break;
13808 case PARALLEL:
13809 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
13810 if (ix86_dep_by_shift_count_body (set_body,
13811 XVECEXP (use_body, 0, i)))
13812 return true;
13813 default:
13814 return false;
13815 break;
13818 if (shift_rtx
13819 && (GET_CODE (shift_rtx) == ASHIFT
13820 || GET_CODE (shift_rtx) == LSHIFTRT
13821 || GET_CODE (shift_rtx) == ASHIFTRT
13822 || GET_CODE (shift_rtx) == ROTATE
13823 || GET_CODE (shift_rtx) == ROTATERT))
13825 rtx shift_count = XEXP (shift_rtx, 1);
13827 /* Return true if shift count is dest of SET_BODY. */
13828 if (REG_P (shift_count)
13829 && true_regnum (set_dest) == true_regnum (shift_count))
13830 return true;
13833 return false;
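/* Illustrative example (hypothetical RTL, not from the original source):
     set_body:  (set (reg:QI cx) (...))
     use_body:  (set (reg:SI ax) (ashift:SI (reg:SI ax) (reg:QI cx)))
   The destination of SET_BODY is the shift count of USE_BODY, so the
   function above returns true.  */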
13836 /* Return true if destination reg of SET_INSN is shift count of
13837 USE_INSN. */
13839 bool
13840 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
13842 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
13843 PATTERN (use_insn));
13846 /* Return TRUE or FALSE depending on whether the unary operator meets the
13847 appropriate constraints. */
13849 int
13850 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13851 enum machine_mode mode ATTRIBUTE_UNUSED,
13852 rtx operands[2] ATTRIBUTE_UNUSED)
13854 /* If one of operands is memory, source and destination must match. */
13855 if ((MEM_P (operands[0])
13856 || MEM_P (operands[1]))
13857 && ! rtx_equal_p (operands[0], operands[1]))
13858 return FALSE;
13859 return TRUE;
13862 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
13863 are ok, keeping in mind the possible movddup alternative. */
13865 bool
13866 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
13868 if (MEM_P (operands[0]))
13869 return rtx_equal_p (operands[0], operands[1 + high]);
13870 if (MEM_P (operands[1]) && MEM_P (operands[2]))
13871 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
13872 return true;
13875 /* Post-reload splitter for converting an SF or DFmode value in an
13876 SSE register into an unsigned SImode. */
13878 void
13879 ix86_split_convert_uns_si_sse (rtx operands[])
13881 enum machine_mode vecmode;
13882 rtx value, large, zero_or_two31, input, two31, x;
13884 large = operands[1];
13885 zero_or_two31 = operands[2];
13886 input = operands[3];
13887 two31 = operands[4];
13888 vecmode = GET_MODE (large);
13889 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
13891 /* Load up the value into the low element. We must ensure that the other
13892 elements are valid floats -- zero is the easiest such value. */
13893 if (MEM_P (input))
13895 if (vecmode == V4SFmode)
13896 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
13897 else
13898 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
13900 else
13902 input = gen_rtx_REG (vecmode, REGNO (input));
13903 emit_move_insn (value, CONST0_RTX (vecmode));
13904 if (vecmode == V4SFmode)
13905 emit_insn (gen_sse_movss (value, value, input));
13906 else
13907 emit_insn (gen_sse2_movsd (value, value, input));
13910 emit_move_insn (large, two31);
13911 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
13913 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
13914 emit_insn (gen_rtx_SET (VOIDmode, large, x));
13916 x = gen_rtx_AND (vecmode, zero_or_two31, large);
13917 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
13919 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
13920 emit_insn (gen_rtx_SET (VOIDmode, value, x));
13922 large = gen_rtx_REG (V4SImode, REGNO (large));
13923 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
13925 x = gen_rtx_REG (V4SImode, REGNO (value));
13926 if (vecmode == V4SFmode)
13927 emit_insn (gen_sse2_cvttps2dq (x, value));
13928 else
13929 emit_insn (gen_sse2_cvttpd2dq (x, value));
13930 value = x;
13932 emit_insn (gen_xorv4si3 (value, value, large));
13935 /* Convert an unsigned DImode value into a DFmode, using only SSE.
13936 Expects the 64-bit DImode to be supplied in a pair of integral
13937 registers. Requires SSE2; will use SSE3 if available. For x86_32,
13938 -mfpmath=sse, !optimize_size only. */
13940 void
13941 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
13943 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
13944 rtx int_xmm, fp_xmm;
13945 rtx biases, exponents;
13946 rtx x;
13948 int_xmm = gen_reg_rtx (V4SImode);
13949 if (TARGET_INTER_UNIT_MOVES)
13950 emit_insn (gen_movdi_to_sse (int_xmm, input));
13951 else if (TARGET_SSE_SPLIT_REGS)
13953 emit_clobber (int_xmm);
13954 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
13956 else
13958 x = gen_reg_rtx (V2DImode);
13959 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
13960 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
13963 x = gen_rtx_CONST_VECTOR (V4SImode,
13964 gen_rtvec (4, GEN_INT (0x43300000UL),
13965 GEN_INT (0x45300000UL),
13966 const0_rtx, const0_rtx));
13967 exponents = validize_mem (force_const_mem (V4SImode, x));
13969 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
13970 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
13972 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
13973 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
13974 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
13975 (0x1.0p84 + double(fp_value_hi_xmm)).
13976 Note these exponents differ by 32. */
13978 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
13980 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
13981 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
13982 real_ldexp (&bias_lo_rvt, &dconst1, 52);
13983 real_ldexp (&bias_hi_rvt, &dconst1, 84);
13984 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
13985 x = const_double_from_real_value (bias_hi_rvt, DFmode);
13986 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
13987 biases = validize_mem (force_const_mem (V2DFmode, biases));
13988 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
13990 /* Add the upper and lower DFmode values together. */
13991 if (TARGET_SSE3)
13992 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
13993 else
13995 x = copy_to_mode_reg (V2DFmode, fp_xmm);
13996 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
13997 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14000 ix86_expand_vector_extract (false, target, fp_xmm, 0);
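/* Numeric sketch of the bias trick above (illustrative, assuming input
   0x0000000100000002): the interleave builds the bit patterns of
   (0x1.0p52 + 2.0) and (0x1.0p84 + 1.0 * 0x1.0p32); subtracting the
   0x1.0p52/0x1.0p84 biases leaves 2.0 and 4294967296.0, and their sum is
   the expected 4294967298.0.  */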
14003 /* Not used, but eases macroization of patterns. */
14004 void
14005 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14006 rtx input ATTRIBUTE_UNUSED)
14008 gcc_unreachable ();
14011 /* Convert an unsigned SImode value into a DFmode. Only currently used
14012 for SSE, but applicable anywhere. */
14014 void
14015 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14017 REAL_VALUE_TYPE TWO31r;
14018 rtx x, fp;
14020 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14021 NULL, 1, OPTAB_DIRECT);
14023 fp = gen_reg_rtx (DFmode);
14024 emit_insn (gen_floatsidf2 (fp, x));
14026 real_ldexp (&TWO31r, &dconst1, 31);
14027 x = const_double_from_real_value (TWO31r, DFmode);
14029 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14030 if (x != target)
14031 emit_move_insn (target, x);
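/* How the expansion above works (illustrative note): adding INT_MIN
   re-biases the unsigned input into the signed SImode range, the signed
   int->double conversion is then exact, and adding 0x1.0p31 back restores
   the original unsigned value.  E.g. input 0x80000001 becomes signed 1,
   converts to 1.0, and 1.0 + 2147483648.0 == 2147483649.0.  */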
14034 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14035 32-bit mode; otherwise we have a direct convert instruction. */
14037 void
14038 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14040 REAL_VALUE_TYPE TWO32r;
14041 rtx fp_lo, fp_hi, x;
14043 fp_lo = gen_reg_rtx (DFmode);
14044 fp_hi = gen_reg_rtx (DFmode);
14046 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14048 real_ldexp (&TWO32r, &dconst1, 32);
14049 x = const_double_from_real_value (TWO32r, DFmode);
14050 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14052 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14054 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14055 0, OPTAB_DIRECT);
14056 if (x != target)
14057 emit_move_insn (target, x);
14060 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14061 For x86_32, -mfpmath=sse, !optimize_size only. */
14062 void
14063 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14065 REAL_VALUE_TYPE ONE16r;
14066 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14068 real_ldexp (&ONE16r, &dconst1, 16);
14069 x = const_double_from_real_value (ONE16r, SFmode);
14070 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14071 NULL, 0, OPTAB_DIRECT);
14072 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14073 NULL, 0, OPTAB_DIRECT);
14074 fp_hi = gen_reg_rtx (SFmode);
14075 fp_lo = gen_reg_rtx (SFmode);
14076 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14077 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14078 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14079 0, OPTAB_DIRECT);
14080 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14081 0, OPTAB_DIRECT);
14082 if (!rtx_equal_p (target, fp_hi))
14083 emit_move_insn (target, fp_hi);
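/* Illustrative note: the input is split into 16-bit halves so that each
   half converts to SFmode exactly; the result is hi * 0x1.0p16 + lo.
   E.g. 0x12345678 gives 4660 * 65536.0f + 22136.0f, i.e. 305419896
   up to the rounding of the final addition.  */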
14086 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14087 then replicate the value for all elements of the vector
14088 register. */
14090 rtx
14091 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14093 rtvec v;
14094 switch (mode)
14096 case SImode:
14097 gcc_assert (vect);
14098 v = gen_rtvec (4, value, value, value, value);
14099 return gen_rtx_CONST_VECTOR (V4SImode, v);
14101 case DImode:
14102 gcc_assert (vect);
14103 v = gen_rtvec (2, value, value);
14104 return gen_rtx_CONST_VECTOR (V2DImode, v);
14106 case SFmode:
14107 if (vect)
14108 v = gen_rtvec (4, value, value, value, value);
14109 else
14110 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14111 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14112 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14114 case DFmode:
14115 if (vect)
14116 v = gen_rtvec (2, value, value);
14117 else
14118 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14119 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14121 default:
14122 gcc_unreachable ();
14126 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14127 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14128 for an SSE register. If VECT is true, then replicate the mask for
14129 all elements of the vector register. If INVERT is true, then create
14130 a mask excluding the sign bit. */
14132 rtx
14133 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14135 enum machine_mode vec_mode, imode;
14136 HOST_WIDE_INT hi, lo;
14137 int shift = 63;
14138 rtx v;
14139 rtx mask;
14141 /* Find the sign bit, sign extended to 2*HWI. */
14142 switch (mode)
14144 case SImode:
14145 case SFmode:
14146 imode = SImode;
14147 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14148 lo = 0x80000000, hi = lo < 0;
14149 break;
14151 case DImode:
14152 case DFmode:
14153 imode = DImode;
14154 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14155 if (HOST_BITS_PER_WIDE_INT >= 64)
14156 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14157 else
14158 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14159 break;
14161 case TImode:
14162 case TFmode:
14163 vec_mode = VOIDmode;
14164 if (HOST_BITS_PER_WIDE_INT >= 64)
14166 imode = TImode;
14167 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14169 else
14171 rtvec vec;
14173 imode = DImode;
14174 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14176 if (invert)
14178 lo = ~lo, hi = ~hi;
14179 v = constm1_rtx;
14181 else
14182 v = const0_rtx;
14184 mask = immed_double_const (lo, hi, imode);
14186 vec = gen_rtvec (2, v, mask);
14187 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14188 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14190 return v;
14192 break;
14194 default:
14195 gcc_unreachable ();
14198 if (invert)
14199 lo = ~lo, hi = ~hi;
14201 /* Force this value into the low part of a fp vector constant. */
14202 mask = immed_double_const (lo, hi, imode);
14203 mask = gen_lowpart (mode, mask);
14205 if (vec_mode == VOIDmode)
14206 return force_reg (mode, mask);
14208 v = ix86_build_const_vector (mode, vect, mask);
14209 return force_reg (vec_mode, v);
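/* Resulting masks (illustrative): for SFmode with !invert the element value
   is 0x80000000 (just the sign bit), and with invert it is 0x7fffffff;
   for DFmode the values are 0x8000000000000000 and 0x7fffffffffffffff.
   The value is replicated across all vector elements when VECT is true.  */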
14212 /* Generate code for floating point ABS or NEG. */
14214 void
14215 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14216 rtx operands[])
14218 rtx mask, set, use, clob, dst, src;
14219 bool use_sse = false;
14220 bool vector_mode = VECTOR_MODE_P (mode);
14221 enum machine_mode elt_mode = mode;
14223 if (vector_mode)
14225 elt_mode = GET_MODE_INNER (mode);
14226 use_sse = true;
14228 else if (mode == TFmode)
14229 use_sse = true;
14230 else if (TARGET_SSE_MATH)
14231 use_sse = SSE_FLOAT_MODE_P (mode);
14233 /* NEG and ABS performed with SSE use bitwise mask operations.
14234 Create the appropriate mask now. */
14235 if (use_sse)
14236 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14237 else
14238 mask = NULL_RTX;
14240 dst = operands[0];
14241 src = operands[1];
14243 if (vector_mode)
14245 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14246 set = gen_rtx_SET (VOIDmode, dst, set);
14247 emit_insn (set);
14249 else
14251 set = gen_rtx_fmt_e (code, mode, src);
14252 set = gen_rtx_SET (VOIDmode, dst, set);
14253 if (mask)
14255 use = gen_rtx_USE (VOIDmode, mask);
14256 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14257 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14258 gen_rtvec (3, set, use, clob)));
14260 else
14261 emit_insn (set);
14265 /* Expand a copysign operation. Special case operand 0 being a constant. */
14267 void
14268 ix86_expand_copysign (rtx operands[])
14270 enum machine_mode mode;
14271 rtx dest, op0, op1, mask, nmask;
14273 dest = operands[0];
14274 op0 = operands[1];
14275 op1 = operands[2];
14277 mode = GET_MODE (dest);
14279 if (GET_CODE (op0) == CONST_DOUBLE)
14281 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14283 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14284 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14286 if (mode == SFmode || mode == DFmode)
14288 enum machine_mode vmode;
14290 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14292 if (op0 == CONST0_RTX (mode))
14293 op0 = CONST0_RTX (vmode);
14294 else
14296 rtx v = ix86_build_const_vector (mode, false, op0);
14298 op0 = force_reg (vmode, v);
14301 else if (op0 != CONST0_RTX (mode))
14302 op0 = force_reg (mode, op0);
14304 mask = ix86_build_signbit_mask (mode, 0, 0);
14306 if (mode == SFmode)
14307 copysign_insn = gen_copysignsf3_const;
14308 else if (mode == DFmode)
14309 copysign_insn = gen_copysigndf3_const;
14310 else
14311 copysign_insn = gen_copysigntf3_const;
14313 emit_insn (copysign_insn (dest, op0, op1, mask));
14315 else
14317 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14319 nmask = ix86_build_signbit_mask (mode, 0, 1);
14320 mask = ix86_build_signbit_mask (mode, 0, 0);
14322 if (mode == SFmode)
14323 copysign_insn = gen_copysignsf3_var;
14324 else if (mode == DFmode)
14325 copysign_insn = gen_copysigndf3_var;
14326 else
14327 copysign_insn = gen_copysigntf3_var;
14329 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14333 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14334 be a constant, and so has already been expanded into a vector constant. */
14336 void
14337 ix86_split_copysign_const (rtx operands[])
14339 enum machine_mode mode, vmode;
14340 rtx dest, op0, mask, x;
14342 dest = operands[0];
14343 op0 = operands[1];
14344 mask = operands[3];
14346 mode = GET_MODE (dest);
14347 vmode = GET_MODE (mask);
14349 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14350 x = gen_rtx_AND (vmode, dest, mask);
14351 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14353 if (op0 != CONST0_RTX (vmode))
14355 x = gen_rtx_IOR (vmode, dest, op0);
14356 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14360 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14361 so we have to do two masks. */
14363 void
14364 ix86_split_copysign_var (rtx operands[])
14366 enum machine_mode mode, vmode;
14367 rtx dest, scratch, op0, op1, mask, nmask, x;
14369 dest = operands[0];
14370 scratch = operands[1];
14371 op0 = operands[2];
14372 op1 = operands[3];
14373 nmask = operands[4];
14374 mask = operands[5];
14376 mode = GET_MODE (dest);
14377 vmode = GET_MODE (mask);
14379 if (rtx_equal_p (op0, op1))
14381 /* Shouldn't happen often (it's useless, obviously), but when it does
14382 we'd generate incorrect code if we continue below. */
14383 emit_move_insn (dest, op0);
14384 return;
14387 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14389 gcc_assert (REGNO (op1) == REGNO (scratch));
14391 x = gen_rtx_AND (vmode, scratch, mask);
14392 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14394 dest = mask;
14395 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14396 x = gen_rtx_NOT (vmode, dest);
14397 x = gen_rtx_AND (vmode, x, op0);
14398 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14400 else
14402 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14404 x = gen_rtx_AND (vmode, scratch, mask);
14406 else /* alternative 2,4 */
14408 gcc_assert (REGNO (mask) == REGNO (scratch));
14409 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14410 x = gen_rtx_AND (vmode, scratch, op1);
14412 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14414 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14416 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14417 x = gen_rtx_AND (vmode, dest, nmask);
14419 else /* alternative 3,4 */
14421 gcc_assert (REGNO (nmask) == REGNO (dest));
14422 dest = nmask;
14423 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14424 x = gen_rtx_AND (vmode, dest, op0);
14426 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14429 x = gen_rtx_IOR (vmode, dest, scratch);
14430 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14433 /* Return TRUE or FALSE depending on whether the first SET in INSN
14434 has source and destination with matching CC modes, and that the
14435 CC mode is at least as constrained as REQ_MODE. */
14437 int
14438 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14440 rtx set;
14441 enum machine_mode set_mode;
14443 set = PATTERN (insn);
14444 if (GET_CODE (set) == PARALLEL)
14445 set = XVECEXP (set, 0, 0);
14446 gcc_assert (GET_CODE (set) == SET);
14447 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14449 set_mode = GET_MODE (SET_DEST (set));
14450 switch (set_mode)
14452 case CCNOmode:
14453 if (req_mode != CCNOmode
14454 && (req_mode != CCmode
14455 || XEXP (SET_SRC (set), 1) != const0_rtx))
14456 return 0;
14457 break;
14458 case CCmode:
14459 if (req_mode == CCGCmode)
14460 return 0;
14461 /* FALLTHRU */
14462 case CCGCmode:
14463 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14464 return 0;
14465 /* FALLTHRU */
14466 case CCGOCmode:
14467 if (req_mode == CCZmode)
14468 return 0;
14469 /* FALLTHRU */
14470 case CCAmode:
14471 case CCCmode:
14472 case CCOmode:
14473 case CCSmode:
14474 case CCZmode:
14475 break;
14477 default:
14478 gcc_unreachable ();
14481 return (GET_MODE (SET_SRC (set)) == set_mode);
14484 /* Generate insn patterns to do an integer compare of OPERANDS. */
14486 static rtx
14487 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14489 enum machine_mode cmpmode;
14490 rtx tmp, flags;
14492 cmpmode = SELECT_CC_MODE (code, op0, op1);
14493 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14495 /* This is very simple, but making the interface the same as in the
14496 FP case makes the rest of the code easier. */
14497 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14498 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14500 /* Return the test that should be put into the flags user, i.e.
14501 the bcc, scc, or cmov instruction. */
14502 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14505 /* Figure out whether to use ordered or unordered fp comparisons.
14506 Return the appropriate mode to use. */
14508 enum machine_mode
14509 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14511 /* ??? In order to make all comparisons reversible, we do all comparisons
14512 non-trapping when compiling for IEEE.  Once gcc is able to distinguish
14513 all forms of trapping and nontrapping comparisons, we can make inequality
14514 comparisons trapping again, since that results in better code when using
14515 FCOM based compares. */
14516 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14519 enum machine_mode
14520 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14522 enum machine_mode mode = GET_MODE (op0);
14524 if (SCALAR_FLOAT_MODE_P (mode))
14526 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14527 return ix86_fp_compare_mode (code);
14530 switch (code)
14532 /* Only zero flag is needed. */
14533 case EQ: /* ZF=0 */
14534 case NE: /* ZF!=0 */
14535 return CCZmode;
14536 /* Codes needing carry flag. */
14537 case GEU: /* CF=0 */
14538 case LTU: /* CF=1 */
14539 /* Detect overflow checks. They need just the carry flag. */
14540 if (GET_CODE (op0) == PLUS
14541 && rtx_equal_p (op1, XEXP (op0, 0)))
14542 return CCCmode;
14543 else
14544 return CCmode;
14545 case GTU: /* CF=0 & ZF=0 */
14546 case LEU: /* CF=1 | ZF=1 */
14547 /* Detect overflow checks. They need just the carry flag. */
14548 if (GET_CODE (op0) == MINUS
14549 && rtx_equal_p (op1, XEXP (op0, 0)))
14550 return CCCmode;
14551 else
14552 return CCmode;
14553 /* Codes possibly doable only with sign flag when
14554 comparing against zero. */
14555 case GE: /* SF=OF or SF=0 */
14556 case LT: /* SF<>OF or SF=1 */
14557 if (op1 == const0_rtx)
14558 return CCGOCmode;
14559 else
14560 /* For other cases Carry flag is not required. */
14561 return CCGCmode;
14562 /* Codes doable only with the sign flag when comparing
14563 against zero, but for which we lack a jump instruction,
14564 so we need to use relational tests against overflow,
14565 which thus needs to be zero. */
14566 case GT: /* ZF=0 & SF=OF */
14567 case LE: /* ZF=1 | SF<>OF */
14568 if (op1 == const0_rtx)
14569 return CCNOmode;
14570 else
14571 return CCGCmode;
14572 /* The strcmp pattern does (use flags) and combine may ask us for the
14573 proper mode. */
14574 case USE:
14575 return CCmode;
14576 default:
14577 gcc_unreachable ();
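/* Examples of the mode selection above (illustrative): "a == b" only needs
   ZF and gets CCZmode; a signed "a >= 0" gets CCGOCmode; an unsigned
   overflow check of the form "(a + b) < a" only needs CF and gets
   CCCmode.  */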
14581 /* Return the fixed registers used for condition codes. */
14583 static bool
14584 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14586 *p1 = FLAGS_REG;
14587 *p2 = FPSR_REG;
14588 return true;
14591 /* If two condition code modes are compatible, return a condition code
14592 mode which is compatible with both. Otherwise, return
14593 VOIDmode. */
14595 static enum machine_mode
14596 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14598 if (m1 == m2)
14599 return m1;
14601 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14602 return VOIDmode;
14604 if ((m1 == CCGCmode && m2 == CCGOCmode)
14605 || (m1 == CCGOCmode && m2 == CCGCmode))
14606 return CCGCmode;
14608 switch (m1)
14610 default:
14611 gcc_unreachable ();
14613 case CCmode:
14614 case CCGCmode:
14615 case CCGOCmode:
14616 case CCNOmode:
14617 case CCAmode:
14618 case CCCmode:
14619 case CCOmode:
14620 case CCSmode:
14621 case CCZmode:
14622 switch (m2)
14624 default:
14625 return VOIDmode;
14627 case CCmode:
14628 case CCGCmode:
14629 case CCGOCmode:
14630 case CCNOmode:
14631 case CCAmode:
14632 case CCCmode:
14633 case CCOmode:
14634 case CCSmode:
14635 case CCZmode:
14636 return CCmode;
14639 case CCFPmode:
14640 case CCFPUmode:
14641 /* These are only compatible with themselves, which we already
14642 checked above. */
14643 return VOIDmode;
14648 /* Return a comparison we can do that is equivalent to
14649 swap_condition (code), apart possibly from orderedness.
14650 But never change orderedness if TARGET_IEEE_FP; return
14651 UNKNOWN in that case if necessary. */
14653 static enum rtx_code
14654 ix86_fp_swap_condition (enum rtx_code code)
14656 switch (code)
14658 case GT: /* GTU - CF=0 & ZF=0 */
14659 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14660 case GE: /* GEU - CF=0 */
14661 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14662 case UNLT: /* LTU - CF=1 */
14663 return TARGET_IEEE_FP ? UNKNOWN : GT;
14664 case UNLE: /* LEU - CF=1 | ZF=1 */
14665 return TARGET_IEEE_FP ? UNKNOWN : GE;
14666 default:
14667 return swap_condition (code);
14671 /* Return cost of comparison CODE using the best strategy for performance.
14672 All the following functions use the number of instructions as a cost metric.
14673 In future this should be tweaked to compute bytes for optimize_size and
14674 take into account performance of various instructions on various CPUs. */
14676 static int
14677 ix86_fp_comparison_cost (enum rtx_code code)
14679 int arith_cost;
14681 /* The cost of code using bit-twiddling on %ah. */
14682 switch (code)
14684 case UNLE:
14685 case UNLT:
14686 case LTGT:
14687 case GT:
14688 case GE:
14689 case UNORDERED:
14690 case ORDERED:
14691 case UNEQ:
14692 arith_cost = 4;
14693 break;
14694 case LT:
14695 case NE:
14696 case EQ:
14697 case UNGE:
14698 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14699 break;
14700 case LE:
14701 case UNGT:
14702 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14703 break;
14704 default:
14705 gcc_unreachable ();
14708 switch (ix86_fp_comparison_strategy (code))
14710 case IX86_FPCMP_COMI:
14711 return arith_cost > 4 ? 3 : 2;
14712 case IX86_FPCMP_SAHF:
14713 return arith_cost > 4 ? 4 : 3;
14714 default:
14715 return arith_cost;
14719 /* Return strategy to use for floating-point. We assume that fcomi is always
14720 preferable where available, since that is also true when looking at size
14721 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14723 enum ix86_fpcmp_strategy
14724 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14726 /* Do fcomi/sahf based test when profitable. */
14728 if (TARGET_CMOVE)
14729 return IX86_FPCMP_COMI;
14731 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14732 return IX86_FPCMP_SAHF;
14734 return IX86_FPCMP_ARITH;
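/* What the strategies mean (illustrative note): IX86_FPCMP_COMI sets EFLAGS
   directly with the fcomi/fucomi family; IX86_FPCMP_SAHF uses fnstsw %ax
   followed by sahf; IX86_FPCMP_ARITH uses fnstsw plus bit tests on %ah, as
   expanded in ix86_expand_fp_compare below.  */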
14737 /* Swap, force into registers, or otherwise massage the two operands
14738 to a fp comparison. The operands are updated in place; the new
14739 comparison code is returned. */
14741 static enum rtx_code
14742 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
14744 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
14745 rtx op0 = *pop0, op1 = *pop1;
14746 enum machine_mode op_mode = GET_MODE (op0);
14747 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
14749 /* All of the unordered compare instructions only work on registers.
14750 The same is true of the fcomi compare instructions. The XFmode
14751 compare instructions require registers except when comparing
14752 against zero or when converting operand 1 from fixed point to
14753 floating point. */
14755 if (!is_sse
14756 && (fpcmp_mode == CCFPUmode
14757 || (op_mode == XFmode
14758 && ! (standard_80387_constant_p (op0) == 1
14759 || standard_80387_constant_p (op1) == 1)
14760 && GET_CODE (op1) != FLOAT)
14761 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
14763 op0 = force_reg (op_mode, op0);
14764 op1 = force_reg (op_mode, op1);
14766 else
14768 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
14769 things around if they appear profitable, otherwise force op0
14770 into a register. */
14772 if (standard_80387_constant_p (op0) == 0
14773 || (MEM_P (op0)
14774 && ! (standard_80387_constant_p (op1) == 0
14775 || MEM_P (op1))))
14777 enum rtx_code new_code = ix86_fp_swap_condition (code);
14778 if (new_code != UNKNOWN)
14780 rtx tmp;
14781 tmp = op0, op0 = op1, op1 = tmp;
14782 code = new_code;
14786 if (!REG_P (op0))
14787 op0 = force_reg (op_mode, op0);
14789 if (CONSTANT_P (op1))
14791 int tmp = standard_80387_constant_p (op1);
14792 if (tmp == 0)
14793 op1 = validize_mem (force_const_mem (op_mode, op1));
14794 else if (tmp == 1)
14796 if (TARGET_CMOVE)
14797 op1 = force_reg (op_mode, op1);
14799 else
14800 op1 = force_reg (op_mode, op1);
14804 /* Try to rearrange the comparison to make it cheaper. */
14805 if (ix86_fp_comparison_cost (code)
14806 > ix86_fp_comparison_cost (swap_condition (code))
14807 && (REG_P (op1) || can_create_pseudo_p ()))
14809 rtx tmp;
14810 tmp = op0, op0 = op1, op1 = tmp;
14811 code = swap_condition (code);
14812 if (!REG_P (op0))
14813 op0 = force_reg (op_mode, op0);
14816 *pop0 = op0;
14817 *pop1 = op1;
14818 return code;
14821 /* Convert comparison codes we use to represent FP comparison to integer
14822 code that will result in proper branch. Return UNKNOWN if no such code
14823 is available. */
14825 enum rtx_code
14826 ix86_fp_compare_code_to_integer (enum rtx_code code)
14828 switch (code)
14830 case GT:
14831 return GTU;
14832 case GE:
14833 return GEU;
14834 case ORDERED:
14835 case UNORDERED:
14836 return code;
14837 break;
14838 case UNEQ:
14839 return EQ;
14840 break;
14841 case UNLT:
14842 return LTU;
14843 break;
14844 case UNLE:
14845 return LEU;
14846 break;
14847 case LTGT:
14848 return NE;
14849 break;
14850 default:
14851 return UNKNOWN;
14855 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14857 static rtx
14858 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
14860 enum machine_mode fpcmp_mode, intcmp_mode;
14861 rtx tmp, tmp2;
14863 fpcmp_mode = ix86_fp_compare_mode (code);
14864 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14866 /* Do fcomi/sahf based test when profitable. */
14867 switch (ix86_fp_comparison_strategy (code))
14869 case IX86_FPCMP_COMI:
14870 intcmp_mode = fpcmp_mode;
14871 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14872 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14873 tmp);
14874 emit_insn (tmp);
14875 break;
14877 case IX86_FPCMP_SAHF:
14878 intcmp_mode = fpcmp_mode;
14879 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14880 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14881 tmp);
14883 if (!scratch)
14884 scratch = gen_reg_rtx (HImode);
14885 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
14886 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
14887 break;
14889 case IX86_FPCMP_ARITH:
14890 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
14891 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14892 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
14893 if (!scratch)
14894 scratch = gen_reg_rtx (HImode);
14895 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
14897 /* In the unordered case, we have to check C2 for NaN's, which
14898 doesn't happen to work out to anything nice combination-wise.
14899 So do some bit twiddling on the value we've got in AH to come
14900 up with an appropriate set of condition codes. */
14902 intcmp_mode = CCNOmode;
14903 switch (code)
14905 case GT:
14906 case UNGT:
14907 if (code == GT || !TARGET_IEEE_FP)
14909 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14910 code = EQ;
14912 else
14914 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14915 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14916 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
14917 intcmp_mode = CCmode;
14918 code = GEU;
14920 break;
14921 case LT:
14922 case UNLT:
14923 if (code == LT && TARGET_IEEE_FP)
14925 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14926 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
14927 intcmp_mode = CCmode;
14928 code = EQ;
14930 else
14932 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
14933 code = NE;
14935 break;
14936 case GE:
14937 case UNGE:
14938 if (code == GE || !TARGET_IEEE_FP)
14940 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
14941 code = EQ;
14943 else
14945 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14946 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
14947 code = NE;
14949 break;
14950 case LE:
14951 case UNLE:
14952 if (code == LE && TARGET_IEEE_FP)
14954 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14955 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14956 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
14957 intcmp_mode = CCmode;
14958 code = LTU;
14960 else
14962 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14963 code = NE;
14965 break;
14966 case EQ:
14967 case UNEQ:
14968 if (code == EQ && TARGET_IEEE_FP)
14970 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14971 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
14972 intcmp_mode = CCmode;
14973 code = EQ;
14975 else
14977 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
14978 code = NE;
14980 break;
14981 case NE:
14982 case LTGT:
14983 if (code == NE && TARGET_IEEE_FP)
14985 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14986 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
14987 GEN_INT (0x40)));
14988 code = NE;
14990 else
14992 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
14993 code = EQ;
14995 break;
14997 case UNORDERED:
14998 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
14999 code = NE;
15000 break;
15001 case ORDERED:
15002 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15003 code = EQ;
15004 break;
15006 default:
15007 gcc_unreachable ();
15009 break;
15011 default:
15012 gcc_unreachable();
15015 /* Return the test that should be put into the flags user, i.e.
15016 the bcc, scc, or cmov instruction. */
15017 return gen_rtx_fmt_ee (code, VOIDmode,
15018 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15019 const0_rtx);
15022 rtx
15023 ix86_expand_compare (enum rtx_code code)
15025 rtx op0, op1, ret;
15026 op0 = ix86_compare_op0;
15027 op1 = ix86_compare_op1;
15029 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15030 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15032 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15034 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15035 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15037 else
15038 ret = ix86_expand_int_compare (code, op0, op1);
15040 return ret;
15043 void
15044 ix86_expand_branch (enum rtx_code code, rtx label)
15046 rtx tmp;
15048 switch (GET_MODE (ix86_compare_op0))
15050 case SFmode:
15051 case DFmode:
15052 case XFmode:
15053 case QImode:
15054 case HImode:
15055 case SImode:
15056 simple:
15057 tmp = ix86_expand_compare (code);
15058 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15059 gen_rtx_LABEL_REF (VOIDmode, label),
15060 pc_rtx);
15061 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15062 return;
15064 case DImode:
15065 if (TARGET_64BIT)
15066 goto simple;
15067 case TImode:
15068 /* Expand DImode branch into multiple compare+branch. */
15070 rtx lo[2], hi[2], label2;
15071 enum rtx_code code1, code2, code3;
15072 enum machine_mode submode;
15074 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15076 tmp = ix86_compare_op0;
15077 ix86_compare_op0 = ix86_compare_op1;
15078 ix86_compare_op1 = tmp;
15079 code = swap_condition (code);
15081 if (GET_MODE (ix86_compare_op0) == DImode)
15083 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15084 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15085 submode = SImode;
15087 else
15089 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15090 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15091 submode = DImode;
15094 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15095 avoid two branches. This costs one extra insn, so disable when
15096 optimizing for size. */
15098 if ((code == EQ || code == NE)
15099 && (!optimize_insn_for_size_p ()
15100 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15102 rtx xor0, xor1;
15104 xor1 = hi[0];
15105 if (hi[1] != const0_rtx)
15106 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15107 NULL_RTX, 0, OPTAB_WIDEN);
15109 xor0 = lo[0];
15110 if (lo[1] != const0_rtx)
15111 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15112 NULL_RTX, 0, OPTAB_WIDEN);
15114 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15115 NULL_RTX, 0, OPTAB_WIDEN);
15117 ix86_compare_op0 = tmp;
15118 ix86_compare_op1 = const0_rtx;
15119 ix86_expand_branch (code, label);
15120 return;
15123 /* Otherwise, if we are doing less-than or greater-or-equal-than,
15124 op1 is a constant and the low word is zero, then we can just
15125 examine the high word. Similarly for low word -1 and
15126 less-or-equal-than or greater-than. */
15128 if (CONST_INT_P (hi[1]))
15129 switch (code)
15131 case LT: case LTU: case GE: case GEU:
15132 if (lo[1] == const0_rtx)
15134 ix86_compare_op0 = hi[0];
15135 ix86_compare_op1 = hi[1];
15136 ix86_expand_branch (code, label);
15137 return;
15139 break;
15140 case LE: case LEU: case GT: case GTU:
15141 if (lo[1] == constm1_rtx)
15143 ix86_compare_op0 = hi[0];
15144 ix86_compare_op1 = hi[1];
15145 ix86_expand_branch (code, label);
15146 return;
15148 break;
15149 default:
15150 break;
15153 /* Otherwise, we need two or three jumps. */
15155 label2 = gen_label_rtx ();
15157 code1 = code;
15158 code2 = swap_condition (code);
15159 code3 = unsigned_condition (code);
15161 switch (code)
15163 case LT: case GT: case LTU: case GTU:
15164 break;
15166 case LE: code1 = LT; code2 = GT; break;
15167 case GE: code1 = GT; code2 = LT; break;
15168 case LEU: code1 = LTU; code2 = GTU; break;
15169 case GEU: code1 = GTU; code2 = LTU; break;
15171 case EQ: code1 = UNKNOWN; code2 = NE; break;
15172 case NE: code2 = UNKNOWN; break;
15174 default:
15175 gcc_unreachable ();
15179 * a < b =>
15180 * if (hi(a) < hi(b)) goto true;
15181 * if (hi(a) > hi(b)) goto false;
15182 * if (lo(a) < lo(b)) goto true;
15183 * false:
15186 ix86_compare_op0 = hi[0];
15187 ix86_compare_op1 = hi[1];
15189 if (code1 != UNKNOWN)
15190 ix86_expand_branch (code1, label);
15191 if (code2 != UNKNOWN)
15192 ix86_expand_branch (code2, label2);
15194 ix86_compare_op0 = lo[0];
15195 ix86_compare_op1 = lo[1];
15196 ix86_expand_branch (code3, label);
15198 if (code2 != UNKNOWN)
15199 emit_label (label2);
15200 return;
15203 default:
15204 /* If we have already emitted a compare insn, go straight to simple.
15205 ix86_expand_compare won't emit anything if ix86_compare_emitted
15206 is non-NULL. */
15207 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15208 goto simple;
15212 /* Split branch based on floating point condition. */
15213 void
15214 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15215 rtx target1, rtx target2, rtx tmp, rtx pushed)
15217 rtx condition;
15218 rtx i;
15220 if (target2 != pc_rtx)
15222 rtx tmp = target2;
15223 code = reverse_condition_maybe_unordered (code);
15224 target2 = target1;
15225 target1 = tmp;
15228 condition = ix86_expand_fp_compare (code, op1, op2,
15229 tmp);
15231 /* Remove pushed operand from stack. */
15232 if (pushed)
15233 ix86_free_from_memory (GET_MODE (pushed));
15235 i = emit_jump_insn (gen_rtx_SET
15236 (VOIDmode, pc_rtx,
15237 gen_rtx_IF_THEN_ELSE (VOIDmode,
15238 condition, target1, target2)));
15239 if (split_branch_probability >= 0)
15240 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15243 void
15244 ix86_expand_setcc (enum rtx_code code, rtx dest)
15246 rtx ret;
15248 gcc_assert (GET_MODE (dest) == QImode);
15250 ret = ix86_expand_compare (code);
15251 PUT_MODE (ret, QImode);
15252 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15255 /* Expand comparison setting or clearing carry flag. Return true when
15256 successful and set pop for the operation. */
15257 static bool
15258 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15260 enum machine_mode mode =
15261 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15263 /* Do not handle DImode compares that go through special path. */
15264 if (mode == (TARGET_64BIT ? TImode : DImode))
15265 return false;
15267 if (SCALAR_FLOAT_MODE_P (mode))
15269 rtx compare_op, compare_seq;
15271 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15273 /* Shortcut: following common codes never translate
15274 into carry flag compares. */
15275 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15276 || code == ORDERED || code == UNORDERED)
15277 return false;
15279 /* These comparisons require the zero flag; swap operands so they won't. */
15280 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15281 && !TARGET_IEEE_FP)
15283 rtx tmp = op0;
15284 op0 = op1;
15285 op1 = tmp;
15286 code = swap_condition (code);
15289 /* Try to expand the comparison and verify that we end up with
15290 a carry-flag-based comparison.  This fails to be true only when
15291 we decide to expand the comparison using arithmetic, which is not
15292 a common scenario. */
15293 start_sequence ();
15294 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15295 compare_seq = get_insns ();
15296 end_sequence ();
15298 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15299 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15300 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15301 else
15302 code = GET_CODE (compare_op);
15304 if (code != LTU && code != GEU)
15305 return false;
15307 emit_insn (compare_seq);
15308 *pop = compare_op;
15309 return true;
15312 if (!INTEGRAL_MODE_P (mode))
15313 return false;
15315 switch (code)
15317 case LTU:
15318 case GEU:
15319 break;
15321 /* Convert a==0 into (unsigned)a<1. */
15322 case EQ:
15323 case NE:
15324 if (op1 != const0_rtx)
15325 return false;
15326 op1 = const1_rtx;
15327 code = (code == EQ ? LTU : GEU);
15328 break;
15330 /* Convert a>b into b<a or a>=b-1. */
15331 case GTU:
15332 case LEU:
15333 if (CONST_INT_P (op1))
15335 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15336 /* Bail out on overflow.  We can still swap the operands, but that
15337 would force loading the constant into a register. */
15338 if (op1 == const0_rtx
15339 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15340 return false;
15341 code = (code == GTU ? GEU : LTU);
15343 else
15345 rtx tmp = op1;
15346 op1 = op0;
15347 op0 = tmp;
15348 code = (code == GTU ? LTU : GEU);
15350 break;
15352 /* Convert a>=0 into (unsigned)a<0x80000000. */
15353 case LT:
15354 case GE:
15355 if (mode == DImode || op1 != const0_rtx)
15356 return false;
15357 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15358 code = (code == LT ? GEU : LTU);
15359 break;
15360 case LE:
15361 case GT:
15362 if (mode == DImode || op1 != constm1_rtx)
15363 return false;
15364 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15365 code = (code == LE ? GEU : LTU);
15366 break;
15368 default:
15369 return false;
15371 /* Swapping operands may cause a constant to appear as the first operand. */
15372 if (!nonimmediate_operand (op0, VOIDmode))
15374 if (!can_create_pseudo_p ())
15375 return false;
15376 op0 = force_reg (mode, op0);
15378 ix86_compare_op0 = op0;
15379 ix86_compare_op1 = op1;
15380 *pop = ix86_expand_compare (code);
15381 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
15382 return true;
15385 int
15386 ix86_expand_int_movcc (rtx operands[])
15388 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15389 rtx compare_seq, compare_op;
15390 enum machine_mode mode = GET_MODE (operands[0]);
15391 bool sign_bit_compare_p = false;
15393 start_sequence ();
15394 ix86_compare_op0 = XEXP (operands[1], 0);
15395 ix86_compare_op1 = XEXP (operands[1], 1);
15396 compare_op = ix86_expand_compare (code);
15397 compare_seq = get_insns ();
15398 end_sequence ();
15400 compare_code = GET_CODE (compare_op);
15402 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15403 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15404 sign_bit_compare_p = true;
15406 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15407 HImode insns, we'd be swallowed in word prefix ops. */
15409 if ((mode != HImode || TARGET_FAST_PREFIX)
15410 && (mode != (TARGET_64BIT ? TImode : DImode))
15411 && CONST_INT_P (operands[2])
15412 && CONST_INT_P (operands[3]))
15414 rtx out = operands[0];
15415 HOST_WIDE_INT ct = INTVAL (operands[2]);
15416 HOST_WIDE_INT cf = INTVAL (operands[3]);
15417 HOST_WIDE_INT diff;
15419 diff = ct - cf;
15420 /* Sign-bit compares are better done using shifts than by using
15421 sbb. */
15422 if (sign_bit_compare_p
15423 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15424 ix86_compare_op1, &compare_op))
15426 /* Detect overlap between destination and compare sources. */
15427 rtx tmp = out;
15429 if (!sign_bit_compare_p)
15431 rtx flags;
15432 rtx (*insn)(rtx, rtx, rtx);
15433 bool fpcmp = false;
15435 compare_code = GET_CODE (compare_op);
15437 flags = XEXP (compare_op, 0);
15439 if (GET_MODE (flags) == CCFPmode
15440 || GET_MODE (flags) == CCFPUmode)
15442 fpcmp = true;
15443 compare_code
15444 = ix86_fp_compare_code_to_integer (compare_code);
15447 /* To simplify rest of code, restrict to the GEU case. */
15448 if (compare_code == LTU)
15450 HOST_WIDE_INT tmp = ct;
15451 ct = cf;
15452 cf = tmp;
15453 compare_code = reverse_condition (compare_code);
15454 code = reverse_condition (code);
15456 else
15458 if (fpcmp)
15459 PUT_CODE (compare_op,
15460 reverse_condition_maybe_unordered
15461 (GET_CODE (compare_op)));
15462 else
15463 PUT_CODE (compare_op,
15464 reverse_condition (GET_CODE (compare_op)));
15466 diff = ct - cf;
15468 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15469 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15470 tmp = gen_reg_rtx (mode);
15472 if (mode == DImode)
15473 insn = gen_x86_movdicc_0_m1;
15474 else
15475 insn = gen_x86_movsicc_0_m1;
15477 emit_insn (insn (tmp, flags, compare_op));
15479 else
15481 if (code == GT || code == GE)
15482 code = reverse_condition (code);
15483 else
15485 HOST_WIDE_INT tmp = ct;
15486 ct = cf;
15487 cf = tmp;
15488 diff = ct - cf;
15490 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15491 ix86_compare_op1, VOIDmode, 0, -1);
15494 if (diff == 1)
15497 * cmpl op0,op1
15498 * sbbl dest,dest
15499 * [addl dest, ct]
15501 * Size 5 - 8.
15503 if (ct)
15504 tmp = expand_simple_binop (mode, PLUS,
15505 tmp, GEN_INT (ct),
15506 copy_rtx (tmp), 1, OPTAB_DIRECT);
15508 else if (cf == -1)
15511 * cmpl op0,op1
15512 * sbbl dest,dest
15513 * orl $ct, dest
15515 * Size 8.
15517 tmp = expand_simple_binop (mode, IOR,
15518 tmp, GEN_INT (ct),
15519 copy_rtx (tmp), 1, OPTAB_DIRECT);
15521 else if (diff == -1 && ct)
15524 * cmpl op0,op1
15525 * sbbl dest,dest
15526 * notl dest
15527 * [addl dest, cf]
15529 * Size 8 - 11.
15531 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15532 if (cf)
15533 tmp = expand_simple_binop (mode, PLUS,
15534 copy_rtx (tmp), GEN_INT (cf),
15535 copy_rtx (tmp), 1, OPTAB_DIRECT);
15537 else
15540 * cmpl op0,op1
15541 * sbbl dest,dest
15542 * [notl dest]
15543 * andl cf - ct, dest
15544 * [addl dest, ct]
15546 * Size 8 - 11.
15549 if (cf == 0)
15551 cf = ct;
15552 ct = 0;
15553 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15556 tmp = expand_simple_binop (mode, AND,
15557 copy_rtx (tmp),
15558 gen_int_mode (cf - ct, mode),
15559 copy_rtx (tmp), 1, OPTAB_DIRECT);
15560 if (ct)
15561 tmp = expand_simple_binop (mode, PLUS,
15562 copy_rtx (tmp), GEN_INT (ct),
15563 copy_rtx (tmp), 1, OPTAB_DIRECT);
15566 if (!rtx_equal_p (tmp, out))
15567 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15569 return 1; /* DONE */
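/* Rough sketch of what the path above emits (illustrative only): once the
   condition is in the carry flag, "sbb dest,dest" materializes
   dest = CF ? -1 : 0, and that all-ones/all-zeros mask is folded into the
   two constants.  E.g. with ct = 7, cf = 3 the general form is
       dest = (mask & (cf - ct)) + ct
   which evaluates to cf (3) when the mask is -1 and to ct (7) when it is 0;
   the diff == 1 and cf == -1 special cases replace the and/add pair with a
   single add or or.  */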
15572 if (diff < 0)
15574 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15576 HOST_WIDE_INT tmp;
15577 tmp = ct, ct = cf, cf = tmp;
15578 diff = -diff;
15580 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15582 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15584 /* We may be reversing unordered compare to normal compare, that
15585 is not valid in general (we may convert non-trapping condition
15586 to trapping one), however on i386 we currently emit all
15587 comparisons unordered. */
15588 compare_code = reverse_condition_maybe_unordered (compare_code);
15589 code = reverse_condition_maybe_unordered (code);
15591 else
15593 compare_code = reverse_condition (compare_code);
15594 code = reverse_condition (code);
15598 compare_code = UNKNOWN;
15599 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15600 && CONST_INT_P (ix86_compare_op1))
15602 if (ix86_compare_op1 == const0_rtx
15603 && (code == LT || code == GE))
15604 compare_code = code;
15605 else if (ix86_compare_op1 == constm1_rtx)
15607 if (code == LE)
15608 compare_code = LT;
15609 else if (code == GT)
15610 compare_code = GE;
15614 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15615 if (compare_code != UNKNOWN
15616 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15617 && (cf == -1 || ct == -1))
15619 /* If lea code below could be used, only optimize
15620 if it results in a 2 insn sequence. */
15622 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15623 || diff == 3 || diff == 5 || diff == 9)
15624 || (compare_code == LT && ct == -1)
15625 || (compare_code == GE && cf == -1))
15628 * notl op1 (if necessary)
15629 * sarl $31, op1
15630 * orl cf, op1
15632 if (ct != -1)
15634 cf = ct;
15635 ct = -1;
15636 code = reverse_condition (code);
15639 out = emit_store_flag (out, code, ix86_compare_op0,
15640 ix86_compare_op1, VOIDmode, 0, -1);
15642 out = expand_simple_binop (mode, IOR,
15643 out, GEN_INT (cf),
15644 out, 1, OPTAB_DIRECT);
15645 if (out != operands[0])
15646 emit_move_insn (operands[0], out);
15648 return 1; /* DONE */
15653 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15654 || diff == 3 || diff == 5 || diff == 9)
15655 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15656 && (mode != DImode
15657 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15660 * xorl dest,dest
15661 * cmpl op1,op2
15662 * setcc dest
15663 * lea cf(dest*(ct-cf)),dest
15665 * Size 14.
15667 * This also catches the degenerate setcc-only case.
15670 rtx tmp;
15671 int nops;
15673 out = emit_store_flag (out, code, ix86_compare_op0,
15674 ix86_compare_op1, VOIDmode, 0, 1);
15676 nops = 0;
15677 /* On x86_64 the lea instruction operates on Pmode, so we need
15678 to get the arithmetic done in the proper mode to match. */
15679 if (diff == 1)
15680 tmp = copy_rtx (out);
15681 else
15683 rtx out1;
15684 out1 = copy_rtx (out);
15685 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15686 nops++;
15687 if (diff & 1)
15689 tmp = gen_rtx_PLUS (mode, tmp, out1);
15690 nops++;
15693 if (cf != 0)
15695 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15696 nops++;
15698 if (!rtx_equal_p (tmp, out))
15700 if (nops == 1)
15701 out = force_operand (tmp, copy_rtx (out));
15702 else
15703 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15705 if (!rtx_equal_p (out, operands[0]))
15706 emit_move_insn (operands[0], copy_rtx (out));
15708 return 1; /* DONE */
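/* Illustrative arithmetic for the lea-based variant above (example numbers,
   not from the original code): setcc leaves 0 or 1 in DEST and, with
   diff = ct - cf, the value cf + DEST * diff is exactly cf or ct.  For
   ct = 5, cf = 2 (diff = 3) this is a single "lea 2(dest,dest,2), dest",
   which is why only diff values of 1, 2, 3, 4, 5, 8 and 9 - the scalings an
   address expression can encode - are accepted.  */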
15712 * General case: Jumpful:
15713 * xorl dest,dest cmpl op1, op2
15714 * cmpl op1, op2 movl ct, dest
15715 * setcc dest jcc 1f
15716 * decl dest movl cf, dest
15717 * andl (cf-ct),dest 1:
15718 * addl ct,dest
15720 * Size 20. Size 14.
15722 * This is reasonably steep, but branch mispredict costs are
15723 * high on modern cpus, so consider failing only if optimizing
15724 * for space.
15727 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15728 && BRANCH_COST (optimize_insn_for_speed_p (),
15729 false) >= 2)
15731 if (cf == 0)
15733 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15735 cf = ct;
15736 ct = 0;
15738 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15740 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15742 /* We may be reversing unordered compare to normal compare,
15743 that is not valid in general (we may convert non-trapping
15744 condition to trapping one), however on i386 we currently
15745 emit all comparisons unordered. */
15746 code = reverse_condition_maybe_unordered (code);
15748 else
15750 code = reverse_condition (code);
15751 if (compare_code != UNKNOWN)
15752 compare_code = reverse_condition (compare_code);
15756 if (compare_code != UNKNOWN)
15758 /* notl op1 (if needed)
15759 sarl $31, op1
15760 andl (cf-ct), op1
15761 addl ct, op1
15763 For x < 0 (resp. x <= -1) there will be no notl,
15764 so if possible swap the constants to get rid of the
15765 complement.
15766 True/false will be -1/0 while code below (store flag
15767 followed by decrement) is 0/-1, so the constants need
15768 to be exchanged once more. */
15770 if (compare_code == GE || !cf)
15772 code = reverse_condition (code);
15773 compare_code = LT;
15775 else
15777 HOST_WIDE_INT tmp = cf;
15778 cf = ct;
15779 ct = tmp;
15782 out = emit_store_flag (out, code, ix86_compare_op0,
15783 ix86_compare_op1, VOIDmode, 0, -1);
15785 else
15787 out = emit_store_flag (out, code, ix86_compare_op0,
15788 ix86_compare_op1, VOIDmode, 0, 1);
15790 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15791 copy_rtx (out), 1, OPTAB_DIRECT);
15794 out = expand_simple_binop (mode, AND, copy_rtx (out),
15795 gen_int_mode (cf - ct, mode),
15796 copy_rtx (out), 1, OPTAB_DIRECT);
15797 if (ct)
15798 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15799 copy_rtx (out), 1, OPTAB_DIRECT);
15800 if (!rtx_equal_p (out, operands[0]))
15801 emit_move_insn (operands[0], copy_rtx (out));
15803 return 1; /* DONE */
15807 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15809 /* Try a few things more with specific constants and a variable. */
15811 optab op;
15812 rtx var, orig_out, out, tmp;
15814 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15815 return 0; /* FAIL */
15817 /* If one of the two operands is an interesting constant, load a
15818 constant with the above and mask it in with a logical operation. */
15820 if (CONST_INT_P (operands[2]))
15822 var = operands[3];
15823 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15824 operands[3] = constm1_rtx, op = and_optab;
15825 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15826 operands[3] = const0_rtx, op = ior_optab;
15827 else
15828 return 0; /* FAIL */
15830 else if (CONST_INT_P (operands[3]))
15832 var = operands[2];
15833 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15834 operands[2] = constm1_rtx, op = and_optab;
15835 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15836 operands[2] = const0_rtx, op = ior_optab;
15837 else
15838 return 0; /* FAIL */
15840 else
15841 return 0; /* FAIL */
15843 orig_out = operands[0];
15844 tmp = gen_reg_rtx (mode);
15845 operands[0] = tmp;
15847 /* Recurse to get the constant loaded. */
15848 if (ix86_expand_int_movcc (operands) == 0)
15849 return 0; /* FAIL */
15851 /* Mask in the interesting variable. */
15852 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15853 OPTAB_WIDEN);
15854 if (!rtx_equal_p (out, orig_out))
15855 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15857 return 1; /* DONE */
15861 * For comparison with above,
15863 * movl cf,dest
15864 * movl ct,tmp
15865 * cmpl op1,op2
15866 * cmovcc tmp,dest
15868 * Size 15.
15871 if (! nonimmediate_operand (operands[2], mode))
15872 operands[2] = force_reg (mode, operands[2]);
15873 if (! nonimmediate_operand (operands[3], mode))
15874 operands[3] = force_reg (mode, operands[3]);
15876 if (! register_operand (operands[2], VOIDmode)
15877 && (mode == QImode
15878 || ! register_operand (operands[3], VOIDmode)))
15879 operands[2] = force_reg (mode, operands[2]);
15881 if (mode == QImode
15882 && ! register_operand (operands[3], VOIDmode))
15883 operands[3] = force_reg (mode, operands[3]);
15885 emit_insn (compare_seq);
15886 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15887 gen_rtx_IF_THEN_ELSE (mode,
15888 compare_op, operands[2],
15889 operands[3])));
15891 return 1; /* DONE */
15894 /* Swap, force into registers, or otherwise massage the two operands
15895 to an sse comparison with a mask result. Thus we differ a bit from
15896 ix86_prepare_fp_compare_args which expects to produce a flags result.
15898 The DEST operand exists to help determine whether to commute commutative
15899 operators. The POP0/POP1 operands are updated in place. The new
15900 comparison code is returned, or UNKNOWN if not implementable. */
15902 static enum rtx_code
15903 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
15904 rtx *pop0, rtx *pop1)
15906 rtx tmp;
15908 switch (code)
15910 case LTGT:
15911 case UNEQ:
15912 /* We have no LTGT as an operator. We could implement it with
15913 NE & ORDERED, but this requires an extra temporary. It's
15914 not clear that it's worth it. */
15915 return UNKNOWN;
15917 case LT:
15918 case LE:
15919 case UNGT:
15920 case UNGE:
15921 /* These are supported directly. */
15922 break;
15924 case EQ:
15925 case NE:
15926 case UNORDERED:
15927 case ORDERED:
15928 /* For commutative operators, try to canonicalize the destination
15929 operand to be first in the comparison - this helps reload to
15930 avoid extra moves. */
15931 if (!dest || !rtx_equal_p (dest, *pop1))
15932 break;
15933 /* FALLTHRU */
15935 case GE:
15936 case GT:
15937 case UNLE:
15938 case UNLT:
15939 /* These are not supported directly. Swap the comparison operands
15940 to transform into something that is supported. */
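/* Added note (based on the SSE compare predicates, an assumption rather
   than text from this file): cmpss/cmpps encode only eq, lt, le, unord,
   neq, nlt, nle and ord, so e.g. "a > b" is handled as "b < a" by the
   operand swap below, while LTGT and UNEQ have no single predicate and
   were rejected above.  */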
15941 tmp = *pop0;
15942 *pop0 = *pop1;
15943 *pop1 = tmp;
15944 code = swap_condition (code);
15945 break;
15947 default:
15948 gcc_unreachable ();
15951 return code;
15954 /* Detect conditional moves that exactly match min/max operational
15955 semantics. Note that this is IEEE safe, as long as we don't
15956 interchange the operands.
15958 Returns FALSE if this conditional move doesn't match a MIN/MAX,
15959 and TRUE if the operation is successful and instructions are emitted. */
15961 static bool
15962 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
15963 rtx cmp_op1, rtx if_true, rtx if_false)
15965 enum machine_mode mode;
15966 bool is_min;
15967 rtx tmp;
15969 if (code == LT)
15971 else if (code == UNGE)
15973 tmp = if_true;
15974 if_true = if_false;
15975 if_false = tmp;
15977 else
15978 return false;
15980 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
15981 is_min = true;
15982 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
15983 is_min = false;
15984 else
15985 return false;
15987 mode = GET_MODE (dest);
15989 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
15990 but MODE may be a vector mode and thus not appropriate. */
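/* Background assumption about the hardware (not from the original file):
   minps/maxps return their second source operand whenever an input is a
   NaN, and also for +0.0 vs -0.0, so a bare SMIN/SMAX is only used when
   both -ffinite-math-only and -funsafe-math-optimizations are in effect;
   otherwise the UNSPEC below preserves the exact operand order chosen
   above.  */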
15991 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
15993 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
15994 rtvec v;
15996 if_true = force_reg (mode, if_true);
15997 v = gen_rtvec (2, if_true, if_false);
15998 tmp = gen_rtx_UNSPEC (mode, v, u);
16000 else
16002 code = is_min ? SMIN : SMAX;
16003 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16006 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
16007 return true;
16010 /* Expand an sse vector comparison. Return the register with the result. */
16012 static rtx
16013 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16014 rtx op_true, rtx op_false)
16016 enum machine_mode mode = GET_MODE (dest);
16017 rtx x;
16019 cmp_op0 = force_reg (mode, cmp_op0);
16020 if (!nonimmediate_operand (cmp_op1, mode))
16021 cmp_op1 = force_reg (mode, cmp_op1);
16023 if (optimize
16024 || reg_overlap_mentioned_p (dest, op_true)
16025 || reg_overlap_mentioned_p (dest, op_false))
16026 dest = gen_reg_rtx (mode);
16028 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16029 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16031 return dest;
16034 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16035 operations. This is used for both scalar and vector conditional moves. */
16037 static void
16038 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16040 enum machine_mode mode = GET_MODE (dest);
16041 rtx t2, t3, x;
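/* Overview (added for clarity): with CMP an all-ones/all-zeros mask per
   element, the general case below computes
       dest = (op_true & cmp) | (op_false & ~cmp);
   the first two branches cover the cases where one arm is zero so a single
   AND (or NOT+AND) suffices, and TARGET_XOP can express the whole select
   with one vpcmov.  */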
16043 if (op_false == CONST0_RTX (mode))
16045 op_true = force_reg (mode, op_true);
16046 x = gen_rtx_AND (mode, cmp, op_true);
16047 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16049 else if (op_true == CONST0_RTX (mode))
16051 op_false = force_reg (mode, op_false);
16052 x = gen_rtx_NOT (mode, cmp);
16053 x = gen_rtx_AND (mode, x, op_false);
16054 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16056 else if (TARGET_XOP)
16058 rtx pcmov = gen_rtx_SET (mode, dest,
16059 gen_rtx_IF_THEN_ELSE (mode, cmp,
16060 op_true,
16061 op_false));
16062 emit_insn (pcmov);
16064 else
16066 op_true = force_reg (mode, op_true);
16067 op_false = force_reg (mode, op_false);
16069 t2 = gen_reg_rtx (mode);
16070 if (optimize)
16071 t3 = gen_reg_rtx (mode);
16072 else
16073 t3 = dest;
16075 x = gen_rtx_AND (mode, op_true, cmp);
16076 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16078 x = gen_rtx_NOT (mode, cmp);
16079 x = gen_rtx_AND (mode, x, op_false);
16080 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16082 x = gen_rtx_IOR (mode, t3, t2);
16083 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16087 /* Expand a floating-point conditional move. Return true if successful. */
16090 ix86_expand_fp_movcc (rtx operands[])
16092 enum machine_mode mode = GET_MODE (operands[0]);
16093 enum rtx_code code = GET_CODE (operands[1]);
16094 rtx tmp, compare_op;
16096 ix86_compare_op0 = XEXP (operands[1], 0);
16097 ix86_compare_op1 = XEXP (operands[1], 1);
16098 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16100 enum machine_mode cmode;
16102 /* Since we've no cmove for sse registers, don't force bad register
16103 allocation just to gain access to it. Deny movcc when the
16104 comparison mode doesn't match the move mode. */
16105 cmode = GET_MODE (ix86_compare_op0);
16106 if (cmode == VOIDmode)
16107 cmode = GET_MODE (ix86_compare_op1);
16108 if (cmode != mode)
16109 return 0;
16111 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16112 &ix86_compare_op0,
16113 &ix86_compare_op1);
16114 if (code == UNKNOWN)
16115 return 0;
16117 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16118 ix86_compare_op1, operands[2],
16119 operands[3]))
16120 return 1;
16122 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16123 ix86_compare_op1, operands[2], operands[3]);
16124 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16125 return 1;
16128 /* The floating point conditional move instructions don't directly
16129 support conditions resulting from a signed integer comparison. */
16131 compare_op = ix86_expand_compare (code);
16132 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16134 tmp = gen_reg_rtx (QImode);
16135 ix86_expand_setcc (code, tmp);
16136 code = NE;
16137 ix86_compare_op0 = tmp;
16138 ix86_compare_op1 = const0_rtx;
16139 compare_op = ix86_expand_compare (code);
16142 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16143 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16144 operands[2], operands[3])));
16146 return 1;
16149 /* Expand a floating-point vector conditional move; a vcond operation
16150 rather than a movcc operation. */
16152 bool
16153 ix86_expand_fp_vcond (rtx operands[])
16155 enum rtx_code code = GET_CODE (operands[3]);
16156 rtx cmp;
16158 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16159 &operands[4], &operands[5]);
16160 if (code == UNKNOWN)
16161 return false;
16163 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16164 operands[5], operands[1], operands[2]))
16165 return true;
16167 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16168 operands[1], operands[2]);
16169 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16170 return true;
16173 /* Expand a signed/unsigned integral vector conditional move. */
16175 bool
16176 ix86_expand_int_vcond (rtx operands[])
16178 enum machine_mode mode = GET_MODE (operands[0]);
16179 enum rtx_code code = GET_CODE (operands[3]);
16180 bool negate = false;
16181 rtx x, cop0, cop1;
16183 cop0 = operands[4];
16184 cop1 = operands[5];
16186 /* XOP supports all of the comparisons on all vector int types. */
16187 if (!TARGET_XOP)
16189 /* Canonicalize the comparison to EQ, GT, GTU. */
16190 switch (code)
16192 case EQ:
16193 case GT:
16194 case GTU:
16195 break;
16197 case NE:
16198 case LE:
16199 case LEU:
16200 code = reverse_condition (code);
16201 negate = true;
16202 break;
16204 case GE:
16205 case GEU:
16206 code = reverse_condition (code);
16207 negate = true;
16208 /* FALLTHRU */
16210 case LT:
16211 case LTU:
16212 code = swap_condition (code);
16213 x = cop0, cop0 = cop1, cop1 = x;
16214 break;
16216 default:
16217 gcc_unreachable ();
16220 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16221 if (mode == V2DImode)
16223 switch (code)
16225 case EQ:
16226 /* SSE4.1 supports EQ. */
16227 if (!TARGET_SSE4_1)
16228 return false;
16229 break;
16231 case GT:
16232 case GTU:
16233 /* SSE4.2 supports GT/GTU. */
16234 if (!TARGET_SSE4_2)
16235 return false;
16236 break;
16238 default:
16239 gcc_unreachable ();
16243 /* Unsigned parallel compare is not supported by the hardware. Play some
16244 tricks to turn this into a signed comparison against 0. */
16245 if (code == GTU)
16247 cop0 = force_reg (mode, cop0);
16249 switch (mode)
16251 case V4SImode:
16252 case V2DImode:
16254 rtx t1, t2, mask;
16256 /* Perform a parallel modulo subtraction. */
16257 t1 = gen_reg_rtx (mode);
16258 emit_insn ((mode == V4SImode
16259 ? gen_subv4si3
16260 : gen_subv2di3) (t1, cop0, cop1));
16262 /* Extract the original sign bit of op0. */
16263 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16264 true, false);
16265 t2 = gen_reg_rtx (mode);
16266 emit_insn ((mode == V4SImode
16267 ? gen_andv4si3
16268 : gen_andv2di3) (t2, cop0, mask));
16270 /* XOR it back into the result of the subtraction. This results
16271 in the sign bit set iff we saw unsigned underflow. */
16272 x = gen_reg_rtx (mode);
16273 emit_insn ((mode == V4SImode
16274 ? gen_xorv4si3
16275 : gen_xorv2di3) (x, t1, t2));
16277 code = GT;
16279 break;
16281 case V16QImode:
16282 case V8HImode:
16283 /* Perform a parallel unsigned saturating subtraction. */
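/* Why this works (added note): for unsigned elements x > y is equivalent
   to (x -us y) != 0, where -us is the saturating subtraction done by
   psubusb/psubusw, so the comparison becomes an equality test against
   zero and NEGATE is flipped to account for the "!=".  */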
16284 x = gen_reg_rtx (mode);
16285 emit_insn (gen_rtx_SET (VOIDmode, x,
16286 gen_rtx_US_MINUS (mode, cop0, cop1)));
16288 code = EQ;
16289 negate = !negate;
16290 break;
16292 default:
16293 gcc_unreachable ();
16296 cop0 = x;
16297 cop1 = CONST0_RTX (mode);
16301 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16302 operands[1+negate], operands[2-negate]);
16304 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16305 operands[2-negate]);
16306 return true;
16309 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16310 true if we should do zero extension, else sign extension. HIGH_P is
16311 true if we want the N/2 high elements, else the low elements. */
16313 void
16314 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16316 enum machine_mode imode = GET_MODE (operands[1]);
16317 rtx (*unpack)(rtx, rtx, rtx);
16318 rtx se, dest;
16320 switch (imode)
16322 case V16QImode:
16323 if (high_p)
16324 unpack = gen_vec_interleave_highv16qi;
16325 else
16326 unpack = gen_vec_interleave_lowv16qi;
16327 break;
16328 case V8HImode:
16329 if (high_p)
16330 unpack = gen_vec_interleave_highv8hi;
16331 else
16332 unpack = gen_vec_interleave_lowv8hi;
16333 break;
16334 case V4SImode:
16335 if (high_p)
16336 unpack = gen_vec_interleave_highv4si;
16337 else
16338 unpack = gen_vec_interleave_lowv4si;
16339 break;
16340 default:
16341 gcc_unreachable ();
16344 dest = gen_lowpart (imode, operands[0]);
16346 if (unsigned_p)
16347 se = force_reg (imode, CONST0_RTX (imode));
16348 else
16349 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16350 operands[1], pc_rtx, pc_rtx);
16352 emit_insn (unpack (dest, operands[1], se));
16355 /* This function performs the same task as ix86_expand_sse_unpack,
16356 but with SSE4.1 instructions. */
16358 void
16359 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16361 enum machine_mode imode = GET_MODE (operands[1]);
16362 rtx (*unpack)(rtx, rtx);
16363 rtx src, dest;
16365 switch (imode)
16367 case V16QImode:
16368 if (unsigned_p)
16369 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16370 else
16371 unpack = gen_sse4_1_extendv8qiv8hi2;
16372 break;
16373 case V8HImode:
16374 if (unsigned_p)
16375 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16376 else
16377 unpack = gen_sse4_1_extendv4hiv4si2;
16378 break;
16379 case V4SImode:
16380 if (unsigned_p)
16381 unpack = gen_sse4_1_zero_extendv2siv2di2;
16382 else
16383 unpack = gen_sse4_1_extendv2siv2di2;
16384 break;
16385 default:
16386 gcc_unreachable ();
16389 dest = operands[0];
16390 if (high_p)
16392 /* Shift higher 8 bytes to lower 8 bytes. */
16393 src = gen_reg_rtx (imode);
16394 emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, src),
16395 gen_lowpart (TImode, operands[1]),
16396 GEN_INT (64)));
16398 else
16399 src = operands[1];
16401 emit_insn (unpack (dest, src));
17404 /* Expand conditional increment or decrement using adc/sbb instructions.
16405 The default case using setcc followed by the conditional move can be
16406 done by generic code. */
16408 ix86_expand_int_addcc (rtx operands[])
16410 enum rtx_code code = GET_CODE (operands[1]);
16411 rtx flags;
16412 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16413 rtx compare_op;
16414 rtx val = const0_rtx;
16415 bool fpcmp = false;
16416 enum machine_mode mode;
16418 ix86_compare_op0 = XEXP (operands[1], 0);
16419 ix86_compare_op1 = XEXP (operands[1], 1);
16420 if (operands[3] != const1_rtx
16421 && operands[3] != constm1_rtx)
16422 return 0;
16423 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16424 ix86_compare_op1, &compare_op))
16425 return 0;
16426 code = GET_CODE (compare_op);
16428 flags = XEXP (compare_op, 0);
16430 if (GET_MODE (flags) == CCFPmode
16431 || GET_MODE (flags) == CCFPUmode)
16433 fpcmp = true;
16434 code = ix86_fp_compare_code_to_integer (code);
16437 if (code != LTU)
16439 val = constm1_rtx;
16440 if (fpcmp)
16441 PUT_CODE (compare_op,
16442 reverse_condition_maybe_unordered
16443 (GET_CODE (compare_op)));
16444 else
16445 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16448 mode = GET_MODE (operands[0]);
16450 /* Construct either adc or sbb insn. */
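/* Illustrative example (not from the original sources): for unsigned
   operands, "x = x + (a < b)" compiles to
       cmp a, b
       adc x, 0
   and "x = x - (a < b)" to "sbb x, 0"; VAL holds -1 instead of 0 when the
   compare had to be inverted above, so the carry is folded in with the
   opposite sign.  */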
16451 if ((code == LTU) == (operands[3] == constm1_rtx))
16453 switch (mode)
16455 case QImode:
16456 insn = gen_subqi3_carry;
16457 break;
16458 case HImode:
16459 insn = gen_subhi3_carry;
16460 break;
16461 case SImode:
16462 insn = gen_subsi3_carry;
16463 break;
16464 case DImode:
16465 insn = gen_subdi3_carry;
16466 break;
16467 default:
16468 gcc_unreachable ();
16471 else
16473 switch (mode)
16475 case QImode:
16476 insn = gen_addqi3_carry;
16477 break;
16478 case HImode:
16479 insn = gen_addhi3_carry;
16480 break;
16481 case SImode:
16482 insn = gen_addsi3_carry;
16483 break;
16484 case DImode:
16485 insn = gen_adddi3_carry;
16486 break;
16487 default:
16488 gcc_unreachable ();
16491 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16493 return 1; /* DONE */
16497 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16498 works for floating-point parameters and non-offsettable memories.
16499 For pushes, it returns just stack offsets; the values will be saved
16500 in the right order. At most four parts are generated. */
16502 static int
16503 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16505 int size;
16507 if (!TARGET_64BIT)
16508 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16509 else
16510 size = (GET_MODE_SIZE (mode) + 4) / 8;
16512 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16513 gcc_assert (size >= 2 && size <= 4);
16515 /* Optimize constant pool reference to immediates. This is used by fp
16516 moves, that force all constants to memory to allow combining. */
16517 if (MEM_P (operand) && MEM_READONLY_P (operand))
16519 rtx tmp = maybe_get_pool_constant (operand);
16520 if (tmp)
16521 operand = tmp;
16524 if (MEM_P (operand) && !offsettable_memref_p (operand))
16526 /* The only non-offsettable memories we handle are pushes. */
16527 int ok = push_operand (operand, VOIDmode);
16529 gcc_assert (ok);
16531 operand = copy_rtx (operand);
16532 PUT_MODE (operand, Pmode);
16533 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16534 return size;
16537 if (GET_CODE (operand) == CONST_VECTOR)
16539 enum machine_mode imode = int_mode_for_mode (mode);
16540 /* Caution: if we looked through a constant pool memory above,
16541 the operand may actually have a different mode now. That's
16542 ok, since we want to pun this all the way back to an integer. */
16543 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16544 gcc_assert (operand != NULL);
16545 mode = imode;
16548 if (!TARGET_64BIT)
16550 if (mode == DImode)
16551 split_di (&operand, 1, &parts[0], &parts[1]);
16552 else
16554 int i;
16556 if (REG_P (operand))
16558 gcc_assert (reload_completed);
16559 for (i = 0; i < size; i++)
16560 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16562 else if (offsettable_memref_p (operand))
16564 operand = adjust_address (operand, SImode, 0);
16565 parts[0] = operand;
16566 for (i = 1; i < size; i++)
16567 parts[i] = adjust_address (operand, SImode, 4 * i);
16569 else if (GET_CODE (operand) == CONST_DOUBLE)
16571 REAL_VALUE_TYPE r;
16572 long l[4];
16574 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16575 switch (mode)
16577 case TFmode:
16578 real_to_target (l, &r, mode);
16579 parts[3] = gen_int_mode (l[3], SImode);
16580 parts[2] = gen_int_mode (l[2], SImode);
16581 break;
16582 case XFmode:
16583 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16584 parts[2] = gen_int_mode (l[2], SImode);
16585 break;
16586 case DFmode:
16587 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16588 break;
16589 default:
16590 gcc_unreachable ();
16592 parts[1] = gen_int_mode (l[1], SImode);
16593 parts[0] = gen_int_mode (l[0], SImode);
16595 else
16596 gcc_unreachable ();
16599 else
16601 if (mode == TImode)
16602 split_ti (&operand, 1, &parts[0], &parts[1]);
16603 if (mode == XFmode || mode == TFmode)
16605 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
16606 if (REG_P (operand))
16608 gcc_assert (reload_completed);
16609 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16610 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16612 else if (offsettable_memref_p (operand))
16614 operand = adjust_address (operand, DImode, 0);
16615 parts[0] = operand;
16616 parts[1] = adjust_address (operand, upper_mode, 8);
16618 else if (GET_CODE (operand) == CONST_DOUBLE)
16620 REAL_VALUE_TYPE r;
16621 long l[4];
16623 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16624 real_to_target (l, &r, mode);
16626 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16627 if (HOST_BITS_PER_WIDE_INT >= 64)
16628 parts[0]
16629 = gen_int_mode
16630 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16631 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16632 DImode);
16633 else
16634 parts[0] = immed_double_const (l[0], l[1], DImode);
16636 if (upper_mode == SImode)
16637 parts[1] = gen_int_mode (l[2], SImode);
16638 else if (HOST_BITS_PER_WIDE_INT >= 64)
16639 parts[1]
16640 = gen_int_mode
16641 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16642 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16643 DImode);
16644 else
16645 parts[1] = immed_double_const (l[2], l[3], DImode);
16647 else
16648 gcc_unreachable ();
16652 return size;
16655 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16656 Return false when normal moves are needed; true when all required
16657 insns have been emitted. Operands 2-4 contain the input values
16658 in the correct order; operands 5-7 contain the output values. */
16660 void
16661 ix86_split_long_move (rtx operands[])
16663 rtx part[2][4];
16664 int nparts, i, j;
16665 int push = 0;
16666 int collisions = 0;
16667 enum machine_mode mode = GET_MODE (operands[0]);
16668 bool collisionparts[4];
16670 /* The DFmode expanders may ask us to move a double.
16671 For a 64bit target this is a single move. By hiding the fact
16672 here we simplify the i386.md splitters. */
16673 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16675 /* Optimize constant pool reference to immediates. This is used by
16676 fp moves, that force all constants to memory to allow combining. */
16678 if (MEM_P (operands[1])
16679 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16680 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16681 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16682 if (push_operand (operands[0], VOIDmode))
16684 operands[0] = copy_rtx (operands[0]);
16685 PUT_MODE (operands[0], Pmode);
16687 else
16688 operands[0] = gen_lowpart (DImode, operands[0]);
16689 operands[1] = gen_lowpart (DImode, operands[1]);
16690 emit_move_insn (operands[0], operands[1]);
16691 return;
16694 /* The only non-offsettable memory we handle is push. */
16695 if (push_operand (operands[0], VOIDmode))
16696 push = 1;
16697 else
16698 gcc_assert (!MEM_P (operands[0])
16699 || offsettable_memref_p (operands[0]));
16701 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16702 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16704 /* When emitting push, take care for source operands on the stack. */
16705 if (push && MEM_P (operands[1])
16706 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16708 rtx src_base = XEXP (part[1][nparts - 1], 0);
16710 /* Compensate for the stack decrement by 4. */
16711 if (!TARGET_64BIT && nparts == 3
16712 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16713 src_base = plus_constant (src_base, 4);
16715 /* src_base refers to the stack pointer and is
16716 automatically decreased by emitted push. */
16717 for (i = 0; i < nparts; i++)
16718 part[1][i] = change_address (part[1][i],
16719 GET_MODE (part[1][i]), src_base);
16722 /* We need to do copy in the right order in case an address register
16723 of the source overlaps the destination. */
16724 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16726 rtx tmp;
16728 for (i = 0; i < nparts; i++)
16730 collisionparts[i]
16731 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16732 if (collisionparts[i])
16733 collisions++;
16736 /* Collision in the middle part can be handled by reordering. */
16737 if (collisions == 1 && nparts == 3 && collisionparts [1])
16739 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16740 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16742 else if (collisions == 1
16743 && nparts == 4
16744 && (collisionparts [1] || collisionparts [2]))
16746 if (collisionparts [1])
16748 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16749 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16751 else
16753 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16754 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16758 /* If there are more collisions, we can't handle it by reordering.
16759 Do an lea to the last part and use only one colliding move. */
16760 else if (collisions > 1)
16762 rtx base;
16764 collisions = 1;
16766 base = part[0][nparts - 1];
16768 /* Handle the case when the last part isn't valid for lea.
16769 Happens in 64-bit mode storing the 12-byte XFmode. */
16770 if (GET_MODE (base) != Pmode)
16771 base = gen_rtx_REG (Pmode, REGNO (base));
16773 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16774 part[1][0] = replace_equiv_address (part[1][0], base);
16775 for (i = 1; i < nparts; i++)
16777 tmp = plus_constant (base, UNITS_PER_WORD * i);
16778 part[1][i] = replace_equiv_address (part[1][i], tmp);
16783 if (push)
16785 if (!TARGET_64BIT)
16787 if (nparts == 3)
16789 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16790 emit_insn (gen_addsi3 (stack_pointer_rtx,
16791 stack_pointer_rtx, GEN_INT (-4)));
16792 emit_move_insn (part[0][2], part[1][2]);
16794 else if (nparts == 4)
16796 emit_move_insn (part[0][3], part[1][3]);
16797 emit_move_insn (part[0][2], part[1][2]);
16800 else
16802 /* In 64bit mode we don't have a 32bit push available. If this is a
16803 register, that is OK - we will just use the larger counterpart. We also
16804 retype memory - this comes from an attempt to avoid the REX prefix on
16805 moving the second half of a TFmode value. */
16806 if (GET_MODE (part[1][1]) == SImode)
16808 switch (GET_CODE (part[1][1]))
16810 case MEM:
16811 part[1][1] = adjust_address (part[1][1], DImode, 0);
16812 break;
16814 case REG:
16815 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16816 break;
16818 default:
16819 gcc_unreachable ();
16822 if (GET_MODE (part[1][0]) == SImode)
16823 part[1][0] = part[1][1];
16826 emit_move_insn (part[0][1], part[1][1]);
16827 emit_move_insn (part[0][0], part[1][0]);
16828 return;
16831 /* Choose correct order to not overwrite the source before it is copied. */
16832 if ((REG_P (part[0][0])
16833 && REG_P (part[1][1])
16834 && (REGNO (part[0][0]) == REGNO (part[1][1])
16835 || (nparts == 3
16836 && REGNO (part[0][0]) == REGNO (part[1][2]))
16837 || (nparts == 4
16838 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16839 || (collisions > 0
16840 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16842 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16844 operands[2 + i] = part[0][j];
16845 operands[6 + i] = part[1][j];
16848 else
16850 for (i = 0; i < nparts; i++)
16852 operands[2 + i] = part[0][i];
16853 operands[6 + i] = part[1][i];
16857 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16858 if (optimize_insn_for_size_p ())
16860 for (j = 0; j < nparts - 1; j++)
16861 if (CONST_INT_P (operands[6 + j])
16862 && operands[6 + j] != const0_rtx
16863 && REG_P (operands[2 + j]))
16864 for (i = j; i < nparts - 1; i++)
16865 if (CONST_INT_P (operands[7 + i])
16866 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16867 operands[7 + i] = operands[2 + j];
16870 for (i = 0; i < nparts; i++)
16871 emit_move_insn (operands[2 + i], operands[6 + i]);
16873 return;
16876 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16877 left shift by a constant, either using a single shift or
16878 a sequence of add instructions. */
16880 static void
16881 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16883 if (count == 1)
16885 emit_insn ((mode == DImode
16886 ? gen_addsi3
16887 : gen_adddi3) (operand, operand, operand));
16889 else if (!optimize_insn_for_size_p ()
16890 && count * ix86_cost->add <= ix86_cost->shift_const)
16892 int i;
16893 for (i=0; i<count; i++)
16895 emit_insn ((mode == DImode
16896 ? gen_addsi3
16897 : gen_adddi3) (operand, operand, operand));
16900 else
16901 emit_insn ((mode == DImode
16902 ? gen_ashlsi3
16903 : gen_ashldi3) (operand, operand, GEN_INT (count)));
16906 void
16907 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
16909 rtx low[2], high[2];
16910 int count;
16911 const int single_width = mode == DImode ? 32 : 64;
16913 if (CONST_INT_P (operands[2]))
16915 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16916 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16918 if (count >= single_width)
16920 emit_move_insn (high[0], low[1]);
16921 emit_move_insn (low[0], const0_rtx);
16923 if (count > single_width)
16924 ix86_expand_ashl_const (high[0], count - single_width, mode);
16926 else
16928 if (!rtx_equal_p (operands[0], operands[1]))
16929 emit_move_insn (operands[0], operands[1]);
16930 emit_insn ((mode == DImode
16931 ? gen_x86_shld
16932 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
16933 ix86_expand_ashl_const (low[0], count, mode);
16935 return;
16938 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16940 if (operands[1] == const1_rtx)
16942 /* Assuming we've chosen QImode-capable registers, 1 << N
16943 can be done with two 32/64-bit shifts, no branches, no cmoves. */
16944 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
16946 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
16948 ix86_expand_clear (low[0]);
16949 ix86_expand_clear (high[0]);
16950 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
16952 d = gen_lowpart (QImode, low[0]);
16953 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16954 s = gen_rtx_EQ (QImode, flags, const0_rtx);
16955 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16957 d = gen_lowpart (QImode, high[0]);
16958 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16959 s = gen_rtx_NE (QImode, flags, const0_rtx);
16960 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16963 /* Otherwise, we can get the same results by manually performing
16964 a bit extract operation on bit 5/6, and then performing the two
16965 shifts. The two methods of getting 0/1 into low/high are exactly
16966 the same size. Avoiding the shift in the bit extract case helps
16967 pentium4 a bit; no one else seems to care much either way. */
16968 else
16970 rtx x;
16972 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
16973 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
16974 else
16975 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
16976 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
16978 emit_insn ((mode == DImode
16979 ? gen_lshrsi3
16980 : gen_lshrdi3) (high[0], high[0],
16981 GEN_INT (mode == DImode ? 5 : 6)));
16982 emit_insn ((mode == DImode
16983 ? gen_andsi3
16984 : gen_anddi3) (high[0], high[0], const1_rtx));
16985 emit_move_insn (low[0], high[0]);
16986 emit_insn ((mode == DImode
16987 ? gen_xorsi3
16988 : gen_xordi3) (low[0], low[0], const1_rtx));
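/* Added note: at this point (in either branch above) exactly one of
   LOW/HIGH is 1 - HIGH iff bit 5 of the count is set (bit 6 for TImode),
   i.e. iff the shift amount is at least 32 (64).  The shifts below then
   use the count modulo 32 (64) - which is how the hardware treats a
   variable shift count anyway - so the pair yields the full double-word
   1 << N with no branches.  */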
16991 emit_insn ((mode == DImode
16992 ? gen_ashlsi3
16993 : gen_ashldi3) (low[0], low[0], operands[2]));
16994 emit_insn ((mode == DImode
16995 ? gen_ashlsi3
16996 : gen_ashldi3) (high[0], high[0], operands[2]));
16997 return;
17000 if (operands[1] == constm1_rtx)
17002 /* For -1 << N, we can avoid the shld instruction, because we
17003 know that we're shifting 0...31/63 ones into a -1. */
17004 emit_move_insn (low[0], constm1_rtx);
17005 if (optimize_insn_for_size_p ())
17006 emit_move_insn (high[0], low[0]);
17007 else
17008 emit_move_insn (high[0], constm1_rtx);
17010 else
17012 if (!rtx_equal_p (operands[0], operands[1]))
17013 emit_move_insn (operands[0], operands[1]);
17015 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17016 emit_insn ((mode == DImode
17017 ? gen_x86_shld
17018 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17021 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
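/* Added note on the variable-count path: shld/shl alone are only correct
   for counts 0..31 (0..63 for TImode) because the hardware masks the
   count.  The shift_adj pattern below patches up larger counts -
   conceptually "if (count >= 32) { high = low; low = 0; }", with 64 in
   the TImode case - using cmov when a scratch register is available and
   a short branch otherwise.  */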
17023 if (TARGET_CMOVE && scratch)
17025 ix86_expand_clear (scratch);
17026 emit_insn ((mode == DImode
17027 ? gen_x86_shift_adj_1
17028 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
17029 scratch));
17031 else
17032 emit_insn ((mode == DImode
17033 ? gen_x86_shift_adj_2
17034 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
17037 void
17038 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17040 rtx low[2], high[2];
17041 int count;
17042 const int single_width = mode == DImode ? 32 : 64;
17044 if (CONST_INT_P (operands[2]))
17046 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17047 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17049 if (count == single_width * 2 - 1)
17051 emit_move_insn (high[0], high[1]);
17052 emit_insn ((mode == DImode
17053 ? gen_ashrsi3
17054 : gen_ashrdi3) (high[0], high[0],
17055 GEN_INT (single_width - 1)));
17056 emit_move_insn (low[0], high[0]);
17059 else if (count >= single_width)
17061 emit_move_insn (low[0], high[1]);
17062 emit_move_insn (high[0], low[0]);
17063 emit_insn ((mode == DImode
17064 ? gen_ashrsi3
17065 : gen_ashrdi3) (high[0], high[0],
17066 GEN_INT (single_width - 1)));
17067 if (count > single_width)
17068 emit_insn ((mode == DImode
17069 ? gen_ashrsi3
17070 : gen_ashrdi3) (low[0], low[0],
17071 GEN_INT (count - single_width)));
17073 else
17075 if (!rtx_equal_p (operands[0], operands[1]))
17076 emit_move_insn (operands[0], operands[1]);
17077 emit_insn ((mode == DImode
17078 ? gen_x86_shrd
17079 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17080 emit_insn ((mode == DImode
17081 ? gen_ashrsi3
17082 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17085 else
17087 if (!rtx_equal_p (operands[0], operands[1]))
17088 emit_move_insn (operands[0], operands[1]);
17090 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17092 emit_insn ((mode == DImode
17093 ? gen_x86_shrd
17094 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17095 emit_insn ((mode == DImode
17096 ? gen_ashrsi3
17097 : gen_ashrdi3) (high[0], high[0], operands[2]));
17099 if (TARGET_CMOVE && scratch)
17101 emit_move_insn (scratch, high[0]);
17102 emit_insn ((mode == DImode
17103 ? gen_ashrsi3
17104 : gen_ashrdi3) (scratch, scratch,
17105 GEN_INT (single_width - 1)));
17106 emit_insn ((mode == DImode
17107 ? gen_x86_shift_adj_1
17108 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
17109 scratch));
17111 else
17112 emit_insn ((mode == DImode
17113 ? gen_x86_shift_adj_3
17114 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
17118 void
17119 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17121 rtx low[2], high[2];
17122 int count;
17123 const int single_width = mode == DImode ? 32 : 64;
17125 if (CONST_INT_P (operands[2]))
17127 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17128 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17130 if (count >= single_width)
17132 emit_move_insn (low[0], high[1]);
17133 ix86_expand_clear (high[0]);
17135 if (count > single_width)
17136 emit_insn ((mode == DImode
17137 ? gen_lshrsi3
17138 : gen_lshrdi3) (low[0], low[0],
17139 GEN_INT (count - single_width)));
17141 else
17143 if (!rtx_equal_p (operands[0], operands[1]))
17144 emit_move_insn (operands[0], operands[1]);
17145 emit_insn ((mode == DImode
17146 ? gen_x86_shrd
17147 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17148 emit_insn ((mode == DImode
17149 ? gen_lshrsi3
17150 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17153 else
17155 if (!rtx_equal_p (operands[0], operands[1]))
17156 emit_move_insn (operands[0], operands[1]);
17158 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17160 emit_insn ((mode == DImode
17161 ? gen_x86_shrd
17162 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17163 emit_insn ((mode == DImode
17164 ? gen_lshrsi3
17165 : gen_lshrdi3) (high[0], high[0], operands[2]));
17167 /* Heh. By reversing the arguments, we can reuse this pattern. */
17168 if (TARGET_CMOVE && scratch)
17170 ix86_expand_clear (scratch);
17171 emit_insn ((mode == DImode
17172 ? gen_x86_shift_adj_1
17173 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
17174 scratch));
17176 else
17177 emit_insn ((mode == DImode
17178 ? gen_x86_shift_adj_2
17179 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
17183 /* Predict just emitted jump instruction to be taken with probability PROB. */
17184 static void
17185 predict_jump (int prob)
17187 rtx insn = get_last_insn ();
17188 gcc_assert (JUMP_P (insn));
17189 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17192 /* Helper function for the string operations below. Tests whether VARIABLE
17193 is aligned to VALUE bytes; if so, the code jumps to the returned label. */
17194 static rtx
17195 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17197 rtx label = gen_label_rtx ();
17198 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17199 if (GET_MODE (variable) == DImode)
17200 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17201 else
17202 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17203 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17204 1, label);
17205 if (epilogue)
17206 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17207 else
17208 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17209 return label;
17212 /* Adjust COUNTER by the VALUE. */
17213 static void
17214 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17216 if (GET_MODE (countreg) == DImode)
17217 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17218 else
17219 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17222 /* Zero extend possibly SImode EXP to Pmode register. */
17224 ix86_zero_extend_to_Pmode (rtx exp)
17226 rtx r;
17227 if (GET_MODE (exp) == VOIDmode)
17228 return force_reg (Pmode, exp);
17229 if (GET_MODE (exp) == Pmode)
17230 return copy_to_mode_reg (Pmode, exp);
17231 r = gen_reg_rtx (Pmode);
17232 emit_insn (gen_zero_extendsidi2 (r, exp));
17233 return r;
17236 /* Divide COUNTREG by SCALE. */
17237 static rtx
17238 scale_counter (rtx countreg, int scale)
17240 rtx sc;
17242 if (scale == 1)
17243 return countreg;
17244 if (CONST_INT_P (countreg))
17245 return GEN_INT (INTVAL (countreg) / scale);
17246 gcc_assert (REG_P (countreg));
17248 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17249 GEN_INT (exact_log2 (scale)),
17250 NULL, 1, OPTAB_DIRECT);
17251 return sc;
17254 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17255 DImode for constant loop counts. */
17257 static enum machine_mode
17258 counter_mode (rtx count_exp)
17260 if (GET_MODE (count_exp) != VOIDmode)
17261 return GET_MODE (count_exp);
17262 if (!CONST_INT_P (count_exp))
17263 return Pmode;
17264 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17265 return DImode;
17266 return SImode;
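/* Example of the intent (added note): a constant byte count that fits in
   32 bits is counted in SImode even on x86-64, keeping the loop arithmetic
   free of REX prefixes; only constants with bits above the low 32 bits
   force DImode.  */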
17269 /* When SRCPTR is non-NULL, output a simple loop to copy memory from
17270 SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times; the
17271 overall size is COUNT, specified in bytes. When SRCPTR is NULL, output the
17272 equivalent loop to set memory to VALUE (which is expected to be in MODE).
17274 The size is rounded down to whole number of chunk size moved at once.
17275 SRCMEM and DESTMEM provide MEMrtx to feed proper aliasing info. */
17278 static void
17279 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17280 rtx destptr, rtx srcptr, rtx value,
17281 rtx count, enum machine_mode mode, int unroll,
17282 int expected_size)
17284 rtx out_label, top_label, iter, tmp;
17285 enum machine_mode iter_mode = counter_mode (count);
17286 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17287 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17288 rtx size;
17289 rtx x_addr;
17290 rtx y_addr;
17291 int i;
17293 top_label = gen_label_rtx ();
17294 out_label = gen_label_rtx ();
17295 iter = gen_reg_rtx (iter_mode);
17297 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17298 NULL, 1, OPTAB_DIRECT);
17299 /* Those two should combine. */
17300 if (piece_size == const1_rtx)
17302 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17303 true, out_label);
17304 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17306 emit_move_insn (iter, const0_rtx);
17308 emit_label (top_label);
17310 tmp = convert_modes (Pmode, iter_mode, iter, true);
17311 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17312 destmem = change_address (destmem, mode, x_addr);
17314 if (srcmem)
17316 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17317 srcmem = change_address (srcmem, mode, y_addr);
17319 /* When unrolling for chips that reorder memory reads and writes,
17320 we can save registers by using a single temporary.
17321 Also using 4 temporaries is overkill in 32bit mode. */
17322 if (!TARGET_64BIT && 0)
17324 for (i = 0; i < unroll; i++)
17326 if (i)
17328 destmem =
17329 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17330 srcmem =
17331 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17333 emit_move_insn (destmem, srcmem);
17336 else
17338 rtx tmpreg[4];
17339 gcc_assert (unroll <= 4);
17340 for (i = 0; i < unroll; i++)
17342 tmpreg[i] = gen_reg_rtx (mode);
17343 if (i)
17345 srcmem =
17346 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17348 emit_move_insn (tmpreg[i], srcmem);
17350 for (i = 0; i < unroll; i++)
17352 if (i)
17354 destmem =
17355 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17357 emit_move_insn (destmem, tmpreg[i]);
17361 else
17362 for (i = 0; i < unroll; i++)
17364 if (i)
17365 destmem =
17366 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17367 emit_move_insn (destmem, value);
17370 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17371 true, OPTAB_LIB_WIDEN);
17372 if (tmp != iter)
17373 emit_move_insn (iter, tmp);
17375 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17376 true, top_label);
17377 if (expected_size != -1)
17379 expected_size /= GET_MODE_SIZE (mode) * unroll;
17380 if (expected_size == 0)
17381 predict_jump (0);
17382 else if (expected_size > REG_BR_PROB_BASE)
17383 predict_jump (REG_BR_PROB_BASE - 1);
17384 else
17385 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17387 else
17388 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17389 iter = ix86_zero_extend_to_Pmode (iter);
17390 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17391 true, OPTAB_LIB_WIDEN);
17392 if (tmp != destptr)
17393 emit_move_insn (destptr, tmp);
17394 if (srcptr)
17396 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17397 true, OPTAB_LIB_WIDEN);
17398 if (tmp != srcptr)
17399 emit_move_insn (srcptr, tmp);
17401 emit_label (out_label);
17404 /* Output "rep; mov" instruction.
17405 Arguments have the same meaning as for the previous function. */
17406 static void
17407 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17408 rtx destptr, rtx srcptr,
17409 rtx count,
17410 enum machine_mode mode)
17412 rtx destexp;
17413 rtx srcexp;
17414 rtx countreg;
17416 /* If the size is known, it is shorter to use rep movs. */
17417 if (mode == QImode && CONST_INT_P (count)
17418 && !(INTVAL (count) & 3))
17419 mode = SImode;
17421 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17422 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17423 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17424 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17425 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17426 if (mode != QImode)
17428 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17429 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17430 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17431 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17432 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17433 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17435 else
17437 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17438 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17440 if (CONST_INT_P (count))
17442 count = GEN_INT (INTVAL (count)
17443 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17444 destmem = shallow_copy_rtx (destmem);
17445 srcmem = shallow_copy_rtx (srcmem);
17446 set_mem_size (destmem, count);
17447 set_mem_size (srcmem, count);
17449 else
17451 if (MEM_SIZE (destmem))
17452 set_mem_size (destmem, NULL_RTX);
17453 if (MEM_SIZE (srcmem))
17454 set_mem_size (srcmem, NULL_RTX);
17456 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17457 destexp, srcexp));
17460 /* Output "rep; stos" instruction.
17461 Arguments have the same meaning as for the previous function. */
17462 static void
17463 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17464 rtx count, enum machine_mode mode,
17465 rtx orig_value)
17467 rtx destexp;
17468 rtx countreg;
17470 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17471 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17472 value = force_reg (mode, gen_lowpart (mode, value));
17473 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17474 if (mode != QImode)
17476 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17477 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17478 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17480 else
17481 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17482 if (orig_value == const0_rtx && CONST_INT_P (count))
17484 count = GEN_INT (INTVAL (count)
17485 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17486 destmem = shallow_copy_rtx (destmem);
17487 set_mem_size (destmem, count);
17489 else if (MEM_SIZE (destmem))
17490 set_mem_size (destmem, NULL_RTX);
17491 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17494 static void
17495 emit_strmov (rtx destmem, rtx srcmem,
17496 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17498 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17499 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17500 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17503 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17504 static void
17505 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17506 rtx destptr, rtx srcptr, rtx count, int max_size)
17508 rtx src, dest;
17509 if (CONST_INT_P (count))
17511 HOST_WIDE_INT countval = INTVAL (count);
17512 int offset = 0;
17514 if ((countval & 0x10) && max_size > 16)
17516 if (TARGET_64BIT)
17518 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17519 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17521 else
17522 gcc_unreachable ();
17523 offset += 16;
17525 if ((countval & 0x08) && max_size > 8)
17527 if (TARGET_64BIT)
17528 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17529 else
17531 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17532 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17534 offset += 8;
17536 if ((countval & 0x04) && max_size > 4)
17538 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17539 offset += 4;
17541 if ((countval & 0x02) && max_size > 2)
17543 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17544 offset += 2;
17546 if ((countval & 0x01) && max_size > 1)
17548 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17549 offset += 1;
17551 return;
17553 if (max_size > 8)
17555 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17556 count, 1, OPTAB_DIRECT);
17557 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17558 count, QImode, 1, 4);
17559 return;
17562 /* When there are stringops, we can cheaply increase dest and src pointers.
17563 Otherwise we save code size by maintaining offset (zero is readily
17564 available from the preceding rep operation) and using x86 addressing modes. */
17566 if (TARGET_SINGLE_STRINGOP)
17568 if (max_size > 4)
17570 rtx label = ix86_expand_aligntest (count, 4, true);
17571 src = change_address (srcmem, SImode, srcptr);
17572 dest = change_address (destmem, SImode, destptr);
17573 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17574 emit_label (label);
17575 LABEL_NUSES (label) = 1;
17577 if (max_size > 2)
17579 rtx label = ix86_expand_aligntest (count, 2, true);
17580 src = change_address (srcmem, HImode, srcptr);
17581 dest = change_address (destmem, HImode, destptr);
17582 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17583 emit_label (label);
17584 LABEL_NUSES (label) = 1;
17586 if (max_size > 1)
17588 rtx label = ix86_expand_aligntest (count, 1, true);
17589 src = change_address (srcmem, QImode, srcptr);
17590 dest = change_address (destmem, QImode, destptr);
17591 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17592 emit_label (label);
17593 LABEL_NUSES (label) = 1;
17596 else
17598 rtx offset = force_reg (Pmode, const0_rtx);
17599 rtx tmp;
17601 if (max_size > 4)
17603 rtx label = ix86_expand_aligntest (count, 4, true);
17604 src = change_address (srcmem, SImode, srcptr);
17605 dest = change_address (destmem, SImode, destptr);
17606 emit_move_insn (dest, src);
17607 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17608 true, OPTAB_LIB_WIDEN);
17609 if (tmp != offset)
17610 emit_move_insn (offset, tmp);
17611 emit_label (label);
17612 LABEL_NUSES (label) = 1;
17614 if (max_size > 2)
17616 rtx label = ix86_expand_aligntest (count, 2, true);
17617 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17618 src = change_address (srcmem, HImode, tmp);
17619 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17620 dest = change_address (destmem, HImode, tmp);
17621 emit_move_insn (dest, src);
17622 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17623 true, OPTAB_LIB_WIDEN);
17624 if (tmp != offset)
17625 emit_move_insn (offset, tmp);
17626 emit_label (label);
17627 LABEL_NUSES (label) = 1;
17629 if (max_size > 1)
17631 rtx label = ix86_expand_aligntest (count, 1, true);
17632 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17633 src = change_address (srcmem, QImode, tmp);
17634 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17635 dest = change_address (destmem, QImode, tmp);
17636 emit_move_insn (dest, src);
17637 emit_label (label);
17638 LABEL_NUSES (label) = 1;
17643 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17644 static void
17645 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17646 rtx count, int max_size)
17648 count =
17649 expand_simple_binop (counter_mode (count), AND, count,
17650 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17651 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17652 gen_lowpart (QImode, value), count, QImode,
17653 1, max_size / 2);
17656 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17657 static void
17658 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17660 rtx dest;
17662 if (CONST_INT_P (count))
17664 HOST_WIDE_INT countval = INTVAL (count);
17665 int offset = 0;
17667 if ((countval & 0x10) && max_size > 16)
17669 if (TARGET_64BIT)
17671 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17672 emit_insn (gen_strset (destptr, dest, value));
17673 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17674 emit_insn (gen_strset (destptr, dest, value));
17676 else
17677 gcc_unreachable ();
17678 offset += 16;
17680 if ((countval & 0x08) && max_size > 8)
17682 if (TARGET_64BIT)
17684 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17685 emit_insn (gen_strset (destptr, dest, value));
17687 else
17689 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17690 emit_insn (gen_strset (destptr, dest, value));
17691 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17692 emit_insn (gen_strset (destptr, dest, value));
17694 offset += 8;
17696 if ((countval & 0x04) && max_size > 4)
17698 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17699 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17700 offset += 4;
17702 if ((countval & 0x02) && max_size > 2)
17704 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17705 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17706 offset += 2;
17708 if ((countval & 0x01) && max_size > 1)
17710 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17711 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17712 offset += 1;
17714 return;
17716 if (max_size > 32)
17718 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17719 return;
17721 if (max_size > 16)
17723 rtx label = ix86_expand_aligntest (count, 16, true);
17724 if (TARGET_64BIT)
17726 dest = change_address (destmem, DImode, destptr);
17727 emit_insn (gen_strset (destptr, dest, value));
17728 emit_insn (gen_strset (destptr, dest, value));
17730 else
17732 dest = change_address (destmem, SImode, destptr);
17733 emit_insn (gen_strset (destptr, dest, value));
17734 emit_insn (gen_strset (destptr, dest, value));
17735 emit_insn (gen_strset (destptr, dest, value));
17736 emit_insn (gen_strset (destptr, dest, value));
17738 emit_label (label);
17739 LABEL_NUSES (label) = 1;
17741 if (max_size > 8)
17743 rtx label = ix86_expand_aligntest (count, 8, true);
17744 if (TARGET_64BIT)
17746 dest = change_address (destmem, DImode, destptr);
17747 emit_insn (gen_strset (destptr, dest, value));
17749 else
17751 dest = change_address (destmem, SImode, destptr);
17752 emit_insn (gen_strset (destptr, dest, value));
17753 emit_insn (gen_strset (destptr, dest, value));
17755 emit_label (label);
17756 LABEL_NUSES (label) = 1;
17758 if (max_size > 4)
17760 rtx label = ix86_expand_aligntest (count, 4, true);
17761 dest = change_address (destmem, SImode, destptr);
17762 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17763 emit_label (label);
17764 LABEL_NUSES (label) = 1;
17766 if (max_size > 2)
17768 rtx label = ix86_expand_aligntest (count, 2, true);
17769 dest = change_address (destmem, HImode, destptr);
17770 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17771 emit_label (label);
17772 LABEL_NUSES (label) = 1;
17774 if (max_size > 1)
17776 rtx label = ix86_expand_aligntest (count, 1, true);
17777 dest = change_address (destmem, QImode, destptr);
17778 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17779 emit_label (label);
17780 LABEL_NUSES (label) = 1;
17784 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
17785 to DESIRED_ALIGNMENT. */
17786 static void
17787 expand_movmem_prologue (rtx destmem, rtx srcmem,
17788 rtx destptr, rtx srcptr, rtx count,
17789 int align, int desired_alignment)
17791 if (align <= 1 && desired_alignment > 1)
17793 rtx label = ix86_expand_aligntest (destptr, 1, false);
17794 srcmem = change_address (srcmem, QImode, srcptr);
17795 destmem = change_address (destmem, QImode, destptr);
17796 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17797 ix86_adjust_counter (count, 1);
17798 emit_label (label);
17799 LABEL_NUSES (label) = 1;
17801 if (align <= 2 && desired_alignment > 2)
17803 rtx label = ix86_expand_aligntest (destptr, 2, false);
17804 srcmem = change_address (srcmem, HImode, srcptr);
17805 destmem = change_address (destmem, HImode, destptr);
17806 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17807 ix86_adjust_counter (count, 2);
17808 emit_label (label);
17809 LABEL_NUSES (label) = 1;
17811 if (align <= 4 && desired_alignment > 4)
17813 rtx label = ix86_expand_aligntest (destptr, 4, false);
17814 srcmem = change_address (srcmem, SImode, srcptr);
17815 destmem = change_address (destmem, SImode, destptr);
17816 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17817 ix86_adjust_counter (count, 4);
17818 emit_label (label);
17819 LABEL_NUSES (label) = 1;
17821 gcc_assert (desired_alignment <= 8);
17824 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17825 ALIGN_BYTES is how many bytes need to be copied. */
17826 static rtx
17827 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17828 int desired_align, int align_bytes)
17830 rtx src = *srcp;
17831 rtx src_size, dst_size;
17832 int off = 0;
17833 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17834 if (src_align_bytes >= 0)
17835 src_align_bytes = desired_align - src_align_bytes;
17836 src_size = MEM_SIZE (src);
17837 dst_size = MEM_SIZE (dst);
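/* Copy 1, 2 and then 4 bytes as dictated by ALIGN_BYTES, raising the
   recorded alignment of DST (and of SRC when its misalignment matches)
   as each piece is emitted.  */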
17838 if (align_bytes & 1)
17840 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17841 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17842 off = 1;
17843 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17845 if (align_bytes & 2)
17847 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17848 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17849 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17850 set_mem_align (dst, 2 * BITS_PER_UNIT);
17851 if (src_align_bytes >= 0
17852 && (src_align_bytes & 1) == (align_bytes & 1)
17853 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17854 set_mem_align (src, 2 * BITS_PER_UNIT);
17855 off = 2;
17856 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17858 if (align_bytes & 4)
17860 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17861 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17862 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17863 set_mem_align (dst, 4 * BITS_PER_UNIT);
17864 if (src_align_bytes >= 0)
17866 unsigned int src_align = 0;
17867 if ((src_align_bytes & 3) == (align_bytes & 3))
17868 src_align = 4;
17869 else if ((src_align_bytes & 1) == (align_bytes & 1))
17870 src_align = 2;
17871 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17872 set_mem_align (src, src_align * BITS_PER_UNIT);
17874 off = 4;
17875 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17877 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17878 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17879 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17880 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17881 if (src_align_bytes >= 0)
17883 unsigned int src_align = 0;
17884 if ((src_align_bytes & 7) == (align_bytes & 7))
17885 src_align = 8;
17886 else if ((src_align_bytes & 3) == (align_bytes & 3))
17887 src_align = 4;
17888 else if ((src_align_bytes & 1) == (align_bytes & 1))
17889 src_align = 2;
17890 if (src_align > (unsigned int) desired_align)
17891 src_align = desired_align;
17892 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17893 set_mem_align (src, src_align * BITS_PER_UNIT);
17895 if (dst_size)
17896 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17897 if (src_size)
17898 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
17899 *srcp = src;
17900 return dst;
17903 /* Set enough bytes of DEST to align DEST, known to be aligned by ALIGN,
17904 to DESIRED_ALIGNMENT. */
17905 static void
17906 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
17907 int align, int desired_alignment)
17909 if (align <= 1 && desired_alignment > 1)
17911 rtx label = ix86_expand_aligntest (destptr, 1, false);
17912 destmem = change_address (destmem, QImode, destptr);
17913 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
17914 ix86_adjust_counter (count, 1);
17915 emit_label (label);
17916 LABEL_NUSES (label) = 1;
17918 if (align <= 2 && desired_alignment > 2)
17920 rtx label = ix86_expand_aligntest (destptr, 2, false);
17921 destmem = change_address (destmem, HImode, destptr);
17922 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
17923 ix86_adjust_counter (count, 2);
17924 emit_label (label);
17925 LABEL_NUSES (label) = 1;
17927 if (align <= 4 && desired_alignment > 4)
17929 rtx label = ix86_expand_aligntest (destptr, 4, false);
17930 destmem = change_address (destmem, SImode, destptr);
17931 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
17932 ix86_adjust_counter (count, 4);
17933 emit_label (label);
17934 LABEL_NUSES (label) = 1;
17936 gcc_assert (desired_alignment <= 8);
17939 /* Set enough bytes of DST to align DST, known to be aligned by ALIGN, to
17940 DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
17941 static rtx
17942 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
17943 int desired_align, int align_bytes)
17945 int off = 0;
17946 rtx dst_size = MEM_SIZE (dst);
17947 if (align_bytes & 1)
17949 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17950 off = 1;
17951 emit_insn (gen_strset (destreg, dst,
17952 gen_lowpart (QImode, value)));
17954 if (align_bytes & 2)
17956 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17957 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17958 set_mem_align (dst, 2 * BITS_PER_UNIT);
17959 off = 2;
17960 emit_insn (gen_strset (destreg, dst,
17961 gen_lowpart (HImode, value)));
17963 if (align_bytes & 4)
17965 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17966 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17967 set_mem_align (dst, 4 * BITS_PER_UNIT);
17968 off = 4;
17969 emit_insn (gen_strset (destreg, dst,
17970 gen_lowpart (SImode, value)));
17972 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17973 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17974 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17975 if (dst_size)
17976 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17977 return dst;
17980 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
17981 static enum stringop_alg
17982 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
17983 int *dynamic_check)
17985 const struct stringop_algs * algs;
17986 bool optimize_for_speed;
17987 /* Algorithms using the rep prefix want at least edi and ecx;
17988 additionally, memset wants eax and memcpy wants esi. Don't
17989 consider such algorithms if the user has appropriated those
17990 registers for their own purposes. */
17991 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
17992 || (memset
17993 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
17995 #define ALG_USABLE_P(alg) (rep_prefix_usable \
17996 || (alg != rep_prefix_1_byte \
17997 && alg != rep_prefix_4_byte \
17998 && alg != rep_prefix_8_byte))
17999 const struct processor_costs *cost;
18001 /* Even if the string operation call is cold, we still might spend a lot
18002 of time processing large blocks. */
18003 if (optimize_function_for_size_p (cfun)
18004 || (optimize_insn_for_size_p ()
18005 && expected_size != -1 && expected_size < 256))
18006 optimize_for_speed = false;
18007 else
18008 optimize_for_speed = true;
18010 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18012 *dynamic_check = -1;
18013 if (memset)
18014 algs = &cost->memset[TARGET_64BIT != 0];
18015 else
18016 algs = &cost->memcpy[TARGET_64BIT != 0];
18017 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18018 return stringop_alg;
18019 /* rep; movq or rep; movl is the smallest variant. */
18020 else if (!optimize_for_speed)
18022 if (!count || (count & 3))
18023 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18024 else
18025 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18027 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.  */
18029 else if (expected_size != -1 && expected_size < 4)
18030 return loop_1_byte;
18031 else if (expected_size != -1)
18033 unsigned int i;
18034 enum stringop_alg alg = libcall;
18035 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18037 /* We get here if the algorithms that were not libcall-based
18038 were rep-prefix based and we are unable to use rep prefixes
18039 based on global register usage. Break out of the loop and
18040 use the heuristic below. */
18041 if (algs->size[i].max == 0)
18042 break;
18043 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18045 enum stringop_alg candidate = algs->size[i].alg;
18047 if (candidate != libcall && ALG_USABLE_P (candidate))
18048 alg = candidate;
18049 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18050 last non-libcall inline algorithm. */
18051 if (TARGET_INLINE_ALL_STRINGOPS)
18053 /* When the current size is best copied by a libcall, but we are
18054 still forced to inline, run the heuristic below that will pick
18055 code for medium-sized blocks.  */
18056 if (alg != libcall)
18057 return alg;
18058 break;
18060 else if (ALG_USABLE_P (candidate))
18061 return candidate;
18064 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18066 /* When asked to inline the call anyway, try to pick a meaningful choice.
18067 We look for the maximal size of a block that is faster to copy by hand
18068 and take blocks of at most that size, guessing that the average size
18069 will be roughly half of the block.
18071 If this turns out to be bad, we might simply specify the preferred
18072 choice in ix86_costs. */
18073 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18074 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18076 int max = -1;
18077 enum stringop_alg alg;
18078 int i;
18079 bool any_alg_usable_p = true;
18081 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18083 enum stringop_alg candidate = algs->size[i].alg;
18084 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18086 if (candidate != libcall && candidate
18087 && ALG_USABLE_P (candidate))
18088 max = algs->size[i].max;
18090 /* If there aren't any usable algorithms, then recursing on
18091 smaller sizes isn't going to find anything. Just return the
18092 simple byte-at-a-time copy loop. */
18093 if (!any_alg_usable_p)
18095 /* Pick something reasonable. */
18096 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18097 *dynamic_check = 128;
18098 return loop_1_byte;
18100 if (max == -1)
18101 max = 4096;
18102 alg = decide_alg (count, max / 2, memset, dynamic_check);
18103 gcc_assert (*dynamic_check == -1);
18104 gcc_assert (alg != libcall);
18105 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18106 *dynamic_check = max;
18107 return alg;
18109 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18110 #undef ALG_USABLE_P
18113 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18114 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18115 static int
18116 decide_alignment (int align,
18117 enum stringop_alg alg,
18118 int expected_size)
18120 int desired_align = 0;
18121 switch (alg)
18123 case no_stringop:
18124 gcc_unreachable ();
18125 case loop:
18126 case unrolled_loop:
18127 desired_align = GET_MODE_SIZE (Pmode);
18128 break;
18129 case rep_prefix_8_byte:
18130 desired_align = 8;
18131 break;
18132 case rep_prefix_4_byte:
18133 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18134 copying a whole cache line at once. */
18135 if (TARGET_PENTIUMPRO)
18136 desired_align = 8;
18137 else
18138 desired_align = 4;
18139 break;
18140 case rep_prefix_1_byte:
18141 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18142 copying a whole cache line at once. */
18143 if (TARGET_PENTIUMPRO)
18144 desired_align = 8;
18145 else
18146 desired_align = 1;
18147 break;
18148 case loop_1_byte:
18149 desired_align = 1;
18150 break;
18151 case libcall:
18152 return 0;
18155 if (optimize_size)
18156 desired_align = 1;
18157 if (desired_align < align)
18158 desired_align = align;
18159 if (expected_size != -1 && expected_size < 4)
18160 desired_align = align;
18161 return desired_align;
18164 /* Return the smallest power of 2 greater than VAL. */
18165 static int
18166 smallest_pow2_greater_than (int val)
18168 int ret = 1;
18169 while (ret <= val)
18170 ret <<= 1;
18171 return ret;
18174 /* Expand string move (memcpy) operation. Use i386 string operations when
18175 profitable. expand_setmem contains similar code. The code depends upon
18176 architecture, block size and alignment, but always has the same
18177 overall structure:
18179 1) Prologue guard: Conditional that jumps up to epilogues for small
18180 blocks that can be handled by epilogue alone. This is faster but
18181 also needed for correctness, since the prologue assumes the block is larger
18182 than the desired alignment.
18184 Optional dynamic check for size and libcall for large
18185 blocks is emitted here too, with -minline-stringops-dynamically.
18187 2) Prologue: copy first few bytes in order to get destination aligned
18188 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
18189 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18190 We emit either a jump tree on power of two sized blocks, or a byte loop.
18192 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18193 with specified algorithm.
18195 4) Epilogue: code copying tail of the block that is too small to be
18196 handled by main body (or up to size guarded by prologue guard). */
18198 int
18199 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18200 rtx expected_align_exp, rtx expected_size_exp)
18202 rtx destreg;
18203 rtx srcreg;
18204 rtx label = NULL;
18205 rtx tmp;
18206 rtx jump_around_label = NULL;
18207 HOST_WIDE_INT align = 1;
18208 unsigned HOST_WIDE_INT count = 0;
18209 HOST_WIDE_INT expected_size = -1;
18210 int size_needed = 0, epilogue_size_needed;
18211 int desired_align = 0, align_bytes = 0;
18212 enum stringop_alg alg;
18213 int dynamic_check;
18214 bool need_zero_guard = false;
18216 if (CONST_INT_P (align_exp))
18217 align = INTVAL (align_exp);
18218 /* i386 can do misaligned access at a reasonably increased cost. */
18219 if (CONST_INT_P (expected_align_exp)
18220 && INTVAL (expected_align_exp) > align)
18221 align = INTVAL (expected_align_exp);
18222 /* ALIGN is the minimum of destination and source alignment, but we care here
18223 just about destination alignment. */
18224 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18225 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18227 if (CONST_INT_P (count_exp))
18228 count = expected_size = INTVAL (count_exp);
18229 if (CONST_INT_P (expected_size_exp) && count == 0)
18230 expected_size = INTVAL (expected_size_exp);
18232 /* Make sure we don't need to care about overflow later on. */
18233 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18234 return 0;
18236 /* Step 0: Decide on preferred algorithm, desired alignment and
18237 size of chunks to be copied by main loop. */
18239 alg = decide_alg (count, expected_size, false, &dynamic_check);
18240 desired_align = decide_alignment (align, alg, expected_size);
18242 if (!TARGET_ALIGN_STRINGOPS)
18243 align = desired_align;
18245 if (alg == libcall)
18246 return 0;
18247 gcc_assert (alg != no_stringop);
18248 if (!count)
18249 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18250 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18251 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18252 switch (alg)
18254 case libcall:
18255 case no_stringop:
18256 gcc_unreachable ();
18257 case loop:
18258 need_zero_guard = true;
18259 size_needed = GET_MODE_SIZE (Pmode);
18260 break;
18261 case unrolled_loop:
18262 need_zero_guard = true;
18263 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18264 break;
18265 case rep_prefix_8_byte:
18266 size_needed = 8;
18267 break;
18268 case rep_prefix_4_byte:
18269 size_needed = 4;
18270 break;
18271 case rep_prefix_1_byte:
18272 size_needed = 1;
18273 break;
18274 case loop_1_byte:
18275 need_zero_guard = true;
18276 size_needed = 1;
18277 break;
18280 epilogue_size_needed = size_needed;
18282 /* Step 1: Prologue guard. */
18284 /* Alignment code needs count to be in register. */
18285 if (CONST_INT_P (count_exp) && desired_align > align)
18287 if (INTVAL (count_exp) > desired_align
18288 && INTVAL (count_exp) > size_needed)
18290 align_bytes
18291 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18292 if (align_bytes <= 0)
18293 align_bytes = 0;
18294 else
18295 align_bytes = desired_align - align_bytes;
18297 if (align_bytes == 0)
18298 count_exp = force_reg (counter_mode (count_exp), count_exp);
18300 gcc_assert (desired_align >= 1 && align >= 1);
18302 /* Ensure that alignment prologue won't copy past end of block. */
18303 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18305 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18306 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18307 Make sure it is a power of 2. */
18308 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18310 if (count)
18312 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18314 /* If main algorithm works on QImode, no epilogue is needed.
18315 For small sizes just don't align anything. */
18316 if (size_needed == 1)
18317 desired_align = align;
18318 else
18319 goto epilogue;
18322 else
18324 label = gen_label_rtx ();
18325 emit_cmp_and_jump_insns (count_exp,
18326 GEN_INT (epilogue_size_needed),
18327 LTU, 0, counter_mode (count_exp), 1, label);
18328 if (expected_size == -1 || expected_size < epilogue_size_needed)
18329 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18330 else
18331 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18335 /* Emit code to decide at runtime whether a library call or inline code
18336 should be used. */
18337 if (dynamic_check != -1)
18339 if (CONST_INT_P (count_exp))
18341 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18343 emit_block_move_via_libcall (dst, src, count_exp, false);
18344 count_exp = const0_rtx;
18345 goto epilogue;
18348 else
18350 rtx hot_label = gen_label_rtx ();
18351 jump_around_label = gen_label_rtx ();
18352 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18353 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18354 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18355 emit_block_move_via_libcall (dst, src, count_exp, false);
18356 emit_jump (jump_around_label);
18357 emit_label (hot_label);
18361 /* Step 2: Alignment prologue. */
18363 if (desired_align > align)
18365 if (align_bytes == 0)
18367 /* Except for the first move in the epilogue, we no longer know
18368 the constant offset in the aliasing info. It doesn't seem worth
18369 the pain to maintain it for the first move, so throw away
18370 the info early. */
18371 src = change_address (src, BLKmode, srcreg);
18372 dst = change_address (dst, BLKmode, destreg);
18373 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18374 desired_align);
18376 else
18378 /* If we know how many bytes need to be stored before dst is
18379 sufficiently aligned, maintain aliasing info accurately. */
18380 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18381 desired_align, align_bytes);
18382 count_exp = plus_constant (count_exp, -align_bytes);
18383 count -= align_bytes;
18385 if (need_zero_guard
18386 && (count < (unsigned HOST_WIDE_INT) size_needed
18387 || (align_bytes == 0
18388 && count < ((unsigned HOST_WIDE_INT) size_needed
18389 + desired_align - align))))
18391 /* It is possible that we copied enough so the main loop will not
18392 execute. */
18393 gcc_assert (size_needed > 1);
18394 if (label == NULL_RTX)
18395 label = gen_label_rtx ();
18396 emit_cmp_and_jump_insns (count_exp,
18397 GEN_INT (size_needed),
18398 LTU, 0, counter_mode (count_exp), 1, label);
18399 if (expected_size == -1
18400 || expected_size < (desired_align - align) / 2 + size_needed)
18401 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18402 else
18403 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18406 if (label && size_needed == 1)
18408 emit_label (label);
18409 LABEL_NUSES (label) = 1;
18410 label = NULL;
18411 epilogue_size_needed = 1;
18413 else if (label == NULL_RTX)
18414 epilogue_size_needed = size_needed;
18416 /* Step 3: Main loop. */
18418 switch (alg)
18420 case libcall:
18421 case no_stringop:
18422 gcc_unreachable ();
18423 case loop_1_byte:
18424 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18425 count_exp, QImode, 1, expected_size);
18426 break;
18427 case loop:
18428 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18429 count_exp, Pmode, 1, expected_size);
18430 break;
18431 case unrolled_loop:
18432 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
18433 registers for 4 temporaries anyway. */
18434 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18435 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18436 expected_size);
18437 break;
18438 case rep_prefix_8_byte:
18439 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18440 DImode);
18441 break;
18442 case rep_prefix_4_byte:
18443 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18444 SImode);
18445 break;
18446 case rep_prefix_1_byte:
18447 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18448 QImode);
18449 break;
18451 /* Properly adjust the offsets of the src and dest memory for aliasing. */
18452 if (CONST_INT_P (count_exp))
18454 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18455 (count / size_needed) * size_needed);
18456 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18457 (count / size_needed) * size_needed);
18459 else
18461 src = change_address (src, BLKmode, srcreg);
18462 dst = change_address (dst, BLKmode, destreg);
18465 /* Step 4: Epilogue to copy the remaining bytes. */
18466 epilogue:
18467 if (label)
18469 /* When the main loop is done, COUNT_EXP might hold the original count,
18470 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18471 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18472 bytes. Compensate if needed. */
18474 if (size_needed < epilogue_size_needed)
18476 tmp =
18477 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18478 GEN_INT (size_needed - 1), count_exp, 1,
18479 OPTAB_DIRECT);
18480 if (tmp != count_exp)
18481 emit_move_insn (count_exp, tmp);
18483 emit_label (label);
18484 LABEL_NUSES (label) = 1;
18487 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18488 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18489 epilogue_size_needed);
18490 if (jump_around_label)
18491 emit_label (jump_around_label);
18492 return 1;
18495 /* Helper function for memset. For QImode value 0xXY produce
18496 0xXYXYXYXY of the width specified by MODE. This is essentially
18497 a * 0x01010101, but we can do slightly better than
18498 synth_mult by unwinding the sequence by hand on CPUs with
18499 slow multiply. */
18500 static rtx
18501 promote_duplicated_reg (enum machine_mode mode, rtx val)
18503 enum machine_mode valmode = GET_MODE (val);
18504 rtx tmp;
18505 int nops = mode == DImode ? 3 : 2;
18507 gcc_assert (mode == SImode || mode == DImode);
18508 if (val == const0_rtx)
18509 return copy_to_mode_reg (mode, const0_rtx);
18510 if (CONST_INT_P (val))
18512 HOST_WIDE_INT v = INTVAL (val) & 255;
18514 v |= v << 8;
18515 v |= v << 16;
18516 if (mode == DImode)
18517 v |= (v << 16) << 16;
18518 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18521 if (valmode == VOIDmode)
18522 valmode = QImode;
18523 if (valmode != QImode)
18524 val = gen_lowpart (QImode, val);
18525 if (mode == QImode)
18526 return val;
18527 if (!TARGET_PARTIAL_REG_STALL)
18528 nops--;
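/* Replicate the byte with a multiply by 0x01010101 (0x01...01 in DImode)
   when that multiply is cheaper than the shift-and-or sequence below.  */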
18529 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18530 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18531 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18532 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18534 rtx reg = convert_modes (mode, QImode, val, true);
18535 tmp = promote_duplicated_reg (mode, const1_rtx);
18536 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18537 OPTAB_DIRECT);
18539 else
18541 rtx reg = convert_modes (mode, QImode, val, true);
18543 if (!TARGET_PARTIAL_REG_STALL)
18544 if (mode == SImode)
18545 emit_insn (gen_movsi_insv_1 (reg, reg));
18546 else
18547 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18548 else
18550 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18551 NULL, 1, OPTAB_DIRECT);
18552 reg =
18553 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18555 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18556 NULL, 1, OPTAB_DIRECT);
18557 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18558 if (mode == SImode)
18559 return reg;
18560 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18561 NULL, 1, OPTAB_DIRECT);
18562 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18563 return reg;
18567 /* Duplicate value VAL using promote_duplicated_reg into the maximal size that
18568 will be needed by the main loop copying SIZE_NEEDED chunks and by the prologue
18569 getting alignment from ALIGN to DESIRED_ALIGN. */
18570 static rtx
18571 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18573 rtx promoted_val;
18575 if (TARGET_64BIT
18576 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18577 promoted_val = promote_duplicated_reg (DImode, val);
18578 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18579 promoted_val = promote_duplicated_reg (SImode, val);
18580 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18581 promoted_val = promote_duplicated_reg (HImode, val);
18582 else
18583 promoted_val = val;
18585 return promoted_val;
18588 /* Expand a string set operation (memset). Use i386 string operations when
18589 profitable. See expand_movmem comment for explanation of individual
18590 steps performed. */
18591 int
18592 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18593 rtx expected_align_exp, rtx expected_size_exp)
18595 rtx destreg;
18596 rtx label = NULL;
18597 rtx tmp;
18598 rtx jump_around_label = NULL;
18599 HOST_WIDE_INT align = 1;
18600 unsigned HOST_WIDE_INT count = 0;
18601 HOST_WIDE_INT expected_size = -1;
18602 int size_needed = 0, epilogue_size_needed;
18603 int desired_align = 0, align_bytes = 0;
18604 enum stringop_alg alg;
18605 rtx promoted_val = NULL;
18606 bool force_loopy_epilogue = false;
18607 int dynamic_check;
18608 bool need_zero_guard = false;
18610 if (CONST_INT_P (align_exp))
18611 align = INTVAL (align_exp);
18612 /* i386 can do misaligned access at a reasonably increased cost. */
18613 if (CONST_INT_P (expected_align_exp)
18614 && INTVAL (expected_align_exp) > align)
18615 align = INTVAL (expected_align_exp);
18616 if (CONST_INT_P (count_exp))
18617 count = expected_size = INTVAL (count_exp);
18618 if (CONST_INT_P (expected_size_exp) && count == 0)
18619 expected_size = INTVAL (expected_size_exp);
18621 /* Make sure we don't need to care about overflow later on. */
18622 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18623 return 0;
18625 /* Step 0: Decide on preferred algorithm, desired alignment and
18626 size of chunks to be copied by main loop. */
18628 alg = decide_alg (count, expected_size, true, &dynamic_check);
18629 desired_align = decide_alignment (align, alg, expected_size);
18631 if (!TARGET_ALIGN_STRINGOPS)
18632 align = desired_align;
18634 if (alg == libcall)
18635 return 0;
18636 gcc_assert (alg != no_stringop);
18637 if (!count)
18638 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18639 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18640 switch (alg)
18642 case libcall:
18643 case no_stringop:
18644 gcc_unreachable ();
18645 case loop:
18646 need_zero_guard = true;
18647 size_needed = GET_MODE_SIZE (Pmode);
18648 break;
18649 case unrolled_loop:
18650 need_zero_guard = true;
18651 size_needed = GET_MODE_SIZE (Pmode) * 4;
18652 break;
18653 case rep_prefix_8_byte:
18654 size_needed = 8;
18655 break;
18656 case rep_prefix_4_byte:
18657 size_needed = 4;
18658 break;
18659 case rep_prefix_1_byte:
18660 size_needed = 1;
18661 break;
18662 case loop_1_byte:
18663 need_zero_guard = true;
18664 size_needed = 1;
18665 break;
18667 epilogue_size_needed = size_needed;
18669 /* Step 1: Prologue guard. */
18671 /* Alignment code needs count to be in register. */
18672 if (CONST_INT_P (count_exp) && desired_align > align)
18674 if (INTVAL (count_exp) > desired_align
18675 && INTVAL (count_exp) > size_needed)
18677 align_bytes
18678 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18679 if (align_bytes <= 0)
18680 align_bytes = 0;
18681 else
18682 align_bytes = desired_align - align_bytes;
18684 if (align_bytes == 0)
18686 enum machine_mode mode = SImode;
18687 if (TARGET_64BIT && (count & ~0xffffffff))
18688 mode = DImode;
18689 count_exp = force_reg (mode, count_exp);
18692 /* Do the cheap promotion to allow better CSE across the
18693 main loop and epilogue (i.e. one load of the big constant in
18694 front of all the code).  */
18695 if (CONST_INT_P (val_exp))
18696 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18697 desired_align, align);
18698 /* Ensure that alignment prologue won't copy past end of block. */
18699 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18701 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18702 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18703 Make sure it is a power of 2. */
18704 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18706 /* To improve performance of small blocks, we jump around the VAL
18707 promoting code. This means that if the promoted VAL is not constant,
18708 we might not use it in the epilogue and have to use the byte
18709 loop variant. */
18710 if (epilogue_size_needed > 2 && !promoted_val)
18711 force_loopy_epilogue = true;
18712 if (count)
18714 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18716 /* If main algorithm works on QImode, no epilogue is needed.
18717 For small sizes just don't align anything. */
18718 if (size_needed == 1)
18719 desired_align = align;
18720 else
18721 goto epilogue;
18724 else
18726 label = gen_label_rtx ();
18727 emit_cmp_and_jump_insns (count_exp,
18728 GEN_INT (epilogue_size_needed),
18729 LTU, 0, counter_mode (count_exp), 1, label);
18730 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18731 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18732 else
18733 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18736 if (dynamic_check != -1)
18738 rtx hot_label = gen_label_rtx ();
18739 jump_around_label = gen_label_rtx ();
18740 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18741 LEU, 0, counter_mode (count_exp), 1, hot_label);
18742 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18743 set_storage_via_libcall (dst, count_exp, val_exp, false);
18744 emit_jump (jump_around_label);
18745 emit_label (hot_label);
18748 /* Step 2: Alignment prologue. */
18750 /* Do the expensive promotion once we have branched off the small blocks. */
18751 if (!promoted_val)
18752 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18753 desired_align, align);
18754 gcc_assert (desired_align >= 1 && align >= 1);
18756 if (desired_align > align)
18758 if (align_bytes == 0)
18760 /* Except for the first move in the epilogue, we no longer know
18761 the constant offset in the aliasing info. It doesn't seem worth
18762 the pain to maintain it for the first move, so throw away
18763 the info early. */
18764 dst = change_address (dst, BLKmode, destreg);
18765 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18766 desired_align);
18768 else
18770 /* If we know how many bytes need to be stored before dst is
18771 sufficiently aligned, maintain aliasing info accurately. */
18772 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18773 desired_align, align_bytes);
18774 count_exp = plus_constant (count_exp, -align_bytes);
18775 count -= align_bytes;
18777 if (need_zero_guard
18778 && (count < (unsigned HOST_WIDE_INT) size_needed
18779 || (align_bytes == 0
18780 && count < ((unsigned HOST_WIDE_INT) size_needed
18781 + desired_align - align))))
18783 /* It is possible that we copied enough so the main loop will not
18784 execute. */
18785 gcc_assert (size_needed > 1);
18786 if (label == NULL_RTX)
18787 label = gen_label_rtx ();
18788 emit_cmp_and_jump_insns (count_exp,
18789 GEN_INT (size_needed),
18790 LTU, 0, counter_mode (count_exp), 1, label);
18791 if (expected_size == -1
18792 || expected_size < (desired_align - align) / 2 + size_needed)
18793 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18794 else
18795 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18798 if (label && size_needed == 1)
18800 emit_label (label);
18801 LABEL_NUSES (label) = 1;
18802 label = NULL;
18803 promoted_val = val_exp;
18804 epilogue_size_needed = 1;
18806 else if (label == NULL_RTX)
18807 epilogue_size_needed = size_needed;
18809 /* Step 3: Main loop. */
18811 switch (alg)
18813 case libcall:
18814 case no_stringop:
18815 gcc_unreachable ();
18816 case loop_1_byte:
18817 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18818 count_exp, QImode, 1, expected_size);
18819 break;
18820 case loop:
18821 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18822 count_exp, Pmode, 1, expected_size);
18823 break;
18824 case unrolled_loop:
18825 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18826 count_exp, Pmode, 4, expected_size);
18827 break;
18828 case rep_prefix_8_byte:
18829 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18830 DImode, val_exp);
18831 break;
18832 case rep_prefix_4_byte:
18833 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18834 SImode, val_exp);
18835 break;
18836 case rep_prefix_1_byte:
18837 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18838 QImode, val_exp);
18839 break;
18841 /* Properly adjust the offset of the dest memory for aliasing. */
18842 if (CONST_INT_P (count_exp))
18843 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18844 (count / size_needed) * size_needed);
18845 else
18846 dst = change_address (dst, BLKmode, destreg);
18848 /* Step 4: Epilogue to copy the remaining bytes. */
18850 if (label)
18852 /* When the main loop is done, COUNT_EXP might hold the original count,
18853 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18854 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18855 bytes. Compensate if needed. */
18857 if (size_needed < epilogue_size_needed)
18859 tmp =
18860 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18861 GEN_INT (size_needed - 1), count_exp, 1,
18862 OPTAB_DIRECT);
18863 if (tmp != count_exp)
18864 emit_move_insn (count_exp, tmp);
18866 emit_label (label);
18867 LABEL_NUSES (label) = 1;
18869 epilogue:
18870 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18872 if (force_loopy_epilogue)
18873 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18874 epilogue_size_needed);
18875 else
18876 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18877 epilogue_size_needed);
18879 if (jump_around_label)
18880 emit_label (jump_around_label);
18881 return 1;
18884 /* Expand the appropriate insns for doing strlen if not just doing
18885 repnz; scasb
18887 out = result, initialized with the start address
18888 align_rtx = alignment of the address.
18889 scratch = scratch register, initialized with the start address when
18890 not aligned, otherwise undefined
18892 This is just the body. It needs the initializations mentioned above and
18893 some address computing at the end. These things are done in i386.md. */
18895 static void
18896 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
18898 int align;
18899 rtx tmp;
18900 rtx align_2_label = NULL_RTX;
18901 rtx align_3_label = NULL_RTX;
18902 rtx align_4_label = gen_label_rtx ();
18903 rtx end_0_label = gen_label_rtx ();
18904 rtx mem;
18905 rtx tmpreg = gen_reg_rtx (SImode);
18906 rtx scratch = gen_reg_rtx (SImode);
18907 rtx cmp;
18909 align = 0;
18910 if (CONST_INT_P (align_rtx))
18911 align = INTVAL (align_rtx);
18913 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
18915 /* Is there a known alignment and is it less than 4? */
18916 if (align < 4)
18918 rtx scratch1 = gen_reg_rtx (Pmode);
18919 emit_move_insn (scratch1, out);
18920 /* Is there a known alignment and is it not 2? */
18921 if (align != 2)
18923 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
18924 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
18926 /* Leave just the 3 lower bits. */
18927 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
18928 NULL_RTX, 0, OPTAB_WIDEN);
18930 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18931 Pmode, 1, align_4_label);
18932 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
18933 Pmode, 1, align_2_label);
18934 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
18935 Pmode, 1, align_3_label);
18937 else
18939 /* Since the alignment is 2, we have to check 2 or 0 bytes;
18940 check if it is aligned to a 4-byte boundary. */
18942 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
18943 NULL_RTX, 0, OPTAB_WIDEN);
18945 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18946 Pmode, 1, align_4_label);
18949 mem = change_address (src, QImode, out);
18951 /* Now compare the bytes. */
18953 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
18954 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
18955 QImode, 1, end_0_label);
18957 /* Increment the address. */
18958 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18960 /* Not needed with an alignment of 2. */
18961 if (align != 2)
18963 emit_label (align_2_label);
18965 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
18966 end_0_label);
18968 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18970 emit_label (align_3_label);
18973 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
18974 end_0_label);
18976 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18979 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
18980 align this loop; doing so only makes the program huge and does not
18981 help speed. */
18982 emit_label (align_4_label);
18984 mem = change_address (src, SImode, out);
18985 emit_move_insn (scratch, mem);
18986 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
18988 /* This formula yields a nonzero result iff one of the bytes is zero.
18989 This saves three branches inside the loop and many cycles. */
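/* That is, TMPREG = (x - 0x01010101) & ~x & 0x80808080, which is nonzero
   exactly when the word x loaded into SCRATCH contains a zero byte.  */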
18991 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
18992 emit_insn (gen_one_cmplsi2 (scratch, scratch));
18993 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
18994 emit_insn (gen_andsi3 (tmpreg, tmpreg,
18995 gen_int_mode (0x80808080, SImode)));
18996 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
18997 align_4_label);
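/* The loop exits with TMPREG nonzero, i.e. the word just loaded into
   SCRATCH contains a zero byte.  Use the per-byte flags in TMPREG to
   advance OUT to that byte, with conditional moves when available and a
   short branch sequence otherwise.  */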
18999 if (TARGET_CMOVE)
19001 rtx reg = gen_reg_rtx (SImode);
19002 rtx reg2 = gen_reg_rtx (Pmode);
19003 emit_move_insn (reg, tmpreg);
19004 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19006 /* If zero is not in the first two bytes, move two bytes forward. */
19007 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19008 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19009 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19010 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19011 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19012 reg,
19013 tmpreg)));
19014 /* Emit lea manually to avoid clobbering of flags. */
19015 emit_insn (gen_rtx_SET (SImode, reg2,
19016 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19018 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19019 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19020 emit_insn (gen_rtx_SET (VOIDmode, out,
19021 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19022 reg2,
19023 out)));
19025 else
19027 rtx end_2_label = gen_label_rtx ();
19028 /* Is zero in the first two bytes? */
19030 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19031 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19032 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19033 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19034 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19035 pc_rtx);
19036 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19037 JUMP_LABEL (tmp) = end_2_label;
19039 /* Not in the first two. Move two bytes forward. */
19040 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19041 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19043 emit_label (end_2_label);
19047 /* Avoid a branch in fixing the byte. */
19048 tmpreg = gen_lowpart (QImode, tmpreg);
19049 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19050 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19051 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19052 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19054 emit_label (end_0_label);
19057 /* Expand strlen. */
19059 int
19060 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19062 rtx addr, scratch1, scratch2, scratch3, scratch4;
19064 /* The generic case of the strlen expander is long. Avoid expanding it
19065 unless TARGET_INLINE_ALL_STRINGOPS. */
19067 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19068 && !TARGET_INLINE_ALL_STRINGOPS
19069 && !optimize_insn_for_size_p ()
19070 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19071 return 0;
19073 addr = force_reg (Pmode, XEXP (src, 0));
19074 scratch1 = gen_reg_rtx (Pmode);
19076 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19077 && !optimize_insn_for_size_p ())
19079 /* Well it seems that some optimizer does not combine a call like
19080 foo(strlen(bar), strlen(bar));
19081 when the move and the subtraction are done here. It does calculate
19082 the length just once when these instructions are done inside of
19083 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
19084 often used and I use one fewer register for the lifetime of
19085 output_strlen_unroll() this is better. */
19087 emit_move_insn (out, addr);
19089 ix86_expand_strlensi_unroll_1 (out, src, align);
19091 /* strlensi_unroll_1 returns the address of the zero at the end of
19092 the string, like memchr(), so compute the length by subtracting
19093 the start address. */
19094 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19096 else
19098 rtx unspec;
19100 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19101 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19102 return false;
19104 scratch2 = gen_reg_rtx (Pmode);
19105 scratch3 = gen_reg_rtx (Pmode);
19106 scratch4 = force_reg (Pmode, constm1_rtx);
19108 emit_move_insn (scratch3, addr);
19109 eoschar = force_reg (QImode, eoschar);
19111 src = replace_equiv_address_nv (src, scratch3);
19113 /* If .md starts supporting :P, this can be done in .md. */
19114 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19115 scratch4), UNSPEC_SCAS);
19116 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19117 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19118 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
19120 return 1;
19123 /* For a given symbol (function), construct code to compute the address of its
19124 PLT entry in the large x86-64 PIC model. */
19125 rtx
19126 construct_plt_address (rtx symbol)
19128 rtx tmp = gen_reg_rtx (Pmode);
19129 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19131 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19132 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19134 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19135 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19136 return tmp;
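/* Expand a call to the function at FNADDR.  RETVAL, if nonzero, receives
   the returned value, CALLARG1 becomes the second operand of the CALL rtx,
   CALLARG2 carries extra ABI information (see below), POP (if nonzero) is
   the amount of stack to pop after the call, and SIBCALL is nonzero when
   expanding a sibling call.  */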
19139 void
19140 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19141 rtx callarg2,
19142 rtx pop, int sibcall)
19144 rtx use = NULL, call;
19146 if (pop == const0_rtx)
19147 pop = NULL;
19148 gcc_assert (!TARGET_64BIT || !pop);
19150 if (TARGET_MACHO && !TARGET_64BIT)
19152 #if TARGET_MACHO
19153 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19154 fnaddr = machopic_indirect_call_target (fnaddr);
19155 #endif
19157 else
19159 /* Static functions and indirect calls don't need the pic register. */
19160 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19161 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19162 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19163 use_reg (&use, pic_offset_table_rtx);
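/* In 64-bit mode a varargs callee is told in %al an upper bound on the
   number of SSE registers used for arguments; a non-negative CALLARG2
   carries that count.  */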
19166 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19168 rtx al = gen_rtx_REG (QImode, AX_REG);
19169 emit_move_insn (al, callarg2);
19170 use_reg (&use, al);
19173 if (ix86_cmodel == CM_LARGE_PIC
19174 && MEM_P (fnaddr)
19175 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19176 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19177 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19178 else if (sibcall
19179 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19180 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19182 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19183 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19186 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19187 if (retval)
19188 call = gen_rtx_SET (VOIDmode, retval, call);
19189 if (pop)
19191 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19192 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19193 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19195 if (TARGET_64BIT
19196 && ix86_cfun_abi () == MS_ABI
19197 && (!callarg2 || INTVAL (callarg2) != -2))
19199 /* We need to represent that SI and DI registers are clobbered
19200 by SYSV calls. */
19201 static int clobbered_registers[] = {
19202 XMM6_REG, XMM7_REG, XMM8_REG,
19203 XMM9_REG, XMM10_REG, XMM11_REG,
19204 XMM12_REG, XMM13_REG, XMM14_REG,
19205 XMM15_REG, SI_REG, DI_REG
19207 unsigned int i;
19208 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19209 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19210 UNSPEC_MS_TO_SYSV_CALL);
19212 vec[0] = call;
19213 vec[1] = unspec;
19214 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19215 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19216 ? TImode : DImode,
19217 gen_rtx_REG
19218 (SSE_REGNO_P (clobbered_registers[i])
19219 ? TImode : DImode,
19220 clobbered_registers[i]));
19222 call = gen_rtx_PARALLEL (VOIDmode,
19223 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19224 + 2, vec));
19227 call = emit_call_insn (call);
19228 if (use)
19229 CALL_INSN_FUNCTION_USAGE (call) = use;
19233 /* Clear stack slot assignments remembered from previous functions.
19234 This is called from INIT_EXPANDERS once before RTL is emitted for each
19235 function. */
19237 static struct machine_function *
19238 ix86_init_machine_status (void)
19240 struct machine_function *f;
19242 f = GGC_CNEW (struct machine_function);
19243 f->use_fast_prologue_epilogue_nregs = -1;
19244 f->tls_descriptor_call_expanded_p = 0;
19245 f->call_abi = ix86_abi;
19247 return f;
19250 /* Return a MEM corresponding to a stack slot with mode MODE.
19251 Allocate a new slot if necessary.
19253 The RTL for a function can have several slots available: N is
19254 which slot to use. */
19256 rtx
19257 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19259 struct stack_local_entry *s;
19261 gcc_assert (n < MAX_386_STACK_LOCALS);
19263 /* Virtual slot is valid only before vregs are instantiated. */
19264 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19266 for (s = ix86_stack_locals; s; s = s->next)
19267 if (s->mode == mode && s->n == n)
19268 return copy_rtx (s->rtl);
19270 s = (struct stack_local_entry *)
19271 ggc_alloc (sizeof (struct stack_local_entry));
19272 s->n = n;
19273 s->mode = mode;
19274 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19276 s->next = ix86_stack_locals;
19277 ix86_stack_locals = s;
19278 return s->rtl;
19281 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19283 static GTY(()) rtx ix86_tls_symbol;
19284 static rtx
19285 ix86_tls_get_addr (void)
19288 if (!ix86_tls_symbol)
19290 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19291 (TARGET_ANY_GNU_TLS
19292 && !TARGET_64BIT)
19293 ? "___tls_get_addr"
19294 : "__tls_get_addr");
19297 return ix86_tls_symbol;
19300 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19302 static GTY(()) rtx ix86_tls_module_base_symbol;
19303 static rtx
19304 ix86_tls_module_base (void)
19307 if (!ix86_tls_module_base_symbol)
19309 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19310 "_TLS_MODULE_BASE_");
19311 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19312 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19315 return ix86_tls_module_base_symbol;
19318 /* Calculate the length of the memory address in the instruction
19319 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19322 memory_address_length (rtx addr)
19324 struct ix86_address parts;
19325 rtx base, index, disp;
19326 int len;
19327 int ok;
19329 if (GET_CODE (addr) == PRE_DEC
19330 || GET_CODE (addr) == POST_INC
19331 || GET_CODE (addr) == PRE_MODIFY
19332 || GET_CODE (addr) == POST_MODIFY)
19333 return 0;
19335 ok = ix86_decompose_address (addr, &parts);
19336 gcc_assert (ok);
19338 if (parts.base && GET_CODE (parts.base) == SUBREG)
19339 parts.base = SUBREG_REG (parts.base);
19340 if (parts.index && GET_CODE (parts.index) == SUBREG)
19341 parts.index = SUBREG_REG (parts.index);
19343 base = parts.base;
19344 index = parts.index;
19345 disp = parts.disp;
19346 len = 0;
19348 /* Rule of thumb:
19349 - esp as the base always wants an index,
19350 - ebp as the base always wants a displacement,
19351 - r12 as the base always wants an index,
19352 - r13 as the base always wants a displacement. */
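   /* A few 32-bit examples of the value computed below (the count is on
      top of the opcode/modrm/prefix bytes): (%eax) -> 0, (%esp) -> 1 (SIB),
      8(%ebp) -> 1 (disp8), foo(%eax) -> 4 (disp32),
      8(%eax,%ebx,2) -> 2 (SIB + disp8).  */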
19354 /* Register Indirect. */
19355 if (base && !index && !disp)
19357 /* esp (for its index) and ebp (for its displacement) need
19358 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19359 code. */
19360 if (REG_P (addr)
19361 && (addr == arg_pointer_rtx
19362 || addr == frame_pointer_rtx
19363 || REGNO (addr) == SP_REG
19364 || REGNO (addr) == BP_REG
19365 || REGNO (addr) == R12_REG
19366 || REGNO (addr) == R13_REG))
19367 len = 1;
19370 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
19371 is not disp32, but disp32(%rip), so for disp32
19372 a SIB byte is needed, unless print_operand_address
19373 optimizes it into disp32(%rip) or (%rip) is implied
19374 by UNSPEC. */
19375 else if (disp && !base && !index)
19377 len = 4;
19378 if (TARGET_64BIT)
19380 rtx symbol = disp;
19382 if (GET_CODE (disp) == CONST)
19383 symbol = XEXP (disp, 0);
19384 if (GET_CODE (symbol) == PLUS
19385 && CONST_INT_P (XEXP (symbol, 1)))
19386 symbol = XEXP (symbol, 0);
19388 if (GET_CODE (symbol) != LABEL_REF
19389 && (GET_CODE (symbol) != SYMBOL_REF
19390 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19391 && (GET_CODE (symbol) != UNSPEC
19392 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19393 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19394 len += 1;
19398 else
19400 /* Find the length of the displacement constant. */
19401 if (disp)
19403 if (base && satisfies_constraint_K (disp))
19404 len = 1;
19405 else
19406 len = 4;
19408 /* ebp always wants a displacement. Similarly r13. */
19409 else if (base && REG_P (base)
19410 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19411 len = 1;
19413 /* An index requires the two-byte modrm form.... */
19414 if (index
19415 /* ...like esp (or r12), which always wants an index. */
19416 || base == arg_pointer_rtx
19417 || base == frame_pointer_rtx
19418 || (base && REG_P (base)
19419 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
19420 len += 1;
19423 switch (parts.seg)
19425 case SEG_FS:
19426 case SEG_GS:
19427 len += 1;
19428 break;
19429 default:
19430 break;
19433 return len;
19436 /* Compute default value for "length_immediate" attribute. When SHORTFORM
19437 is set, expect that the insn has an 8-bit immediate alternative. */
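/* For example, with SHORTFORM set an ALU insn such as addl $8, %eax can use
   the sign-extended imm8 encoding (immediate length 1), while
   addl $1000, %eax still needs the full imm32 (length 4).  */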
19439 ix86_attr_length_immediate_default (rtx insn, int shortform)
19441 int len = 0;
19442 int i;
19443 extract_insn_cached (insn);
19444 for (i = recog_data.n_operands - 1; i >= 0; --i)
19445 if (CONSTANT_P (recog_data.operand[i]))
19447 enum attr_mode mode = get_attr_mode (insn);
19449 gcc_assert (!len);
19450 if (shortform && CONST_INT_P (recog_data.operand[i]))
19452 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19453 switch (mode)
19455 case MODE_QI:
19456 len = 1;
19457 continue;
19458 case MODE_HI:
19459 ival = trunc_int_for_mode (ival, HImode);
19460 break;
19461 case MODE_SI:
19462 ival = trunc_int_for_mode (ival, SImode);
19463 break;
19464 default:
19465 break;
19467 if (IN_RANGE (ival, -128, 127))
19469 len = 1;
19470 continue;
19473 switch (mode)
19475 case MODE_QI:
19476 len = 1;
19477 break;
19478 case MODE_HI:
19479 len = 2;
19480 break;
19481 case MODE_SI:
19482 len = 4;
19483 break;
19484 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19485 case MODE_DI:
19486 len = 4;
19487 break;
19488 default:
19489 fatal_insn ("unknown insn mode", insn);
19492 return len;
19494 /* Compute default value for "length_address" attribute. */
19496 ix86_attr_length_address_default (rtx insn)
19498 int i;
19500 if (get_attr_type (insn) == TYPE_LEA)
19502 rtx set = PATTERN (insn), addr;
19504 if (GET_CODE (set) == PARALLEL)
19505 set = XVECEXP (set, 0, 0);
19507 gcc_assert (GET_CODE (set) == SET);
19509 addr = SET_SRC (set);
19510 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19512 if (GET_CODE (addr) == ZERO_EXTEND)
19513 addr = XEXP (addr, 0);
19514 if (GET_CODE (addr) == SUBREG)
19515 addr = SUBREG_REG (addr);
19518 return memory_address_length (addr);
19521 extract_insn_cached (insn);
19522 for (i = recog_data.n_operands - 1; i >= 0; --i)
19523 if (MEM_P (recog_data.operand[i]))
19525 constrain_operands_cached (reload_completed);
19526 if (which_alternative != -1)
19528 const char *constraints = recog_data.constraints[i];
19529 int alt = which_alternative;
19531 while (*constraints == '=' || *constraints == '+')
19532 constraints++;
19533 while (alt-- > 0)
19534 while (*constraints++ != ',')
19536 /* Skip ignored operands. */
19537 if (*constraints == 'X')
19538 continue;
19540 return memory_address_length (XEXP (recog_data.operand[i], 0));
19542 return 0;
19545 /* Compute default value for "length_vex" attribute. It includes
19546 2 or 3 byte VEX prefix and 1 opcode byte. */
19549 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19550 int has_vex_w)
19552 int i;
19554 /* Only the 0f opcode can use the 2-byte VEX prefix; the VEX W bit
19555 requires the 3-byte VEX prefix. */
19556 if (!has_0f_opcode || has_vex_w)
19557 return 3 + 1;
19559 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
19560 if (!TARGET_64BIT)
19561 return 2 + 1;
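  /* For example, in 64-bit code a DImode general-register operand (which
     implies REX.W) or a memory operand whose address uses r8-r15 or
     xmm8-xmm15 (REX.X/REX.B) forces the 3-byte C4 prefix below; otherwise
     the 2-byte C5 prefix is enough.  */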
19563 extract_insn_cached (insn);
19565 for (i = recog_data.n_operands - 1; i >= 0; --i)
19566 if (REG_P (recog_data.operand[i]))
19568 /* REX.W bit uses 3 byte VEX prefix. */
19569 if (GET_MODE (recog_data.operand[i]) == DImode
19570 && GENERAL_REG_P (recog_data.operand[i]))
19571 return 3 + 1;
19573 else
19575 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19576 if (MEM_P (recog_data.operand[i])
19577 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19578 return 3 + 1;
19581 return 2 + 1;
19584 /* Return the maximum number of instructions a cpu can issue. */
19586 static int
19587 ix86_issue_rate (void)
19589 switch (ix86_tune)
19591 case PROCESSOR_PENTIUM:
19592 case PROCESSOR_ATOM:
19593 case PROCESSOR_K6:
19594 return 2;
19596 case PROCESSOR_PENTIUMPRO:
19597 case PROCESSOR_PENTIUM4:
19598 case PROCESSOR_ATHLON:
19599 case PROCESSOR_K8:
19600 case PROCESSOR_AMDFAM10:
19601 case PROCESSOR_NOCONA:
19602 case PROCESSOR_GENERIC32:
19603 case PROCESSOR_GENERIC64:
19604 return 3;
19606 case PROCESSOR_CORE2:
19607 return 4;
19609 default:
19610 return 1;
19614 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
19615 by DEP_INSN and nothing else set by DEP_INSN. */
19617 static int
19618 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19620 rtx set, set2;
19622 /* Simplify the test for uninteresting insns. */
19623 if (insn_type != TYPE_SETCC
19624 && insn_type != TYPE_ICMOV
19625 && insn_type != TYPE_FCMOV
19626 && insn_type != TYPE_IBR)
19627 return 0;
19629 if ((set = single_set (dep_insn)) != 0)
19631 set = SET_DEST (set);
19632 set2 = NULL_RTX;
19634 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19635 && XVECLEN (PATTERN (dep_insn), 0) == 2
19636 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19637 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19639 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19640 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19642 else
19643 return 0;
19645 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19646 return 0;
19648 /* This test is true if the dependent insn reads the flags but
19649 not any other potentially set register. */
19650 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19651 return 0;
19653 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19654 return 0;
19656 return 1;
19659 /* Return true iff USE_INSN has a memory address with operands set by
19660 SET_INSN. */
19662 bool
19663 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19665 int i;
19666 extract_insn_cached (use_insn);
19667 for (i = recog_data.n_operands - 1; i >= 0; --i)
19668 if (MEM_P (recog_data.operand[i]))
19670 rtx addr = XEXP (recog_data.operand[i], 0);
19671 return modified_in_p (addr, set_insn) != 0;
19673 return false;
19676 static int
19677 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19679 enum attr_type insn_type, dep_insn_type;
19680 enum attr_memory memory;
19681 rtx set, set2;
19682 int dep_insn_code_number;
19684 /* Anti and output dependencies have zero cost on all CPUs. */
19685 if (REG_NOTE_KIND (link) != 0)
19686 return 0;
19688 dep_insn_code_number = recog_memoized (dep_insn);
19690 /* If we can't recognize the insns, we can't really do anything. */
19691 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19692 return cost;
19694 insn_type = get_attr_type (insn);
19695 dep_insn_type = get_attr_type (dep_insn);
19697 switch (ix86_tune)
19699 case PROCESSOR_PENTIUM:
19700 /* Address Generation Interlock adds a cycle of latency. */
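	/* For example, a load such as movl 4(%eax), %ebx issued right after
	   an instruction that writes %eax pays one extra cycle while the
	   address is formed.  */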
19701 if (insn_type == TYPE_LEA)
19703 rtx addr = PATTERN (insn);
19705 if (GET_CODE (addr) == PARALLEL)
19706 addr = XVECEXP (addr, 0, 0);
19708 gcc_assert (GET_CODE (addr) == SET);
19710 addr = SET_SRC (addr);
19711 if (modified_in_p (addr, dep_insn))
19712 cost += 1;
19714 else if (ix86_agi_dependent (dep_insn, insn))
19715 cost += 1;
19717 /* ??? Compares pair with jump/setcc. */
19718 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19719 cost = 0;
19721 /* Floating point stores require value to be ready one cycle earlier. */
19722 if (insn_type == TYPE_FMOV
19723 && get_attr_memory (insn) == MEMORY_STORE
19724 && !ix86_agi_dependent (dep_insn, insn))
19725 cost += 1;
19726 break;
19728 case PROCESSOR_PENTIUMPRO:
19729 memory = get_attr_memory (insn);
19731 /* INT->FP conversion is expensive. */
19732 if (get_attr_fp_int_src (dep_insn))
19733 cost += 5;
19735 /* There is one cycle extra latency between an FP op and a store. */
19736 if (insn_type == TYPE_FMOV
19737 && (set = single_set (dep_insn)) != NULL_RTX
19738 && (set2 = single_set (insn)) != NULL_RTX
19739 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19740 && MEM_P (SET_DEST (set2)))
19741 cost += 1;
19743 /* Show the ability of the reorder buffer to hide the latency of a load
19744 by executing it in parallel with the previous instruction when the
19745 previous instruction is not needed to compute the address. */
19746 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19747 && !ix86_agi_dependent (dep_insn, insn))
19749 /* Claim moves to take one cycle, as the core can issue one load
19750 at a time and the next load can start a cycle later. */
19751 if (dep_insn_type == TYPE_IMOV
19752 || dep_insn_type == TYPE_FMOV)
19753 cost = 1;
19754 else if (cost > 1)
19755 cost--;
19757 break;
19759 case PROCESSOR_K6:
19760 memory = get_attr_memory (insn);
19762 /* The esp dependency is resolved before the instruction is really
19763 finished. */
19764 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19765 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19766 return 1;
19768 /* INT->FP conversion is expensive. */
19769 if (get_attr_fp_int_src (dep_insn))
19770 cost += 5;
19772 /* Show the ability of the reorder buffer to hide the latency of a load
19773 by executing it in parallel with the previous instruction when the
19774 previous instruction is not needed to compute the address. */
19775 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19776 && !ix86_agi_dependent (dep_insn, insn))
19778 /* Claim moves to take one cycle, as the core can issue one load
19779 at a time and the next load can start a cycle later. */
19780 if (dep_insn_type == TYPE_IMOV
19781 || dep_insn_type == TYPE_FMOV)
19782 cost = 1;
19783 else if (cost > 2)
19784 cost -= 2;
19785 else
19786 cost = 1;
19788 break;
19790 case PROCESSOR_ATHLON:
19791 case PROCESSOR_K8:
19792 case PROCESSOR_AMDFAM10:
19793 case PROCESSOR_ATOM:
19794 case PROCESSOR_GENERIC32:
19795 case PROCESSOR_GENERIC64:
19796 memory = get_attr_memory (insn);
19798 /* Show the ability of the reorder buffer to hide the latency of a load
19799 by executing it in parallel with the previous instruction when the
19800 previous instruction is not needed to compute the address. */
19801 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19802 && !ix86_agi_dependent (dep_insn, insn))
19804 enum attr_unit unit = get_attr_unit (insn);
19805 int loadcost = 3;
19807 /* Because of the difference between the length of integer and
19808 floating unit pipeline preparation stages, the memory operands
19809 for floating point are cheaper.
19811 ??? For Athlon the difference is most probably 2. */
19812 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19813 loadcost = 3;
19814 else
19815 loadcost = TARGET_ATHLON ? 2 : 0;
19817 if (cost >= loadcost)
19818 cost -= loadcost;
19819 else
19820 cost = 0;
19823 default:
19824 break;
19827 return cost;
19830 /* How many alternative schedules to try. This should be as wide as the
19831 scheduling freedom in the DFA, but no wider. Making this value too
19832 large results in extra work for the scheduler. */
19834 static int
19835 ia32_multipass_dfa_lookahead (void)
19837 switch (ix86_tune)
19839 case PROCESSOR_PENTIUM:
19840 return 2;
19842 case PROCESSOR_PENTIUMPRO:
19843 case PROCESSOR_K6:
19844 return 1;
19846 default:
19847 return 0;
19852 /* Compute the alignment given to a constant that is being placed in memory.
19853 EXP is the constant and ALIGN is the alignment that the object would
19854 ordinarily have.
19855 The value of this function is used instead of that alignment to align
19856 the object. */
19859 ix86_constant_alignment (tree exp, int align)
19861 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19862 || TREE_CODE (exp) == INTEGER_CST)
19864 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19865 return 64;
19866 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19867 return 128;
19869 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19870 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19871 return BITS_PER_WORD;
19873 return align;
19876 /* Compute the alignment for a static variable.
19877 TYPE is the data type, and ALIGN is the alignment that
19878 the object would ordinarily have. The value of this function is used
19879 instead of that alignment to align the object. */
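/* For example, when not optimizing for size a 64-byte static array with
   default alignment is bumped to max_align below (256 bits, or less if
   MAX_OFILE_ALIGNMENT is smaller).  */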
19882 ix86_data_alignment (tree type, int align)
19884 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
19886 if (AGGREGATE_TYPE_P (type)
19887 && TYPE_SIZE (type)
19888 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19889 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
19890 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
19891 && align < max_align)
19892 align = max_align;
19894 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
19895 to a 16-byte boundary. */
19896 if (TARGET_64BIT)
19898 if (AGGREGATE_TYPE_P (type)
19899 && TYPE_SIZE (type)
19900 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19901 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
19902 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19903 return 128;
19906 if (TREE_CODE (type) == ARRAY_TYPE)
19908 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19909 return 64;
19910 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19911 return 128;
19913 else if (TREE_CODE (type) == COMPLEX_TYPE)
19916 if (TYPE_MODE (type) == DCmode && align < 64)
19917 return 64;
19918 if ((TYPE_MODE (type) == XCmode
19919 || TYPE_MODE (type) == TCmode) && align < 128)
19920 return 128;
19922 else if ((TREE_CODE (type) == RECORD_TYPE
19923 || TREE_CODE (type) == UNION_TYPE
19924 || TREE_CODE (type) == QUAL_UNION_TYPE)
19925 && TYPE_FIELDS (type))
19927 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
19928 return 64;
19929 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
19930 return 128;
19932 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
19933 || TREE_CODE (type) == INTEGER_TYPE)
19935 if (TYPE_MODE (type) == DFmode && align < 64)
19936 return 64;
19937 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
19938 return 128;
19941 return align;
19944 /* Compute the alignment for a local variable or a stack slot. EXP is
19945 the data type or decl itself, MODE is the widest mode available and
19946 ALIGN is the alignment that the object would ordinarily have. The
19947 value of this macro is used instead of that alignment to align the
19948 object. */
19950 unsigned int
19951 ix86_local_alignment (tree exp, enum machine_mode mode,
19952 unsigned int align)
19954 tree type, decl;
19956 if (exp && DECL_P (exp))
19958 type = TREE_TYPE (exp);
19959 decl = exp;
19961 else
19963 type = exp;
19964 decl = NULL;
19967 /* Don't do dynamic stack realignment for long long objects with
19968 -mpreferred-stack-boundary=2. */
19969 if (!TARGET_64BIT
19970 && align == 64
19971 && ix86_preferred_stack_boundary < 64
19972 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
19973 && (!type || !TYPE_USER_ALIGN (type))
19974 && (!decl || !DECL_USER_ALIGN (decl)))
19975 align = 32;
19977 /* If TYPE is NULL, we are allocating a stack slot for caller-save
19978 register in MODE. We will return the largest alignment of XF
19979 and DF. */
19980 if (!type)
19982 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
19983 align = GET_MODE_ALIGNMENT (DFmode);
19984 return align;
19987 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
19988 to a 16-byte boundary. */
19989 if (TARGET_64BIT)
19991 if (AGGREGATE_TYPE_P (type)
19992 && TYPE_SIZE (type)
19993 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19994 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
19995 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19996 return 128;
19998 if (TREE_CODE (type) == ARRAY_TYPE)
20000 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20001 return 64;
20002 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20003 return 128;
20005 else if (TREE_CODE (type) == COMPLEX_TYPE)
20007 if (TYPE_MODE (type) == DCmode && align < 64)
20008 return 64;
20009 if ((TYPE_MODE (type) == XCmode
20010 || TYPE_MODE (type) == TCmode) && align < 128)
20011 return 128;
20013 else if ((TREE_CODE (type) == RECORD_TYPE
20014 || TREE_CODE (type) == UNION_TYPE
20015 || TREE_CODE (type) == QUAL_UNION_TYPE)
20016 && TYPE_FIELDS (type))
20018 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20019 return 64;
20020 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20021 return 128;
20023 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20024 || TREE_CODE (type) == INTEGER_TYPE)
20027 if (TYPE_MODE (type) == DFmode && align < 64)
20028 return 64;
20029 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20030 return 128;
20032 return align;
20035 /* Compute the minimum required alignment for dynamic stack realignment
20036 purposes for a local variable, parameter or a stack slot. EXP is
20037 the data type or decl itself, MODE is its mode and ALIGN is the
20038 alignment that the object would ordinarily have. */
20040 unsigned int
20041 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20042 unsigned int align)
20044 tree type, decl;
20046 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20047 return align;
20049 if (exp && DECL_P (exp))
20051 type = TREE_TYPE (exp);
20052 decl = exp;
20054 else
20056 type = exp;
20057 decl = NULL;
20060 /* Don't do dynamic stack realignment for long long objects with
20061 -mpreferred-stack-boundary=2. */
20062 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20063 && (!type || !TYPE_USER_ALIGN (type))
20064 && (!decl || !DECL_USER_ALIGN (decl)))
20065 return 32;
20067 return align;
20070 /* Find a location for the static chain incoming to a nested function.
20071 This is a register, unless all free registers are used by arguments. */
20073 static rtx
20074 ix86_static_chain (const_tree fndecl, bool incoming_p)
20076 unsigned regno;
20078 if (!DECL_STATIC_CHAIN (fndecl))
20079 return NULL;
20081 if (TARGET_64BIT)
20083 /* We always use R10 in 64-bit mode. */
20084 regno = R10_REG;
20086 else
20088 tree fntype;
20089 /* By default in 32-bit mode we use ECX to pass the static chain. */
20090 regno = CX_REG;
20092 fntype = TREE_TYPE (fndecl);
20093 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20095 /* Fastcall functions use ecx/edx for arguments, which leaves
20096 us with EAX for the static chain. */
20097 regno = AX_REG;
20099 else if (ix86_function_regparm (fntype, fndecl) == 3)
20101 /* For regparm 3, we have no free call-clobbered registers in
20102 which to store the static chain. In order to implement this,
20103 we have the trampoline push the static chain to the stack.
20104 However, we can't push a value below the return address when
20105 we call the nested function directly, so we have to use an
20106 alternate entry point. For this we use ESI, and have the
20107 alternate entry point push ESI, so that things appear the
20108 same once we're executing the nested function. */
20109 if (incoming_p)
20111 if (fndecl == current_function_decl)
20112 ix86_static_chain_on_stack = true;
20113 return gen_frame_mem (SImode,
20114 plus_constant (arg_pointer_rtx, -8));
20116 regno = SI_REG;
20120 return gen_rtx_REG (Pmode, regno);
20123 /* Emit RTL insns to initialize the variable parts of a trampoline.
20124 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20125 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20126 to be passed to the target function. */
20128 static void
20129 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20131 rtx mem, fnaddr;
20133 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20135 if (!TARGET_64BIT)
20137 rtx disp, chain;
20138 int opcode;
20140 /* Depending on the static chain location, either load a register
20141 with a constant, or push the constant to the stack. All of the
20142 instructions are the same size. */
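	 /* Opcode bytes emitted below: 0xb9 = movl $imm32, %ecx;
	    0xb8 = movl $imm32, %eax; 0x68 = pushl $imm32;
	    0xe9 (written later) = jmp rel32.  */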
20143 chain = ix86_static_chain (fndecl, true);
20144 if (REG_P (chain))
20146 if (REGNO (chain) == CX_REG)
20147 opcode = 0xb9;
20148 else if (REGNO (chain) == AX_REG)
20149 opcode = 0xb8;
20150 else
20151 gcc_unreachable ();
20153 else
20154 opcode = 0x68;
20156 mem = adjust_address (m_tramp, QImode, 0);
20157 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20159 mem = adjust_address (m_tramp, SImode, 1);
20160 emit_move_insn (mem, chain_value);
20162 /* Compute offset from the end of the jmp to the target function.
20163 In the case in which the trampoline stores the static chain on
20164 the stack, we need to skip the first insn which pushes the
20165 (call-saved) register static chain; this push is 1 byte. */
20166 disp = expand_binop (SImode, sub_optab, fnaddr,
20167 plus_constant (XEXP (m_tramp, 0),
20168 MEM_P (chain) ? 9 : 10),
20169 NULL_RTX, 1, OPTAB_DIRECT);
20171 mem = adjust_address (m_tramp, QImode, 5);
20172 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20174 mem = adjust_address (m_tramp, SImode, 6);
20175 emit_move_insn (mem, disp);
20177 else
20179 int offset = 0;
20181 /* Load the function address into r11. Try to load the address using
20182 the shorter movl instead of movabs. We may want to support
20183 movq for kernel mode, but the kernel does not use trampolines at
20184 the moment. */
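	 /* The constants stored below are little-endian byte sequences:
	    41 bb imm32 = movl $imm32, %r11d; 49 bb imm64 = movabs $imm64, %r11;
	    49 ba imm64 = movabs $imm64, %r10; 49 ff e3 = jmpq *%r11;
	    90 = nop padding.  */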
20185 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20187 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20189 mem = adjust_address (m_tramp, HImode, offset);
20190 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20192 mem = adjust_address (m_tramp, SImode, offset + 2);
20193 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20194 offset += 6;
20196 else
20198 mem = adjust_address (m_tramp, HImode, offset);
20199 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20201 mem = adjust_address (m_tramp, DImode, offset + 2);
20202 emit_move_insn (mem, fnaddr);
20203 offset += 10;
20206 /* Load static chain using movabs to r10. */
20207 mem = adjust_address (m_tramp, HImode, offset);
20208 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20210 mem = adjust_address (m_tramp, DImode, offset + 2);
20211 emit_move_insn (mem, chain_value);
20212 offset += 10;
20214 /* Jump to r11; the last (unused) byte is a nop, only there to
20215 pad the write out to a single 32-bit store. */
20216 mem = adjust_address (m_tramp, SImode, offset);
20217 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
20218 offset += 4;
20220 gcc_assert (offset <= TRAMPOLINE_SIZE);
20223 #ifdef ENABLE_EXECUTE_STACK
20224 #ifdef CHECK_EXECUTE_STACK_ENABLED
20225 if (CHECK_EXECUTE_STACK_ENABLED)
20226 #endif
20227 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20228 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20229 #endif
20232 /* The following file contains several enumerations and data structures
20233 built from the definitions in i386-builtin-types.def. */
20235 #include "i386-builtin-types.inc"
20237 /* Table for the ix86 builtin non-function types. */
20238 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20240 /* Retrieve an element from the above table, building some of
20241 the types lazily. */
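/* For instance, the vector code for V4SF is built lazily here as
   build_vector_type_for_mode over its float element type and V4SFmode; the
   exact IX86_BT_* enumerator names are generated from
   i386-builtin-types.def.  */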
20243 static tree
20244 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20246 unsigned int index;
20247 tree type, itype;
20249 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
20251 type = ix86_builtin_type_tab[(int) tcode];
20252 if (type != NULL)
20253 return type;
20255 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20256 if (tcode <= IX86_BT_LAST_VECT)
20258 enum machine_mode mode;
20260 index = tcode - IX86_BT_LAST_PRIM - 1;
20261 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20262 mode = ix86_builtin_type_vect_mode[index];
20264 type = build_vector_type_for_mode (itype, mode);
20266 else
20268 int quals;
20270 index = tcode - IX86_BT_LAST_VECT - 1;
20271 if (tcode <= IX86_BT_LAST_PTR)
20272 quals = TYPE_UNQUALIFIED;
20273 else
20274 quals = TYPE_QUAL_CONST;
20276 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20277 if (quals != TYPE_UNQUALIFIED)
20278 itype = build_qualified_type (itype, quals);
20280 type = build_pointer_type (itype);
20283 ix86_builtin_type_tab[(int) tcode] = type;
20284 return type;
20287 /* Table for the ix86 builtin function types. */
20288 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20290 /* Retrieve an element from the above table, building some of
20291 the types lazily. */
20293 static tree
20294 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20296 tree type;
20298 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20300 type = ix86_builtin_func_type_tab[(int) tcode];
20301 if (type != NULL)
20302 return type;
20304 if (tcode <= IX86_BT_LAST_FUNC)
20306 unsigned start = ix86_builtin_func_start[(int) tcode];
20307 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20308 tree rtype, atype, args = void_list_node;
20309 unsigned i;
20311 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20312 for (i = after - 1; i > start; --i)
20314 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20315 args = tree_cons (NULL, atype, args);
20318 type = build_function_type (rtype, args);
20320 else
20322 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20323 enum ix86_builtin_func_type icode;
20325 icode = ix86_builtin_func_alias_base[index];
20326 type = ix86_get_builtin_func_type (icode);
20329 ix86_builtin_func_type_tab[(int) tcode] = type;
20330 return type;
20334 /* Codes for all the SSE/MMX builtins. */
20335 enum ix86_builtins
20337 IX86_BUILTIN_ADDPS,
20338 IX86_BUILTIN_ADDSS,
20339 IX86_BUILTIN_DIVPS,
20340 IX86_BUILTIN_DIVSS,
20341 IX86_BUILTIN_MULPS,
20342 IX86_BUILTIN_MULSS,
20343 IX86_BUILTIN_SUBPS,
20344 IX86_BUILTIN_SUBSS,
20346 IX86_BUILTIN_CMPEQPS,
20347 IX86_BUILTIN_CMPLTPS,
20348 IX86_BUILTIN_CMPLEPS,
20349 IX86_BUILTIN_CMPGTPS,
20350 IX86_BUILTIN_CMPGEPS,
20351 IX86_BUILTIN_CMPNEQPS,
20352 IX86_BUILTIN_CMPNLTPS,
20353 IX86_BUILTIN_CMPNLEPS,
20354 IX86_BUILTIN_CMPNGTPS,
20355 IX86_BUILTIN_CMPNGEPS,
20356 IX86_BUILTIN_CMPORDPS,
20357 IX86_BUILTIN_CMPUNORDPS,
20358 IX86_BUILTIN_CMPEQSS,
20359 IX86_BUILTIN_CMPLTSS,
20360 IX86_BUILTIN_CMPLESS,
20361 IX86_BUILTIN_CMPNEQSS,
20362 IX86_BUILTIN_CMPNLTSS,
20363 IX86_BUILTIN_CMPNLESS,
20364 IX86_BUILTIN_CMPNGTSS,
20365 IX86_BUILTIN_CMPNGESS,
20366 IX86_BUILTIN_CMPORDSS,
20367 IX86_BUILTIN_CMPUNORDSS,
20369 IX86_BUILTIN_COMIEQSS,
20370 IX86_BUILTIN_COMILTSS,
20371 IX86_BUILTIN_COMILESS,
20372 IX86_BUILTIN_COMIGTSS,
20373 IX86_BUILTIN_COMIGESS,
20374 IX86_BUILTIN_COMINEQSS,
20375 IX86_BUILTIN_UCOMIEQSS,
20376 IX86_BUILTIN_UCOMILTSS,
20377 IX86_BUILTIN_UCOMILESS,
20378 IX86_BUILTIN_UCOMIGTSS,
20379 IX86_BUILTIN_UCOMIGESS,
20380 IX86_BUILTIN_UCOMINEQSS,
20382 IX86_BUILTIN_CVTPI2PS,
20383 IX86_BUILTIN_CVTPS2PI,
20384 IX86_BUILTIN_CVTSI2SS,
20385 IX86_BUILTIN_CVTSI642SS,
20386 IX86_BUILTIN_CVTSS2SI,
20387 IX86_BUILTIN_CVTSS2SI64,
20388 IX86_BUILTIN_CVTTPS2PI,
20389 IX86_BUILTIN_CVTTSS2SI,
20390 IX86_BUILTIN_CVTTSS2SI64,
20392 IX86_BUILTIN_MAXPS,
20393 IX86_BUILTIN_MAXSS,
20394 IX86_BUILTIN_MINPS,
20395 IX86_BUILTIN_MINSS,
20397 IX86_BUILTIN_LOADUPS,
20398 IX86_BUILTIN_STOREUPS,
20399 IX86_BUILTIN_MOVSS,
20401 IX86_BUILTIN_MOVHLPS,
20402 IX86_BUILTIN_MOVLHPS,
20403 IX86_BUILTIN_LOADHPS,
20404 IX86_BUILTIN_LOADLPS,
20405 IX86_BUILTIN_STOREHPS,
20406 IX86_BUILTIN_STORELPS,
20408 IX86_BUILTIN_MASKMOVQ,
20409 IX86_BUILTIN_MOVMSKPS,
20410 IX86_BUILTIN_PMOVMSKB,
20412 IX86_BUILTIN_MOVNTPS,
20413 IX86_BUILTIN_MOVNTQ,
20415 IX86_BUILTIN_LOADDQU,
20416 IX86_BUILTIN_STOREDQU,
20418 IX86_BUILTIN_PACKSSWB,
20419 IX86_BUILTIN_PACKSSDW,
20420 IX86_BUILTIN_PACKUSWB,
20422 IX86_BUILTIN_PADDB,
20423 IX86_BUILTIN_PADDW,
20424 IX86_BUILTIN_PADDD,
20425 IX86_BUILTIN_PADDQ,
20426 IX86_BUILTIN_PADDSB,
20427 IX86_BUILTIN_PADDSW,
20428 IX86_BUILTIN_PADDUSB,
20429 IX86_BUILTIN_PADDUSW,
20430 IX86_BUILTIN_PSUBB,
20431 IX86_BUILTIN_PSUBW,
20432 IX86_BUILTIN_PSUBD,
20433 IX86_BUILTIN_PSUBQ,
20434 IX86_BUILTIN_PSUBSB,
20435 IX86_BUILTIN_PSUBSW,
20436 IX86_BUILTIN_PSUBUSB,
20437 IX86_BUILTIN_PSUBUSW,
20439 IX86_BUILTIN_PAND,
20440 IX86_BUILTIN_PANDN,
20441 IX86_BUILTIN_POR,
20442 IX86_BUILTIN_PXOR,
20444 IX86_BUILTIN_PAVGB,
20445 IX86_BUILTIN_PAVGW,
20447 IX86_BUILTIN_PCMPEQB,
20448 IX86_BUILTIN_PCMPEQW,
20449 IX86_BUILTIN_PCMPEQD,
20450 IX86_BUILTIN_PCMPGTB,
20451 IX86_BUILTIN_PCMPGTW,
20452 IX86_BUILTIN_PCMPGTD,
20454 IX86_BUILTIN_PMADDWD,
20456 IX86_BUILTIN_PMAXSW,
20457 IX86_BUILTIN_PMAXUB,
20458 IX86_BUILTIN_PMINSW,
20459 IX86_BUILTIN_PMINUB,
20461 IX86_BUILTIN_PMULHUW,
20462 IX86_BUILTIN_PMULHW,
20463 IX86_BUILTIN_PMULLW,
20465 IX86_BUILTIN_PSADBW,
20466 IX86_BUILTIN_PSHUFW,
20468 IX86_BUILTIN_PSLLW,
20469 IX86_BUILTIN_PSLLD,
20470 IX86_BUILTIN_PSLLQ,
20471 IX86_BUILTIN_PSRAW,
20472 IX86_BUILTIN_PSRAD,
20473 IX86_BUILTIN_PSRLW,
20474 IX86_BUILTIN_PSRLD,
20475 IX86_BUILTIN_PSRLQ,
20476 IX86_BUILTIN_PSLLWI,
20477 IX86_BUILTIN_PSLLDI,
20478 IX86_BUILTIN_PSLLQI,
20479 IX86_BUILTIN_PSRAWI,
20480 IX86_BUILTIN_PSRADI,
20481 IX86_BUILTIN_PSRLWI,
20482 IX86_BUILTIN_PSRLDI,
20483 IX86_BUILTIN_PSRLQI,
20485 IX86_BUILTIN_PUNPCKHBW,
20486 IX86_BUILTIN_PUNPCKHWD,
20487 IX86_BUILTIN_PUNPCKHDQ,
20488 IX86_BUILTIN_PUNPCKLBW,
20489 IX86_BUILTIN_PUNPCKLWD,
20490 IX86_BUILTIN_PUNPCKLDQ,
20492 IX86_BUILTIN_SHUFPS,
20494 IX86_BUILTIN_RCPPS,
20495 IX86_BUILTIN_RCPSS,
20496 IX86_BUILTIN_RSQRTPS,
20497 IX86_BUILTIN_RSQRTPS_NR,
20498 IX86_BUILTIN_RSQRTSS,
20499 IX86_BUILTIN_RSQRTF,
20500 IX86_BUILTIN_SQRTPS,
20501 IX86_BUILTIN_SQRTPS_NR,
20502 IX86_BUILTIN_SQRTSS,
20504 IX86_BUILTIN_UNPCKHPS,
20505 IX86_BUILTIN_UNPCKLPS,
20507 IX86_BUILTIN_ANDPS,
20508 IX86_BUILTIN_ANDNPS,
20509 IX86_BUILTIN_ORPS,
20510 IX86_BUILTIN_XORPS,
20512 IX86_BUILTIN_EMMS,
20513 IX86_BUILTIN_LDMXCSR,
20514 IX86_BUILTIN_STMXCSR,
20515 IX86_BUILTIN_SFENCE,
20517 /* 3DNow! Original */
20518 IX86_BUILTIN_FEMMS,
20519 IX86_BUILTIN_PAVGUSB,
20520 IX86_BUILTIN_PF2ID,
20521 IX86_BUILTIN_PFACC,
20522 IX86_BUILTIN_PFADD,
20523 IX86_BUILTIN_PFCMPEQ,
20524 IX86_BUILTIN_PFCMPGE,
20525 IX86_BUILTIN_PFCMPGT,
20526 IX86_BUILTIN_PFMAX,
20527 IX86_BUILTIN_PFMIN,
20528 IX86_BUILTIN_PFMUL,
20529 IX86_BUILTIN_PFRCP,
20530 IX86_BUILTIN_PFRCPIT1,
20531 IX86_BUILTIN_PFRCPIT2,
20532 IX86_BUILTIN_PFRSQIT1,
20533 IX86_BUILTIN_PFRSQRT,
20534 IX86_BUILTIN_PFSUB,
20535 IX86_BUILTIN_PFSUBR,
20536 IX86_BUILTIN_PI2FD,
20537 IX86_BUILTIN_PMULHRW,
20539 /* 3DNow! Athlon Extensions */
20540 IX86_BUILTIN_PF2IW,
20541 IX86_BUILTIN_PFNACC,
20542 IX86_BUILTIN_PFPNACC,
20543 IX86_BUILTIN_PI2FW,
20544 IX86_BUILTIN_PSWAPDSI,
20545 IX86_BUILTIN_PSWAPDSF,
20547 /* SSE2 */
20548 IX86_BUILTIN_ADDPD,
20549 IX86_BUILTIN_ADDSD,
20550 IX86_BUILTIN_DIVPD,
20551 IX86_BUILTIN_DIVSD,
20552 IX86_BUILTIN_MULPD,
20553 IX86_BUILTIN_MULSD,
20554 IX86_BUILTIN_SUBPD,
20555 IX86_BUILTIN_SUBSD,
20557 IX86_BUILTIN_CMPEQPD,
20558 IX86_BUILTIN_CMPLTPD,
20559 IX86_BUILTIN_CMPLEPD,
20560 IX86_BUILTIN_CMPGTPD,
20561 IX86_BUILTIN_CMPGEPD,
20562 IX86_BUILTIN_CMPNEQPD,
20563 IX86_BUILTIN_CMPNLTPD,
20564 IX86_BUILTIN_CMPNLEPD,
20565 IX86_BUILTIN_CMPNGTPD,
20566 IX86_BUILTIN_CMPNGEPD,
20567 IX86_BUILTIN_CMPORDPD,
20568 IX86_BUILTIN_CMPUNORDPD,
20569 IX86_BUILTIN_CMPEQSD,
20570 IX86_BUILTIN_CMPLTSD,
20571 IX86_BUILTIN_CMPLESD,
20572 IX86_BUILTIN_CMPNEQSD,
20573 IX86_BUILTIN_CMPNLTSD,
20574 IX86_BUILTIN_CMPNLESD,
20575 IX86_BUILTIN_CMPORDSD,
20576 IX86_BUILTIN_CMPUNORDSD,
20578 IX86_BUILTIN_COMIEQSD,
20579 IX86_BUILTIN_COMILTSD,
20580 IX86_BUILTIN_COMILESD,
20581 IX86_BUILTIN_COMIGTSD,
20582 IX86_BUILTIN_COMIGESD,
20583 IX86_BUILTIN_COMINEQSD,
20584 IX86_BUILTIN_UCOMIEQSD,
20585 IX86_BUILTIN_UCOMILTSD,
20586 IX86_BUILTIN_UCOMILESD,
20587 IX86_BUILTIN_UCOMIGTSD,
20588 IX86_BUILTIN_UCOMIGESD,
20589 IX86_BUILTIN_UCOMINEQSD,
20591 IX86_BUILTIN_MAXPD,
20592 IX86_BUILTIN_MAXSD,
20593 IX86_BUILTIN_MINPD,
20594 IX86_BUILTIN_MINSD,
20596 IX86_BUILTIN_ANDPD,
20597 IX86_BUILTIN_ANDNPD,
20598 IX86_BUILTIN_ORPD,
20599 IX86_BUILTIN_XORPD,
20601 IX86_BUILTIN_SQRTPD,
20602 IX86_BUILTIN_SQRTSD,
20604 IX86_BUILTIN_UNPCKHPD,
20605 IX86_BUILTIN_UNPCKLPD,
20607 IX86_BUILTIN_SHUFPD,
20609 IX86_BUILTIN_LOADUPD,
20610 IX86_BUILTIN_STOREUPD,
20611 IX86_BUILTIN_MOVSD,
20613 IX86_BUILTIN_LOADHPD,
20614 IX86_BUILTIN_LOADLPD,
20616 IX86_BUILTIN_CVTDQ2PD,
20617 IX86_BUILTIN_CVTDQ2PS,
20619 IX86_BUILTIN_CVTPD2DQ,
20620 IX86_BUILTIN_CVTPD2PI,
20621 IX86_BUILTIN_CVTPD2PS,
20622 IX86_BUILTIN_CVTTPD2DQ,
20623 IX86_BUILTIN_CVTTPD2PI,
20625 IX86_BUILTIN_CVTPI2PD,
20626 IX86_BUILTIN_CVTSI2SD,
20627 IX86_BUILTIN_CVTSI642SD,
20629 IX86_BUILTIN_CVTSD2SI,
20630 IX86_BUILTIN_CVTSD2SI64,
20631 IX86_BUILTIN_CVTSD2SS,
20632 IX86_BUILTIN_CVTSS2SD,
20633 IX86_BUILTIN_CVTTSD2SI,
20634 IX86_BUILTIN_CVTTSD2SI64,
20636 IX86_BUILTIN_CVTPS2DQ,
20637 IX86_BUILTIN_CVTPS2PD,
20638 IX86_BUILTIN_CVTTPS2DQ,
20640 IX86_BUILTIN_MOVNTI,
20641 IX86_BUILTIN_MOVNTPD,
20642 IX86_BUILTIN_MOVNTDQ,
20644 IX86_BUILTIN_MOVQ128,
20646 /* SSE2 MMX */
20647 IX86_BUILTIN_MASKMOVDQU,
20648 IX86_BUILTIN_MOVMSKPD,
20649 IX86_BUILTIN_PMOVMSKB128,
20651 IX86_BUILTIN_PACKSSWB128,
20652 IX86_BUILTIN_PACKSSDW128,
20653 IX86_BUILTIN_PACKUSWB128,
20655 IX86_BUILTIN_PADDB128,
20656 IX86_BUILTIN_PADDW128,
20657 IX86_BUILTIN_PADDD128,
20658 IX86_BUILTIN_PADDQ128,
20659 IX86_BUILTIN_PADDSB128,
20660 IX86_BUILTIN_PADDSW128,
20661 IX86_BUILTIN_PADDUSB128,
20662 IX86_BUILTIN_PADDUSW128,
20663 IX86_BUILTIN_PSUBB128,
20664 IX86_BUILTIN_PSUBW128,
20665 IX86_BUILTIN_PSUBD128,
20666 IX86_BUILTIN_PSUBQ128,
20667 IX86_BUILTIN_PSUBSB128,
20668 IX86_BUILTIN_PSUBSW128,
20669 IX86_BUILTIN_PSUBUSB128,
20670 IX86_BUILTIN_PSUBUSW128,
20672 IX86_BUILTIN_PAND128,
20673 IX86_BUILTIN_PANDN128,
20674 IX86_BUILTIN_POR128,
20675 IX86_BUILTIN_PXOR128,
20677 IX86_BUILTIN_PAVGB128,
20678 IX86_BUILTIN_PAVGW128,
20680 IX86_BUILTIN_PCMPEQB128,
20681 IX86_BUILTIN_PCMPEQW128,
20682 IX86_BUILTIN_PCMPEQD128,
20683 IX86_BUILTIN_PCMPGTB128,
20684 IX86_BUILTIN_PCMPGTW128,
20685 IX86_BUILTIN_PCMPGTD128,
20687 IX86_BUILTIN_PMADDWD128,
20689 IX86_BUILTIN_PMAXSW128,
20690 IX86_BUILTIN_PMAXUB128,
20691 IX86_BUILTIN_PMINSW128,
20692 IX86_BUILTIN_PMINUB128,
20694 IX86_BUILTIN_PMULUDQ,
20695 IX86_BUILTIN_PMULUDQ128,
20696 IX86_BUILTIN_PMULHUW128,
20697 IX86_BUILTIN_PMULHW128,
20698 IX86_BUILTIN_PMULLW128,
20700 IX86_BUILTIN_PSADBW128,
20701 IX86_BUILTIN_PSHUFHW,
20702 IX86_BUILTIN_PSHUFLW,
20703 IX86_BUILTIN_PSHUFD,
20705 IX86_BUILTIN_PSLLDQI128,
20706 IX86_BUILTIN_PSLLWI128,
20707 IX86_BUILTIN_PSLLDI128,
20708 IX86_BUILTIN_PSLLQI128,
20709 IX86_BUILTIN_PSRAWI128,
20710 IX86_BUILTIN_PSRADI128,
20711 IX86_BUILTIN_PSRLDQI128,
20712 IX86_BUILTIN_PSRLWI128,
20713 IX86_BUILTIN_PSRLDI128,
20714 IX86_BUILTIN_PSRLQI128,
20716 IX86_BUILTIN_PSLLDQ128,
20717 IX86_BUILTIN_PSLLW128,
20718 IX86_BUILTIN_PSLLD128,
20719 IX86_BUILTIN_PSLLQ128,
20720 IX86_BUILTIN_PSRAW128,
20721 IX86_BUILTIN_PSRAD128,
20722 IX86_BUILTIN_PSRLW128,
20723 IX86_BUILTIN_PSRLD128,
20724 IX86_BUILTIN_PSRLQ128,
20726 IX86_BUILTIN_PUNPCKHBW128,
20727 IX86_BUILTIN_PUNPCKHWD128,
20728 IX86_BUILTIN_PUNPCKHDQ128,
20729 IX86_BUILTIN_PUNPCKHQDQ128,
20730 IX86_BUILTIN_PUNPCKLBW128,
20731 IX86_BUILTIN_PUNPCKLWD128,
20732 IX86_BUILTIN_PUNPCKLDQ128,
20733 IX86_BUILTIN_PUNPCKLQDQ128,
20735 IX86_BUILTIN_CLFLUSH,
20736 IX86_BUILTIN_MFENCE,
20737 IX86_BUILTIN_LFENCE,
20739 IX86_BUILTIN_BSRSI,
20740 IX86_BUILTIN_BSRDI,
20741 IX86_BUILTIN_RDPMC,
20742 IX86_BUILTIN_RDTSC,
20743 IX86_BUILTIN_RDTSCP,
20744 IX86_BUILTIN_ROLQI,
20745 IX86_BUILTIN_ROLHI,
20746 IX86_BUILTIN_RORQI,
20747 IX86_BUILTIN_RORHI,
20749 /* SSE3. */
20750 IX86_BUILTIN_ADDSUBPS,
20751 IX86_BUILTIN_HADDPS,
20752 IX86_BUILTIN_HSUBPS,
20753 IX86_BUILTIN_MOVSHDUP,
20754 IX86_BUILTIN_MOVSLDUP,
20755 IX86_BUILTIN_ADDSUBPD,
20756 IX86_BUILTIN_HADDPD,
20757 IX86_BUILTIN_HSUBPD,
20758 IX86_BUILTIN_LDDQU,
20760 IX86_BUILTIN_MONITOR,
20761 IX86_BUILTIN_MWAIT,
20763 /* SSSE3. */
20764 IX86_BUILTIN_PHADDW,
20765 IX86_BUILTIN_PHADDD,
20766 IX86_BUILTIN_PHADDSW,
20767 IX86_BUILTIN_PHSUBW,
20768 IX86_BUILTIN_PHSUBD,
20769 IX86_BUILTIN_PHSUBSW,
20770 IX86_BUILTIN_PMADDUBSW,
20771 IX86_BUILTIN_PMULHRSW,
20772 IX86_BUILTIN_PSHUFB,
20773 IX86_BUILTIN_PSIGNB,
20774 IX86_BUILTIN_PSIGNW,
20775 IX86_BUILTIN_PSIGND,
20776 IX86_BUILTIN_PALIGNR,
20777 IX86_BUILTIN_PABSB,
20778 IX86_BUILTIN_PABSW,
20779 IX86_BUILTIN_PABSD,
20781 IX86_BUILTIN_PHADDW128,
20782 IX86_BUILTIN_PHADDD128,
20783 IX86_BUILTIN_PHADDSW128,
20784 IX86_BUILTIN_PHSUBW128,
20785 IX86_BUILTIN_PHSUBD128,
20786 IX86_BUILTIN_PHSUBSW128,
20787 IX86_BUILTIN_PMADDUBSW128,
20788 IX86_BUILTIN_PMULHRSW128,
20789 IX86_BUILTIN_PSHUFB128,
20790 IX86_BUILTIN_PSIGNB128,
20791 IX86_BUILTIN_PSIGNW128,
20792 IX86_BUILTIN_PSIGND128,
20793 IX86_BUILTIN_PALIGNR128,
20794 IX86_BUILTIN_PABSB128,
20795 IX86_BUILTIN_PABSW128,
20796 IX86_BUILTIN_PABSD128,
20798 /* AMDFAM10 - SSE4A New Instructions. */
20799 IX86_BUILTIN_MOVNTSD,
20800 IX86_BUILTIN_MOVNTSS,
20801 IX86_BUILTIN_EXTRQI,
20802 IX86_BUILTIN_EXTRQ,
20803 IX86_BUILTIN_INSERTQI,
20804 IX86_BUILTIN_INSERTQ,
20806 /* SSE4.1. */
20807 IX86_BUILTIN_BLENDPD,
20808 IX86_BUILTIN_BLENDPS,
20809 IX86_BUILTIN_BLENDVPD,
20810 IX86_BUILTIN_BLENDVPS,
20811 IX86_BUILTIN_PBLENDVB128,
20812 IX86_BUILTIN_PBLENDW128,
20814 IX86_BUILTIN_DPPD,
20815 IX86_BUILTIN_DPPS,
20817 IX86_BUILTIN_INSERTPS128,
20819 IX86_BUILTIN_MOVNTDQA,
20820 IX86_BUILTIN_MPSADBW128,
20821 IX86_BUILTIN_PACKUSDW128,
20822 IX86_BUILTIN_PCMPEQQ,
20823 IX86_BUILTIN_PHMINPOSUW128,
20825 IX86_BUILTIN_PMAXSB128,
20826 IX86_BUILTIN_PMAXSD128,
20827 IX86_BUILTIN_PMAXUD128,
20828 IX86_BUILTIN_PMAXUW128,
20830 IX86_BUILTIN_PMINSB128,
20831 IX86_BUILTIN_PMINSD128,
20832 IX86_BUILTIN_PMINUD128,
20833 IX86_BUILTIN_PMINUW128,
20835 IX86_BUILTIN_PMOVSXBW128,
20836 IX86_BUILTIN_PMOVSXBD128,
20837 IX86_BUILTIN_PMOVSXBQ128,
20838 IX86_BUILTIN_PMOVSXWD128,
20839 IX86_BUILTIN_PMOVSXWQ128,
20840 IX86_BUILTIN_PMOVSXDQ128,
20842 IX86_BUILTIN_PMOVZXBW128,
20843 IX86_BUILTIN_PMOVZXBD128,
20844 IX86_BUILTIN_PMOVZXBQ128,
20845 IX86_BUILTIN_PMOVZXWD128,
20846 IX86_BUILTIN_PMOVZXWQ128,
20847 IX86_BUILTIN_PMOVZXDQ128,
20849 IX86_BUILTIN_PMULDQ128,
20850 IX86_BUILTIN_PMULLD128,
20852 IX86_BUILTIN_ROUNDPD,
20853 IX86_BUILTIN_ROUNDPS,
20854 IX86_BUILTIN_ROUNDSD,
20855 IX86_BUILTIN_ROUNDSS,
20857 IX86_BUILTIN_PTESTZ,
20858 IX86_BUILTIN_PTESTC,
20859 IX86_BUILTIN_PTESTNZC,
20861 IX86_BUILTIN_VEC_INIT_V2SI,
20862 IX86_BUILTIN_VEC_INIT_V4HI,
20863 IX86_BUILTIN_VEC_INIT_V8QI,
20864 IX86_BUILTIN_VEC_EXT_V2DF,
20865 IX86_BUILTIN_VEC_EXT_V2DI,
20866 IX86_BUILTIN_VEC_EXT_V4SF,
20867 IX86_BUILTIN_VEC_EXT_V4SI,
20868 IX86_BUILTIN_VEC_EXT_V8HI,
20869 IX86_BUILTIN_VEC_EXT_V2SI,
20870 IX86_BUILTIN_VEC_EXT_V4HI,
20871 IX86_BUILTIN_VEC_EXT_V16QI,
20872 IX86_BUILTIN_VEC_SET_V2DI,
20873 IX86_BUILTIN_VEC_SET_V4SF,
20874 IX86_BUILTIN_VEC_SET_V4SI,
20875 IX86_BUILTIN_VEC_SET_V8HI,
20876 IX86_BUILTIN_VEC_SET_V4HI,
20877 IX86_BUILTIN_VEC_SET_V16QI,
20879 IX86_BUILTIN_VEC_PACK_SFIX,
20881 /* SSE4.2. */
20882 IX86_BUILTIN_CRC32QI,
20883 IX86_BUILTIN_CRC32HI,
20884 IX86_BUILTIN_CRC32SI,
20885 IX86_BUILTIN_CRC32DI,
20887 IX86_BUILTIN_PCMPESTRI128,
20888 IX86_BUILTIN_PCMPESTRM128,
20889 IX86_BUILTIN_PCMPESTRA128,
20890 IX86_BUILTIN_PCMPESTRC128,
20891 IX86_BUILTIN_PCMPESTRO128,
20892 IX86_BUILTIN_PCMPESTRS128,
20893 IX86_BUILTIN_PCMPESTRZ128,
20894 IX86_BUILTIN_PCMPISTRI128,
20895 IX86_BUILTIN_PCMPISTRM128,
20896 IX86_BUILTIN_PCMPISTRA128,
20897 IX86_BUILTIN_PCMPISTRC128,
20898 IX86_BUILTIN_PCMPISTRO128,
20899 IX86_BUILTIN_PCMPISTRS128,
20900 IX86_BUILTIN_PCMPISTRZ128,
20902 IX86_BUILTIN_PCMPGTQ,
20904 /* AES instructions */
20905 IX86_BUILTIN_AESENC128,
20906 IX86_BUILTIN_AESENCLAST128,
20907 IX86_BUILTIN_AESDEC128,
20908 IX86_BUILTIN_AESDECLAST128,
20909 IX86_BUILTIN_AESIMC128,
20910 IX86_BUILTIN_AESKEYGENASSIST128,
20912 /* PCLMUL instruction */
20913 IX86_BUILTIN_PCLMULQDQ128,
20915 /* AVX */
20916 IX86_BUILTIN_ADDPD256,
20917 IX86_BUILTIN_ADDPS256,
20918 IX86_BUILTIN_ADDSUBPD256,
20919 IX86_BUILTIN_ADDSUBPS256,
20920 IX86_BUILTIN_ANDPD256,
20921 IX86_BUILTIN_ANDPS256,
20922 IX86_BUILTIN_ANDNPD256,
20923 IX86_BUILTIN_ANDNPS256,
20924 IX86_BUILTIN_BLENDPD256,
20925 IX86_BUILTIN_BLENDPS256,
20926 IX86_BUILTIN_BLENDVPD256,
20927 IX86_BUILTIN_BLENDVPS256,
20928 IX86_BUILTIN_DIVPD256,
20929 IX86_BUILTIN_DIVPS256,
20930 IX86_BUILTIN_DPPS256,
20931 IX86_BUILTIN_HADDPD256,
20932 IX86_BUILTIN_HADDPS256,
20933 IX86_BUILTIN_HSUBPD256,
20934 IX86_BUILTIN_HSUBPS256,
20935 IX86_BUILTIN_MAXPD256,
20936 IX86_BUILTIN_MAXPS256,
20937 IX86_BUILTIN_MINPD256,
20938 IX86_BUILTIN_MINPS256,
20939 IX86_BUILTIN_MULPD256,
20940 IX86_BUILTIN_MULPS256,
20941 IX86_BUILTIN_ORPD256,
20942 IX86_BUILTIN_ORPS256,
20943 IX86_BUILTIN_SHUFPD256,
20944 IX86_BUILTIN_SHUFPS256,
20945 IX86_BUILTIN_SUBPD256,
20946 IX86_BUILTIN_SUBPS256,
20947 IX86_BUILTIN_XORPD256,
20948 IX86_BUILTIN_XORPS256,
20949 IX86_BUILTIN_CMPSD,
20950 IX86_BUILTIN_CMPSS,
20951 IX86_BUILTIN_CMPPD,
20952 IX86_BUILTIN_CMPPS,
20953 IX86_BUILTIN_CMPPD256,
20954 IX86_BUILTIN_CMPPS256,
20955 IX86_BUILTIN_CVTDQ2PD256,
20956 IX86_BUILTIN_CVTDQ2PS256,
20957 IX86_BUILTIN_CVTPD2PS256,
20958 IX86_BUILTIN_CVTPS2DQ256,
20959 IX86_BUILTIN_CVTPS2PD256,
20960 IX86_BUILTIN_CVTTPD2DQ256,
20961 IX86_BUILTIN_CVTPD2DQ256,
20962 IX86_BUILTIN_CVTTPS2DQ256,
20963 IX86_BUILTIN_EXTRACTF128PD256,
20964 IX86_BUILTIN_EXTRACTF128PS256,
20965 IX86_BUILTIN_EXTRACTF128SI256,
20966 IX86_BUILTIN_VZEROALL,
20967 IX86_BUILTIN_VZEROUPPER,
20968 IX86_BUILTIN_VPERMILVARPD,
20969 IX86_BUILTIN_VPERMILVARPS,
20970 IX86_BUILTIN_VPERMILVARPD256,
20971 IX86_BUILTIN_VPERMILVARPS256,
20972 IX86_BUILTIN_VPERMILPD,
20973 IX86_BUILTIN_VPERMILPS,
20974 IX86_BUILTIN_VPERMILPD256,
20975 IX86_BUILTIN_VPERMILPS256,
20976 IX86_BUILTIN_VPERM2F128PD256,
20977 IX86_BUILTIN_VPERM2F128PS256,
20978 IX86_BUILTIN_VPERM2F128SI256,
20979 IX86_BUILTIN_VBROADCASTSS,
20980 IX86_BUILTIN_VBROADCASTSD256,
20981 IX86_BUILTIN_VBROADCASTSS256,
20982 IX86_BUILTIN_VBROADCASTPD256,
20983 IX86_BUILTIN_VBROADCASTPS256,
20984 IX86_BUILTIN_VINSERTF128PD256,
20985 IX86_BUILTIN_VINSERTF128PS256,
20986 IX86_BUILTIN_VINSERTF128SI256,
20987 IX86_BUILTIN_LOADUPD256,
20988 IX86_BUILTIN_LOADUPS256,
20989 IX86_BUILTIN_STOREUPD256,
20990 IX86_BUILTIN_STOREUPS256,
20991 IX86_BUILTIN_LDDQU256,
20992 IX86_BUILTIN_MOVNTDQ256,
20993 IX86_BUILTIN_MOVNTPD256,
20994 IX86_BUILTIN_MOVNTPS256,
20995 IX86_BUILTIN_LOADDQU256,
20996 IX86_BUILTIN_STOREDQU256,
20997 IX86_BUILTIN_MASKLOADPD,
20998 IX86_BUILTIN_MASKLOADPS,
20999 IX86_BUILTIN_MASKSTOREPD,
21000 IX86_BUILTIN_MASKSTOREPS,
21001 IX86_BUILTIN_MASKLOADPD256,
21002 IX86_BUILTIN_MASKLOADPS256,
21003 IX86_BUILTIN_MASKSTOREPD256,
21004 IX86_BUILTIN_MASKSTOREPS256,
21005 IX86_BUILTIN_MOVSHDUP256,
21006 IX86_BUILTIN_MOVSLDUP256,
21007 IX86_BUILTIN_MOVDDUP256,
21009 IX86_BUILTIN_SQRTPD256,
21010 IX86_BUILTIN_SQRTPS256,
21011 IX86_BUILTIN_SQRTPS_NR256,
21012 IX86_BUILTIN_RSQRTPS256,
21013 IX86_BUILTIN_RSQRTPS_NR256,
21015 IX86_BUILTIN_RCPPS256,
21017 IX86_BUILTIN_ROUNDPD256,
21018 IX86_BUILTIN_ROUNDPS256,
21020 IX86_BUILTIN_UNPCKHPD256,
21021 IX86_BUILTIN_UNPCKLPD256,
21022 IX86_BUILTIN_UNPCKHPS256,
21023 IX86_BUILTIN_UNPCKLPS256,
21025 IX86_BUILTIN_SI256_SI,
21026 IX86_BUILTIN_PS256_PS,
21027 IX86_BUILTIN_PD256_PD,
21028 IX86_BUILTIN_SI_SI256,
21029 IX86_BUILTIN_PS_PS256,
21030 IX86_BUILTIN_PD_PD256,
21032 IX86_BUILTIN_VTESTZPD,
21033 IX86_BUILTIN_VTESTCPD,
21034 IX86_BUILTIN_VTESTNZCPD,
21035 IX86_BUILTIN_VTESTZPS,
21036 IX86_BUILTIN_VTESTCPS,
21037 IX86_BUILTIN_VTESTNZCPS,
21038 IX86_BUILTIN_VTESTZPD256,
21039 IX86_BUILTIN_VTESTCPD256,
21040 IX86_BUILTIN_VTESTNZCPD256,
21041 IX86_BUILTIN_VTESTZPS256,
21042 IX86_BUILTIN_VTESTCPS256,
21043 IX86_BUILTIN_VTESTNZCPS256,
21044 IX86_BUILTIN_PTESTZ256,
21045 IX86_BUILTIN_PTESTC256,
21046 IX86_BUILTIN_PTESTNZC256,
21048 IX86_BUILTIN_MOVMSKPD256,
21049 IX86_BUILTIN_MOVMSKPS256,
21051 /* TFmode support builtins. */
21052 IX86_BUILTIN_INFQ,
21053 IX86_BUILTIN_HUGE_VALQ,
21054 IX86_BUILTIN_FABSQ,
21055 IX86_BUILTIN_COPYSIGNQ,
21057 /* Vectorizer support builtins. */
21058 IX86_BUILTIN_CPYSGNPS,
21059 IX86_BUILTIN_CPYSGNPD,
21061 IX86_BUILTIN_CVTUDQ2PS,
21063 IX86_BUILTIN_VEC_PERM_V2DF,
21064 IX86_BUILTIN_VEC_PERM_V4SF,
21065 IX86_BUILTIN_VEC_PERM_V2DI,
21066 IX86_BUILTIN_VEC_PERM_V4SI,
21067 IX86_BUILTIN_VEC_PERM_V8HI,
21068 IX86_BUILTIN_VEC_PERM_V16QI,
21069 IX86_BUILTIN_VEC_PERM_V2DI_U,
21070 IX86_BUILTIN_VEC_PERM_V4SI_U,
21071 IX86_BUILTIN_VEC_PERM_V8HI_U,
21072 IX86_BUILTIN_VEC_PERM_V16QI_U,
21073 IX86_BUILTIN_VEC_PERM_V4DF,
21074 IX86_BUILTIN_VEC_PERM_V8SF,
21076 /* FMA4 and XOP instructions. */
21077 IX86_BUILTIN_VFMADDSS,
21078 IX86_BUILTIN_VFMADDSD,
21079 IX86_BUILTIN_VFMADDPS,
21080 IX86_BUILTIN_VFMADDPD,
21081 IX86_BUILTIN_VFMSUBSS,
21082 IX86_BUILTIN_VFMSUBSD,
21083 IX86_BUILTIN_VFMSUBPS,
21084 IX86_BUILTIN_VFMSUBPD,
21085 IX86_BUILTIN_VFMADDSUBPS,
21086 IX86_BUILTIN_VFMADDSUBPD,
21087 IX86_BUILTIN_VFMSUBADDPS,
21088 IX86_BUILTIN_VFMSUBADDPD,
21089 IX86_BUILTIN_VFNMADDSS,
21090 IX86_BUILTIN_VFNMADDSD,
21091 IX86_BUILTIN_VFNMADDPS,
21092 IX86_BUILTIN_VFNMADDPD,
21093 IX86_BUILTIN_VFNMSUBSS,
21094 IX86_BUILTIN_VFNMSUBSD,
21095 IX86_BUILTIN_VFNMSUBPS,
21096 IX86_BUILTIN_VFNMSUBPD,
21097 IX86_BUILTIN_VFMADDPS256,
21098 IX86_BUILTIN_VFMADDPD256,
21099 IX86_BUILTIN_VFMSUBPS256,
21100 IX86_BUILTIN_VFMSUBPD256,
21101 IX86_BUILTIN_VFMADDSUBPS256,
21102 IX86_BUILTIN_VFMADDSUBPD256,
21103 IX86_BUILTIN_VFMSUBADDPS256,
21104 IX86_BUILTIN_VFMSUBADDPD256,
21105 IX86_BUILTIN_VFNMADDPS256,
21106 IX86_BUILTIN_VFNMADDPD256,
21107 IX86_BUILTIN_VFNMSUBPS256,
21108 IX86_BUILTIN_VFNMSUBPD256,
21110 IX86_BUILTIN_VPCMOV,
21111 IX86_BUILTIN_VPCMOV_V2DI,
21112 IX86_BUILTIN_VPCMOV_V4SI,
21113 IX86_BUILTIN_VPCMOV_V8HI,
21114 IX86_BUILTIN_VPCMOV_V16QI,
21115 IX86_BUILTIN_VPCMOV_V4SF,
21116 IX86_BUILTIN_VPCMOV_V2DF,
21117 IX86_BUILTIN_VPCMOV256,
21118 IX86_BUILTIN_VPCMOV_V4DI256,
21119 IX86_BUILTIN_VPCMOV_V8SI256,
21120 IX86_BUILTIN_VPCMOV_V16HI256,
21121 IX86_BUILTIN_VPCMOV_V32QI256,
21122 IX86_BUILTIN_VPCMOV_V8SF256,
21123 IX86_BUILTIN_VPCMOV_V4DF256,
21125 IX86_BUILTIN_VPPERM,
21127 IX86_BUILTIN_VPMACSSWW,
21128 IX86_BUILTIN_VPMACSWW,
21129 IX86_BUILTIN_VPMACSSWD,
21130 IX86_BUILTIN_VPMACSWD,
21131 IX86_BUILTIN_VPMACSSDD,
21132 IX86_BUILTIN_VPMACSDD,
21133 IX86_BUILTIN_VPMACSSDQL,
21134 IX86_BUILTIN_VPMACSSDQH,
21135 IX86_BUILTIN_VPMACSDQL,
21136 IX86_BUILTIN_VPMACSDQH,
21137 IX86_BUILTIN_VPMADCSSWD,
21138 IX86_BUILTIN_VPMADCSWD,
21140 IX86_BUILTIN_VPHADDBW,
21141 IX86_BUILTIN_VPHADDBD,
21142 IX86_BUILTIN_VPHADDBQ,
21143 IX86_BUILTIN_VPHADDWD,
21144 IX86_BUILTIN_VPHADDWQ,
21145 IX86_BUILTIN_VPHADDDQ,
21146 IX86_BUILTIN_VPHADDUBW,
21147 IX86_BUILTIN_VPHADDUBD,
21148 IX86_BUILTIN_VPHADDUBQ,
21149 IX86_BUILTIN_VPHADDUWD,
21150 IX86_BUILTIN_VPHADDUWQ,
21151 IX86_BUILTIN_VPHADDUDQ,
21152 IX86_BUILTIN_VPHSUBBW,
21153 IX86_BUILTIN_VPHSUBWD,
21154 IX86_BUILTIN_VPHSUBDQ,
21156 IX86_BUILTIN_VPROTB,
21157 IX86_BUILTIN_VPROTW,
21158 IX86_BUILTIN_VPROTD,
21159 IX86_BUILTIN_VPROTQ,
21160 IX86_BUILTIN_VPROTB_IMM,
21161 IX86_BUILTIN_VPROTW_IMM,
21162 IX86_BUILTIN_VPROTD_IMM,
21163 IX86_BUILTIN_VPROTQ_IMM,
21165 IX86_BUILTIN_VPSHLB,
21166 IX86_BUILTIN_VPSHLW,
21167 IX86_BUILTIN_VPSHLD,
21168 IX86_BUILTIN_VPSHLQ,
21169 IX86_BUILTIN_VPSHAB,
21170 IX86_BUILTIN_VPSHAW,
21171 IX86_BUILTIN_VPSHAD,
21172 IX86_BUILTIN_VPSHAQ,
21174 IX86_BUILTIN_VFRCZSS,
21175 IX86_BUILTIN_VFRCZSD,
21176 IX86_BUILTIN_VFRCZPS,
21177 IX86_BUILTIN_VFRCZPD,
21178 IX86_BUILTIN_VFRCZPS256,
21179 IX86_BUILTIN_VFRCZPD256,
21181 IX86_BUILTIN_VPCOMEQUB,
21182 IX86_BUILTIN_VPCOMNEUB,
21183 IX86_BUILTIN_VPCOMLTUB,
21184 IX86_BUILTIN_VPCOMLEUB,
21185 IX86_BUILTIN_VPCOMGTUB,
21186 IX86_BUILTIN_VPCOMGEUB,
21187 IX86_BUILTIN_VPCOMFALSEUB,
21188 IX86_BUILTIN_VPCOMTRUEUB,
21190 IX86_BUILTIN_VPCOMEQUW,
21191 IX86_BUILTIN_VPCOMNEUW,
21192 IX86_BUILTIN_VPCOMLTUW,
21193 IX86_BUILTIN_VPCOMLEUW,
21194 IX86_BUILTIN_VPCOMGTUW,
21195 IX86_BUILTIN_VPCOMGEUW,
21196 IX86_BUILTIN_VPCOMFALSEUW,
21197 IX86_BUILTIN_VPCOMTRUEUW,
21199 IX86_BUILTIN_VPCOMEQUD,
21200 IX86_BUILTIN_VPCOMNEUD,
21201 IX86_BUILTIN_VPCOMLTUD,
21202 IX86_BUILTIN_VPCOMLEUD,
21203 IX86_BUILTIN_VPCOMGTUD,
21204 IX86_BUILTIN_VPCOMGEUD,
21205 IX86_BUILTIN_VPCOMFALSEUD,
21206 IX86_BUILTIN_VPCOMTRUEUD,
21208 IX86_BUILTIN_VPCOMEQUQ,
21209 IX86_BUILTIN_VPCOMNEUQ,
21210 IX86_BUILTIN_VPCOMLTUQ,
21211 IX86_BUILTIN_VPCOMLEUQ,
21212 IX86_BUILTIN_VPCOMGTUQ,
21213 IX86_BUILTIN_VPCOMGEUQ,
21214 IX86_BUILTIN_VPCOMFALSEUQ,
21215 IX86_BUILTIN_VPCOMTRUEUQ,
21217 IX86_BUILTIN_VPCOMEQB,
21218 IX86_BUILTIN_VPCOMNEB,
21219 IX86_BUILTIN_VPCOMLTB,
21220 IX86_BUILTIN_VPCOMLEB,
21221 IX86_BUILTIN_VPCOMGTB,
21222 IX86_BUILTIN_VPCOMGEB,
21223 IX86_BUILTIN_VPCOMFALSEB,
21224 IX86_BUILTIN_VPCOMTRUEB,
21226 IX86_BUILTIN_VPCOMEQW,
21227 IX86_BUILTIN_VPCOMNEW,
21228 IX86_BUILTIN_VPCOMLTW,
21229 IX86_BUILTIN_VPCOMLEW,
21230 IX86_BUILTIN_VPCOMGTW,
21231 IX86_BUILTIN_VPCOMGEW,
21232 IX86_BUILTIN_VPCOMFALSEW,
21233 IX86_BUILTIN_VPCOMTRUEW,
21235 IX86_BUILTIN_VPCOMEQD,
21236 IX86_BUILTIN_VPCOMNED,
21237 IX86_BUILTIN_VPCOMLTD,
21238 IX86_BUILTIN_VPCOMLED,
21239 IX86_BUILTIN_VPCOMGTD,
21240 IX86_BUILTIN_VPCOMGED,
21241 IX86_BUILTIN_VPCOMFALSED,
21242 IX86_BUILTIN_VPCOMTRUED,
21244 IX86_BUILTIN_VPCOMEQQ,
21245 IX86_BUILTIN_VPCOMNEQ,
21246 IX86_BUILTIN_VPCOMLTQ,
21247 IX86_BUILTIN_VPCOMLEQ,
21248 IX86_BUILTIN_VPCOMGTQ,
21249 IX86_BUILTIN_VPCOMGEQ,
21250 IX86_BUILTIN_VPCOMFALSEQ,
21251 IX86_BUILTIN_VPCOMTRUEQ,
21253 /* LWP instructions. */
21254 IX86_BUILTIN_LLWPCB,
21255 IX86_BUILTIN_SLWPCB,
21256 IX86_BUILTIN_LWPVAL32,
21257 IX86_BUILTIN_LWPVAL64,
21258 IX86_BUILTIN_LWPINS32,
21259 IX86_BUILTIN_LWPINS64,
21261 IX86_BUILTIN_CLZS,
21263 IX86_BUILTIN_MAX
21266 /* Table for the ix86 builtin decls. */
21267 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
21269 /* Table of all of the builtin functions that are possible with different ISAs
21270 but are waiting to be built until a function is declared to use that
21271 ISA. */
21272 struct builtin_isa {
21273 const char *name; /* function name */
21274 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21275 int isa; /* isa_flags this builtin is defined for */
21276 bool const_p; /* true if the declaration is constant */
21277 bool set_and_not_built_p;
21280 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
21283 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
21284 of which isa_flags to use in the ix86_builtins_isa array. Stores the
21285 function decl in the ix86_builtins array. Returns the function decl or
21286 NULL_TREE if the builtin was not added.
21288 If the front end has a special hook for builtin functions, delay adding
21289 builtin functions that aren't in the current ISA until the ISA is changed
21290 with function specific optimization. Doing so can save about 300K for the
21291 default compiler. When the builtin is expanded, check at that time whether
21292 it is valid.
21294 If the front end doesn't have a special hook, record all builtins, even if
21295 they aren't in the current ISA, in case the user uses
21296 function specific options for a different ISA, so that we don't get scope
21297 errors if a builtin is added in the middle of a function scope. */
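/* A typical registration looks roughly like

     def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_blendpd",
			V2DF_FTYPE_V2DF_V2DF_INT, IX86_BUILTIN_BLENDPD);

   (illustrative only -- most entries are in fact driven from the bdesc_*
   tables below, and the FTYPE code is one of the generated
   enum ix86_builtin_func_type values).  */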
21299 static inline tree
21300 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21301 enum ix86_builtins code)
21303 tree decl = NULL_TREE;
21305 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21307 ix86_builtins_isa[(int) code].isa = mask;
21309 if (mask == 0
21310 || (mask & ix86_isa_flags) != 0
21311 || (lang_hooks.builtin_function
21312 == lang_hooks.builtin_function_ext_scope))
21315 tree type = ix86_get_builtin_func_type (tcode);
21316 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21317 NULL, NULL_TREE);
21318 ix86_builtins[(int) code] = decl;
21319 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21321 else
21323 ix86_builtins[(int) code] = NULL_TREE;
21324 ix86_builtins_isa[(int) code].tcode = tcode;
21325 ix86_builtins_isa[(int) code].name = name;
21326 ix86_builtins_isa[(int) code].const_p = false;
21327 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21331 return decl;
21334 /* Like def_builtin, but also marks the function decl "const". */
21336 static inline tree
21337 def_builtin_const (int mask, const char *name,
21338 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21340 tree decl = def_builtin (mask, name, tcode, code);
21341 if (decl)
21342 TREE_READONLY (decl) = 1;
21343 else
21344 ix86_builtins_isa[(int) code].const_p = true;
21346 return decl;
21349 /* Add any new builtin functions for a given ISA that may not have been
21350 declared. This saves a bit of space compared to adding all of the
21351 declarations to the tree, even if we didn't use them. */
21353 static void
21354 ix86_add_new_builtins (int isa)
21356 int i;
21358 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21360 if ((ix86_builtins_isa[i].isa & isa) != 0
21361 && ix86_builtins_isa[i].set_and_not_built_p)
21363 tree decl, type;
21365 /* Don't define the builtin again. */
21366 ix86_builtins_isa[i].set_and_not_built_p = false;
21368 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21369 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21370 type, i, BUILT_IN_MD, NULL,
21371 NULL_TREE);
21373 ix86_builtins[i] = decl;
21374 if (ix86_builtins_isa[i].const_p)
21375 TREE_READONLY (decl) = 1;
21380 /* Bits for builtin_description.flag. */
21382 /* Set when we don't support the comparison natively, and should
21383 swap_comparison in order to support it. */
21384 #define BUILTIN_DESC_SWAP_OPERANDS 1
21386 struct builtin_description
21388 const unsigned int mask;
21389 const enum insn_code icode;
21390 const char *const name;
21391 const enum ix86_builtins code;
21392 const enum rtx_code comparison;
21393 const int flag;
21396 static const struct builtin_description bdesc_comi[] =
21398 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21399 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21400 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21401 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21402 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21403 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21404 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21405 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21406 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21407 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21408 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21409 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21410 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21411 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21412 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21413 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21414 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21415 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21416 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21417 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21418 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21419 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21420 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21421 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21422 };
21424 static const struct builtin_description bdesc_pcmpestr[] =
21425 {
21426 /* SSE4.2 */
21427 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21428 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21429 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21430 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21431 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21432 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21433 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21434 };
21436 static const struct builtin_description bdesc_pcmpistr[] =
21437 {
21438 /* SSE4.2 */
21439 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21440 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21441 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21442 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21443 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21444 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21445 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
21446 };
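/* Illustrative use of the string-compare builtins above (immediate and
   variable names are made up; user code normally reaches these through the
   _mm_cmpistr* / _mm_cmpestr* intrinsics):

     int idx = __builtin_ia32_pcmpistri128 (a, b, 0x0c);
     int zf  = __builtin_ia32_pcmpistriz128 (a, b, 0x0c);

   Entries whose flag field names a CC mode (CCAmode, CCCmode, CCOmode,
   CCSmode, CCZmode) return a single EFLAGS condition of the insn; the
   plain ...i128/...m128 entries return the index or mask result.  */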
21448 /* Special builtins with variable number of arguments. */
21449 static const struct builtin_description bdesc_special_args[] =
21450 {
21451 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21452 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21454 /* MMX */
21455 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21457 /* 3DNow! */
21458 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21460 /* SSE */
21461 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21462 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21463 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21465 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21466 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21467 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21468 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21470 /* SSE or 3DNow!A */
21471 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21472 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21474 /* SSE2 */
21475 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21476 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21477 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21478 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21479 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21480 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21481 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21482 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21483 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21485 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21486 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21488 /* SSE3 */
21489 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21491 /* SSE4.1 */
21492 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21494 /* SSE4A */
21495 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21496 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21498 /* AVX */
21499 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21500 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21502 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21503 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21504 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21505 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21506 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21508 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21509 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21510 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21511 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21512 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21513 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21514 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21516 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21517 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21518 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21520 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21521 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21522 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21523 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21524 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21525 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21526 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21527 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21529 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21530 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21531 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21532 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21533 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
21534 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
21535 };
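/* Illustrative use of the LWP builtins above (argument values are made up;
   user code is expected to reach these through the lwpintrin.h wrappers):

     void *cb = __builtin_ia32_slwpcb ();
     __builtin_ia32_llwpcb (cb);
     unsigned char full = __builtin_ia32_lwpins32 (d2, d1, flags);

   The UCHAR return type of the lwpins forms reflects that LWPINS reports
   its status in the carry flag.  */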
21538 /* Builtins with variable number of arguments. */
21539 static const struct builtin_description bdesc_args[] =
21540 {
21541 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21542 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21543 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21544 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21545 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21546 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21547 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21549 /* MMX */
21550 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21551 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21552 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21553 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21554 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21555 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21557 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21558 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21559 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21560 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21561 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21562 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21563 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21564 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21566 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21567 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21569 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21570 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21571 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21572 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21574 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21575 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21576 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21577 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21578 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21579 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21581 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21582 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21583 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21584 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21585 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI},
21586 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI},
21588 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21589 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
21590 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21592 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
21594 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21595 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21596 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21597 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21598 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21599 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21601 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21602 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21603 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21604 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21605 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21606 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21608 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21609 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21610 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21611 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21613 /* 3DNow! */
21614 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21615 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21616 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21617 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21619 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21620 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21621 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21622 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21623 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21624 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21625 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21626 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21627 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21628 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21629 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21630 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21631 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21632 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21633 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21635 /* 3DNow!A */
21636 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21637 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21638 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21639 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21640 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21641 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21643 /* SSE */
21644 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
21645 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21646 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21647 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21648 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21649 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21650 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21651 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21652 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21653 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21654 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21655 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21657 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21659 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21660 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21661 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21662 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21663 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21664 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21665 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21666 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21668 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21669 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21670 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21671 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21672 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21673 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21674 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21675 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21676 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21677 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21678 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP},
21679 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21680 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21681 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21682 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21683 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21684 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21685 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21686 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21687 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21688 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21689 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21691 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21692 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21693 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21694 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21696 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21697 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21698 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21699 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21701 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21703 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21704 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21705 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21706 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21707 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21709 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
21710 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
21711 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
21713 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
21715 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21716 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21717 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21719 /* SSE MMX or 3DNow!A */
21720 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21721 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21722 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21724 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21725 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21726 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21727 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21729 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
21730 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
21732 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
21734 /* SSE2 */
21735 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21737 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
21738 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
21739 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
21740 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
21741 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
21742 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
21743 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
21744 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
21745 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
21746 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
21747 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
21748 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
21750 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
21751 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
21752 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
21753 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
21754 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21755 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21757 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21758 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21759 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
21760 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21761 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21763 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
21765 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21766 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21767 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21768 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21770 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21771 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
21772 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21774 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21775 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21776 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21777 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21778 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21779 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21780 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21781 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21783 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21784 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21785 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21786 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21787 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP},
21788 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21789 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21790 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21791 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21792 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21793 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21794 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21795 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21796 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21797 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21798 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21799 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21800 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21801 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21802 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21804 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21805 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21806 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21807 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21809 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21810 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21811 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21812 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21814 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21816 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21817 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21818 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21820 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
21822 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21823 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21824 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21825 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21826 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21827 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21828 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21829 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21831 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21832 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21833 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21834 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21835 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21836 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21837 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21838 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21840 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21841 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21843 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21844 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21845 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21846 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21848 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21849 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21851 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21852 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21853 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21854 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21855 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21856 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21858 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21859 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21860 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21861 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21863 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21864 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21865 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21866 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21867 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21868 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21869 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21870 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21872 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21873 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
21874 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21876 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21877 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
21879 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
21880 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
21882 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
21884 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
21885 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
21886 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
21887 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
21889 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
21890 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21891 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21892 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
21893 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21894 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21895 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
21897 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
21898 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21899 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21900 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
21901 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21902 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21903 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
21905 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21906 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21907 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21908 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21910 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
21911 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
21912 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
21914 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
21916 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
21917 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
21919 { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
21921 /* SSE2 MMX */
21922 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
21923 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
21925 /* SSE3 */
21926 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF},
21927 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21929 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21930 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21931 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21932 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21933 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21934 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21936 /* SSSE3 */
21937 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
21938 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
21939 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
21940 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
21941 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
21942 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21944 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21945 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21946 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21947 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21948 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21949 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21950 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21951 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21952 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21953 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21954 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21955 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21956 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
21957 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
21958 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21959 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21960 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21961 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21962 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21963 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21964 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21965 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21966 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21967 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21969 /* SSSE3. */
21970 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
21971 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
21973 /* SSE4.1 */
21974 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21975 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21976 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
21977 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
21978 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21979 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21980 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21981 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
21982 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
21983 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
21985 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
21986 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
21987 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
21988 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
21989 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
21990 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
21991 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
21992 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
21993 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
21994 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
21995 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
21996 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
21997 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
21999 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
22000 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22001 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22002 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22003 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22004 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22005 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22006 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22007 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22008 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22009 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22010 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22012 /* SSE4.1 */
22013 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22014 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22015 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22016 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22018 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22019 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22020 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22022 /* SSE4.2 */
22023 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22024 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
22025 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
22026 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
22027 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
22029 /* SSE4A */
22030 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
22031 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
22032 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
22033 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22035 /* AES */
22036 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
22037 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22039 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22040 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22041 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22042 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22044 /* PCLMUL */
22045 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
22047 /* AVX */
22048 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22049 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22050 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22051 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22052 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22053 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22054 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22055 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22056 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22057 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22058 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22059 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22060 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22061 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22062 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22063 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22064 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22065 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22066 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22067 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22068 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22069 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22070 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22071 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22072 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22073 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22075 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
22076 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
22077 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
22078 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
22080 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22081 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22082 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
22083 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
22084 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22085 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22086 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22087 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22088 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22089 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22090 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22091 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22092 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22093 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
22094 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
22095 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
22096 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
22097 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
22098 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
22099 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22100 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
22101 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22102 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22103 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22104 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22105 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22106 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
22107 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22108 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22109 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22110 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22111 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22112 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22113 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22115 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22116 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22117 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22119 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22120 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22121 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22122 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22123 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22125 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22127 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22128 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22130 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22131 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22132 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22133 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22135 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22136 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22137 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22138 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22139 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22140 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22142 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22143 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22144 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22145 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22146 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22147 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22148 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22149 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22150 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22151 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22152 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22153 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22154 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22155 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22156 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22158 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22159 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22161 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
22162 };
22164 /* FMA4 and XOP. */
22165 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
22166 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
22167 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
22168 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
22169 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
22170 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
22171 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
22172 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
22173 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
22174 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
22175 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
22176 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
22177 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
22178 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
22179 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
22180 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
22181 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
22182 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
22183 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
22184 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
22185 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
22186 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
22187 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
22188 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
22189 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
22190 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
22191 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
22192 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
22193 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
22194 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
22195 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
22196 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
22197 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
22198 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
22199 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
22200 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
22201 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
22202 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
22203 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
22204 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
22205 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
22206 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
22207 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
22208 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
22209 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
22210 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
22211 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
22212 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
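/* Each MULTI_ARG_* macro above is shorthand for the ix86_builtin_func_type
   used by the FMA4/XOP table entries below; e.g. MULTI_ARG_3_SF marks a
   three-operand V4SF builtin such as __builtin_ia32_vfmaddps, and
   MULTI_ARG_2_QI_CMP marks a two-operand V16QI comparison builtin.  */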
22214 static const struct builtin_description bdesc_multi_arg[] =
22215 {
22216 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22217 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22218 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22219 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22220 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22221 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22222 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22223 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22225 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22226 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22227 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22228 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22229 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22230 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22231 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22232 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22234 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22235 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22236 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22237 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22239 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22240 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22241 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22242 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22244 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22245 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22246 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22247 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22249 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22250 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22251 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22252 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22254 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
22255 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
22256 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
22257 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
22258 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi",IX86_BUILTIN_VPCMOV_V16QI,UNKNOWN, (int)MULTI_ARG_3_QI },
22259 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
22260 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
22262 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22263 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22264 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
22265 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
22266 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
22267 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22268 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22270 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
22272 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22273 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22274 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22275 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22276 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22277 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22278 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22279 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22280 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22281 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22282 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22283 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22285 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22286 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
22287 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
22288 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
22289 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
22290 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
22291 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
22292 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
22293 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22294 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
22295 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
22296 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
22297 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22298 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
22299 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
22300 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
22302 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
22303 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
22304 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
22305 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
22306 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
22307 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
22309 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22310 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22311 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22312 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22313 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22314 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22315 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22316 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22317 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22318 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22319 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22320 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22321 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22322 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22323 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22325 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
22326 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22327 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22328 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
22329 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
22330 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
22331 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
22333 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
22334 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22335 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22336 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
22337 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
22338 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
22339 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
22341 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
22342 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22343 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22344 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
22345 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
22346 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
22347 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
22349 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22350 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22351 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22352 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
22353 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
22354 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
22355 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
22357 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
22358 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22359 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22360 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
22361 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
22362 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
22363 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
22365 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
22366 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22367 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22368 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
22369 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
22370 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
22371 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
22373 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
22374 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22375 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22376 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
22377 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
22378 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
22379 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
22381 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22382 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22383 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22384 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
22385 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
22386 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
22387 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
22389 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22390 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22391 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22392 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22393 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22394 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22395 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22396 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22398 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22399 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22400 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22401 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22402 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22403 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22404 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22405 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22409 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
22410 not in the current target ISA, so that the user can compile particular
22411 modules with target-specific options that differ from the command-line
22412 options. */
22413 static void
22414 ix86_init_mmx_sse_builtins (void)
22416 const struct builtin_description * d;
22417 enum ix86_builtin_func_type ftype;
22418 size_t i;
22420 /* Add all special builtins with variable number of operands. */
22421 for (i = 0, d = bdesc_special_args;
22422 i < ARRAY_SIZE (bdesc_special_args);
22423 i++, d++)
22425 if (d->name == 0)
22426 continue;
22428 ftype = (enum ix86_builtin_func_type) d->flag;
22429 def_builtin (d->mask, d->name, ftype, d->code);
22432 /* Add all builtins with variable number of operands. */
22433 for (i = 0, d = bdesc_args;
22434 i < ARRAY_SIZE (bdesc_args);
22435 i++, d++)
22437 if (d->name == 0)
22438 continue;
22440 ftype = (enum ix86_builtin_func_type) d->flag;
22441 def_builtin_const (d->mask, d->name, ftype, d->code);
22444 /* pcmpestr[im] insns. */
22445 for (i = 0, d = bdesc_pcmpestr;
22446 i < ARRAY_SIZE (bdesc_pcmpestr);
22447 i++, d++)
22449 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22450 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
22451 else
22452 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
22453 def_builtin_const (d->mask, d->name, ftype, d->code);
22456 /* pcmpistr[im] insns. */
22457 for (i = 0, d = bdesc_pcmpistr;
22458 i < ARRAY_SIZE (bdesc_pcmpistr);
22459 i++, d++)
22461 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22462 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
22463 else
22464 ftype = INT_FTYPE_V16QI_V16QI_INT;
22465 def_builtin_const (d->mask, d->name, ftype, d->code);
22468 /* comi/ucomi insns. */
22469 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22471 if (d->mask == OPTION_MASK_ISA_SSE2)
22472 ftype = INT_FTYPE_V2DF_V2DF;
22473 else
22474 ftype = INT_FTYPE_V4SF_V4SF;
22475 def_builtin_const (d->mask, d->name, ftype, d->code);
22478 /* SSE */
22479 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
22480 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
22481 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
22482 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
22484 /* SSE or 3DNow!A */
22485 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22486 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
22487 IX86_BUILTIN_MASKMOVQ);
22489 /* SSE2 */
22490 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
22491 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
22493 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
22494 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
22495 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
22496 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
22498 /* SSE3. */
22499 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
22500 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
22501 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
22502 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
22504 /* AES */
22505 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
22506 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
22507 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
22508 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
22509 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
22510 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
22511 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
22512 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
22513 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
22514 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
22515 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
22516 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
22518 /* PCLMUL */
22519 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
22520 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
22522 /* MMX access to the vec_init patterns. */
22523 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
22524 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
22526 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
22527 V4HI_FTYPE_HI_HI_HI_HI,
22528 IX86_BUILTIN_VEC_INIT_V4HI);
22530 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
22531 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
22532 IX86_BUILTIN_VEC_INIT_V8QI);
22534 /* Access to the vec_extract patterns. */
22535 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
22536 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
22537 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
22538 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
22539 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
22540 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
22541 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
22542 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
22543 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
22544 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
22546 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22547 "__builtin_ia32_vec_ext_v4hi",
22548 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
22550 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
22551 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
22553 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
22554 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
22556 /* Access to the vec_set patterns. */
22557 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
22558 "__builtin_ia32_vec_set_v2di",
22559 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
22561 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
22562 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
22564 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
22565 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
22567 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
22568 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
22570 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22571 "__builtin_ia32_vec_set_v4hi",
22572 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
22574 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
22575 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
22577 /* Add FMA4 multi-arg argument instructions */
22578 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22580 if (d->name == 0)
22581 continue;
22583 ftype = (enum ix86_builtin_func_type) d->flag;
22584 def_builtin_const (d->mask, d->name, ftype, d->code);
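/* Illustrative sketch, not part of this file: because the builtins above are
   registered regardless of the command-line ISA, a translation unit built
   without -maes can still use the AES builtin inside a function that enables
   the ISA locally.  The typedef and function names are assumptions made to
   keep the example self-contained.  */
#if 0
typedef long long example_v2di __attribute__ ((vector_size (16)));

__attribute__ ((target ("aes")))
static example_v2di
example_aes_round (example_v2di state, example_v2di round_key)
{
  /* V2DI_FTYPE_V2DI_V2DI, as registered by def_builtin_const above.  */
  return __builtin_ia32_aesenc128 (state, round_key);
}
#endif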
22588 /* Internal method for ix86_init_builtins. */
22590 static void
22591 ix86_init_builtins_va_builtins_abi (void)
22593 tree ms_va_ref, sysv_va_ref;
22594 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22595 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22596 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22597 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22599 if (!TARGET_64BIT)
22600 return;
22601 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22602 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22603 ms_va_ref = build_reference_type (ms_va_list_type_node);
22604 sysv_va_ref =
22605 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22607 fnvoid_va_end_ms =
22608 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22609 fnvoid_va_start_ms =
22610 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22611 fnvoid_va_end_sysv =
22612 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22613 fnvoid_va_start_sysv =
22614 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22615 NULL_TREE);
22616 fnvoid_va_copy_ms =
22617 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22618 NULL_TREE);
22619 fnvoid_va_copy_sysv =
22620 build_function_type_list (void_type_node, sysv_va_ref,
22621 sysv_va_ref, NULL_TREE);
22623 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22624 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22625 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22626 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
22627 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
22628 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
22629 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
22630 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22631 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
22632 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22633 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
22634 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
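/* Illustrative sketch, not part of this file: how the va builtins registered
   above are reached from user code on x86-64.  The function itself is an
   assumption for the example.  */
#if 0
__attribute__ ((ms_abi))
static void
example_ms_varargs (const char *fmt, ...)
{
  __builtin_ms_va_list ap;
  __builtin_ms_va_start (ap, fmt);
  /* ... fetch arguments with va_arg ...  */
  __builtin_ms_va_end (ap);
}
#endif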
22637 static void
22638 ix86_init_builtin_types (void)
22640 tree float128_type_node, float80_type_node;
22642 /* The __float80 type. */
22643 float80_type_node = long_double_type_node;
22644 if (TYPE_MODE (float80_type_node) != XFmode)
22646 /* The __float80 type. */
22647 float80_type_node = make_node (REAL_TYPE);
22649 TYPE_PRECISION (float80_type_node) = 80;
22650 layout_type (float80_type_node);
22652 (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");
22654 /* The __float128 type. */
22655 float128_type_node = make_node (REAL_TYPE);
22656 TYPE_PRECISION (float128_type_node) = 128;
22657 layout_type (float128_type_node);
22658 (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
22660 /* This macro is built by i386-builtin-types.awk. */
22661 DEFINE_BUILTIN_PRIMITIVE_TYPES;
22664 static void
22665 ix86_init_builtins (void)
22667 tree t;
22669 ix86_init_builtin_types ();
22671 /* TFmode support builtins. */
22672 def_builtin_const (0, "__builtin_infq",
22673 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
22674 def_builtin_const (0, "__builtin_huge_valq",
22675 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
22677 /* We will expand them to a normal call if SSE2 isn't available, since
22678 they are used by libgcc. */
22679 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
22680 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
22681 BUILT_IN_MD, "__fabstf2", NULL_TREE);
22682 TREE_READONLY (t) = 1;
22683 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
22685 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
22686 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
22687 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
22688 TREE_READONLY (t) = 1;
22689 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
22691 ix86_init_mmx_sse_builtins ();
22693 if (TARGET_64BIT)
22694 ix86_init_builtins_va_builtins_abi ();
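/* Illustrative sketch, not part of this file: the TFmode builtins registered
   above are usable directly; without SSE2 the call is expanded as a normal
   call to libgcc's __fabstf2 (see the FABSQ/COPYSIGNQ case in
   ix86_expand_builtin below).  */
#if 0
static __float128
example_magnitude (__float128 x)
{
  return __builtin_fabsq (x);
}
#endif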
22697 /* Return the ix86 builtin for CODE. */
22699 static tree
22700 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
22702 if (code >= IX86_BUILTIN_MAX)
22703 return error_mark_node;
22705 return ix86_builtins[code];
22708 /* Errors in the source file can cause expand_expr to return const0_rtx
22709 where we expect a vector. To avoid crashing, use one of the vector
22710 clear instructions. */
22711 static rtx
22712 safe_vector_operand (rtx x, enum machine_mode mode)
22714 if (x == const0_rtx)
22715 x = CONST0_RTX (mode);
22716 return x;
22719 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
22721 static rtx
22722 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
22724 rtx pat;
22725 tree arg0 = CALL_EXPR_ARG (exp, 0);
22726 tree arg1 = CALL_EXPR_ARG (exp, 1);
22727 rtx op0 = expand_normal (arg0);
22728 rtx op1 = expand_normal (arg1);
22729 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22730 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
22731 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
22733 if (VECTOR_MODE_P (mode0))
22734 op0 = safe_vector_operand (op0, mode0);
22735 if (VECTOR_MODE_P (mode1))
22736 op1 = safe_vector_operand (op1, mode1);
22738 if (optimize || !target
22739 || GET_MODE (target) != tmode
22740 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22741 target = gen_reg_rtx (tmode);
22743 if (GET_MODE (op1) == SImode && mode1 == TImode)
22745 rtx x = gen_reg_rtx (V4SImode);
22746 emit_insn (gen_sse2_loadd (x, op1));
22747 op1 = gen_lowpart (TImode, x);
22750 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
22751 op0 = copy_to_mode_reg (mode0, op0);
22752 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
22753 op1 = copy_to_mode_reg (mode1, op1);
22755 pat = GEN_FCN (icode) (target, op0, op1);
22756 if (! pat)
22757 return 0;
22759 emit_insn (pat);
22761 return target;
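/* Illustrative sketch, not part of this file: a typical two-operand builtin
   that reaches ix86_expand_binop_builtin.  _mm_add_epi32 from <emmintrin.h>
   wraps __builtin_ia32_paddd128; compile with -msse2.  */
#if 0
#include <emmintrin.h>

static __m128i
example_add_vectors (__m128i a, __m128i b)
{
  return _mm_add_epi32 (a, b);
}
#endif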
22764 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
22766 static rtx
22767 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
22768 enum ix86_builtin_func_type m_type,
22769 enum rtx_code sub_code)
22771 rtx pat;
22772 int i;
22773 int nargs;
22774 bool comparison_p = false;
22775 bool tf_p = false;
22776 bool last_arg_constant = false;
22777 int num_memory = 0;
22778 struct {
22779 rtx op;
22780 enum machine_mode mode;
22781 } args[4];
22783 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22785 switch (m_type)
22787 case MULTI_ARG_3_SF:
22788 case MULTI_ARG_3_DF:
22789 case MULTI_ARG_3_SF2:
22790 case MULTI_ARG_3_DF2:
22791 case MULTI_ARG_3_DI:
22792 case MULTI_ARG_3_SI:
22793 case MULTI_ARG_3_SI_DI:
22794 case MULTI_ARG_3_HI:
22795 case MULTI_ARG_3_HI_SI:
22796 case MULTI_ARG_3_QI:
22797 case MULTI_ARG_3_DI2:
22798 case MULTI_ARG_3_SI2:
22799 case MULTI_ARG_3_HI2:
22800 case MULTI_ARG_3_QI2:
22801 nargs = 3;
22802 break;
22804 case MULTI_ARG_2_SF:
22805 case MULTI_ARG_2_DF:
22806 case MULTI_ARG_2_DI:
22807 case MULTI_ARG_2_SI:
22808 case MULTI_ARG_2_HI:
22809 case MULTI_ARG_2_QI:
22810 nargs = 2;
22811 break;
22813 case MULTI_ARG_2_DI_IMM:
22814 case MULTI_ARG_2_SI_IMM:
22815 case MULTI_ARG_2_HI_IMM:
22816 case MULTI_ARG_2_QI_IMM:
22817 nargs = 2;
22818 last_arg_constant = true;
22819 break;
22821 case MULTI_ARG_1_SF:
22822 case MULTI_ARG_1_DF:
22823 case MULTI_ARG_1_SF2:
22824 case MULTI_ARG_1_DF2:
22825 case MULTI_ARG_1_DI:
22826 case MULTI_ARG_1_SI:
22827 case MULTI_ARG_1_HI:
22828 case MULTI_ARG_1_QI:
22829 case MULTI_ARG_1_SI_DI:
22830 case MULTI_ARG_1_HI_DI:
22831 case MULTI_ARG_1_HI_SI:
22832 case MULTI_ARG_1_QI_DI:
22833 case MULTI_ARG_1_QI_SI:
22834 case MULTI_ARG_1_QI_HI:
22835 nargs = 1;
22836 break;
22838 case MULTI_ARG_2_DI_CMP:
22839 case MULTI_ARG_2_SI_CMP:
22840 case MULTI_ARG_2_HI_CMP:
22841 case MULTI_ARG_2_QI_CMP:
22842 nargs = 2;
22843 comparison_p = true;
22844 break;
22846 case MULTI_ARG_2_SF_TF:
22847 case MULTI_ARG_2_DF_TF:
22848 case MULTI_ARG_2_DI_TF:
22849 case MULTI_ARG_2_SI_TF:
22850 case MULTI_ARG_2_HI_TF:
22851 case MULTI_ARG_2_QI_TF:
22852 nargs = 2;
22853 tf_p = true;
22854 break;
22856 default:
22857 gcc_unreachable ();
22860 if (optimize || !target
22861 || GET_MODE (target) != tmode
22862 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22863 target = gen_reg_rtx (tmode);
22865 gcc_assert (nargs <= 4);
22867 for (i = 0; i < nargs; i++)
22869 tree arg = CALL_EXPR_ARG (exp, i);
22870 rtx op = expand_normal (arg);
22871 int adjust = (comparison_p) ? 1 : 0;
22872 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
22874 if (last_arg_constant && i == nargs-1)
22876 if (!CONST_INT_P (op))
22878 error ("last argument must be an immediate");
22879 return gen_reg_rtx (tmode);
22882 else
22884 if (VECTOR_MODE_P (mode))
22885 op = safe_vector_operand (op, mode);
22887 /* If we aren't optimizing, only allow one memory operand to be
22888 generated. */
22889 if (memory_operand (op, mode))
22890 num_memory++;
22892 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
22894 if (optimize
22895 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
22896 || num_memory > 1)
22897 op = force_reg (mode, op);
22900 args[i].op = op;
22901 args[i].mode = mode;
22904 switch (nargs)
22906 case 1:
22907 pat = GEN_FCN (icode) (target, args[0].op);
22908 break;
22910 case 2:
22911 if (tf_p)
22912 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
22913 GEN_INT ((int)sub_code));
22914 else if (! comparison_p)
22915 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
22916 else
22918 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
22919 args[0].op,
22920 args[1].op);
22922 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
22924 break;
22926 case 3:
22927 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
22928 break;
22930 default:
22931 gcc_unreachable ();
22934 if (! pat)
22935 return 0;
22937 emit_insn (pat);
22938 return target;
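/* Illustrative sketch, not part of this file: a two-operand XOP comparison
   from the bdesc_multi_arg table reaching this helper with sub_code == LTU
   and m_type == MULTI_ARG_2_SI_CMP.  The typedef and function names are
   assumptions.  */
#if 0
typedef int example_v4si __attribute__ ((vector_size (16)));

__attribute__ ((target ("xop")))
static example_v4si
example_unsigned_less (example_v4si a, example_v4si b)
{
  return __builtin_ia32_vpcomltud (a, b);
}
#endif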
22941 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
22942 insns with vec_merge. */
22944 static rtx
22945 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
22946 rtx target)
22948 rtx pat;
22949 tree arg0 = CALL_EXPR_ARG (exp, 0);
22950 rtx op1, op0 = expand_normal (arg0);
22951 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22952 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
22954 if (optimize || !target
22955 || GET_MODE (target) != tmode
22956 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22957 target = gen_reg_rtx (tmode);
22959 if (VECTOR_MODE_P (mode0))
22960 op0 = safe_vector_operand (op0, mode0);
22962 if ((optimize && !register_operand (op0, mode0))
22963 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
22964 op0 = copy_to_mode_reg (mode0, op0);
22966 op1 = op0;
22967 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
22968 op1 = copy_to_mode_reg (mode0, op1);
22970 pat = GEN_FCN (icode) (target, op0, op1);
22971 if (! pat)
22972 return 0;
22973 emit_insn (pat);
22974 return target;
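/* Illustrative sketch, not part of this file: sqrtss only writes the low
   element, so the single source is duplicated above to feed the vec_merge
   pattern.  An example of this shape is _mm_sqrt_ss from <xmmintrin.h>;
   compile with -msse.  */
#if 0
#include <xmmintrin.h>

static __m128
example_sqrt_low (__m128 a)
{
  return _mm_sqrt_ss (a);
}
#endif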
22977 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
22979 static rtx
22980 ix86_expand_sse_compare (const struct builtin_description *d,
22981 tree exp, rtx target, bool swap)
22983 rtx pat;
22984 tree arg0 = CALL_EXPR_ARG (exp, 0);
22985 tree arg1 = CALL_EXPR_ARG (exp, 1);
22986 rtx op0 = expand_normal (arg0);
22987 rtx op1 = expand_normal (arg1);
22988 rtx op2;
22989 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
22990 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
22991 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
22992 enum rtx_code comparison = d->comparison;
22994 if (VECTOR_MODE_P (mode0))
22995 op0 = safe_vector_operand (op0, mode0);
22996 if (VECTOR_MODE_P (mode1))
22997 op1 = safe_vector_operand (op1, mode1);
22999 /* Swap operands if we have a comparison that isn't available in
23000 hardware. */
23001 if (swap)
23003 rtx tmp = gen_reg_rtx (mode1);
23004 emit_move_insn (tmp, op1);
23005 op1 = op0;
23006 op0 = tmp;
23009 if (optimize || !target
23010 || GET_MODE (target) != tmode
23011 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23012 target = gen_reg_rtx (tmode);
23014 if ((optimize && !register_operand (op0, mode0))
23015 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23016 op0 = copy_to_mode_reg (mode0, op0);
23017 if ((optimize && !register_operand (op1, mode1))
23018 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23019 op1 = copy_to_mode_reg (mode1, op1);
23021 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23022 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23023 if (! pat)
23024 return 0;
23025 emit_insn (pat);
23026 return target;
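/* Illustrative sketch, not part of this file: packed comparisons such as
   _mm_cmplt_ps from <xmmintrin.h> reach this helper; predicates with no
   direct hardware encoding are handled by the `swap' path above.  Compile
   with -msse.  */
#if 0
#include <xmmintrin.h>

static __m128
example_less_mask (__m128 a, __m128 b)
{
  return _mm_cmplt_ps (a, b);
}
#endif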
23029 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23031 static rtx
23032 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23033 rtx target)
23035 rtx pat;
23036 tree arg0 = CALL_EXPR_ARG (exp, 0);
23037 tree arg1 = CALL_EXPR_ARG (exp, 1);
23038 rtx op0 = expand_normal (arg0);
23039 rtx op1 = expand_normal (arg1);
23040 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23041 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23042 enum rtx_code comparison = d->comparison;
23044 if (VECTOR_MODE_P (mode0))
23045 op0 = safe_vector_operand (op0, mode0);
23046 if (VECTOR_MODE_P (mode1))
23047 op1 = safe_vector_operand (op1, mode1);
23049 /* Swap operands if we have a comparison that isn't available in
23050 hardware. */
23051 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23053 rtx tmp = op1;
23054 op1 = op0;
23055 op0 = tmp;
23058 target = gen_reg_rtx (SImode);
23059 emit_move_insn (target, const0_rtx);
23060 target = gen_rtx_SUBREG (QImode, target, 0);
23062 if ((optimize && !register_operand (op0, mode0))
23063 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23064 op0 = copy_to_mode_reg (mode0, op0);
23065 if ((optimize && !register_operand (op1, mode1))
23066 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23067 op1 = copy_to_mode_reg (mode1, op1);
23069 pat = GEN_FCN (d->icode) (op0, op1);
23070 if (! pat)
23071 return 0;
23072 emit_insn (pat);
23073 emit_insn (gen_rtx_SET (VOIDmode,
23074 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23075 gen_rtx_fmt_ee (comparison, QImode,
23076 SET_DEST (pat),
23077 const0_rtx)));
23079 return SUBREG_REG (target);
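/* Illustrative sketch, not part of this file: comiss only sets EFLAGS, so
   the helper above reads the predicate back through a QImode subreg.
   _mm_comilt_ss from <xmmintrin.h> returns that flag as an int; compile
   with -msse.  */
#if 0
#include <xmmintrin.h>

static int
example_scalar_less (__m128 a, __m128 b)
{
  return _mm_comilt_ss (a, b);
}
#endif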
23082 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23084 static rtx
23085 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23086 rtx target)
23088 rtx pat;
23089 tree arg0 = CALL_EXPR_ARG (exp, 0);
23090 tree arg1 = CALL_EXPR_ARG (exp, 1);
23091 rtx op0 = expand_normal (arg0);
23092 rtx op1 = expand_normal (arg1);
23093 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23094 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23095 enum rtx_code comparison = d->comparison;
23097 if (VECTOR_MODE_P (mode0))
23098 op0 = safe_vector_operand (op0, mode0);
23099 if (VECTOR_MODE_P (mode1))
23100 op1 = safe_vector_operand (op1, mode1);
23102 target = gen_reg_rtx (SImode);
23103 emit_move_insn (target, const0_rtx);
23104 target = gen_rtx_SUBREG (QImode, target, 0);
23106 if ((optimize && !register_operand (op0, mode0))
23107 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23108 op0 = copy_to_mode_reg (mode0, op0);
23109 if ((optimize && !register_operand (op1, mode1))
23110 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23111 op1 = copy_to_mode_reg (mode1, op1);
23113 pat = GEN_FCN (d->icode) (op0, op1);
23114 if (! pat)
23115 return 0;
23116 emit_insn (pat);
23117 emit_insn (gen_rtx_SET (VOIDmode,
23118 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23119 gen_rtx_fmt_ee (comparison, QImode,
23120 SET_DEST (pat),
23121 const0_rtx)));
23123 return SUBREG_REG (target);
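/* Illustrative sketch, not part of this file: ptest likewise only sets
   EFLAGS; _mm_testz_si128 from <smmintrin.h> reads ZF back through the
   STRICT_LOW_PART sequence above.  Compile with -msse4.1.  */
#if 0
#include <smmintrin.h>

static int
example_all_bits_clear (__m128i mask, __m128i value)
{
  return _mm_testz_si128 (mask, value);
}
#endif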
23126 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23128 static rtx
23129 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23130 tree exp, rtx target)
23132 rtx pat;
23133 tree arg0 = CALL_EXPR_ARG (exp, 0);
23134 tree arg1 = CALL_EXPR_ARG (exp, 1);
23135 tree arg2 = CALL_EXPR_ARG (exp, 2);
23136 tree arg3 = CALL_EXPR_ARG (exp, 3);
23137 tree arg4 = CALL_EXPR_ARG (exp, 4);
23138 rtx scratch0, scratch1;
23139 rtx op0 = expand_normal (arg0);
23140 rtx op1 = expand_normal (arg1);
23141 rtx op2 = expand_normal (arg2);
23142 rtx op3 = expand_normal (arg3);
23143 rtx op4 = expand_normal (arg4);
23144 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23146 tmode0 = insn_data[d->icode].operand[0].mode;
23147 tmode1 = insn_data[d->icode].operand[1].mode;
23148 modev2 = insn_data[d->icode].operand[2].mode;
23149 modei3 = insn_data[d->icode].operand[3].mode;
23150 modev4 = insn_data[d->icode].operand[4].mode;
23151 modei5 = insn_data[d->icode].operand[5].mode;
23152 modeimm = insn_data[d->icode].operand[6].mode;
23154 if (VECTOR_MODE_P (modev2))
23155 op0 = safe_vector_operand (op0, modev2);
23156 if (VECTOR_MODE_P (modev4))
23157 op2 = safe_vector_operand (op2, modev4);
23159 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23160 op0 = copy_to_mode_reg (modev2, op0);
23161 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23162 op1 = copy_to_mode_reg (modei3, op1);
23163 if ((optimize && !register_operand (op2, modev4))
23164 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23165 op2 = copy_to_mode_reg (modev4, op2);
23166 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23167 op3 = copy_to_mode_reg (modei5, op3);
23169 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23171 error ("the fifth argument must be a 8-bit immediate");
23172 return const0_rtx;
23175 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23177 if (optimize || !target
23178 || GET_MODE (target) != tmode0
23179 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23180 target = gen_reg_rtx (tmode0);
23182 scratch1 = gen_reg_rtx (tmode1);
23184 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23186 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23188 if (optimize || !target
23189 || GET_MODE (target) != tmode1
23190 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23191 target = gen_reg_rtx (tmode1);
23193 scratch0 = gen_reg_rtx (tmode0);
23195 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23197 else
23199 gcc_assert (d->flag);
23201 scratch0 = gen_reg_rtx (tmode0);
23202 scratch1 = gen_reg_rtx (tmode1);
23204 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23207 if (! pat)
23208 return 0;
23210 emit_insn (pat);
23212 if (d->flag)
23214 target = gen_reg_rtx (SImode);
23215 emit_move_insn (target, const0_rtx);
23216 target = gen_rtx_SUBREG (QImode, target, 0);
23218 emit_insn
23219 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23220 gen_rtx_fmt_ee (EQ, QImode,
23221 gen_rtx_REG ((enum machine_mode) d->flag,
23222 FLAGS_REG),
23223 const0_rtx)));
23224 return SUBREG_REG (target);
23226 else
23227 return target;
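/* Illustrative sketch, not part of this file: _mm_cmpestri from
   <nmmintrin.h> takes the IX86_BUILTIN_PCMPESTRI128 path above; the fifth
   argument must be an 8-bit compile-time constant or the expander errors
   out.  Compile with -msse4.2.  */
#if 0
#include <nmmintrin.h>

static int
example_first_equal_byte (__m128i needle, int nlen, __m128i hay, int hlen)
{
  return _mm_cmpestri (needle, nlen, hay, hlen,
                       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH);
}
#endif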
23231 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23233 static rtx
23234 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23235 tree exp, rtx target)
23237 rtx pat;
23238 tree arg0 = CALL_EXPR_ARG (exp, 0);
23239 tree arg1 = CALL_EXPR_ARG (exp, 1);
23240 tree arg2 = CALL_EXPR_ARG (exp, 2);
23241 rtx scratch0, scratch1;
23242 rtx op0 = expand_normal (arg0);
23243 rtx op1 = expand_normal (arg1);
23244 rtx op2 = expand_normal (arg2);
23245 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23247 tmode0 = insn_data[d->icode].operand[0].mode;
23248 tmode1 = insn_data[d->icode].operand[1].mode;
23249 modev2 = insn_data[d->icode].operand[2].mode;
23250 modev3 = insn_data[d->icode].operand[3].mode;
23251 modeimm = insn_data[d->icode].operand[4].mode;
23253 if (VECTOR_MODE_P (modev2))
23254 op0 = safe_vector_operand (op0, modev2);
23255 if (VECTOR_MODE_P (modev3))
23256 op1 = safe_vector_operand (op1, modev3);
23258 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23259 op0 = copy_to_mode_reg (modev2, op0);
23260 if ((optimize && !register_operand (op1, modev3))
23261 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23262 op1 = copy_to_mode_reg (modev3, op1);
23264 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23266 error ("the third argument must be a 8-bit immediate");
23267 return const0_rtx;
23270 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23272 if (optimize || !target
23273 || GET_MODE (target) != tmode0
23274 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23275 target = gen_reg_rtx (tmode0);
23277 scratch1 = gen_reg_rtx (tmode1);
23279 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23281 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23283 if (optimize || !target
23284 || GET_MODE (target) != tmode1
23285 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23286 target = gen_reg_rtx (tmode1);
23288 scratch0 = gen_reg_rtx (tmode0);
23290 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23292 else
23294 gcc_assert (d->flag);
23296 scratch0 = gen_reg_rtx (tmode0);
23297 scratch1 = gen_reg_rtx (tmode1);
23299 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23302 if (! pat)
23303 return 0;
23305 emit_insn (pat);
23307 if (d->flag)
23309 target = gen_reg_rtx (SImode);
23310 emit_move_insn (target, const0_rtx);
23311 target = gen_rtx_SUBREG (QImode, target, 0);
23313 emit_insn
23314 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23315 gen_rtx_fmt_ee (EQ, QImode,
23316 gen_rtx_REG ((enum machine_mode) d->flag,
23317 FLAGS_REG),
23318 const0_rtx)));
23319 return SUBREG_REG (target);
23321 else
23322 return target;
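/* Illustrative sketch, not part of this file: the EFLAGS-returning forms
   take the `d->flag' path above, extracting one condition bit from
   FLAGS_REG.  _mm_cmpistrc from <nmmintrin.h> tests CF; compile with
   -msse4.2.  */
#if 0
#include <nmmintrin.h>

static int
example_any_match (__m128i a, __m128i b)
{
  return _mm_cmpistrc (a, b, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
}
#endif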
23325 /* Subroutine of ix86_expand_builtin to take care of insns with
23326 variable number of operands. */
23328 static rtx
23329 ix86_expand_args_builtin (const struct builtin_description *d,
23330 tree exp, rtx target)
23332 rtx pat, real_target;
23333 unsigned int i, nargs;
23334 unsigned int nargs_constant = 0;
23335 int num_memory = 0;
23336 struct
23338 rtx op;
23339 enum machine_mode mode;
23340 } args[4];
23341 bool last_arg_count = false;
23342 enum insn_code icode = d->icode;
23343 const struct insn_data *insn_p = &insn_data[icode];
23344 enum machine_mode tmode = insn_p->operand[0].mode;
23345 enum machine_mode rmode = VOIDmode;
23346 bool swap = false;
23347 enum rtx_code comparison = d->comparison;
23349 switch ((enum ix86_builtin_func_type) d->flag)
23351 case INT_FTYPE_V8SF_V8SF_PTEST:
23352 case INT_FTYPE_V4DI_V4DI_PTEST:
23353 case INT_FTYPE_V4DF_V4DF_PTEST:
23354 case INT_FTYPE_V4SF_V4SF_PTEST:
23355 case INT_FTYPE_V2DI_V2DI_PTEST:
23356 case INT_FTYPE_V2DF_V2DF_PTEST:
23357 return ix86_expand_sse_ptest (d, exp, target);
23358 case FLOAT128_FTYPE_FLOAT128:
23359 case FLOAT_FTYPE_FLOAT:
23360 case INT_FTYPE_INT:
23361 case UINT64_FTYPE_INT:
23362 case UINT16_FTYPE_UINT16:
23363 case INT64_FTYPE_INT64:
23364 case INT64_FTYPE_V4SF:
23365 case INT64_FTYPE_V2DF:
23366 case INT_FTYPE_V16QI:
23367 case INT_FTYPE_V8QI:
23368 case INT_FTYPE_V8SF:
23369 case INT_FTYPE_V4DF:
23370 case INT_FTYPE_V4SF:
23371 case INT_FTYPE_V2DF:
23372 case V16QI_FTYPE_V16QI:
23373 case V8SI_FTYPE_V8SF:
23374 case V8SI_FTYPE_V4SI:
23375 case V8HI_FTYPE_V8HI:
23376 case V8HI_FTYPE_V16QI:
23377 case V8QI_FTYPE_V8QI:
23378 case V8SF_FTYPE_V8SF:
23379 case V8SF_FTYPE_V8SI:
23380 case V8SF_FTYPE_V4SF:
23381 case V4SI_FTYPE_V4SI:
23382 case V4SI_FTYPE_V16QI:
23383 case V4SI_FTYPE_V4SF:
23384 case V4SI_FTYPE_V8SI:
23385 case V4SI_FTYPE_V8HI:
23386 case V4SI_FTYPE_V4DF:
23387 case V4SI_FTYPE_V2DF:
23388 case V4HI_FTYPE_V4HI:
23389 case V4DF_FTYPE_V4DF:
23390 case V4DF_FTYPE_V4SI:
23391 case V4DF_FTYPE_V4SF:
23392 case V4DF_FTYPE_V2DF:
23393 case V4SF_FTYPE_V4SF:
23394 case V4SF_FTYPE_V4SI:
23395 case V4SF_FTYPE_V8SF:
23396 case V4SF_FTYPE_V4DF:
23397 case V4SF_FTYPE_V2DF:
23398 case V2DI_FTYPE_V2DI:
23399 case V2DI_FTYPE_V16QI:
23400 case V2DI_FTYPE_V8HI:
23401 case V2DI_FTYPE_V4SI:
23402 case V2DF_FTYPE_V2DF:
23403 case V2DF_FTYPE_V4SI:
23404 case V2DF_FTYPE_V4DF:
23405 case V2DF_FTYPE_V4SF:
23406 case V2DF_FTYPE_V2SI:
23407 case V2SI_FTYPE_V2SI:
23408 case V2SI_FTYPE_V4SF:
23409 case V2SI_FTYPE_V2SF:
23410 case V2SI_FTYPE_V2DF:
23411 case V2SF_FTYPE_V2SF:
23412 case V2SF_FTYPE_V2SI:
23413 nargs = 1;
23414 break;
23415 case V4SF_FTYPE_V4SF_VEC_MERGE:
23416 case V2DF_FTYPE_V2DF_VEC_MERGE:
23417 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23418 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23419 case V16QI_FTYPE_V16QI_V16QI:
23420 case V16QI_FTYPE_V8HI_V8HI:
23421 case V8QI_FTYPE_V8QI_V8QI:
23422 case V8QI_FTYPE_V4HI_V4HI:
23423 case V8HI_FTYPE_V8HI_V8HI:
23424 case V8HI_FTYPE_V16QI_V16QI:
23425 case V8HI_FTYPE_V4SI_V4SI:
23426 case V8SF_FTYPE_V8SF_V8SF:
23427 case V8SF_FTYPE_V8SF_V8SI:
23428 case V4SI_FTYPE_V4SI_V4SI:
23429 case V4SI_FTYPE_V8HI_V8HI:
23430 case V4SI_FTYPE_V4SF_V4SF:
23431 case V4SI_FTYPE_V2DF_V2DF:
23432 case V4HI_FTYPE_V4HI_V4HI:
23433 case V4HI_FTYPE_V8QI_V8QI:
23434 case V4HI_FTYPE_V2SI_V2SI:
23435 case V4DF_FTYPE_V4DF_V4DF:
23436 case V4DF_FTYPE_V4DF_V4DI:
23437 case V4SF_FTYPE_V4SF_V4SF:
23438 case V4SF_FTYPE_V4SF_V4SI:
23439 case V4SF_FTYPE_V4SF_V2SI:
23440 case V4SF_FTYPE_V4SF_V2DF:
23441 case V4SF_FTYPE_V4SF_DI:
23442 case V4SF_FTYPE_V4SF_SI:
23443 case V2DI_FTYPE_V2DI_V2DI:
23444 case V2DI_FTYPE_V16QI_V16QI:
23445 case V2DI_FTYPE_V4SI_V4SI:
23446 case V2DI_FTYPE_V2DI_V16QI:
23447 case V2DI_FTYPE_V2DF_V2DF:
23448 case V2SI_FTYPE_V2SI_V2SI:
23449 case V2SI_FTYPE_V4HI_V4HI:
23450 case V2SI_FTYPE_V2SF_V2SF:
23451 case V2DF_FTYPE_V2DF_V2DF:
23452 case V2DF_FTYPE_V2DF_V4SF:
23453 case V2DF_FTYPE_V2DF_V2DI:
23454 case V2DF_FTYPE_V2DF_DI:
23455 case V2DF_FTYPE_V2DF_SI:
23456 case V2SF_FTYPE_V2SF_V2SF:
23457 case V1DI_FTYPE_V1DI_V1DI:
23458 case V1DI_FTYPE_V8QI_V8QI:
23459 case V1DI_FTYPE_V2SI_V2SI:
23460 if (comparison == UNKNOWN)
23461 return ix86_expand_binop_builtin (icode, exp, target);
23462 nargs = 2;
23463 break;
23464 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23465 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23466 gcc_assert (comparison != UNKNOWN);
23467 nargs = 2;
23468 swap = true;
23469 break;
23470 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23471 case V8HI_FTYPE_V8HI_SI_COUNT:
23472 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23473 case V4SI_FTYPE_V4SI_SI_COUNT:
23474 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23475 case V4HI_FTYPE_V4HI_SI_COUNT:
23476 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23477 case V2DI_FTYPE_V2DI_SI_COUNT:
23478 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23479 case V2SI_FTYPE_V2SI_SI_COUNT:
23480 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23481 case V1DI_FTYPE_V1DI_SI_COUNT:
23482 nargs = 2;
23483 last_arg_count = true;
23484 break;
23485 case UINT64_FTYPE_UINT64_UINT64:
23486 case UINT_FTYPE_UINT_UINT:
23487 case UINT_FTYPE_UINT_USHORT:
23488 case UINT_FTYPE_UINT_UCHAR:
23489 case UINT16_FTYPE_UINT16_INT:
23490 case UINT8_FTYPE_UINT8_INT:
23491 nargs = 2;
23492 break;
23493 case V2DI_FTYPE_V2DI_INT_CONVERT:
23494 nargs = 2;
23495 rmode = V2DImode;
23496 nargs_constant = 1;
23497 break;
23498 case V8HI_FTYPE_V8HI_INT:
23499 case V8SF_FTYPE_V8SF_INT:
23500 case V4SI_FTYPE_V4SI_INT:
23501 case V4SI_FTYPE_V8SI_INT:
23502 case V4HI_FTYPE_V4HI_INT:
23503 case V4DF_FTYPE_V4DF_INT:
23504 case V4SF_FTYPE_V4SF_INT:
23505 case V4SF_FTYPE_V8SF_INT:
23506 case V2DI_FTYPE_V2DI_INT:
23507 case V2DF_FTYPE_V2DF_INT:
23508 case V2DF_FTYPE_V4DF_INT:
23509 nargs = 2;
23510 nargs_constant = 1;
23511 break;
23512 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23513 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23514 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23515 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23516 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23517 nargs = 3;
23518 break;
23519 case V16QI_FTYPE_V16QI_V16QI_INT:
23520 case V8HI_FTYPE_V8HI_V8HI_INT:
23521 case V8SI_FTYPE_V8SI_V8SI_INT:
23522 case V8SI_FTYPE_V8SI_V4SI_INT:
23523 case V8SF_FTYPE_V8SF_V8SF_INT:
23524 case V8SF_FTYPE_V8SF_V4SF_INT:
23525 case V4SI_FTYPE_V4SI_V4SI_INT:
23526 case V4DF_FTYPE_V4DF_V4DF_INT:
23527 case V4DF_FTYPE_V4DF_V2DF_INT:
23528 case V4SF_FTYPE_V4SF_V4SF_INT:
23529 case V2DI_FTYPE_V2DI_V2DI_INT:
23530 case V2DF_FTYPE_V2DF_V2DF_INT:
23531 nargs = 3;
23532 nargs_constant = 1;
23533 break;
23534 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23535 nargs = 3;
23536 rmode = V2DImode;
23537 nargs_constant = 1;
23538 break;
23539 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23540 nargs = 3;
23541 rmode = DImode;
23542 nargs_constant = 1;
23543 break;
23544 case V2DI_FTYPE_V2DI_UINT_UINT:
23545 nargs = 3;
23546 nargs_constant = 2;
23547 break;
23548 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23549 nargs = 4;
23550 nargs_constant = 2;
23551 break;
23552 default:
23553 gcc_unreachable ();
23556 gcc_assert (nargs <= ARRAY_SIZE (args));
23558 if (comparison != UNKNOWN)
23560 gcc_assert (nargs == 2);
23561 return ix86_expand_sse_compare (d, exp, target, swap);
23564 if (rmode == VOIDmode || rmode == tmode)
23566 if (optimize
23567 || target == 0
23568 || GET_MODE (target) != tmode
23569 || ! (*insn_p->operand[0].predicate) (target, tmode))
23570 target = gen_reg_rtx (tmode);
23571 real_target = target;
23573 else
23575 target = gen_reg_rtx (rmode);
23576 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23579 for (i = 0; i < nargs; i++)
23581 tree arg = CALL_EXPR_ARG (exp, i);
23582 rtx op = expand_normal (arg);
23583 enum machine_mode mode = insn_p->operand[i + 1].mode;
23584 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23586 if (last_arg_count && (i + 1) == nargs)
23588 /* SIMD shift insns take either an 8-bit immediate or a
23589 register as the count. But the builtin functions take int as
23590 the count. If the count doesn't match, we put it in a register. */
23591 if (!match)
23593 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23594 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23595 op = copy_to_reg (op);
23598 else if ((nargs - i) <= nargs_constant)
23600 if (!match)
23601 switch (icode)
23603 case CODE_FOR_sse4_1_roundpd:
23604 case CODE_FOR_sse4_1_roundps:
23605 case CODE_FOR_sse4_1_roundsd:
23606 case CODE_FOR_sse4_1_roundss:
23607 case CODE_FOR_sse4_1_blendps:
23608 case CODE_FOR_avx_blendpd256:
23609 case CODE_FOR_avx_vpermilv4df:
23610 case CODE_FOR_avx_roundpd256:
23611 case CODE_FOR_avx_roundps256:
23612 error ("the last argument must be a 4-bit immediate");
23613 return const0_rtx;
23615 case CODE_FOR_sse4_1_blendpd:
23616 case CODE_FOR_avx_vpermilv2df:
23617 error ("the last argument must be a 2-bit immediate");
23618 return const0_rtx;
23620 case CODE_FOR_avx_vextractf128v4df:
23621 case CODE_FOR_avx_vextractf128v8sf:
23622 case CODE_FOR_avx_vextractf128v8si:
23623 case CODE_FOR_avx_vinsertf128v4df:
23624 case CODE_FOR_avx_vinsertf128v8sf:
23625 case CODE_FOR_avx_vinsertf128v8si:
23626 error ("the last argument must be a 1-bit immediate");
23627 return const0_rtx;
23629 case CODE_FOR_avx_cmpsdv2df3:
23630 case CODE_FOR_avx_cmpssv4sf3:
23631 case CODE_FOR_avx_cmppdv2df3:
23632 case CODE_FOR_avx_cmppsv4sf3:
23633 case CODE_FOR_avx_cmppdv4df3:
23634 case CODE_FOR_avx_cmppsv8sf3:
23635 error ("the last argument must be a 5-bit immediate");
23636 return const0_rtx;
23638 default:
23639 switch (nargs_constant)
23641 case 2:
23642 if ((nargs - i) == nargs_constant)
23644 error ("the next to last argument must be an 8-bit immediate");
23645 break;
23647 case 1:
23648 error ("the last argument must be an 8-bit immediate");
23649 break;
23650 default:
23651 gcc_unreachable ();
23653 return const0_rtx;
23656 else
23658 if (VECTOR_MODE_P (mode))
23659 op = safe_vector_operand (op, mode);
23661 /* If we aren't optimizing, only allow one memory operand to
23662 be generated. */
23663 if (memory_operand (op, mode))
23664 num_memory++;
23666 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23668 if (optimize || !match || num_memory > 1)
23669 op = copy_to_mode_reg (mode, op);
23671 else
23673 op = copy_to_reg (op);
23674 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23678 args[i].op = op;
23679 args[i].mode = mode;
23682 switch (nargs)
23684 case 1:
23685 pat = GEN_FCN (icode) (real_target, args[0].op);
23686 break;
23687 case 2:
23688 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23689 break;
23690 case 3:
23691 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23692 args[2].op);
23693 break;
23694 case 4:
23695 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23696 args[2].op, args[3].op);
23697 break;
23698 default:
23699 gcc_unreachable ();
23702 if (! pat)
23703 return 0;
23705 emit_insn (pat);
23706 return target;
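/* Illustrative sketch, not part of this file: two argument shapes handled
   above.  _mm_shuffle_epi32 needs a trailing 8-bit immediate
   (nargs_constant), while the count of _mm_slli_epi32 may live in a
   register (last_arg_count).  Compile with -msse2.  */
#if 0
#include <emmintrin.h>

static __m128i
example_shuffle_then_shift (__m128i v, int n)
{
  __m128i r = _mm_shuffle_epi32 (v, 0x1b);  /* immediate is required here */
  return _mm_slli_epi32 (r, n);             /* count may be a register */
}
#endif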
23709 /* Subroutine of ix86_expand_builtin to take care of special insns
23710 with variable number of operands. */
23712 static rtx
23713 ix86_expand_special_args_builtin (const struct builtin_description *d,
23714 tree exp, rtx target)
23716 tree arg;
23717 rtx pat, op;
23718 unsigned int i, nargs, arg_adjust, memory;
23719 struct
23721 rtx op;
23722 enum machine_mode mode;
23723 } args[3];
23724 enum insn_code icode = d->icode;
23725 bool last_arg_constant = false;
23726 const struct insn_data *insn_p = &insn_data[icode];
23727 enum machine_mode tmode = insn_p->operand[0].mode;
23728 enum { load, store } klass;
23730 switch ((enum ix86_builtin_func_type) d->flag)
23732 case VOID_FTYPE_VOID:
23733 emit_insn (GEN_FCN (icode) (target));
23734 return 0;
23735 case UINT64_FTYPE_VOID:
23736 nargs = 0;
23737 klass = load;
23738 memory = 0;
23739 break;
23740 case UINT64_FTYPE_PUNSIGNED:
23741 case V2DI_FTYPE_PV2DI:
23742 case V32QI_FTYPE_PCCHAR:
23743 case V16QI_FTYPE_PCCHAR:
23744 case V8SF_FTYPE_PCV4SF:
23745 case V8SF_FTYPE_PCFLOAT:
23746 case V4SF_FTYPE_PCFLOAT:
23747 case V4DF_FTYPE_PCV2DF:
23748 case V4DF_FTYPE_PCDOUBLE:
23749 case V2DF_FTYPE_PCDOUBLE:
23750 case VOID_FTYPE_PVOID:
23751 nargs = 1;
23752 klass = load;
23753 memory = 0;
23754 break;
23755 case VOID_FTYPE_PV2SF_V4SF:
23756 case VOID_FTYPE_PV4DI_V4DI:
23757 case VOID_FTYPE_PV2DI_V2DI:
23758 case VOID_FTYPE_PCHAR_V32QI:
23759 case VOID_FTYPE_PCHAR_V16QI:
23760 case VOID_FTYPE_PFLOAT_V8SF:
23761 case VOID_FTYPE_PFLOAT_V4SF:
23762 case VOID_FTYPE_PDOUBLE_V4DF:
23763 case VOID_FTYPE_PDOUBLE_V2DF:
23764 case VOID_FTYPE_PULONGLONG_ULONGLONG:
23765 case VOID_FTYPE_PINT_INT:
23766 nargs = 1;
23767 klass = store;
23768 /* Reserve memory operand for target. */
23769 memory = ARRAY_SIZE (args);
23770 break;
23771 case V4SF_FTYPE_V4SF_PCV2SF:
23772 case V2DF_FTYPE_V2DF_PCDOUBLE:
23773 nargs = 2;
23774 klass = load;
23775 memory = 1;
23776 break;
23777 case V8SF_FTYPE_PCV8SF_V8SF:
23778 case V4DF_FTYPE_PCV4DF_V4DF:
23779 case V4SF_FTYPE_PCV4SF_V4SF:
23780 case V2DF_FTYPE_PCV2DF_V2DF:
23781 nargs = 2;
23782 klass = load;
23783 memory = 0;
23784 break;
23785 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23786 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23787 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23788 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23789 nargs = 2;
23790 klass = store;
23791 /* Reserve memory operand for target. */
23792 memory = ARRAY_SIZE (args);
23793 break;
23794 case VOID_FTYPE_UINT_UINT_UINT:
23795 case VOID_FTYPE_UINT64_UINT_UINT:
23796 case UCHAR_FTYPE_UINT_UINT_UINT:
23797 case UCHAR_FTYPE_UINT64_UINT_UINT:
23798 nargs = 3;
23799 klass = load;
23800 memory = ARRAY_SIZE (args);
23801 last_arg_constant = true;
23802 break;
23803 default:
23804 gcc_unreachable ();
23807 gcc_assert (nargs <= ARRAY_SIZE (args));
23809 if (klass == store)
23811 arg = CALL_EXPR_ARG (exp, 0);
23812 op = expand_normal (arg);
23813 gcc_assert (target == 0);
23814 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23815 arg_adjust = 1;
23817 else
23819 arg_adjust = 0;
23820 if (optimize
23821 || target == 0
23822 || GET_MODE (target) != tmode
23823 || ! (*insn_p->operand[0].predicate) (target, tmode))
23824 target = gen_reg_rtx (tmode);
23827 for (i = 0; i < nargs; i++)
23829 enum machine_mode mode = insn_p->operand[i + 1].mode;
23830 bool match;
23832 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
23833 op = expand_normal (arg);
23834 match = (*insn_p->operand[i + 1].predicate) (op, mode);
23836 if (last_arg_constant && (i + 1) == nargs)
23838 if (!match)
23839 switch (icode)
23841 case CODE_FOR_lwp_lwpvalsi3:
23842 case CODE_FOR_lwp_lwpvaldi3:
23843 case CODE_FOR_lwp_lwpinssi3:
23844 case CODE_FOR_lwp_lwpinsdi3:
23845 error ("the last argument must be a 32-bit immediate");
23846 return const0_rtx;
23848 default:
23849 error ("the last argument must be an 8-bit immediate");
23850 return const0_rtx;
23853 else
23855 if (i == memory)
23857 /* This must be the memory operand. */
23858 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
23859 gcc_assert (GET_MODE (op) == mode
23860 || GET_MODE (op) == VOIDmode);
23862 else
23864 /* This must be a register. */
23865 if (VECTOR_MODE_P (mode))
23866 op = safe_vector_operand (op, mode);
23868 gcc_assert (GET_MODE (op) == mode
23869 || GET_MODE (op) == VOIDmode);
23870 op = copy_to_mode_reg (mode, op);
23874 args[i].op = op;
23875 args[i].mode = mode;
23878 switch (nargs)
23880 case 0:
23881 pat = GEN_FCN (icode) (target);
23882 break;
23883 case 1:
23884 pat = GEN_FCN (icode) (target, args[0].op);
23885 break;
23886 case 2:
23887 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23888 break;
23889 case 3:
23890 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23891 break;
23892 default:
23893 gcc_unreachable ();
23896 if (! pat)
23897 return 0;
23898 emit_insn (pat);
23899 return klass == store ? 0 : target;
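/* Illustrative sketch, not part of this file: unaligned loads and stores
   are `special' builtins whose memory operand is synthesized above rather
   than matched by a normal operand predicate.  Compile with -msse2.  */
#if 0
#include <emmintrin.h>

static void
example_copy_unaligned (double *dst, const double *src)
{
  __m128d v = _mm_loadu_pd (src);  /* V2DF_FTYPE_PCDOUBLE, klass == load */
  _mm_storeu_pd (dst, v);          /* VOID_FTYPE_PDOUBLE_V2DF, klass == store */
}
#endif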
23902 /* Return the integer constant in ARG. Constrain it to be in the range
23903 of the subparts of VEC_TYPE; issue an error if not. */
23905 static int
23906 get_element_number (tree vec_type, tree arg)
23908 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
23910 if (!host_integerp (arg, 1)
23911 || (elt = tree_low_cst (arg, 1), elt > max))
23913 error ("selector must be an integer constant in the range 0..%wi", max);
23914 return 0;
23917 return elt;
23920 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
23921 ix86_expand_vector_init. We DO have language-level syntax for this, in
23922 the form of (type){ init-list }. Except that since we can't place emms
23923 instructions from inside the compiler, we can't allow the use of MMX
23924 registers unless the user explicitly asks for it. So we do *not* define
23925 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
23926 we have builtins invoked by mmintrin.h that give us license to emit
23927 these sorts of instructions. */
23929 static rtx
23930 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
23932 enum machine_mode tmode = TYPE_MODE (type);
23933 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
23934 int i, n_elt = GET_MODE_NUNITS (tmode);
23935 rtvec v = rtvec_alloc (n_elt);
23937 gcc_assert (VECTOR_MODE_P (tmode));
23938 gcc_assert (call_expr_nargs (exp) == n_elt);
23940 for (i = 0; i < n_elt; ++i)
23942 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
23943 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
23946 if (!target || !register_operand (target, tmode))
23947 target = gen_reg_rtx (tmode);
23949 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
23950 return target;
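/* Illustrative sketch, not part of this file: the MMX initializers are only
   exposed through builtins, as explained above.  _mm_setr_pi16 from
   <mmintrin.h> goes through IX86_BUILTIN_VEC_INIT_V4HI; compile with
   -mmmx.  */
#if 0
#include <mmintrin.h>

static __m64
example_four_halfwords (short a, short b, short c, short d)
{
  return _mm_setr_pi16 (a, b, c, d);
}
#endif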
23953 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
23954 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
23955 had a language-level syntax for referencing vector elements. */
23957 static rtx
23958 ix86_expand_vec_ext_builtin (tree exp, rtx target)
23960 enum machine_mode tmode, mode0;
23961 tree arg0, arg1;
23962 int elt;
23963 rtx op0;
23965 arg0 = CALL_EXPR_ARG (exp, 0);
23966 arg1 = CALL_EXPR_ARG (exp, 1);
23968 op0 = expand_normal (arg0);
23969 elt = get_element_number (TREE_TYPE (arg0), arg1);
23971 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
23972 mode0 = TYPE_MODE (TREE_TYPE (arg0));
23973 gcc_assert (VECTOR_MODE_P (mode0));
23975 op0 = force_reg (mode0, op0);
23977 if (optimize || !target || !register_operand (target, tmode))
23978 target = gen_reg_rtx (tmode);
23980 ix86_expand_vector_extract (true, target, op0, elt);
23982 return target;
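/* Illustrative sketch, not part of this file: the selector must be a
   constant within the vector bounds, as enforced by get_element_number.
   _mm_extract_pi16 from <xmmintrin.h> uses the V4HI extract builtin;
   compile with -msse (pextrw needs SSE or 3DNow!A).  */
#if 0
#include <xmmintrin.h>

static int
example_third_halfword (__m64 v)
{
  return _mm_extract_pi16 (v, 2);
}
#endif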
23985 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
23986 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
23987 a language-level syntax for referencing vector elements. */
23989 static rtx
23990 ix86_expand_vec_set_builtin (tree exp)
23992 enum machine_mode tmode, mode1;
23993 tree arg0, arg1, arg2;
23994 int elt;
23995 rtx op0, op1, target;
23997 arg0 = CALL_EXPR_ARG (exp, 0);
23998 arg1 = CALL_EXPR_ARG (exp, 1);
23999 arg2 = CALL_EXPR_ARG (exp, 2);
24001 tmode = TYPE_MODE (TREE_TYPE (arg0));
24002 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24003 gcc_assert (VECTOR_MODE_P (tmode));
24005 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24006 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24007 elt = get_element_number (TREE_TYPE (arg0), arg2);
24009 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24010 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24012 op0 = force_reg (tmode, op0);
24013 op1 = force_reg (mode1, op1);
24015 /* OP0 is the source of these builtin functions and shouldn't be
24016 modified. Create a copy, use it, and return it as the target. */
24017 target = gen_reg_rtx (tmode);
24018 emit_move_insn (target, op0);
24019 ix86_expand_vector_set (true, target, op1, elt);
24021 return target;
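/* Illustrative sketch, not part of this file: the source vector is copied
   first (see the comment above), so the user's operand is never clobbered.
   _mm_insert_pi16 from <xmmintrin.h> is the V4HI case; compile with -msse.  */
#if 0
#include <xmmintrin.h>

static __m64
example_replace_low_halfword (__m64 v, short x)
{
  return _mm_insert_pi16 (v, x, 0);
}
#endif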
24024 /* Expand an expression EXP that calls a built-in function,
24025 with result going to TARGET if that's convenient
24026 (and in mode MODE if that's convenient).
24027 SUBTARGET may be used as the target for computing one of EXP's operands.
24028 IGNORE is nonzero if the value is to be ignored. */
24030 static rtx
24031 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24032 enum machine_mode mode ATTRIBUTE_UNUSED,
24033 int ignore ATTRIBUTE_UNUSED)
24035 const struct builtin_description *d;
24036 size_t i;
24037 enum insn_code icode;
24038 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24039 tree arg0, arg1, arg2;
24040 rtx op0, op1, op2, pat;
24041 enum machine_mode mode0, mode1, mode2;
24042 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24044 /* Determine whether the builtin function is available under the current ISA.
24045 Originally the builtin was not created if it wasn't applicable to the
24046 current ISA based on the command line switches. With function specific
24047 options, we need to check in the context of the function making the call
24048 whether it is supported. */
24049 if (ix86_builtins_isa[fcode].isa
24050 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24052 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24053 NULL, NULL, false);
24055 if (!opts)
24056 error ("%qE needs unknown isa option", fndecl);
24057 else
24059 gcc_assert (opts != NULL);
24060 error ("%qE needs isa option %s", fndecl, opts);
24061 free (opts);
24063 return const0_rtx;
24066 switch (fcode)
24068 case IX86_BUILTIN_MASKMOVQ:
24069 case IX86_BUILTIN_MASKMOVDQU:
24070 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24071 ? CODE_FOR_mmx_maskmovq
24072 : CODE_FOR_sse2_maskmovdqu);
24073 /* Note the arg order is different from the operand order. */
24074 arg1 = CALL_EXPR_ARG (exp, 0);
24075 arg2 = CALL_EXPR_ARG (exp, 1);
24076 arg0 = CALL_EXPR_ARG (exp, 2);
24077 op0 = expand_normal (arg0);
24078 op1 = expand_normal (arg1);
24079 op2 = expand_normal (arg2);
24080 mode0 = insn_data[icode].operand[0].mode;
24081 mode1 = insn_data[icode].operand[1].mode;
24082 mode2 = insn_data[icode].operand[2].mode;
24084 op0 = force_reg (Pmode, op0);
24085 op0 = gen_rtx_MEM (mode1, op0);
24087 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24088 op0 = copy_to_mode_reg (mode0, op0);
24089 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24090 op1 = copy_to_mode_reg (mode1, op1);
24091 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24092 op2 = copy_to_mode_reg (mode2, op2);
24093 pat = GEN_FCN (icode) (op0, op1, op2);
24094 if (! pat)
24095 return 0;
24096 emit_insn (pat);
24097 return 0;
24099 case IX86_BUILTIN_LDMXCSR:
24100 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24101 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24102 emit_move_insn (target, op0);
24103 emit_insn (gen_sse_ldmxcsr (target));
24104 return 0;
24106 case IX86_BUILTIN_STMXCSR:
24107 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24108 emit_insn (gen_sse_stmxcsr (target));
24109 return copy_to_mode_reg (SImode, target);
24111 case IX86_BUILTIN_CLFLUSH:
24112 arg0 = CALL_EXPR_ARG (exp, 0);
24113 op0 = expand_normal (arg0);
24114 icode = CODE_FOR_sse2_clflush;
24115 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24116 op0 = copy_to_mode_reg (Pmode, op0);
24118 emit_insn (gen_sse2_clflush (op0));
24119 return 0;
24121 case IX86_BUILTIN_MONITOR:
24122 arg0 = CALL_EXPR_ARG (exp, 0);
24123 arg1 = CALL_EXPR_ARG (exp, 1);
24124 arg2 = CALL_EXPR_ARG (exp, 2);
24125 op0 = expand_normal (arg0);
24126 op1 = expand_normal (arg1);
24127 op2 = expand_normal (arg2);
24128 if (!REG_P (op0))
24129 op0 = copy_to_mode_reg (Pmode, op0);
24130 if (!REG_P (op1))
24131 op1 = copy_to_mode_reg (SImode, op1);
24132 if (!REG_P (op2))
24133 op2 = copy_to_mode_reg (SImode, op2);
24134 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24135 return 0;
24137 case IX86_BUILTIN_MWAIT:
24138 arg0 = CALL_EXPR_ARG (exp, 0);
24139 arg1 = CALL_EXPR_ARG (exp, 1);
24140 op0 = expand_normal (arg0);
24141 op1 = expand_normal (arg1);
24142 if (!REG_P (op0))
24143 op0 = copy_to_mode_reg (SImode, op0);
24144 if (!REG_P (op1))
24145 op1 = copy_to_mode_reg (SImode, op1);
24146 emit_insn (gen_sse3_mwait (op0, op1));
24147 return 0;
24149 case IX86_BUILTIN_VEC_INIT_V2SI:
24150 case IX86_BUILTIN_VEC_INIT_V4HI:
24151 case IX86_BUILTIN_VEC_INIT_V8QI:
24152 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24154 case IX86_BUILTIN_VEC_EXT_V2DF:
24155 case IX86_BUILTIN_VEC_EXT_V2DI:
24156 case IX86_BUILTIN_VEC_EXT_V4SF:
24157 case IX86_BUILTIN_VEC_EXT_V4SI:
24158 case IX86_BUILTIN_VEC_EXT_V8HI:
24159 case IX86_BUILTIN_VEC_EXT_V2SI:
24160 case IX86_BUILTIN_VEC_EXT_V4HI:
24161 case IX86_BUILTIN_VEC_EXT_V16QI:
24162 return ix86_expand_vec_ext_builtin (exp, target);
24164 case IX86_BUILTIN_VEC_SET_V2DI:
24165 case IX86_BUILTIN_VEC_SET_V4SF:
24166 case IX86_BUILTIN_VEC_SET_V4SI:
24167 case IX86_BUILTIN_VEC_SET_V8HI:
24168 case IX86_BUILTIN_VEC_SET_V4HI:
24169 case IX86_BUILTIN_VEC_SET_V16QI:
24170 return ix86_expand_vec_set_builtin (exp);
24172 case IX86_BUILTIN_VEC_PERM_V2DF:
24173 case IX86_BUILTIN_VEC_PERM_V4SF:
24174 case IX86_BUILTIN_VEC_PERM_V2DI:
24175 case IX86_BUILTIN_VEC_PERM_V4SI:
24176 case IX86_BUILTIN_VEC_PERM_V8HI:
24177 case IX86_BUILTIN_VEC_PERM_V16QI:
24178 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24179 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24180 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24181 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24182 case IX86_BUILTIN_VEC_PERM_V4DF:
24183 case IX86_BUILTIN_VEC_PERM_V8SF:
24184 return ix86_expand_vec_perm_builtin (exp);
24186 case IX86_BUILTIN_INFQ:
24187 case IX86_BUILTIN_HUGE_VALQ:
24189 REAL_VALUE_TYPE inf;
24190 rtx tmp;
24192 real_inf (&inf);
24193 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24195 tmp = validize_mem (force_const_mem (mode, tmp));
24197 if (target == 0)
24198 target = gen_reg_rtx (mode);
24200 emit_move_insn (target, tmp);
24201 return target;
24204 case IX86_BUILTIN_LLWPCB:
24205 arg0 = CALL_EXPR_ARG (exp, 0);
24206 op0 = expand_normal (arg0);
24207 icode = CODE_FOR_lwp_llwpcb;
24208 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24209 op0 = copy_to_mode_reg (Pmode, op0);
24210 emit_insn (gen_lwp_llwpcb (op0));
24211 return 0;
24213 case IX86_BUILTIN_SLWPCB:
24214 icode = CODE_FOR_lwp_slwpcb;
24215 if (!target
24216 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24217 target = gen_reg_rtx (Pmode);
24218 emit_insn (gen_lwp_slwpcb (target));
24219 return target;
24221 default:
24222 break;
24225 for (i = 0, d = bdesc_special_args;
24226 i < ARRAY_SIZE (bdesc_special_args);
24227 i++, d++)
24228 if (d->code == fcode)
24229 return ix86_expand_special_args_builtin (d, exp, target);
24231 for (i = 0, d = bdesc_args;
24232 i < ARRAY_SIZE (bdesc_args);
24233 i++, d++)
24234 if (d->code == fcode)
24235 switch (fcode)
24237 case IX86_BUILTIN_FABSQ:
24238 case IX86_BUILTIN_COPYSIGNQ:
24239 if (!TARGET_SSE2)
24240 /* Emit a normal call if SSE2 isn't available. */
24241 return expand_call (exp, target, ignore);
24242 default:
24243 return ix86_expand_args_builtin (d, exp, target);
24246 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24247 if (d->code == fcode)
24248 return ix86_expand_sse_comi (d, exp, target);
24250 for (i = 0, d = bdesc_pcmpestr;
24251 i < ARRAY_SIZE (bdesc_pcmpestr);
24252 i++, d++)
24253 if (d->code == fcode)
24254 return ix86_expand_sse_pcmpestr (d, exp, target);
24256 for (i = 0, d = bdesc_pcmpistr;
24257 i < ARRAY_SIZE (bdesc_pcmpistr);
24258 i++, d++)
24259 if (d->code == fcode)
24260 return ix86_expand_sse_pcmpistr (d, exp, target);
24262 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24263 if (d->code == fcode)
24264 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24265 (enum ix86_builtin_func_type)
24266 d->flag, d->comparison);
24268 gcc_unreachable ();
24271 /* Returns a function decl for a vectorized version of the builtin function
24272 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24273 if it is not available. */
24275 static tree
24276 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
24277 tree type_in)
24279 enum machine_mode in_mode, out_mode;
24280 int in_n, out_n;
24282 if (TREE_CODE (type_out) != VECTOR_TYPE
24283 || TREE_CODE (type_in) != VECTOR_TYPE)
24284 return NULL_TREE;
24286 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24287 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24288 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24289 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24291 switch (fn)
24293 case BUILT_IN_SQRT:
24294 if (out_mode == DFmode && out_n == 2
24295 && in_mode == DFmode && in_n == 2)
24296 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24297 break;
24299 case BUILT_IN_SQRTF:
24300 if (out_mode == SFmode && out_n == 4
24301 && in_mode == SFmode && in_n == 4)
24302 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24303 break;
24305 case BUILT_IN_LRINT:
24306 if (out_mode == SImode && out_n == 4
24307 && in_mode == DFmode && in_n == 2)
24308 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24309 break;
24311 case BUILT_IN_LRINTF:
24312 if (out_mode == SImode && out_n == 4
24313 && in_mode == SFmode && in_n == 4)
24314 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24315 break;
24317 case BUILT_IN_COPYSIGN:
24318 if (out_mode == DFmode && out_n == 2
24319 && in_mode == DFmode && in_n == 2)
24320 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24321 break;
24323 case BUILT_IN_COPYSIGNF:
24324 if (out_mode == SFmode && out_n == 4
24325 && in_mode == SFmode && in_n == 4)
24326 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24327 break;
24329 default:
24333 /* Dispatch to a handler for a vectorization library. */
24334 if (ix86_veclib_handler)
24335 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24336 type_in);
24338 return NULL_TREE;
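/* For illustration: when the vectorizer asks about sqrt over doubles it
   passes fn == BUILT_IN_SQRT with V2DF for both TYPE_OUT and TYPE_IN,
   and the DFmode/2-element check above hands back the decl stored for
   IX86_BUILTIN_SQRTPD (the sqrtpd builtin).  */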
24341 /* Handler for an SVML-style interface to
24342 a library with vectorized intrinsics. */
24344 static tree
24345 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24347 char name[20];
24348 tree fntype, new_fndecl, args;
24349 unsigned arity;
24350 const char *bname;
24351 enum machine_mode el_mode, in_mode;
24352 int n, in_n;
24354 /* The SVML is suitable for unsafe math only. */
24355 if (!flag_unsafe_math_optimizations)
24356 return NULL_TREE;
24358 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24359 n = TYPE_VECTOR_SUBPARTS (type_out);
24360 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24361 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24362 if (el_mode != in_mode
24363 || n != in_n)
24364 return NULL_TREE;
24366 switch (fn)
24368 case BUILT_IN_EXP:
24369 case BUILT_IN_LOG:
24370 case BUILT_IN_LOG10:
24371 case BUILT_IN_POW:
24372 case BUILT_IN_TANH:
24373 case BUILT_IN_TAN:
24374 case BUILT_IN_ATAN:
24375 case BUILT_IN_ATAN2:
24376 case BUILT_IN_ATANH:
24377 case BUILT_IN_CBRT:
24378 case BUILT_IN_SINH:
24379 case BUILT_IN_SIN:
24380 case BUILT_IN_ASINH:
24381 case BUILT_IN_ASIN:
24382 case BUILT_IN_COSH:
24383 case BUILT_IN_COS:
24384 case BUILT_IN_ACOSH:
24385 case BUILT_IN_ACOS:
24386 if (el_mode != DFmode || n != 2)
24387 return NULL_TREE;
24388 break;
24390 case BUILT_IN_EXPF:
24391 case BUILT_IN_LOGF:
24392 case BUILT_IN_LOG10F:
24393 case BUILT_IN_POWF:
24394 case BUILT_IN_TANHF:
24395 case BUILT_IN_TANF:
24396 case BUILT_IN_ATANF:
24397 case BUILT_IN_ATAN2F:
24398 case BUILT_IN_ATANHF:
24399 case BUILT_IN_CBRTF:
24400 case BUILT_IN_SINHF:
24401 case BUILT_IN_SINF:
24402 case BUILT_IN_ASINHF:
24403 case BUILT_IN_ASINF:
24404 case BUILT_IN_COSHF:
24405 case BUILT_IN_COSF:
24406 case BUILT_IN_ACOSHF:
24407 case BUILT_IN_ACOSF:
24408 if (el_mode != SFmode || n != 4)
24409 return NULL_TREE;
24410 break;
24412 default:
24413 return NULL_TREE;
24416 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24418 if (fn == BUILT_IN_LOGF)
24419 strcpy (name, "vmlsLn4");
24420 else if (fn == BUILT_IN_LOG)
24421 strcpy (name, "vmldLn2");
24422 else if (n == 4)
24424 sprintf (name, "vmls%s", bname+10);
24425 name[strlen (name)-1] = '4';
24427 else
24428 sprintf (name, "vmld%s2", bname+10);
24430 /* Convert to uppercase. */
24431 name[4] &= ~0x20;
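/* A worked example of the mangling above: for BUILT_IN_SINF with n == 4,
   bname is "__builtin_sinf", so bname+10 is "sinf"; the sprintf produces
   "vmlssinf", the final character is overwritten with '4' giving
   "vmlssin4", and clearing bit 0x20 of name[4] uppercases it to
   "vmlsSin4".  Similarly BUILT_IN_SIN with n == 2 becomes "vmldSin2".  */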
24433 arity = 0;
24434 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24435 args = TREE_CHAIN (args))
24436 arity++;
24438 if (arity == 1)
24439 fntype = build_function_type_list (type_out, type_in, NULL);
24440 else
24441 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24443 /* Build a function declaration for the vectorized function. */
24444 new_fndecl = build_decl (BUILTINS_LOCATION,
24445 FUNCTION_DECL, get_identifier (name), fntype);
24446 TREE_PUBLIC (new_fndecl) = 1;
24447 DECL_EXTERNAL (new_fndecl) = 1;
24448 DECL_IS_NOVOPS (new_fndecl) = 1;
24449 TREE_READONLY (new_fndecl) = 1;
24451 return new_fndecl;
24454 /* Handler for an ACML-style interface to
24455 a library with vectorized intrinsics. */
24457 static tree
24458 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24460 char name[20] = "__vr.._";
24461 tree fntype, new_fndecl, args;
24462 unsigned arity;
24463 const char *bname;
24464 enum machine_mode el_mode, in_mode;
24465 int n, in_n;
24467 /* The ACML is 64-bit only and suitable for unsafe math only, as
24468 it does not correctly support parts of IEEE (such as denormals)
24469 with the required precision. */
24470 if (!TARGET_64BIT
24471 || !flag_unsafe_math_optimizations)
24472 return NULL_TREE;
24474 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24475 n = TYPE_VECTOR_SUBPARTS (type_out);
24476 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24477 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24478 if (el_mode != in_mode
24479 || n != in_n)
24480 return NULL_TREE;
24482 switch (fn)
24484 case BUILT_IN_SIN:
24485 case BUILT_IN_COS:
24486 case BUILT_IN_EXP:
24487 case BUILT_IN_LOG:
24488 case BUILT_IN_LOG2:
24489 case BUILT_IN_LOG10:
24490 name[4] = 'd';
24491 name[5] = '2';
24492 if (el_mode != DFmode
24493 || n != 2)
24494 return NULL_TREE;
24495 break;
24497 case BUILT_IN_SINF:
24498 case BUILT_IN_COSF:
24499 case BUILT_IN_EXPF:
24500 case BUILT_IN_POWF:
24501 case BUILT_IN_LOGF:
24502 case BUILT_IN_LOG2F:
24503 case BUILT_IN_LOG10F:
24504 name[4] = 's';
24505 name[5] = '4';
24506 if (el_mode != SFmode
24507 || n != 4)
24508 return NULL_TREE;
24509 break;
24511 default:
24512 return NULL_TREE;
24515 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24516 sprintf (name + 7, "%s", bname+10);
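/* For example, BUILT_IN_SIN turns the "__vr.._" template into "__vrd2_"
   and appends "sin", yielding "__vrd2_sin"; BUILT_IN_SINF likewise
   yields "__vrs4_sinf".  */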
24518 arity = 0;
24519 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24520 args = TREE_CHAIN (args))
24521 arity++;
24523 if (arity == 1)
24524 fntype = build_function_type_list (type_out, type_in, NULL);
24525 else
24526 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24528 /* Build a function declaration for the vectorized function. */
24529 new_fndecl = build_decl (BUILTINS_LOCATION,
24530 FUNCTION_DECL, get_identifier (name), fntype);
24531 TREE_PUBLIC (new_fndecl) = 1;
24532 DECL_EXTERNAL (new_fndecl) = 1;
24533 DECL_IS_NOVOPS (new_fndecl) = 1;
24534 TREE_READONLY (new_fndecl) = 1;
24536 return new_fndecl;
24540 /* Returns a decl of a function that implements conversion of an integer vector
24541 into a floating-point vector, or vice-versa. TYPE is the type of the integer
24542 side of the conversion.
24543 Return NULL_TREE if it is not available. */
24545 static tree
24546 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
24548 if (! (TARGET_SSE2 && TREE_CODE (type) == VECTOR_TYPE))
24549 return NULL_TREE;
24551 switch (code)
24553 case FLOAT_EXPR:
24554 switch (TYPE_MODE (type))
24556 case V4SImode:
24557 return TYPE_UNSIGNED (type)
24558 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24559 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
24560 default:
24561 return NULL_TREE;
24564 case FIX_TRUNC_EXPR:
24565 switch (TYPE_MODE (type))
24567 case V4SImode:
24568 return TYPE_UNSIGNED (type)
24569 ? NULL_TREE
24570 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
24571 default:
24572 return NULL_TREE;
24574 default:
24575 return NULL_TREE;
24580 /* Returns a decl for a target-specific builtin that implements the
24581 reciprocal of the function, or NULL_TREE if not available. */
24583 static tree
24584 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24585 bool sqrt ATTRIBUTE_UNUSED)
24587 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24588 && flag_finite_math_only && !flag_trapping_math
24589 && flag_unsafe_math_optimizations))
24590 return NULL_TREE;
24592 if (md_fn)
24593 /* Machine dependent builtins. */
24594 switch (fn)
24596 /* Vectorized version of sqrt to rsqrt conversion. */
24597 case IX86_BUILTIN_SQRTPS_NR:
24598 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24600 default:
24601 return NULL_TREE;
24603 else
24604 /* Normal builtins. */
24605 switch (fn)
24607 /* Sqrt to rsqrt conversion. */
24608 case BUILT_IN_SQRTF:
24609 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24611 default:
24612 return NULL_TREE;
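/* For instance, under -ffast-math a scalar 1.0f / sqrtf (x) can be
   rewritten using the IX86_BUILTIN_RSQRTF expansion returned above
   instead of a real divide and square root.  */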
24616 /* Helper for avx_vpermilps256_operand et al. This is also used by
24617 the expansion functions to turn the parallel back into a mask.
24618 The return value is 0 for no match and the imm8+1 for a match. */
24620 int
24621 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24623 unsigned i, nelt = GET_MODE_NUNITS (mode);
24624 unsigned mask = 0;
24625 unsigned char ipar[8];
24627 if (XVECLEN (par, 0) != (int) nelt)
24628 return 0;
24630 /* Validate that all of the elements are constants, and not totally
24631 out of range. Copy the data into an integral array to make the
24632 subsequent checks easier. */
24633 for (i = 0; i < nelt; ++i)
24635 rtx er = XVECEXP (par, 0, i);
24636 unsigned HOST_WIDE_INT ei;
24638 if (!CONST_INT_P (er))
24639 return 0;
24640 ei = INTVAL (er);
24641 if (ei >= 2 * nelt)
24642 return 0;
24643 ipar[i] = ei;
24646 switch (mode)
24648 case V4DFmode:
24649 /* In the 256-bit DFmode case, we can only move elements within
24650 a 128-bit lane. */
24651 for (i = 0; i < 2; ++i)
24653 if (ipar[i] >= 2)
24654 return 0;
24655 mask |= ipar[i] << i;
24657 for (i = 2; i < 4; ++i)
24659 if (ipar[i] < 2)
24660 return 0;
24661 mask |= (ipar[i] - 2) << i;
24663 break;
24665 case V8SFmode:
24666 /* In the 256-bit SFmode case, we have full freedom of movement
24667 within the low 128-bit lane, but the high 128-bit lane must
24668 mirror the exact same pattern. */
24669 for (i = 0; i < 4; ++i)
24670 if (ipar[i] + 4 != ipar[i + 4])
24671 return 0;
24672 nelt = 4;
24673 /* FALLTHRU */
24675 case V2DFmode:
24676 case V4SFmode:
24677 /* In the 128-bit case, we've full freedom in the placement of
24678 the elements from the source operand. */
24679 for (i = 0; i < nelt; ++i)
24680 mask |= ipar[i] << (i * (nelt / 2));
24681 break;
24683 default:
24684 gcc_unreachable ();
24687 /* Make sure success has a non-zero value by adding one. */
24688 return mask + 1;
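/* Worked example of the encoding above: for V4DFmode a parallel of
   (1 0 3 2) sets bit 0 from the low lane (1 << 0) and bit 2 from the
   high lane ((3 - 2) << 2), i.e. imm8 0x05, so the return value is
   0x06.  */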
24691 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
24692 the expansion functions to turn the parallel back into a mask.
24693 The return value is 0 for no match and the imm8+1 for a match. */
24695 int
24696 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
24698 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
24699 unsigned mask = 0;
24700 unsigned char ipar[8];
24702 if (XVECLEN (par, 0) != (int) nelt)
24703 return 0;
24705 /* Validate that all of the elements are constants, and not totally
24706 out of range. Copy the data into an integral array to make the
24707 subsequent checks easier. */
24708 for (i = 0; i < nelt; ++i)
24710 rtx er = XVECEXP (par, 0, i);
24711 unsigned HOST_WIDE_INT ei;
24713 if (!CONST_INT_P (er))
24714 return 0;
24715 ei = INTVAL (er);
24716 if (ei >= 2 * nelt)
24717 return 0;
24718 ipar[i] = ei;
24721 /* Validate that each half of the permute selects consecutive elements. */
24722 for (i = 0; i < nelt2 - 1; ++i)
24723 if (ipar[i] + 1 != ipar[i + 1])
24724 return 0;
24725 for (i = nelt2; i < nelt - 1; ++i)
24726 if (ipar[i] + 1 != ipar[i + 1])
24727 return 0;
24729 /* Reconstruct the mask. */
24730 for (i = 0; i < 2; ++i)
24732 unsigned e = ipar[i * nelt2];
24733 if (e % nelt2)
24734 return 0;
24735 e /= nelt2;
24736 mask |= e << (i * 4);
24739 /* Make sure success has a non-zero value by adding one. */
24740 return mask + 1;
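/* Worked example: for V8SFmode a parallel of (4 5 6 7 12 13 14 15)
   gives e == 1 for the low half and e == 3 for the high half, so the
   reconstructed imm8 is 0x31 and the return value is 0x32.  */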
24744 /* Store OPERAND to the memory after reload is completed. This means
24745 that we can't easily use assign_stack_local. */
24746 rtx
24747 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24749 rtx result;
24751 gcc_assert (reload_completed);
24752 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24754 result = gen_rtx_MEM (mode,
24755 gen_rtx_PLUS (Pmode,
24756 stack_pointer_rtx,
24757 GEN_INT (-RED_ZONE_SIZE)));
24758 emit_move_insn (result, operand);
24760 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24762 switch (mode)
24764 case HImode:
24765 case SImode:
24766 operand = gen_lowpart (DImode, operand);
24767 /* FALLTHRU */
24768 case DImode:
24769 emit_insn (
24770 gen_rtx_SET (VOIDmode,
24771 gen_rtx_MEM (DImode,
24772 gen_rtx_PRE_DEC (DImode,
24773 stack_pointer_rtx)),
24774 operand));
24775 break;
24776 default:
24777 gcc_unreachable ();
24779 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24781 else
24783 switch (mode)
24785 case DImode:
24787 rtx operands[2];
24788 split_di (&operand, 1, operands, operands + 1);
24789 emit_insn (
24790 gen_rtx_SET (VOIDmode,
24791 gen_rtx_MEM (SImode,
24792 gen_rtx_PRE_DEC (Pmode,
24793 stack_pointer_rtx)),
24794 operands[1]));
24795 emit_insn (
24796 gen_rtx_SET (VOIDmode,
24797 gen_rtx_MEM (SImode,
24798 gen_rtx_PRE_DEC (Pmode,
24799 stack_pointer_rtx)),
24800 operands[0]));
24802 break;
24803 case HImode:
24804 /* Store HImodes as SImodes. */
24805 operand = gen_lowpart (SImode, operand);
24806 /* FALLTHRU */
24807 case SImode:
24808 emit_insn (
24809 gen_rtx_SET (VOIDmode,
24810 gen_rtx_MEM (GET_MODE (operand),
24811 gen_rtx_PRE_DEC (SImode,
24812 stack_pointer_rtx)),
24813 operand));
24814 break;
24815 default:
24816 gcc_unreachable ();
24818 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24820 return result;
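/* Sketch of the 64-bit path above (no usable red zone): an SImode
   operand is first widened to DImode, stored through a
   (pre_dec (reg sp)) address, and the MEM returned to the caller simply
   addresses the new top of the stack.  */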
24823 /* Free operand from the memory. */
24824 void
24825 ix86_free_from_memory (enum machine_mode mode)
24827 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
24829 int size;
24831 if (mode == DImode || TARGET_64BIT)
24832 size = 8;
24833 else
24834 size = 4;
24835 /* Use LEA to deallocate stack space. In peephole2 it will be converted
24836 to a pop or add instruction if registers are available. */
24837 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
24838 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24839 GEN_INT (size))));
24843 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
24844 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
24845 same. */
24846 static const enum reg_class *
24847 i386_ira_cover_classes (void)
24849 static const enum reg_class sse_fpmath_classes[] = {
24850 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
24852 static const enum reg_class no_sse_fpmath_classes[] = {
24853 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
24856 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
24859 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
24860 QImode must go into class Q_REGS.
24861 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
24862 movdf to do mem-to-mem moves through integer regs. */
24863 enum reg_class
24864 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
24866 enum machine_mode mode = GET_MODE (x);
24868 /* We're only allowed to return a subclass of CLASS. Many of the
24869 following checks fail for NO_REGS, so eliminate that early. */
24870 if (regclass == NO_REGS)
24871 return NO_REGS;
24873 /* All classes can load zeros. */
24874 if (x == CONST0_RTX (mode))
24875 return regclass;
24877 /* Force constants into memory if we are loading a (nonzero) constant into
24878 an MMX or SSE register. This is because there are no MMX/SSE instructions
24879 to load from a constant. */
24880 if (CONSTANT_P (x)
24881 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
24882 return NO_REGS;
24884 /* Prefer SSE regs only, if we can use them for math. */
24885 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
24886 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
24888 /* Floating-point constants need more complex checks. */
24889 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
24891 /* General regs can load everything. */
24892 if (reg_class_subset_p (regclass, GENERAL_REGS))
24893 return regclass;
24895 /* Floats can load 0 and 1 plus some others. Note that we eliminated
24896 zero above. We only want to wind up preferring 80387 registers if
24897 we plan on doing computation with them. */
24898 if (TARGET_80387
24899 && standard_80387_constant_p (x))
24901 /* Limit class to non-sse. */
24902 if (regclass == FLOAT_SSE_REGS)
24903 return FLOAT_REGS;
24904 if (regclass == FP_TOP_SSE_REGS)
24905 return FP_TOP_REG;
24906 if (regclass == FP_SECOND_SSE_REGS)
24907 return FP_SECOND_REG;
24908 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
24909 return regclass;
24912 return NO_REGS;
24915 /* Generally when we see PLUS here, it's the function invariant
24916 (plus soft-fp const_int), which can only be computed into general
24917 regs. */
24918 if (GET_CODE (x) == PLUS)
24919 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
24921 /* QImode constants are easy to load, but non-constant QImode data
24922 must go into Q_REGS. */
24923 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
24925 if (reg_class_subset_p (regclass, Q_REGS))
24926 return regclass;
24927 if (reg_class_subset_p (Q_REGS, regclass))
24928 return Q_REGS;
24929 return NO_REGS;
24932 return regclass;
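/* For example, asking to load the nonzero constant 1.0 into SSE_REGS
   returns NO_REGS above, which makes reload spill the constant to the
   constant pool and load it from memory instead.  */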
24935 /* Discourage putting floating-point values in SSE registers unless
24936 SSE math is being used, and likewise for the 387 registers. */
24937 enum reg_class
24938 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
24940 enum machine_mode mode = GET_MODE (x);
24942 /* Restrict the output reload class to the register bank that we are doing
24943 math on. If we would like not to return a subset of CLASS, reject this
24944 alternative: if reload cannot do this, it will still use its choice. */
24945 mode = GET_MODE (x);
24946 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
24947 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
24949 if (X87_FLOAT_MODE_P (mode))
24951 if (regclass == FP_TOP_SSE_REGS)
24952 return FP_TOP_REG;
24953 else if (regclass == FP_SECOND_SSE_REGS)
24954 return FP_SECOND_REG;
24955 else
24956 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
24959 return regclass;
24962 static enum reg_class
24963 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
24964 enum machine_mode mode,
24965 secondary_reload_info *sri ATTRIBUTE_UNUSED)
24967 /* QImode spills from non-QI registers require an
24968 intermediate register on 32-bit targets. */
24969 if (!in_p && mode == QImode && !TARGET_64BIT
24970 && (rclass == GENERAL_REGS
24971 || rclass == LEGACY_REGS
24972 || rclass == INDEX_REGS))
24974 int regno;
24976 if (REG_P (x))
24977 regno = REGNO (x);
24978 else
24979 regno = -1;
24981 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
24982 regno = true_regnum (x);
24984 /* Return Q_REGS if the operand is in memory. */
24985 if (regno == -1)
24986 return Q_REGS;
24989 return NO_REGS;
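/* Example of the QImode case above: storing the low byte of a value that
   lives in %esi on a 32-bit target cannot be done directly (there is no
   byte access to %esi), so reload is told to go through Q_REGS, i.e. one
   of %eax, %ebx, %ecx or %edx.  */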
24992 /* If we are copying between general and FP registers, we need a memory
24993 location. The same is true for SSE and MMX registers.
24995 To optimize register_move_cost performance, allow inline variant.
24997 The macro can't work reliably when one of the CLASSES is a class containing
24998 registers from multiple units (SSE, MMX, integer). We avoid this by never
24999 combining those units in a single alternative in the machine description.
25000 Ensure that this constraint holds to avoid unexpected surprises.
25002 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25003 enforce these sanity checks. */
25005 static inline int
25006 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25007 enum machine_mode mode, int strict)
25009 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25010 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25011 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25012 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25013 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25014 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25016 gcc_assert (!strict);
25017 return true;
25020 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25021 return true;
25023 /* ??? This is a lie. We do have moves between mmx/general, and between
25024 mmx/sse2. But by saying we need secondary memory we discourage the
25025 register allocator from using the mmx registers unless needed. */
25026 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25027 return true;
25029 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25031 /* SSE1 doesn't have any direct moves from other classes. */
25032 if (!TARGET_SSE2)
25033 return true;
25035 /* If the target says that inter-unit moves are more expensive
25036 than moving through memory, then don't generate them. */
25037 if (!TARGET_INTER_UNIT_MOVES)
25038 return true;
25040 /* Between SSE and general, we have moves no larger than word size. */
25041 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25042 return true;
25045 return false;
25049 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25050 enum machine_mode mode, int strict)
25052 return inline_secondary_memory_needed (class1, class2, mode, strict);
25055 /* Return true if the registers in CLASS cannot represent the change from
25056 modes FROM to TO. */
25058 bool
25059 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25060 enum reg_class regclass)
25062 if (from == to)
25063 return false;
25065 /* x87 registers can't do subreg at all, as all values are reformatted
25066 to extended precision. */
25067 if (MAYBE_FLOAT_CLASS_P (regclass))
25068 return true;
25070 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25072 /* Vector registers do not support QI or HImode loads. If we don't
25073 disallow a change to these modes, reload will assume it's ok to
25074 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25075 the vec_dupv4hi pattern. */
25076 if (GET_MODE_SIZE (from) < 4)
25077 return true;
25079 /* Vector registers do not support subreg with nonzero offsets, which
25080 are otherwise valid for integer registers. Since we can't see
25081 whether we have a nonzero offset from here, prohibit all
25082 nonparadoxical subregs changing size. */
25083 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25084 return true;
25087 return false;
25090 /* Return the cost of moving data of mode M between a
25091 register and memory. A value of 2 is the default; this cost is
25092 relative to those in `REGISTER_MOVE_COST'.
25094 This function is used extensively by register_move_cost, which is used to
25095 build tables at startup, so keep it inline.
25096 When IN is 2, return maximum of in and out move cost.
25098 If moving between registers and memory is more expensive than
25099 between two registers, you should define this macro to express the
25100 relative cost.
25102 Also model the increased cost of moving QImode values in
25103 non-Q_REGS classes. */
25105 static inline int
25106 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25107 int in)
25109 int cost;
25110 if (FLOAT_CLASS_P (regclass))
25112 int index;
25113 switch (mode)
25115 case SFmode:
25116 index = 0;
25117 break;
25118 case DFmode:
25119 index = 1;
25120 break;
25121 case XFmode:
25122 index = 2;
25123 break;
25124 default:
25125 return 100;
25127 if (in == 2)
25128 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25129 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25131 if (SSE_CLASS_P (regclass))
25133 int index;
25134 switch (GET_MODE_SIZE (mode))
25136 case 4:
25137 index = 0;
25138 break;
25139 case 8:
25140 index = 1;
25141 break;
25142 case 16:
25143 index = 2;
25144 break;
25145 default:
25146 return 100;
25148 if (in == 2)
25149 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25150 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25152 if (MMX_CLASS_P (regclass))
25154 int index;
25155 switch (GET_MODE_SIZE (mode))
25157 case 4:
25158 index = 0;
25159 break;
25160 case 8:
25161 index = 1;
25162 break;
25163 default:
25164 return 100;
25166 if (in == 2)
25167 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25168 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25170 switch (GET_MODE_SIZE (mode))
25172 case 1:
25173 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25175 if (!in)
25176 return ix86_cost->int_store[0];
25177 if (TARGET_PARTIAL_REG_DEPENDENCY
25178 && optimize_function_for_speed_p (cfun))
25179 cost = ix86_cost->movzbl_load;
25180 else
25181 cost = ix86_cost->int_load[0];
25182 if (in == 2)
25183 return MAX (cost, ix86_cost->int_store[0]);
25184 return cost;
25186 else
25188 if (in == 2)
25189 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25190 if (in)
25191 return ix86_cost->movzbl_load;
25192 else
25193 return ix86_cost->int_store[0] + 4;
25195 break;
25196 case 2:
25197 if (in == 2)
25198 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25199 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25200 default:
25201 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
25202 if (mode == TFmode)
25203 mode = XFmode;
25204 if (in == 2)
25205 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25206 else if (in)
25207 cost = ix86_cost->int_load[2];
25208 else
25209 cost = ix86_cost->int_store[2];
25210 return (cost * (((int) GET_MODE_SIZE (mode)
25211 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25215 int
25216 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25218 return inline_memory_move_cost (mode, regclass, in);
25222 /* Return the cost of moving data from a register in class CLASS1 to
25223 one in class CLASS2.
25225 It is not required that the cost always equal 2 when FROM is the same as TO;
25226 on some machines it is expensive to move between registers if they are not
25227 general registers. */
25229 int
25230 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25231 enum reg_class class2)
25233 /* In case we require secondary memory, compute cost of the store followed
25234 by load. In order to avoid bad register allocation choices, we need
25235 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25237 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25239 int cost = 1;
25241 cost += inline_memory_move_cost (mode, class1, 2);
25242 cost += inline_memory_move_cost (mode, class2, 2);
25244 /* In case of copying from a general purpose register we may emit multiple
25245 stores followed by a single load, causing a memory size mismatch stall.
25246 Count this as an arbitrarily high cost of 20. */
25247 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25248 cost += 20;
25250 /* In the case of FP/MMX moves, the registers actually overlap, and we
25251 have to switch modes in order to treat them differently. */
25252 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25253 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25254 cost += 20;
25256 return cost;
25259 /* Moves between SSE/MMX and integer unit are expensive. */
25260 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25261 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25263 /* ??? By keeping the returned value relatively high, we limit the number
25264 of moves between integer and MMX/SSE registers for all targets.
25265 Additionally, a high value prevents a problem with x86_modes_tieable_p (),
25266 where integer modes in MMX/SSE registers are not tieable
25267 because of missing QImode and HImode moves to, from or between
25268 MMX/SSE registers. */
25269 return MAX (8, ix86_cost->mmxsse_to_integer);
25271 if (MAYBE_FLOAT_CLASS_P (class1))
25272 return ix86_cost->fp_move;
25273 if (MAYBE_SSE_CLASS_P (class1))
25274 return ix86_cost->sse_move;
25275 if (MAYBE_MMX_CLASS_P (class1))
25276 return ix86_cost->mmx_move;
25277 return 2;
25280 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25282 bool
25283 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25285 /* Flags and only flags can only hold CCmode values. */
25286 if (CC_REGNO_P (regno))
25287 return GET_MODE_CLASS (mode) == MODE_CC;
25288 if (GET_MODE_CLASS (mode) == MODE_CC
25289 || GET_MODE_CLASS (mode) == MODE_RANDOM
25290 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25291 return 0;
25292 if (FP_REGNO_P (regno))
25293 return VALID_FP_MODE_P (mode);
25294 if (SSE_REGNO_P (regno))
25296 /* We implement the move patterns for all vector modes into and
25297 out of SSE registers, even when no operation instructions
25298 are available. OImode move is available only when AVX is
25299 enabled. */
25300 return ((TARGET_AVX && mode == OImode)
25301 || VALID_AVX256_REG_MODE (mode)
25302 || VALID_SSE_REG_MODE (mode)
25303 || VALID_SSE2_REG_MODE (mode)
25304 || VALID_MMX_REG_MODE (mode)
25305 || VALID_MMX_REG_MODE_3DNOW (mode));
25307 if (MMX_REGNO_P (regno))
25309 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25310 so if the register is available at all, then we can move data of
25311 the given mode into or out of it. */
25312 return (VALID_MMX_REG_MODE (mode)
25313 || VALID_MMX_REG_MODE_3DNOW (mode));
25316 if (mode == QImode)
25318 /* Take care for QImode values - they can be in non-QI regs,
25319 but then they do cause partial register stalls. */
25320 if (regno <= BX_REG || TARGET_64BIT)
25321 return 1;
25322 if (!TARGET_PARTIAL_REG_STALL)
25323 return 1;
25324 return reload_in_progress || reload_completed;
25326 /* We handle both integer and floats in the general purpose registers. */
25327 else if (VALID_INT_MODE_P (mode))
25328 return 1;
25329 else if (VALID_FP_MODE_P (mode))
25330 return 1;
25331 else if (VALID_DFP_MODE_P (mode))
25332 return 1;
25333 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25334 on to use that value in smaller contexts, this can easily force a
25335 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25336 supporting DImode, allow it. */
25337 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25338 return 1;
25340 return 0;
25343 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25344 tieable integer mode. */
25346 static bool
25347 ix86_tieable_integer_mode_p (enum machine_mode mode)
25349 switch (mode)
25351 case HImode:
25352 case SImode:
25353 return true;
25355 case QImode:
25356 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25358 case DImode:
25359 return TARGET_64BIT;
25361 default:
25362 return false;
25366 /* Return true if MODE1 is accessible in a register that can hold MODE2
25367 without copying. That is, all register classes that can hold MODE2
25368 can also hold MODE1. */
25370 bool
25371 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25373 if (mode1 == mode2)
25374 return true;
25376 if (ix86_tieable_integer_mode_p (mode1)
25377 && ix86_tieable_integer_mode_p (mode2))
25378 return true;
25380 /* MODE2 being XFmode implies fp stack or general regs, which means we
25381 can tie any smaller floating point modes to it. Note that we do not
25382 tie this with TFmode. */
25383 if (mode2 == XFmode)
25384 return mode1 == SFmode || mode1 == DFmode;
25386 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25387 that we can tie it with SFmode. */
25388 if (mode2 == DFmode)
25389 return mode1 == SFmode;
25391 /* If MODE2 is only appropriate for an SSE register, then tie with
25392 any other mode acceptable to SSE registers. */
25393 if (GET_MODE_SIZE (mode2) == 16
25394 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25395 return (GET_MODE_SIZE (mode1) == 16
25396 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25398 /* If MODE2 is appropriate for an MMX register, then tie
25399 with any other mode acceptable to MMX registers. */
25400 if (GET_MODE_SIZE (mode2) == 8
25401 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25402 return (GET_MODE_SIZE (mode1) == 8
25403 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25405 return false;
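/* For instance, SImode and HImode tie with each other (both are tieable
   integer modes), and XFmode ties with SFmode and DFmode but not with
   TFmode, per the checks above.  */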
25408 /* Compute a (partial) cost for rtx X. Return true if the complete
25409 cost has been computed, and false if subexpressions should be
25410 scanned. In either case, *TOTAL contains the cost result. */
25412 static bool
25413 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25415 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25416 enum machine_mode mode = GET_MODE (x);
25417 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25419 switch (code)
25421 case CONST_INT:
25422 case CONST:
25423 case LABEL_REF:
25424 case SYMBOL_REF:
25425 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25426 *total = 3;
25427 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25428 *total = 2;
25429 else if (flag_pic && SYMBOLIC_CONST (x)
25430 && (!TARGET_64BIT
25431 || (GET_CODE (x) != LABEL_REF
25432 && (GET_CODE (x) != SYMBOL_REF
25433 || !SYMBOL_REF_LOCAL_P (x)))))
25434 *total = 1;
25435 else
25436 *total = 0;
25437 return true;
25439 case CONST_DOUBLE:
25440 if (mode == VOIDmode)
25441 *total = 0;
25442 else
25443 switch (standard_80387_constant_p (x))
25445 case 1: /* 0.0 */
25446 *total = 1;
25447 break;
25448 default: /* Other constants */
25449 *total = 2;
25450 break;
25451 case 0:
25452 case -1:
25453 /* Start with (MEM (SYMBOL_REF)), since that's where
25454 it'll probably end up. Add a penalty for size. */
25455 *total = (COSTS_N_INSNS (1)
25456 + (flag_pic != 0 && !TARGET_64BIT)
25457 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25458 break;
25460 return true;
25462 case ZERO_EXTEND:
25463 /* The zero extension is often completely free on x86_64, so make
25464 it as cheap as possible. */
25465 if (TARGET_64BIT && mode == DImode
25466 && GET_MODE (XEXP (x, 0)) == SImode)
25467 *total = 1;
25468 else if (TARGET_ZERO_EXTEND_WITH_AND)
25469 *total = cost->add;
25470 else
25471 *total = cost->movzx;
25472 return false;
25474 case SIGN_EXTEND:
25475 *total = cost->movsx;
25476 return false;
25478 case ASHIFT:
25479 if (CONST_INT_P (XEXP (x, 1))
25480 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25482 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25483 if (value == 1)
25485 *total = cost->add;
25486 return false;
25488 if ((value == 2 || value == 3)
25489 && cost->lea <= cost->shift_const)
25491 *total = cost->lea;
25492 return false;
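/* E.g. (x << 3) can be emitted as "lea (,%reg,8), %dst", which is why
   shift counts of 2 and 3 are costed as an LEA above whenever that is
   no more expensive than a constant shift.  */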
25495 /* FALLTHRU */
25497 case ROTATE:
25498 case ASHIFTRT:
25499 case LSHIFTRT:
25500 case ROTATERT:
25501 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25503 if (CONST_INT_P (XEXP (x, 1)))
25505 if (INTVAL (XEXP (x, 1)) > 32)
25506 *total = cost->shift_const + COSTS_N_INSNS (2);
25507 else
25508 *total = cost->shift_const * 2;
25510 else
25512 if (GET_CODE (XEXP (x, 1)) == AND)
25513 *total = cost->shift_var * 2;
25514 else
25515 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25518 else
25520 if (CONST_INT_P (XEXP (x, 1)))
25521 *total = cost->shift_const;
25522 else
25523 *total = cost->shift_var;
25525 return false;
25527 case MULT:
25528 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25530 /* ??? SSE scalar cost should be used here. */
25531 *total = cost->fmul;
25532 return false;
25534 else if (X87_FLOAT_MODE_P (mode))
25536 *total = cost->fmul;
25537 return false;
25539 else if (FLOAT_MODE_P (mode))
25541 /* ??? SSE vector cost should be used here. */
25542 *total = cost->fmul;
25543 return false;
25545 else
25547 rtx op0 = XEXP (x, 0);
25548 rtx op1 = XEXP (x, 1);
25549 int nbits;
25550 if (CONST_INT_P (XEXP (x, 1)))
25552 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25553 for (nbits = 0; value != 0; value &= value - 1)
25554 nbits++;
25556 else
25557 /* This is arbitrary. */
25558 nbits = 7;
25560 /* Compute costs correctly for widening multiplication. */
25561 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25562 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25563 == GET_MODE_SIZE (mode))
25565 int is_mulwiden = 0;
25566 enum machine_mode inner_mode = GET_MODE (op0);
25568 if (GET_CODE (op0) == GET_CODE (op1))
25569 is_mulwiden = 1, op1 = XEXP (op1, 0);
25570 else if (CONST_INT_P (op1))
25572 if (GET_CODE (op0) == SIGN_EXTEND)
25573 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25574 == INTVAL (op1);
25575 else
25576 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25579 if (is_mulwiden)
25580 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25583 *total = (cost->mult_init[MODE_INDEX (mode)]
25584 + nbits * cost->mult_bit
25585 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
25587 return true;
25590 case DIV:
25591 case UDIV:
25592 case MOD:
25593 case UMOD:
25594 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25595 /* ??? SSE cost should be used here. */
25596 *total = cost->fdiv;
25597 else if (X87_FLOAT_MODE_P (mode))
25598 *total = cost->fdiv;
25599 else if (FLOAT_MODE_P (mode))
25600 /* ??? SSE vector cost should be used here. */
25601 *total = cost->fdiv;
25602 else
25603 *total = cost->divide[MODE_INDEX (mode)];
25604 return false;
25606 case PLUS:
25607 if (GET_MODE_CLASS (mode) == MODE_INT
25608 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25610 if (GET_CODE (XEXP (x, 0)) == PLUS
25611 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25612 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25613 && CONSTANT_P (XEXP (x, 1)))
25615 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25616 if (val == 2 || val == 4 || val == 8)
25618 *total = cost->lea;
25619 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25620 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25621 outer_code, speed);
25622 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25623 return true;
25626 else if (GET_CODE (XEXP (x, 0)) == MULT
25627 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25629 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25630 if (val == 2 || val == 4 || val == 8)
25632 *total = cost->lea;
25633 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25634 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25635 return true;
25638 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25640 *total = cost->lea;
25641 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25642 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25643 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25644 return true;
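/* The three branches above correspond to address-shaped expressions of
   the form base + index*scale + disp, e.g. "lea 4(%eax,%ebx,8), %ecx",
   which a single LEA instruction can compute.  */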
25647 /* FALLTHRU */
25649 case MINUS:
25650 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25652 /* ??? SSE cost should be used here. */
25653 *total = cost->fadd;
25654 return false;
25656 else if (X87_FLOAT_MODE_P (mode))
25658 *total = cost->fadd;
25659 return false;
25661 else if (FLOAT_MODE_P (mode))
25663 /* ??? SSE vector cost should be used here. */
25664 *total = cost->fadd;
25665 return false;
25667 /* FALLTHRU */
25669 case AND:
25670 case IOR:
25671 case XOR:
25672 if (!TARGET_64BIT && mode == DImode)
25674 *total = (cost->add * 2
25675 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25676 << (GET_MODE (XEXP (x, 0)) != DImode))
25677 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25678 << (GET_MODE (XEXP (x, 1)) != DImode)));
25679 return true;
25681 /* FALLTHRU */
25683 case NEG:
25684 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25686 /* ??? SSE cost should be used here. */
25687 *total = cost->fchs;
25688 return false;
25690 else if (X87_FLOAT_MODE_P (mode))
25692 *total = cost->fchs;
25693 return false;
25695 else if (FLOAT_MODE_P (mode))
25697 /* ??? SSE vector cost should be used here. */
25698 *total = cost->fchs;
25699 return false;
25701 /* FALLTHRU */
25703 case NOT:
25704 if (!TARGET_64BIT && mode == DImode)
25705 *total = cost->add * 2;
25706 else
25707 *total = cost->add;
25708 return false;
25710 case COMPARE:
25711 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25712 && XEXP (XEXP (x, 0), 1) == const1_rtx
25713 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25714 && XEXP (x, 1) == const0_rtx)
25716 /* This kind of construct is implemented using test[bwl].
25717 Treat it as if we had an AND. */
25718 *total = (cost->add
25719 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25720 + rtx_cost (const1_rtx, outer_code, speed));
25721 return true;
25723 return false;
25725 case FLOAT_EXTEND:
25726 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25727 *total = 0;
25728 return false;
25730 case ABS:
25731 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25732 /* ??? SSE cost should be used here. */
25733 *total = cost->fabs;
25734 else if (X87_FLOAT_MODE_P (mode))
25735 *total = cost->fabs;
25736 else if (FLOAT_MODE_P (mode))
25737 /* ??? SSE vector cost should be used here. */
25738 *total = cost->fabs;
25739 return false;
25741 case SQRT:
25742 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25743 /* ??? SSE cost should be used here. */
25744 *total = cost->fsqrt;
25745 else if (X87_FLOAT_MODE_P (mode))
25746 *total = cost->fsqrt;
25747 else if (FLOAT_MODE_P (mode))
25748 /* ??? SSE vector cost should be used here. */
25749 *total = cost->fsqrt;
25750 return false;
25752 case UNSPEC:
25753 if (XINT (x, 1) == UNSPEC_TP)
25754 *total = 0;
25755 return false;
25757 case VEC_SELECT:
25758 case VEC_CONCAT:
25759 case VEC_MERGE:
25760 case VEC_DUPLICATE:
25761 /* ??? Assume all of these vector manipulation patterns are
25762 recognizable, in which case they all have pretty much the
25763 same cost. */
25764 *total = COSTS_N_INSNS (1);
25765 return true;
25767 default:
25768 return false;
25772 #if TARGET_MACHO
25774 static int current_machopic_label_num;
25776 /* Given a symbol name and its associated stub, write out the
25777 definition of the stub. */
25779 void
25780 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25782 unsigned int length;
25783 char *binder_name, *symbol_name, lazy_ptr_name[32];
25784 int label = ++current_machopic_label_num;
25786 /* For 64-bit we shouldn't get here. */
25787 gcc_assert (!TARGET_64BIT);
25789 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25790 symb = (*targetm.strip_name_encoding) (symb);
25792 length = strlen (stub);
25793 binder_name = XALLOCAVEC (char, length + 32);
25794 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
25796 length = strlen (symb);
25797 symbol_name = XALLOCAVEC (char, length + 32);
25798 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25800 sprintf (lazy_ptr_name, "L%d$lz", label);
25802 if (MACHOPIC_PURE)
25803 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
25804 else
25805 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
25807 fprintf (file, "%s:\n", stub);
25808 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25810 if (MACHOPIC_PURE)
25812 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
25813 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
25814 fprintf (file, "\tjmp\t*%%edx\n");
25816 else
25817 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
25819 fprintf (file, "%s:\n", binder_name);
25821 if (MACHOPIC_PURE)
25823 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
25824 fputs ("\tpushl\t%eax\n", file);
25826 else
25827 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
25829 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
25831 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25832 fprintf (file, "%s:\n", lazy_ptr_name);
25833 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25834 fprintf (file, ASM_LONG "%s\n", binder_name);
25837 void
25838 darwin_x86_file_end (void)
25840 darwin_file_end ();
25841 ix86_file_end ();
25843 #endif /* TARGET_MACHO */
25845 /* Order the registers for register allocator. */
25847 void
25848 x86_order_regs_for_local_alloc (void)
25850 int pos = 0;
25851 int i;
25853 /* First allocate the local general purpose registers. */
25854 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25855 if (GENERAL_REGNO_P (i) && call_used_regs[i])
25856 reg_alloc_order [pos++] = i;
25858 /* Global general purpose registers. */
25859 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25860 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
25861 reg_alloc_order [pos++] = i;
25863 /* x87 registers come first in case we are doing FP math
25864 using them. */
25865 if (!TARGET_SSE_MATH)
25866 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25867 reg_alloc_order [pos++] = i;
25869 /* SSE registers. */
25870 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
25871 reg_alloc_order [pos++] = i;
25872 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
25873 reg_alloc_order [pos++] = i;
25875 /* x87 registers. */
25876 if (TARGET_SSE_MATH)
25877 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25878 reg_alloc_order [pos++] = i;
25880 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
25881 reg_alloc_order [pos++] = i;
25883 /* Initialize the rest of the array, as we do not allocate some registers
25884 at all. */
25885 while (pos < FIRST_PSEUDO_REGISTER)
25886 reg_alloc_order [pos++] = 0;
25889 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
25890 struct attribute_spec.handler. */
25891 static tree
25892 ix86_handle_abi_attribute (tree *node, tree name,
25893 tree args ATTRIBUTE_UNUSED,
25894 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25896 if (TREE_CODE (*node) != FUNCTION_TYPE
25897 && TREE_CODE (*node) != METHOD_TYPE
25898 && TREE_CODE (*node) != FIELD_DECL
25899 && TREE_CODE (*node) != TYPE_DECL)
25901 warning (OPT_Wattributes, "%qE attribute only applies to functions",
25902 name);
25903 *no_add_attrs = true;
25904 return NULL_TREE;
25906 if (!TARGET_64BIT)
25908 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
25909 name);
25910 *no_add_attrs = true;
25911 return NULL_TREE;
25914 /* The ms_abi and sysv_abi attributes are mutually exclusive. */
25915 if (is_attribute_p ("ms_abi", name))
25917 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
25919 error ("ms_abi and sysv_abi attributes are not compatible");
25922 return NULL_TREE;
25924 else if (is_attribute_p ("sysv_abi", name))
25926 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
25928 error ("ms_abi and sysv_abi attributes are not compatible");
25931 return NULL_TREE;
25934 return NULL_TREE;
25937 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
25938 struct attribute_spec.handler. */
25939 static tree
25940 ix86_handle_struct_attribute (tree *node, tree name,
25941 tree args ATTRIBUTE_UNUSED,
25942 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25944 tree *type = NULL;
25945 if (DECL_P (*node))
25947 if (TREE_CODE (*node) == TYPE_DECL)
25948 type = &TREE_TYPE (*node);
25950 else
25951 type = node;
25953 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
25954 || TREE_CODE (*type) == UNION_TYPE)))
25956 warning (OPT_Wattributes, "%qE attribute ignored",
25957 name);
25958 *no_add_attrs = true;
25961 else if ((is_attribute_p ("ms_struct", name)
25962 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
25963 || ((is_attribute_p ("gcc_struct", name)
25964 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
25966 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
25967 name);
25968 *no_add_attrs = true;
25971 return NULL_TREE;
25974 static tree
25975 ix86_handle_fndecl_attribute (tree *node, tree name,
25976 tree args ATTRIBUTE_UNUSED,
25977 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25979 if (TREE_CODE (*node) != FUNCTION_DECL)
25981 warning (OPT_Wattributes, "%qE attribute only applies to functions",
25982 name);
25983 *no_add_attrs = true;
25984 return NULL_TREE;
25987 if (TARGET_64BIT)
25989 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
25990 name);
25991 return NULL_TREE;
25994 #ifndef HAVE_AS_IX86_SWAP
25995 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
25996 #endif
25998 return NULL_TREE;
26001 static bool
26002 ix86_ms_bitfield_layout_p (const_tree record_type)
26004 return (TARGET_MS_BITFIELD_LAYOUT &&
26005 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26006 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26009 /* Returns an expression indicating where the this parameter is
26010 located on entry to the FUNCTION. */
26012 static rtx
26013 x86_this_parameter (tree function)
26015 tree type = TREE_TYPE (function);
26016 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26017 int nregs;
26019 if (TARGET_64BIT)
26021 const int *parm_regs;
26023 if (ix86_function_type_abi (type) == MS_ABI)
26024 parm_regs = x86_64_ms_abi_int_parameter_registers;
26025 else
26026 parm_regs = x86_64_int_parameter_registers;
26027 return gen_rtx_REG (DImode, parm_regs[aggr]);
26030 nregs = ix86_function_regparm (type, function);
26032 if (nregs > 0 && !stdarg_p (type))
26034 int regno;
26036 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26037 regno = aggr ? DX_REG : CX_REG;
26038 else
26040 regno = AX_REG;
26041 if (aggr)
26043 regno = DX_REG;
26044 if (nregs == 1)
26045 return gen_rtx_MEM (SImode,
26046 plus_constant (stack_pointer_rtx, 4));
26049 return gen_rtx_REG (SImode, regno);
26052 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26055 /* Determine whether x86_output_mi_thunk can succeed. */
26057 static bool
26058 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26059 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26060 HOST_WIDE_INT vcall_offset, const_tree function)
26062 /* 64-bit can handle anything. */
26063 if (TARGET_64BIT)
26064 return true;
26066 /* For 32-bit, everything's fine if we have one free register. */
26067 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26068 return true;
26070 /* Need a free register for vcall_offset. */
26071 if (vcall_offset)
26072 return false;
26074 /* Need a free register for GOT references. */
26075 if (flag_pic && !(*targetm.binds_local_p) (function))
26076 return false;
26078 /* Otherwise ok. */
26079 return true;
26082 /* Output the assembler code for a thunk function. THUNK_DECL is the
26083 declaration for the thunk function itself, FUNCTION is the decl for
26084 the target function. DELTA is an immediate constant offset to be
26085 added to THIS. If VCALL_OFFSET is nonzero, the word at
26086 *(*this + vcall_offset) should be added to THIS. */
26088 static void
26089 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
26090 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26091 HOST_WIDE_INT vcall_offset, tree function)
26093 rtx xops[3];
26094 rtx this_param = x86_this_parameter (function);
26095 rtx this_reg, tmp;
26097 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26098 pull it in now and let DELTA benefit. */
26099 if (REG_P (this_param))
26100 this_reg = this_param;
26101 else if (vcall_offset)
26103 /* Put the this parameter into %eax. */
26104 xops[0] = this_param;
26105 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26106 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26108 else
26109 this_reg = NULL_RTX;
26111 /* Adjust the this parameter by a fixed constant. */
26112 if (delta)
26114 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
26115 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
26116 bool sub = delta < 0 || delta == 128;
26117 xops[0] = GEN_INT (sub ? -delta : delta);
26118 xops[1] = this_reg ? this_reg : this_param;
26119 if (TARGET_64BIT)
26121 if (!x86_64_general_operand (xops[0], DImode))
26123 tmp = gen_rtx_REG (DImode, R10_REG);
26124 xops[1] = tmp;
26125 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26126 xops[0] = tmp;
26127 xops[1] = this_param;
26129 if (sub)
26130 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26131 else
26132 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26134 else if (sub)
26135 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26136 else
26137 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26140 /* Adjust the this parameter by a value stored in the vtable. */
26141 if (vcall_offset)
26143 if (TARGET_64BIT)
26144 tmp = gen_rtx_REG (DImode, R10_REG);
26145 else
26147 int tmp_regno = CX_REG;
26148 if (lookup_attribute ("fastcall",
26149 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26150 tmp_regno = AX_REG;
26151 tmp = gen_rtx_REG (SImode, tmp_regno);
26154 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26155 xops[1] = tmp;
26156 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26158 /* Adjust the this parameter. */
26159 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26160 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26162 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26163 xops[0] = GEN_INT (vcall_offset);
26164 xops[1] = tmp2;
26165 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26166 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26168 xops[1] = this_reg;
26169 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26172 /* If necessary, drop THIS back to its stack slot. */
26173 if (this_reg && this_reg != this_param)
26175 xops[0] = this_reg;
26176 xops[1] = this_param;
26177 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26180 xops[0] = XEXP (DECL_RTL (function), 0);
26181 if (TARGET_64BIT)
26183 if (!flag_pic || (*targetm.binds_local_p) (function))
26184 output_asm_insn ("jmp\t%P0", xops);
26185 /* All thunks should be in the same object as their target,
26186 and thus binds_local_p should be true. */
26187 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26188 gcc_unreachable ();
26189 else
26191 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26192 tmp = gen_rtx_CONST (Pmode, tmp);
26193 tmp = gen_rtx_MEM (QImode, tmp);
26194 xops[0] = tmp;
26195 output_asm_insn ("jmp\t%A0", xops);
26198 else
26200 if (!flag_pic || (*targetm.binds_local_p) (function))
26201 output_asm_insn ("jmp\t%P0", xops);
26202 else
26203 #if TARGET_MACHO
26204 if (TARGET_MACHO)
26206 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26207 tmp = (gen_rtx_SYMBOL_REF
26208 (Pmode,
26209 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26210 tmp = gen_rtx_MEM (QImode, tmp);
26211 xops[0] = tmp;
26212 output_asm_insn ("jmp\t%0", xops);
26214 else
26215 #endif /* TARGET_MACHO */
26217 tmp = gen_rtx_REG (SImode, CX_REG);
26218 output_set_got (tmp, NULL_RTX);
26220 xops[1] = tmp;
26221 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26222 output_asm_insn ("jmp\t{*}%1", xops);
26227 static void
26228 x86_file_start (void)
26230 default_file_start ();
26231 #if TARGET_MACHO
26232 darwin_file_start ();
26233 #endif
26234 if (X86_FILE_START_VERSION_DIRECTIVE)
26235 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26236 if (X86_FILE_START_FLTUSED)
26237 fputs ("\t.global\t__fltused\n", asm_out_file);
26238 if (ix86_asm_dialect == ASM_INTEL)
26239 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26243 x86_field_alignment (tree field, int computed)
26245 enum machine_mode mode;
26246 tree type = TREE_TYPE (field);
26248 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26249 return computed;
26250 mode = TYPE_MODE (strip_array_types (type));
26251 if (mode == DFmode || mode == DCmode
26252 || GET_MODE_CLASS (mode) == MODE_INT
26253 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26254 return MIN (32, computed);
26255 return computed;
26258 /* Output assembler code to FILE to increment profiler label # LABELNO
26259 for profiling a function entry. */
26260 void
26261 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26263 if (TARGET_64BIT)
26265 #ifndef NO_PROFILE_COUNTERS
26266 fprintf (file, "\tleaq\t" LPREFIX "P%d@(%%rip),%%r11\n", labelno);
26267 #endif
26269 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26270 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26271 else
26272 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26274 else if (flag_pic)
26276 #ifndef NO_PROFILE_COUNTERS
26277 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26278 labelno);
26279 #endif
26280 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26282 else
26284 #ifndef NO_PROFILE_COUNTERS
26285 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26286 labelno);
26287 #endif
26288 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26292 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26293 /* We don't have exact information about the insn sizes, but we may assume
26294 quite safely that we are informed about all 1 byte insns and memory
26295 address sizes. This is enough to eliminate unnecessary padding in
26296 99% of cases. */
26298 static int
26299 min_insn_size (rtx insn)
26301 int l = 0, len;
26303 if (!INSN_P (insn) || !active_insn_p (insn))
26304 return 0;
26306 /* Discard alignments we've emitted and jump instructions. */
26307 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26308 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26309 return 0;
26310 if (JUMP_TABLE_DATA_P (insn))
26311 return 0;
26313 /* Important case - calls are always 5 bytes.
26314 It is common to have many calls in a row. */
26315 if (CALL_P (insn)
26316 && symbolic_reference_mentioned_p (PATTERN (insn))
26317 && !SIBLING_CALL_P (insn))
26318 return 5;
26319 len = get_attr_length (insn);
26320 if (len <= 1)
26321 return 1;
26323 /* For normal instructions we rely on get_attr_length being exact,
26324 with a few exceptions. */
26325 if (!JUMP_P (insn))
26327 enum attr_type type = get_attr_type (insn);
26329 switch (type)
26331 case TYPE_MULTI:
26332 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26333 || asm_noperands (PATTERN (insn)) >= 0)
26334 return 0;
26335 break;
26336 case TYPE_OTHER:
26337 case TYPE_FCMP:
26338 break;
26339 default:
26340 /* Otherwise trust get_attr_length. */
26341 return len;
26344 l = get_attr_length_address (insn);
26345 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26346 l = 4;
26348 if (l)
26349 return 1+l;
26350 else
26351 return 2;
26354 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16 byte
26355 window. */
26357 static void
26358 ix86_avoid_jump_mispredicts (void)
26360 rtx insn, start = get_insns ();
26361 int nbytes = 0, njumps = 0;
26362 int isjump = 0;
26364 /* Look for all minimal intervals of instructions containing 4 jumps.
26365 The intervals are bounded by START and INSN. NBYTES is the total
26366 size of instructions in the interval including INSN and not including
26367 START. When NBYTES is smaller than 16 bytes, it is possible
26368 that the end of START and INSN ends up in the same 16 byte page.
26370 INSN starts at the smallest offset in the page when START ends at
26371 offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
26372 We add a p2align to the 16 byte window with a max skip of 15 - NBYTES + sizeof (INSN).
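For example, with NBYTES == 14 and a 2 byte INSN this asks for a p2align
to a 16 byte boundary with a maximum skip of 15 - 14 + 2 = 3 bytes. */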
26374 for (insn = start; insn; insn = NEXT_INSN (insn))
26376 int min_size;
26378 if (LABEL_P (insn))
26380 int align = label_to_alignment (insn);
26381 int max_skip = label_to_max_skip (insn);
26383 if (max_skip > 15)
26384 max_skip = 15;
26385 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26386 already in the current 16 byte page, because otherwise
26387 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26388 bytes to reach 16 byte boundary. */
26389 if (align <= 0
26390 || (align <= 3 && max_skip != (1 << align) - 1))
26391 max_skip = 0;
26392 if (dump_file)
26393 fprintf (dump_file, "Label %i with max_skip %i\n",
26394 INSN_UID (insn), max_skip);
26395 if (max_skip)
26397 while (nbytes + max_skip >= 16)
26399 start = NEXT_INSN (start);
26400 if ((JUMP_P (start)
26401 && GET_CODE (PATTERN (start)) != ADDR_VEC
26402 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26403 || CALL_P (start))
26404 njumps--, isjump = 1;
26405 else
26406 isjump = 0;
26407 nbytes -= min_insn_size (start);
26410 continue;
26413 min_size = min_insn_size (insn);
26414 nbytes += min_size;
26415 if (dump_file)
26416 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26417 INSN_UID (insn), min_size);
26418 if ((JUMP_P (insn)
26419 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26420 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26421 || CALL_P (insn))
26422 njumps++;
26423 else
26424 continue;
26426 while (njumps > 3)
26428 start = NEXT_INSN (start);
26429 if ((JUMP_P (start)
26430 && GET_CODE (PATTERN (start)) != ADDR_VEC
26431 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26432 || CALL_P (start))
26433 njumps--, isjump = 1;
26434 else
26435 isjump = 0;
26436 nbytes -= min_insn_size (start);
26438 gcc_assert (njumps >= 0);
26439 if (dump_file)
26440 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26441 INSN_UID (start), INSN_UID (insn), nbytes);
26443 if (njumps == 3 && isjump && nbytes < 16)
26445 int padsize = 15 - nbytes + min_insn_size (insn);
26447 if (dump_file)
26448 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26449 INSN_UID (insn), padsize);
26450 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
26454 #endif
26456 /* AMD Athlon works faster
26457 when RET is not the destination of a conditional jump or directly preceded
26458 by another jump instruction. We avoid the penalty by inserting a NOP just
26459 before the RET instruction in such cases. */
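/* (Rather than a literal NOP, the code below substitutes gen_return_internal_long,
   the REP-prefixed two byte return, which has the same effect.) */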
26460 static void
26461 ix86_pad_returns (void)
26463 edge e;
26464 edge_iterator ei;
26466 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26468 basic_block bb = e->src;
26469 rtx ret = BB_END (bb);
26470 rtx prev;
26471 bool replace = false;
26473 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26474 || optimize_bb_for_size_p (bb))
26475 continue;
26476 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26477 if (active_insn_p (prev) || LABEL_P (prev))
26478 break;
26479 if (prev && LABEL_P (prev))
26481 edge e;
26482 edge_iterator ei;
26484 FOR_EACH_EDGE (e, ei, bb->preds)
26485 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26486 && !(e->flags & EDGE_FALLTHRU))
26487 replace = true;
26489 if (!replace)
26491 prev = prev_active_insn (ret);
26492 if (prev
26493 && ((JUMP_P (prev) && any_condjump_p (prev))
26494 || CALL_P (prev)))
26495 replace = true;
26496 /* Empty functions get a branch mispredict even when the jump destination
26497 is not visible to us. */
26498 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
26499 replace = true;
26501 if (replace)
26503 emit_jump_insn_before (gen_return_internal_long (), ret);
26504 delete_insn (ret);
26509 /* Implement machine specific optimizations. We implement padding of returns
26510 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
26511 static void
26512 ix86_reorg (void)
26514 if (optimize && optimize_function_for_speed_p (cfun))
26516 if (TARGET_PAD_RETURNS)
26517 ix86_pad_returns ();
26518 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26519 if (TARGET_FOUR_JUMP_LIMIT)
26520 ix86_avoid_jump_mispredicts ();
26521 #endif
26525 /* Return nonzero when a QImode register that must be represented via a REX
26526 prefix is used. */
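/* In 64-bit code the byte registers %spl, %bpl, %sil, %dil and %r8b-%r15b
   (hard regnos above BX_REG) can only be encoded with a REX prefix, while
   the legacy %al, %cl, %dl and %bl need none. */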
26527 bool
26528 x86_extended_QIreg_mentioned_p (rtx insn)
26530 int i;
26531 extract_insn_cached (insn);
26532 for (i = 0; i < recog_data.n_operands; i++)
26533 if (REG_P (recog_data.operand[i])
26534 && REGNO (recog_data.operand[i]) > BX_REG)
26535 return true;
26536 return false;
26539 /* Return nonzero when P points to a register encoded via a REX prefix.
26540 Called via for_each_rtx. */
26541 static int
26542 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26544 unsigned int regno;
26545 if (!REG_P (*p))
26546 return 0;
26547 regno = REGNO (*p);
26548 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26551 /* Return true when INSN mentions a register that must be encoded using a REX
26552 prefix. */
26553 bool
26554 x86_extended_reg_mentioned_p (rtx insn)
26556 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26557 extended_reg_mentioned_1, NULL);
26560 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26561 optabs would emit if we didn't have TFmode patterns. */
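/* In C terms, for e.g. a DImode input x this is roughly:

     if ((long long) x >= 0)
       result = (double) x;
     else
       result = 2.0 * (double) ((x >> 1) | (x & 1));

   keeping the low bit ORed in so that the final rounding is unaffected. */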
26563 void
26564 x86_emit_floatuns (rtx operands[2])
26566 rtx neglab, donelab, i0, i1, f0, in, out;
26567 enum machine_mode mode, inmode;
26569 inmode = GET_MODE (operands[1]);
26570 gcc_assert (inmode == SImode || inmode == DImode);
26572 out = operands[0];
26573 in = force_reg (inmode, operands[1]);
26574 mode = GET_MODE (out);
26575 neglab = gen_label_rtx ();
26576 donelab = gen_label_rtx ();
26577 f0 = gen_reg_rtx (mode);
26579 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26581 expand_float (out, in, 0);
26583 emit_jump_insn (gen_jump (donelab));
26584 emit_barrier ();
26586 emit_label (neglab);
26588 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26589 1, OPTAB_DIRECT);
26590 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26591 1, OPTAB_DIRECT);
26592 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26594 expand_float (f0, i0, 0);
26596 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26598 emit_label (donelab);
26601 /* AVX does not support 32-byte integer vector operations,
26602 thus the longest vector we are faced with is V16QImode. */
26603 #define MAX_VECT_LEN 16
26605 struct expand_vec_perm_d
26607 rtx target, op0, op1;
26608 unsigned char perm[MAX_VECT_LEN];
26609 enum machine_mode vmode;
26610 unsigned char nelt;
26611 bool testing_p;
26614 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
26615 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
26617 /* Get a vector mode of the same size as the original but with elements
26618 twice as wide. This is only guaranteed to apply to integral vectors. */
26620 static inline enum machine_mode
26621 get_mode_wider_vector (enum machine_mode o)
26623 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
26624 enum machine_mode n = GET_MODE_WIDER_MODE (o);
26625 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
26626 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
26627 return n;
26630 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26631 with all elements equal to VAR. Return true if successful. */
26633 static bool
26634 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26635 rtx target, rtx val)
26637 bool ok;
26639 switch (mode)
26641 case V2SImode:
26642 case V2SFmode:
26643 if (!mmx_ok)
26644 return false;
26645 /* FALLTHRU */
26647 case V4DFmode:
26648 case V4DImode:
26649 case V8SFmode:
26650 case V8SImode:
26651 case V2DFmode:
26652 case V2DImode:
26653 case V4SFmode:
26654 case V4SImode:
26656 rtx insn, dup;
26658 /* First attempt to recognize VAL as-is. */
26659 dup = gen_rtx_VEC_DUPLICATE (mode, val);
26660 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
26661 if (recog_memoized (insn) < 0)
26663 /* If that fails, force VAL into a register. */
26664 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
26665 ok = recog_memoized (insn) >= 0;
26666 gcc_assert (ok);
26669 return true;
26671 case V4HImode:
26672 if (!mmx_ok)
26673 return false;
26674 if (TARGET_SSE || TARGET_3DNOW_A)
26676 rtx x;
26678 val = gen_lowpart (SImode, val);
26679 x = gen_rtx_TRUNCATE (HImode, val);
26680 x = gen_rtx_VEC_DUPLICATE (mode, x);
26681 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26682 return true;
26684 goto widen;
26686 case V8QImode:
26687 if (!mmx_ok)
26688 return false;
26689 goto widen;
26691 case V8HImode:
26692 if (TARGET_SSE2)
26694 struct expand_vec_perm_d dperm;
26695 rtx tmp1, tmp2;
26697 permute:
26698 memset (&dperm, 0, sizeof (dperm));
26699 dperm.target = target;
26700 dperm.vmode = mode;
26701 dperm.nelt = GET_MODE_NUNITS (mode);
26702 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
26704 /* Extend to SImode using a paradoxical SUBREG. */
26705 tmp1 = gen_reg_rtx (SImode);
26706 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26708 /* Insert the SImode value as low element of a V4SImode vector. */
26709 tmp2 = gen_lowpart (V4SImode, dperm.op0);
26710 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
26712 ok = (expand_vec_perm_1 (&dperm)
26713 || expand_vec_perm_broadcast_1 (&dperm));
26714 gcc_assert (ok);
26715 return ok;
26717 goto widen;
26719 case V16QImode:
26720 if (TARGET_SSE2)
26721 goto permute;
26722 goto widen;
26724 widen:
26725 /* Replicate the value once into the next wider mode and recurse. */
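/* E.g. for V8QImode with val == 0xab this forms the HImode value
   0xabab = (val << 8) | val and broadcasts that across V4HImode. */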
26727 enum machine_mode smode, wsmode, wvmode;
26728 rtx x;
26730 smode = GET_MODE_INNER (mode);
26731 wvmode = get_mode_wider_vector (mode);
26732 wsmode = GET_MODE_INNER (wvmode);
26734 val = convert_modes (wsmode, smode, val, true);
26735 x = expand_simple_binop (wsmode, ASHIFT, val,
26736 GEN_INT (GET_MODE_BITSIZE (smode)),
26737 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26738 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
26740 x = gen_lowpart (wvmode, target);
26741 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
26742 gcc_assert (ok);
26743 return ok;
26746 case V16HImode:
26747 case V32QImode:
26749 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
26750 rtx x = gen_reg_rtx (hvmode);
26752 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
26753 gcc_assert (ok);
26755 x = gen_rtx_VEC_CONCAT (mode, x, x);
26756 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26758 return true;
26760 default:
26761 return false;
26765 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26766 whose ONE_VAR element is VAR, and other elements are zero. Return true
26767 if successful. */
26769 static bool
26770 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
26771 rtx target, rtx var, int one_var)
26773 enum machine_mode vsimode;
26774 rtx new_target;
26775 rtx x, tmp;
26776 bool use_vector_set = false;
26778 switch (mode)
26780 case V2DImode:
26781 /* For SSE4.1, we normally use vector set. But if the second
26782 element is zero and inter-unit moves are OK, we use movq
26783 instead. */
26784 use_vector_set = (TARGET_64BIT
26785 && TARGET_SSE4_1
26786 && !(TARGET_INTER_UNIT_MOVES
26787 && one_var == 0));
26788 break;
26789 case V16QImode:
26790 case V4SImode:
26791 case V4SFmode:
26792 use_vector_set = TARGET_SSE4_1;
26793 break;
26794 case V8HImode:
26795 use_vector_set = TARGET_SSE2;
26796 break;
26797 case V4HImode:
26798 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
26799 break;
26800 case V32QImode:
26801 case V16HImode:
26802 case V8SImode:
26803 case V8SFmode:
26804 case V4DFmode:
26805 use_vector_set = TARGET_AVX;
26806 break;
26807 case V4DImode:
26808 /* Use ix86_expand_vector_set in 64bit mode only. */
26809 use_vector_set = TARGET_AVX && TARGET_64BIT;
26810 break;
26811 default:
26812 break;
26815 if (use_vector_set)
26817 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
26818 var = force_reg (GET_MODE_INNER (mode), var);
26819 ix86_expand_vector_set (mmx_ok, target, var, one_var);
26820 return true;
26823 switch (mode)
26825 case V2SFmode:
26826 case V2SImode:
26827 if (!mmx_ok)
26828 return false;
26829 /* FALLTHRU */
26831 case V2DFmode:
26832 case V2DImode:
26833 if (one_var != 0)
26834 return false;
26835 var = force_reg (GET_MODE_INNER (mode), var);
26836 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
26837 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26838 return true;
26840 case V4SFmode:
26841 case V4SImode:
26842 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
26843 new_target = gen_reg_rtx (mode);
26844 else
26845 new_target = target;
26846 var = force_reg (GET_MODE_INNER (mode), var);
26847 x = gen_rtx_VEC_DUPLICATE (mode, var);
26848 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
26849 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
26850 if (one_var != 0)
26852 /* We need to shuffle the value to the correct position, so
26853 create a new pseudo to store the intermediate result. */
26855 /* With SSE2, we can use the integer shuffle insns. */
26856 if (mode != V4SFmode && TARGET_SSE2)
26858 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
26859 const1_rtx,
26860 GEN_INT (one_var == 1 ? 0 : 1),
26861 GEN_INT (one_var == 2 ? 0 : 1),
26862 GEN_INT (one_var == 3 ? 0 : 1)));
26863 if (target != new_target)
26864 emit_move_insn (target, new_target);
26865 return true;
26868 /* Otherwise convert the intermediate result to V4SFmode and
26869 use the SSE1 shuffle instructions. */
26870 if (mode != V4SFmode)
26872 tmp = gen_reg_rtx (V4SFmode);
26873 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
26875 else
26876 tmp = new_target;
26878 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
26879 const1_rtx,
26880 GEN_INT (one_var == 1 ? 0 : 1),
26881 GEN_INT (one_var == 2 ? 0+4 : 1+4),
26882 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
26884 if (mode != V4SFmode)
26885 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
26886 else if (tmp != target)
26887 emit_move_insn (target, tmp);
26889 else if (target != new_target)
26890 emit_move_insn (target, new_target);
26891 return true;
26893 case V8HImode:
26894 case V16QImode:
26895 vsimode = V4SImode;
26896 goto widen;
26897 case V4HImode:
26898 case V8QImode:
26899 if (!mmx_ok)
26900 return false;
26901 vsimode = V2SImode;
26902 goto widen;
26903 widen:
26904 if (one_var != 0)
26905 return false;
26907 /* Zero extend the variable element to SImode and recurse. */
26908 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
26910 x = gen_reg_rtx (vsimode);
26911 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
26912 var, one_var))
26913 gcc_unreachable ();
26915 emit_move_insn (target, gen_lowpart (mode, x));
26916 return true;
26918 default:
26919 return false;
26923 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26924 consisting of the values in VALS. It is known that all elements
26925 except ONE_VAR are constants. Return true if successful. */
26927 static bool
26928 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
26929 rtx target, rtx vals, int one_var)
26931 rtx var = XVECEXP (vals, 0, one_var);
26932 enum machine_mode wmode;
26933 rtx const_vec, x;
26935 const_vec = copy_rtx (vals);
26936 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
26937 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
26939 switch (mode)
26941 case V2DFmode:
26942 case V2DImode:
26943 case V2SFmode:
26944 case V2SImode:
26945 /* For the two element vectors, it's just as easy to use
26946 the general case. */
26947 return false;
26949 case V4DImode:
26950 /* Use ix86_expand_vector_set in 64bit mode only. */
26951 if (!TARGET_64BIT)
26952 return false;
26953 case V4DFmode:
26954 case V8SFmode:
26955 case V8SImode:
26956 case V16HImode:
26957 case V32QImode:
26958 case V4SFmode:
26959 case V4SImode:
26960 case V8HImode:
26961 case V4HImode:
26962 break;
26964 case V16QImode:
26965 if (TARGET_SSE4_1)
26966 break;
26967 wmode = V8HImode;
26968 goto widen;
26969 case V8QImode:
26970 wmode = V4HImode;
26971 goto widen;
26972 widen:
26973 /* There's no way to set one QImode entry easily. Combine
26974 the variable value with its adjacent constant value, and
26975 promote to an HImode set. */
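/* E.g. if ONE_VAR is odd the variable value becomes the high byte of the
   combined HImode element and the adjacent constant supplies the low byte;
   for an even ONE_VAR the roles are swapped. */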
26976 x = XVECEXP (vals, 0, one_var ^ 1);
26977 if (one_var & 1)
26979 var = convert_modes (HImode, QImode, var, true);
26980 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
26981 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26982 x = GEN_INT (INTVAL (x) & 0xff);
26984 else
26986 var = convert_modes (HImode, QImode, var, true);
26987 x = gen_int_mode (INTVAL (x) << 8, HImode);
26989 if (x != const0_rtx)
26990 var = expand_simple_binop (HImode, IOR, var, x, var,
26991 1, OPTAB_LIB_WIDEN);
26993 x = gen_reg_rtx (wmode);
26994 emit_move_insn (x, gen_lowpart (wmode, const_vec));
26995 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
26997 emit_move_insn (target, gen_lowpart (mode, x));
26998 return true;
27000 default:
27001 return false;
27004 emit_move_insn (target, const_vec);
27005 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27006 return true;
27009 /* A subroutine of ix86_expand_vector_init_general. Use vector
27010 concatenate to handle the most general case: all values variable,
27011 and none identical. */
27013 static void
27014 ix86_expand_vector_init_concat (enum machine_mode mode,
27015 rtx target, rtx *ops, int n)
27017 enum machine_mode cmode, hmode = VOIDmode;
27018 rtx first[8], second[4];
27019 rtvec v;
27020 int i, j;
27022 switch (n)
27024 case 2:
27025 switch (mode)
27027 case V8SImode:
27028 cmode = V4SImode;
27029 break;
27030 case V8SFmode:
27031 cmode = V4SFmode;
27032 break;
27033 case V4DImode:
27034 cmode = V2DImode;
27035 break;
27036 case V4DFmode:
27037 cmode = V2DFmode;
27038 break;
27039 case V4SImode:
27040 cmode = V2SImode;
27041 break;
27042 case V4SFmode:
27043 cmode = V2SFmode;
27044 break;
27045 case V2DImode:
27046 cmode = DImode;
27047 break;
27048 case V2SImode:
27049 cmode = SImode;
27050 break;
27051 case V2DFmode:
27052 cmode = DFmode;
27053 break;
27054 case V2SFmode:
27055 cmode = SFmode;
27056 break;
27057 default:
27058 gcc_unreachable ();
27061 if (!register_operand (ops[1], cmode))
27062 ops[1] = force_reg (cmode, ops[1]);
27063 if (!register_operand (ops[0], cmode))
27064 ops[0] = force_reg (cmode, ops[0]);
27065 emit_insn (gen_rtx_SET (VOIDmode, target,
27066 gen_rtx_VEC_CONCAT (mode, ops[0],
27067 ops[1])));
27068 break;
27070 case 4:
27071 switch (mode)
27073 case V4DImode:
27074 cmode = V2DImode;
27075 break;
27076 case V4DFmode:
27077 cmode = V2DFmode;
27078 break;
27079 case V4SImode:
27080 cmode = V2SImode;
27081 break;
27082 case V4SFmode:
27083 cmode = V2SFmode;
27084 break;
27085 default:
27086 gcc_unreachable ();
27088 goto half;
27090 case 8:
27091 switch (mode)
27093 case V8SImode:
27094 cmode = V2SImode;
27095 hmode = V4SImode;
27096 break;
27097 case V8SFmode:
27098 cmode = V2SFmode;
27099 hmode = V4SFmode;
27100 break;
27101 default:
27102 gcc_unreachable ();
27104 goto half;
27106 half:
27107 /* FIXME: We process inputs backward to help RA. PR 36222. */
27108 i = n - 1;
27109 j = (n >> 1) - 1;
27110 for (; i > 0; i -= 2, j--)
27112 first[j] = gen_reg_rtx (cmode);
27113 v = gen_rtvec (2, ops[i - 1], ops[i]);
27114 ix86_expand_vector_init (false, first[j],
27115 gen_rtx_PARALLEL (cmode, v));
27118 n >>= 1;
27119 if (n > 2)
27121 gcc_assert (hmode != VOIDmode);
27122 for (i = j = 0; i < n; i += 2, j++)
27124 second[j] = gen_reg_rtx (hmode);
27125 ix86_expand_vector_init_concat (hmode, second [j],
27126 &first [i], 2);
27128 n >>= 1;
27129 ix86_expand_vector_init_concat (mode, target, second, n);
27131 else
27132 ix86_expand_vector_init_concat (mode, target, first, n);
27133 break;
27135 default:
27136 gcc_unreachable ();
27140 /* A subroutine of ix86_expand_vector_init_general. Use vector
27141 interleave to handle the most general case: all values variable,
27142 and none identical. */
27144 static void
27145 ix86_expand_vector_init_interleave (enum machine_mode mode,
27146 rtx target, rtx *ops, int n)
27148 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27149 int i, j;
27150 rtx op0, op1;
27151 rtx (*gen_load_even) (rtx, rtx, rtx);
27152 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27153 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27155 switch (mode)
27157 case V8HImode:
27158 gen_load_even = gen_vec_setv8hi;
27159 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27160 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27161 inner_mode = HImode;
27162 first_imode = V4SImode;
27163 second_imode = V2DImode;
27164 third_imode = VOIDmode;
27165 break;
27166 case V16QImode:
27167 gen_load_even = gen_vec_setv16qi;
27168 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27169 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27170 inner_mode = QImode;
27171 first_imode = V8HImode;
27172 second_imode = V4SImode;
27173 third_imode = V2DImode;
27174 break;
27175 default:
27176 gcc_unreachable ();
27179 for (i = 0; i < n; i++)
27181 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27182 op0 = gen_reg_rtx (SImode);
27183 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27185 /* Insert the SImode value as the low element of a V4SImode vector. */
27186 op1 = gen_reg_rtx (V4SImode);
27187 op0 = gen_rtx_VEC_MERGE (V4SImode,
27188 gen_rtx_VEC_DUPLICATE (V4SImode,
27189 op0),
27190 CONST0_RTX (V4SImode),
27191 const1_rtx);
27192 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27194 /* Cast the V4SImode vector back to a vector in the original mode. */
27195 op0 = gen_reg_rtx (mode);
27196 emit_move_insn (op0, gen_lowpart (mode, op1));
27198 /* Load even elements into the second position. */
27199 emit_insn ((*gen_load_even) (op0,
27200 force_reg (inner_mode,
27201 ops [i + i + 1]),
27202 const1_rtx));
27204 /* Cast vector to FIRST_IMODE vector. */
27205 ops[i] = gen_reg_rtx (first_imode);
27206 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27209 /* Interleave low FIRST_IMODE vectors. */
27210 for (i = j = 0; i < n; i += 2, j++)
27212 op0 = gen_reg_rtx (first_imode);
27213 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27215 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27216 ops[j] = gen_reg_rtx (second_imode);
27217 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27220 /* Interleave low SECOND_IMODE vectors. */
27221 switch (second_imode)
27223 case V4SImode:
27224 for (i = j = 0; i < n / 2; i += 2, j++)
27226 op0 = gen_reg_rtx (second_imode);
27227 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27228 ops[i + 1]));
27230 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27231 vector. */
27232 ops[j] = gen_reg_rtx (third_imode);
27233 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27235 second_imode = V2DImode;
27236 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27237 /* FALLTHRU */
27239 case V2DImode:
27240 op0 = gen_reg_rtx (second_imode);
27241 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27242 ops[1]));
27244 /* Cast the SECOND_IMODE vector back to a vector in the original
27245 mode. */
27246 emit_insn (gen_rtx_SET (VOIDmode, target,
27247 gen_lowpart (mode, op0)));
27248 break;
27250 default:
27251 gcc_unreachable ();
27255 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27256 all values variable, and none identical. */
27258 static void
27259 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27260 rtx target, rtx vals)
27262 rtx ops[32], op0, op1;
27263 enum machine_mode half_mode = VOIDmode;
27264 int n, i;
27266 switch (mode)
27268 case V2SFmode:
27269 case V2SImode:
27270 if (!mmx_ok && !TARGET_SSE)
27271 break;
27272 /* FALLTHRU */
27274 case V8SFmode:
27275 case V8SImode:
27276 case V4DFmode:
27277 case V4DImode:
27278 case V4SFmode:
27279 case V4SImode:
27280 case V2DFmode:
27281 case V2DImode:
27282 n = GET_MODE_NUNITS (mode);
27283 for (i = 0; i < n; i++)
27284 ops[i] = XVECEXP (vals, 0, i);
27285 ix86_expand_vector_init_concat (mode, target, ops, n);
27286 return;
27288 case V32QImode:
27289 half_mode = V16QImode;
27290 goto half;
27292 case V16HImode:
27293 half_mode = V8HImode;
27294 goto half;
27296 half:
27297 n = GET_MODE_NUNITS (mode);
27298 for (i = 0; i < n; i++)
27299 ops[i] = XVECEXP (vals, 0, i);
27300 op0 = gen_reg_rtx (half_mode);
27301 op1 = gen_reg_rtx (half_mode);
27302 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27303 n >> 2);
27304 ix86_expand_vector_init_interleave (half_mode, op1,
27305 &ops [n >> 1], n >> 2);
27306 emit_insn (gen_rtx_SET (VOIDmode, target,
27307 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27308 return;
27310 case V16QImode:
27311 if (!TARGET_SSE4_1)
27312 break;
27313 /* FALLTHRU */
27315 case V8HImode:
27316 if (!TARGET_SSE2)
27317 break;
27319 /* Don't use ix86_expand_vector_init_interleave if we can't
27320 move from GPR to SSE register directly. */
27321 if (!TARGET_INTER_UNIT_MOVES)
27322 break;
27324 n = GET_MODE_NUNITS (mode);
27325 for (i = 0; i < n; i++)
27326 ops[i] = XVECEXP (vals, 0, i);
27327 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27328 return;
27330 case V4HImode:
27331 case V8QImode:
27332 break;
27334 default:
27335 gcc_unreachable ();
27339 int i, j, n_elts, n_words, n_elt_per_word;
27340 enum machine_mode inner_mode;
27341 rtx words[4], shift;
27343 inner_mode = GET_MODE_INNER (mode);
27344 n_elts = GET_MODE_NUNITS (mode);
27345 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27346 n_elt_per_word = n_elts / n_words;
27347 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
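/* Build each word with the lower numbered element in the less significant
   bits, e.g. for V4HImode with a 32-bit word_mode:
     word[i] = (elt[2*i + 1] << 16) | elt[2*i];  */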
27349 for (i = 0; i < n_words; ++i)
27351 rtx word = NULL_RTX;
27353 for (j = 0; j < n_elt_per_word; ++j)
27355 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27356 elt = convert_modes (word_mode, inner_mode, elt, true);
27358 if (j == 0)
27359 word = elt;
27360 else
27362 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27363 word, 1, OPTAB_LIB_WIDEN);
27364 word = expand_simple_binop (word_mode, IOR, word, elt,
27365 word, 1, OPTAB_LIB_WIDEN);
27369 words[i] = word;
27372 if (n_words == 1)
27373 emit_move_insn (target, gen_lowpart (mode, words[0]));
27374 else if (n_words == 2)
27376 rtx tmp = gen_reg_rtx (mode);
27377 emit_clobber (tmp);
27378 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27379 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27380 emit_move_insn (target, tmp);
27382 else if (n_words == 4)
27384 rtx tmp = gen_reg_rtx (V4SImode);
27385 gcc_assert (word_mode == SImode);
27386 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27387 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27388 emit_move_insn (target, gen_lowpart (mode, tmp));
27390 else
27391 gcc_unreachable ();
27395 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27396 instructions unless MMX_OK is true. */
27398 void
27399 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27401 enum machine_mode mode = GET_MODE (target);
27402 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27403 int n_elts = GET_MODE_NUNITS (mode);
27404 int n_var = 0, one_var = -1;
27405 bool all_same = true, all_const_zero = true;
27406 int i;
27407 rtx x;
27409 for (i = 0; i < n_elts; ++i)
27411 x = XVECEXP (vals, 0, i);
27412 if (!(CONST_INT_P (x)
27413 || GET_CODE (x) == CONST_DOUBLE
27414 || GET_CODE (x) == CONST_FIXED))
27415 n_var++, one_var = i;
27416 else if (x != CONST0_RTX (inner_mode))
27417 all_const_zero = false;
27418 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27419 all_same = false;
27422 /* Constants are best loaded from the constant pool. */
27423 if (n_var == 0)
27425 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27426 return;
27429 /* If all values are identical, broadcast the value. */
27430 if (all_same
27431 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27432 XVECEXP (vals, 0, 0)))
27433 return;
27435 /* Values where only one field is non-constant are best loaded from
27436 the pool and overwritten via move later. */
27437 if (n_var == 1)
27439 if (all_const_zero
27440 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27441 XVECEXP (vals, 0, one_var),
27442 one_var))
27443 return;
27445 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27446 return;
27449 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27452 void
27453 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27455 enum machine_mode mode = GET_MODE (target);
27456 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27457 enum machine_mode half_mode;
27458 bool use_vec_merge = false;
27459 rtx tmp;
27460 static rtx (*gen_extract[6][2]) (rtx, rtx)
27462 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27463 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27464 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27465 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27466 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27467 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27469 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27471 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27472 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27473 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27474 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27475 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27476 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27478 int i, j, n;
27480 switch (mode)
27482 case V2SFmode:
27483 case V2SImode:
27484 if (mmx_ok)
27486 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27487 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27488 if (elt == 0)
27489 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27490 else
27491 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27492 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27493 return;
27495 break;
27497 case V2DImode:
27498 use_vec_merge = TARGET_SSE4_1;
27499 if (use_vec_merge)
27500 break;
27502 case V2DFmode:
27504 rtx op0, op1;
27506 /* For the two element vectors, we implement a VEC_CONCAT with
27507 the extraction of the other element. */
27509 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27510 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27512 if (elt == 0)
27513 op0 = val, op1 = tmp;
27514 else
27515 op0 = tmp, op1 = val;
27517 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27518 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27520 return;
27522 case V4SFmode:
27523 use_vec_merge = TARGET_SSE4_1;
27524 if (use_vec_merge)
27525 break;
27527 switch (elt)
27529 case 0:
27530 use_vec_merge = true;
27531 break;
27533 case 1:
27534 /* tmp = target = A B C D */
27535 tmp = copy_to_reg (target);
27536 /* target = A A B B */
27537 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27538 /* target = X A B B */
27539 ix86_expand_vector_set (false, target, val, 0);
27540 /* target = A X C D */
27541 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27542 const1_rtx, const0_rtx,
27543 GEN_INT (2+4), GEN_INT (3+4)));
27544 return;
27546 case 2:
27547 /* tmp = target = A B C D */
27548 tmp = copy_to_reg (target);
27549 /* tmp = X B C D */
27550 ix86_expand_vector_set (false, tmp, val, 0);
27551 /* target = A B X D */
27552 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27553 const0_rtx, const1_rtx,
27554 GEN_INT (0+4), GEN_INT (3+4)));
27555 return;
27557 case 3:
27558 /* tmp = target = A B C D */
27559 tmp = copy_to_reg (target);
27560 /* tmp = X B C D */
27561 ix86_expand_vector_set (false, tmp, val, 0);
27562 /* target = A B C X */
27563 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27564 const0_rtx, const1_rtx,
27565 GEN_INT (2+4), GEN_INT (0+4)));
27566 return;
27568 default:
27569 gcc_unreachable ();
27571 break;
27573 case V4SImode:
27574 use_vec_merge = TARGET_SSE4_1;
27575 if (use_vec_merge)
27576 break;
27578 /* Element 0 handled by vec_merge below. */
27579 if (elt == 0)
27581 use_vec_merge = true;
27582 break;
27585 if (TARGET_SSE2)
27587 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27588 store into element 0, then shuffle them back. */
27590 rtx order[4];
27592 order[0] = GEN_INT (elt);
27593 order[1] = const1_rtx;
27594 order[2] = const2_rtx;
27595 order[3] = GEN_INT (3);
27596 order[elt] = const0_rtx;
27598 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27599 order[1], order[2], order[3]));
27601 ix86_expand_vector_set (false, target, val, 0);
27603 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27604 order[1], order[2], order[3]));
27606 else
27608 /* For SSE1, we have to reuse the V4SF code. */
27609 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27610 gen_lowpart (SFmode, val), elt);
27612 return;
27614 case V8HImode:
27615 use_vec_merge = TARGET_SSE2;
27616 break;
27617 case V4HImode:
27618 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27619 break;
27621 case V16QImode:
27622 use_vec_merge = TARGET_SSE4_1;
27623 break;
27625 case V8QImode:
27626 break;
27628 case V32QImode:
27629 half_mode = V16QImode;
27630 j = 0;
27631 n = 16;
27632 goto half;
27634 case V16HImode:
27635 half_mode = V8HImode;
27636 j = 1;
27637 n = 8;
27638 goto half;
27640 case V8SImode:
27641 half_mode = V4SImode;
27642 j = 2;
27643 n = 4;
27644 goto half;
27646 case V4DImode:
27647 half_mode = V2DImode;
27648 j = 3;
27649 n = 2;
27650 goto half;
27652 case V8SFmode:
27653 half_mode = V4SFmode;
27654 j = 4;
27655 n = 4;
27656 goto half;
27658 case V4DFmode:
27659 half_mode = V2DFmode;
27660 j = 5;
27661 n = 2;
27662 goto half;
27664 half:
27665 /* Compute offset. */
27666 i = elt / n;
27667 elt %= n;
27669 gcc_assert (i <= 1);
27671 /* Extract the half. */
27672 tmp = gen_reg_rtx (half_mode);
27673 emit_insn ((*gen_extract[j][i]) (tmp, target));
27675 /* Put val in tmp at elt. */
27676 ix86_expand_vector_set (false, tmp, val, elt);
27678 /* Put it back. */
27679 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27680 return;
27682 default:
27683 break;
27686 if (use_vec_merge)
27688 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27689 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27690 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27692 else
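/* Fallback: spill the vector to a stack slot, overwrite the selected
   element in memory, and reload the whole vector. */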
27694 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27696 emit_move_insn (mem, target);
27698 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27699 emit_move_insn (tmp, val);
27701 emit_move_insn (target, mem);
27705 void
27706 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27708 enum machine_mode mode = GET_MODE (vec);
27709 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27710 bool use_vec_extr = false;
27711 rtx tmp;
27713 switch (mode)
27715 case V2SImode:
27716 case V2SFmode:
27717 if (!mmx_ok)
27718 break;
27719 /* FALLTHRU */
27721 case V2DFmode:
27722 case V2DImode:
27723 use_vec_extr = true;
27724 break;
27726 case V4SFmode:
27727 use_vec_extr = TARGET_SSE4_1;
27728 if (use_vec_extr)
27729 break;
27731 switch (elt)
27733 case 0:
27734 tmp = vec;
27735 break;
27737 case 1:
27738 case 3:
27739 tmp = gen_reg_rtx (mode);
27740 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
27741 GEN_INT (elt), GEN_INT (elt),
27742 GEN_INT (elt+4), GEN_INT (elt+4)));
27743 break;
27745 case 2:
27746 tmp = gen_reg_rtx (mode);
27747 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
27748 break;
27750 default:
27751 gcc_unreachable ();
27753 vec = tmp;
27754 use_vec_extr = true;
27755 elt = 0;
27756 break;
27758 case V4SImode:
27759 use_vec_extr = TARGET_SSE4_1;
27760 if (use_vec_extr)
27761 break;
27763 if (TARGET_SSE2)
27765 switch (elt)
27767 case 0:
27768 tmp = vec;
27769 break;
27771 case 1:
27772 case 3:
27773 tmp = gen_reg_rtx (mode);
27774 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
27775 GEN_INT (elt), GEN_INT (elt),
27776 GEN_INT (elt), GEN_INT (elt)));
27777 break;
27779 case 2:
27780 tmp = gen_reg_rtx (mode);
27781 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
27782 break;
27784 default:
27785 gcc_unreachable ();
27787 vec = tmp;
27788 use_vec_extr = true;
27789 elt = 0;
27791 else
27793 /* For SSE1, we have to reuse the V4SF code. */
27794 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
27795 gen_lowpart (V4SFmode, vec), elt);
27796 return;
27798 break;
27800 case V8HImode:
27801 use_vec_extr = TARGET_SSE2;
27802 break;
27803 case V4HImode:
27804 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27805 break;
27807 case V16QImode:
27808 use_vec_extr = TARGET_SSE4_1;
27809 break;
27811 case V8QImode:
27812 /* ??? Could extract the appropriate HImode element and shift. */
27813 default:
27814 break;
27817 if (use_vec_extr)
27819 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
27820 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
27822 /* Let the rtl optimizers know about the zero extension performed. */
27823 if (inner_mode == QImode || inner_mode == HImode)
27825 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
27826 target = gen_lowpart (SImode, target);
27829 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27831 else
27833 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27835 emit_move_insn (mem, vec);
27837 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27838 emit_move_insn (target, tmp);
27842 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
27843 pattern to reduce; DEST is the destination; IN is the input vector. */
27845 void
27846 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
27848 rtx tmp1, tmp2, tmp3;
27850 tmp1 = gen_reg_rtx (V4SFmode);
27851 tmp2 = gen_reg_rtx (V4SFmode);
27852 tmp3 = gen_reg_rtx (V4SFmode);
27854 emit_insn (gen_sse_movhlps (tmp1, in, in));
27855 emit_insn (fn (tmp2, tmp1, in));
27857 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
27858 const1_rtx, const1_rtx,
27859 GEN_INT (1+4), GEN_INT (1+4)));
27860 emit_insn (fn (dest, tmp2, tmp3));
27863 /* Target hook for scalar_mode_supported_p. */
27864 static bool
27865 ix86_scalar_mode_supported_p (enum machine_mode mode)
27867 if (DECIMAL_FLOAT_MODE_P (mode))
27868 return default_decimal_float_supported_p ();
27869 else if (mode == TFmode)
27870 return true;
27871 else
27872 return default_scalar_mode_supported_p (mode);
27875 /* Implements target hook vector_mode_supported_p. */
27876 static bool
27877 ix86_vector_mode_supported_p (enum machine_mode mode)
27879 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
27880 return true;
27881 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
27882 return true;
27883 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
27884 return true;
27885 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
27886 return true;
27887 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
27888 return true;
27889 return false;
27892 /* Target hook for c_mode_for_suffix. */
27893 static enum machine_mode
27894 ix86_c_mode_for_suffix (char suffix)
27896 if (suffix == 'q')
27897 return TFmode;
27898 if (suffix == 'w')
27899 return XFmode;
27901 return VOIDmode;
27904 /* Worker function for TARGET_MD_ASM_CLOBBERS.
27906 We do this in the new i386 backend to maintain source compatibility
27907 with the old cc0-based compiler. */
27909 static tree
27910 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
27911 tree inputs ATTRIBUTE_UNUSED,
27912 tree clobbers)
27914 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
27915 clobbers);
27916 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
27917 clobbers);
27918 return clobbers;
27921 /* Implements the target hook targetm.asm.encode_section_info. This
27922 is not used by NetWare. */
27924 static void ATTRIBUTE_UNUSED
27925 ix86_encode_section_info (tree decl, rtx rtl, int first)
27927 default_encode_section_info (decl, rtl, first);
27929 if (TREE_CODE (decl) == VAR_DECL
27930 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
27931 && ix86_in_large_data_p (decl))
27932 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
27935 /* Worker function for REVERSE_CONDITION. */
27937 enum rtx_code
27938 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
27940 return (mode != CCFPmode && mode != CCFPUmode
27941 ? reverse_condition (code)
27942 : reverse_condition_maybe_unordered (code));
27945 /* Output code to perform an x87 FP register move, from OPERANDS[1]
27946 to OPERANDS[0]. */
27948 const char *
27949 output_387_reg_move (rtx insn, rtx *operands)
27951 if (REG_P (operands[0]))
27953 if (REG_P (operands[1])
27954 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
27956 if (REGNO (operands[0]) == FIRST_STACK_REG)
27957 return output_387_ffreep (operands, 0);
27958 return "fstp\t%y0";
27960 if (STACK_TOP_P (operands[0]))
27961 return "fld%Z1\t%y1";
27962 return "fst\t%y0";
27964 else if (MEM_P (operands[0]))
27966 gcc_assert (REG_P (operands[1]));
27967 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
27968 return "fstp%Z0\t%y0";
27969 else
27971 /* There is no non-popping store to memory for XFmode.
27972 So if we need one, follow the store with a load. */
27973 if (GET_MODE (operands[0]) == XFmode)
27974 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
27975 else
27976 return "fst%Z0\t%y0";
27979 else
27980 gcc_unreachable();
27983 /* Output code to perform a conditional jump to LABEL, if C2 flag in
27984 FP status register is set. */
27986 void
27987 ix86_emit_fp_unordered_jump (rtx label)
27989 rtx reg = gen_reg_rtx (HImode);
27990 rtx temp;
27992 emit_insn (gen_x86_fnstsw_1 (reg));
27994 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
27996 emit_insn (gen_x86_sahf_1 (reg));
27998 temp = gen_rtx_REG (CCmode, FLAGS_REG);
27999 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28001 else
28003 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28005 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28006 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28009 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28010 gen_rtx_LABEL_REF (VOIDmode, label),
28011 pc_rtx);
28012 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28014 emit_jump_insn (temp);
28015 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28018 /* Output code to perform a log1p XFmode calculation. */
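/* fyl2xp1 is only defined for |op1| < 1 - sqrt(2)/2 (about 0.2928932...),
   so that range is handled with it directly below; larger magnitudes fall
   back to computing fldln2 * log2 (1.0 + op1) with fyl2x. */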
28020 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28022 rtx label1 = gen_label_rtx ();
28023 rtx label2 = gen_label_rtx ();
28025 rtx tmp = gen_reg_rtx (XFmode);
28026 rtx tmp2 = gen_reg_rtx (XFmode);
28027 rtx test;
28029 emit_insn (gen_absxf2 (tmp, op1));
28030 test = gen_rtx_GE (VOIDmode, tmp,
28031 CONST_DOUBLE_FROM_REAL_VALUE (
28032 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28033 XFmode));
28034 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28036 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28037 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28038 emit_jump (label2);
28040 emit_label (label1);
28041 emit_move_insn (tmp, CONST1_RTX (XFmode));
28042 emit_insn (gen_addxf3 (tmp, op1, tmp));
28043 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28044 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28046 emit_label (label2);
28049 /* Output code to perform a Newton-Raphson approximation of a single precision
28050 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28052 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28054 rtx x0, x1, e0, e1, two;
28056 x0 = gen_reg_rtx (mode);
28057 e0 = gen_reg_rtx (mode);
28058 e1 = gen_reg_rtx (mode);
28059 x1 = gen_reg_rtx (mode);
28061 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28063 if (VECTOR_MODE_P (mode))
28064 two = ix86_build_const_vector (SFmode, true, two);
28066 two = force_reg (mode, two);
28068 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
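/* This is one Newton-Raphson step for 1/b: with x0 = rcp(b) the refined
   reciprocal is x1 = x0 * (2.0 - b * x0); multiplying through by a gives
   the formula above and roughly doubles the accuracy of the estimate. */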
28070 /* x0 = rcp(b) estimate */
28071 emit_insn (gen_rtx_SET (VOIDmode, x0,
28072 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28073 UNSPEC_RCP)));
28074 /* e0 = x0 * a */
28075 emit_insn (gen_rtx_SET (VOIDmode, e0,
28076 gen_rtx_MULT (mode, x0, a)));
28077 /* e1 = x0 * b */
28078 emit_insn (gen_rtx_SET (VOIDmode, e1,
28079 gen_rtx_MULT (mode, x0, b)));
28080 /* x1 = 2. - e1 */
28081 emit_insn (gen_rtx_SET (VOIDmode, x1,
28082 gen_rtx_MINUS (mode, two, e1)));
28083 /* res = e0 * x1 */
28084 emit_insn (gen_rtx_SET (VOIDmode, res,
28085 gen_rtx_MULT (mode, e0, x1)));
28088 /* Output code to perform a Newton-Raphson approximation of a
28089 single precision floating point [reciprocal] square root. */
28091 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28092 bool recip)
28094 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28095 REAL_VALUE_TYPE r;
28097 x0 = gen_reg_rtx (mode);
28098 e0 = gen_reg_rtx (mode);
28099 e1 = gen_reg_rtx (mode);
28100 e2 = gen_reg_rtx (mode);
28101 e3 = gen_reg_rtx (mode);
28103 real_from_integer (&r, VOIDmode, -3, -1, 0);
28104 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28106 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28107 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28109 if (VECTOR_MODE_P (mode))
28111 mthree = ix86_build_const_vector (SFmode, true, mthree);
28112 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28115 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28116 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
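/* Both are one Newton-Raphson step for 1/sqrt(a): with x0 = rsqrtss(a),
   x1 = 0.5 * x0 * (3.0 - a * x0 * x0), written here with the negated
   constants -0.5 and -3.0; the sqrt form folds the extra factor of a
   into the e0 = a * x0 term computed below. */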
28118 /* x0 = rsqrt(a) estimate */
28119 emit_insn (gen_rtx_SET (VOIDmode, x0,
28120 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28121 UNSPEC_RSQRT)));
28123 /* If (a == 0.0), filter out the infinite rsqrt estimate to prevent a NaN for sqrt (0.0). */
28124 if (!recip)
28126 rtx zero, mask;
28128 zero = gen_reg_rtx (mode);
28129 mask = gen_reg_rtx (mode);
28131 zero = force_reg (mode, CONST0_RTX(mode));
28132 emit_insn (gen_rtx_SET (VOIDmode, mask,
28133 gen_rtx_NE (mode, zero, a)));
28135 emit_insn (gen_rtx_SET (VOIDmode, x0,
28136 gen_rtx_AND (mode, x0, mask)));
28139 /* e0 = x0 * a */
28140 emit_insn (gen_rtx_SET (VOIDmode, e0,
28141 gen_rtx_MULT (mode, x0, a)));
28142 /* e1 = e0 * x0 */
28143 emit_insn (gen_rtx_SET (VOIDmode, e1,
28144 gen_rtx_MULT (mode, e0, x0)));
28146 /* e2 = e1 - 3. */
28147 mthree = force_reg (mode, mthree);
28148 emit_insn (gen_rtx_SET (VOIDmode, e2,
28149 gen_rtx_PLUS (mode, e1, mthree)));
28151 mhalf = force_reg (mode, mhalf);
28152 if (recip)
28153 /* e3 = -.5 * x0 */
28154 emit_insn (gen_rtx_SET (VOIDmode, e3,
28155 gen_rtx_MULT (mode, x0, mhalf)));
28156 else
28157 /* e3 = -.5 * e0 */
28158 emit_insn (gen_rtx_SET (VOIDmode, e3,
28159 gen_rtx_MULT (mode, e0, mhalf)));
28160 /* ret = e2 * e3 */
28161 emit_insn (gen_rtx_SET (VOIDmode, res,
28162 gen_rtx_MULT (mode, e2, e3)));
28165 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28167 static void ATTRIBUTE_UNUSED
28168 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28169 tree decl)
28171 /* With Binutils 2.15, the "@unwind" marker must be specified on
28172 every occurrence of the ".eh_frame" section, not just the first
28173 one. */
28174 if (TARGET_64BIT
28175 && strcmp (name, ".eh_frame") == 0)
28177 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28178 flags & SECTION_WRITE ? "aw" : "a");
28179 return;
28181 default_elf_asm_named_section (name, flags, decl);
28184 /* Return the mangling of TYPE if it is an extended fundamental type. */
28186 static const char *
28187 ix86_mangle_type (const_tree type)
28189 type = TYPE_MAIN_VARIANT (type);
28191 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28192 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28193 return NULL;
28195 switch (TYPE_MODE (type))
28197 case TFmode:
28198 /* __float128 is "g". */
28199 return "g";
28200 case XFmode:
28201 /* "long double" or __float80 is "e". */
28202 return "e";
28203 default:
28204 return NULL;
28208 /* For 32-bit code we can save PIC register setup by using
28209 __stack_chk_fail_local hidden function instead of calling
28210 __stack_chk_fail directly. 64-bit code doesn't need to setup any PIC
28211 register, so it is better to call __stack_chk_fail directly. */
28213 static tree
28214 ix86_stack_protect_fail (void)
28216 return TARGET_64BIT
28217 ? default_external_stack_protect_fail ()
28218 : default_hidden_stack_protect_fail ();
28221 /* Select a format to encode pointers in exception handling data. CODE
28222 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28223 true if the symbol may be affected by dynamic relocations.
28225 ??? All x86 object file formats are capable of representing this.
28226 After all, the relocation needed is the same as for the call insn.
28227 Whether or not a particular assembler allows us to enter such, I
28228 guess we'll have to see. */
28229 int
28230 asm_preferred_eh_data_format (int code, int global)
28232 if (flag_pic)
28234 int type = DW_EH_PE_sdata8;
28235 if (!TARGET_64BIT
28236 || ix86_cmodel == CM_SMALL_PIC
28237 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28238 type = DW_EH_PE_sdata4;
28239 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28241 if (ix86_cmodel == CM_SMALL
28242 || (ix86_cmodel == CM_MEDIUM && code))
28243 return DW_EH_PE_udata4;
28244 return DW_EH_PE_absptr;
28247 /* Expand copysign from SIGN to the positive value ABS_VALUE
28248 storing the result in RESULT. If MASK is non-null, it shall be a mask to mask out
28249 the sign-bit. */
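/* That is, RESULT = ABS_VALUE | (SIGN & sign-bit mask). A non-null MASK
   is expected to be the inverted (~sign-bit) mask as produced by
   ix86_expand_sse_fabs and is negated back here. */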
28250 static void
28251 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28253 enum machine_mode mode = GET_MODE (sign);
28254 rtx sgn = gen_reg_rtx (mode);
28255 if (mask == NULL_RTX)
28257 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28258 if (!VECTOR_MODE_P (mode))
28260 /* We need to generate a scalar mode mask in this case. */
28261 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28262 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28263 mask = gen_reg_rtx (mode);
28264 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28267 else
28268 mask = gen_rtx_NOT (mode, mask);
28269 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28270 gen_rtx_AND (mode, mask, sign)));
28271 emit_insn (gen_rtx_SET (VOIDmode, result,
28272 gen_rtx_IOR (mode, abs_value, sgn)));
28275 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28276 mask for masking out the sign-bit is stored in *SMASK, if that is
28277 non-null. */
28278 static rtx
28279 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28281 enum machine_mode mode = GET_MODE (op0);
28282 rtx xa, mask;
28284 xa = gen_reg_rtx (mode);
28285 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28286 if (!VECTOR_MODE_P (mode))
28288 /* We need to generate a scalar mode mask in this case. */
28289 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28290 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28291 mask = gen_reg_rtx (mode);
28292 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28294 emit_insn (gen_rtx_SET (VOIDmode, xa,
28295 gen_rtx_AND (mode, op0, mask)));
28297 if (smask)
28298 *smask = mask;
28300 return xa;
28303 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28304 swapping the operands if SWAP_OPERANDS is true. The expanded
28305 code is a forward jump to a newly created label in case the
28306 comparison is true. The generated label rtx is returned. */
28307 static rtx
28308 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28309 bool swap_operands)
28311 rtx label, tmp;
28313 if (swap_operands)
28315 tmp = op0;
28316 op0 = op1;
28317 op1 = tmp;
28320 label = gen_label_rtx ();
28321 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28322 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28323 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28324 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28325 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28326 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28327 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28328 JUMP_LABEL (tmp) = label;
28330 return label;
28333 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28334 using comparison code CODE. Operands are swapped for the comparison if
28335 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28336 static rtx
28337 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28338 bool swap_operands)
28340 enum machine_mode mode = GET_MODE (op0);
28341 rtx mask = gen_reg_rtx (mode);
28343 if (swap_operands)
28345 rtx tmp = op0;
28346 op0 = op1;
28347 op1 = tmp;
28350 if (mode == DFmode)
28351 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28352 gen_rtx_fmt_ee (code, mode, op0, op1)));
28353 else
28354 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28355 gen_rtx_fmt_ee (code, mode, op0, op1)));
28357 return mask;
28360 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28361 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28362 static rtx
28363 ix86_gen_TWO52 (enum machine_mode mode)
28365 REAL_VALUE_TYPE TWO52r;
28366 rtx TWO52;
28368 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28369 TWO52 = const_double_from_real_value (TWO52r, mode);
28370 TWO52 = force_reg (mode, TWO52);
28372 return TWO52;
28375 /* Expand SSE sequence for computing lround from OP1 storing
28376 into OP0. */
28377 void
28378 ix86_expand_lround (rtx op0, rtx op1)
28380 /* C code for the stuff we're doing below:
28381 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28382 return (long)tmp;
28384 enum machine_mode mode = GET_MODE (op1);
28385 const struct real_format *fmt;
28386 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28387 rtx adj;
28389 /* load nextafter (0.5, 0.0) */
28390 fmt = REAL_MODE_FORMAT (mode);
28391 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28392 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
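/* Using the largest representable value below 0.5, rather than 0.5 itself,
   avoids a double-rounding problem: for x == 0.49999999999999994 the sum
   x + 0.5 rounds up to exactly 1.0, so lround would return 1 instead of 0;
   x + pred_half stays below 1.0.  */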
28394 /* adj = copysign (0.5, op1) */
28395 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28396 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28398 /* adj = op1 + adj */
28399 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28401 /* op0 = (imode)adj */
28402 expand_fix (op0, adj, 0);
28405 /* Expand SSE2 sequence for computing lfloor or lceil from OP1 storing
28406 into OP0. */
28407 void
28408 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28410 /* C code for the stuff we're doing below (for do_floor):
28411 xi = (long)op1;
28412 xi -= (double)xi > op1 ? 1 : 0;
28413 return xi;
28415 enum machine_mode fmode = GET_MODE (op1);
28416 enum machine_mode imode = GET_MODE (op0);
28417 rtx ireg, freg, label, tmp;
28419 /* reg = (long)op1 */
28420 ireg = gen_reg_rtx (imode);
28421 expand_fix (ireg, op1, 0);
28423 /* freg = (double)reg */
28424 freg = gen_reg_rtx (fmode);
28425 expand_float (freg, ireg, 0);
28427 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28428 label = ix86_expand_sse_compare_and_jump (UNLE,
28429 freg, op1, !do_floor);
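/* The jump is taken when no compensation is needed; passing !do_floor
   swaps the comparison operands so the same UNLE test also handles the
   ceil case (skip the adjustment unless freg < op1).  */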
28430 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28431 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28432 emit_move_insn (ireg, tmp);
28434 emit_label (label);
28435 LABEL_NUSES (label) = 1;
28437 emit_move_insn (op0, ireg);
28440 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28441 result in OPERAND0. */
28442 void
28443 ix86_expand_rint (rtx operand0, rtx operand1)
28445 /* C code for the stuff we're doing below:
28446 xa = fabs (operand1);
28447 if (!isless (xa, 2**52))
28448 return operand1;
28449 xa = xa + 2**52 - 2**52;
28450 return copysign (xa, operand1);
28452 enum machine_mode mode = GET_MODE (operand0);
28453 rtx res, xa, label, TWO52, mask;
28455 res = gen_reg_rtx (mode);
28456 emit_move_insn (res, operand1);
28458 /* xa = abs (operand1) */
28459 xa = ix86_expand_sse_fabs (res, &mask);
28461 /* if (!isless (xa, TWO52)) goto label; */
28462 TWO52 = ix86_gen_TWO52 (mode);
28463 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28465 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28466 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28468 ix86_sse_copysign_to_positive (res, xa, res, mask);
28470 emit_label (label);
28471 LABEL_NUSES (label) = 1;
28473 emit_move_insn (operand0, res);
28476 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28477 into OPERAND0. */
28478 void
28479 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28481 /* C code for the stuff we expand below.
28482 double xa = fabs (x), x2;
28483 if (!isless (xa, TWO52))
28484 return x;
28485 xa = xa + TWO52 - TWO52;
28486 x2 = copysign (xa, x);
28487 Compensate. Floor:
28488 if (x2 > x)
28489 x2 -= 1;
28490 Compensate. Ceil:
28491 if (x2 < x)
28492 x2 -= -1;
28493 return x2;
28495 enum machine_mode mode = GET_MODE (operand0);
28496 rtx xa, TWO52, tmp, label, one, res, mask;
28498 TWO52 = ix86_gen_TWO52 (mode);
28500 /* Temporary for holding the result, initialized to the input
28501 operand to ease control flow. */
28502 res = gen_reg_rtx (mode);
28503 emit_move_insn (res, operand1);
28505 /* xa = abs (operand1) */
28506 xa = ix86_expand_sse_fabs (res, &mask);
28508 /* if (!isless (xa, TWO52)) goto label; */
28509 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28511 /* xa = xa + TWO52 - TWO52; */
28512 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28513 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28515 /* xa = copysign (xa, operand1) */
28516 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28518 /* generate 1.0 or -1.0 */
28519 one = force_reg (mode,
28520 const_double_from_real_value (do_floor
28521 ? dconst1 : dconstm1, mode));
28523 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28524 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
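/* The SSE compare produces an all-ones bit pattern where the condition
   holds, so ANDing it with 1.0 (or -1.0 for ceil) yields the per-element
   adjustment and +0.0 elsewhere.  */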
28525 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28526 gen_rtx_AND (mode, one, tmp)));
28527 /* We always need to subtract here to preserve signed zero. */
28528 tmp = expand_simple_binop (mode, MINUS,
28529 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28530 emit_move_insn (res, tmp);
28532 emit_label (label);
28533 LABEL_NUSES (label) = 1;
28535 emit_move_insn (operand0, res);
28538 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28539 into OPERAND0. */
28540 void
28541 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28543 /* C code for the stuff we expand below.
28544 double xa = fabs (x), x2;
28545 if (!isless (xa, TWO52))
28546 return x;
28547 x2 = (double)(long)x;
28548 Compensate. Floor:
28549 if (x2 > x)
28550 x2 -= 1;
28551 Compensate. Ceil:
28552 if (x2 < x)
28553 x2 += 1;
28554 if (HONOR_SIGNED_ZEROS (mode))
28555 return copysign (x2, x);
28556 return x2;
28558 enum machine_mode mode = GET_MODE (operand0);
28559 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28561 TWO52 = ix86_gen_TWO52 (mode);
28563 /* Temporary for holding the result, initialized to the input
28564 operand to ease control flow. */
28565 res = gen_reg_rtx (mode);
28566 emit_move_insn (res, operand1);
28568 /* xa = abs (operand1) */
28569 xa = ix86_expand_sse_fabs (res, &mask);
28571 /* if (!isless (xa, TWO52)) goto label; */
28572 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28574 /* xa = (double)(long)x */
28575 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28576 expand_fix (xi, res, 0);
28577 expand_float (xa, xi, 0);
28579 /* generate 1.0 */
28580 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28582 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28583 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28584 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28585 gen_rtx_AND (mode, one, tmp)));
28586 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28587 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28588 emit_move_insn (res, tmp);
28590 if (HONOR_SIGNED_ZEROS (mode))
28591 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28593 emit_label (label);
28594 LABEL_NUSES (label) = 1;
28596 emit_move_insn (operand0, res);
28599 /* Expand SSE sequence for computing round from OPERAND1 storing
28600 into OPERAND0. A sequence that works without relying on DImode truncation
28601 via cvttsd2siq, which is only available on 64-bit targets. */
28602 void
28603 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28605 /* C code for the stuff we expand below.
28606 double xa = fabs (x), xa2, x2;
28607 if (!isless (xa, TWO52))
28608 return x;
28609 Using the absolute value and copying back sign makes
28610 -0.0 -> -0.0 correct.
28611 xa2 = xa + TWO52 - TWO52;
28612 Compensate.
28613 dxa = xa2 - xa;
28614 if (dxa <= -0.5)
28615 xa2 += 1;
28616 else if (dxa > 0.5)
28617 xa2 -= 1;
28618 x2 = copysign (xa2, x);
28619 return x2;
28621 enum machine_mode mode = GET_MODE (operand0);
28622 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28624 TWO52 = ix86_gen_TWO52 (mode);
28626 /* Temporary for holding the result, initialized to the input
28627 operand to ease control flow. */
28628 res = gen_reg_rtx (mode);
28629 emit_move_insn (res, operand1);
28631 /* xa = abs (operand1) */
28632 xa = ix86_expand_sse_fabs (res, &mask);
28634 /* if (!isless (xa, TWO52)) goto label; */
28635 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28637 /* xa2 = xa + TWO52 - TWO52; */
28638 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28639 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28641 /* dxa = xa2 - xa; */
28642 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28644 /* generate 0.5, 1.0 and -0.5 */
28645 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28646 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28647 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28648 0, OPTAB_DIRECT);
28650 /* Compensate. */
28651 tmp = gen_reg_rtx (mode);
28652 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28653 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28654 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28655 gen_rtx_AND (mode, one, tmp)));
28656 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28657 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28658 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28659 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28660 gen_rtx_AND (mode, one, tmp)));
28661 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28663 /* res = copysign (xa2, operand1) */
28664 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28666 emit_label (label);
28667 LABEL_NUSES (label) = 1;
28669 emit_move_insn (operand0, res);
28672 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28673 into OPERAND0. */
28674 void
28675 ix86_expand_trunc (rtx operand0, rtx operand1)
28677 /* C code for SSE variant we expand below.
28678 double xa = fabs (x), x2;
28679 if (!isless (xa, TWO52))
28680 return x;
28681 x2 = (double)(long)x;
28682 if (HONOR_SIGNED_ZEROS (mode))
28683 return copysign (x2, x);
28684 return x2;
28686 enum machine_mode mode = GET_MODE (operand0);
28687 rtx xa, xi, TWO52, label, res, mask;
28689 TWO52 = ix86_gen_TWO52 (mode);
28691 /* Temporary for holding the result, initialized to the input
28692 operand to ease control flow. */
28693 res = gen_reg_rtx (mode);
28694 emit_move_insn (res, operand1);
28696 /* xa = abs (operand1) */
28697 xa = ix86_expand_sse_fabs (res, &mask);
28699 /* if (!isless (xa, TWO52)) goto label; */
28700 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28702 /* x = (double)(long)x */
28703 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28704 expand_fix (xi, res, 0);
28705 expand_float (res, xi, 0);
28707 if (HONOR_SIGNED_ZEROS (mode))
28708 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28710 emit_label (label);
28711 LABEL_NUSES (label) = 1;
28713 emit_move_insn (operand0, res);
28716 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28717 into OPERAND0. */
28718 void
28719 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28721 enum machine_mode mode = GET_MODE (operand0);
28722 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28724 /* C code for SSE variant we expand below.
28725 double xa = fabs (x), x2;
28726 if (!isless (xa, TWO52))
28727 return x;
28728 xa2 = xa + TWO52 - TWO52;
28729 Compensate:
28730 if (xa2 > xa)
28731 xa2 -= 1.0;
28732 x2 = copysign (xa2, x);
28733 return x2;
28736 TWO52 = ix86_gen_TWO52 (mode);
28738 /* Temporary for holding the result, initialized to the input
28739 operand to ease control flow. */
28740 res = gen_reg_rtx (mode);
28741 emit_move_insn (res, operand1);
28743 /* xa = abs (operand1) */
28744 xa = ix86_expand_sse_fabs (res, &smask);
28746 /* if (!isless (xa, TWO52)) goto label; */
28747 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28749 /* res = xa + TWO52 - TWO52; */
28750 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28751 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
28752 emit_move_insn (res, tmp);
28754 /* generate 1.0 */
28755 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28757 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
28758 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
28759 emit_insn (gen_rtx_SET (VOIDmode, mask,
28760 gen_rtx_AND (mode, mask, one)));
28761 tmp = expand_simple_binop (mode, MINUS,
28762 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
28763 emit_move_insn (res, tmp);
28765 /* res = copysign (res, operand1) */
28766 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
28768 emit_label (label);
28769 LABEL_NUSES (label) = 1;
28771 emit_move_insn (operand0, res);
28774 /* Expand SSE sequence for computing round from OPERAND1 storing
28775 into OPERAND0. */
28776 void
28777 ix86_expand_round (rtx operand0, rtx operand1)
28779 /* C code for the stuff we're doing below:
28780 double xa = fabs (x);
28781 if (!isless (xa, TWO52))
28782 return x;
28783 xa = (double)(long)(xa + nextafter (0.5, 0.0));
28784 return copysign (xa, x);
28786 enum machine_mode mode = GET_MODE (operand0);
28787 rtx res, TWO52, xa, label, xi, half, mask;
28788 const struct real_format *fmt;
28789 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28791 /* Temporary for holding the result, initialized to the input
28792 operand to ease control flow. */
28793 res = gen_reg_rtx (mode);
28794 emit_move_insn (res, operand1);
28796 TWO52 = ix86_gen_TWO52 (mode);
28797 xa = ix86_expand_sse_fabs (res, &mask);
28798 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28800 /* load nextafter (0.5, 0.0) */
28801 fmt = REAL_MODE_FORMAT (mode);
28802 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28803 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28805 /* xa = xa + 0.5 */
28806 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
28807 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
28809 /* xa = (double)(int64_t)xa */
28810 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28811 expand_fix (xi, xa, 0);
28812 expand_float (xa, xi, 0);
28814 /* res = copysign (xa, operand1) */
28815 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
28817 emit_label (label);
28818 LABEL_NUSES (label) = 1;
28820 emit_move_insn (operand0, res);
28824 /* Table of valid machine attributes. */
28825 static const struct attribute_spec ix86_attribute_table[] =
28827 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
28828 /* Stdcall attribute says callee is responsible for popping arguments
28829 if they are not variable. */
28830 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28831 /* Fastcall attribute says callee is responsible for popping arguments
28832 if they are not variable. */
28833 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28834 /* Cdecl attribute says the callee is a normal C declaration */
28835 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28836 /* Regparm attribute specifies how many integer arguments are to be
28837 passed in registers. */
28838 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
28839 /* Sseregparm attribute says we are using x86_64 calling conventions
28840 for FP arguments. */
28841 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28842 /* force_align_arg_pointer says this function realigns the stack at entry. */
28843 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
28844 false, true, true, ix86_handle_cconv_attribute },
28845 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
28846 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
28847 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
28848 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
28849 #endif
28850 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28851 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28852 #ifdef SUBTARGET_ATTRIBUTE_TABLE
28853 SUBTARGET_ATTRIBUTE_TABLE,
28854 #endif
28855 /* ms_abi and sysv_abi calling convention function attributes. */
28856 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28857 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28858 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
28859 /* End element. */
28860 { NULL, 0, 0, false, false, false, NULL }
28863 /* Implement targetm.vectorize.builtin_vectorization_cost. */
28864 static int
28865 ix86_builtin_vectorization_cost (bool runtime_test)
28867 /* If the branch of the runtime test is taken, i.e. the vectorized
28868 version is skipped, this incurs a misprediction cost (because the
28869 vectorized version is expected to be the fall-through). So we subtract
28870 the latency of a mispredicted branch from the costs that are incurred
28871 when the vectorized version is executed.
28873 TODO: The values in individual target tables have to be tuned or new
28874 fields may be needed. E.g. on K8, the default branch path is the
28875 not-taken path. If the taken path is predicted correctly, the minimum
28876 penalty of going down the taken-path is 1 cycle. If the taken-path is
28877 not predicted correctly, then the minimum penalty is 10 cycles. */
28879 if (runtime_test)
28881 return (-(ix86_cost->cond_taken_branch_cost));
28883 else
28884 return 0;
28887 /* Implement targetm.vectorize.builtin_vec_perm. */
28889 static tree
28890 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
28892 tree itype = TREE_TYPE (vec_type);
28893 bool u = TYPE_UNSIGNED (itype);
28894 enum machine_mode vmode = TYPE_MODE (vec_type);
28895 enum ix86_builtins fcode;
28896 bool ok = TARGET_SSE2;
28898 switch (vmode)
28900 case V4DFmode:
28901 ok = TARGET_AVX;
28902 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
28903 goto get_di;
28904 case V2DFmode:
28905 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
28906 get_di:
28907 itype = ix86_get_builtin_type (IX86_BT_DI);
28908 break;
28910 case V8SFmode:
28911 ok = TARGET_AVX;
28912 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
28913 goto get_si;
28914 case V4SFmode:
28915 ok = TARGET_SSE;
28916 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
28917 get_si:
28918 itype = ix86_get_builtin_type (IX86_BT_SI);
28919 break;
28921 case V2DImode:
28922 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
28923 break;
28924 case V4SImode:
28925 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
28926 break;
28927 case V8HImode:
28928 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
28929 break;
28930 case V16QImode:
28931 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
28932 break;
28933 default:
28934 ok = false;
28935 break;
28938 if (!ok)
28939 return NULL_TREE;
28941 *mask_type = itype;
28942 return ix86_builtins[(int) fcode];
28945 /* Return a vector mode with twice as many elements as VMODE. */
28946 /* ??? Consider moving this to a table generated by genmodes.c. */
28948 static enum machine_mode
28949 doublesize_vector_mode (enum machine_mode vmode)
28951 switch (vmode)
28953 case V2SFmode: return V4SFmode;
28954 case V1DImode: return V2DImode;
28955 case V2SImode: return V4SImode;
28956 case V4HImode: return V8HImode;
28957 case V8QImode: return V16QImode;
28959 case V2DFmode: return V4DFmode;
28960 case V4SFmode: return V8SFmode;
28961 case V2DImode: return V4DImode;
28962 case V4SImode: return V8SImode;
28963 case V8HImode: return V16HImode;
28964 case V16QImode: return V32QImode;
28966 case V4DFmode: return V8DFmode;
28967 case V8SFmode: return V16SFmode;
28968 case V4DImode: return V8DImode;
28969 case V8SImode: return V16SImode;
28970 case V16HImode: return V32HImode;
28971 case V32QImode: return V64QImode;
28973 default:
28974 gcc_unreachable ();
28978 /* Construct (set target (vec_select op0 (parallel perm))) and
28979 return true if that's a valid instruction in the active ISA. */
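/* The insn is emitted tentatively; if no pattern in the backend
   recognizes it (recog_memoized fails), it is removed again and the
   caller falls back to a different expansion strategy.  */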
28981 static bool
28982 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
28984 rtx rperm[MAX_VECT_LEN], x;
28985 unsigned i;
28987 for (i = 0; i < nelt; ++i)
28988 rperm[i] = GEN_INT (perm[i]);
28990 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
28991 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
28992 x = gen_rtx_SET (VOIDmode, target, x);
28994 x = emit_insn (x);
28995 if (recog_memoized (x) < 0)
28997 remove_insn (x);
28998 return false;
29000 return true;
29003 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29005 static bool
29006 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29007 const unsigned char *perm, unsigned nelt)
29009 enum machine_mode v2mode;
29010 rtx x;
29012 v2mode = doublesize_vector_mode (GET_MODE (op0));
29013 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29014 return expand_vselect (target, x, perm, nelt);
29017 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29018 in terms of blendp[sd] / pblendw / pblendvb. */
29020 static bool
29021 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29023 enum machine_mode vmode = d->vmode;
29024 unsigned i, mask, nelt = d->nelt;
29025 rtx target, op0, op1, x;
29027 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29028 return false;
29029 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29030 return false;
29032 /* This is a blend, not a permute. Elements must stay in their
29033 respective lanes. */
29034 for (i = 0; i < nelt; ++i)
29036 unsigned e = d->perm[i];
29037 if (!(e == i || e == i + nelt))
29038 return false;
29041 if (d->testing_p)
29042 return true;
29044 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29045 decision should be extracted elsewhere, so that we only try that
29046 sequence once all budget==3 options have been tried. */
29048 /* For bytes, see if bytes move in pairs so we can use pblendw with
29049 an immediate argument, rather than pblendvb with a vector argument. */
29050 if (vmode == V16QImode)
29052 bool pblendw_ok = true;
29053 for (i = 0; i < 16 && pblendw_ok; i += 2)
29054 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29056 if (!pblendw_ok)
29058 rtx rperm[16], vperm;
29060 for (i = 0; i < nelt; ++i)
29061 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29063 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29064 vperm = force_reg (V16QImode, vperm);
29066 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29067 return true;
29071 target = d->target;
29072 op0 = d->op0;
29073 op1 = d->op1;
29074 mask = 0;
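/* Each bit set in the blend immediate selects the corresponding element
   from op1.  The integer modes are rewritten as a V8HImode pblendw below,
   so one V4SI element expands to two mask bits and one V2DI element to
   four; e.g. the V4SI permutation { 0 5 2 7 } takes elements 1 and 3 from
   op1 and yields the pblendw immediate 0xcc.  */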
29076 switch (vmode)
29078 case V4DFmode:
29079 case V8SFmode:
29080 case V2DFmode:
29081 case V4SFmode:
29082 case V8HImode:
29083 for (i = 0; i < nelt; ++i)
29084 mask |= (d->perm[i] >= nelt) << i;
29085 break;
29087 case V2DImode:
29088 for (i = 0; i < 2; ++i)
29089 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29090 goto do_subreg;
29092 case V4SImode:
29093 for (i = 0; i < 4; ++i)
29094 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29095 goto do_subreg;
29097 case V16QImode:
29098 for (i = 0; i < 8; ++i)
29099 mask |= (d->perm[i * 2] >= 16) << i;
29101 do_subreg:
29102 vmode = V8HImode;
29103 target = gen_lowpart (vmode, target);
29104 op0 = gen_lowpart (vmode, op0);
29105 op1 = gen_lowpart (vmode, op1);
29106 break;
29108 default:
29109 gcc_unreachable ();
29112 /* This matches five different patterns with the different modes. */
29113 x = gen_rtx_VEC_MERGE (vmode, op0, op1, GEN_INT (mask));
29114 x = gen_rtx_SET (VOIDmode, target, x);
29115 emit_insn (x);
29117 return true;
29120 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29121 in terms of the variable form of vpermilps.
29123 Note that we will have already failed the immediate input vpermilps,
29124 which requires that the high and low part shuffle be identical; the
29125 variable form doesn't require that. */
29127 static bool
29128 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29130 rtx rperm[8], vperm;
29131 unsigned i;
29133 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29134 return false;
29136 /* We can only permute within the 128-bit lane. */
29137 for (i = 0; i < 8; ++i)
29139 unsigned e = d->perm[i];
29140 if (i < 4 ? e >= 4 : e < 4)
29141 return false;
29144 if (d->testing_p)
29145 return true;
29147 for (i = 0; i < 8; ++i)
29149 unsigned e = d->perm[i];
29151 /* Within each 128-bit lane, the elements of op0 are numbered
29152 from 0 and the elements of op1 are numbered from 4. */
29153 if (e >= 8 + 4)
29154 e -= 8;
29155 else if (e >= 4)
29156 e -= 4;
29158 rperm[i] = GEN_INT (e);
29161 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29162 vperm = force_reg (V8SImode, vperm);
29163 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29165 return true;
29168 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29169 in terms of pshufb or vpperm. */
29171 static bool
29172 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29174 unsigned i, nelt, eltsz;
29175 rtx rperm[16], vperm, target, op0, op1;
29177 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29178 return false;
29179 if (GET_MODE_SIZE (d->vmode) != 16)
29180 return false;
29182 if (d->testing_p)
29183 return true;
29185 nelt = d->nelt;
29186 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
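/* Expand each element index into ELTSZ consecutive byte indices for the
   byte-granular pshufb/vpperm selector; e.g. a V4SI permutation
   { 2 0 3 1 } becomes the byte selector
   { 8 9 10 11  0 1 2 3  12 13 14 15  4 5 6 7 }.  */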
29188 for (i = 0; i < nelt; ++i)
29190 unsigned j, e = d->perm[i];
29191 for (j = 0; j < eltsz; ++j)
29192 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29195 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29196 vperm = force_reg (V16QImode, vperm);
29198 target = gen_lowpart (V16QImode, d->target);
29199 op0 = gen_lowpart (V16QImode, d->op0);
29200 if (d->op0 == d->op1)
29201 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29202 else
29204 op1 = gen_lowpart (V16QImode, d->op1);
29205 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29208 return true;
29211 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29212 in a single instruction. */
29214 static bool
29215 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29217 unsigned i, nelt = d->nelt;
29218 unsigned char perm2[MAX_VECT_LEN];
29220 /* Check plain VEC_SELECT first, because AVX has instructions that could
29221 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29222 input where SEL+CONCAT may not. */
29223 if (d->op0 == d->op1)
29225 if (expand_vselect (d->target, d->op0, d->perm, nelt))
29226 return true;
29228 /* There are plenty of patterns in sse.md that are written for
29229 SEL+CONCAT and are not replicated for a single op. Perhaps
29230 that should be changed, to avoid the nastiness here. */
29232 /* Recognize interleave style patterns, which means incrementing
29233 every other permutation operand. */
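/* E.g. the single-operand V4SF permutation { 0 0 1 1 } becomes the
   two-operand select { 0 4 1 5 } on (op0 concat op0), which matches
   unpcklps.  */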
29234 for (i = 0; i < nelt; i += 2)
29236 perm2[i] = d->perm[i];
29237 perm2[i+1] = d->perm[i+1] + nelt;
29239 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29240 return true;
29242 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29243 if (nelt >= 4)
29245 memcpy (perm2, d->perm, nelt);
29246 for (i = 2; i < nelt; i += 4)
29248 perm2[i+0] += nelt;
29249 perm2[i+1] += nelt;
29252 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29253 return true;
29257 /* Finally, try the fully general two operand permute. */
29258 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29259 return true;
29261 /* Recognize interleave style patterns with reversed operands. */
29262 if (d->op0 != d->op1)
29264 for (i = 0; i < nelt; ++i)
29266 unsigned e = d->perm[i];
29267 if (e >= nelt)
29268 e -= nelt;
29269 else
29270 e += nelt;
29271 perm2[i] = e;
29274 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29275 return true;
29278 /* Try the SSE4.1 blend variable merge instructions. */
29279 if (expand_vec_perm_blend (d))
29280 return true;
29282 /* Try one of the AVX vpermil variable permutations. */
29283 if (expand_vec_perm_vpermil (d))
29284 return true;
29286 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29287 if (expand_vec_perm_pshufb (d))
29288 return true;
29290 return false;
29293 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29294 in terms of a pair of pshuflw + pshufhw instructions. */
29296 static bool
29297 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29299 unsigned char perm2[MAX_VECT_LEN];
29300 unsigned i;
29301 bool ok;
29303 if (d->vmode != V8HImode || d->op0 != d->op1)
29304 return false;
29306 /* The two permutations only operate in 64-bit lanes. */
29307 for (i = 0; i < 4; ++i)
29308 if (d->perm[i] >= 4)
29309 return false;
29310 for (i = 4; i < 8; ++i)
29311 if (d->perm[i] < 4)
29312 return false;
29314 if (d->testing_p)
29315 return true;
29317 /* Emit the pshuflw. */
29318 memcpy (perm2, d->perm, 4);
29319 for (i = 4; i < 8; ++i)
29320 perm2[i] = i;
29321 ok = expand_vselect (d->target, d->op0, perm2, 8);
29322 gcc_assert (ok);
29324 /* Emit the pshufhw. */
29325 memcpy (perm2 + 4, d->perm + 4, 4);
29326 for (i = 0; i < 4; ++i)
29327 perm2[i] = i;
29328 ok = expand_vselect (d->target, d->target, perm2, 8);
29329 gcc_assert (ok);
29331 return true;
29334 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29335 the permutation using the SSSE3 palignr instruction. This succeeds
29336 when all of the elements in PERM fit within one vector and we merely
29337 need to shift them down so that a single vector permutation has a
29338 chance to succeed. */
29340 static bool
29341 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29343 unsigned i, nelt = d->nelt;
29344 unsigned min, max;
29345 bool in_order, ok;
29346 rtx shift;
29348 /* Even with AVX, palignr only operates on 128-bit vectors. */
29349 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29350 return false;
29352 min = nelt, max = 0;
29353 for (i = 0; i < nelt; ++i)
29355 unsigned e = d->perm[i];
29356 if (e < min)
29357 min = e;
29358 if (e > max)
29359 max = e;
29361 if (min == 0 || max - min >= nelt)
29362 return false;
29364 /* Given that we have SSSE3, we know we'll be able to implement the
29365 single operand permutation after the palignr with pshufb. */
29366 if (d->testing_p)
29367 return true;
29369 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29370 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29371 gen_lowpart (TImode, d->op1),
29372 gen_lowpart (TImode, d->op0), shift));
29374 d->op0 = d->op1 = d->target;
29376 in_order = true;
29377 for (i = 0; i < nelt; ++i)
29379 unsigned e = d->perm[i] - min;
29380 if (e != i)
29381 in_order = false;
29382 d->perm[i] = e;
29385 /* Test for the degenerate case where the alignment by itself
29386 produces the desired permutation. */
29387 if (in_order)
29388 return true;
29390 ok = expand_vec_perm_1 (d);
29391 gcc_assert (ok);
29393 return ok;
29396 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29397 a two vector permutation into a single vector permutation by using
29398 an interleave operation to merge the vectors. */
29400 static bool
29401 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29403 struct expand_vec_perm_d dremap, dfinal;
29404 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29405 unsigned contents, h1, h2, h3, h4;
29406 unsigned char remap[2 * MAX_VECT_LEN];
29407 rtx seq;
29408 bool ok;
29410 if (d->op0 == d->op1)
29411 return false;
29413 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29414 lanes. We can use similar techniques with the vperm2f128 instruction,
29415 but it requires slightly different logic. */
29416 if (GET_MODE_SIZE (d->vmode) != 16)
29417 return false;
29419 /* Examine from whence the elements come. */
29420 contents = 0;
29421 for (i = 0; i < nelt; ++i)
29422 contents |= 1u << d->perm[i];
29424 /* Split the two input vectors into 4 halves. */
29425 h1 = (1u << nelt2) - 1;
29426 h2 = h1 << nelt2;
29427 h3 = h2 << nelt2;
29428 h4 = h3 << nelt2;
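/* For V4SFmode the half masks h1..h4 cover elements { 0 1 }, { 2 3 },
   { 4 5 } and { 6 7 }.  E.g. the permutation { 0 4 4 0 } draws only from
   the low halves (contents within h1|h3), so dremap becomes the
   interleave-low { 0 4 1 5 } and dfinal the single-operand shuffle
   { 0 1 1 0 } on its result.  */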
29430 memset (remap, 0xff, sizeof (remap));
29431 dremap = *d;
29433 /* If all of the elements come from the low halves, use interleave low;
29434 similarly for interleave high. If the elements come from mismatched
29435 halves, we can use shufps for V4SF/V4SI or do a DImode shuffle. */
29436 if ((contents & (h1 | h3)) == contents)
29438 for (i = 0; i < nelt2; ++i)
29440 remap[i] = i * 2;
29441 remap[i + nelt] = i * 2 + 1;
29442 dremap.perm[i * 2] = i;
29443 dremap.perm[i * 2 + 1] = i + nelt;
29446 else if ((contents & (h2 | h4)) == contents)
29448 for (i = 0; i < nelt2; ++i)
29450 remap[i + nelt2] = i * 2;
29451 remap[i + nelt + nelt2] = i * 2 + 1;
29452 dremap.perm[i * 2] = i + nelt2;
29453 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29456 else if ((contents & (h1 | h4)) == contents)
29458 for (i = 0; i < nelt2; ++i)
29460 remap[i] = i;
29461 remap[i + nelt + nelt2] = i + nelt2;
29462 dremap.perm[i] = i;
29463 dremap.perm[i + nelt2] = i + nelt + nelt2;
29465 if (nelt != 4)
29467 dremap.vmode = V2DImode;
29468 dremap.nelt = 2;
29469 dremap.perm[0] = 0;
29470 dremap.perm[1] = 3;
29473 else if ((contents & (h2 | h3)) == contents)
29475 for (i = 0; i < nelt2; ++i)
29477 remap[i + nelt2] = i;
29478 remap[i + nelt] = i + nelt2;
29479 dremap.perm[i] = i + nelt2;
29480 dremap.perm[i + nelt2] = i + nelt;
29482 if (nelt != 4)
29484 dremap.vmode = V2DImode;
29485 dremap.nelt = 2;
29486 dremap.perm[0] = 1;
29487 dremap.perm[1] = 2;
29490 else
29491 return false;
29493 /* Use the remapping array set up above to move the elements from their
29494 swizzled locations into their final destinations. */
29495 dfinal = *d;
29496 for (i = 0; i < nelt; ++i)
29498 unsigned e = remap[d->perm[i]];
29499 gcc_assert (e < nelt);
29500 dfinal.perm[i] = e;
29502 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29503 dfinal.op1 = dfinal.op0;
29504 dremap.target = dfinal.op0;
29506 /* Test if the final remap can be done with a single insn. For V4SFmode or
29507 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29508 start_sequence ();
29509 ok = expand_vec_perm_1 (&dfinal);
29510 seq = get_insns ();
29511 end_sequence ();
29513 if (!ok)
29514 return false;
29516 if (dremap.vmode != dfinal.vmode)
29518 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29519 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29520 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29523 ok = expand_vec_perm_1 (&dremap);
29524 gcc_assert (ok);
29526 emit_insn (seq);
29527 return true;
29530 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29531 permutation with two pshufb insns and an ior. We should have already
29532 failed all two instruction sequences. */
29534 static bool
29535 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29537 rtx rperm[2][16], vperm, l, h, op, m128;
29538 unsigned int i, nelt, eltsz;
29540 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29541 return false;
29542 gcc_assert (d->op0 != d->op1);
29544 nelt = d->nelt;
29545 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29547 /* Generate two permutation masks. If the required element is within
29548 the given vector it is shuffled into the proper lane. If the required
29549 element is in the other vector, force a zero into the lane by setting
29550 bit 7 in the permutation mask. */
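/* E.g. for the V16QI extract-even permutation, op0's selector places
   bytes { 0 2 ... 14 } in the low half and zeroes the high half, op1's
   selector does the opposite, and the final IOR merges the two.  */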
29551 m128 = GEN_INT (-128);
29552 for (i = 0; i < nelt; ++i)
29554 unsigned j, e = d->perm[i];
29555 unsigned which = (e >= nelt);
29556 if (e >= nelt)
29557 e -= nelt;
29559 for (j = 0; j < eltsz; ++j)
29561 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29562 rperm[1-which][i*eltsz + j] = m128;
29566 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
29567 vperm = force_reg (V16QImode, vperm);
29569 l = gen_reg_rtx (V16QImode);
29570 op = gen_lowpart (V16QImode, d->op0);
29571 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
29573 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
29574 vperm = force_reg (V16QImode, vperm);
29576 h = gen_reg_rtx (V16QImode);
29577 op = gen_lowpart (V16QImode, d->op1);
29578 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
29580 op = gen_lowpart (V16QImode, d->target);
29581 emit_insn (gen_iorv16qi3 (op, l, h));
29583 return true;
29586 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
29587 and extract-odd permutations. */
29589 static bool
29590 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
29592 rtx t1, t2, t3, t4;
29594 switch (d->vmode)
29596 case V4DFmode:
29597 t1 = gen_reg_rtx (V4DFmode);
29598 t2 = gen_reg_rtx (V4DFmode);
29600 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
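/* In the vperm2f128 immediate, bits [1:0] pick the 128-bit lane for the
   destination's low half and bits [5:4] for its high half (0/1 = lanes
   of the first source, 2/3 = lanes of the second), so 0x20 selects
   { op0.low, op1.low } and 0x31 selects { op0.high, op1.high }.  */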
29601 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
29602 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
29604 /* Now an unpck[lh]pd will produce the result required. */
29605 if (odd)
29606 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
29607 else
29608 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
29609 emit_insn (t3);
29610 break;
29612 case V8SFmode:
29614 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
29615 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
29616 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
29618 t1 = gen_reg_rtx (V8SFmode);
29619 t2 = gen_reg_rtx (V8SFmode);
29620 t3 = gen_reg_rtx (V8SFmode);
29621 t4 = gen_reg_rtx (V8SFmode);
29623 /* Shuffle within the 128-bit lanes to produce:
29624 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
29625 expand_vselect (t1, d->op0, perm1, 8);
29626 expand_vselect (t2, d->op1, perm1, 8);
29628 /* Shuffle the lanes around to produce:
29629 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
29630 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
29631 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
29633 /* Now a vpermil2p will produce the result required. */
29634 /* ??? The vpermil2p requires a vector constant. Another option
29635 is an unpck[lh]ps to merge the two vectors to produce
29636 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
29637 vpermilps to get the elements into the final order. */
29638 d->op0 = t3;
29639 d->op1 = t4;
29640 memcpy (d->perm, odd ? permo : perme, 8);
29641 expand_vec_perm_vpermil (d);
29643 break;
29645 case V2DFmode:
29646 case V4SFmode:
29647 case V2DImode:
29648 case V4SImode:
29649 /* These are always directly implementable by expand_vec_perm_1. */
29650 gcc_unreachable ();
29652 case V8HImode:
29653 if (TARGET_SSSE3)
29654 return expand_vec_perm_pshufb2 (d);
29655 else
29657 /* We need 2*log2(N)-1 operations to achieve odd/even
29658 with interleave. */
29659 t1 = gen_reg_rtx (V8HImode);
29660 t2 = gen_reg_rtx (V8HImode);
29661 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
29662 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
29663 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
29664 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
29665 if (odd)
29666 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
29667 else
29668 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
29669 emit_insn (t3);
29671 break;
29673 case V16QImode:
29674 if (TARGET_SSSE3)
29675 return expand_vec_perm_pshufb2 (d);
29676 else
29678 t1 = gen_reg_rtx (V16QImode);
29679 t2 = gen_reg_rtx (V16QImode);
29680 t3 = gen_reg_rtx (V16QImode);
29681 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
29682 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
29683 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
29684 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
29685 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
29686 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
29687 if (odd)
29688 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
29689 else
29690 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
29691 emit_insn (t3);
29693 break;
29695 default:
29696 gcc_unreachable ();
29699 return true;
29702 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29703 extract-even and extract-odd permutations. */
29705 static bool
29706 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
29708 unsigned i, odd, nelt = d->nelt;
29710 odd = d->perm[0];
29711 if (odd != 0 && odd != 1)
29712 return false;
29714 for (i = 1; i < nelt; ++i)
29715 if (d->perm[i] != 2 * i + odd)
29716 return false;
29718 return expand_vec_perm_even_odd_1 (d, odd);
29721 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
29722 permutations. We assume that expand_vec_perm_1 has already failed. */
29724 static bool
29725 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
29727 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
29728 enum machine_mode vmode = d->vmode;
29729 unsigned char perm2[4];
29730 rtx op0 = d->op0;
29731 bool ok;
29733 switch (vmode)
29735 case V4DFmode:
29736 case V8SFmode:
29737 /* These are special-cased in sse.md so that we can optionally
29738 use the vbroadcast instruction. They expand to two insns
29739 if the input happens to be in a register. */
29740 gcc_unreachable ();
29742 case V2DFmode:
29743 case V2DImode:
29744 case V4SFmode:
29745 case V4SImode:
29746 /* These are always implementable using standard shuffle patterns. */
29747 gcc_unreachable ();
29749 case V8HImode:
29750 case V16QImode:
29751 /* These can be implemented via interleave. We save one insn by
29752 stopping once we have promoted to V4SImode and then use pshufd. */
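/* E.g. broadcasting element 5 of a V8HI vector: interleaving the vector
   with itself (high half) gives { 4 4 5 5 6 6 7 7 }; viewed as V4SI the
   pair (5,5) is element 1, which the final pshufd then broadcasts.  */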
29755 optab otab = vec_interleave_low_optab;
29757 if (elt >= nelt2)
29759 otab = vec_interleave_high_optab;
29760 elt -= nelt2;
29762 nelt2 /= 2;
29764 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
29765 vmode = get_mode_wider_vector (vmode);
29766 op0 = gen_lowpart (vmode, op0);
29768 while (vmode != V4SImode);
29770 memset (perm2, elt, 4);
29771 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
29772 gcc_assert (ok);
29773 return true;
29775 default:
29776 gcc_unreachable ();
29780 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29781 broadcast permutations. */
29783 static bool
29784 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
29786 unsigned i, elt, nelt = d->nelt;
29788 if (d->op0 != d->op1)
29789 return false;
29791 elt = d->perm[0];
29792 for (i = 1; i < nelt; ++i)
29793 if (d->perm[i] != elt)
29794 return false;
29796 return expand_vec_perm_broadcast_1 (d);
29799 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
29800 With all of the interface bits taken care of, perform the expansion
29801 in D and return true on success. */
29803 static bool
29804 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
29806 /* Try a single instruction expansion. */
29807 if (expand_vec_perm_1 (d))
29808 return true;
29810 /* Try sequences of two instructions. */
29812 if (expand_vec_perm_pshuflw_pshufhw (d))
29813 return true;
29815 if (expand_vec_perm_palignr (d))
29816 return true;
29818 if (expand_vec_perm_interleave2 (d))
29819 return true;
29821 if (expand_vec_perm_broadcast (d))
29822 return true;
29824 /* Try sequences of three instructions. */
29826 if (expand_vec_perm_pshufb2 (d))
29827 return true;
29829 /* ??? Look for narrow permutations whose element orderings would
29830 allow the promotion to a wider mode. */
29832 /* ??? Look for sequences of interleave or a wider permute that place
29833 the data into the correct lanes for a half-vector shuffle like
29834 pshuf[lh]w or vpermilps. */
29836 /* ??? Look for sequences of interleave that produce the desired results.
29837 The combinatorics of punpck[lh] get pretty ugly... */
29839 if (expand_vec_perm_even_odd (d))
29840 return true;
29842 return false;
29845 /* Extract the values from the vector CST into the permutation array in D.
29846 Return 0 on error, 1 if all values from the permutation come from the
29847 first vector, 2 if all values from the second vector, and 3 otherwise. */
29849 static int
29850 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
29852 tree list = TREE_VECTOR_CST_ELTS (cst);
29853 unsigned i, nelt = d->nelt;
29854 int ret = 0;
29856 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
29858 unsigned HOST_WIDE_INT e;
29860 if (!host_integerp (TREE_VALUE (list), 1))
29861 return 0;
29862 e = tree_low_cst (TREE_VALUE (list), 1);
29863 if (e >= 2 * nelt)
29864 return 0;
29866 ret |= (e < nelt ? 1 : 2);
29867 d->perm[i] = e;
29869 gcc_assert (list == NULL);
29871 /* If all elements come from the second vector, fold them down to refer to the first. */
29872 if (ret == 2)
29873 for (i = 0; i < nelt; ++i)
29874 d->perm[i] -= nelt;
29876 return ret;
29879 static rtx
29880 ix86_expand_vec_perm_builtin (tree exp)
29882 struct expand_vec_perm_d d;
29883 tree arg0, arg1, arg2;
29885 arg0 = CALL_EXPR_ARG (exp, 0);
29886 arg1 = CALL_EXPR_ARG (exp, 1);
29887 arg2 = CALL_EXPR_ARG (exp, 2);
29889 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
29890 d.nelt = GET_MODE_NUNITS (d.vmode);
29891 d.testing_p = false;
29892 gcc_assert (VECTOR_MODE_P (d.vmode));
29894 if (TREE_CODE (arg2) != VECTOR_CST)
29896 error_at (EXPR_LOCATION (exp),
29897 "vector permutation requires vector constant");
29898 goto exit_error;
29901 switch (extract_vec_perm_cst (&d, arg2))
29903 default:
29904 gcc_unreachable();
29906 case 0:
29907 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
29908 goto exit_error;
29910 case 3:
29911 if (!operand_equal_p (arg0, arg1, 0))
29913 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
29914 d.op0 = force_reg (d.vmode, d.op0);
29915 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
29916 d.op1 = force_reg (d.vmode, d.op1);
29917 break;
29920 /* The elements of PERM do not suggest that only the first operand
29921 is used, but both operands are identical. Allow easier matching
29922 of the permutation by folding the permutation into the single
29923 input vector. */
29925 unsigned i, nelt = d.nelt;
29926 for (i = 0; i < nelt; ++i)
29927 if (d.perm[i] >= nelt)
29928 d.perm[i] -= nelt;
29930 /* FALLTHRU */
29932 case 1:
29933 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
29934 d.op0 = force_reg (d.vmode, d.op0);
29935 d.op1 = d.op0;
29936 break;
29938 case 2:
29939 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
29940 d.op0 = force_reg (d.vmode, d.op0);
29941 d.op1 = d.op0;
29942 break;
29945 d.target = gen_reg_rtx (d.vmode);
29946 if (ix86_expand_vec_perm_builtin_1 (&d))
29947 return d.target;
29949 /* For compiler-generated permutations, we should never get here, because
29950 the compiler should also be checking the ok hook. But since this is a
29951 builtin the user has access to, don't abort; issue a sorry instead. */
29952 switch (d.nelt)
29954 case 2:
29955 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
29956 break;
29957 case 4:
29958 sorry ("vector permutation (%d %d %d %d)",
29959 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
29960 break;
29961 case 8:
29962 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
29963 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
29964 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
29965 break;
29966 case 16:
29967 sorry ("vector permutation "
29968 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
29969 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
29970 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
29971 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
29972 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
29973 break;
29974 default:
29975 gcc_unreachable ();
29977 exit_error:
29978 return CONST0_RTX (d.vmode);
29981 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
29983 static bool
29984 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
29986 struct expand_vec_perm_d d;
29987 int vec_mask;
29988 bool ret, one_vec;
29990 d.vmode = TYPE_MODE (vec_type);
29991 d.nelt = GET_MODE_NUNITS (d.vmode);
29992 d.testing_p = true;
29994 /* Given sufficient ISA support we can just return true here
29995 for selected vector modes. */
29996 if (GET_MODE_SIZE (d.vmode) == 16)
29998 /* All implementable with a single vpperm insn. */
29999 if (TARGET_XOP)
30000 return true;
30001 /* All implementable with 2 pshufb + 1 ior. */
30002 if (TARGET_SSSE3)
30003 return true;
30004 /* All implementable with shufpd or unpck[lh]pd. */
30005 if (d.nelt == 2)
30006 return true;
30009 vec_mask = extract_vec_perm_cst (&d, mask);
30011 /* This hook cannot be called in response to something that the
30012 user does (unlike the builtin expander), so we should never see
30013 an error generated from the extract. */
30014 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30015 one_vec = (vec_mask != 3);
30017 /* Implementable with shufps or pshufd. */
30018 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30019 return true;
30021 /* Otherwise we have to go through the motions and see if we can
30022 figure out how to generate the requested permutation. */
30023 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30024 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30025 if (!one_vec)
30026 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30028 start_sequence ();
30029 ret = ix86_expand_vec_perm_builtin_1 (&d);
30030 end_sequence ();
30032 return ret;
30035 void
30036 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
30038 struct expand_vec_perm_d d;
30039 unsigned i, nelt;
30041 d.target = targ;
30042 d.op0 = op0;
30043 d.op1 = op1;
30044 d.vmode = GET_MODE (targ);
30045 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
30046 d.testing_p = false;
30048 for (i = 0; i < nelt; ++i)
30049 d.perm[i] = i * 2 + odd;
30051 /* We'll either be able to implement the permutation directly... */
30052 if (expand_vec_perm_1 (&d))
30053 return;
30055 /* ... or we use the special-case patterns. */
30056 expand_vec_perm_even_odd_1 (&d, odd);
30059 /* Return the va_list type node specific to the calling ABI of FNDECL:
30060 ms_va_list or sysv_va_list for 64-bit targets, otherwise the default va_list. */
30062 tree
30063 ix86_fn_abi_va_list (tree fndecl)
30065 if (!TARGET_64BIT)
30066 return va_list_type_node;
30067 gcc_assert (fndecl != NULL_TREE);
30069 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
30070 return ms_va_list_type_node;
30071 else
30072 return sysv_va_list_type_node;
30075 /* Returns the canonical va_list type specified by TYPE. If there
30076 is no valid TYPE provided, it returns NULL_TREE. */
30078 tree
30079 ix86_canonical_va_list_type (tree type)
30081 tree wtype, htype;
30083 /* Resolve references and pointers to va_list type. */
30084 if (INDIRECT_REF_P (type))
30085 type = TREE_TYPE (type);
30086 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
30087 type = TREE_TYPE (type);
30089 if (TARGET_64BIT)
30091 wtype = va_list_type_node;
30092 gcc_assert (wtype != NULL_TREE);
30093 htype = type;
30094 if (TREE_CODE (wtype) == ARRAY_TYPE)
30096 /* If va_list is an array type, the argument may have decayed
30097 to a pointer type, e.g. by being passed to another function.
30098 In that case, unwrap both types so that we can compare the
30099 underlying records. */
30100 if (TREE_CODE (htype) == ARRAY_TYPE
30101 || POINTER_TYPE_P (htype))
30103 wtype = TREE_TYPE (wtype);
30104 htype = TREE_TYPE (htype);
30107 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30108 return va_list_type_node;
30109 wtype = sysv_va_list_type_node;
30110 gcc_assert (wtype != NULL_TREE);
30111 htype = type;
30112 if (TREE_CODE (wtype) == ARRAY_TYPE)
30114 /* If va_list is an array type, the argument may have decayed
30115 to a pointer type, e.g. by being passed to another function.
30116 In that case, unwrap both types so that we can compare the
30117 underlying records. */
30118 if (TREE_CODE (htype) == ARRAY_TYPE
30119 || POINTER_TYPE_P (htype))
30121 wtype = TREE_TYPE (wtype);
30122 htype = TREE_TYPE (htype);
30125 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30126 return sysv_va_list_type_node;
30127 wtype = ms_va_list_type_node;
30128 gcc_assert (wtype != NULL_TREE);
30129 htype = type;
30130 if (TREE_CODE (wtype) == ARRAY_TYPE)
30132 /* If va_list is an array type, the argument may have decayed
30133 to a pointer type, e.g. by being passed to another function.
30134 In that case, unwrap both types so that we can compare the
30135 underlying records. */
30136 if (TREE_CODE (htype) == ARRAY_TYPE
30137 || POINTER_TYPE_P (htype))
30139 wtype = TREE_TYPE (wtype);
30140 htype = TREE_TYPE (htype);
30143 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30144 return ms_va_list_type_node;
30145 return NULL_TREE;
30147 return std_canonical_va_list_type (type);
30150 /* Iterate through the target-specific builtin types for va_list.
30151 IDX denotes the iterator, *PTREE is set to the result type of
30152 the va_list builtin, and *PNAME to its internal name.
30153 Returns zero if there is no element for this index, otherwise
30154 IDX should be increased upon the next call.
30155 Note, do not iterate a base builtin's name like __builtin_va_list.
30156 Used from c_common_nodes_and_builtins. */
30159 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
30161 if (!TARGET_64BIT)
30162 return 0;
30163 switch (idx) {
30164 case 0:
30165 *ptree = ms_va_list_type_node;
30166 *pname = "__builtin_ms_va_list";
30167 break;
30168 case 1:
30169 *ptree = sysv_va_list_type_node;
30170 *pname = "__builtin_sysv_va_list";
30171 break;
30172 default:
30173 return 0;
30175 return 1;
30178 /* Initialize the GCC target structure. */
30179 #undef TARGET_RETURN_IN_MEMORY
30180 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
30182 #undef TARGET_LEGITIMIZE_ADDRESS
30183 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
30185 #undef TARGET_ATTRIBUTE_TABLE
30186 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
30187 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30188 # undef TARGET_MERGE_DECL_ATTRIBUTES
30189 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
30190 #endif
30192 #undef TARGET_COMP_TYPE_ATTRIBUTES
30193 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
30195 #undef TARGET_INIT_BUILTINS
30196 #define TARGET_INIT_BUILTINS ix86_init_builtins
30197 #undef TARGET_BUILTIN_DECL
30198 #define TARGET_BUILTIN_DECL ix86_builtin_decl
30199 #undef TARGET_EXPAND_BUILTIN
30200 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
30202 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
30203 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
30204 ix86_builtin_vectorized_function
30206 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
30207 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
30209 #undef TARGET_BUILTIN_RECIPROCAL
30210 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
30212 #undef TARGET_ASM_FUNCTION_EPILOGUE
30213 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
30215 #undef TARGET_ENCODE_SECTION_INFO
30216 #ifndef SUBTARGET_ENCODE_SECTION_INFO
30217 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
30218 #else
30219 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
30220 #endif
30222 #undef TARGET_ASM_OPEN_PAREN
30223 #define TARGET_ASM_OPEN_PAREN ""
30224 #undef TARGET_ASM_CLOSE_PAREN
30225 #define TARGET_ASM_CLOSE_PAREN ""
30227 #undef TARGET_ASM_BYTE_OP
30228 #define TARGET_ASM_BYTE_OP ASM_BYTE
30230 #undef TARGET_ASM_ALIGNED_HI_OP
30231 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
30232 #undef TARGET_ASM_ALIGNED_SI_OP
30233 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
30234 #ifdef ASM_QUAD
30235 #undef TARGET_ASM_ALIGNED_DI_OP
30236 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
30237 #endif
30239 #undef TARGET_ASM_UNALIGNED_HI_OP
30240 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
30241 #undef TARGET_ASM_UNALIGNED_SI_OP
30242 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
30243 #undef TARGET_ASM_UNALIGNED_DI_OP
30244 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
30246 #undef TARGET_SCHED_ADJUST_COST
30247 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
30248 #undef TARGET_SCHED_ISSUE_RATE
30249 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
30250 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
30251 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
30252 ia32_multipass_dfa_lookahead
30254 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
30255 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
30257 #ifdef HAVE_AS_TLS
30258 #undef TARGET_HAVE_TLS
30259 #define TARGET_HAVE_TLS true
30260 #endif
30261 #undef TARGET_CANNOT_FORCE_CONST_MEM
30262 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
30263 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
30264 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
30266 #undef TARGET_DELEGITIMIZE_ADDRESS
30267 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
30269 #undef TARGET_MS_BITFIELD_LAYOUT_P
30270 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
30272 #if TARGET_MACHO
30273 #undef TARGET_BINDS_LOCAL_P
30274 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
30275 #endif
30276 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30277 #undef TARGET_BINDS_LOCAL_P
30278 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
30279 #endif
30281 #undef TARGET_ASM_OUTPUT_MI_THUNK
30282 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
30283 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
30284 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
30286 #undef TARGET_ASM_FILE_START
30287 #define TARGET_ASM_FILE_START x86_file_start
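/* Initial target_flags: the architecture default, the subtarget default,
   the -mtls-direct-seg-refs default, and fused multiply-add enabled.  */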
30289 #undef TARGET_DEFAULT_TARGET_FLAGS
30290 #define TARGET_DEFAULT_TARGET_FLAGS \
30291 (TARGET_DEFAULT \
30292 | TARGET_SUBTARGET_DEFAULT \
30293 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
30294 | MASK_FUSED_MADD)
30296 #undef TARGET_HANDLE_OPTION
30297 #define TARGET_HANDLE_OPTION ix86_handle_option
30299 #undef TARGET_RTX_COSTS
30300 #define TARGET_RTX_COSTS ix86_rtx_costs
30301 #undef TARGET_ADDRESS_COST
30302 #define TARGET_ADDRESS_COST ix86_address_cost
30304 #undef TARGET_FIXED_CONDITION_CODE_REGS
30305 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
30306 #undef TARGET_CC_MODES_COMPATIBLE
30307 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
30309 #undef TARGET_MACHINE_DEPENDENT_REORG
30310 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
30312 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
30313 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
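/* Varargs handling.  64-bit code has two va_list flavors, one for the
   System V ABI and one for the Microsoft ABI; these hooks build both
   builtin types, pick the one matching a function's ABI, and expand
   va_start.  */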
30315 #undef TARGET_BUILD_BUILTIN_VA_LIST
30316 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
30318 #undef TARGET_FN_ABI_VA_LIST
30319 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
30321 #undef TARGET_CANONICAL_VA_LIST_TYPE
30322 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
30324 #undef TARGET_EXPAND_BUILTIN_VA_START
30325 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
30327 #undef TARGET_MD_ASM_CLOBBERS
30328 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
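/* Argument passing and stack layout: prototype promotion, aggregate
   return, incoming varargs setup, pass-by-reference decisions, dynamic
   stack realignment (DRAP) and nested-function trampolines.  */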
30330 #undef TARGET_PROMOTE_PROTOTYPES
30331 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
30332 #undef TARGET_STRUCT_VALUE_RTX
30333 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
30334 #undef TARGET_SETUP_INCOMING_VARARGS
30335 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
30336 #undef TARGET_MUST_PASS_IN_STACK
30337 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
30338 #undef TARGET_PASS_BY_REFERENCE
30339 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
30340 #undef TARGET_INTERNAL_ARG_POINTER
30341 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
30342 #undef TARGET_UPDATE_STACK_BOUNDARY
30343 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
30344 #undef TARGET_GET_DRAP_RTX
30345 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
30346 #undef TARGET_STRICT_ARGUMENT_NAMING
30347 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
30348 #undef TARGET_STATIC_CHAIN
30349 #define TARGET_STATIC_CHAIN ix86_static_chain
30350 #undef TARGET_TRAMPOLINE_INIT
30351 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
30353 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
30354 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
30356 #undef TARGET_SCALAR_MODE_SUPPORTED_P
30357 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
30359 #undef TARGET_VECTOR_MODE_SUPPORTED_P
30360 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
30362 #undef TARGET_C_MODE_FOR_SUFFIX
30363 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
30365 #ifdef HAVE_AS_TLS
30366 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
30367 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
30368 #endif
30370 #ifdef SUBTARGET_INSERT_ATTRIBUTES
30371 #undef TARGET_INSERT_ATTRIBUTES
30372 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
30373 #endif
30375 #undef TARGET_MANGLE_TYPE
30376 #define TARGET_MANGLE_TYPE ix86_mangle_type
30378 #undef TARGET_STACK_PROTECT_FAIL
30379 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
30381 #undef TARGET_FUNCTION_VALUE
30382 #define TARGET_FUNCTION_VALUE ix86_function_value
30384 #undef TARGET_SECONDARY_RELOAD
30385 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
30387 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
30388 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
30389 ix86_builtin_vectorization_cost
30390 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
30391 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
30392 ix86_vectorize_builtin_vec_perm
30393 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
30394 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
30395 ix86_vectorize_builtin_vec_perm_ok
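/* Per-function target options: switch option state when the current
   function changes, validate attribute ((target (...))), save, restore
   and print that state, and decide whether functions compiled with
   different options may be inlined into each other.  */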
30397 #undef TARGET_SET_CURRENT_FUNCTION
30398 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
30400 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
30401 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
30403 #undef TARGET_OPTION_SAVE
30404 #define TARGET_OPTION_SAVE ix86_function_specific_save
30406 #undef TARGET_OPTION_RESTORE
30407 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
30409 #undef TARGET_OPTION_PRINT
30410 #define TARGET_OPTION_PRINT ix86_function_specific_print
30412 #undef TARGET_CAN_INLINE_P
30413 #define TARGET_CAN_INLINE_P ix86_can_inline_p
30415 #undef TARGET_EXPAND_TO_RTL_HOOK
30416 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
30418 #undef TARGET_LEGITIMATE_ADDRESS_P
30419 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
30421 #undef TARGET_IRA_COVER_CLASSES
30422 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
30424 #undef TARGET_FRAME_POINTER_REQUIRED
30425 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
30427 #undef TARGET_CAN_ELIMINATE
30428 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
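/* targetm is the only instance of struct gcc_target.  TARGET_INITIALIZER
   (from target-def.h) fills every field from the corresponding TARGET_*
   macro, so the redefinitions above take effect here and any hook not
   overridden keeps its default.  */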
30430 struct gcc_target targetm = TARGET_INITIALIZER;
30432 #include "gt-i386.h"