better m128i constant materialization
gcc/config/i386/i386.c
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "c-common.h"
39 #include "except.h"
40 #include "function.h"
41 #include "recog.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "basic-block.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "langhooks.h"
50 #include "cgraph.h"
51 #include "gimple.h"
52 #include "dwarf2.h"
53 #include "df.h"
54 #include "tm-constrs.h"
55 #include "params.h"
56 #include "cselib.h"
58 static int x86_builtin_vectorization_cost (bool);
59 static rtx legitimize_dllimport_symbol (rtx, bool);
61 #ifndef CHECK_STACK_LIMIT
62 #define CHECK_STACK_LIMIT (-1)
63 #endif
65 /* Return index of given mode in mult and division cost tables. */
66 #define MODE_INDEX(mode) \
67 ((mode) == QImode ? 0 \
68 : (mode) == HImode ? 1 \
69 : (mode) == SImode ? 2 \
70 : (mode) == DImode ? 3 \
71 : 4)
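/* A minimal illustrative sketch of how MODE_INDEX is typically used: it picks
   the per-mode entry out of the multiply/divide cost arrays in struct
   processor_costs below.  The field name mult_init follows i386.h and is an
   assumption here, not something this file defines.  */
#if 0
static int
example_mult_start_cost (enum machine_mode mode)
{
  /* ix86_cost points to the cost table of the processor being tuned for.  */
  return ix86_cost->mult_init[MODE_INDEX (mode)];
}
#endif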
73 /* Processor costs (relative to an add) */
74 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
75 #define COSTS_N_BYTES(N) ((N) * 2)
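/* Worked example of the scaling above: COSTS_N_INSNS (1) == 4 and
   COSTS_N_BYTES (2) == 4, so when tuning for size a two-byte instruction
   (the assumed size of an add) weighs the same as one instruction does in
   the speed-oriented tables, keeping the two scales comparable.  */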
77 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
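/* How to read the memcpy/memset entries in the cost tables below (each is a
   struct stringop_algs from i386.h): roughly, the leading field is the
   algorithm used when the block size is unknown at compile time, and each
   following {max, alg} pair selects ALG for blocks up to MAX bytes, with
   max == -1 terminating the list and covering all larger sizes.  For example,
     {libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}
   means: unknown size -> libcall, up to 256 bytes -> rep movsl, anything
   larger -> libcall.  DUMMY_STRINGOP_ALGS is a placeholder for the variant
   (32-bit or 64-bit) that a given tuning never uses.  */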
79 const
80 struct processor_costs ix86_size_cost = {/* costs for tuning for size */
81 COSTS_N_BYTES (2), /* cost of an add instruction */
82 COSTS_N_BYTES (3), /* cost of a lea instruction */
83 COSTS_N_BYTES (2), /* variable shift costs */
84 COSTS_N_BYTES (3), /* constant shift costs */
85 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
86 COSTS_N_BYTES (3), /* HI */
87 COSTS_N_BYTES (3), /* SI */
88 COSTS_N_BYTES (3), /* DI */
89 COSTS_N_BYTES (5)}, /* other */
90 0, /* cost of multiply per each bit set */
91 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
92 COSTS_N_BYTES (3), /* HI */
93 COSTS_N_BYTES (3), /* SI */
94 COSTS_N_BYTES (3), /* DI */
95 COSTS_N_BYTES (5)}, /* other */
96 COSTS_N_BYTES (3), /* cost of movsx */
97 COSTS_N_BYTES (3), /* cost of movzx */
98 0, /* "large" insn */
99 2, /* MOVE_RATIO */
100 2, /* cost for loading QImode using movzbl */
101 {2, 2, 2}, /* cost of loading integer registers
102 in QImode, HImode and SImode.
103 Relative to reg-reg move (2). */
104 {2, 2, 2}, /* cost of storing integer registers */
105 2, /* cost of reg,reg fld/fst */
106 {2, 2, 2}, /* cost of loading fp registers
107 in SFmode, DFmode and XFmode */
108 {2, 2, 2}, /* cost of storing fp registers
109 in SFmode, DFmode and XFmode */
110 3, /* cost of moving MMX register */
111 {3, 3}, /* cost of loading MMX registers
112 in SImode and DImode */
113 {3, 3}, /* cost of storing MMX registers
114 in SImode and DImode */
115 3, /* cost of moving SSE register */
116 {3, 3, 3}, /* cost of loading SSE registers
117 in SImode, DImode and TImode */
118 {3, 3, 3}, /* cost of storing SSE registers
119 in SImode, DImode and TImode */
120 3, /* MMX or SSE register to integer */
121 0, /* size of l1 cache */
122 0, /* size of l2 cache */
123 0, /* size of prefetch block */
124 0, /* number of parallel prefetches */
125 2, /* Branch cost */
126 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
127 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
128 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
129 COSTS_N_BYTES (2), /* cost of FABS instruction. */
130 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
131 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
132 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
133 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
134 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
135 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
136 1, /* scalar_stmt_cost. */
137 1, /* scalar load_cost. */
138 1, /* scalar_store_cost. */
139 1, /* vec_stmt_cost. */
140 1, /* vec_to_scalar_cost. */
141 1, /* scalar_to_vec_cost. */
142 1, /* vec_align_load_cost. */
143 1, /* vec_unalign_load_cost. */
144 1, /* vec_store_cost. */
145 1, /* cond_taken_branch_cost. */
146 1, /* cond_not_taken_branch_cost. */
147 };
149 /* Processor costs (relative to an add) */
150 static const
151 struct processor_costs i386_cost = { /* 386 specific costs */
152 COSTS_N_INSNS (1), /* cost of an add instruction */
153 COSTS_N_INSNS (1), /* cost of a lea instruction */
154 COSTS_N_INSNS (3), /* variable shift costs */
155 COSTS_N_INSNS (2), /* constant shift costs */
156 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
157 COSTS_N_INSNS (6), /* HI */
158 COSTS_N_INSNS (6), /* SI */
159 COSTS_N_INSNS (6), /* DI */
160 COSTS_N_INSNS (6)}, /* other */
161 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
162 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
163 COSTS_N_INSNS (23), /* HI */
164 COSTS_N_INSNS (23), /* SI */
165 COSTS_N_INSNS (23), /* DI */
166 COSTS_N_INSNS (23)}, /* other */
167 COSTS_N_INSNS (3), /* cost of movsx */
168 COSTS_N_INSNS (2), /* cost of movzx */
169 15, /* "large" insn */
170 3, /* MOVE_RATIO */
171 4, /* cost for loading QImode using movzbl */
172 {2, 4, 2}, /* cost of loading integer registers
173 in QImode, HImode and SImode.
174 Relative to reg-reg move (2). */
175 {2, 4, 2}, /* cost of storing integer registers */
176 2, /* cost of reg,reg fld/fst */
177 {8, 8, 8}, /* cost of loading fp registers
178 in SFmode, DFmode and XFmode */
179 {8, 8, 8}, /* cost of storing fp registers
180 in SFmode, DFmode and XFmode */
181 2, /* cost of moving MMX register */
182 {4, 8}, /* cost of loading MMX registers
183 in SImode and DImode */
184 {4, 8}, /* cost of storing MMX registers
185 in SImode and DImode */
186 2, /* cost of moving SSE register */
187 {4, 8, 16}, /* cost of loading SSE registers
188 in SImode, DImode and TImode */
189 {4, 8, 16}, /* cost of storing SSE registers
190 in SImode, DImode and TImode */
191 3, /* MMX or SSE register to integer */
192 0, /* size of l1 cache */
193 0, /* size of l2 cache */
194 0, /* size of prefetch block */
195 0, /* number of parallel prefetches */
196 1, /* Branch cost */
197 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
198 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
199 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
200 COSTS_N_INSNS (22), /* cost of FABS instruction. */
201 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
202 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
203 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
204 DUMMY_STRINGOP_ALGS},
205 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
206 DUMMY_STRINGOP_ALGS},
207 1, /* scalar_stmt_cost. */
208 1, /* scalar load_cost. */
209 1, /* scalar_store_cost. */
210 1, /* vec_stmt_cost. */
211 1, /* vec_to_scalar_cost. */
212 1, /* scalar_to_vec_cost. */
213 1, /* vec_align_load_cost. */
214 2, /* vec_unalign_load_cost. */
215 1, /* vec_store_cost. */
216 3, /* cond_taken_branch_cost. */
217 1, /* cond_not_taken_branch_cost. */
218 };
220 static const
221 struct processor_costs i486_cost = { /* 486 specific costs */
222 COSTS_N_INSNS (1), /* cost of an add instruction */
223 COSTS_N_INSNS (1), /* cost of a lea instruction */
224 COSTS_N_INSNS (3), /* variable shift costs */
225 COSTS_N_INSNS (2), /* constant shift costs */
226 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
227 COSTS_N_INSNS (12), /* HI */
228 COSTS_N_INSNS (12), /* SI */
229 COSTS_N_INSNS (12), /* DI */
230 COSTS_N_INSNS (12)}, /* other */
231 1, /* cost of multiply per each bit set */
232 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
233 COSTS_N_INSNS (40), /* HI */
234 COSTS_N_INSNS (40), /* SI */
235 COSTS_N_INSNS (40), /* DI */
236 COSTS_N_INSNS (40)}, /* other */
237 COSTS_N_INSNS (3), /* cost of movsx */
238 COSTS_N_INSNS (2), /* cost of movzx */
239 15, /* "large" insn */
240 3, /* MOVE_RATIO */
241 4, /* cost for loading QImode using movzbl */
242 {2, 4, 2}, /* cost of loading integer registers
243 in QImode, HImode and SImode.
244 Relative to reg-reg move (2). */
245 {2, 4, 2}, /* cost of storing integer registers */
246 2, /* cost of reg,reg fld/fst */
247 {8, 8, 8}, /* cost of loading fp registers
248 in SFmode, DFmode and XFmode */
249 {8, 8, 8}, /* cost of storing fp registers
250 in SFmode, DFmode and XFmode */
251 2, /* cost of moving MMX register */
252 {4, 8}, /* cost of loading MMX registers
253 in SImode and DImode */
254 {4, 8}, /* cost of storing MMX registers
255 in SImode and DImode */
256 2, /* cost of moving SSE register */
257 {4, 8, 16}, /* cost of loading SSE registers
258 in SImode, DImode and TImode */
259 {4, 8, 16}, /* cost of storing SSE registers
260 in SImode, DImode and TImode */
261 3, /* MMX or SSE register to integer */
262 4, /* size of l1 cache. 486 has 8kB cache
263 shared for code and data, so 4kB is
264 not really precise. */
265 4, /* size of l2 cache */
266 0, /* size of prefetch block */
267 0, /* number of parallel prefetches */
268 1, /* Branch cost */
269 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
270 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
271 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
272 COSTS_N_INSNS (3), /* cost of FABS instruction. */
273 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
274 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
275 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
276 DUMMY_STRINGOP_ALGS},
277 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
278 DUMMY_STRINGOP_ALGS},
279 1, /* scalar_stmt_cost. */
280 1, /* scalar load_cost. */
281 1, /* scalar_store_cost. */
282 1, /* vec_stmt_cost. */
283 1, /* vec_to_scalar_cost. */
284 1, /* scalar_to_vec_cost. */
285 1, /* vec_align_load_cost. */
286 2, /* vec_unalign_load_cost. */
287 1, /* vec_store_cost. */
288 3, /* cond_taken_branch_cost. */
289 1, /* cond_not_taken_branch_cost. */
290 };
292 static const
293 struct processor_costs pentium_cost = {
294 COSTS_N_INSNS (1), /* cost of an add instruction */
295 COSTS_N_INSNS (1), /* cost of a lea instruction */
296 COSTS_N_INSNS (4), /* variable shift costs */
297 COSTS_N_INSNS (1), /* constant shift costs */
298 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
299 COSTS_N_INSNS (11), /* HI */
300 COSTS_N_INSNS (11), /* SI */
301 COSTS_N_INSNS (11), /* DI */
302 COSTS_N_INSNS (11)}, /* other */
303 0, /* cost of multiply per each bit set */
304 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
305 COSTS_N_INSNS (25), /* HI */
306 COSTS_N_INSNS (25), /* SI */
307 COSTS_N_INSNS (25), /* DI */
308 COSTS_N_INSNS (25)}, /* other */
309 COSTS_N_INSNS (3), /* cost of movsx */
310 COSTS_N_INSNS (2), /* cost of movzx */
311 8, /* "large" insn */
312 6, /* MOVE_RATIO */
313 6, /* cost for loading QImode using movzbl */
314 {2, 4, 2}, /* cost of loading integer registers
315 in QImode, HImode and SImode.
316 Relative to reg-reg move (2). */
317 {2, 4, 2}, /* cost of storing integer registers */
318 2, /* cost of reg,reg fld/fst */
319 {2, 2, 6}, /* cost of loading fp registers
320 in SFmode, DFmode and XFmode */
321 {4, 4, 6}, /* cost of storing fp registers
322 in SFmode, DFmode and XFmode */
323 8, /* cost of moving MMX register */
324 {8, 8}, /* cost of loading MMX registers
325 in SImode and DImode */
326 {8, 8}, /* cost of storing MMX registers
327 in SImode and DImode */
328 2, /* cost of moving SSE register */
329 {4, 8, 16}, /* cost of loading SSE registers
330 in SImode, DImode and TImode */
331 {4, 8, 16}, /* cost of storing SSE registers
332 in SImode, DImode and TImode */
333 3, /* MMX or SSE register to integer */
334 8, /* size of l1 cache. */
335 8, /* size of l2 cache */
336 0, /* size of prefetch block */
337 0, /* number of parallel prefetches */
338 2, /* Branch cost */
339 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
340 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
341 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
342 COSTS_N_INSNS (1), /* cost of FABS instruction. */
343 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
344 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
345 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
346 DUMMY_STRINGOP_ALGS},
347 {{libcall, {{-1, rep_prefix_4_byte}}},
348 DUMMY_STRINGOP_ALGS},
349 1, /* scalar_stmt_cost. */
350 1, /* scalar load_cost. */
351 1, /* scalar_store_cost. */
352 1, /* vec_stmt_cost. */
353 1, /* vec_to_scalar_cost. */
354 1, /* scalar_to_vec_cost. */
355 1, /* vec_align_load_cost. */
356 2, /* vec_unalign_load_cost. */
357 1, /* vec_store_cost. */
358 3, /* cond_taken_branch_cost. */
359 1, /* cond_not_taken_branch_cost. */
360 };
362 static const
363 struct processor_costs pentiumpro_cost = {
364 COSTS_N_INSNS (1), /* cost of an add instruction */
365 COSTS_N_INSNS (1), /* cost of a lea instruction */
366 COSTS_N_INSNS (1), /* variable shift costs */
367 COSTS_N_INSNS (1), /* constant shift costs */
368 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
369 COSTS_N_INSNS (4), /* HI */
370 COSTS_N_INSNS (4), /* SI */
371 COSTS_N_INSNS (4), /* DI */
372 COSTS_N_INSNS (4)}, /* other */
373 0, /* cost of multiply per each bit set */
374 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
375 COSTS_N_INSNS (17), /* HI */
376 COSTS_N_INSNS (17), /* SI */
377 COSTS_N_INSNS (17), /* DI */
378 COSTS_N_INSNS (17)}, /* other */
379 COSTS_N_INSNS (1), /* cost of movsx */
380 COSTS_N_INSNS (1), /* cost of movzx */
381 8, /* "large" insn */
382 6, /* MOVE_RATIO */
383 2, /* cost for loading QImode using movzbl */
384 {4, 4, 4}, /* cost of loading integer registers
385 in QImode, HImode and SImode.
386 Relative to reg-reg move (2). */
387 {2, 2, 2}, /* cost of storing integer registers */
388 2, /* cost of reg,reg fld/fst */
389 {2, 2, 6}, /* cost of loading fp registers
390 in SFmode, DFmode and XFmode */
391 {4, 4, 6}, /* cost of storing fp registers
392 in SFmode, DFmode and XFmode */
393 2, /* cost of moving MMX register */
394 {2, 2}, /* cost of loading MMX registers
395 in SImode and DImode */
396 {2, 2}, /* cost of storing MMX registers
397 in SImode and DImode */
398 2, /* cost of moving SSE register */
399 {2, 2, 8}, /* cost of loading SSE registers
400 in SImode, DImode and TImode */
401 {2, 2, 8}, /* cost of storing SSE registers
402 in SImode, DImode and TImode */
403 3, /* MMX or SSE register to integer */
404 8, /* size of l1 cache. */
405 256, /* size of l2 cache */
406 32, /* size of prefetch block */
407 6, /* number of parallel prefetches */
408 2, /* Branch cost */
409 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
410 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
411 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
412 COSTS_N_INSNS (2), /* cost of FABS instruction. */
413 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
414 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
415 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we ensure
416 the alignment). For small blocks the inline loop is still a noticeable win; for bigger
417 blocks either rep movsl or rep movsb is the way to go. Rep movsb apparently has a
418 more expensive startup time in the CPU, but after 4K the difference is down in the noise. */
420 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
421 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
422 DUMMY_STRINGOP_ALGS},
423 {{rep_prefix_4_byte, {{1024, unrolled_loop},
424 {8192, rep_prefix_4_byte}, {-1, libcall}}},
425 DUMMY_STRINGOP_ALGS},
426 1, /* scalar_stmt_cost. */
427 1, /* scalar load_cost. */
428 1, /* scalar_store_cost. */
429 1, /* vec_stmt_cost. */
430 1, /* vec_to_scalar_cost. */
431 1, /* scalar_to_vec_cost. */
432 1, /* vec_align_load_cost. */
433 2, /* vec_unalign_load_cost. */
434 1, /* vec_store_cost. */
435 3, /* cond_taken_branch_cost. */
436 1, /* cond_not_taken_branch_cost. */
437 };
439 static const
440 struct processor_costs geode_cost = {
441 COSTS_N_INSNS (1), /* cost of an add instruction */
442 COSTS_N_INSNS (1), /* cost of a lea instruction */
443 COSTS_N_INSNS (2), /* variable shift costs */
444 COSTS_N_INSNS (1), /* constant shift costs */
445 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
446 COSTS_N_INSNS (4), /* HI */
447 COSTS_N_INSNS (7), /* SI */
448 COSTS_N_INSNS (7), /* DI */
449 COSTS_N_INSNS (7)}, /* other */
450 0, /* cost of multiply per each bit set */
451 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
452 COSTS_N_INSNS (23), /* HI */
453 COSTS_N_INSNS (39), /* SI */
454 COSTS_N_INSNS (39), /* DI */
455 COSTS_N_INSNS (39)}, /* other */
456 COSTS_N_INSNS (1), /* cost of movsx */
457 COSTS_N_INSNS (1), /* cost of movzx */
458 8, /* "large" insn */
459 4, /* MOVE_RATIO */
460 1, /* cost for loading QImode using movzbl */
461 {1, 1, 1}, /* cost of loading integer registers
462 in QImode, HImode and SImode.
463 Relative to reg-reg move (2). */
464 {1, 1, 1}, /* cost of storing integer registers */
465 1, /* cost of reg,reg fld/fst */
466 {1, 1, 1}, /* cost of loading fp registers
467 in SFmode, DFmode and XFmode */
468 {4, 6, 6}, /* cost of storing fp registers
469 in SFmode, DFmode and XFmode */
471 1, /* cost of moving MMX register */
472 {1, 1}, /* cost of loading MMX registers
473 in SImode and DImode */
474 {1, 1}, /* cost of storing MMX registers
475 in SImode and DImode */
476 1, /* cost of moving SSE register */
477 {1, 1, 1}, /* cost of loading SSE registers
478 in SImode, DImode and TImode */
479 {1, 1, 1}, /* cost of storing SSE registers
480 in SImode, DImode and TImode */
481 1, /* MMX or SSE register to integer */
482 64, /* size of l1 cache. */
483 128, /* size of l2 cache. */
484 32, /* size of prefetch block */
485 1, /* number of parallel prefetches */
486 1, /* Branch cost */
487 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
488 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
489 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
490 COSTS_N_INSNS (1), /* cost of FABS instruction. */
491 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
492 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
493 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
494 DUMMY_STRINGOP_ALGS},
495 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
496 DUMMY_STRINGOP_ALGS},
497 1, /* scalar_stmt_cost. */
498 1, /* scalar load_cost. */
499 1, /* scalar_store_cost. */
500 1, /* vec_stmt_cost. */
501 1, /* vec_to_scalar_cost. */
502 1, /* scalar_to_vec_cost. */
503 1, /* vec_align_load_cost. */
504 2, /* vec_unalign_load_cost. */
505 1, /* vec_store_cost. */
506 3, /* cond_taken_branch_cost. */
507 1, /* cond_not_taken_branch_cost. */
508 };
510 static const
511 struct processor_costs k6_cost = {
512 COSTS_N_INSNS (1), /* cost of an add instruction */
513 COSTS_N_INSNS (2), /* cost of a lea instruction */
514 COSTS_N_INSNS (1), /* variable shift costs */
515 COSTS_N_INSNS (1), /* constant shift costs */
516 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
517 COSTS_N_INSNS (3), /* HI */
518 COSTS_N_INSNS (3), /* SI */
519 COSTS_N_INSNS (3), /* DI */
520 COSTS_N_INSNS (3)}, /* other */
521 0, /* cost of multiply per each bit set */
522 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
523 COSTS_N_INSNS (18), /* HI */
524 COSTS_N_INSNS (18), /* SI */
525 COSTS_N_INSNS (18), /* DI */
526 COSTS_N_INSNS (18)}, /* other */
527 COSTS_N_INSNS (2), /* cost of movsx */
528 COSTS_N_INSNS (2), /* cost of movzx */
529 8, /* "large" insn */
530 4, /* MOVE_RATIO */
531 3, /* cost for loading QImode using movzbl */
532 {4, 5, 4}, /* cost of loading integer registers
533 in QImode, HImode and SImode.
534 Relative to reg-reg move (2). */
535 {2, 3, 2}, /* cost of storing integer registers */
536 4, /* cost of reg,reg fld/fst */
537 {6, 6, 6}, /* cost of loading fp registers
538 in SFmode, DFmode and XFmode */
539 {4, 4, 4}, /* cost of storing fp registers
540 in SFmode, DFmode and XFmode */
541 2, /* cost of moving MMX register */
542 {2, 2}, /* cost of loading MMX registers
543 in SImode and DImode */
544 {2, 2}, /* cost of storing MMX registers
545 in SImode and DImode */
546 2, /* cost of moving SSE register */
547 {2, 2, 8}, /* cost of loading SSE registers
548 in SImode, DImode and TImode */
549 {2, 2, 8}, /* cost of storing SSE registers
550 in SImode, DImode and TImode */
551 6, /* MMX or SSE register to integer */
552 32, /* size of l1 cache. */
553 32, /* size of l2 cache. Some models
554 have integrated l2 cache, but
555 optimizing for k6 is not important
556 enough to worry about that. */
557 32, /* size of prefetch block */
558 1, /* number of parallel prefetches */
559 1, /* Branch cost */
560 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
561 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
562 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
563 COSTS_N_INSNS (2), /* cost of FABS instruction. */
564 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
565 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
566 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
567 DUMMY_STRINGOP_ALGS},
568 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
569 DUMMY_STRINGOP_ALGS},
570 1, /* scalar_stmt_cost. */
571 1, /* scalar load_cost. */
572 1, /* scalar_store_cost. */
573 1, /* vec_stmt_cost. */
574 1, /* vec_to_scalar_cost. */
575 1, /* scalar_to_vec_cost. */
576 1, /* vec_align_load_cost. */
577 2, /* vec_unalign_load_cost. */
578 1, /* vec_store_cost. */
579 3, /* cond_taken_branch_cost. */
580 1, /* cond_not_taken_branch_cost. */
581 };
583 static const
584 struct processor_costs athlon_cost = {
585 COSTS_N_INSNS (1), /* cost of an add instruction */
586 COSTS_N_INSNS (2), /* cost of a lea instruction */
587 COSTS_N_INSNS (1), /* variable shift costs */
588 COSTS_N_INSNS (1), /* constant shift costs */
589 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
590 COSTS_N_INSNS (5), /* HI */
591 COSTS_N_INSNS (5), /* SI */
592 COSTS_N_INSNS (5), /* DI */
593 COSTS_N_INSNS (5)}, /* other */
594 0, /* cost of multiply per each bit set */
595 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
596 COSTS_N_INSNS (26), /* HI */
597 COSTS_N_INSNS (42), /* SI */
598 COSTS_N_INSNS (74), /* DI */
599 COSTS_N_INSNS (74)}, /* other */
600 COSTS_N_INSNS (1), /* cost of movsx */
601 COSTS_N_INSNS (1), /* cost of movzx */
602 8, /* "large" insn */
603 9, /* MOVE_RATIO */
604 4, /* cost for loading QImode using movzbl */
605 {3, 4, 3}, /* cost of loading integer registers
606 in QImode, HImode and SImode.
607 Relative to reg-reg move (2). */
608 {3, 4, 3}, /* cost of storing integer registers */
609 4, /* cost of reg,reg fld/fst */
610 {4, 4, 12}, /* cost of loading fp registers
611 in SFmode, DFmode and XFmode */
612 {6, 6, 8}, /* cost of storing fp registers
613 in SFmode, DFmode and XFmode */
614 2, /* cost of moving MMX register */
615 {4, 4}, /* cost of loading MMX registers
616 in SImode and DImode */
617 {4, 4}, /* cost of storing MMX registers
618 in SImode and DImode */
619 2, /* cost of moving SSE register */
620 {4, 4, 6}, /* cost of loading SSE registers
621 in SImode, DImode and TImode */
622 {4, 4, 5}, /* cost of storing SSE registers
623 in SImode, DImode and TImode */
624 5, /* MMX or SSE register to integer */
625 64, /* size of l1 cache. */
626 256, /* size of l2 cache. */
627 64, /* size of prefetch block */
628 6, /* number of parallel prefetches */
629 5, /* Branch cost */
630 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
631 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
632 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
633 COSTS_N_INSNS (2), /* cost of FABS instruction. */
634 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
635 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
636 /* For some reason, Athlon deals better with REP prefix (relative to loops)
637 compared to K8. Alignment becomes important after 8 bytes for memcpy and
638 128 bytes for memset. */
639 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
640 DUMMY_STRINGOP_ALGS},
641 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
642 DUMMY_STRINGOP_ALGS},
643 1, /* scalar_stmt_cost. */
644 1, /* scalar load_cost. */
645 1, /* scalar_store_cost. */
646 1, /* vec_stmt_cost. */
647 1, /* vec_to_scalar_cost. */
648 1, /* scalar_to_vec_cost. */
649 1, /* vec_align_load_cost. */
650 2, /* vec_unalign_load_cost. */
651 1, /* vec_store_cost. */
652 3, /* cond_taken_branch_cost. */
653 1, /* cond_not_taken_branch_cost. */
654 };
656 static const
657 struct processor_costs k8_cost = {
658 COSTS_N_INSNS (1), /* cost of an add instruction */
659 COSTS_N_INSNS (2), /* cost of a lea instruction */
660 COSTS_N_INSNS (1), /* variable shift costs */
661 COSTS_N_INSNS (1), /* constant shift costs */
662 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
663 COSTS_N_INSNS (4), /* HI */
664 COSTS_N_INSNS (3), /* SI */
665 COSTS_N_INSNS (4), /* DI */
666 COSTS_N_INSNS (5)}, /* other */
667 0, /* cost of multiply per each bit set */
668 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
669 COSTS_N_INSNS (26), /* HI */
670 COSTS_N_INSNS (42), /* SI */
671 COSTS_N_INSNS (74), /* DI */
672 COSTS_N_INSNS (74)}, /* other */
673 COSTS_N_INSNS (1), /* cost of movsx */
674 COSTS_N_INSNS (1), /* cost of movzx */
675 8, /* "large" insn */
676 9, /* MOVE_RATIO */
677 4, /* cost for loading QImode using movzbl */
678 {3, 4, 3}, /* cost of loading integer registers
679 in QImode, HImode and SImode.
680 Relative to reg-reg move (2). */
681 {3, 4, 3}, /* cost of storing integer registers */
682 4, /* cost of reg,reg fld/fst */
683 {4, 4, 12}, /* cost of loading fp registers
684 in SFmode, DFmode and XFmode */
685 {6, 6, 8}, /* cost of storing fp registers
686 in SFmode, DFmode and XFmode */
687 2, /* cost of moving MMX register */
688 {3, 3}, /* cost of loading MMX registers
689 in SImode and DImode */
690 {4, 4}, /* cost of storing MMX registers
691 in SImode and DImode */
692 2, /* cost of moving SSE register */
693 {4, 3, 6}, /* cost of loading SSE registers
694 in SImode, DImode and TImode */
695 {4, 4, 5}, /* cost of storing SSE registers
696 in SImode, DImode and TImode */
697 5, /* MMX or SSE register to integer */
698 64, /* size of l1 cache. */
699 512, /* size of l2 cache. */
700 64, /* size of prefetch block */
701 /* New AMD processors never drop prefetches; if they cannot be performed
702 immediately, they are queued. We set the number of simultaneous prefetches
703 to a large constant to reflect this (it is probably not a good idea not
704 to limit the number of prefetches at all, as their execution also takes some
705 time). */
706 100, /* number of parallel prefetches */
707 3, /* Branch cost */
708 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
709 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
710 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
711 COSTS_N_INSNS (2), /* cost of FABS instruction. */
712 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
713 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
714 /* K8 has an optimized REP instruction for medium-sized blocks, but for very small
715 blocks it is better to use a loop. For large blocks, a libcall can do
716 nontemporal accesses and beat inline code considerably. */
717 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
718 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
719 {{libcall, {{8, loop}, {24, unrolled_loop},
720 {2048, rep_prefix_4_byte}, {-1, libcall}}},
721 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
722 4, /* scalar_stmt_cost. */
723 2, /* scalar load_cost. */
724 2, /* scalar_store_cost. */
725 5, /* vec_stmt_cost. */
726 0, /* vec_to_scalar_cost. */
727 2, /* scalar_to_vec_cost. */
728 2, /* vec_align_load_cost. */
729 3, /* vec_unalign_load_cost. */
730 3, /* vec_store_cost. */
731 3, /* cond_taken_branch_cost. */
732 2, /* cond_not_taken_branch_cost. */
733 };
735 struct processor_costs amdfam10_cost = {
736 COSTS_N_INSNS (1), /* cost of an add instruction */
737 COSTS_N_INSNS (2), /* cost of a lea instruction */
738 COSTS_N_INSNS (1), /* variable shift costs */
739 COSTS_N_INSNS (1), /* constant shift costs */
740 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
741 COSTS_N_INSNS (4), /* HI */
742 COSTS_N_INSNS (3), /* SI */
743 COSTS_N_INSNS (4), /* DI */
744 COSTS_N_INSNS (5)}, /* other */
745 0, /* cost of multiply per each bit set */
746 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
747 COSTS_N_INSNS (35), /* HI */
748 COSTS_N_INSNS (51), /* SI */
749 COSTS_N_INSNS (83), /* DI */
750 COSTS_N_INSNS (83)}, /* other */
751 COSTS_N_INSNS (1), /* cost of movsx */
752 COSTS_N_INSNS (1), /* cost of movzx */
753 8, /* "large" insn */
754 9, /* MOVE_RATIO */
755 4, /* cost for loading QImode using movzbl */
756 {3, 4, 3}, /* cost of loading integer registers
757 in QImode, HImode and SImode.
758 Relative to reg-reg move (2). */
759 {3, 4, 3}, /* cost of storing integer registers */
760 4, /* cost of reg,reg fld/fst */
761 {4, 4, 12}, /* cost of loading fp registers
762 in SFmode, DFmode and XFmode */
763 {6, 6, 8}, /* cost of storing fp registers
764 in SFmode, DFmode and XFmode */
765 2, /* cost of moving MMX register */
766 {3, 3}, /* cost of loading MMX registers
767 in SImode and DImode */
768 {4, 4}, /* cost of storing MMX registers
769 in SImode and DImode */
770 2, /* cost of moving SSE register */
771 {4, 4, 3}, /* cost of loading SSE registers
772 in SImode, DImode and TImode */
773 {4, 4, 5}, /* cost of storing SSE registers
774 in SImode, DImode and TImode */
775 3, /* MMX or SSE register to integer */
776 /* On K8
777 MOVD reg64, xmmreg Double FSTORE 4
778 MOVD reg32, xmmreg Double FSTORE 4
779 On AMDFAM10
780 MOVD reg64, xmmreg Double FADD 3
781 1/1 1/1
782 MOVD reg32, xmmreg Double FADD 3
783 1/1 1/1 */
784 64, /* size of l1 cache. */
785 512, /* size of l2 cache. */
786 64, /* size of prefetch block */
787 /* New AMD processors never drop prefetches; if they cannot be performed
788 immediately, they are queued. We set the number of simultaneous prefetches
789 to a large constant to reflect this (it is probably not a good idea not
790 to limit the number of prefetches at all, as their execution also takes some
791 time). */
792 100, /* number of parallel prefetches */
793 2, /* Branch cost */
794 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
795 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
796 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
797 COSTS_N_INSNS (2), /* cost of FABS instruction. */
798 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
799 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
801 /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but for
802 very small blocks it is better to use a loop. For large blocks, a libcall can
803 do nontemporal accesses and beat inline code considerably. */
804 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
805 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
806 {{libcall, {{8, loop}, {24, unrolled_loop},
807 {2048, rep_prefix_4_byte}, {-1, libcall}}},
808 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
809 4, /* scalar_stmt_cost. */
810 2, /* scalar load_cost. */
811 2, /* scalar_store_cost. */
812 6, /* vec_stmt_cost. */
813 0, /* vec_to_scalar_cost. */
814 2, /* scalar_to_vec_cost. */
815 2, /* vec_align_load_cost. */
816 2, /* vec_unalign_load_cost. */
817 2, /* vec_store_cost. */
818 2, /* cond_taken_branch_cost. */
819 1, /* cond_not_taken_branch_cost. */
820 };
822 static const
823 struct processor_costs pentium4_cost = {
824 COSTS_N_INSNS (1), /* cost of an add instruction */
825 COSTS_N_INSNS (3), /* cost of a lea instruction */
826 COSTS_N_INSNS (4), /* variable shift costs */
827 COSTS_N_INSNS (4), /* constant shift costs */
828 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
829 COSTS_N_INSNS (15), /* HI */
830 COSTS_N_INSNS (15), /* SI */
831 COSTS_N_INSNS (15), /* DI */
832 COSTS_N_INSNS (15)}, /* other */
833 0, /* cost of multiply per each bit set */
834 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
835 COSTS_N_INSNS (56), /* HI */
836 COSTS_N_INSNS (56), /* SI */
837 COSTS_N_INSNS (56), /* DI */
838 COSTS_N_INSNS (56)}, /* other */
839 COSTS_N_INSNS (1), /* cost of movsx */
840 COSTS_N_INSNS (1), /* cost of movzx */
841 16, /* "large" insn */
842 6, /* MOVE_RATIO */
843 2, /* cost for loading QImode using movzbl */
844 {4, 5, 4}, /* cost of loading integer registers
845 in QImode, HImode and SImode.
846 Relative to reg-reg move (2). */
847 {2, 3, 2}, /* cost of storing integer registers */
848 2, /* cost of reg,reg fld/fst */
849 {2, 2, 6}, /* cost of loading fp registers
850 in SFmode, DFmode and XFmode */
851 {4, 4, 6}, /* cost of storing fp registers
852 in SFmode, DFmode and XFmode */
853 2, /* cost of moving MMX register */
854 {2, 2}, /* cost of loading MMX registers
855 in SImode and DImode */
856 {2, 2}, /* cost of storing MMX registers
857 in SImode and DImode */
858 12, /* cost of moving SSE register */
859 {12, 12, 12}, /* cost of loading SSE registers
860 in SImode, DImode and TImode */
861 {2, 2, 8}, /* cost of storing SSE registers
862 in SImode, DImode and TImode */
863 10, /* MMX or SSE register to integer */
864 8, /* size of l1 cache. */
865 256, /* size of l2 cache. */
866 64, /* size of prefetch block */
867 6, /* number of parallel prefetches */
868 2, /* Branch cost */
869 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
870 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
871 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
872 COSTS_N_INSNS (2), /* cost of FABS instruction. */
873 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
874 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
875 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
876 DUMMY_STRINGOP_ALGS},
877 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
878 {-1, libcall}}},
879 DUMMY_STRINGOP_ALGS},
880 1, /* scalar_stmt_cost. */
881 1, /* scalar load_cost. */
882 1, /* scalar_store_cost. */
883 1, /* vec_stmt_cost. */
884 1, /* vec_to_scalar_cost. */
885 1, /* scalar_to_vec_cost. */
886 1, /* vec_align_load_cost. */
887 2, /* vec_unalign_load_cost. */
888 1, /* vec_store_cost. */
889 3, /* cond_taken_branch_cost. */
890 1, /* cond_not_taken_branch_cost. */
891 };
893 static const
894 struct processor_costs nocona_cost = {
895 COSTS_N_INSNS (1), /* cost of an add instruction */
896 COSTS_N_INSNS (1), /* cost of a lea instruction */
897 COSTS_N_INSNS (1), /* variable shift costs */
898 COSTS_N_INSNS (1), /* constant shift costs */
899 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
900 COSTS_N_INSNS (10), /* HI */
901 COSTS_N_INSNS (10), /* SI */
902 COSTS_N_INSNS (10), /* DI */
903 COSTS_N_INSNS (10)}, /* other */
904 0, /* cost of multiply per each bit set */
905 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
906 COSTS_N_INSNS (66), /* HI */
907 COSTS_N_INSNS (66), /* SI */
908 COSTS_N_INSNS (66), /* DI */
909 COSTS_N_INSNS (66)}, /* other */
910 COSTS_N_INSNS (1), /* cost of movsx */
911 COSTS_N_INSNS (1), /* cost of movzx */
912 16, /* "large" insn */
913 17, /* MOVE_RATIO */
914 4, /* cost for loading QImode using movzbl */
915 {4, 4, 4}, /* cost of loading integer registers
916 in QImode, HImode and SImode.
917 Relative to reg-reg move (2). */
918 {4, 4, 4}, /* cost of storing integer registers */
919 3, /* cost of reg,reg fld/fst */
920 {12, 12, 12}, /* cost of loading fp registers
921 in SFmode, DFmode and XFmode */
922 {4, 4, 4}, /* cost of storing fp registers
923 in SFmode, DFmode and XFmode */
924 6, /* cost of moving MMX register */
925 {12, 12}, /* cost of loading MMX registers
926 in SImode and DImode */
927 {12, 12}, /* cost of storing MMX registers
928 in SImode and DImode */
929 6, /* cost of moving SSE register */
930 {12, 12, 12}, /* cost of loading SSE registers
931 in SImode, DImode and TImode */
932 {12, 12, 12}, /* cost of storing SSE registers
933 in SImode, DImode and TImode */
934 8, /* MMX or SSE register to integer */
935 8, /* size of l1 cache. */
936 1024, /* size of l2 cache. */
937 128, /* size of prefetch block */
938 8, /* number of parallel prefetches */
939 1, /* Branch cost */
940 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
941 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
942 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
943 COSTS_N_INSNS (3), /* cost of FABS instruction. */
944 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
945 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
946 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
947 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
948 {100000, unrolled_loop}, {-1, libcall}}}},
949 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
950 {-1, libcall}}},
951 {libcall, {{24, loop}, {64, unrolled_loop},
952 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
953 1, /* scalar_stmt_cost. */
954 1, /* scalar load_cost. */
955 1, /* scalar_store_cost. */
956 1, /* vec_stmt_cost. */
957 1, /* vec_to_scalar_cost. */
958 1, /* scalar_to_vec_cost. */
959 1, /* vec_align_load_cost. */
960 2, /* vec_unalign_load_cost. */
961 1, /* vec_store_cost. */
962 3, /* cond_taken_branch_cost. */
963 1, /* cond_not_taken_branch_cost. */
964 };
966 static const
967 struct processor_costs core2_cost = {
968 COSTS_N_INSNS (1), /* cost of an add instruction */
969 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
970 COSTS_N_INSNS (1), /* variable shift costs */
971 COSTS_N_INSNS (1), /* constant shift costs */
972 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
973 COSTS_N_INSNS (3), /* HI */
974 COSTS_N_INSNS (3), /* SI */
975 COSTS_N_INSNS (3), /* DI */
976 COSTS_N_INSNS (3)}, /* other */
977 0, /* cost of multiply per each bit set */
978 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
979 COSTS_N_INSNS (22), /* HI */
980 COSTS_N_INSNS (22), /* SI */
981 COSTS_N_INSNS (22), /* DI */
982 COSTS_N_INSNS (22)}, /* other */
983 COSTS_N_INSNS (1), /* cost of movsx */
984 COSTS_N_INSNS (1), /* cost of movzx */
985 8, /* "large" insn */
986 16, /* MOVE_RATIO */
987 2, /* cost for loading QImode using movzbl */
988 {6, 6, 6}, /* cost of loading integer registers
989 in QImode, HImode and SImode.
990 Relative to reg-reg move (2). */
991 {4, 4, 4}, /* cost of storing integer registers */
992 2, /* cost of reg,reg fld/fst */
993 {6, 6, 6}, /* cost of loading fp registers
994 in SFmode, DFmode and XFmode */
995 {4, 4, 4}, /* cost of storing fp registers
996 in SFmode, DFmode and XFmode */
997 2, /* cost of moving MMX register */
998 {6, 6}, /* cost of loading MMX registers
999 in SImode and DImode */
1000 {4, 4}, /* cost of storing MMX registers
1001 in SImode and DImode */
1002 2, /* cost of moving SSE register */
1003 {6, 6, 6}, /* cost of loading SSE registers
1004 in SImode, DImode and TImode */
1005 {4, 4, 4}, /* cost of storing SSE registers
1006 in SImode, DImode and TImode */
1007 2, /* MMX or SSE register to integer */
1008 32, /* size of l1 cache. */
1009 2048, /* size of l2 cache. */
1010 128, /* size of prefetch block */
1011 8, /* number of parallel prefetches */
1012 3, /* Branch cost */
1013 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
1014 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
1015 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
1016 COSTS_N_INSNS (1), /* cost of FABS instruction. */
1017 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
1018 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
1019 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1020 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1021 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1022 {{libcall, {{8, loop}, {15, unrolled_loop},
1023 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1024 {libcall, {{24, loop}, {32, unrolled_loop},
1025 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1026 1, /* scalar_stmt_cost. */
1027 1, /* scalar load_cost. */
1028 1, /* scalar_store_cost. */
1029 1, /* vec_stmt_cost. */
1030 1, /* vec_to_scalar_cost. */
1031 1, /* scalar_to_vec_cost. */
1032 1, /* vec_align_load_cost. */
1033 2, /* vec_unalign_load_cost. */
1034 1, /* vec_store_cost. */
1035 3, /* cond_taken_branch_cost. */
1036 1, /* cond_not_taken_branch_cost. */
1037 };
1039 /* Generic64 should produce code tuned for Nocona and K8. */
1040 static const
1041 struct processor_costs generic64_cost = {
1042 COSTS_N_INSNS (1), /* cost of an add instruction */
1043 /* On all chips taken into consideration, lea is 2 cycles or more. With
1044 that cost, however, our current implementation of synth_mult results in
1045 the use of unnecessary temporary registers, causing a regression on several
1046 SPECfp benchmarks. */
1047 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1048 COSTS_N_INSNS (1), /* variable shift costs */
1049 COSTS_N_INSNS (1), /* constant shift costs */
1050 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1051 COSTS_N_INSNS (4), /* HI */
1052 COSTS_N_INSNS (3), /* SI */
1053 COSTS_N_INSNS (4), /* DI */
1054 COSTS_N_INSNS (2)}, /* other */
1055 0, /* cost of multiply per each bit set */
1056 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1057 COSTS_N_INSNS (26), /* HI */
1058 COSTS_N_INSNS (42), /* SI */
1059 COSTS_N_INSNS (74), /* DI */
1060 COSTS_N_INSNS (74)}, /* other */
1061 COSTS_N_INSNS (1), /* cost of movsx */
1062 COSTS_N_INSNS (1), /* cost of movzx */
1063 8, /* "large" insn */
1064 17, /* MOVE_RATIO */
1065 4, /* cost for loading QImode using movzbl */
1066 {4, 4, 4}, /* cost of loading integer registers
1067 in QImode, HImode and SImode.
1068 Relative to reg-reg move (2). */
1069 {4, 4, 4}, /* cost of storing integer registers */
1070 4, /* cost of reg,reg fld/fst */
1071 {12, 12, 12}, /* cost of loading fp registers
1072 in SFmode, DFmode and XFmode */
1073 {6, 6, 8}, /* cost of storing fp registers
1074 in SFmode, DFmode and XFmode */
1075 2, /* cost of moving MMX register */
1076 {8, 8}, /* cost of loading MMX registers
1077 in SImode and DImode */
1078 {8, 8}, /* cost of storing MMX registers
1079 in SImode and DImode */
1080 2, /* cost of moving SSE register */
1081 {8, 8, 8}, /* cost of loading SSE registers
1082 in SImode, DImode and TImode */
1083 {8, 8, 8}, /* cost of storing SSE registers
1084 in SImode, DImode and TImode */
1085 5, /* MMX or SSE register to integer */
1086 32, /* size of l1 cache. */
1087 512, /* size of l2 cache. */
1088 64, /* size of prefetch block */
1089 6, /* number of parallel prefetches */
1090 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this value
1091 is increased to the perhaps more appropriate value of 5. */
1092 3, /* Branch cost */
1093 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1094 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1095 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1096 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1097 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1098 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1099 {DUMMY_STRINGOP_ALGS,
1100 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1101 {DUMMY_STRINGOP_ALGS,
1102 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1103 1, /* scalar_stmt_cost. */
1104 1, /* scalar load_cost. */
1105 1, /* scalar_store_cost. */
1106 1, /* vec_stmt_cost. */
1107 1, /* vec_to_scalar_cost. */
1108 1, /* scalar_to_vec_cost. */
1109 1, /* vec_align_load_cost. */
1110 2, /* vec_unalign_load_cost. */
1111 1, /* vec_store_cost. */
1112 3, /* cond_taken_branch_cost. */
1113 1, /* cond_not_taken_branch_cost. */
1114 };
1116 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
1117 static const
1118 struct processor_costs generic32_cost = {
1119 COSTS_N_INSNS (1), /* cost of an add instruction */
1120 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1121 COSTS_N_INSNS (1), /* variable shift costs */
1122 COSTS_N_INSNS (1), /* constant shift costs */
1123 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1124 COSTS_N_INSNS (4), /* HI */
1125 COSTS_N_INSNS (3), /* SI */
1126 COSTS_N_INSNS (4), /* DI */
1127 COSTS_N_INSNS (2)}, /* other */
1128 0, /* cost of multiply per each bit set */
1129 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1130 COSTS_N_INSNS (26), /* HI */
1131 COSTS_N_INSNS (42), /* SI */
1132 COSTS_N_INSNS (74), /* DI */
1133 COSTS_N_INSNS (74)}, /* other */
1134 COSTS_N_INSNS (1), /* cost of movsx */
1135 COSTS_N_INSNS (1), /* cost of movzx */
1136 8, /* "large" insn */
1137 17, /* MOVE_RATIO */
1138 4, /* cost for loading QImode using movzbl */
1139 {4, 4, 4}, /* cost of loading integer registers
1140 in QImode, HImode and SImode.
1141 Relative to reg-reg move (2). */
1142 {4, 4, 4}, /* cost of storing integer registers */
1143 4, /* cost of reg,reg fld/fst */
1144 {12, 12, 12}, /* cost of loading fp registers
1145 in SFmode, DFmode and XFmode */
1146 {6, 6, 8}, /* cost of storing fp registers
1147 in SFmode, DFmode and XFmode */
1148 2, /* cost of moving MMX register */
1149 {8, 8}, /* cost of loading MMX registers
1150 in SImode and DImode */
1151 {8, 8}, /* cost of storing MMX registers
1152 in SImode and DImode */
1153 2, /* cost of moving SSE register */
1154 {8, 8, 8}, /* cost of loading SSE registers
1155 in SImode, DImode and TImode */
1156 {8, 8, 8}, /* cost of storing SSE registers
1157 in SImode, DImode and TImode */
1158 5, /* MMX or SSE register to integer */
1159 32, /* size of l1 cache. */
1160 256, /* size of l2 cache. */
1161 64, /* size of prefetch block */
1162 6, /* number of parallel prefetches */
1163 3, /* Branch cost */
1164 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1165 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1166 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1167 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1168 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1169 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1170 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1171 DUMMY_STRINGOP_ALGS},
1172 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1173 DUMMY_STRINGOP_ALGS},
1174 1, /* scalar_stmt_cost. */
1175 1, /* scalar load_cost. */
1176 1, /* scalar_store_cost. */
1177 1, /* vec_stmt_cost. */
1178 1, /* vec_to_scalar_cost. */
1179 1, /* scalar_to_vec_cost. */
1180 1, /* vec_align_load_cost. */
1181 2, /* vec_unalign_load_cost. */
1182 1, /* vec_store_cost. */
1183 3, /* cond_taken_branch_cost. */
1184 1, /* cond_not_taken_branch_cost. */
1185 };
1187 const struct processor_costs *ix86_cost = &pentium_cost;
1189 /* Processor feature/optimization bitmasks. */
1190 #define m_386 (1<<PROCESSOR_I386)
1191 #define m_486 (1<<PROCESSOR_I486)
1192 #define m_PENT (1<<PROCESSOR_PENTIUM)
1193 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1194 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1195 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1196 #define m_CORE2 (1<<PROCESSOR_CORE2)
1198 #define m_GEODE (1<<PROCESSOR_GEODE)
1199 #define m_K6 (1<<PROCESSOR_K6)
1200 #define m_K6_GEODE (m_K6 | m_GEODE)
1201 #define m_K8 (1<<PROCESSOR_K8)
1202 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1203 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1204 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1205 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)
1207 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1208 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1210 /* Generic instruction choice should be common subset of supported CPUs
1211 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1212 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
1214 /* Feature tests against the various tunings. */
1215 unsigned char ix86_tune_features[X86_TUNE_LAST];
1217 /* Feature tests against the various tunings used to create ix86_tune_features
1218 based on the processor mask. */
1219 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1220 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1221 negatively, so enabling it for Generic64 seems like a good code-size
1222 tradeoff. We can't enable it for 32-bit generic because it does not
1223 work well with PPro-based chips. */
1224 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1226 /* X86_TUNE_PUSH_MEMORY */
1227 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1228 | m_NOCONA | m_CORE2 | m_GENERIC,
1230 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1231 m_486 | m_PENT,
1233 /* X86_TUNE_UNROLL_STRLEN */
1234 m_486 | m_PENT | m_PPRO | m_AMD_MULTIPLE | m_K6 | m_CORE2 | m_GENERIC,
1236 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1237 m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1239 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1240 on simulation result. But after P4 was made, no performance benefit
1241 was observed with branch hints. It also increases the code size.
1242 As a result, icc never generates branch hints. */
1245 /* X86_TUNE_DOUBLE_WITH_ADD */
1246 ~m_386,
1248 /* X86_TUNE_USE_SAHF */
1249 m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
1250 | m_NOCONA | m_CORE2 | m_GENERIC,
1252 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1253 partial dependencies. */
1254 m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA
1255 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1257 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1258 register stalls on the Generic32 compilation setting as well. However,
1259 in the current implementation the partial register stalls are not eliminated
1260 very well - they can be introduced via subregs synthesized by combine
1261 and can happen in caller/callee saving sequences. Because this option
1262 pays back little on PPro-based chips and conflicts with the partial-register
1263 dependencies used by Athlon/P4-based chips, it is better to leave it off
1264 for generic32 for now. */
1265 m_PPRO,
1267 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1268 m_CORE2 | m_GENERIC,
1270 /* X86_TUNE_USE_HIMODE_FIOP */
1271 m_386 | m_486 | m_K6_GEODE,
1273 /* X86_TUNE_USE_SIMODE_FIOP */
1274 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_CORE2 | m_GENERIC),
1276 /* X86_TUNE_USE_MOV0 */
1277 m_K6,
1279 /* X86_TUNE_USE_CLTD */
1280 ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC),
1282 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1283 m_PENT4,
1285 /* X86_TUNE_SPLIT_LONG_MOVES */
1286 m_PPRO,
1288 /* X86_TUNE_READ_MODIFY_WRITE */
1289 ~m_PENT,
1291 /* X86_TUNE_READ_MODIFY */
1292 ~(m_PENT | m_PPRO),
1294 /* X86_TUNE_PROMOTE_QIMODE */
1295 m_K6_GEODE | m_PENT | m_386 | m_486 | m_AMD_MULTIPLE | m_CORE2
1296 | m_GENERIC /* | m_PENT4 ? */,
1298 /* X86_TUNE_FAST_PREFIX */
1299 ~(m_PENT | m_486 | m_386),
1301 /* X86_TUNE_SINGLE_STRINGOP */
1302 m_386 | m_PENT4 | m_NOCONA,
1304 /* X86_TUNE_QIMODE_MATH */
1307 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1308 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1309 might be considered for Generic32 if our scheme for avoiding partial
1310 stalls was more effective. */
1311 ~m_PPRO,
1313 /* X86_TUNE_PROMOTE_QI_REGS */
1316 /* X86_TUNE_PROMOTE_HI_REGS */
1317 m_PPRO,
1319 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1320 m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1322 /* X86_TUNE_ADD_ESP_8 */
1323 m_AMD_MULTIPLE | m_PPRO | m_K6_GEODE | m_386
1324 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1326 /* X86_TUNE_SUB_ESP_4 */
1327 m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1329 /* X86_TUNE_SUB_ESP_8 */
1330 m_AMD_MULTIPLE | m_PPRO | m_386 | m_486
1331 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1333 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1334 for DFmode copies */
1335 ~(m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1336 | m_GENERIC | m_GEODE),
1338 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1339 m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1341 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1342 conflict here between PPro/Pentium4-based chips that treat 128-bit
1343 SSE registers as single units and K8-based chips that split SSE
1344 registers into two 64-bit halves. This knob promotes all store destinations
1345 to 128 bits to allow register renaming on 128-bit SSE units, but usually
1346 results in one extra micro-op on 64-bit SSE units. Experimental results
1347 show that disabling this option on P4 brings over a 20% SPECfp regression,
1348 while enabling it on K8 brings roughly a 2.4% regression that can be partly
1349 masked by careful scheduling of moves. */
1350 m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_AMDFAM10,
1352 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
1353 m_AMDFAM10,
1355 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1356 are resolved on SSE register parts instead of whole registers, so we may
1357 maintain just lower part of scalar values in proper format leaving the
1358 upper part undefined. */
1359 m_ATHLON_K8,
1361 /* X86_TUNE_SSE_TYPELESS_STORES */
1362 m_AMD_MULTIPLE,
1364 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1365 m_PPRO | m_PENT4 | m_NOCONA,
1367 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1368 m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1370 /* X86_TUNE_PROLOGUE_USING_MOVE */
1371 m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,
1373 /* X86_TUNE_EPILOGUE_USING_MOVE */
1374 m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,
1376 /* X86_TUNE_SHIFT1 */
1377 ~m_486,
1379 /* X86_TUNE_USE_FFREEP */
1380 m_AMD_MULTIPLE,
1382 /* X86_TUNE_INTER_UNIT_MOVES */
1383 ~(m_AMD_MULTIPLE | m_GENERIC),
1385 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1386 ~(m_AMDFAM10),
1388 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1389 than 4 branch instructions in the 16 byte window. */
1390 m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1392 /* X86_TUNE_SCHEDULE */
1393 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC,
1395 /* X86_TUNE_USE_BT */
1396 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1398 /* X86_TUNE_USE_INCDEC */
1399 ~(m_PENT4 | m_NOCONA | m_GENERIC),
1401 /* X86_TUNE_PAD_RETURNS */
1402 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1404 /* X86_TUNE_EXT_80387_CONSTANTS */
1405 m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC,
1407 /* X86_TUNE_SHORTEN_X87_SSE */
1408 ~m_K8,
1410 /* X86_TUNE_AVOID_VECTOR_DECODE */
1411 m_K8 | m_GENERIC64,
1413 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode
1414 and SImode multiply, but 386 and 486 do HImode multiply faster. */
1415 ~(m_386 | m_486),
1417 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1418 vector path on AMD machines. */
1419 m_K8 | m_GENERIC64 | m_AMDFAM10,
1421 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1422 machines. */
1423 m_K8 | m_GENERIC64 | m_AMDFAM10,
1425 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
1426 than a MOV. */
1427 m_PENT,
1429 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1430 but one byte longer. */
1431 m_PENT,
1433 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1434 operand that cannot be represented using a modRM byte. The XOR
1435 replacement is long decoded, so this split helps here as well. */
1436 m_K6,
1438 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
1439 from FP to FP. */
1440 m_AMDFAM10 | m_GENERIC,
1442 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1443 from integer to FP. */
1444 m_AMDFAM10,
1446 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1447 with a subsequent conditional jump instruction into a single
1448 compare-and-branch uop. */
1449 m_CORE2,
1450 };
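/* A minimal sketch, assuming the conventional GCC scheme, of how the m_*
   masks above become the boolean ix86_tune_features[] array at
   option-override time; i386.h then wraps individual entries in TARGET_*
   style macros.  */
#if 0
{
  unsigned int ix86_tune_mask = 1u << ix86_tune;
  unsigned int i;

  for (i = 0; i < X86_TUNE_LAST; ++i)
    ix86_tune_features[i]
      = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
}
#endif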
1452 /* Feature tests against the various architecture variations. */
1453 unsigned char ix86_arch_features[X86_ARCH_LAST];
1455 /* Feature tests against the various architecture variations, used to create
1456 ix86_arch_features based on the processor mask. */
1457 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1458 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1459 ~(m_386 | m_486 | m_PENT | m_K6),
1461 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1462 ~m_386,
1464 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1465 ~(m_386 | m_486),
1467 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1468 ~m_386,
1470 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1471 ~m_386,
1474 static const unsigned int x86_accumulate_outgoing_args
1475 = m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1477 static const unsigned int x86_arch_always_fancy_math_387
1478 = m_PENT | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1479 | m_NOCONA | m_CORE2 | m_GENERIC;
1481 static enum stringop_alg stringop_alg = no_stringop;
1483 /* In case the average insn count for a single function invocation is
1484 lower than this constant, emit fast (but longer) prologue and
1485 epilogue code. */
1486 #define FAST_PROLOGUE_INSN_COUNT 20
1488 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1489 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1490 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1491 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1493 /* Array of the smallest class containing reg number REGNO, indexed by
1494 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1496 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1498 /* ax, dx, cx, bx */
1499 AREG, DREG, CREG, BREG,
1500 /* si, di, bp, sp */
1501 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1502 /* FP registers */
1503 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1504 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1505 /* arg pointer */
1506 NON_Q_REGS,
1507 /* flags, fpsr, fpcr, frame */
1508 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1509 /* SSE registers */
1510 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1511 SSE_REGS, SSE_REGS,
1512 /* MMX registers */
1513 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1514 MMX_REGS, MMX_REGS,
1515 /* REX registers */
1516 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1517 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1518 /* SSE REX registers */
1519 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1520 SSE_REGS, SSE_REGS,
1523 /* The "default" register map used in 32bit mode. */
1525 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1527 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1528 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1529 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1530 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1531 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1532 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1533 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1536 /* The "default" register map used in 64bit mode. */
1538 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1540 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1541 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1542 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1543 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1544 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1545 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
1546 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1549 /* Define the register numbers to be used in Dwarf debugging information.
1550 The SVR4 reference port C compiler uses the following register numbers
1551 in its Dwarf output code:
1552 0 for %eax (gcc regno = 0)
1553 1 for %ecx (gcc regno = 2)
1554 2 for %edx (gcc regno = 1)
1555 3 for %ebx (gcc regno = 3)
1556 4 for %esp (gcc regno = 7)
1557 5 for %ebp (gcc regno = 6)
1558 6 for %esi (gcc regno = 4)
1559 7 for %edi (gcc regno = 5)
1560 The following three DWARF register numbers are never generated by
1561 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1562 believes these numbers have these meanings.
1563 8 for %eip (no gcc equivalent)
1564 9 for %eflags (gcc regno = 17)
1565 10 for %trapno (no gcc equivalent)
1566 It is not at all clear how we should number the FP stack registers
1567 for the x86 architecture. If the version of SDB on x86/svr4 were
1568 a bit less brain dead with respect to floating-point then we would
1569 have a precedent to follow with respect to DWARF register numbers
1570 for x86 FP registers, but the SDB on x86/svr4 is so completely
1571 broken with respect to FP registers that it is hardly worth thinking
1572 of it as something to strive for compatibility with.
1573 The version of x86/svr4 SDB I have at the moment does (partially)
1574 seem to believe that DWARF register number 11 is associated with
1575 the x86 register %st(0), but that's about all. Higher DWARF
1576 register numbers don't seem to be associated with anything in
1577 particular, and even for DWARF regno 11, SDB only seems to under-
1578 stand that it should say that a variable lives in %st(0) (when
1579 asked via an `=' command) if we said it was in DWARF regno 11,
1580 but SDB still prints garbage when asked for the value of the
1581 variable in question (via a `/' command).
1582 (Also note that the labels SDB prints for various FP stack regs
1583 when doing an `x' command are all wrong.)
1584 Note that these problems generally don't affect the native SVR4
1585 C compiler because it doesn't allow the use of -O with -g and
1586 because when it is *not* optimizing, it allocates a memory
1587 location for each floating-point variable, and the memory
1588 location is what gets described in the DWARF AT_location
1589 attribute for the variable in question.
1590 Regardless of the severe mental illness of the x86/svr4 SDB, we
1591 do something sensible here and we use the following DWARF
1592 register numbers. Note that these are all stack-top-relative
1593 numbers.
1594 11 for %st(0) (gcc regno = 8)
1595 12 for %st(1) (gcc regno = 9)
1596 13 for %st(2) (gcc regno = 10)
1597 14 for %st(3) (gcc regno = 11)
1598 15 for %st(4) (gcc regno = 12)
1599 16 for %st(5) (gcc regno = 13)
1600 17 for %st(6) (gcc regno = 14)
1601 18 for %st(7) (gcc regno = 15)
1603 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1605 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1606 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1607 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1608 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1609 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1610 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1611 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
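/* Illustrative note (not in the original source): to find the DWARF number
   for a hard register, this table is simply indexed by the gcc regno.
   E.g. %ecx is gcc regno 2 and svr4_dbx_register_map[2] == 1, matching the
   SVR4 numbering listed in the comment above; likewise %st(0) is gcc
   regno 8 and maps to DWARF regno 11.  */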
1614 /* Test and compare insns in i386.md store the information needed to
1615 generate branch and scc insns here. */
1617 rtx ix86_compare_op0 = NULL_RTX;
1618 rtx ix86_compare_op1 = NULL_RTX;
1619 rtx ix86_compare_emitted = NULL_RTX;
1621 /* Define parameter passing and return registers. */
1623 static int const x86_64_int_parameter_registers[6] =
1625 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1628 static int const x86_64_ms_abi_int_parameter_registers[4] =
1630 CX_REG, DX_REG, R8_REG, R9_REG
1633 static int const x86_64_int_return_registers[4] =
1635 AX_REG, DX_REG, DI_REG, SI_REG
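/* Illustrative note (not in the original source): for the SysV x86-64 ABI a
   call such as f (a, b, c) with three integer arguments passes a, b and c in
   %rdi, %rsi and %rdx respectively, following the order of
   x86_64_int_parameter_registers above; an integer return value comes back
   in %rax (AX_REG), the first entry of x86_64_int_return_registers.  */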
1638 /* Define the structure for the machine field in struct function. */
1640 struct stack_local_entry GTY(())
1642 unsigned short mode;
1643 unsigned short n;
1644 rtx rtl;
1645 struct stack_local_entry *next;
1648 /* Structure describing stack frame layout.
1649 Stack grows downward:
1651 [arguments]
1652 <- ARG_POINTER
1653 saved pc
1655 saved frame pointer if frame_pointer_needed
1656 <- HARD_FRAME_POINTER
1657 [saved regs]
1659 [padding0]
1661 [saved SSE regs]
1663 [padding1] \
1665 [va_arg registers] (
1666 > to_allocate <- FRAME_POINTER
1667 [frame] (
1669 [padding2] /
1671 struct ix86_frame
1673 int padding0;
1674 int nsseregs;
1675 int nregs;
1676 int padding1;
1677 int va_arg_size;
1678 HOST_WIDE_INT frame;
1679 int padding2;
1680 int outgoing_arguments_size;
1681 int red_zone_size;
1683 HOST_WIDE_INT to_allocate;
1684 /* The offsets relative to ARG_POINTER. */
1685 HOST_WIDE_INT frame_pointer_offset;
1686 HOST_WIDE_INT hard_frame_pointer_offset;
1687 HOST_WIDE_INT stack_pointer_offset;
1689 /* When save_regs_using_mov is set, emit prologue using
1690 move instead of push instructions. */
1691 bool save_regs_using_mov;
1694 /* Code model option. */
1695 enum cmodel ix86_cmodel;
1696 /* Asm dialect. */
1697 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1698 /* TLS dialects. */
1699 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1701 /* Which unit we are generating floating point math for. */
1702 enum fpmath_unit ix86_fpmath;
1704 /* Which cpu are we scheduling for. */
1705 enum attr_cpu ix86_schedule;
1707 /* Which cpu are we optimizing for. */
1708 enum processor_type ix86_tune;
1710 /* Which instruction set architecture to use. */
1711 enum processor_type ix86_arch;
1713 /* true if sse prefetch instruction is not NOOP. */
1714 int x86_prefetch_sse;
1716 /* ix86_regparm_string as a number */
1717 static int ix86_regparm;
1719 /* -mstackrealign option */
1720 extern int ix86_force_align_arg_pointer;
1721 static const char ix86_force_align_arg_pointer_string[]
1722 = "force_align_arg_pointer";
1724 static rtx (*ix86_gen_leave) (void);
1725 static rtx (*ix86_gen_pop1) (rtx);
1726 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1727 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1728 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx);
1729 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1730 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1731 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1733 /* Preferred alignment for stack boundary in bits. */
1734 unsigned int ix86_preferred_stack_boundary;
1736 /* Alignment for incoming stack boundary in bits specified at
1737 command line. */
1738 static unsigned int ix86_user_incoming_stack_boundary;
1740 /* Default alignment for incoming stack boundary in bits. */
1741 static unsigned int ix86_default_incoming_stack_boundary;
1743 /* Alignment for incoming stack boundary in bits. */
1744 unsigned int ix86_incoming_stack_boundary;
1746 /* The abi used by target. */
1747 enum calling_abi ix86_abi = DEFAULT_ABI;
1749 /* Values 1-5: see jump.c */
1750 int ix86_branch_cost;
1752 /* Calling abi specific va_list type nodes. */
1753 static GTY(()) tree sysv_va_list_type_node;
1754 static GTY(()) tree ms_va_list_type_node;
1756 /* Variables which are this size or smaller are put in the data/bss
1757 or ldata/lbss sections. */
1759 int ix86_section_threshold = 65536;
1761 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1762 char internal_label_prefix[16];
1763 int internal_label_prefix_len;
1765 /* Fence to use after loop using movnt. */
1766 tree x86_mfence;
1768 /* Register class used for passing a given 64-bit part of an argument.
1769 These represent classes as documented by the psABI, with the exception
1770 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
1771 uses SFmode or DFmode moves instead of DImode to avoid reformatting penalties.
1773 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1774 whenever possible (the upper half then contains only padding). */
1775 enum x86_64_reg_class
1777 X86_64_NO_CLASS,
1778 X86_64_INTEGER_CLASS,
1779 X86_64_INTEGERSI_CLASS,
1780 X86_64_SSE_CLASS,
1781 X86_64_SSESF_CLASS,
1782 X86_64_SSEDF_CLASS,
1783 X86_64_SSEUP_CLASS,
1784 X86_64_X87_CLASS,
1785 X86_64_X87UP_CLASS,
1786 X86_64_COMPLEX_X87_CLASS,
1787 X86_64_MEMORY_CLASS
1790 #define MAX_CLASSES 4
1792 /* Table of constants used by fldpi, fldln2, etc.... */
1793 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1794 static bool ext_80387_constants_init = 0;
1797 static struct machine_function * ix86_init_machine_status (void);
1798 static rtx ix86_function_value (const_tree, const_tree, bool);
1799 static int ix86_function_regparm (const_tree, const_tree);
1800 static void ix86_compute_frame_layout (struct ix86_frame *);
1801 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1802 rtx, rtx, int);
1803 static void ix86_add_new_builtins (int);
1805 enum ix86_function_specific_strings
1807 IX86_FUNCTION_SPECIFIC_ARCH,
1808 IX86_FUNCTION_SPECIFIC_TUNE,
1809 IX86_FUNCTION_SPECIFIC_FPMATH,
1810 IX86_FUNCTION_SPECIFIC_MAX
1813 static char *ix86_target_string (int, int, const char *, const char *,
1814 const char *, bool);
1815 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1816 static void ix86_function_specific_save (struct cl_target_option *);
1817 static void ix86_function_specific_restore (struct cl_target_option *);
1818 static void ix86_function_specific_print (FILE *, int,
1819 struct cl_target_option *);
1820 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1821 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1822 static bool ix86_can_inline_p (tree, tree);
1823 static void ix86_set_current_function (tree);
1825 static enum calling_abi ix86_function_abi (const_tree);
1828 /* The svr4 ABI for the i386 says that records and unions are returned
1829 in memory. */
1830 #ifndef DEFAULT_PCC_STRUCT_RETURN
1831 #define DEFAULT_PCC_STRUCT_RETURN 1
1832 #endif
1834 /* Whether -mtune= or -march= were specified */
1835 static int ix86_tune_defaulted;
1836 static int ix86_arch_specified;
1838 /* Bit flags that specify the ISA we are compiling for. */
1839 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1841 /* A mask of ix86_isa_flags that includes bit X if X
1842 was set or cleared on the command line. */
1843 static int ix86_isa_flags_explicit;
1845 /* Define a set of ISAs which are available when a given ISA is
1846 enabled. MMX and SSE ISAs are handled separately. */
1848 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1849 #define OPTION_MASK_ISA_3DNOW_SET \
1850 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1852 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1853 #define OPTION_MASK_ISA_SSE2_SET \
1854 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1855 #define OPTION_MASK_ISA_SSE3_SET \
1856 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1857 #define OPTION_MASK_ISA_SSSE3_SET \
1858 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1859 #define OPTION_MASK_ISA_SSE4_1_SET \
1860 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1861 #define OPTION_MASK_ISA_SSE4_2_SET \
1862 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1863 #define OPTION_MASK_ISA_AVX_SET \
1864 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1865 #define OPTION_MASK_ISA_FMA_SET \
1866 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1868 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1869 as -msse4.2. */
1870 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1872 #define OPTION_MASK_ISA_SSE4A_SET \
1873 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1874 #define OPTION_MASK_ISA_SSE5_SET \
1875 (OPTION_MASK_ISA_SSE5 | OPTION_MASK_ISA_SSE4A_SET)
1877 /* AES and PCLMUL need SSE2 because they use xmm registers */
1878 #define OPTION_MASK_ISA_AES_SET \
1879 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1880 #define OPTION_MASK_ISA_PCLMUL_SET \
1881 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1883 #define OPTION_MASK_ISA_ABM_SET \
1884 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1885 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1886 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1887 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
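/* Illustrative example (not in the original source): the *_SET masks chain
   transitively, so enabling a later ISA also enables everything it depends
   on.  Under the definitions above,

     OPTION_MASK_ISA_SSE4_1_SET
       == OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3
	| OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE

   so "-msse4.1" turns on SSSE3, SSE3, SSE2 and SSE as well.  */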
1889 /* Define a set of ISAs which aren't available when a given ISA is
1890 disabled. MMX and SSE ISAs are handled separately. */
1892 #define OPTION_MASK_ISA_MMX_UNSET \
1893 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1894 #define OPTION_MASK_ISA_3DNOW_UNSET \
1895 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1896 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1898 #define OPTION_MASK_ISA_SSE_UNSET \
1899 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1900 #define OPTION_MASK_ISA_SSE2_UNSET \
1901 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
1902 #define OPTION_MASK_ISA_SSE3_UNSET \
1903 (OPTION_MASK_ISA_SSE3 \
1904 | OPTION_MASK_ISA_SSSE3_UNSET \
1905 | OPTION_MASK_ISA_SSE4A_UNSET )
1906 #define OPTION_MASK_ISA_SSSE3_UNSET \
1907 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
1908 #define OPTION_MASK_ISA_SSE4_1_UNSET \
1909 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
1910 #define OPTION_MASK_ISA_SSE4_2_UNSET \
1911 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
1912 #define OPTION_MASK_ISA_AVX_UNSET \
1913 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET)
1914 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
1916 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
1917 as -mno-sse4.1. */
1918 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
1920 #define OPTION_MASK_ISA_SSE4A_UNSET \
1921 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE5_UNSET)
1922 #define OPTION_MASK_ISA_SSE5_UNSET OPTION_MASK_ISA_SSE5
1923 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
1924 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
1925 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
1926 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
1927 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
1928 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
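/* Illustrative example (not in the original source): the *_UNSET masks chain
   in the opposite direction, so disabling an ISA also disables everything
   that depends on it.  Under the definitions above,
   OPTION_MASK_ISA_SSE2_UNSET pulls in SSE3, SSSE3, SSE4.1, SSE4.2, AVX, FMA,
   SSE4A and SSE5, so "-mno-sse2" clears all of those as well.  */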
1930 /* Vectorization library interface and handlers. */
1931 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
1932 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
1933 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
1935 /* Processor target table, indexed by processor number */
1936 struct ptt
1938 const struct processor_costs *cost; /* Processor costs */
1939 const int align_loop; /* Default alignments. */
1940 const int align_loop_max_skip;
1941 const int align_jump;
1942 const int align_jump_max_skip;
1943 const int align_func;
1946 static const struct ptt processor_target_table[PROCESSOR_max] =
1948 {&i386_cost, 4, 3, 4, 3, 4},
1949 {&i486_cost, 16, 15, 16, 15, 16},
1950 {&pentium_cost, 16, 7, 16, 7, 16},
1951 {&pentiumpro_cost, 16, 15, 16, 10, 16},
1952 {&geode_cost, 0, 0, 0, 0, 0},
1953 {&k6_cost, 32, 7, 32, 7, 32},
1954 {&athlon_cost, 16, 7, 16, 7, 16},
1955 {&pentium4_cost, 0, 0, 0, 0, 0},
1956 {&k8_cost, 16, 7, 16, 7, 16},
1957 {&nocona_cost, 0, 0, 0, 0, 0},
1958 {&core2_cost, 16, 10, 16, 10, 16},
1959 {&generic32_cost, 16, 7, 16, 7, 16},
1960 {&generic64_cost, 16, 10, 16, 10, 16},
1961 {&amdfam10_cost, 32, 24, 32, 7, 32}
1964 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
1966 "generic",
1967 "i386",
1968 "i486",
1969 "pentium",
1970 "pentium-mmx",
1971 "pentiumpro",
1972 "pentium2",
1973 "pentium3",
1974 "pentium4",
1975 "pentium-m",
1976 "prescott",
1977 "nocona",
1978 "core2",
1979 "geode",
1980 "k6",
1981 "k6-2",
1982 "k6-3",
1983 "athlon",
1984 "athlon-4",
1985 "k8",
1986 "amdfam10"
1989 /* Implement TARGET_HANDLE_OPTION. */
1991 static bool
1992 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1994 switch (code)
1996 case OPT_mmmx:
1997 if (value)
1999 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2000 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2002 else
2004 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2005 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2007 return true;
2009 case OPT_m3dnow:
2010 if (value)
2012 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2013 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2015 else
2017 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2018 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2020 return true;
2022 case OPT_m3dnowa:
2023 return false;
2025 case OPT_msse:
2026 if (value)
2028 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2029 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2031 else
2033 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2034 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2036 return true;
2038 case OPT_msse2:
2039 if (value)
2041 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2042 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2044 else
2046 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2047 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2049 return true;
2051 case OPT_msse3:
2052 if (value)
2054 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2055 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2057 else
2059 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2060 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2062 return true;
2064 case OPT_mssse3:
2065 if (value)
2067 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2068 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2070 else
2072 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2073 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2075 return true;
2077 case OPT_msse4_1:
2078 if (value)
2080 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2081 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2083 else
2085 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2086 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2088 return true;
2090 case OPT_msse4_2:
2091 if (value)
2093 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2094 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2096 else
2098 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2099 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2101 return true;
2103 case OPT_mavx:
2104 if (value)
2106 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2107 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2109 else
2111 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2112 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2114 return true;
2116 case OPT_mfma:
2117 if (value)
2119 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2120 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2122 else
2124 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2125 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2127 return true;
2129 case OPT_msse4:
2130 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2131 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2132 return true;
2134 case OPT_mno_sse4:
2135 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2136 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2137 return true;
2139 case OPT_msse4a:
2140 if (value)
2142 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2143 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2145 else
2147 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2148 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2150 return true;
2152 case OPT_msse5:
2153 if (value)
2155 ix86_isa_flags |= OPTION_MASK_ISA_SSE5_SET;
2156 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_SET;
2158 else
2160 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE5_UNSET;
2161 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_UNSET;
2163 return true;
2165 case OPT_mabm:
2166 if (value)
2168 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2169 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2171 else
2173 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2174 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2176 return true;
2178 case OPT_mpopcnt:
2179 if (value)
2181 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2182 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2184 else
2186 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2187 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2189 return true;
2191 case OPT_msahf:
2192 if (value)
2194 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2195 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2197 else
2199 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2200 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2202 return true;
2204 case OPT_mcx16:
2205 if (value)
2207 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2208 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2210 else
2212 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2213 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2215 return true;
2217 case OPT_maes:
2218 if (value)
2220 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2221 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2223 else
2225 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2226 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2228 return true;
2230 case OPT_mpclmul:
2231 if (value)
2233 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2234 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2236 else
2238 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2239 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2241 return true;
2243 default:
2244 return true;
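/* Illustrative example (not in the original source): each case above follows
   the same pattern.  Handling "-mno-sse3", for instance, reaches OPT_msse3
   with value == 0 and does

     ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
     ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;

   which clears SSE3 and every ISA that implies it, while recording in
   ix86_isa_flags_explicit that the user made an explicit choice so that a
   later -march= default does not silently re-enable those bits.  */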
2248 /* Return a string that documents the current -m options. The caller is
2249 responsible for freeing the string. */
2251 static char *
2252 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2253 const char *fpmath, bool add_nl_p)
2255 struct ix86_target_opts
2257 const char *option; /* option string */
2258 int mask; /* isa mask options */
2261 /* This table is ordered so that options like -msse5 or -msse4.2 that imply
2262 preceding options will match those first. */
2263 static struct ix86_target_opts isa_opts[] =
2265 { "-m64", OPTION_MASK_ISA_64BIT },
2266 { "-msse5", OPTION_MASK_ISA_SSE5 },
2267 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2268 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2269 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2270 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2271 { "-msse3", OPTION_MASK_ISA_SSE3 },
2272 { "-msse2", OPTION_MASK_ISA_SSE2 },
2273 { "-msse", OPTION_MASK_ISA_SSE },
2274 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2275 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2276 { "-mmmx", OPTION_MASK_ISA_MMX },
2277 { "-mabm", OPTION_MASK_ISA_ABM },
2278 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2279 { "-maes", OPTION_MASK_ISA_AES },
2280 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2283 /* Flag options. */
2284 static struct ix86_target_opts flag_opts[] =
2286 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2287 { "-m80387", MASK_80387 },
2288 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2289 { "-malign-double", MASK_ALIGN_DOUBLE },
2290 { "-mcld", MASK_CLD },
2291 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2292 { "-mieee-fp", MASK_IEEE_FP },
2293 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2294 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2295 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2296 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2297 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2298 { "-mno-fused-madd", MASK_NO_FUSED_MADD },
2299 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2300 { "-mno-red-zone", MASK_NO_RED_ZONE },
2301 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2302 { "-mrecip", MASK_RECIP },
2303 { "-mrtd", MASK_RTD },
2304 { "-msseregparm", MASK_SSEREGPARM },
2305 { "-mstack-arg-probe", MASK_STACK_PROBE },
2306 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2309 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2311 char isa_other[40];
2312 char target_other[40];
2313 unsigned num = 0;
2314 unsigned i, j;
2315 char *ret;
2316 char *ptr;
2317 size_t len;
2318 size_t line_len;
2319 size_t sep_len;
2321 memset (opts, '\0', sizeof (opts));
2323 /* Add -march= option. */
2324 if (arch)
2326 opts[num][0] = "-march=";
2327 opts[num++][1] = arch;
2330 /* Add -mtune= option. */
2331 if (tune)
2333 opts[num][0] = "-mtune=";
2334 opts[num++][1] = tune;
2337 /* Pick out the options in isa options. */
2338 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2340 if ((isa & isa_opts[i].mask) != 0)
2342 opts[num++][0] = isa_opts[i].option;
2343 isa &= ~ isa_opts[i].mask;
2347 if (isa && add_nl_p)
2349 opts[num++][0] = isa_other;
2350 sprintf (isa_other, "(other isa: 0x%x)", isa);
2353 /* Add flag options. */
2354 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2356 if ((flags & flag_opts[i].mask) != 0)
2358 opts[num++][0] = flag_opts[i].option;
2359 flags &= ~ flag_opts[i].mask;
2363 if (flags && add_nl_p)
2365 opts[num++][0] = target_other;
2366 sprintf (target_other, "(other flags: 0x%x)", flags);
2369 /* Add -fpmath= option. */
2370 if (fpmath)
2372 opts[num][0] = "-mfpmath=";
2373 opts[num++][1] = fpmath;
2376 /* Any options? */
2377 if (num == 0)
2378 return NULL;
2380 gcc_assert (num < ARRAY_SIZE (opts));
2382 /* Size the string. */
2383 len = 0;
2384 sep_len = (add_nl_p) ? 3 : 1;
2385 for (i = 0; i < num; i++)
2387 len += sep_len;
2388 for (j = 0; j < 2; j++)
2389 if (opts[i][j])
2390 len += strlen (opts[i][j]);
2393 /* Build the string. */
2394 ret = ptr = (char *) xmalloc (len);
2395 line_len = 0;
2397 for (i = 0; i < num; i++)
2399 size_t len2[2];
2401 for (j = 0; j < 2; j++)
2402 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2404 if (i != 0)
2406 *ptr++ = ' ';
2407 line_len++;
2409 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2411 *ptr++ = '\\';
2412 *ptr++ = '\n';
2413 line_len = 0;
2417 for (j = 0; j < 2; j++)
2418 if (opts[i][j])
2420 memcpy (ptr, opts[i][j], len2[j]);
2421 ptr += len2[j];
2422 line_len += len2[j];
2426 *ptr = '\0';
2427 gcc_assert (ret + len >= ptr);
2429 return ret;
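/* Illustrative sketch (not part of the original file): a typical call,
   mirroring the one in ix86_debug_options below, and a rough idea of the
   string it builds.  The exact option set depends on the masks passed in. */
#if 0
  char *opts = ix86_target_string (ix86_isa_flags, target_flags,
				   "k8", "generic", "sse", true);
  /* opts might read roughly:
       -march=k8 -mtune=generic -m64 -msse2 -msse -mmmx -mfpmath=sse
     with a "\<newline>" continuation inserted whenever a line would exceed
     70 columns.  The caller must free the returned string.  */
  free (opts);
#endif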
2432 /* Function that is callable from the debugger to print the current
2433 options. */
2434 void
2435 ix86_debug_options (void)
2437 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2438 ix86_arch_string, ix86_tune_string,
2439 ix86_fpmath_string, true);
2441 if (opts)
2443 fprintf (stderr, "%s\n\n", opts);
2444 free (opts);
2446 else
2447 fprintf (stderr, "<no options>\n\n");
2449 return;
2452 /* Sometimes certain combinations of command options do not make
2453 sense on a particular target machine. You can define a macro
2454 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2455 defined, is executed once just after all the command options have
2456 been parsed.
2458 Don't use this macro to turn on various extra optimizations for
2459 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2461 void
2462 override_options (bool main_args_p)
2464 int i;
2465 unsigned int ix86_arch_mask, ix86_tune_mask;
2466 const char *prefix;
2467 const char *suffix;
2468 const char *sw;
2470 /* Comes from final.c -- no real reason to change it. */
2471 #define MAX_CODE_ALIGN 16
2473 enum pta_flags
2475 PTA_SSE = 1 << 0,
2476 PTA_SSE2 = 1 << 1,
2477 PTA_SSE3 = 1 << 2,
2478 PTA_MMX = 1 << 3,
2479 PTA_PREFETCH_SSE = 1 << 4,
2480 PTA_3DNOW = 1 << 5,
2481 PTA_3DNOW_A = 1 << 6,
2482 PTA_64BIT = 1 << 7,
2483 PTA_SSSE3 = 1 << 8,
2484 PTA_CX16 = 1 << 9,
2485 PTA_POPCNT = 1 << 10,
2486 PTA_ABM = 1 << 11,
2487 PTA_SSE4A = 1 << 12,
2488 PTA_NO_SAHF = 1 << 13,
2489 PTA_SSE4_1 = 1 << 14,
2490 PTA_SSE4_2 = 1 << 15,
2491 PTA_SSE5 = 1 << 16,
2492 PTA_AES = 1 << 17,
2493 PTA_PCLMUL = 1 << 18,
2494 PTA_AVX = 1 << 19,
2495 PTA_FMA = 1 << 20
2498 static struct pta
2500 const char *const name; /* processor name or nickname. */
2501 const enum processor_type processor;
2502 const enum attr_cpu schedule;
2503 const unsigned /*enum pta_flags*/ flags;
2505 const processor_alias_table[] =
2507 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2508 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2509 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2510 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2511 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2512 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2513 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2514 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2515 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2516 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2517 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2518 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2519 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2520 PTA_MMX | PTA_SSE},
2521 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2522 PTA_MMX | PTA_SSE},
2523 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2524 PTA_MMX | PTA_SSE | PTA_SSE2},
2525 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2526 PTA_MMX | PTA_SSE | PTA_SSE2},
2527 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2528 PTA_MMX | PTA_SSE | PTA_SSE2},
2529 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2530 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2531 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2532 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2533 | PTA_CX16 | PTA_NO_SAHF},
2534 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2535 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2536 | PTA_SSSE3 | PTA_CX16},
2537 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2538 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2539 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2540 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2541 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2542 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2543 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2544 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2545 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2546 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2547 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2548 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2549 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2550 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2551 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2552 {"x86-64", PROCESSOR_K8, CPU_K8,
2553 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2554 {"k8", PROCESSOR_K8, CPU_K8,
2555 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2556 | PTA_SSE2 | PTA_NO_SAHF},
2557 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2558 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2559 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2560 {"opteron", PROCESSOR_K8, CPU_K8,
2561 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2562 | PTA_SSE2 | PTA_NO_SAHF},
2563 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2564 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2565 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2566 {"athlon64", PROCESSOR_K8, CPU_K8,
2567 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2568 | PTA_SSE2 | PTA_NO_SAHF},
2569 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2570 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2571 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2572 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2573 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2574 | PTA_SSE2 | PTA_NO_SAHF},
2575 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2576 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2577 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2578 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2579 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2580 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2581 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2582 0 /* flags are only used for -march switch. */ },
2583 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2584 PTA_64BIT /* flags are only used for -march switch. */ },
2587 int const pta_size = ARRAY_SIZE (processor_alias_table);
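/* Illustrative example (not in the original source): the table above is
   consulted by the -march= and -mtune= handling further down.  With
   "-march=core2", for instance, ix86_arch becomes PROCESSOR_CORE2,
   ix86_schedule becomes CPU_CORE2, and the PTA_MMX ... PTA_SSSE3 and
   PTA_CX16 bits turn on the corresponding OPTION_MASK_ISA_* flags, unless
   the user already set or cleared them explicitly on the command line
   (tracked in ix86_isa_flags_explicit).  */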
2589 /* Set up prefix/suffix so the error messages refer to either the command
2590 line argument, or the attribute(target). */
2591 if (main_args_p)
2593 prefix = "-m";
2594 suffix = "";
2595 sw = "switch";
2597 else
2599 prefix = "option(\"";
2600 suffix = "\")";
2601 sw = "attribute";
2604 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2605 SUBTARGET_OVERRIDE_OPTIONS;
2606 #endif
2608 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2609 SUBSUBTARGET_OVERRIDE_OPTIONS;
2610 #endif
2612 /* -fPIC is the default for x86_64. */
2613 if (TARGET_MACHO && TARGET_64BIT)
2614 flag_pic = 2;
2616 /* Set the default values for switches whose default depends on TARGET_64BIT
2617 in case they weren't overwritten by command line options. */
2618 if (TARGET_64BIT)
2620 /* Mach-O doesn't support omitting the frame pointer for now. */
2621 if (flag_omit_frame_pointer == 2)
2622 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2623 if (flag_asynchronous_unwind_tables == 2)
2624 flag_asynchronous_unwind_tables = 1;
2625 if (flag_pcc_struct_return == 2)
2626 flag_pcc_struct_return = 0;
2628 else
2630 if (flag_omit_frame_pointer == 2)
2631 flag_omit_frame_pointer = 0;
2632 if (flag_asynchronous_unwind_tables == 2)
2633 flag_asynchronous_unwind_tables = 0;
2634 if (flag_pcc_struct_return == 2)
2635 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2638 /* Need to check -mtune=generic first. */
2639 if (ix86_tune_string)
2641 if (!strcmp (ix86_tune_string, "generic")
2642 || !strcmp (ix86_tune_string, "i686")
2643 /* As special support for cross compilers we read -mtune=native
2644 as -mtune=generic. With native compilers we won't see the
2645 -mtune=native, as it was changed by the driver. */
2646 || !strcmp (ix86_tune_string, "native"))
2648 if (TARGET_64BIT)
2649 ix86_tune_string = "generic64";
2650 else
2651 ix86_tune_string = "generic32";
2653 /* If this call is for setting the option attribute, allow the
2654 generic32/generic64 that was previously set. */
2655 else if (!main_args_p
2656 && (!strcmp (ix86_tune_string, "generic32")
2657 || !strcmp (ix86_tune_string, "generic64")))
2659 else if (!strncmp (ix86_tune_string, "generic", 7))
2660 error ("bad value (%s) for %stune=%s %s",
2661 ix86_tune_string, prefix, suffix, sw);
2663 else
2665 if (ix86_arch_string)
2666 ix86_tune_string = ix86_arch_string;
2667 if (!ix86_tune_string)
2669 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2670 ix86_tune_defaulted = 1;
2673 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2674 need to use a sensible tune option. */
2675 if (!strcmp (ix86_tune_string, "generic")
2676 || !strcmp (ix86_tune_string, "x86-64")
2677 || !strcmp (ix86_tune_string, "i686"))
2679 if (TARGET_64BIT)
2680 ix86_tune_string = "generic64";
2681 else
2682 ix86_tune_string = "generic32";
2685 if (ix86_stringop_string)
2687 if (!strcmp (ix86_stringop_string, "rep_byte"))
2688 stringop_alg = rep_prefix_1_byte;
2689 else if (!strcmp (ix86_stringop_string, "libcall"))
2690 stringop_alg = libcall;
2691 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2692 stringop_alg = rep_prefix_4_byte;
2693 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2694 && TARGET_64BIT)
2695 /* rep; movq isn't available in 32-bit code. */
2696 stringop_alg = rep_prefix_8_byte;
2697 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2698 stringop_alg = loop_1_byte;
2699 else if (!strcmp (ix86_stringop_string, "loop"))
2700 stringop_alg = loop;
2701 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2702 stringop_alg = unrolled_loop;
2703 else
2704 error ("bad value (%s) for %sstringop-strategy=%s %s",
2705 ix86_stringop_string, prefix, suffix, sw);
2707 if (!strcmp (ix86_tune_string, "x86-64"))
2708 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2709 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2710 prefix, suffix, prefix, suffix, prefix, suffix);
2712 if (!ix86_arch_string)
2713 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
2714 else
2715 ix86_arch_specified = 1;
2717 if (!strcmp (ix86_arch_string, "generic"))
2718 error ("generic CPU can be used only for %stune=%s %s",
2719 prefix, suffix, sw);
2720 if (!strncmp (ix86_arch_string, "generic", 7))
2721 error ("bad value (%s) for %sarch=%s %s",
2722 ix86_arch_string, prefix, suffix, sw);
2724 /* Validate -mabi= value. */
2725 if (ix86_abi_string)
2727 if (strcmp (ix86_abi_string, "sysv") == 0)
2728 ix86_abi = SYSV_ABI;
2729 else if (strcmp (ix86_abi_string, "ms") == 0)
2730 ix86_abi = MS_ABI;
2731 else
2732 error ("unknown ABI (%s) for %sabi=%s %s",
2733 ix86_abi_string, prefix, suffix, sw);
2736 if (ix86_cmodel_string != 0)
2738 if (!strcmp (ix86_cmodel_string, "small"))
2739 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2740 else if (!strcmp (ix86_cmodel_string, "medium"))
2741 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2742 else if (!strcmp (ix86_cmodel_string, "large"))
2743 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2744 else if (flag_pic)
2745 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2746 else if (!strcmp (ix86_cmodel_string, "32"))
2747 ix86_cmodel = CM_32;
2748 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2749 ix86_cmodel = CM_KERNEL;
2750 else
2751 error ("bad value (%s) for %scmodel=%s %s",
2752 ix86_cmodel_string, prefix, suffix, sw);
2754 else
2756 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2757 use of rip-relative addressing. This eliminates fixups that
2758 would otherwise be needed if this object is to be placed in a
2759 DLL, and is essentially just as efficient as direct addressing. */
2760 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2761 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2762 else if (TARGET_64BIT)
2763 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2764 else
2765 ix86_cmodel = CM_32;
2767 if (ix86_asm_string != 0)
2769 if (! TARGET_MACHO
2770 && !strcmp (ix86_asm_string, "intel"))
2771 ix86_asm_dialect = ASM_INTEL;
2772 else if (!strcmp (ix86_asm_string, "att"))
2773 ix86_asm_dialect = ASM_ATT;
2774 else
2775 error ("bad value (%s) for %sasm=%s %s",
2776 ix86_asm_string, prefix, suffix, sw);
2778 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2779 error ("code model %qs not supported in the %s bit mode",
2780 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2781 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2782 sorry ("%i-bit mode not compiled in",
2783 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2785 for (i = 0; i < pta_size; i++)
2786 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2788 ix86_schedule = processor_alias_table[i].schedule;
2789 ix86_arch = processor_alias_table[i].processor;
2790 /* Default cpu tuning to the architecture. */
2791 ix86_tune = ix86_arch;
2793 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2794 error ("CPU you selected does not support x86-64 "
2795 "instruction set");
2797 if (processor_alias_table[i].flags & PTA_MMX
2798 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2799 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2800 if (processor_alias_table[i].flags & PTA_3DNOW
2801 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2802 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2803 if (processor_alias_table[i].flags & PTA_3DNOW_A
2804 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2805 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2806 if (processor_alias_table[i].flags & PTA_SSE
2807 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2808 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2809 if (processor_alias_table[i].flags & PTA_SSE2
2810 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2811 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2812 if (processor_alias_table[i].flags & PTA_SSE3
2813 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2814 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2815 if (processor_alias_table[i].flags & PTA_SSSE3
2816 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2817 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2818 if (processor_alias_table[i].flags & PTA_SSE4_1
2819 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2820 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2821 if (processor_alias_table[i].flags & PTA_SSE4_2
2822 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2823 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2824 if (processor_alias_table[i].flags & PTA_AVX
2825 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2826 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2827 if (processor_alias_table[i].flags & PTA_FMA
2828 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2829 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2830 if (processor_alias_table[i].flags & PTA_SSE4A
2831 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
2832 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
2833 if (processor_alias_table[i].flags & PTA_SSE5
2834 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE5))
2835 ix86_isa_flags |= OPTION_MASK_ISA_SSE5;
2836 if (processor_alias_table[i].flags & PTA_ABM
2837 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
2838 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
2839 if (processor_alias_table[i].flags & PTA_CX16
2840 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
2841 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
2842 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
2843 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
2844 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
2845 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
2846 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
2847 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
2848 if (processor_alias_table[i].flags & PTA_AES
2849 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
2850 ix86_isa_flags |= OPTION_MASK_ISA_AES;
2851 if (processor_alias_table[i].flags & PTA_PCLMUL
2852 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
2853 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
2854 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
2855 x86_prefetch_sse = true;
2857 break;
2860 if (i == pta_size)
2861 error ("bad value (%s) for %sarch=%s %s",
2862 ix86_arch_string, prefix, suffix, sw);
2864 ix86_arch_mask = 1u << ix86_arch;
2865 for (i = 0; i < X86_ARCH_LAST; ++i)
2866 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
2868 for (i = 0; i < pta_size; i++)
2869 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
2871 ix86_schedule = processor_alias_table[i].schedule;
2872 ix86_tune = processor_alias_table[i].processor;
2873 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2875 if (ix86_tune_defaulted)
2877 ix86_tune_string = "x86-64";
2878 for (i = 0; i < pta_size; i++)
2879 if (! strcmp (ix86_tune_string,
2880 processor_alias_table[i].name))
2881 break;
2882 ix86_schedule = processor_alias_table[i].schedule;
2883 ix86_tune = processor_alias_table[i].processor;
2885 else
2886 error ("CPU you selected does not support x86-64 "
2887 "instruction set");
2889 /* Intel CPUs have always interpreted SSE prefetch instructions as
2890 NOPs; so, we can enable SSE prefetch instructions even when
2891 -mtune (rather than -march) points us to a processor that has them.
2892 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
2893 higher processors. */
2894 if (TARGET_CMOVE
2895 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
2896 x86_prefetch_sse = true;
2897 break;
2899 if (i == pta_size)
2900 error ("bad value (%s) for %stune=%s %s",
2901 ix86_tune_string, prefix, suffix, sw);
2903 ix86_tune_mask = 1u << ix86_tune;
2904 for (i = 0; i < X86_TUNE_LAST; ++i)
2905 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
2907 if (optimize_size)
2908 ix86_cost = &ix86_size_cost;
2909 else
2910 ix86_cost = processor_target_table[ix86_tune].cost;
2912 /* Arrange to set up i386_stack_locals for all functions. */
2913 init_machine_status = ix86_init_machine_status;
2915 /* Validate -mregparm= value. */
2916 if (ix86_regparm_string)
2918 if (TARGET_64BIT)
2919 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
2920 i = atoi (ix86_regparm_string);
2921 if (i < 0 || i > REGPARM_MAX)
2922 error ("%sregparm=%d%s is not between 0 and %d",
2923 prefix, i, suffix, REGPARM_MAX);
2924 else
2925 ix86_regparm = i;
2927 if (TARGET_64BIT)
2928 ix86_regparm = REGPARM_MAX;
2930 /* If the user has provided any of the -malign-* options,
2931 warn and use that value only if -falign-* is not set.
2932 Remove this code in GCC 3.2 or later. */
2933 if (ix86_align_loops_string)
2935 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
2936 prefix, suffix, suffix);
2937 if (align_loops == 0)
2939 i = atoi (ix86_align_loops_string);
2940 if (i < 0 || i > MAX_CODE_ALIGN)
2941 error ("%salign-loops=%d%s is not between 0 and %d",
2942 prefix, i, suffix, MAX_CODE_ALIGN);
2943 else
2944 align_loops = 1 << i;
2948 if (ix86_align_jumps_string)
2950 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
2951 prefix, suffix, suffix);
2952 if (align_jumps == 0)
2954 i = atoi (ix86_align_jumps_string);
2955 if (i < 0 || i > MAX_CODE_ALIGN)
2956 error ("%salign-loops=%d%s is not between 0 and %d",
2957 prefix, i, suffix, MAX_CODE_ALIGN);
2958 else
2959 align_jumps = 1 << i;
2963 if (ix86_align_funcs_string)
2965 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
2966 prefix, suffix, suffix);
2967 if (align_functions == 0)
2969 i = atoi (ix86_align_funcs_string);
2970 if (i < 0 || i > MAX_CODE_ALIGN)
2971 error ("%salign-loops=%d%s is not between 0 and %d",
2972 prefix, i, suffix, MAX_CODE_ALIGN);
2973 else
2974 align_functions = 1 << i;
2978 /* Default align_* from the processor table. */
2979 if (align_loops == 0)
2981 align_loops = processor_target_table[ix86_tune].align_loop;
2982 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2984 if (align_jumps == 0)
2986 align_jumps = processor_target_table[ix86_tune].align_jump;
2987 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2989 if (align_functions == 0)
2991 align_functions = processor_target_table[ix86_tune].align_func;
2994 /* Validate -mbranch-cost= value, or provide default. */
2995 ix86_branch_cost = ix86_cost->branch_cost;
2996 if (ix86_branch_cost_string)
2998 i = atoi (ix86_branch_cost_string);
2999 if (i < 0 || i > 5)
3000 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3001 else
3002 ix86_branch_cost = i;
3004 if (ix86_section_threshold_string)
3006 i = atoi (ix86_section_threshold_string);
3007 if (i < 0)
3008 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3009 else
3010 ix86_section_threshold = i;
3013 if (ix86_tls_dialect_string)
3015 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3016 ix86_tls_dialect = TLS_DIALECT_GNU;
3017 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3018 ix86_tls_dialect = TLS_DIALECT_GNU2;
3019 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
3020 ix86_tls_dialect = TLS_DIALECT_SUN;
3021 else
3022 error ("bad value (%s) for %stls-dialect=%s %s",
3023 ix86_tls_dialect_string, prefix, suffix, sw);
3026 if (ix87_precision_string)
3028 i = atoi (ix87_precision_string);
3029 if (i != 32 && i != 64 && i != 80)
3030 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3033 if (TARGET_64BIT)
3035 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3037 /* Enable by default the SSE and MMX builtins. Do allow the user to
3038 explicitly disable any of these. In particular, disabling SSE and
3039 MMX for kernel code is extremely useful. */
3040 if (!ix86_arch_specified)
3041 ix86_isa_flags
3042 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3043 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3045 if (TARGET_RTD)
3046 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3048 else
3050 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3052 if (!ix86_arch_specified)
3053 ix86_isa_flags
3054 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3056 /* i386 ABI does not specify red zone. It still makes sense to use it
3057 when the programmer takes care to keep the stack from being destroyed. */
3058 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3059 target_flags |= MASK_NO_RED_ZONE;
3062 /* Keep nonleaf frame pointers. */
3063 if (flag_omit_frame_pointer)
3064 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3065 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3066 flag_omit_frame_pointer = 1;
3068 /* If we're doing fast math, we don't care about comparison order
3069 wrt NaNs. This lets us use a shorter comparison sequence. */
3070 if (flag_finite_math_only)
3071 target_flags &= ~MASK_IEEE_FP;
3073 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3074 since the insns won't need emulation. */
3075 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3076 target_flags &= ~MASK_NO_FANCY_MATH_387;
3078 /* Likewise, if the target doesn't have a 387, or we've specified
3079 software floating point, don't use 387 inline intrinsics. */
3080 if (!TARGET_80387)
3081 target_flags |= MASK_NO_FANCY_MATH_387;
3083 /* Turn on MMX builtins for -msse. */
3084 if (TARGET_SSE)
3086 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3087 x86_prefetch_sse = true;
3090 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3091 if (TARGET_SSE4_2 || TARGET_ABM)
3092 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3094 /* Validate -mpreferred-stack-boundary= value or default it to
3095 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3096 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3097 if (ix86_preferred_stack_boundary_string)
3099 i = atoi (ix86_preferred_stack_boundary_string);
3100 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3101 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3102 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3103 else
3104 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
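/* Illustrative example (not in the original source): the boundary option is
   also given as a power of two, but converted to bits here.  For instance
   "-mpreferred-stack-boundary=4" gives (1 << 4) * BITS_PER_UNIT = 128 bits,
   i.e. a 16-byte-aligned stack.  */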
3107 /* Set the default value for -mstackrealign. */
3108 if (ix86_force_align_arg_pointer == -1)
3109 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3111 /* Validate -mincoming-stack-boundary= value or default it to
3112 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3113 if (ix86_force_align_arg_pointer)
3114 ix86_default_incoming_stack_boundary = MIN_STACK_BOUNDARY;
3115 else
3116 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3117 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3118 if (ix86_incoming_stack_boundary_string)
3120 i = atoi (ix86_incoming_stack_boundary_string);
3121 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3122 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3123 i, TARGET_64BIT ? 4 : 2);
3124 else
3126 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3127 ix86_incoming_stack_boundary
3128 = ix86_user_incoming_stack_boundary;
3132 /* Accept -msseregparm only if at least SSE support is enabled. */
3133 if (TARGET_SSEREGPARM
3134 && ! TARGET_SSE)
3135 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3137 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3138 if (ix86_fpmath_string != 0)
3140 if (! strcmp (ix86_fpmath_string, "387"))
3141 ix86_fpmath = FPMATH_387;
3142 else if (! strcmp (ix86_fpmath_string, "sse"))
3144 if (!TARGET_SSE)
3146 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3147 ix86_fpmath = FPMATH_387;
3149 else
3150 ix86_fpmath = FPMATH_SSE;
3152 else if (! strcmp (ix86_fpmath_string, "387,sse")
3153 || ! strcmp (ix86_fpmath_string, "387+sse")
3154 || ! strcmp (ix86_fpmath_string, "sse,387")
3155 || ! strcmp (ix86_fpmath_string, "sse+387")
3156 || ! strcmp (ix86_fpmath_string, "both"))
3158 if (!TARGET_SSE)
3160 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3161 ix86_fpmath = FPMATH_387;
3163 else if (!TARGET_80387)
3165 warning (0, "387 instruction set disabled, using SSE arithmetics");
3166 ix86_fpmath = FPMATH_SSE;
3168 else
3169 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3171 else
3172 error ("bad value (%s) for %sfpmath=%s %s",
3173 ix86_fpmath_string, prefix, suffix, sw);
3176 /* If the i387 is disabled, then do not return values in it. */
3177 if (!TARGET_80387)
3178 target_flags &= ~MASK_FLOAT_RETURNS;
3180 /* Use external vectorized library in vectorizing intrinsics. */
3181 if (ix86_veclibabi_string)
3183 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3184 ix86_veclib_handler = ix86_veclibabi_svml;
3185 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3186 ix86_veclib_handler = ix86_veclibabi_acml;
3187 else
3188 error ("unknown vectorization library ABI type (%s) for "
3189 "%sveclibabi=%s %s", ix86_veclibabi_string,
3190 prefix, suffix, sw);
3193 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3194 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3195 && !optimize_size)
3196 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3198 /* ??? Unwind info is not correct around the CFG unless either a frame
3199 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3200 unwind info generation to be aware of the CFG and propagating states
3201 around edges. */
3202 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3203 || flag_exceptions || flag_non_call_exceptions)
3204 && flag_omit_frame_pointer
3205 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3207 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3208 warning (0, "unwind tables currently require either a frame pointer "
3209 "or %saccumulate-outgoing-args%s for correctness",
3210 prefix, suffix);
3211 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3214 /* If stack probes are required, the space used for large function
3215 arguments on the stack must also be probed, so enable
3216 -maccumulate-outgoing-args so this happens in the prologue. */
3217 if (TARGET_STACK_PROBE
3218 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3220 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3221 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3222 "for correctness", prefix, suffix);
3223 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3226 /* For sane SSE instruction set generation we need the fcomi instruction.
3227 It is safe to enable all CMOVE instructions. */
3228 if (TARGET_SSE)
3229 TARGET_CMOVE = 1;
3231 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3233 char *p;
3234 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3235 p = strchr (internal_label_prefix, 'X');
3236 internal_label_prefix_len = p - internal_label_prefix;
3237 *p = '\0';
3240 /* When the scheduling description is not available, disable the scheduler pass
3241 so it won't slow down compilation and make x87 code slower. */
3242 if (!TARGET_SCHEDULE)
3243 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3245 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3246 set_param_value ("simultaneous-prefetches",
3247 ix86_cost->simultaneous_prefetches);
3248 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3249 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3250 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3251 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3252 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3253 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3255 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3256 can be optimized to ap = __builtin_next_arg (0). */
3257 if (!TARGET_64BIT)
3258 targetm.expand_builtin_va_start = NULL;
3260 if (TARGET_64BIT)
3262 ix86_gen_leave = gen_leave_rex64;
3263 ix86_gen_pop1 = gen_popdi1;
3264 ix86_gen_add3 = gen_adddi3;
3265 ix86_gen_sub3 = gen_subdi3;
3266 ix86_gen_sub3_carry = gen_subdi3_carry_rex64;
3267 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3268 ix86_gen_monitor = gen_sse3_monitor64;
3269 ix86_gen_andsp = gen_anddi3;
3271 else
3273 ix86_gen_leave = gen_leave;
3274 ix86_gen_pop1 = gen_popsi1;
3275 ix86_gen_add3 = gen_addsi3;
3276 ix86_gen_sub3 = gen_subsi3;
3277 ix86_gen_sub3_carry = gen_subsi3_carry;
3278 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3279 ix86_gen_monitor = gen_sse3_monitor;
3280 ix86_gen_andsp = gen_andsi3;
3283 #ifdef USE_IX86_CLD
3284 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3285 if (!TARGET_64BIT)
3286 target_flags |= MASK_CLD & ~target_flags_explicit;
3287 #endif
3289 /* Save the initial options in case the user uses function-specific options later. */
3290 if (main_args_p)
3291 target_option_default_node = target_option_current_node
3292 = build_target_option_node ();
3295 /* Save the current options */
3297 static void
3298 ix86_function_specific_save (struct cl_target_option *ptr)
3300 gcc_assert (IN_RANGE (ix86_arch, 0, 255));
3301 gcc_assert (IN_RANGE (ix86_schedule, 0, 255));
3302 gcc_assert (IN_RANGE (ix86_tune, 0, 255));
3303 gcc_assert (IN_RANGE (ix86_fpmath, 0, 255));
3304 gcc_assert (IN_RANGE (ix86_branch_cost, 0, 255));
3306 ptr->arch = ix86_arch;
3307 ptr->schedule = ix86_schedule;
3308 ptr->tune = ix86_tune;
3309 ptr->fpmath = ix86_fpmath;
3310 ptr->branch_cost = ix86_branch_cost;
3311 ptr->tune_defaulted = ix86_tune_defaulted;
3312 ptr->arch_specified = ix86_arch_specified;
3313 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3314 ptr->target_flags_explicit = target_flags_explicit;
3317 /* Restore the current options */
3319 static void
3320 ix86_function_specific_restore (struct cl_target_option *ptr)
3322 enum processor_type old_tune = ix86_tune;
3323 enum processor_type old_arch = ix86_arch;
3324 unsigned int ix86_arch_mask, ix86_tune_mask;
3325 int i;
3327 ix86_arch = ptr->arch;
3328 ix86_schedule = ptr->schedule;
3329 ix86_tune = ptr->tune;
3330 ix86_fpmath = ptr->fpmath;
3331 ix86_branch_cost = ptr->branch_cost;
3332 ix86_tune_defaulted = ptr->tune_defaulted;
3333 ix86_arch_specified = ptr->arch_specified;
3334 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3335 target_flags_explicit = ptr->target_flags_explicit;
3337 /* Recreate the arch feature tests if the arch changed */
3338 if (old_arch != ix86_arch)
3340 ix86_arch_mask = 1u << ix86_arch;
3341 for (i = 0; i < X86_ARCH_LAST; ++i)
3342 ix86_arch_features[i]
3343 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3346 /* Recreate the tune optimization tests */
3347 if (old_tune != ix86_tune)
3349 ix86_tune_mask = 1u << ix86_tune;
3350 for (i = 0; i < X86_TUNE_LAST; ++i)
3351 ix86_tune_features[i]
3352 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3356 /* Print the current options */
3358 static void
3359 ix86_function_specific_print (FILE *file, int indent,
3360 struct cl_target_option *ptr)
3362 char *target_string
3363 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3364 NULL, NULL, NULL, false);
3366 fprintf (file, "%*sarch = %d (%s)\n",
3367 indent, "",
3368 ptr->arch,
3369 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3370 ? cpu_names[ptr->arch]
3371 : "<unknown>"));
3373 fprintf (file, "%*stune = %d (%s)\n",
3374 indent, "",
3375 ptr->tune,
3376 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3377 ? cpu_names[ptr->tune]
3378 : "<unknown>"));
3380 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3381 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3382 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3383 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3385 if (target_string)
3387 fprintf (file, "%*s%s\n", indent, "", target_string);
3388 free (target_string);
3393 /* Inner function to process attribute((target(...))): take an argument and
3394 set the current options from it.  If the argument is a list, recursively go
3395 over the list. */
3397 static bool
3398 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3400 char *next_optstr;
3401 bool ret = true;
3403 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3404 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3405 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3406 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3408 enum ix86_opt_type
3410 ix86_opt_unknown,
3411 ix86_opt_yes,
3412 ix86_opt_no,
3413 ix86_opt_str,
3414 ix86_opt_isa
3417 static const struct
3419 const char *string;
3420 size_t len;
3421 enum ix86_opt_type type;
3422 int opt;
3423 int mask;
3424 } attrs[] = {
3425 /* isa options */
3426 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3427 IX86_ATTR_ISA ("abm", OPT_mabm),
3428 IX86_ATTR_ISA ("aes", OPT_maes),
3429 IX86_ATTR_ISA ("avx", OPT_mavx),
3430 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3431 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3432 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3433 IX86_ATTR_ISA ("sse", OPT_msse),
3434 IX86_ATTR_ISA ("sse2", OPT_msse2),
3435 IX86_ATTR_ISA ("sse3", OPT_msse3),
3436 IX86_ATTR_ISA ("sse4", OPT_msse4),
3437 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3438 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3439 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3440 IX86_ATTR_ISA ("sse5", OPT_msse5),
3441 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3443 /* string options */
3444 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3445 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3446 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3448 /* flag options */
3449 IX86_ATTR_YES ("cld",
3450 OPT_mcld,
3451 MASK_CLD),
3453 IX86_ATTR_NO ("fancy-math-387",
3454 OPT_mfancy_math_387,
3455 MASK_NO_FANCY_MATH_387),
3457 IX86_ATTR_NO ("fused-madd",
3458 OPT_mfused_madd,
3459 MASK_NO_FUSED_MADD),
3461 IX86_ATTR_YES ("ieee-fp",
3462 OPT_mieee_fp,
3463 MASK_IEEE_FP),
3465 IX86_ATTR_YES ("inline-all-stringops",
3466 OPT_minline_all_stringops,
3467 MASK_INLINE_ALL_STRINGOPS),
3469 IX86_ATTR_YES ("inline-stringops-dynamically",
3470 OPT_minline_stringops_dynamically,
3471 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3473 IX86_ATTR_NO ("align-stringops",
3474 OPT_mno_align_stringops,
3475 MASK_NO_ALIGN_STRINGOPS),
3477 IX86_ATTR_YES ("recip",
3478 OPT_mrecip,
3479 MASK_RECIP),
3483 /* If this is a list, recurse to get the options. */
3484 if (TREE_CODE (args) == TREE_LIST)
3486 bool ret = true;
3488 for (; args; args = TREE_CHAIN (args))
3489 if (TREE_VALUE (args)
3490 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3491 ret = false;
3493 return ret;
3496 else if (TREE_CODE (args) != STRING_CST)
3497 gcc_unreachable ();
3499 /* Handle multiple arguments separated by commas. */
3500 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3502 while (next_optstr && *next_optstr != '\0')
3504 char *p = next_optstr;
3505 char *orig_p = p;
3506 char *comma = strchr (next_optstr, ',');
3507 const char *opt_string;
3508 size_t len, opt_len;
3509 int opt;
3510 bool opt_set_p;
3511 char ch;
3512 unsigned i;
3513 enum ix86_opt_type type = ix86_opt_unknown;
3514 int mask = 0;
3516 if (comma)
3518 *comma = '\0';
3519 len = comma - next_optstr;
3520 next_optstr = comma + 1;
3522 else
3524 len = strlen (p);
3525 next_optstr = NULL;
3528 /* Recognize no-xxx. */
3529 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3531 opt_set_p = false;
3532 p += 3;
3533 len -= 3;
3535 else
3536 opt_set_p = true;
3538 /* Find the option. */
3539 ch = *p;
3540 opt = N_OPTS;
3541 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3543 type = attrs[i].type;
3544 opt_len = attrs[i].len;
3545 if (ch == attrs[i].string[0]
3546 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3547 && memcmp (p, attrs[i].string, opt_len) == 0)
3549 opt = attrs[i].opt;
3550 mask = attrs[i].mask;
3551 opt_string = attrs[i].string;
3552 break;
3556 /* Process the option. */
3557 if (opt == N_OPTS)
3559 error ("attribute(target(\"%s\")) is unknown", orig_p);
3560 ret = false;
3563 else if (type == ix86_opt_isa)
3564 ix86_handle_option (opt, p, opt_set_p);
3566 else if (type == ix86_opt_yes || type == ix86_opt_no)
3568 if (type == ix86_opt_no)
3569 opt_set_p = !opt_set_p;
3571 if (opt_set_p)
3572 target_flags |= mask;
3573 else
3574 target_flags &= ~mask;
3577 else if (type == ix86_opt_str)
3579 if (p_strings[opt])
3581 error ("option(\"%s\") was already specified", opt_string);
3582 ret = false;
3584 else
3585 p_strings[opt] = xstrdup (p + opt_len);
3588 else
3589 gcc_unreachable ();
3592 return ret;
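/* As an illustrative sketch (the function names below are hypothetical), the
   attrs[] table above corresponds to user-level declarations such as

     __attribute__((target ("sse4.2,no-fused-madd")))
     int crc_fast (const unsigned char *buf, int len);

     __attribute__((target ("arch=core2,fpmath=sse")))
     double dot (const double *a, const double *b, int n);

   The attribute string is split on commas by the loop above; a "no-" prefix
   turns an option off, and unrecognized names reach the "is unknown"
   error.  */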
3595 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3597 tree
3598 ix86_valid_target_attribute_tree (tree args)
3600 const char *orig_arch_string = ix86_arch_string;
3601 const char *orig_tune_string = ix86_tune_string;
3602 const char *orig_fpmath_string = ix86_fpmath_string;
3603 int orig_tune_defaulted = ix86_tune_defaulted;
3604 int orig_arch_specified = ix86_arch_specified;
3605 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3606 tree t = NULL_TREE;
3607 int i;
3608 struct cl_target_option *def
3609 = TREE_TARGET_OPTION (target_option_default_node);
3611 /* Process each of the options on the chain. */
3612 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3613 return NULL_TREE;
3615 /* If the changed options are different from the default, rerun override_options,
3616 and then save the options away.  The string options are attribute options,
3617 and will be undone when we copy the save structure. */
3618 if (ix86_isa_flags != def->ix86_isa_flags
3619 || target_flags != def->target_flags
3620 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3621 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3622 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3624 /* If we are using the default tune= or arch=, undo the string assigned,
3625 and use the default. */
3626 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3627 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3628 else if (!orig_arch_specified)
3629 ix86_arch_string = NULL;
3631 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3632 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3633 else if (orig_tune_defaulted)
3634 ix86_tune_string = NULL;
3636 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3637 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3638 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3639 else if (!TARGET_64BIT && TARGET_SSE)
3640 ix86_fpmath_string = "sse,387";
3642 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3643 override_options (false);
3645 /* Add any builtin functions with the new isa if any. */
3646 ix86_add_new_builtins (ix86_isa_flags);
3648 /* Save the current options unless we are validating options for
3649 #pragma. */
3650 t = build_target_option_node ();
3652 ix86_arch_string = orig_arch_string;
3653 ix86_tune_string = orig_tune_string;
3654 ix86_fpmath_string = orig_fpmath_string;
3656 /* Free up memory allocated to hold the strings */
3657 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3658 if (option_strings[i])
3659 free (option_strings[i]);
3662 return t;
3665 /* Hook to validate attribute((target("string"))). */
3667 static bool
3668 ix86_valid_target_attribute_p (tree fndecl,
3669 tree ARG_UNUSED (name),
3670 tree args,
3671 int ARG_UNUSED (flags))
3673 struct cl_target_option cur_target;
3674 bool ret = true;
3675 tree old_optimize = build_optimization_node ();
3676 tree new_target, new_optimize;
3677 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3679 /* If the function changed the optimization levels as well as setting target
3680 options, start with the optimizations specified. */
3681 if (func_optimize && func_optimize != old_optimize)
3682 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3684 /* The target attributes may also change some optimization flags, so update
3685 the optimization options if necessary. */
3686 cl_target_option_save (&cur_target);
3687 new_target = ix86_valid_target_attribute_tree (args);
3688 new_optimize = build_optimization_node ();
3690 if (!new_target)
3691 ret = false;
3693 else if (fndecl)
3695 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3697 if (old_optimize != new_optimize)
3698 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3701 cl_target_option_restore (&cur_target);
3703 if (old_optimize != new_optimize)
3704 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3706 return ret;
3710 /* Hook to determine if one function can safely inline another. */
3712 static bool
3713 ix86_can_inline_p (tree caller, tree callee)
3715 bool ret = false;
3716 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3717 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3719 /* If callee has no option attributes, then it is ok to inline. */
3720 if (!callee_tree)
3721 ret = true;
3723 /* If caller has no option attributes, but callee does then it is not ok to
3724 inline. */
3725 else if (!caller_tree)
3726 ret = false;
3728 else
3730 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3731 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4733 /* The callee's isa options should be a subset of the caller's, i.e. an SSE5
4734 function can inline an SSE2 function, but an SSE2 function can't inline an
4735 SSE5 function. */
3736 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3737 != callee_opts->ix86_isa_flags)
3738 ret = false;
3740 /* See if we have the same non-isa options. */
3741 else if (caller_opts->target_flags != callee_opts->target_flags)
3742 ret = false;
3744 /* See if arch, tune, etc. are the same. */
3745 else if (caller_opts->arch != callee_opts->arch)
3746 ret = false;
3748 else if (caller_opts->tune != callee_opts->tune)
3749 ret = false;
3751 else if (caller_opts->fpmath != callee_opts->fpmath)
3752 ret = false;
3754 else if (caller_opts->branch_cost != callee_opts->branch_cost)
3755 ret = false;
3757 else
3758 ret = true;
3761 return ret;
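/* A rough example of the subset rule above (hypothetical functions):

     __attribute__((target ("sse2")))   static int f2 (int x) { return x + 1; }
     __attribute__((target ("sse4.2"))) static int f42 (int x) { return f2 (x); }

   f42 may inline f2, since the callee's ISA flags are a subset of the
   caller's, while a caller built without SSE4.2 would be refused inlining
   of f42.  */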
3765 /* Remember the last target of ix86_set_current_function. */
3766 static GTY(()) tree ix86_previous_fndecl;
3768 /* Establish appropriate back-end context for processing the function
3769 FNDECL. The argument might be NULL to indicate processing at top
3770 level, outside of any function scope. */
3771 static void
3772 ix86_set_current_function (tree fndecl)
3774 /* Only change the context if the function changes. This hook is called
3775 several times in the course of compiling a function, and we don't want to
3776 slow things down too much or call target_reinit when it isn't safe. */
3777 if (fndecl && fndecl != ix86_previous_fndecl)
3779 tree old_tree = (ix86_previous_fndecl
3780 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
3781 : NULL_TREE);
3783 tree new_tree = (fndecl
3784 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
3785 : NULL_TREE);
3787 ix86_previous_fndecl = fndecl;
3788 if (old_tree == new_tree)
3791 else if (new_tree)
3793 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
3794 target_reinit ();
3797 else if (old_tree)
3799 struct cl_target_option *def
3800 = TREE_TARGET_OPTION (target_option_current_node);
3802 cl_target_option_restore (def);
3803 target_reinit ();
3809 /* Return true if this goes in large data/bss. */
3811 static bool
3812 ix86_in_large_data_p (tree exp)
3814 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
3815 return false;
3817 /* Functions are never large data. */
3818 if (TREE_CODE (exp) == FUNCTION_DECL)
3819 return false;
3821 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
3823 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
3824 if (strcmp (section, ".ldata") == 0
3825 || strcmp (section, ".lbss") == 0)
3826 return true;
3827 return false;
3829 else
3831 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
3833 /* If this is an incomplete type with size 0, then we can't put it
3834 in data because it might be too big when completed. */
3835 if (!size || size > ix86_section_threshold)
3836 return true;
3839 return false;
3842 /* Switch to the appropriate section for output of DECL.
3843 DECL is either a `VAR_DECL' node or a constant of some sort.
3844 RELOC indicates whether forming the initial value of DECL requires
3845 link-time relocations. */
3847 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
3848 ATTRIBUTE_UNUSED;
3850 static section *
3851 x86_64_elf_select_section (tree decl, int reloc,
3852 unsigned HOST_WIDE_INT align)
3854 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3855 && ix86_in_large_data_p (decl))
3857 const char *sname = NULL;
3858 unsigned int flags = SECTION_WRITE;
3859 switch (categorize_decl_for_section (decl, reloc))
3861 case SECCAT_DATA:
3862 sname = ".ldata";
3863 break;
3864 case SECCAT_DATA_REL:
3865 sname = ".ldata.rel";
3866 break;
3867 case SECCAT_DATA_REL_LOCAL:
3868 sname = ".ldata.rel.local";
3869 break;
3870 case SECCAT_DATA_REL_RO:
3871 sname = ".ldata.rel.ro";
3872 break;
3873 case SECCAT_DATA_REL_RO_LOCAL:
3874 sname = ".ldata.rel.ro.local";
3875 break;
3876 case SECCAT_BSS:
3877 sname = ".lbss";
3878 flags |= SECTION_BSS;
3879 break;
3880 case SECCAT_RODATA:
3881 case SECCAT_RODATA_MERGE_STR:
3882 case SECCAT_RODATA_MERGE_STR_INIT:
3883 case SECCAT_RODATA_MERGE_CONST:
3884 sname = ".lrodata";
3885 flags = 0;
3886 break;
3887 case SECCAT_SRODATA:
3888 case SECCAT_SDATA:
3889 case SECCAT_SBSS:
3890 gcc_unreachable ();
3891 case SECCAT_TEXT:
3892 case SECCAT_TDATA:
3893 case SECCAT_TBSS:
3894 /* We don't split these for the medium model.  Place them into
3895 default sections and hope for the best. */
3896 break;
3897 case SECCAT_EMUTLS_VAR:
3898 case SECCAT_EMUTLS_TMPL:
3899 gcc_unreachable ();
3901 if (sname)
3903 /* We might get called with string constants, but get_named_section
3904 doesn't like them as they are not DECLs. Also, we need to set
3905 flags in that case. */
3906 if (!DECL_P (decl))
3907 return get_section (sname, flags, NULL);
3908 return get_named_section (decl, sname, reloc);
3911 return default_elf_select_section (decl, reloc, align);
3914 /* Build up a unique section name, expressed as a
3915 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
3916 RELOC indicates whether the initial value of EXP requires
3917 link-time relocations. */
3919 static void ATTRIBUTE_UNUSED
3920 x86_64_elf_unique_section (tree decl, int reloc)
3922 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3923 && ix86_in_large_data_p (decl))
3925 const char *prefix = NULL;
3926 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
3927 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
3929 switch (categorize_decl_for_section (decl, reloc))
3931 case SECCAT_DATA:
3932 case SECCAT_DATA_REL:
3933 case SECCAT_DATA_REL_LOCAL:
3934 case SECCAT_DATA_REL_RO:
3935 case SECCAT_DATA_REL_RO_LOCAL:
3936 prefix = one_only ? ".ld" : ".ldata";
3937 break;
3938 case SECCAT_BSS:
3939 prefix = one_only ? ".lb" : ".lbss";
3940 break;
3941 case SECCAT_RODATA:
3942 case SECCAT_RODATA_MERGE_STR:
3943 case SECCAT_RODATA_MERGE_STR_INIT:
3944 case SECCAT_RODATA_MERGE_CONST:
3945 prefix = one_only ? ".lr" : ".lrodata";
3946 break;
3947 case SECCAT_SRODATA:
3948 case SECCAT_SDATA:
3949 case SECCAT_SBSS:
3950 gcc_unreachable ();
3951 case SECCAT_TEXT:
3952 case SECCAT_TDATA:
3953 case SECCAT_TBSS:
3954 /* We don't split these for the medium model.  Place them into
3955 default sections and hope for the best. */
3956 break;
3957 case SECCAT_EMUTLS_VAR:
3958 prefix = targetm.emutls.var_section;
3959 break;
3960 case SECCAT_EMUTLS_TMPL:
3961 prefix = targetm.emutls.tmpl_section;
3962 break;
3964 if (prefix)
3966 const char *name, *linkonce;
3967 char *string;
3969 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3970 name = targetm.strip_name_encoding (name);
3972 /* If we're using one_only, then there needs to be a .gnu.linkonce
3973 prefix to the section name. */
3974 linkonce = one_only ? ".gnu.linkonce" : "";
3976 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
3978 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
3979 return;
3982 default_unique_section (decl, reloc);
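/* A small sketch of the effect (hypothetical variable, assuming
   -mcmodel=medium with the default large-data threshold):

     static char big_table[1 << 20];

   ends up in .lbss (or .ldata when initialized) because its size exceeds
   ix86_section_threshold, while small objects keep the ordinary .bss and
   .data sections.  */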
3985 #ifdef COMMON_ASM_OP
3986 /* This says how to output assembler code to declare an
3987 uninitialized external linkage data object.
3989 For medium model x86-64 we need to use .largecomm opcode for
3990 large objects. */
3991 void
3992 x86_elf_aligned_common (FILE *file,
3993 const char *name, unsigned HOST_WIDE_INT size,
3994 int align)
3996 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3997 && size > (unsigned int)ix86_section_threshold)
3998 fprintf (file, ".largecomm\t");
3999 else
4000 fprintf (file, "%s", COMMON_ASM_OP);
4001 assemble_name (file, name);
4002 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
4003 size, align / BITS_PER_UNIT);
4005 #endif
4007 /* Utility function for targets to use in implementing
4008 ASM_OUTPUT_ALIGNED_BSS. */
4010 void
4011 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4012 const char *name, unsigned HOST_WIDE_INT size,
4013 int align)
4015 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4016 && size > (unsigned int)ix86_section_threshold)
4017 switch_to_section (get_named_section (decl, ".lbss", 0));
4018 else
4019 switch_to_section (bss_section);
4020 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4021 #ifdef ASM_DECLARE_OBJECT_NAME
4022 last_assemble_variable_decl = decl;
4023 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4024 #else
4025 /* Standard thing is just output label for the object. */
4026 ASM_OUTPUT_LABEL (file, name);
4027 #endif /* ASM_DECLARE_OBJECT_NAME */
4028 ASM_OUTPUT_SKIP (file, size ? size : 1);
4031 void
4032 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4034 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4035 make the problem with not enough registers even worse. */
4036 #ifdef INSN_SCHEDULING
4037 if (level > 1)
4038 flag_schedule_insns = 0;
4039 #endif
4041 if (TARGET_MACHO)
4042 /* The Darwin libraries never set errno, so we might as well
4043 avoid calling them when that's the only reason we would. */
4044 flag_errno_math = 0;
4046 /* The default values of these switches depend on TARGET_64BIT,
4047 which is not known at this moment.  Mark these values with 2 and
4048 let the user override them.  In case there is no command line option
4049 specifying them, we will set the defaults in override_options. */
4050 if (optimize >= 1)
4051 flag_omit_frame_pointer = 2;
4052 flag_pcc_struct_return = 2;
4053 flag_asynchronous_unwind_tables = 2;
4054 flag_vect_cost_model = 1;
4055 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4056 SUBTARGET_OPTIMIZATION_OPTIONS;
4057 #endif
4060 /* Decide whether we can make a sibling call to a function. DECL is the
4061 declaration of the function being targeted by the call and EXP is the
4062 CALL_EXPR representing the call. */
4064 static bool
4065 ix86_function_ok_for_sibcall (tree decl, tree exp)
4067 tree func;
4068 rtx a, b;
4070 /* If we are generating position-independent code, we cannot sibcall
4071 optimize any indirect call, or a direct call to a global function,
4072 as the PLT requires %ebx be live. */
4073 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4074 return false;
4076 if (decl)
4077 func = decl;
4078 else
4080 func = TREE_TYPE (CALL_EXPR_FN (exp));
4081 if (POINTER_TYPE_P (func))
4082 func = TREE_TYPE (func);
4085 /* Check that the return value locations are the same. Like
4086 if we are returning floats on the 80387 register stack, we cannot
4087 make a sibcall from a function that doesn't return a float to a
4088 function that does or, conversely, from a function that does return
4089 a float to a function that doesn't; the necessary stack adjustment
4090 would not be executed. This is also the place we notice
4091 differences in the return value ABI. Note that it is ok for one
4092 of the functions to have void return type as long as the return
4093 value of the other is passed in a register. */
4094 a = ix86_function_value (TREE_TYPE (exp), func, false);
4095 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4096 cfun->decl, false);
4097 if (STACK_REG_P (a) || STACK_REG_P (b))
4099 if (!rtx_equal_p (a, b))
4100 return false;
4102 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4104 else if (!rtx_equal_p (a, b))
4105 return false;
4107 /* If this call is indirect, we'll need to be able to use a call-clobbered
4108 register for the address of the target function. Make sure that all
4109 such registers are not used for passing parameters. */
4110 if (!decl && !TARGET_64BIT)
4112 tree type;
4114 /* We're looking at the CALL_EXPR, we need the type of the function. */
4115 type = CALL_EXPR_FN (exp); /* pointer expression */
4116 type = TREE_TYPE (type); /* pointer type */
4117 type = TREE_TYPE (type); /* function type */
4119 if (ix86_function_regparm (type, NULL) >= 3)
4121 /* ??? Need to count the actual number of registers to be used,
4122 not the possible number of registers. Fix later. */
4123 return false;
4127 /* Dllimport'd functions are also called indirectly. */
4128 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
4129 && !TARGET_64BIT
4130 && decl && DECL_DLLIMPORT_P (decl)
4131 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
4132 return false;
4134 /* If we need to align the outgoing stack, then sibcalling would
4135 unalign the stack, which may break the called function. */
4136 if (ix86_incoming_stack_boundary < PREFERRED_STACK_BOUNDARY)
4137 return false;
4139 /* Otherwise okay. That also includes certain types of indirect calls. */
4140 return true;
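/* A hedged example of the PIC restriction checked first above (hypothetical
   functions, 32-bit -fpic):

     extern int other_tu (int);
     int wrapper (int x) { return other_tu (x); }

   The tail call stays a real call because other_tu does not bind locally
   and the PLT needs %ebx; a static function defined in the same unit could
   still be sibcalled.  */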
4143 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
4144 calling convention attributes;
4145 arguments as in struct attribute_spec.handler. */
4147 static tree
4148 ix86_handle_cconv_attribute (tree *node, tree name,
4149 tree args,
4150 int flags ATTRIBUTE_UNUSED,
4151 bool *no_add_attrs)
4153 if (TREE_CODE (*node) != FUNCTION_TYPE
4154 && TREE_CODE (*node) != METHOD_TYPE
4155 && TREE_CODE (*node) != FIELD_DECL
4156 && TREE_CODE (*node) != TYPE_DECL)
4158 warning (OPT_Wattributes, "%qs attribute only applies to functions",
4159 IDENTIFIER_POINTER (name));
4160 *no_add_attrs = true;
4161 return NULL_TREE;
4164 /* Can combine regparm with all attributes but fastcall. */
4165 if (is_attribute_p ("regparm", name))
4167 tree cst;
4169 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4171 error ("fastcall and regparm attributes are not compatible");
4174 cst = TREE_VALUE (args);
4175 if (TREE_CODE (cst) != INTEGER_CST)
4177 warning (OPT_Wattributes,
4178 "%qs attribute requires an integer constant argument",
4179 IDENTIFIER_POINTER (name));
4180 *no_add_attrs = true;
4182 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4184 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
4185 IDENTIFIER_POINTER (name), REGPARM_MAX);
4186 *no_add_attrs = true;
4189 return NULL_TREE;
4192 if (TARGET_64BIT)
4194 /* Do not warn when emulating the MS ABI. */
4195 if (TREE_CODE (*node) != FUNCTION_TYPE || ix86_function_type_abi (*node)!=MS_ABI)
4196 warning (OPT_Wattributes, "%qs attribute ignored",
4197 IDENTIFIER_POINTER (name));
4198 *no_add_attrs = true;
4199 return NULL_TREE;
4202 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4203 if (is_attribute_p ("fastcall", name))
4205 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4207 error ("fastcall and cdecl attributes are not compatible");
4209 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4211 error ("fastcall and stdcall attributes are not compatible");
4213 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4215 error ("fastcall and regparm attributes are not compatible");
4219 /* Can combine stdcall with fastcall (redundant), regparm and
4220 sseregparm. */
4221 else if (is_attribute_p ("stdcall", name))
4223 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4225 error ("stdcall and cdecl attributes are not compatible");
4227 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4229 error ("stdcall and fastcall attributes are not compatible");
4233 /* Can combine cdecl with regparm and sseregparm. */
4234 else if (is_attribute_p ("cdecl", name))
4236 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4238 error ("stdcall and cdecl attributes are not compatible");
4240 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4242 error ("fastcall and cdecl attributes are not compatible");
4246 /* Can combine sseregparm with all attributes. */
4248 return NULL_TREE;
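/* Roughly, the handler above accepts and rejects combinations such as
   (hypothetical declarations, 32-bit only):

     int __attribute__((stdcall, regparm (2))) ok (int a, int b);
     int __attribute__((fastcall, regparm (2))) bad (int a, int b);

   The second declaration is rejected with "fastcall and regparm attributes
   are not compatible"; regparm also requires an integer constant argument
   no larger than REGPARM_MAX.  */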
4251 /* Return 0 if the attributes for two types are incompatible, 1 if they
4252 are compatible, and 2 if they are nearly compatible (which causes a
4253 warning to be generated). */
4255 static int
4256 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4258 /* Check for mismatch of non-default calling convention. */
4259 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4261 if (TREE_CODE (type1) != FUNCTION_TYPE
4262 && TREE_CODE (type1) != METHOD_TYPE)
4263 return 1;
4265 /* Check for mismatched fastcall/regparm types. */
4266 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4267 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4268 || (ix86_function_regparm (type1, NULL)
4269 != ix86_function_regparm (type2, NULL)))
4270 return 0;
4272 /* Check for mismatched sseregparm types. */
4273 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4274 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4275 return 0;
4277 /* Check for mismatched return types (cdecl vs stdcall). */
4278 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4279 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4280 return 0;
4282 return 1;
4285 /* Return the regparm value for a function with the indicated TYPE and DECL.
4286 DECL may be NULL when calling function indirectly
4287 or considering a libcall. */
4289 static int
4290 ix86_function_regparm (const_tree type, const_tree decl)
4292 tree attr;
4293 int regparm;
4295 static bool error_issued;
4297 if (TARGET_64BIT)
4298 return (ix86_function_type_abi (type) == SYSV_ABI
4299 ? X86_64_REGPARM_MAX : X64_REGPARM_MAX);
4301 regparm = ix86_regparm;
4302 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4303 if (attr)
4305 regparm
4306 = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4308 if (decl && TREE_CODE (decl) == FUNCTION_DECL)
4310 /* We can't use regparm(3) for nested functions because
4311 these pass the static chain pointer in the %ecx register. */
4312 if (!error_issued && regparm == 3
4313 && decl_function_context (decl)
4314 && !DECL_NO_STATIC_CHAIN (decl))
4316 error ("nested functions are limited to 2 register parameters");
4317 error_issued = true;
4318 return 0;
4322 return regparm;
4325 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4326 return 2;
4328 /* Use register calling convention for local functions when possible. */
4329 if (decl
4330 && TREE_CODE (decl) == FUNCTION_DECL
4331 && optimize
4332 && !profile_flag)
4334 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4335 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4336 if (i && i->local)
4338 int local_regparm, globals = 0, regno;
4339 struct function *f;
4341 /* Make sure no regparm register is taken by a
4342 fixed register variable. */
4343 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4344 if (fixed_regs[local_regparm])
4345 break;
4347 /* We can't use regparm(3) for nested functions as these use the
4348 static chain pointer in the third argument. */
4349 if (local_regparm == 3
4350 && decl_function_context (decl)
4351 && !DECL_NO_STATIC_CHAIN (decl))
4352 local_regparm = 2;
4354 /* If the function realigns its stack pointer, the prologue will
4355 clobber %ecx. If we've already generated code for the callee,
4356 the callee DECL_STRUCT_FUNCTION is gone, so we fall back to
4357 scanning the attributes for the self-realigning property. */
4358 f = DECL_STRUCT_FUNCTION (decl);
4359 /* Since the current internal arg pointer won't conflict with
4360 parameter-passing regs, there is no need to change stack
4361 realignment or adjust the regparm number.
4363 Each fixed register use increases register pressure,
4364 so fewer registers should be used for argument passing.
4365 This functionality can be overridden by an explicit
4366 regparm value. */
4367 for (regno = 0; regno <= DI_REG; regno++)
4368 if (fixed_regs[regno])
4369 globals++;
4371 local_regparm
4372 = globals < local_regparm ? local_regparm - globals : 0;
4374 if (local_regparm > regparm)
4375 regparm = local_regparm;
4379 return regparm;
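/* A sketch of what the computed regparm value means for 32-bit user code
   (hypothetical function):

     int __attribute__((regparm (3))) add3 (int a, int b, int c);

   passes a, b and c in %eax, %edx and %ecx rather than on the stack.  Local
   static functions may get the same treatment automatically when optimizing,
   minus any registers made fixed with -ffixed-REG, and nested functions are
   capped at two registers because %ecx carries the static chain.  */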
4382 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4383 DFmode (2) arguments in SSE registers for a function with the
4384 indicated TYPE and DECL. DECL may be NULL when calling function
4385 indirectly or considering a libcall. Otherwise return 0. */
4387 static int
4388 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4390 gcc_assert (!TARGET_64BIT);
4392 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4393 by the sseregparm attribute. */
4394 if (TARGET_SSEREGPARM
4395 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4397 if (!TARGET_SSE)
4399 if (warn)
4401 if (decl)
4402 error ("Calling %qD with attribute sseregparm without "
4403 "SSE/SSE2 enabled", decl);
4404 else
4405 error ("Calling %qT with attribute sseregparm without "
4406 "SSE/SSE2 enabled", type);
4408 return 0;
4411 return 2;
4414 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4415 (and DFmode for SSE2) arguments in SSE registers. */
4416 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4418 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4419 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4420 if (i && i->local)
4421 return TARGET_SSE2 ? 2 : 1;
4424 return 0;
4427 /* Return true if EAX is live at the start of the function. Used by
4428 ix86_expand_prologue to determine if we need special help before
4429 calling allocate_stack_worker. */
4431 static bool
4432 ix86_eax_live_at_start_p (void)
4434 /* Cheat. Don't bother working forward from ix86_function_regparm
4435 to the function type to whether an actual argument is located in
4436 eax. Instead just look at cfg info, which is still close enough
4437 to correct at this point. This gives false positives for broken
4438 functions that might use uninitialized data that happens to be
4439 allocated in eax, but who cares? */
4440 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4443 /* Value is the number of bytes of arguments automatically
4444 popped when returning from a subroutine call.
4445 FUNDECL is the declaration node of the function (as a tree),
4446 FUNTYPE is the data type of the function (as a tree),
4447 or for a library call it is an identifier node for the subroutine name.
4448 SIZE is the number of bytes of arguments passed on the stack.
4450 On the 80386, the RTD insn may be used to pop them if the number
4451 of args is fixed, but if the number is variable then the caller
4452 must pop them all. RTD can't be used for library calls now
4453 because the library is compiled with the Unix compiler.
4454 Use of RTD is a selectable option, since it is incompatible with
4455 standard Unix calling sequences. If the option is not selected,
4456 the caller must always pop the args.
4458 The attribute stdcall is equivalent to RTD on a per module basis. */
4460 int
4461 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4463 int rtd;
4465 /* None of the 64-bit ABIs pop arguments. */
4466 if (TARGET_64BIT)
4467 return 0;
4469 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4471 /* Cdecl functions override -mrtd, and never pop the stack. */
4472 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4474 /* Stdcall and fastcall functions will pop the stack if not
4475 variable args. */
4476 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4477 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
4478 rtd = 1;
4480 if (rtd && ! stdarg_p (funtype))
4481 return size;
4484 /* Lose any fake structure return argument if it is passed on the stack. */
4485 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4486 && !KEEP_AGGREGATE_RETURN_POINTER)
4488 int nregs = ix86_function_regparm (funtype, fundecl);
4489 if (nregs == 0)
4490 return GET_MODE_SIZE (Pmode);
4493 return 0;
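/* The callee-pops behaviour computed above, sketched for 32-bit code
   (hypothetical declarations):

     int __attribute__((stdcall)) f (int a, int b);   callee pops 8 bytes
     int __attribute__((cdecl)) g (int a, int b);     caller pops, even with -mrtd
     int __attribute__((stdcall)) h (int a, ...);     variadic, so caller pops

   On 64-bit targets the function returns 0 unconditionally.  */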
4496 /* Argument support functions. */
4498 /* Return true when register may be used to pass function parameters. */
4499 bool
4500 ix86_function_arg_regno_p (int regno)
4502 int i;
4503 const int *parm_regs;
4505 if (!TARGET_64BIT)
4507 if (TARGET_MACHO)
4508 return (regno < REGPARM_MAX
4509 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4510 else
4511 return (regno < REGPARM_MAX
4512 || (TARGET_MMX && MMX_REGNO_P (regno)
4513 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4514 || (TARGET_SSE && SSE_REGNO_P (regno)
4515 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4518 if (TARGET_MACHO)
4520 if (SSE_REGNO_P (regno) && TARGET_SSE)
4521 return true;
4523 else
4525 if (TARGET_SSE && SSE_REGNO_P (regno)
4526 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4527 return true;
4530 /* TODO: The function should depend on the current function's ABI, but
4531 builtins.c would then need updating.  Therefore we use the
4532 default ABI. */
4534 /* RAX is used as hidden argument to va_arg functions. */
4535 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4536 return true;
4538 if (ix86_abi == MS_ABI)
4539 parm_regs = x86_64_ms_abi_int_parameter_registers;
4540 else
4541 parm_regs = x86_64_int_parameter_registers;
4542 for (i = 0; i < (ix86_abi == MS_ABI ? X64_REGPARM_MAX
4543 : X86_64_REGPARM_MAX); i++)
4544 if (regno == parm_regs[i])
4545 return true;
4546 return false;
4549 /* Return true if we do not know how to pass TYPE solely in registers. */
4551 static bool
4552 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4554 if (must_pass_in_stack_var_size_or_pad (mode, type))
4555 return true;
4557 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4558 The layout_type routine is crafty and tries to trick us into passing
4559 currently unsupported vector types on the stack by using TImode. */
4560 return (!TARGET_64BIT && mode == TImode
4561 && type && TREE_CODE (type) != VECTOR_TYPE);
4564 /* Return the size, in bytes, of the area reserved for arguments passed
4565 in registers for the function represented by FNDECL, depending on the
4566 ABI format used. */
4567 int
4568 ix86_reg_parm_stack_space (const_tree fndecl)
4570 enum calling_abi call_abi = SYSV_ABI;
4571 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4572 call_abi = ix86_function_abi (fndecl);
4573 else
4574 call_abi = ix86_function_type_abi (fndecl);
4575 if (call_abi == MS_ABI)
4576 return 32;
4577 return 0;
4580 /* Returns value SYSV_ABI, MS_ABI dependent on fntype, specifying the
4581 call abi used. */
4582 enum calling_abi
4583 ix86_function_type_abi (const_tree fntype)
4585 if (TARGET_64BIT && fntype != NULL)
4587 enum calling_abi abi = ix86_abi;
4588 if (abi == SYSV_ABI)
4590 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4591 abi = MS_ABI;
4593 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4594 abi = SYSV_ABI;
4595 return abi;
4597 return ix86_abi;
4600 static enum calling_abi
4601 ix86_function_abi (const_tree fndecl)
4603 if (! fndecl)
4604 return ix86_abi;
4605 return ix86_function_type_abi (TREE_TYPE (fndecl));
4608 /* Returns value SYSV_ABI, MS_ABI dependent on cfun, specifying the
4609 call abi used. */
4610 enum calling_abi
4611 ix86_cfun_abi (void)
4613 if (! cfun || ! TARGET_64BIT)
4614 return ix86_abi;
4615 return cfun->machine->call_abi;
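/* A hedged sketch of the per-function ABI selection above (hypothetical
   declarations, 64-bit only):

     void __attribute__((ms_abi)) w64_callback (void *ctx);
     void __attribute__((sysv_abi)) unix_helper (void *ctx);

   With a SYSV default ABI the first declaration switches that single
   function to the Microsoft x64 convention; when the default ABI is MS_ABI
   the second switches back to the System V convention.  */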
4618 /* regclass.c */
4619 extern void init_regs (void);
4621 /* Implementation of the call abi switching target hook.  The call register
4622 set specific to FNDECL is selected.  See also CONDITIONAL_REGISTER_USAGE
4623 for more details. */
4624 void
4625 ix86_call_abi_override (const_tree fndecl)
4627 if (fndecl == NULL_TREE)
4628 cfun->machine->call_abi = ix86_abi;
4629 else
4630 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4633 /* The MS and SYSV ABIs have different sets of call-used registers.  Avoid
4634 expensive re-initialization of init_regs each time we switch function
4635 context, since this is needed only during RTL expansion. */
4636 static void
4637 ix86_maybe_switch_abi (void)
4639 if (TARGET_64BIT &&
4640 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4641 reinit_regs ();
4644 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4645 for a call to a function whose data type is FNTYPE.
4646 For a library call, FNTYPE is 0. */
4648 void
4649 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4650 tree fntype, /* tree ptr for function decl */
4651 rtx libname, /* SYMBOL_REF of library name or 0 */
4652 tree fndecl)
4654 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4655 memset (cum, 0, sizeof (*cum));
4657 if (fndecl)
4658 cum->call_abi = ix86_function_abi (fndecl);
4659 else
4660 cum->call_abi = ix86_function_type_abi (fntype);
4661 /* Set up the number of registers to use for passing arguments. */
4663 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4664 sorry ("ms_abi attribute require -maccumulate-outgoing-args or subtarget optimization implying it");
4665 cum->nregs = ix86_regparm;
4666 if (TARGET_64BIT)
4668 if (cum->call_abi != ix86_abi)
4669 cum->nregs = ix86_abi != SYSV_ABI ? X86_64_REGPARM_MAX
4670 : X64_REGPARM_MAX;
4672 if (TARGET_SSE)
4674 cum->sse_nregs = SSE_REGPARM_MAX;
4675 if (TARGET_64BIT)
4677 if (cum->call_abi != ix86_abi)
4678 cum->sse_nregs = ix86_abi != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
4679 : X64_SSE_REGPARM_MAX;
4682 if (TARGET_MMX)
4683 cum->mmx_nregs = MMX_REGPARM_MAX;
4684 cum->warn_avx = true;
4685 cum->warn_sse = true;
4686 cum->warn_mmx = true;
4688 /* Because the type might mismatch between caller and callee, we need to
4689 use the actual type of the function for local calls.
4690 FIXME: cgraph_analyze can be told to actually record if a function uses
4691 va_start, so for local functions maybe_vaarg can be made more aggressive,
4692 helping K&R code.
4693 FIXME: once the type system is fixed, we won't need this code anymore. */
4694 if (i && i->local)
4695 fntype = TREE_TYPE (fndecl);
4696 cum->maybe_vaarg = (fntype
4697 ? (!prototype_p (fntype) || stdarg_p (fntype))
4698 : !libname);
4700 if (!TARGET_64BIT)
4702 /* If there are variable arguments, then we won't pass anything
4703 in registers in 32-bit mode. */
4704 if (stdarg_p (fntype))
4706 cum->nregs = 0;
4707 cum->sse_nregs = 0;
4708 cum->mmx_nregs = 0;
4709 cum->warn_avx = 0;
4710 cum->warn_sse = 0;
4711 cum->warn_mmx = 0;
4712 return;
4715 /* Use ecx and edx registers if function has fastcall attribute,
4716 else look for regparm information. */
4717 if (fntype)
4719 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
4721 cum->nregs = 2;
4722 cum->fastcall = 1;
4724 else
4725 cum->nregs = ix86_function_regparm (fntype, fndecl);
4728 /* Set up the number of SSE registers used for passing SFmode
4729 and DFmode arguments. Warn for mismatching ABI. */
4730 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
4734 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
4735 But in the case of vector types, it is some vector mode.
4737 When only some of our vector isa extensions are enabled, there
4738 are some modes for which vector_mode_supported_p is false.  For these
4739 modes, the generic vector support in gcc will choose some non-vector mode
4740 in order to implement the type.  By computing the natural mode, we'll
4741 select the proper ABI location for the operand and not depend on whatever
4742 the middle-end decides to do with these vector types.
4744 The middle-end can't deal with vector types > 16 bytes.  In this
4745 case, we return the original mode and warn about the ABI change if CUM
4746 isn't NULL. */
4748 static enum machine_mode
4749 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
4751 enum machine_mode mode = TYPE_MODE (type);
4753 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
4755 HOST_WIDE_INT size = int_size_in_bytes (type);
4756 if ((size == 8 || size == 16 || size == 32)
4757 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
4758 && TYPE_VECTOR_SUBPARTS (type) > 1)
4760 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
4762 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4763 mode = MIN_MODE_VECTOR_FLOAT;
4764 else
4765 mode = MIN_MODE_VECTOR_INT;
4767 /* Get the mode which has this inner mode and number of units. */
4768 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
4769 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
4770 && GET_MODE_INNER (mode) == innermode)
4772 if (size == 32 && !TARGET_AVX)
4774 static bool warnedavx;
4776 if (cum
4777 && !warnedavx
4778 && cum->warn_avx)
4780 warnedavx = true;
4781 warning (0, "AVX vector argument without AVX "
4782 "enabled changes the ABI");
4784 return TYPE_MODE (type);
4786 else
4787 return mode;
4790 gcc_unreachable ();
4794 return mode;
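/* A sketch of the natural-mode computation above (hypothetical typedefs):

     typedef int v4si __attribute__((vector_size (16)));
     typedef float v8sf __attribute__((vector_size (32)));

   A v4si argument is classified with mode V4SImode for ABI purposes even if
   the generic vector code had to fall back to a non-vector mode, and a
   32-byte v8sf argument without AVX keeps its original mode and triggers
   the "changes the ABI" warning above.  */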
4797 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
4798 this may not agree with the mode that the type system has chosen for the
4799 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
4800 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
4802 static rtx
4803 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
4804 unsigned int regno)
4806 rtx tmp;
4808 if (orig_mode != BLKmode)
4809 tmp = gen_rtx_REG (orig_mode, regno);
4810 else
4812 tmp = gen_rtx_REG (mode, regno);
4813 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
4814 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
4817 return tmp;
4820 /* x86-64 register passing implementation.  See the x86-64 ABI for details.
4821 The goal of this code is to classify each 8-byte chunk of the incoming
4822 argument by register class and assign registers accordingly. */
4824 /* Return the union class of CLASS1 and CLASS2.
4825 See the x86-64 PS ABI for details. */
4827 static enum x86_64_reg_class
4828 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
4830 /* Rule #1: If both classes are equal, this is the resulting class. */
4831 if (class1 == class2)
4832 return class1;
4834 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
4835 the other class. */
4836 if (class1 == X86_64_NO_CLASS)
4837 return class2;
4838 if (class2 == X86_64_NO_CLASS)
4839 return class1;
4841 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
4842 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
4843 return X86_64_MEMORY_CLASS;
4845 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
4846 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
4847 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
4848 return X86_64_INTEGERSI_CLASS;
4849 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
4850 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
4851 return X86_64_INTEGER_CLASS;
4853 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
4854 MEMORY is used. */
4855 if (class1 == X86_64_X87_CLASS
4856 || class1 == X86_64_X87UP_CLASS
4857 || class1 == X86_64_COMPLEX_X87_CLASS
4858 || class2 == X86_64_X87_CLASS
4859 || class2 == X86_64_X87UP_CLASS
4860 || class2 == X86_64_COMPLEX_X87_CLASS)
4861 return X86_64_MEMORY_CLASS;
4863 /* Rule #6: Otherwise class SSE is used. */
4864 return X86_64_SSE_CLASS;
4867 /* Classify the argument of type TYPE and mode MODE.
4868 CLASSES will be filled by the register class used to pass each word
4869 of the operand. The number of words is returned. In case the parameter
4870 should be passed in memory, 0 is returned. As a special case for zero
4871 sized containers, classes[0] will be NO_CLASS and 1 is returned.
4873 BIT_OFFSET is used internally for handling records and specifies the
4874 offset in bits modulo 256 to avoid overflow cases.
4876 See the x86-64 PS ABI for details.
4879 static int
4880 classify_argument (enum machine_mode mode, const_tree type,
4881 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
4883 HOST_WIDE_INT bytes =
4884 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
4885 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4887 /* Variable sized entities are always passed/returned in memory. */
4888 if (bytes < 0)
4889 return 0;
4891 if (mode != VOIDmode
4892 && targetm.calls.must_pass_in_stack (mode, type))
4893 return 0;
4895 if (type && AGGREGATE_TYPE_P (type))
4897 int i;
4898 tree field;
4899 enum x86_64_reg_class subclasses[MAX_CLASSES];
4901 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
4902 if (bytes > 32)
4903 return 0;
4905 for (i = 0; i < words; i++)
4906 classes[i] = X86_64_NO_CLASS;
4908 /* Zero-sized arrays or structures are NO_CLASS.  We return 0 to
4909 signal the memory class, so handle it as a special case. */
4910 if (!words)
4912 classes[0] = X86_64_NO_CLASS;
4913 return 1;
4916 /* Classify each field of record and merge classes. */
4917 switch (TREE_CODE (type))
4919 case RECORD_TYPE:
4920 /* And now merge the fields of structure. */
4921 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4923 if (TREE_CODE (field) == FIELD_DECL)
4925 int num;
4927 if (TREE_TYPE (field) == error_mark_node)
4928 continue;
4930 /* Bitfields are always classified as integer. Handle them
4931 early, since later code would consider them to be
4932 misaligned integers. */
4933 if (DECL_BIT_FIELD (field))
4935 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
4936 i < ((int_bit_position (field) + (bit_offset % 64))
4937 + tree_low_cst (DECL_SIZE (field), 0)
4938 + 63) / 8 / 8; i++)
4939 classes[i] =
4940 merge_classes (X86_64_INTEGER_CLASS,
4941 classes[i]);
4943 else
4945 int pos;
4947 type = TREE_TYPE (field);
4949 /* Flexible array member is ignored. */
4950 if (TYPE_MODE (type) == BLKmode
4951 && TREE_CODE (type) == ARRAY_TYPE
4952 && TYPE_SIZE (type) == NULL_TREE
4953 && TYPE_DOMAIN (type) != NULL_TREE
4954 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
4955 == NULL_TREE))
4957 static bool warned;
4959 if (!warned && warn_psabi)
4961 warned = true;
4962 inform (input_location,
4963 "The ABI of passing struct with"
4964 " a flexible array member has"
4965 " changed in GCC 4.4");
4967 continue;
4969 num = classify_argument (TYPE_MODE (type), type,
4970 subclasses,
4971 (int_bit_position (field)
4972 + bit_offset) % 256);
4973 if (!num)
4974 return 0;
4975 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
4976 for (i = 0; i < num && (i + pos) < words; i++)
4977 classes[i + pos] =
4978 merge_classes (subclasses[i], classes[i + pos]);
4982 break;
4984 case ARRAY_TYPE:
4985 /* Arrays are handled as small records. */
4987 int num;
4988 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
4989 TREE_TYPE (type), subclasses, bit_offset);
4990 if (!num)
4991 return 0;
4993 /* The partial classes are now full classes. */
4994 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
4995 subclasses[0] = X86_64_SSE_CLASS;
4996 if (subclasses[0] == X86_64_INTEGERSI_CLASS
4997 && !((bit_offset % 64) == 0 && bytes == 4))
4998 subclasses[0] = X86_64_INTEGER_CLASS;
5000 for (i = 0; i < words; i++)
5001 classes[i] = subclasses[i % num];
5003 break;
5005 case UNION_TYPE:
5006 case QUAL_UNION_TYPE:
5007 /* Unions are similar to RECORD_TYPE but the offset is always 0.  */
5009 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5011 if (TREE_CODE (field) == FIELD_DECL)
5013 int num;
5015 if (TREE_TYPE (field) == error_mark_node)
5016 continue;
5018 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5019 TREE_TYPE (field), subclasses,
5020 bit_offset);
5021 if (!num)
5022 return 0;
5023 for (i = 0; i < num; i++)
5024 classes[i] = merge_classes (subclasses[i], classes[i]);
5027 break;
5029 default:
5030 gcc_unreachable ();
5033 if (words > 2)
5035 /* When the size is > 16 bytes, if the first class isn't
5036 X86_64_SSE_CLASS or any other one isn't
5037 X86_64_SSEUP_CLASS, everything should be passed in
5038 memory. */
5039 if (classes[0] != X86_64_SSE_CLASS)
5040 return 0;
5042 for (i = 1; i < words; i++)
5043 if (classes[i] != X86_64_SSEUP_CLASS)
5044 return 0;
5047 /* Final merger cleanup. */
5048 for (i = 0; i < words; i++)
5050 /* If one class is MEMORY, everything should be passed in
5051 memory. */
5052 if (classes[i] == X86_64_MEMORY_CLASS)
5053 return 0;
5055 /* The X86_64_SSEUP_CLASS should always be preceded by
5056 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5057 if (classes[i] == X86_64_SSEUP_CLASS
5058 && classes[i - 1] != X86_64_SSE_CLASS
5059 && classes[i - 1] != X86_64_SSEUP_CLASS)
5061 /* The first one should never be X86_64_SSEUP_CLASS. */
5062 gcc_assert (i != 0);
5063 classes[i] = X86_64_SSE_CLASS;
5066 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5067 everything should be passed in memory. */
5068 if (classes[i] == X86_64_X87UP_CLASS
5069 && (classes[i - 1] != X86_64_X87_CLASS))
5071 static bool warned;
5073 /* The first one should never be X86_64_X87UP_CLASS. */
5074 gcc_assert (i != 0);
5075 if (!warned && warn_psabi)
5077 warned = true;
5078 inform (input_location,
5079 "The ABI of passing union with long double"
5080 " has changed in GCC 4.4");
5082 return 0;
5085 return words;
5088 /* Compute the alignment needed.  We align all types to natural boundaries,
5089 with the exception of XFmode, which is aligned to 64 bits. */
5090 if (mode != VOIDmode && mode != BLKmode)
5092 int mode_alignment = GET_MODE_BITSIZE (mode);
5094 if (mode == XFmode)
5095 mode_alignment = 128;
5096 else if (mode == XCmode)
5097 mode_alignment = 256;
5098 if (COMPLEX_MODE_P (mode))
5099 mode_alignment /= 2;
5100 /* Misaligned fields are always returned in memory. */
5101 if (bit_offset % mode_alignment)
5102 return 0;
5105 /* For V1xx modes, just use the base mode. */
5106 if (VECTOR_MODE_P (mode) && mode != V1DImode
5107 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5108 mode = GET_MODE_INNER (mode);
5110 /* Classification of atomic types. */
5111 switch (mode)
5113 case SDmode:
5114 case DDmode:
5115 classes[0] = X86_64_SSE_CLASS;
5116 return 1;
5117 case TDmode:
5118 classes[0] = X86_64_SSE_CLASS;
5119 classes[1] = X86_64_SSEUP_CLASS;
5120 return 2;
5121 case DImode:
5122 case SImode:
5123 case HImode:
5124 case QImode:
5125 case CSImode:
5126 case CHImode:
5127 case CQImode:
5129 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5131 if (size <= 32)
5133 classes[0] = X86_64_INTEGERSI_CLASS;
5134 return 1;
5136 else if (size <= 64)
5138 classes[0] = X86_64_INTEGER_CLASS;
5139 return 1;
5141 else if (size <= 64+32)
5143 classes[0] = X86_64_INTEGER_CLASS;
5144 classes[1] = X86_64_INTEGERSI_CLASS;
5145 return 2;
5147 else if (size <= 64+64)
5149 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5150 return 2;
5152 else
5153 gcc_unreachable ();
5155 case CDImode:
5156 case TImode:
5157 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5158 return 2;
5159 case COImode:
5160 case OImode:
5161 /* OImode shouldn't be used directly. */
5162 gcc_unreachable ();
5163 case CTImode:
5164 return 0;
5165 case SFmode:
5166 if (!(bit_offset % 64))
5167 classes[0] = X86_64_SSESF_CLASS;
5168 else
5169 classes[0] = X86_64_SSE_CLASS;
5170 return 1;
5171 case DFmode:
5172 classes[0] = X86_64_SSEDF_CLASS;
5173 return 1;
5174 case XFmode:
5175 classes[0] = X86_64_X87_CLASS;
5176 classes[1] = X86_64_X87UP_CLASS;
5177 return 2;
5178 case TFmode:
5179 classes[0] = X86_64_SSE_CLASS;
5180 classes[1] = X86_64_SSEUP_CLASS;
5181 return 2;
5182 case SCmode:
5183 classes[0] = X86_64_SSE_CLASS;
5184 return 1;
5185 case DCmode:
5186 classes[0] = X86_64_SSEDF_CLASS;
5187 classes[1] = X86_64_SSEDF_CLASS;
5188 return 2;
5189 case XCmode:
5190 classes[0] = X86_64_COMPLEX_X87_CLASS;
5191 return 1;
5192 case TCmode:
5193 /* This mode is larger than 16 bytes. */
5194 return 0;
5195 case V8SFmode:
5196 case V8SImode:
5197 case V32QImode:
5198 case V16HImode:
5199 case V4DFmode:
5200 case V4DImode:
5201 classes[0] = X86_64_SSE_CLASS;
5202 classes[1] = X86_64_SSEUP_CLASS;
5203 classes[2] = X86_64_SSEUP_CLASS;
5204 classes[3] = X86_64_SSEUP_CLASS;
5205 return 4;
5206 case V4SFmode:
5207 case V4SImode:
5208 case V16QImode:
5209 case V8HImode:
5210 case V2DFmode:
5211 case V2DImode:
5212 classes[0] = X86_64_SSE_CLASS;
5213 classes[1] = X86_64_SSEUP_CLASS;
5214 return 2;
5215 case V1DImode:
5216 case V2SFmode:
5217 case V2SImode:
5218 case V4HImode:
5219 case V8QImode:
5220 classes[0] = X86_64_SSE_CLASS;
5221 return 1;
5222 case BLKmode:
5223 case VOIDmode:
5224 return 0;
5225 default:
5226 gcc_assert (VECTOR_MODE_P (mode));
5228 if (bytes > 16)
5229 return 0;
5231 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5233 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5234 classes[0] = X86_64_INTEGERSI_CLASS;
5235 else
5236 classes[0] = X86_64_INTEGER_CLASS;
5237 classes[1] = X86_64_INTEGER_CLASS;
5238 return 1 + (bytes > 8);
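/* As a rough illustration of the scheme above (a sketch assuming the SysV
   x86-64 ABI and default field layout): a hypothetical

     struct s { double d; int i; };   -- 16 bytes, two eightbytes

   classifies as { X86_64_SSEDF_CLASS, X86_64_INTEGERSI_CLASS }, so d is
   passed in an SSE register and i in a general-purpose register.  */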
5242 /* Examine the argument and set the number of registers required in each
5243 class. Return 0 iff the parameter should be passed in memory. */
5244 static int
5245 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5246 int *int_nregs, int *sse_nregs)
5248 enum x86_64_reg_class regclass[MAX_CLASSES];
5249 int n = classify_argument (mode, type, regclass, 0);
5251 *int_nregs = 0;
5252 *sse_nregs = 0;
5253 if (!n)
5254 return 0;
5255 for (n--; n >= 0; n--)
5256 switch (regclass[n])
5258 case X86_64_INTEGER_CLASS:
5259 case X86_64_INTEGERSI_CLASS:
5260 (*int_nregs)++;
5261 break;
5262 case X86_64_SSE_CLASS:
5263 case X86_64_SSESF_CLASS:
5264 case X86_64_SSEDF_CLASS:
5265 (*sse_nregs)++;
5266 break;
5267 case X86_64_NO_CLASS:
5268 case X86_64_SSEUP_CLASS:
5269 break;
5270 case X86_64_X87_CLASS:
5271 case X86_64_X87UP_CLASS:
5272 if (!in_return)
5273 return 0;
5274 break;
5275 case X86_64_COMPLEX_X87_CLASS:
5276 return in_return ? 2 : 0;
5277 case X86_64_MEMORY_CLASS:
5278 gcc_unreachable ();
5280 return 1;
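/* For the hypothetical struct { double d; int i; } sketched above,
   examine_argument reports *int_nregs == 1 and *sse_nregs == 1. An XFmode
   long double, by contrast, classifies as X87/X87UP, so it is accepted
   here only for return values and forces memory when used as an argument.  */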
5283 /* Construct container for the argument used by GCC interface. See
5284 FUNCTION_ARG for the detailed description. */
5286 static rtx
5287 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5288 const_tree type, int in_return, int nintregs, int nsseregs,
5289 const int *intreg, int sse_regno)
5291 /* The following variables hold the static issued_error state. */
5292 static bool issued_sse_arg_error;
5293 static bool issued_sse_ret_error;
5294 static bool issued_x87_ret_error;
5296 enum machine_mode tmpmode;
5297 int bytes =
5298 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5299 enum x86_64_reg_class regclass[MAX_CLASSES];
5300 int n;
5301 int i;
5302 int nexps = 0;
5303 int needed_sseregs, needed_intregs;
5304 rtx exp[MAX_CLASSES];
5305 rtx ret;
5307 n = classify_argument (mode, type, regclass, 0);
5308 if (!n)
5309 return NULL;
5310 if (!examine_argument (mode, type, in_return, &needed_intregs,
5311 &needed_sseregs))
5312 return NULL;
5313 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5314 return NULL;
5316 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5317 some less clueful developer tries to use floating-point anyway. */
5318 if (needed_sseregs && !TARGET_SSE)
5320 if (in_return)
5322 if (!issued_sse_ret_error)
5324 error ("SSE register return with SSE disabled");
5325 issued_sse_ret_error = true;
5328 else if (!issued_sse_arg_error)
5330 error ("SSE register argument with SSE disabled");
5331 issued_sse_arg_error = true;
5333 return NULL;
5336 /* Likewise, error if the ABI requires us to return values in the
5337 x87 registers and the user specified -mno-80387. */
5338 if (!TARGET_80387 && in_return)
5339 for (i = 0; i < n; i++)
5340 if (regclass[i] == X86_64_X87_CLASS
5341 || regclass[i] == X86_64_X87UP_CLASS
5342 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5344 if (!issued_x87_ret_error)
5346 error ("x87 register return with x87 disabled");
5347 issued_x87_ret_error = true;
5349 return NULL;
5352 /* First construct simple cases. Avoid SCmode, since we want to use
5353 a single register to pass this type. */
5354 if (n == 1 && mode != SCmode)
5355 switch (regclass[0])
5357 case X86_64_INTEGER_CLASS:
5358 case X86_64_INTEGERSI_CLASS:
5359 return gen_rtx_REG (mode, intreg[0]);
5360 case X86_64_SSE_CLASS:
5361 case X86_64_SSESF_CLASS:
5362 case X86_64_SSEDF_CLASS:
5363 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
5364 case X86_64_X87_CLASS:
5365 case X86_64_COMPLEX_X87_CLASS:
5366 return gen_rtx_REG (mode, FIRST_STACK_REG);
5367 case X86_64_NO_CLASS:
5368 /* Zero sized array, struct or class. */
5369 return NULL;
5370 default:
5371 gcc_unreachable ();
5373 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5374 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5375 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5376 if (n == 4
5377 && regclass[0] == X86_64_SSE_CLASS
5378 && regclass[1] == X86_64_SSEUP_CLASS
5379 && regclass[2] == X86_64_SSEUP_CLASS
5380 && regclass[3] == X86_64_SSEUP_CLASS
5381 && mode != BLKmode)
5382 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5384 if (n == 2
5385 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5386 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5387 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5388 && regclass[1] == X86_64_INTEGER_CLASS
5389 && (mode == CDImode || mode == TImode || mode == TFmode)
5390 && intreg[0] + 1 == intreg[1])
5391 return gen_rtx_REG (mode, intreg[0]);
5393 /* Otherwise figure out the entries of the PARALLEL. */
5394 for (i = 0; i < n; i++)
5396 int pos;
5398 switch (regclass[i])
5400 case X86_64_NO_CLASS:
5401 break;
5402 case X86_64_INTEGER_CLASS:
5403 case X86_64_INTEGERSI_CLASS:
5404 /* Merge TImodes on aligned occasions here too. */
5405 if (i * 8 + 8 > bytes)
5406 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5407 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5408 tmpmode = SImode;
5409 else
5410 tmpmode = DImode;
5411 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
5412 if (tmpmode == BLKmode)
5413 tmpmode = DImode;
5414 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5415 gen_rtx_REG (tmpmode, *intreg),
5416 GEN_INT (i*8));
5417 intreg++;
5418 break;
5419 case X86_64_SSESF_CLASS:
5420 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5421 gen_rtx_REG (SFmode,
5422 SSE_REGNO (sse_regno)),
5423 GEN_INT (i*8));
5424 sse_regno++;
5425 break;
5426 case X86_64_SSEDF_CLASS:
5427 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5428 gen_rtx_REG (DFmode,
5429 SSE_REGNO (sse_regno)),
5430 GEN_INT (i*8));
5431 sse_regno++;
5432 break;
5433 case X86_64_SSE_CLASS:
5434 pos = i;
5435 switch (n)
5437 case 1:
5438 tmpmode = DImode;
5439 break;
5440 case 2:
5441 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5443 tmpmode = TImode;
5444 i++;
5446 else
5447 tmpmode = DImode;
5448 break;
5449 case 4:
5450 gcc_assert (i == 0
5451 && regclass[1] == X86_64_SSEUP_CLASS
5452 && regclass[2] == X86_64_SSEUP_CLASS
5453 && regclass[3] == X86_64_SSEUP_CLASS);
5454 tmpmode = OImode;
5455 i += 3;
5456 break;
5457 default:
5458 gcc_unreachable ();
5460 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5461 gen_rtx_REG (tmpmode,
5462 SSE_REGNO (sse_regno)),
5463 GEN_INT (pos*8));
5464 sse_regno++;
5465 break;
5466 default:
5467 gcc_unreachable ();
5471 /* Empty aligned struct, union or class. */
5472 if (nexps == 0)
5473 return NULL;
5475 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5476 for (i = 0; i < nexps; i++)
5477 XVECEXP (ret, 0, i) = exp [i];
5478 return ret;
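/* Continuing the sketch from classify_argument: for a first argument of
   type struct { double d; int i; }, the PARALLEL built above looks roughly
   like

     (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                (expr_list (reg:SI di) (const_int 8))])

   i.e. the double travels in %xmm0 and the int in %edi.  */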
5481 /* Update the data in CUM to advance over an argument of mode MODE
5482 and data type TYPE. (TYPE is null for libcalls where that information
5483 may not be available.) */
5485 static void
5486 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5487 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5489 switch (mode)
5491 default:
5492 break;
5494 case BLKmode:
5495 if (bytes < 0)
5496 break;
5497 /* FALLTHRU */
5499 case DImode:
5500 case SImode:
5501 case HImode:
5502 case QImode:
5503 cum->words += words;
5504 cum->nregs -= words;
5505 cum->regno += words;
5507 if (cum->nregs <= 0)
5509 cum->nregs = 0;
5510 cum->regno = 0;
5512 break;
5514 case OImode:
5515 /* OImode shouldn't be used directly. */
5516 gcc_unreachable ();
5518 case DFmode:
5519 if (cum->float_in_sse < 2)
5520 break;
5521 case SFmode:
5522 if (cum->float_in_sse < 1)
5523 break;
5524 /* FALLTHRU */
5526 case V8SFmode:
5527 case V8SImode:
5528 case V32QImode:
5529 case V16HImode:
5530 case V4DFmode:
5531 case V4DImode:
5532 case TImode:
5533 case V16QImode:
5534 case V8HImode:
5535 case V4SImode:
5536 case V2DImode:
5537 case V4SFmode:
5538 case V2DFmode:
5539 if (!type || !AGGREGATE_TYPE_P (type))
5541 cum->sse_words += words;
5542 cum->sse_nregs -= 1;
5543 cum->sse_regno += 1;
5544 if (cum->sse_nregs <= 0)
5546 cum->sse_nregs = 0;
5547 cum->sse_regno = 0;
5550 break;
5552 case V8QImode:
5553 case V4HImode:
5554 case V2SImode:
5555 case V2SFmode:
5556 case V1DImode:
5557 if (!type || !AGGREGATE_TYPE_P (type))
5559 cum->mmx_words += words;
5560 cum->mmx_nregs -= 1;
5561 cum->mmx_regno += 1;
5562 if (cum->mmx_nregs <= 0)
5564 cum->mmx_nregs = 0;
5565 cum->mmx_regno = 0;
5568 break;
5572 static void
5573 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5574 tree type, HOST_WIDE_INT words, int named)
5576 int int_nregs, sse_nregs;
5578 /* Unnamed 256bit vector mode parameters are passed on stack. */
5579 if (!named && VALID_AVX256_REG_MODE (mode))
5580 return;
5582 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5583 cum->words += words;
5584 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5586 cum->nregs -= int_nregs;
5587 cum->sse_nregs -= sse_nregs;
5588 cum->regno += int_nregs;
5589 cum->sse_regno += sse_nregs;
5591 else
5592 cum->words += words;
5595 static void
5596 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5597 HOST_WIDE_INT words)
5599 /* Otherwise, this should be passed indirectly. */
5600 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5602 cum->words += words;
5603 if (cum->nregs > 0)
5605 cum->nregs -= 1;
5606 cum->regno += 1;
5610 void
5611 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5612 tree type, int named)
5614 HOST_WIDE_INT bytes, words;
5616 if (mode == BLKmode)
5617 bytes = int_size_in_bytes (type);
5618 else
5619 bytes = GET_MODE_SIZE (mode);
5620 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5622 if (type)
5623 mode = type_natural_mode (type, NULL);
5625 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5626 function_arg_advance_ms_64 (cum, bytes, words);
5627 else if (TARGET_64BIT)
5628 function_arg_advance_64 (cum, mode, type, words, named);
5629 else
5630 function_arg_advance_32 (cum, mode, type, bytes, words);
5633 /* Define where to put the arguments to a function.
5634 Value is zero to push the argument on the stack,
5635 or a hard register in which to store the argument.
5637 MODE is the argument's machine mode.
5638 TYPE is the data type of the argument (as a tree).
5639 This is null for libcalls where that information may
5640 not be available.
5641 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5642 the preceding args and about the function being called.
5643 NAMED is nonzero if this argument is a named parameter
5644 (otherwise it is an extra parameter matching an ellipsis). */
5646 static rtx
5647 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5648 enum machine_mode orig_mode, tree type,
5649 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5651 static bool warnedsse, warnedmmx;
5653 /* Avoid the AL settings for the Unix64 ABI. */
5654 if (mode == VOIDmode)
5655 return constm1_rtx;
5657 switch (mode)
5659 default:
5660 break;
5662 case BLKmode:
5663 if (bytes < 0)
5664 break;
5665 /* FALLTHRU */
5666 case DImode:
5667 case SImode:
5668 case HImode:
5669 case QImode:
5670 if (words <= cum->nregs)
5672 int regno = cum->regno;
5674 /* Fastcall allocates the first two DWORD (SImode) or
5675 smaller arguments to ECX and EDX if they aren't
5676 aggregate types. */
5677 if (cum->fastcall)
5679 if (mode == BLKmode
5680 || mode == DImode
5681 || (type && AGGREGATE_TYPE_P (type)))
5682 break;
5684 /* ECX, not EAX, is the first allocated register. */
5685 if (regno == AX_REG)
5686 regno = CX_REG;
5688 return gen_rtx_REG (mode, regno);
5690 break;
5692 case DFmode:
5693 if (cum->float_in_sse < 2)
5694 break;
5695 case SFmode:
5696 if (cum->float_in_sse < 1)
5697 break;
5698 /* FALLTHRU */
5699 case TImode:
5700 /* In 32bit, we pass TImode in xmm registers. */
5701 case V16QImode:
5702 case V8HImode:
5703 case V4SImode:
5704 case V2DImode:
5705 case V4SFmode:
5706 case V2DFmode:
5707 if (!type || !AGGREGATE_TYPE_P (type))
5709 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
5711 warnedsse = true;
5712 warning (0, "SSE vector argument without SSE enabled "
5713 "changes the ABI");
5715 if (cum->sse_nregs)
5716 return gen_reg_or_parallel (mode, orig_mode,
5717 cum->sse_regno + FIRST_SSE_REG);
5719 break;
5721 case OImode:
5722 /* OImode shouldn't be used directly. */
5723 gcc_unreachable ();
5725 case V8SFmode:
5726 case V8SImode:
5727 case V32QImode:
5728 case V16HImode:
5729 case V4DFmode:
5730 case V4DImode:
5731 if (!type || !AGGREGATE_TYPE_P (type))
5733 if (cum->sse_nregs)
5734 return gen_reg_or_parallel (mode, orig_mode,
5735 cum->sse_regno + FIRST_SSE_REG);
5737 break;
5739 case V8QImode:
5740 case V4HImode:
5741 case V2SImode:
5742 case V2SFmode:
5743 case V1DImode:
5744 if (!type || !AGGREGATE_TYPE_P (type))
5746 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
5748 warnedmmx = true;
5749 warning (0, "MMX vector argument without MMX enabled "
5750 "changes the ABI");
5752 if (cum->mmx_nregs)
5753 return gen_reg_or_parallel (mode, orig_mode,
5754 cum->mmx_regno + FIRST_MMX_REG);
5756 break;
5759 return NULL_RTX;
5762 static rtx
5763 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5764 enum machine_mode orig_mode, tree type, int named)
5766 /* Handle a hidden AL argument containing the number of SSE registers used
5767 for varargs x86-64 functions. */
5768 if (mode == VOIDmode)
5769 return GEN_INT (cum->maybe_vaarg
5770 ? (cum->sse_nregs < 0
5771 ? (cum->call_abi == ix86_abi
5772 ? SSE_REGPARM_MAX
5773 : (ix86_abi != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
5774 : X64_SSE_REGPARM_MAX))
5775 : cum->sse_regno)
5776 : -1);
5778 switch (mode)
5780 default:
5781 break;
5783 case V8SFmode:
5784 case V8SImode:
5785 case V32QImode:
5786 case V16HImode:
5787 case V4DFmode:
5788 case V4DImode:
5789 /* Unnamed 256bit vector mode parameters are passed on stack. */
5790 if (!named)
5791 return NULL;
5792 break;
5795 return construct_container (mode, orig_mode, type, 0, cum->nregs,
5796 cum->sse_nregs,
5797 &x86_64_int_parameter_registers [cum->regno],
5798 cum->sse_regno);
5801 static rtx
5802 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5803 enum machine_mode orig_mode, int named,
5804 HOST_WIDE_INT bytes)
5806 unsigned int regno;
5808 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
5809 We use a value of -2 to specify that the current function call is MS ABI. */
5810 if (mode == VOIDmode)
5811 return GEN_INT (-2);
5813 /* If we've run out of registers, it goes on the stack. */
5814 if (cum->nregs == 0)
5815 return NULL_RTX;
5817 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
5819 /* Only floating point modes are passed in anything but integer regs. */
5820 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
5822 if (named)
5823 regno = cum->regno + FIRST_SSE_REG;
5824 else
5826 rtx t1, t2;
5828 /* Unnamed floating parameters are passed in both the
5829 SSE and integer registers. */
5830 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
5831 t2 = gen_rtx_REG (mode, regno);
5832 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
5833 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
5834 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
5837 /* Handle aggregate types passed in registers. */
5838 if (orig_mode == BLKmode)
5840 if (bytes > 0 && bytes <= 8)
5841 mode = (bytes > 4 ? DImode : SImode);
5842 if (mode == BLKmode)
5843 mode = DImode;
5846 return gen_reg_or_parallel (mode, orig_mode, regno);
5849 rtx
5850 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
5851 tree type, int named)
5853 enum machine_mode mode = omode;
5854 HOST_WIDE_INT bytes, words;
5856 if (mode == BLKmode)
5857 bytes = int_size_in_bytes (type);
5858 else
5859 bytes = GET_MODE_SIZE (mode);
5860 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5862 /* To simplify the code below, represent vector types with a vector mode
5863 even if MMX/SSE are not active. */
5864 if (type && TREE_CODE (type) == VECTOR_TYPE)
5865 mode = type_natural_mode (type, cum);
5867 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5868 return function_arg_ms_64 (cum, mode, omode, named, bytes);
5869 else if (TARGET_64BIT)
5870 return function_arg_64 (cum, mode, omode, type, named);
5871 else
5872 return function_arg_32 (cum, mode, omode, type, bytes, words);
5875 /* A C expression that indicates when an argument must be passed by
5876 reference. If nonzero for an argument, a copy of that argument is
5877 made in memory and a pointer to the argument is passed instead of
5878 the argument itself. The pointer is passed in whatever way is
5879 appropriate for passing a pointer to that type. */
5881 static bool
5882 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5883 enum machine_mode mode ATTRIBUTE_UNUSED,
5884 const_tree type, bool named ATTRIBUTE_UNUSED)
5886 /* See Windows x64 Software Convention. */
5887 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5889 int msize = (int) GET_MODE_SIZE (mode);
5890 if (type)
5892 /* Arrays are passed by reference. */
5893 if (TREE_CODE (type) == ARRAY_TYPE)
5894 return true;
5896 if (AGGREGATE_TYPE_P (type))
5898 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
5899 are passed by reference. */
5900 msize = int_size_in_bytes (type);
5904 /* __m128 is passed by reference. */
5905 switch (msize) {
5906 case 1: case 2: case 4: case 8:
5907 break;
5908 default:
5909 return true;
5912 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
5913 return 1;
5915 return 0;
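/* Rough examples of the rules above (a sketch for the MS x64 side): an
   __m128 argument (16 bytes) and a 12-byte struct are both passed by
   reference, while an 8-byte struct is passed by value in a register.
   On the SysV 64-bit side only variable-sized types take this path.  */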
5918 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
5919 ABI. */
5920 static bool
5921 contains_aligned_value_p (tree type)
5923 enum machine_mode mode = TYPE_MODE (type);
5924 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
5925 || mode == TDmode
5926 || mode == TFmode
5927 || mode == TCmode)
5928 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
5929 return true;
5930 if (TYPE_ALIGN (type) < 128)
5931 return false;
5933 if (AGGREGATE_TYPE_P (type))
5935 /* Walk the aggregates recursively. */
5936 switch (TREE_CODE (type))
5938 case RECORD_TYPE:
5939 case UNION_TYPE:
5940 case QUAL_UNION_TYPE:
5942 tree field;
5944 /* Walk all the structure fields. */
5945 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5947 if (TREE_CODE (field) == FIELD_DECL
5948 && contains_aligned_value_p (TREE_TYPE (field)))
5949 return true;
5951 break;
5954 case ARRAY_TYPE:
5955 /* Just for use if some languages pass arrays by value. */
5956 if (contains_aligned_value_p (TREE_TYPE (type)))
5957 return true;
5958 break;
5960 default:
5961 gcc_unreachable ();
5964 return false;
5967 /* Gives the alignment boundary, in bits, of an argument with the
5968 specified mode and type. */
5970 int
5971 ix86_function_arg_boundary (enum machine_mode mode, tree type)
5973 int align;
5974 if (type)
5976 /* Since the canonical type is used for the call, convert
5977 to the canonical type if needed. */
5978 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
5979 type = TYPE_CANONICAL (type);
5980 align = TYPE_ALIGN (type);
5982 else
5983 align = GET_MODE_ALIGNMENT (mode);
5984 if (align < PARM_BOUNDARY)
5985 align = PARM_BOUNDARY;
5986 /* In 32bit, only _Decimal128 and __float128 are aligned to their
5987 natural boundaries. */
5988 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
5990 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
5991 make an exception for SSE modes since these require 128bit
5992 alignment.
5994 The handling here differs from field_alignment. ICC aligns MMX
5995 arguments to 4 byte boundaries, while structure fields are aligned
5996 to 8 byte boundaries. */
5997 if (!type)
5999 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6000 align = PARM_BOUNDARY;
6002 else
6004 if (!contains_aligned_value_p (type))
6005 align = PARM_BOUNDARY;
6008 if (align > BIGGEST_ALIGNMENT)
6009 align = BIGGEST_ALIGNMENT;
6010 return align;
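/* For instance (a sketch of the 32-bit rules above): a plain int argument
   ends up at PARM_BOUNDARY (32 bits), while an __m128 argument keeps its
   natural 128-bit boundary because contains_aligned_value_p holds for it
   when SSE is enabled.  */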
6013 /* Return true if N is a possible register number of function value. */
6015 bool
6016 ix86_function_value_regno_p (int regno)
6018 switch (regno)
6020 case 0:
6021 return true;
6023 case FIRST_FLOAT_REG:
6024 /* TODO: The function should depend on current function ABI but
6025 builtins.c would need updating then. Therefore we use the
6026 default ABI. */
6027 if (TARGET_64BIT && ix86_abi == MS_ABI)
6028 return false;
6029 return TARGET_FLOAT_RETURNS_IN_80387;
6031 case FIRST_SSE_REG:
6032 return TARGET_SSE;
6034 case FIRST_MMX_REG:
6035 if (TARGET_MACHO || TARGET_64BIT)
6036 return false;
6037 return TARGET_MMX;
6040 return false;
6043 /* Define how to find the value returned by a function.
6044 VALTYPE is the data type of the value (as a tree).
6045 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6046 otherwise, FUNC is 0. */
6048 static rtx
6049 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6050 const_tree fntype, const_tree fn)
6052 unsigned int regno;
6054 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6055 we normally prevent this case when mmx is not available. However
6056 some ABIs may require the result to be returned like DImode. */
6057 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6058 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6060 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6061 we prevent this case when sse is not available. However some ABIs
6062 may require the result to be returned like integer TImode. */
6063 else if (mode == TImode
6064 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6065 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6067 /* 32-byte vector modes in %ymm0. */
6068 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6069 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6071 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6072 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6073 regno = FIRST_FLOAT_REG;
6074 else
6075 /* Most things go in %eax. */
6076 regno = AX_REG;
6078 /* Override FP return register with %xmm0 for local functions when
6079 SSE math is enabled or for functions with sseregparm attribute. */
6080 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6082 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6083 if ((sse_level >= 1 && mode == SFmode)
6084 || (sse_level == 2 && mode == DFmode))
6085 regno = FIRST_SSE_REG;
6088 /* OImode shouldn't be used directly. */
6089 gcc_assert (mode != OImode);
6091 return gen_rtx_REG (orig_mode, regno);
6094 static rtx
6095 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6096 const_tree valtype)
6098 rtx ret;
6100 /* Handle libcalls, which don't provide a type node. */
6101 if (valtype == NULL)
6103 switch (mode)
6105 case SFmode:
6106 case SCmode:
6107 case DFmode:
6108 case DCmode:
6109 case TFmode:
6110 case SDmode:
6111 case DDmode:
6112 case TDmode:
6113 return gen_rtx_REG (mode, FIRST_SSE_REG);
6114 case XFmode:
6115 case XCmode:
6116 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6117 case TCmode:
6118 return NULL;
6119 default:
6120 return gen_rtx_REG (mode, AX_REG);
6124 ret = construct_container (mode, orig_mode, valtype, 1,
6125 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6126 x86_64_int_return_registers, 0);
6128 /* For zero-sized structures, construct_container returns NULL, but we
6129 need to keep the rest of the compiler happy by returning a meaningful value. */
6130 if (!ret)
6131 ret = gen_rtx_REG (orig_mode, AX_REG);
6133 return ret;
6136 static rtx
6137 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6139 unsigned int regno = AX_REG;
6141 if (TARGET_SSE)
6143 switch (GET_MODE_SIZE (mode))
6145 case 16:
6146 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6147 && !COMPLEX_MODE_P (mode))
6148 regno = FIRST_SSE_REG;
6149 break;
6150 case 8:
6151 case 4:
6152 if (mode == SFmode || mode == DFmode)
6153 regno = FIRST_SSE_REG;
6154 break;
6155 default:
6156 break;
6159 return gen_rtx_REG (orig_mode, regno);
6162 static rtx
6163 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6164 enum machine_mode orig_mode, enum machine_mode mode)
6166 const_tree fn, fntype;
6168 fn = NULL_TREE;
6169 if (fntype_or_decl && DECL_P (fntype_or_decl))
6170 fn = fntype_or_decl;
6171 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6173 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6174 return function_value_ms_64 (orig_mode, mode);
6175 else if (TARGET_64BIT)
6176 return function_value_64 (orig_mode, mode, valtype);
6177 else
6178 return function_value_32 (orig_mode, mode, fntype, fn);
6181 static rtx
6182 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6183 bool outgoing ATTRIBUTE_UNUSED)
6185 enum machine_mode mode, orig_mode;
6187 orig_mode = TYPE_MODE (valtype);
6188 mode = type_natural_mode (valtype, NULL);
6189 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6192 rtx
6193 ix86_libcall_value (enum machine_mode mode)
6195 return ix86_function_value_1 (NULL, NULL, mode, mode);
6198 /* Return true iff type is returned in memory. */
6200 static int ATTRIBUTE_UNUSED
6201 return_in_memory_32 (const_tree type, enum machine_mode mode)
6203 HOST_WIDE_INT size;
6205 if (mode == BLKmode)
6206 return 1;
6208 size = int_size_in_bytes (type);
6210 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6211 return 0;
6213 if (VECTOR_MODE_P (mode) || mode == TImode)
6215 /* User-created vectors small enough to fit in EAX. */
6216 if (size < 8)
6217 return 0;
6219 /* MMX/3dNow values are returned in MM0,
6220 except when it doesn't exist. */
6221 if (size == 8)
6222 return (TARGET_MMX ? 0 : 1);
6224 /* SSE values are returned in XMM0, except when it doesn't exist. */
6225 if (size == 16)
6226 return (TARGET_SSE ? 0 : 1);
6228 /* AVX values are returned in YMM0, except when it doesn't exist. */
6229 if (size == 32)
6230 return TARGET_AVX ? 0 : 1;
6233 if (mode == XFmode)
6234 return 0;
6236 if (size > 12)
6237 return 1;
6239 /* OImode shouldn't be used directly. */
6240 gcc_assert (mode != OImode);
6242 return 0;
6245 static int ATTRIBUTE_UNUSED
6246 return_in_memory_64 (const_tree type, enum machine_mode mode)
6248 int needed_intregs, needed_sseregs;
6249 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6252 static int ATTRIBUTE_UNUSED
6253 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6255 HOST_WIDE_INT size = int_size_in_bytes (type);
6257 /* __m128 is returned in xmm0. */
6258 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6259 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6260 return 0;
6262 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6263 return (size != 1 && size != 2 && size != 4 && size != 8);
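/* Examples of the MS x64 rules above: an __m128 return value goes in
   %xmm0 rather than memory, an 8-byte struct comes back in %rax, while a
   3-byte or 12-byte struct is returned in memory.  */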
6266 static bool
6267 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6269 #ifdef SUBTARGET_RETURN_IN_MEMORY
6270 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6271 #else
6272 const enum machine_mode mode = type_natural_mode (type, NULL);
6274 if (TARGET_64BIT)
6276 if (ix86_function_type_abi (fntype) == MS_ABI)
6277 return return_in_memory_ms_64 (type, mode);
6278 else
6279 return return_in_memory_64 (type, mode);
6281 else
6282 return return_in_memory_32 (type, mode);
6283 #endif
6286 /* Return true iff TYPE is returned in memory. This version is used
6287 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6288 but differs notably in that when MMX is available, 8-byte vectors
6289 are returned in memory, rather than in MMX registers. */
6291 bool
6292 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6294 int size;
6295 enum machine_mode mode = type_natural_mode (type, NULL);
6297 if (TARGET_64BIT)
6298 return return_in_memory_64 (type, mode);
6300 if (mode == BLKmode)
6301 return 1;
6303 size = int_size_in_bytes (type);
6305 if (VECTOR_MODE_P (mode))
6307 /* Return in memory only if MMX registers *are* available. This
6308 seems backwards, but it is consistent with the existing
6309 Solaris x86 ABI. */
6310 if (size == 8)
6311 return TARGET_MMX;
6312 if (size == 16)
6313 return !TARGET_SSE;
6315 else if (mode == TImode)
6316 return !TARGET_SSE;
6317 else if (mode == XFmode)
6318 return 0;
6320 return size > 12;
6323 /* When returning SSE vector types, we have a choice of either
6324 (1) being abi incompatible with a -march switch, or
6325 (2) generating an error.
6326 Given no good solution, I think the safest thing is one warning.
6327 The user won't be able to use -Werror, but....
6329 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6330 called in response to actually generating a caller or callee that
6331 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6332 via aggregate_value_p for general type probing from tree-ssa. */
6334 static rtx
6335 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6337 static bool warnedsse, warnedmmx;
6339 if (!TARGET_64BIT && type)
6341 /* Look at the return type of the function, not the function type. */
6342 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6344 if (!TARGET_SSE && !warnedsse)
6346 if (mode == TImode
6347 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6349 warnedsse = true;
6350 warning (0, "SSE vector return without SSE enabled "
6351 "changes the ABI");
6355 if (!TARGET_MMX && !warnedmmx)
6357 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6359 warnedmmx = true;
6360 warning (0, "MMX vector return without MMX enabled "
6361 "changes the ABI");
6366 return NULL;
6370 /* Create the va_list data type. */
6372 /* Returns the calling convention specific va_list data type.
6373 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6375 static tree
6376 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6378 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6380 /* For i386 we use plain pointer to argument area. */
6381 if (!TARGET_64BIT || abi == MS_ABI)
6382 return build_pointer_type (char_type_node);
6384 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6385 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
6387 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
6388 unsigned_type_node);
6389 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
6390 unsigned_type_node);
6391 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
6392 ptr_type_node);
6393 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
6394 ptr_type_node);
6396 va_list_gpr_counter_field = f_gpr;
6397 va_list_fpr_counter_field = f_fpr;
6399 DECL_FIELD_CONTEXT (f_gpr) = record;
6400 DECL_FIELD_CONTEXT (f_fpr) = record;
6401 DECL_FIELD_CONTEXT (f_ovf) = record;
6402 DECL_FIELD_CONTEXT (f_sav) = record;
6404 TREE_CHAIN (record) = type_decl;
6405 TYPE_NAME (record) = type_decl;
6406 TYPE_FIELDS (record) = f_gpr;
6407 TREE_CHAIN (f_gpr) = f_fpr;
6408 TREE_CHAIN (f_fpr) = f_ovf;
6409 TREE_CHAIN (f_ovf) = f_sav;
6411 layout_type (record);
6413 /* The correct type is an array type of one element. */
6414 return build_array_type (record, build_index_type (size_zero_node));
6417 /* Setup the builtin va_list data type and for 64-bit the additional
6418 calling convention specific va_list data types. */
6420 static tree
6421 ix86_build_builtin_va_list (void)
6423 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6425 /* Initialize abi specific va_list builtin types. */
6426 if (TARGET_64BIT)
6428 tree t;
6429 if (ix86_abi == MS_ABI)
6431 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6432 if (TREE_CODE (t) != RECORD_TYPE)
6433 t = build_variant_type_copy (t);
6434 sysv_va_list_type_node = t;
6436 else
6438 t = ret;
6439 if (TREE_CODE (t) != RECORD_TYPE)
6440 t = build_variant_type_copy (t);
6441 sysv_va_list_type_node = t;
6443 if (ix86_abi != MS_ABI)
6445 t = ix86_build_builtin_va_list_abi (MS_ABI);
6446 if (TREE_CODE (t) != RECORD_TYPE)
6447 t = build_variant_type_copy (t);
6448 ms_va_list_type_node = t;
6450 else
6452 t = ret;
6453 if (TREE_CODE (t) != RECORD_TYPE)
6454 t = build_variant_type_copy (t);
6455 ms_va_list_type_node = t;
6459 return ret;
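/* For reference, the record laid out above matches the familiar SysV x86-64
   va_list shape (a sketch in C, with the field order used here):

     struct __va_list_tag
     {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     };
     typedef struct __va_list_tag va_list[1];  */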
6462 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6464 static void
6465 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6467 rtx save_area, mem;
6468 rtx label;
6469 rtx label_ref;
6470 rtx tmp_reg;
6471 rtx nsse_reg;
6472 alias_set_type set;
6473 int i;
6474 int regparm = ix86_regparm;
6476 if (cum->call_abi != ix86_abi)
6477 regparm = ix86_abi != SYSV_ABI ? X86_64_REGPARM_MAX : X64_REGPARM_MAX;
6479 /* GPR size of varargs save area. */
6480 if (cfun->va_list_gpr_size)
6481 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6482 else
6483 ix86_varargs_gpr_size = 0;
6485 /* FPR size of varargs save area. We don't need it if we don't pass
6486 anything in SSE registers. */
6487 if (cum->sse_nregs && cfun->va_list_fpr_size)
6488 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6489 else
6490 ix86_varargs_fpr_size = 0;
6492 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6493 return;
6495 save_area = frame_pointer_rtx;
6496 set = get_varargs_alias_set ();
6498 for (i = cum->regno;
6499 i < regparm
6500 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6501 i++)
6503 mem = gen_rtx_MEM (Pmode,
6504 plus_constant (save_area, i * UNITS_PER_WORD));
6505 MEM_NOTRAP_P (mem) = 1;
6506 set_mem_alias_set (mem, set);
6507 emit_move_insn (mem, gen_rtx_REG (Pmode,
6508 x86_64_int_parameter_registers[i]));
6511 if (ix86_varargs_fpr_size)
6513 /* Now emit code to save SSE registers. The AX parameter contains the
6514 number of SSE parameter registers used to call this function. We use
6515 the sse_prologue_save insn template, which produces a computed jump
6516 across the SSE saves. We need some preparation work to get this working. */
6518 label = gen_label_rtx ();
6519 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6521 /* Compute the address to jump to:
6522 label - eax*4 + nnamed_sse_arguments*4, or
6523 label - eax*5 + nnamed_sse_arguments*5 for AVX. */
6524 tmp_reg = gen_reg_rtx (Pmode);
6525 nsse_reg = gen_reg_rtx (Pmode);
6526 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6527 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6528 gen_rtx_MULT (Pmode, nsse_reg,
6529 GEN_INT (4))));
6531 /* vmovaps is one byte longer than movaps. */
6532 if (TARGET_AVX)
6533 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6534 gen_rtx_PLUS (Pmode, tmp_reg,
6535 nsse_reg)));
6537 if (cum->sse_regno)
6538 emit_move_insn
6539 (nsse_reg,
6540 gen_rtx_CONST (DImode,
6541 gen_rtx_PLUS (DImode,
6542 label_ref,
6543 GEN_INT (cum->sse_regno
6544 * (TARGET_AVX ? 5 : 4)))));
6545 else
6546 emit_move_insn (nsse_reg, label_ref);
6547 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6549 /* Compute the address of the memory block we save into. We always use a
6550 pointer pointing 127 bytes past the first byte to store - this is needed
6551 to keep the instruction size limited to 4 bytes (5 bytes for AVX) with a
6552 one-byte displacement. */
6553 tmp_reg = gen_reg_rtx (Pmode);
6554 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6555 plus_constant (save_area,
6556 ix86_varargs_gpr_size + 127)));
6557 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6558 MEM_NOTRAP_P (mem) = 1;
6559 set_mem_alias_set (mem, set);
6560 set_mem_align (mem, BITS_PER_WORD);
6562 /* And finally do the dirty job! */
6563 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6564 GEN_INT (cum->sse_regno), label));
6568 static void
6569 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6571 alias_set_type set = get_varargs_alias_set ();
6572 int i;
6574 for (i = cum->regno; i < X64_REGPARM_MAX; i++)
6576 rtx reg, mem;
6578 mem = gen_rtx_MEM (Pmode,
6579 plus_constant (virtual_incoming_args_rtx,
6580 i * UNITS_PER_WORD));
6581 MEM_NOTRAP_P (mem) = 1;
6582 set_mem_alias_set (mem, set);
6584 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6585 emit_move_insn (mem, reg);
6589 static void
6590 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6591 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6592 int no_rtl)
6594 CUMULATIVE_ARGS next_cum;
6595 tree fntype;
6597 /* This argument doesn't appear to be used anymore. Which is good,
6598 because the old code here didn't suppress rtl generation. */
6599 gcc_assert (!no_rtl);
6601 if (!TARGET_64BIT)
6602 return;
6604 fntype = TREE_TYPE (current_function_decl);
6606 /* For varargs, we do not want to skip the dummy va_dcl argument.
6607 For stdargs, we do want to skip the last named argument. */
6608 next_cum = *cum;
6609 if (stdarg_p (fntype))
6610 function_arg_advance (&next_cum, mode, type, 1);
6612 if (cum->call_abi == MS_ABI)
6613 setup_incoming_varargs_ms_64 (&next_cum);
6614 else
6615 setup_incoming_varargs_64 (&next_cum);
6618 /* Checks whether TYPE is a va_list that is just a char pointer. */
6620 static bool
6621 is_va_list_char_pointer (tree type)
6623 tree canonic;
6625 /* For 32-bit it is always true. */
6626 if (!TARGET_64BIT)
6627 return true;
6628 canonic = ix86_canonical_va_list_type (type);
6629 return (canonic == ms_va_list_type_node
6630 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6633 /* Implement va_start. */
6635 static void
6636 ix86_va_start (tree valist, rtx nextarg)
6638 HOST_WIDE_INT words, n_gpr, n_fpr;
6639 tree f_gpr, f_fpr, f_ovf, f_sav;
6640 tree gpr, fpr, ovf, sav, t;
6641 tree type;
6643 /* Only 64bit target needs something special. */
6644 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6646 std_expand_builtin_va_start (valist, nextarg);
6647 return;
6650 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6651 f_fpr = TREE_CHAIN (f_gpr);
6652 f_ovf = TREE_CHAIN (f_fpr);
6653 f_sav = TREE_CHAIN (f_ovf);
6655 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6656 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6657 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6658 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6659 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6661 /* Count number of gp and fp argument registers used. */
6662 words = crtl->args.info.words;
6663 n_gpr = crtl->args.info.regno;
6664 n_fpr = crtl->args.info.sse_regno;
6666 if (cfun->va_list_gpr_size)
6668 type = TREE_TYPE (gpr);
6669 t = build2 (MODIFY_EXPR, type,
6670 gpr, build_int_cst (type, n_gpr * 8));
6671 TREE_SIDE_EFFECTS (t) = 1;
6672 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6675 if (TARGET_SSE && cfun->va_list_fpr_size)
6677 type = TREE_TYPE (fpr);
6678 t = build2 (MODIFY_EXPR, type, fpr,
6679 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6680 TREE_SIDE_EFFECTS (t) = 1;
6681 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6684 /* Find the overflow area. */
6685 type = TREE_TYPE (ovf);
6686 t = make_tree (type, crtl->args.internal_arg_pointer);
6687 if (words != 0)
6688 t = build2 (POINTER_PLUS_EXPR, type, t,
6689 size_int (words * UNITS_PER_WORD));
6690 t = build2 (MODIFY_EXPR, type, ovf, t);
6691 TREE_SIDE_EFFECTS (t) = 1;
6692 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6694 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6696 /* Find the register save area.
6697 The function prologue saves it right above the stack frame. */
6698 type = TREE_TYPE (sav);
6699 t = make_tree (type, frame_pointer_rtx);
6700 if (!ix86_varargs_gpr_size)
6701 t = build2 (POINTER_PLUS_EXPR, type, t,
6702 size_int (-8 * X86_64_REGPARM_MAX));
6703 t = build2 (MODIFY_EXPR, type, sav, t);
6704 TREE_SIDE_EFFECTS (t) = 1;
6705 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
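/* As a worked example of the offsets computed above (a sketch, assuming
   X86_64_REGPARM_MAX is 6): after va_start in a function whose named
   parameters consumed two integer registers and one SSE register,
   gp_offset is 2*8 == 16 and fp_offset is 1*16 + 8*6 == 64.  */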
6709 /* Implement va_arg. */
6711 static tree
6712 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6713 gimple_seq *post_p)
6715 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
6716 tree f_gpr, f_fpr, f_ovf, f_sav;
6717 tree gpr, fpr, ovf, sav, t;
6718 int size, rsize;
6719 tree lab_false, lab_over = NULL_TREE;
6720 tree addr, t2;
6721 rtx container;
6722 int indirect_p = 0;
6723 tree ptrtype;
6724 enum machine_mode nat_mode;
6725 int arg_boundary;
6727 /* Only 64bit target needs something special. */
6728 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6729 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6731 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6732 f_fpr = TREE_CHAIN (f_gpr);
6733 f_ovf = TREE_CHAIN (f_fpr);
6734 f_sav = TREE_CHAIN (f_ovf);
6736 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
6737 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
6738 valist = build_va_arg_indirect_ref (valist);
6739 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6740 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6741 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6743 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6744 if (indirect_p)
6745 type = build_pointer_type (type);
6746 size = int_size_in_bytes (type);
6747 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6749 nat_mode = type_natural_mode (type, NULL);
6750 switch (nat_mode)
6752 case V8SFmode:
6753 case V8SImode:
6754 case V32QImode:
6755 case V16HImode:
6756 case V4DFmode:
6757 case V4DImode:
6758 /* Unnamed 256bit vector mode parameters are passed on stack. */
6759 if (ix86_cfun_abi () == SYSV_ABI)
6761 container = NULL;
6762 break;
6765 default:
6766 container = construct_container (nat_mode, TYPE_MODE (type),
6767 type, 0, X86_64_REGPARM_MAX,
6768 X86_64_SSE_REGPARM_MAX, intreg,
6770 break;
6773 /* Pull the value out of the saved registers. */
6775 addr = create_tmp_var (ptr_type_node, "addr");
6776 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
6778 if (container)
6780 int needed_intregs, needed_sseregs;
6781 bool need_temp;
6782 tree int_addr, sse_addr;
6784 lab_false = create_artificial_label ();
6785 lab_over = create_artificial_label ();
6787 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
6789 need_temp = (!REG_P (container)
6790 && ((needed_intregs && TYPE_ALIGN (type) > 64)
6791 || TYPE_ALIGN (type) > 128));
6793 /* In case we are passing a structure, verify that it is a consecutive block
6794 on the register save area. If not, we need to do moves. */
6795 if (!need_temp && !REG_P (container))
6797 /* Verify that all registers are strictly consecutive */
6798 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
6800 int i;
6802 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
6804 rtx slot = XVECEXP (container, 0, i);
6805 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
6806 || INTVAL (XEXP (slot, 1)) != i * 16)
6807 need_temp = 1;
6810 else
6812 int i;
6814 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
6816 rtx slot = XVECEXP (container, 0, i);
6817 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
6818 || INTVAL (XEXP (slot, 1)) != i * 8)
6819 need_temp = 1;
6823 if (!need_temp)
6825 int_addr = addr;
6826 sse_addr = addr;
6828 else
6830 int_addr = create_tmp_var (ptr_type_node, "int_addr");
6831 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
6832 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
6833 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
6836 /* First ensure that we fit completely in registers. */
6837 if (needed_intregs)
6839 t = build_int_cst (TREE_TYPE (gpr),
6840 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
6841 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
6842 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
6843 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
6844 gimplify_and_add (t, pre_p);
6846 if (needed_sseregs)
6848 t = build_int_cst (TREE_TYPE (fpr),
6849 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
6850 + X86_64_REGPARM_MAX * 8);
6851 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
6852 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
6853 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
6854 gimplify_and_add (t, pre_p);
6857 /* Compute index to start of area used for integer regs. */
6858 if (needed_intregs)
6860 /* int_addr = gpr + sav; */
6861 t = fold_convert (sizetype, gpr);
6862 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
6863 gimplify_assign (int_addr, t, pre_p);
6865 if (needed_sseregs)
6867 /* sse_addr = fpr + sav; */
6868 t = fold_convert (sizetype, fpr);
6869 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
6870 gimplify_assign (sse_addr, t, pre_p);
6872 if (need_temp)
6874 int i;
6875 tree temp = create_tmp_var (type, "va_arg_tmp");
6877 /* addr = &temp; */
6878 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
6879 gimplify_assign (addr, t, pre_p);
6881 for (i = 0; i < XVECLEN (container, 0); i++)
6883 rtx slot = XVECEXP (container, 0, i);
6884 rtx reg = XEXP (slot, 0);
6885 enum machine_mode mode = GET_MODE (reg);
6886 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
6887 tree addr_type = build_pointer_type (piece_type);
6888 tree daddr_type = build_pointer_type_for_mode (piece_type,
6889 ptr_mode, true);
6890 tree src_addr, src;
6891 int src_offset;
6892 tree dest_addr, dest;
6894 if (SSE_REGNO_P (REGNO (reg)))
6896 src_addr = sse_addr;
6897 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
6899 else
6901 src_addr = int_addr;
6902 src_offset = REGNO (reg) * 8;
6904 src_addr = fold_convert (addr_type, src_addr);
6905 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
6906 size_int (src_offset));
6907 src = build_va_arg_indirect_ref (src_addr);
6909 dest_addr = fold_convert (daddr_type, addr);
6910 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
6911 size_int (INTVAL (XEXP (slot, 1))));
6912 dest = build_va_arg_indirect_ref (dest_addr);
6914 gimplify_assign (dest, src, pre_p);
6918 if (needed_intregs)
6920 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
6921 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
6922 gimplify_assign (gpr, t, pre_p);
6925 if (needed_sseregs)
6927 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
6928 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
6929 gimplify_assign (fpr, t, pre_p);
6932 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
6934 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
6937 /* ... otherwise out of the overflow area. */
6939 /* When we align a parameter on the stack for the caller, if the parameter
6940 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
6941 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. Match the callee
6942 here with the caller. */
6943 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
6944 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
6945 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
6947 /* Care for on-stack alignment if needed. */
6948 if (arg_boundary <= 64
6949 || integer_zerop (TYPE_SIZE (type)))
6950 t = ovf;
6951 else
6953 HOST_WIDE_INT align = arg_boundary / 8;
6954 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
6955 size_int (align - 1));
6956 t = fold_convert (sizetype, t);
6957 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
6958 size_int (-align));
6959 t = fold_convert (TREE_TYPE (ovf), t);
6961 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
6962 gimplify_assign (addr, t, pre_p);
6964 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
6965 size_int (rsize * UNITS_PER_WORD));
6966 gimplify_assign (unshare_expr (ovf), t, pre_p);
6968 if (container)
6969 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
6971 ptrtype = build_pointer_type (type);
6972 addr = fold_convert (ptrtype, addr);
6974 if (indirect_p)
6975 addr = build_va_arg_indirect_ref (addr);
6976 return build_va_arg_indirect_ref (addr);
6979 /* Return nonzero if OPNUM's MEM should be matched
6980 in movabs* patterns. */
6982 int
6983 ix86_check_movabs (rtx insn, int opnum)
6985 rtx set, mem;
6987 set = PATTERN (insn);
6988 if (GET_CODE (set) == PARALLEL)
6989 set = XVECEXP (set, 0, 0);
6990 gcc_assert (GET_CODE (set) == SET);
6991 mem = XEXP (set, opnum);
6992 while (GET_CODE (mem) == SUBREG)
6993 mem = SUBREG_REG (mem);
6994 gcc_assert (MEM_P (mem));
6995 return (volatile_ok || !MEM_VOLATILE_P (mem));
6998 /* Initialize the table of extra 80387 mathematical constants. */
7000 static void
7001 init_ext_80387_constants (void)
7003 static const char * cst[5] =
7005 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7006 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7007 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7008 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7009 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7011 int i;
7013 for (i = 0; i < 5; i++)
7015 real_from_string (&ext_80387_constants_table[i], cst[i]);
7016 /* Ensure each constant is rounded to XFmode precision. */
7017 real_convert (&ext_80387_constants_table[i],
7018 XFmode, &ext_80387_constants_table[i]);
7021 ext_80387_constants_init = 1;
7024 /* Return true if the constant is something that can be loaded with
7025 a special instruction. */
7027 int
7028 standard_80387_constant_p (rtx x)
7030 enum machine_mode mode = GET_MODE (x);
7032 REAL_VALUE_TYPE r;
7034 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7035 return -1;
7037 if (x == CONST0_RTX (mode))
7038 return 1;
7039 if (x == CONST1_RTX (mode))
7040 return 2;
7042 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7044 /* For XFmode constants, try to find a special 80387 instruction when
7045 optimizing for size or on those CPUs that benefit from them. */
7046 if (mode == XFmode
7047 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7049 int i;
7051 if (! ext_80387_constants_init)
7052 init_ext_80387_constants ();
7054 for (i = 0; i < 5; i++)
7055 if (real_identical (&r, &ext_80387_constants_table[i]))
7056 return i + 3;
7059 /* A load of the constant -0.0 or -1.0 will be split as
7060 a fldz;fchs or fld1;fchs sequence. */
7061 if (real_isnegzero (&r))
7062 return 8;
7063 if (real_identical (&r, &dconstm1))
7064 return 9;
7066 return 0;
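/* A few examples of the encoding above: 0.0 yields 1 (fldz), 1.0 yields 2
   (fld1), -0.0 yields 8 and -1.0 yields 9 (both split into fldz;fchs or
   fld1;fchs later), and an XFmode pi yields 7 (fldpi) when the extended
   80387 constants are in use or we optimize for size.  */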
7069 /* Return the opcode of the special instruction to be used to load
7070 the constant X. */
7072 const char *
7073 standard_80387_constant_opcode (rtx x)
7075 switch (standard_80387_constant_p (x))
7077 case 1:
7078 return "fldz";
7079 case 2:
7080 return "fld1";
7081 case 3:
7082 return "fldlg2";
7083 case 4:
7084 return "fldln2";
7085 case 5:
7086 return "fldl2e";
7087 case 6:
7088 return "fldl2t";
7089 case 7:
7090 return "fldpi";
7091 case 8:
7092 case 9:
7093 return "#";
7094 default:
7095 gcc_unreachable ();
7099 /* Return the CONST_DOUBLE representing the 80387 constant that is
7100 loaded by the specified special instruction. The argument IDX
7101 matches the return value from standard_80387_constant_p. */
7103 rtx
7104 standard_80387_constant_rtx (int idx)
7106 int i;
7108 if (! ext_80387_constants_init)
7109 init_ext_80387_constants ();
7111 switch (idx)
7113 case 3:
7114 case 4:
7115 case 5:
7116 case 6:
7117 case 7:
7118 i = idx - 3;
7119 break;
7121 default:
7122 gcc_unreachable ();
7125 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7126 XFmode);
7129 /* Return 1 if MODE is a valid mode for SSE. */
7130 static int
7131 standard_sse_mode_p (enum machine_mode mode)
7133 switch (mode)
7135 case V16QImode:
7136 case V8HImode:
7137 case V4SImode:
7138 case V2DImode:
7139 case V4SFmode:
7140 case V2DFmode:
7141 return 1;
7143 default:
7144 return 0;
7148 /* Return 1 if X is all 0s. For all 1s, return 2 if X is in 128bit
7149 SSE modes and SSE2 is enabled, return 3 if X is in 256bit AVX
7150 modes and AVX is enabled. */
7152 int
7153 standard_sse_constant_p (rtx x)
7155 enum machine_mode mode = GET_MODE (x);
7157 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7158 return 1;
7159 if (vector_all_ones_operand (x, mode))
7161 if (standard_sse_mode_p (mode))
7162 return TARGET_SSE2 ? 2 : -2;
7163 else if (VALID_AVX256_REG_MODE (mode))
7164 return TARGET_AVX ? 3 : -3;
7167 return 0;
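/* Examples of the encoding above: CONST0_RTX (V4SFmode) yields 1 and is
   emitted as xorps/vxorps; an all-ones V4SImode vector yields 2 when SSE2
   is enabled (emitted as pcmpeqd) and -2 otherwise; an all-ones 256-bit
   vector yields 3 with AVX and -3 without it.  */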
7170 /* Return the opcode of the special instruction to be used to load
7171 the constant X. */
7173 const char *
7174 standard_sse_constant_opcode (rtx insn, rtx x)
7176 switch (standard_sse_constant_p (x))
7178 case 1:
7179 switch (get_attr_mode (insn))
7181 case MODE_V4SF:
7182 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7183 case MODE_V2DF:
7184 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7185 case MODE_TI:
7186 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7187 case MODE_V8SF:
7188 return "vxorps\t%x0, %x0, %x0";
7189 case MODE_V4DF:
7190 return "vxorpd\t%x0, %x0, %x0";
7191 case MODE_OI:
7192 return "vpxor\t%x0, %x0, %x0";
7193 default:
7194 gcc_unreachable ();
7196 case 2:
7197 if (TARGET_AVX)
7198 switch (get_attr_mode (insn))
7200 case MODE_V4SF:
7201 case MODE_V2DF:
7202 case MODE_TI:
7203 return "vpcmpeqd\t%0, %0, %0";
7204 break;
7205 default:
7206 gcc_unreachable ();
7208 else
7209 return "pcmpeqd\t%0, %0";
7211 gcc_unreachable ();
7214 /* Returns 1 if OP contains a symbol reference. */
7216 int
7217 symbolic_reference_mentioned_p (rtx op)
7219 const char *fmt;
7220 int i;
7222 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7223 return 1;
7225 fmt = GET_RTX_FORMAT (GET_CODE (op));
7226 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7228 if (fmt[i] == 'E')
7230 int j;
7232 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7233 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7234 return 1;
7237 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7238 return 1;
7241 return 0;
7244 /* Return 1 if it is appropriate to emit `ret' instructions in the
7245 body of a function. Do this only if the epilogue is simple, needing a
7246 couple of insns. Prior to reloading, we can't tell how many registers
7247 must be saved, so return 0 then. Return 0 if there is no frame
7248 marker to de-allocate. */
7251 ix86_can_use_return_insn_p (void)
7253 struct ix86_frame frame;
7255 if (! reload_completed || frame_pointer_needed)
7256 return 0;
7258 /* Don't allow popping more than 32K bytes of arguments, since that's
7259 all we handle with one instruction. */
7260 if (crtl->args.pops_args
7261 && crtl->args.size >= 32768)
7262 return 0;
7264 ix86_compute_frame_layout (&frame);
7265 return frame.to_allocate == 0 && (frame.nregs + frame.nsseregs) == 0;
7268 /* Value should be nonzero if functions must have frame pointers.
7269 Zero means the frame pointer need not be set up (and parms may
7270 be accessed via the stack pointer) in functions that seem suitable. */
7273 ix86_frame_pointer_required (void)
7275 /* If we accessed previous frames, then the generated code expects
7276 to be able to access the saved ebp value in our frame. */
7277 if (cfun->machine->accesses_prev_frame)
7278 return 1;
7280 /* Several x86 OSes need a frame pointer for other reasons,
7281 usually pertaining to setjmp. */
7282 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7283 return 1;
7285 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7286 the frame pointer by default. Turn it back on now if we've not
7287 got a leaf function. */
7288 if (TARGET_OMIT_LEAF_FRAME_POINTER
7289 && (!current_function_is_leaf
7290 || ix86_current_function_calls_tls_descriptor))
7291 return 1;
7293 if (crtl->profile)
7294 return 1;
7296 return 0;
7299 /* Record that the current function accesses previous call frames. */
7301 void
7302 ix86_setup_frame_addresses (void)
7304 cfun->machine->accesses_prev_frame = 1;
7307 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7308 # define USE_HIDDEN_LINKONCE 1
7309 #else
7310 # define USE_HIDDEN_LINKONCE 0
7311 #endif
7313 static int pic_labels_used;
7315 /* Fills in the label name that should be used for a pc thunk for
7316 the given register. */
7318 static void
7319 get_pc_thunk_name (char name[32], unsigned int regno)
7321 gcc_assert (!TARGET_64BIT);
7323 if (USE_HIDDEN_LINKONCE)
7324 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7325 else
7326 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7330 /* For -fpic, this emits the pc thunks, each of which loads its register
7331 with the return address of the caller and then returns. */
7333 void
7334 ix86_file_end (void)
7336 rtx xops[2];
7337 int regno;
7339 for (regno = 0; regno < 8; ++regno)
7341 char name[32];
7343 if (! ((pic_labels_used >> regno) & 1))
7344 continue;
7346 get_pc_thunk_name (name, regno);
7348 #if TARGET_MACHO
7349 if (TARGET_MACHO)
7351 switch_to_section (darwin_sections[text_coal_section]);
7352 fputs ("\t.weak_definition\t", asm_out_file);
7353 assemble_name (asm_out_file, name);
7354 fputs ("\n\t.private_extern\t", asm_out_file);
7355 assemble_name (asm_out_file, name);
7356 fputs ("\n", asm_out_file);
7357 ASM_OUTPUT_LABEL (asm_out_file, name);
7359 else
7360 #endif
7361 if (USE_HIDDEN_LINKONCE)
7363 tree decl;
7365 decl = build_decl (FUNCTION_DECL, get_identifier (name),
7366 error_mark_node);
7367 TREE_PUBLIC (decl) = 1;
7368 TREE_STATIC (decl) = 1;
7369 DECL_ONE_ONLY (decl) = 1;
7371 (*targetm.asm_out.unique_section) (decl, 0);
7372 switch_to_section (get_named_section (decl, NULL, 0));
7374 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7375 fputs ("\t.hidden\t", asm_out_file);
7376 assemble_name (asm_out_file, name);
7377 fputc ('\n', asm_out_file);
7378 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7380 else
7382 switch_to_section (text_section);
7383 ASM_OUTPUT_LABEL (asm_out_file, name);
7386 xops[0] = gen_rtx_REG (Pmode, regno);
7387 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7388 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7389 output_asm_insn ("ret", xops);
7392 if (NEED_INDICATE_EXEC_STACK)
7393 file_end_indicate_exec_stack ();
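/* The body emitted for each thunk above is just a load of the return
   address followed by a return, roughly:

	__i686.get_pc_thunk.bx:
		movl	(%esp), %ebx
		ret

   so a "call __i686.get_pc_thunk.bx" leaves the address of the insn
   following the call in %ebx.  */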
7396 /* Emit code for the SET_GOT patterns. */
7398 const char *
7399 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7401 rtx xops[3];
7403 xops[0] = dest;
7405 if (TARGET_VXWORKS_RTP && flag_pic)
7407 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7408 xops[2] = gen_rtx_MEM (Pmode,
7409 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7410 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7412 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7413 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7414 an unadorned address. */
7415 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7416 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7417 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7418 return "";
7421 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7423 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7425 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7427 if (!flag_pic)
7428 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7429 else
7430 output_asm_insn ("call\t%a2", xops);
7432 #if TARGET_MACHO
7433 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7434 is what will be referenced by the Mach-O PIC subsystem. */
7435 if (!label)
7436 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7437 #endif
7439 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7440 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7442 if (flag_pic)
7443 output_asm_insn ("pop%z0\t%0", xops);
7445 else
7447 char name[32];
7448 get_pc_thunk_name (name, REGNO (dest));
7449 pic_labels_used |= 1 << REGNO (dest);
7451 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7452 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7453 output_asm_insn ("call\t%X2", xops);
7454 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7455 is what will be referenced by the Mach-O PIC subsystem. */
7456 #if TARGET_MACHO
7457 if (!label)
7458 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7459 else
7460 targetm.asm_out.internal_label (asm_out_file, "L",
7461 CODE_LABEL_NUMBER (label));
7462 #endif
7465 if (TARGET_MACHO)
7466 return "";
7468 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7469 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7470 else
7471 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7473 return "";
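/* On a typical ELF/ix86 target the two main shapes produced above are,
   roughly,

	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   when the pc thunk is used, and otherwise

	call	.L2
   .L2:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx

   either of which leaves the GOT base in the PIC register.  */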
7476 /* Generate a "push" pattern for input ARG. */
7478 static rtx
7479 gen_push (rtx arg)
7481 return gen_rtx_SET (VOIDmode,
7482 gen_rtx_MEM (Pmode,
7483 gen_rtx_PRE_DEC (Pmode,
7484 stack_pointer_rtx)),
7485 arg);
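/* The pattern built here is just a store through a pre-decrement of the
   stack pointer, e.g. for a push of %eax on a 32-bit target something like

	(set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI ax))

   (DImode instead of SImode in 64-bit mode).  */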
7488 /* Return >= 0 if there is an unused call-clobbered register available
7489 for the entire function. */
7491 static unsigned int
7492 ix86_select_alt_pic_regnum (void)
7494 if (current_function_is_leaf && !crtl->profile
7495 && !ix86_current_function_calls_tls_descriptor)
7497 int i, drap;
7498 /* Can't use the same register for both PIC and DRAP. */
7499 if (crtl->drap_reg)
7500 drap = REGNO (crtl->drap_reg);
7501 else
7502 drap = -1;
7503 for (i = 2; i >= 0; --i)
7504 if (i != drap && !df_regs_ever_live_p (i))
7505 return i;
7508 return INVALID_REGNUM;
7511 /* Return 1 if we need to save REGNO. */
7512 static int
7513 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7515 if (pic_offset_table_rtx
7516 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7517 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7518 || crtl->profile
7519 || crtl->calls_eh_return
7520 || crtl->uses_const_pool))
7522 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7523 return 0;
7524 return 1;
7527 if (crtl->calls_eh_return && maybe_eh_return)
7529 unsigned i;
7530 for (i = 0; ; i++)
7532 unsigned test = EH_RETURN_DATA_REGNO (i);
7533 if (test == INVALID_REGNUM)
7534 break;
7535 if (test == regno)
7536 return 1;
7540 if (crtl->drap_reg
7541 && regno == REGNO (crtl->drap_reg))
7542 return 1;
7544 return (df_regs_ever_live_p (regno)
7545 && !call_used_regs[regno]
7546 && !fixed_regs[regno]
7547 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7550 /* Return the number of saved general purpose registers. */
7552 static int
7553 ix86_nsaved_regs (void)
7555 int nregs = 0;
7556 int regno;
7558 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7559 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7560 nregs ++;
7561 return nregs;
7564 /* Return the number of saved SSE registers. */
7566 static int
7567 ix86_nsaved_sseregs (void)
7569 int nregs = 0;
7570 int regno;
7572 if (ix86_cfun_abi () != MS_ABI)
7573 return 0;
7574 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7575 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7576 nregs ++;
7577 return nregs;
7580 /* Given FROM and TO register numbers, say whether this elimination is
7581 allowed. If stack alignment is needed, we can only replace argument
7582 pointer with hard frame pointer, or replace frame pointer with stack
7583 pointer. Otherwise, frame pointer elimination is automatically
7584 handled and all other eliminations are valid. */
7587 ix86_can_eliminate (int from, int to)
7589 if (stack_realign_fp)
7590 return ((from == ARG_POINTER_REGNUM
7591 && to == HARD_FRAME_POINTER_REGNUM)
7592 || (from == FRAME_POINTER_REGNUM
7593 && to == STACK_POINTER_REGNUM));
7594 else
7595 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : 1;
7598 /* Return the offset between two registers, one to be eliminated, and the other
7599 its replacement, at the start of a routine. */
7601 HOST_WIDE_INT
7602 ix86_initial_elimination_offset (int from, int to)
7604 struct ix86_frame frame;
7605 ix86_compute_frame_layout (&frame);
7607 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7608 return frame.hard_frame_pointer_offset;
7609 else if (from == FRAME_POINTER_REGNUM
7610 && to == HARD_FRAME_POINTER_REGNUM)
7611 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7612 else
7614 gcc_assert (to == STACK_POINTER_REGNUM);
7616 if (from == ARG_POINTER_REGNUM)
7617 return frame.stack_pointer_offset;
7619 gcc_assert (from == FRAME_POINTER_REGNUM);
7620 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7624 /* In a dynamically-aligned function, we can't know the offset from
7625 stack pointer to frame pointer, so we must ensure that setjmp
7626 eliminates fp against the hard fp (%ebp) rather than trying to
7627 index from %esp up to the top of the frame across a gap that is
7628 of unknown (at compile-time) size. */
7629 static rtx
7630 ix86_builtin_setjmp_frame_value (void)
7632 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7635 /* Fill the structure ix86_frame describing the frame of the currently compiled function. */
7637 static void
7638 ix86_compute_frame_layout (struct ix86_frame *frame)
7640 HOST_WIDE_INT total_size;
7641 unsigned int stack_alignment_needed;
7642 HOST_WIDE_INT offset;
7643 unsigned int preferred_alignment;
7644 HOST_WIDE_INT size = get_frame_size ();
7646 frame->nregs = ix86_nsaved_regs ();
7647 frame->nsseregs = ix86_nsaved_sseregs ();
7648 total_size = size;
7650 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7651 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7653 /* The MS ABI seems to require stack alignment to always be 16, except for
7654 function prologues. */
7655 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
7657 preferred_alignment = 16;
7658 stack_alignment_needed = 16;
7659 crtl->preferred_stack_boundary = 128;
7660 crtl->stack_alignment_needed = 128;
7663 gcc_assert (!size || stack_alignment_needed);
7664 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
7665 gcc_assert (preferred_alignment <= stack_alignment_needed);
7667 /* During reload iteration the number of registers saved can change.
7668 Recompute the value as needed. Do not recompute when the number of
7669 registers didn't change, as reload makes multiple calls to this function
7670 and does not expect the decision to change within a single iteration. */
7671 if (!optimize_function_for_size_p (cfun)
7672 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
7674 int count = frame->nregs;
7676 cfun->machine->use_fast_prologue_epilogue_nregs = count;
7677 /* The fast prologue uses moves instead of pushes to save registers. This
7678 is significantly longer, but it also executes faster, as modern hardware
7679 can execute the moves in parallel but can't do that for push/pop.
7681 Be careful about choosing which prologue to emit: when the function takes
7682 many instructions to execute we may use the slow version, as well as when
7683 the function is known to be outside a hot spot (this is known with
7684 feedback only). Weight the size of the function by the number of registers
7685 to save, as it is cheap to use one or two push instructions but very
7686 slow to use many of them. */
7687 if (count)
7688 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
7689 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
7690 || (flag_branch_probabilities
7691 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
7692 cfun->machine->use_fast_prologue_epilogue = false;
7693 else
7694 cfun->machine->use_fast_prologue_epilogue
7695 = !expensive_function_p (count);
7697 if (TARGET_PROLOGUE_USING_MOVE
7698 && cfun->machine->use_fast_prologue_epilogue)
7699 frame->save_regs_using_mov = true;
7700 else
7701 frame->save_regs_using_mov = false;
7704 /* Skip return address and saved base pointer. */
7705 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
7707 frame->hard_frame_pointer_offset = offset;
7709 /* Align the offset, because the realigned frame starts from
7710 here. */
7711 if (stack_realign_fp)
7712 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
7714 /* Register save area */
7715 offset += frame->nregs * UNITS_PER_WORD;
7717 /* Align SSE reg save area. */
7718 if (frame->nsseregs)
7719 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
7720 else
7721 frame->padding0 = 0;
7723 /* SSE register save area. */
7724 offset += frame->padding0 + frame->nsseregs * 16;
7726 /* Va-arg area */
7727 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
7728 offset += frame->va_arg_size;
7730 /* Align start of frame for local function. */
7731 frame->padding1 = ((offset + stack_alignment_needed - 1)
7732 & -stack_alignment_needed) - offset;
7734 offset += frame->padding1;
7736 /* Frame pointer points here. */
7737 frame->frame_pointer_offset = offset;
7739 offset += size;
7741 /* Add the outgoing arguments area. It can be skipped if we eliminated
7742 all the function calls as dead code.
7743 Skipping is however impossible when the function calls alloca. The alloca
7744 expander assumes that the last crtl->outgoing_args_size bytes
7745 of the stack frame are unused. */
7746 if (ACCUMULATE_OUTGOING_ARGS
7747 && (!current_function_is_leaf || cfun->calls_alloca
7748 || ix86_current_function_calls_tls_descriptor))
7750 offset += crtl->outgoing_args_size;
7751 frame->outgoing_arguments_size = crtl->outgoing_args_size;
7753 else
7754 frame->outgoing_arguments_size = 0;
7756 /* Align stack boundary. Only needed if we're calling another function
7757 or using alloca. */
7758 if (!current_function_is_leaf || cfun->calls_alloca
7759 || ix86_current_function_calls_tls_descriptor)
7760 frame->padding2 = ((offset + preferred_alignment - 1)
7761 & -preferred_alignment) - offset;
7762 else
7763 frame->padding2 = 0;
7765 offset += frame->padding2;
7767 /* We've reached end of stack frame. */
7768 frame->stack_pointer_offset = offset;
7770 /* Size prologue needs to allocate. */
7771 frame->to_allocate =
7772 (size + frame->padding1 + frame->padding2
7773 + frame->outgoing_arguments_size + frame->va_arg_size);
7775 if ((!frame->to_allocate && frame->nregs <= 1)
7776 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
7777 frame->save_regs_using_mov = false;
7779 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && current_function_sp_is_unchanging
7780 && current_function_is_leaf
7781 && !ix86_current_function_calls_tls_descriptor)
7783 frame->red_zone_size = frame->to_allocate;
7784 if (frame->save_regs_using_mov)
7785 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
7786 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
7787 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
7789 else
7790 frame->red_zone_size = 0;
7791 frame->to_allocate -= frame->red_zone_size;
7792 frame->stack_pointer_offset -= frame->red_zone_size;
7793 #if 0
7794 fprintf (stderr, "\n");
7795 fprintf (stderr, "size: %ld\n", (long)size);
7796 fprintf (stderr, "nregs: %ld\n", (long)frame->nregs);
7797 fprintf (stderr, "nsseregs: %ld\n", (long)frame->nsseregs);
7798 fprintf (stderr, "padding0: %ld\n", (long)frame->padding0);
7799 fprintf (stderr, "alignment1: %ld\n", (long)stack_alignment_needed);
7800 fprintf (stderr, "padding1: %ld\n", (long)frame->padding1);
7801 fprintf (stderr, "va_arg: %ld\n", (long)frame->va_arg_size);
7802 fprintf (stderr, "padding2: %ld\n", (long)frame->padding2);
7803 fprintf (stderr, "to_allocate: %ld\n", (long)frame->to_allocate);
7804 fprintf (stderr, "red_zone_size: %ld\n", (long)frame->red_zone_size);
7805 fprintf (stderr, "frame_pointer_offset: %ld\n", (long)frame->frame_pointer_offset);
7806 fprintf (stderr, "hard_frame_pointer_offset: %ld\n",
7807 (long)frame->hard_frame_pointer_offset);
7808 fprintf (stderr, "stack_pointer_offset: %ld\n", (long)frame->stack_pointer_offset);
7809 fprintf (stderr, "current_function_is_leaf: %ld\n", (long)current_function_is_leaf);
7810 fprintf (stderr, "cfun->calls_alloca: %ld\n", (long)cfun->calls_alloca);
7811 fprintf (stderr, "x86_current_function_calls_tls_descriptor: %ld\n", (long)ix86_current_function_calls_tls_descriptor);
7812 #endif
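/* As an illustration, for a 32-bit leaf function that needs a frame pointer,
   saves %esi and %edi, has 40 bytes of locals and a 16-byte
   stack_alignment_needed, the computation above works out roughly to

	hard_frame_pointer_offset = 8	(return address + saved %ebp)
	register save area        = 8	(two 4-byte GPR slots)
	frame_pointer_offset      = 16
	stack_pointer_offset      = 56	(16 + 40 bytes of locals)
	to_allocate               = 40	(what the prologue subtracts from %esp)

   with all paddings and the SSE save area empty.  */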
7815 /* Emit code to save registers in the prologue. */
7817 static void
7818 ix86_emit_save_regs (void)
7820 unsigned int regno;
7821 rtx insn;
7823 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
7824 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7826 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
7827 RTX_FRAME_RELATED_P (insn) = 1;
7831 /* Emit code to save registers using MOV insns. The first register
7832 is stored at POINTER + OFFSET. */
7833 static void
7834 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
7836 unsigned int regno;
7837 rtx insn;
7839 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7840 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7842 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
7843 Pmode, offset),
7844 gen_rtx_REG (Pmode, regno));
7845 RTX_FRAME_RELATED_P (insn) = 1;
7846 offset += UNITS_PER_WORD;
7850 /* Emit code to save SSE registers using MOV insns. The first register
7851 is stored at POINTER + OFFSET. */
7852 static void
7853 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
7855 unsigned int regno;
7856 rtx insn;
7857 rtx mem;
7859 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7860 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7862 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
7863 set_mem_align (mem, 128);
7864 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
7865 RTX_FRAME_RELATED_P (insn) = 1;
7866 offset += 16;
7870 /* Expand prologue or epilogue stack adjustment.
7871 The pattern exists to put a dependency on all ebp-based memory accesses.
7872 STYLE should be negative if instructions should be marked as frame related,
7873 zero if the %r11 register is live and cannot be freely used, and positive
7874 otherwise. */
7876 static void
7877 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
7879 rtx insn;
7881 if (! TARGET_64BIT)
7882 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
7883 else if (x86_64_immediate_operand (offset, DImode))
7884 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
7885 else
7887 rtx r11;
7888 /* r11 is used by indirect sibcall return as well, set before the
7889 epilogue and used after the epilogue. ATM indirect sibcall
7890 shouldn't be used together with huge frame sizes in one
7891 function because of the frame_size check in sibcall.c. */
7892 gcc_assert (style);
7893 r11 = gen_rtx_REG (DImode, R11_REG);
7894 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
7895 if (style < 0)
7896 RTX_FRAME_RELATED_P (insn) = 1;
7897 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
7898 offset));
7900 if (style < 0)
7901 RTX_FRAME_RELATED_P (insn) = 1;
7904 /* Find an available register to be used as the dynamic realign argument
7905 pointer register. Such a register will be written in the prologue and
7906 used at the beginning of the body, so it must not be
7907 1. a parameter passing register.
7908 2. the GOT pointer.
7909 We reuse the static-chain register if it is available. Otherwise, we
7910 use DI for i386 and R13 for x86-64. We chose R13 since it has a
7911 shorter encoding.
7913 Return: the regno of the chosen register. */
7915 static unsigned int
7916 find_drap_reg (void)
7918 tree decl = cfun->decl;
7920 if (TARGET_64BIT)
7922 /* Use R13 for a nested function or a function that needs a static chain.
7923 Since a function with a tail call may use any caller-saved
7924 register in the epilogue, DRAP must not use a caller-saved
7925 register in such a case. */
7926 if ((decl_function_context (decl)
7927 && !DECL_NO_STATIC_CHAIN (decl))
7928 || crtl->tail_call_emit)
7929 return R13_REG;
7931 return R10_REG;
7933 else
7935 /* Use DI for a nested function or a function that needs a static chain.
7936 Since a function with a tail call may use any caller-saved
7937 register in the epilogue, DRAP must not use a caller-saved
7938 register in such a case. */
7939 if ((decl_function_context (decl)
7940 && !DECL_NO_STATIC_CHAIN (decl))
7941 || crtl->tail_call_emit)
7942 return DI_REG;
7944 /* Reuse static chain register if it isn't used for parameter
7945 passing. */
7946 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
7947 && !lookup_attribute ("fastcall",
7948 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
7949 return CX_REG;
7950 else
7951 return DI_REG;
7955 /* Update incoming stack boundary and estimated stack alignment. */
7957 static void
7958 ix86_update_stack_boundary (void)
7960 /* Prefer the one specified at command line. */
7961 ix86_incoming_stack_boundary
7962 = (ix86_user_incoming_stack_boundary
7963 ? ix86_user_incoming_stack_boundary
7964 : ix86_default_incoming_stack_boundary);
7966 /* Incoming stack alignment can be changed on individual functions
7967 via force_align_arg_pointer attribute. We use the smallest
7968 incoming stack boundary. */
7969 if (ix86_incoming_stack_boundary > MIN_STACK_BOUNDARY
7970 && lookup_attribute (ix86_force_align_arg_pointer_string,
7971 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
7972 ix86_incoming_stack_boundary = MIN_STACK_BOUNDARY;
7974 /* The incoming stack frame has to be aligned at least at
7975 parm_stack_boundary. */
7976 if (ix86_incoming_stack_boundary < crtl->parm_stack_boundary)
7977 ix86_incoming_stack_boundary = crtl->parm_stack_boundary;
7979 /* Stack at entrance of main is aligned by runtime. We use the
7980 smallest incoming stack boundary. */
7981 if (ix86_incoming_stack_boundary > MAIN_STACK_BOUNDARY
7982 && DECL_NAME (current_function_decl)
7983 && MAIN_NAME_P (DECL_NAME (current_function_decl))
7984 && DECL_FILE_SCOPE_P (current_function_decl))
7985 ix86_incoming_stack_boundary = MAIN_STACK_BOUNDARY;
7987 /* x86_64 varargs functions need 16-byte stack alignment for the register
7988 save area. */
7989 if (TARGET_64BIT
7990 && cfun->stdarg
7991 && crtl->stack_alignment_estimated < 128)
7992 crtl->stack_alignment_estimated = 128;
7995 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
7996 needed or an rtx for DRAP otherwise. */
7998 static rtx
7999 ix86_get_drap_rtx (void)
8001 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8002 crtl->need_drap = true;
8004 if (stack_realign_drap)
8006 /* Assign DRAP to vDRAP and return vDRAP. */
8007 unsigned int regno = find_drap_reg ();
8008 rtx drap_vreg;
8009 rtx arg_ptr;
8010 rtx seq, insn;
8012 arg_ptr = gen_rtx_REG (Pmode, regno);
8013 crtl->drap_reg = arg_ptr;
8015 start_sequence ();
8016 drap_vreg = copy_to_reg (arg_ptr);
8017 seq = get_insns ();
8018 end_sequence ();
8020 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8021 RTX_FRAME_RELATED_P (insn) = 1;
8022 return drap_vreg;
8024 else
8025 return NULL;
8028 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8030 static rtx
8031 ix86_internal_arg_pointer (void)
8033 return virtual_incoming_args_rtx;
8036 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8037 This is called from dwarf2out.c to emit call frame instructions
8038 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8039 static void
8040 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
8042 rtx unspec = SET_SRC (pattern);
8043 gcc_assert (GET_CODE (unspec) == UNSPEC);
8045 switch (index)
8047 case UNSPEC_REG_SAVE:
8048 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
8049 SET_DEST (pattern));
8050 break;
8051 case UNSPEC_DEF_CFA:
8052 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
8053 INTVAL (XVECEXP (unspec, 0, 0)));
8054 break;
8055 default:
8056 gcc_unreachable ();
8060 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8061 to be generated in correct form. */
8062 static void
8063 ix86_finalize_stack_realign_flags (void)
8065 /* Check whether stack realignment is really needed after reload, and
8066 store the result in cfun. */
8067 unsigned int incoming_stack_boundary
8068 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8069 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8070 unsigned int stack_realign = (incoming_stack_boundary
8071 < (current_function_is_leaf
8072 ? crtl->max_used_stack_slot_alignment
8073 : crtl->stack_alignment_needed));
8075 if (crtl->stack_realign_finalized)
8077 /* After stack_realign_needed is finalized, we can no longer
8078 change it. */
8079 gcc_assert (crtl->stack_realign_needed == stack_realign);
8081 else
8083 crtl->stack_realign_needed = stack_realign;
8084 crtl->stack_realign_finalized = true;
8088 /* Expand the prologue into a bunch of separate insns. */
8090 void
8091 ix86_expand_prologue (void)
8093 rtx insn;
8094 bool pic_reg_used;
8095 struct ix86_frame frame;
8096 HOST_WIDE_INT allocate;
8098 ix86_finalize_stack_realign_flags ();
8100 /* DRAP should not coexist with stack_realign_fp */
8101 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8103 ix86_compute_frame_layout (&frame);
8105 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8106 DRAP is needed and stack realignment is really needed after reload. */
8107 if (crtl->drap_reg && crtl->stack_realign_needed)
8109 rtx x, y;
8110 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8111 int param_ptr_offset = (call_used_regs[REGNO (crtl->drap_reg)]
8112 ? 0 : UNITS_PER_WORD);
8114 gcc_assert (stack_realign_drap);
8116 /* Grab the argument pointer. */
8117 x = plus_constant (stack_pointer_rtx,
8118 (UNITS_PER_WORD + param_ptr_offset));
8119 y = crtl->drap_reg;
8121 /* Only need to push the parameter pointer reg if it is a
8122 caller-saved reg. */
8123 if (!call_used_regs[REGNO (crtl->drap_reg)])
8125 /* Push arg pointer reg */
8126 insn = emit_insn (gen_push (y));
8127 RTX_FRAME_RELATED_P (insn) = 1;
8130 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8131 RTX_FRAME_RELATED_P (insn) = 1;
8133 /* Align the stack. */
8134 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8135 stack_pointer_rtx,
8136 GEN_INT (-align_bytes)));
8137 RTX_FRAME_RELATED_P (insn) = 1;
8139 /* Replicate the return address on the stack so that return
8140 address can be reached via (argp - 1) slot. This is needed
8141 to implement macro RETURN_ADDR_RTX and intrinsic function
8142 expand_builtin_return_addr etc. */
8143 x = crtl->drap_reg;
8144 x = gen_frame_mem (Pmode,
8145 plus_constant (x, -UNITS_PER_WORD));
8146 insn = emit_insn (gen_push (x));
8147 RTX_FRAME_RELATED_P (insn) = 1;
8150 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8151 slower on all targets. Also sdb doesn't like it. */
8153 if (frame_pointer_needed)
8155 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8156 RTX_FRAME_RELATED_P (insn) = 1;
8158 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8159 RTX_FRAME_RELATED_P (insn) = 1;
8162 if (stack_realign_fp)
8164 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8165 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8167 /* Align the stack. */
8168 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8169 stack_pointer_rtx,
8170 GEN_INT (-align_bytes)));
8171 RTX_FRAME_RELATED_P (insn) = 1;
8174 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8176 if (!frame.save_regs_using_mov)
8177 ix86_emit_save_regs ();
8178 else
8179 allocate += frame.nregs * UNITS_PER_WORD;
8181 /* When using the red zone we may start register saving before allocating
8182 the stack frame, saving one cycle of the prologue. However, avoid doing
8183 this if we are going to have to probe the stack, since at least on
8184 x86_64 the stack probe can turn into a call that clobbers
8185 a red zone location. */
8186 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8187 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8188 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8189 && !crtl->stack_realign_needed)
8190 ? hard_frame_pointer_rtx
8191 : stack_pointer_rtx,
8192 -frame.nregs * UNITS_PER_WORD);
8194 if (allocate == 0)
8196 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8197 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8198 GEN_INT (-allocate), -1);
8199 else
8201 /* Only valid for Win32. */
8202 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8203 bool eax_live;
8204 rtx t;
8206 gcc_assert (!TARGET_64BIT || cfun->machine->call_abi == MS_ABI);
8208 if (cfun->machine->call_abi == MS_ABI)
8209 eax_live = false;
8210 else
8211 eax_live = ix86_eax_live_at_start_p ();
8213 if (eax_live)
8215 emit_insn (gen_push (eax));
8216 allocate -= UNITS_PER_WORD;
8219 emit_move_insn (eax, GEN_INT (allocate));
8221 if (TARGET_64BIT)
8222 insn = gen_allocate_stack_worker_64 (eax, eax);
8223 else
8224 insn = gen_allocate_stack_worker_32 (eax, eax);
8225 insn = emit_insn (insn);
8226 RTX_FRAME_RELATED_P (insn) = 1;
8227 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8228 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8229 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8230 t, REG_NOTES (insn));
8232 if (eax_live)
8234 if (frame_pointer_needed)
8235 t = plus_constant (hard_frame_pointer_rtx,
8236 allocate
8237 - frame.to_allocate
8238 - frame.nregs * UNITS_PER_WORD);
8239 else
8240 t = plus_constant (stack_pointer_rtx, allocate);
8241 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8245 if (frame.save_regs_using_mov
8246 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8247 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8249 if (!frame_pointer_needed
8250 || !frame.to_allocate
8251 || crtl->stack_realign_needed)
8252 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8253 frame.to_allocate
8254 + frame.nsseregs * 16 + frame.padding0);
8255 else
8256 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8257 -frame.nregs * UNITS_PER_WORD);
8259 if (!frame_pointer_needed
8260 || !frame.to_allocate
8261 || crtl->stack_realign_needed)
8262 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8263 frame.to_allocate);
8264 else
8265 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8266 - frame.nregs * UNITS_PER_WORD
8267 - frame.nsseregs * 16
8268 - frame.padding0);
8270 pic_reg_used = false;
8271 if (pic_offset_table_rtx
8272 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8273 || crtl->profile))
8275 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8277 if (alt_pic_reg_used != INVALID_REGNUM)
8278 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8280 pic_reg_used = true;
8283 if (pic_reg_used)
8285 if (TARGET_64BIT)
8287 if (ix86_cmodel == CM_LARGE_PIC)
8289 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8290 rtx label = gen_label_rtx ();
8291 emit_label (label);
8292 LABEL_PRESERVE_P (label) = 1;
8293 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8294 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8295 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8296 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8297 pic_offset_table_rtx, tmp_reg));
8299 else
8300 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8302 else
8303 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8306 /* In the pic_reg_used case, make sure that the got load isn't deleted
8307 when mcount needs it. Blockage to avoid call movement across mcount
8308 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8309 note. */
8310 if (crtl->profile && pic_reg_used)
8311 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8313 if (crtl->drap_reg && !crtl->stack_realign_needed)
8315 /* vDRAP is set up, but after reload it turns out stack realignment
8316 isn't necessary; here we emit the prologue to set up DRAP
8317 without the stack realignment adjustment. */
8318 int drap_bp_offset = UNITS_PER_WORD * 2;
8319 rtx x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8320 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8323 /* Prevent instructions from being scheduled into the register save push
8324 sequence when access to the redzone area is done through the frame pointer.
8325 The offset between the frame pointer and the stack pointer is calculated
8326 relative to the value of the stack pointer at the end of the function
8327 prologue, and moving instructions that access the redzone area via the frame
8328 pointer inside the push sequence violates this assumption. */
8329 if (frame_pointer_needed && frame.red_zone_size)
8330 emit_insn (gen_memory_blockage ());
8332 /* Emit cld instruction if stringops are used in the function. */
8333 if (TARGET_CLD && ix86_current_function_needs_cld)
8334 emit_insn (gen_cld ());
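/* For the common case (frame pointer, no realignment, registers saved with
   pushes, no stack probing) the insns emitted above amount to something like

	push	%ebp
	mov	%esp, %ebp
	push	%edi
	push	%esi
	sub	$to_allocate, %esp

   followed, when needed, by the PIC register set-up and a cld.  */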
8337 /* Emit code to restore saved registers using MOV insns. First register
8338 is restored from POINTER + OFFSET. */
8339 static void
8340 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8341 int maybe_eh_return)
8343 int regno;
8344 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8346 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8347 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8349 /* Ensure that adjust_address won't be forced to produce pointer
8350 out of range allowed by x86-64 instruction set. */
8351 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8353 rtx r11;
8355 r11 = gen_rtx_REG (DImode, R11_REG);
8356 emit_move_insn (r11, GEN_INT (offset));
8357 emit_insn (gen_adddi3 (r11, r11, pointer));
8358 base_address = gen_rtx_MEM (Pmode, r11);
8359 offset = 0;
8361 emit_move_insn (gen_rtx_REG (Pmode, regno),
8362 adjust_address (base_address, Pmode, offset));
8363 offset += UNITS_PER_WORD;
8367 /* Emit code to restore saved registers using MOV insns. First register
8368 is restored from POINTER + OFFSET. */
8369 static void
8370 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8371 int maybe_eh_return)
8373 int regno;
8374 rtx base_address = gen_rtx_MEM (TImode, pointer);
8375 rtx mem;
8377 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8378 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8380 /* Ensure that adjust_address won't be forced to produce pointer
8381 out of range allowed by x86-64 instruction set. */
8382 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8384 rtx r11;
8386 r11 = gen_rtx_REG (DImode, R11_REG);
8387 emit_move_insn (r11, GEN_INT (offset));
8388 emit_insn (gen_adddi3 (r11, r11, pointer));
8389 base_address = gen_rtx_MEM (TImode, r11);
8390 offset = 0;
8392 mem = adjust_address (base_address, TImode, offset);
8393 set_mem_align (mem, 128);
8394 emit_move_insn (gen_rtx_REG (TImode, regno), mem);
8395 offset += 16;
8399 /* Restore function stack, frame, and registers. */
8401 void
8402 ix86_expand_epilogue (int style)
8404 int regno;
8405 int sp_valid;
8406 struct ix86_frame frame;
8407 HOST_WIDE_INT offset;
8409 ix86_finalize_stack_realign_flags ();
8411 /* When stack is realigned, SP must be valid. */
8412 sp_valid = (!frame_pointer_needed
8413 || current_function_sp_is_unchanging
8414 || stack_realign_fp);
8416 ix86_compute_frame_layout (&frame);
8418 /* See the comment about red zone and frame
8419 pointer usage in ix86_expand_prologue. */
8420 if (frame_pointer_needed && frame.red_zone_size)
8421 emit_insn (gen_memory_blockage ());
8423 /* Calculate start of saved registers relative to ebp. Special care
8424 must be taken for the normal return case of a function using
8425 eh_return: the eax and edx registers are marked as saved, but not
8426 restored along this path. */
8427 offset = frame.nregs;
8428 if (crtl->calls_eh_return && style != 2)
8429 offset -= 2;
8430 offset *= -UNITS_PER_WORD;
8431 offset -= frame.nsseregs * 16 + frame.padding0;
8433 /* If we're only restoring one register and sp is not valid, then
8434 use a move instruction to restore the register, since it's
8435 less work than reloading sp and popping the register.
8437 The default code results in a stack adjustment using an add/lea instruction,
8438 while this code results in a LEAVE instruction (or discrete equivalent),
8439 so it is profitable in some other cases as well, especially when there
8440 are no registers to restore. We also use this code when TARGET_USE_LEAVE
8441 and there is exactly one register to pop. This heuristic may need some
8442 tuning in the future. */
8443 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
8444 || (TARGET_EPILOGUE_USING_MOVE
8445 && cfun->machine->use_fast_prologue_epilogue
8446 && ((frame.nregs + frame.nsseregs) > 1 || frame.to_allocate))
8447 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs) && frame.to_allocate)
8448 || (frame_pointer_needed && TARGET_USE_LEAVE
8449 && cfun->machine->use_fast_prologue_epilogue
8450 && (frame.nregs + frame.nsseregs) == 1)
8451 || crtl->calls_eh_return)
8453 /* Restore registers. We can use ebp or esp to address the memory
8454 locations. If both are available, default to ebp, since offsets
8455 are known to be small. The only exception is esp pointing directly
8456 to the end of the block of saved registers, where we may simplify
8457 the addressing mode.
8459 If we are realigning the stack with bp and sp, the register restores
8460 can't be addressed by bp; sp must be used instead. */
8462 if (!frame_pointer_needed
8463 || (sp_valid && !frame.to_allocate)
8464 || stack_realign_fp)
8466 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8467 frame.to_allocate, style == 2);
8468 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
8469 frame.to_allocate
8470 + frame.nsseregs * 16
8471 + frame.padding0, style == 2);
8473 else
8475 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
8476 offset, style == 2);
8477 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
8478 offset
8479 + frame.nsseregs * 16
8480 + frame.padding0, style == 2);
8483 /* eh_return epilogues need %ecx added to the stack pointer. */
8484 if (style == 2)
8486 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
8488 /* Stack align doesn't work with eh_return. */
8489 gcc_assert (!crtl->stack_realign_needed);
8491 if (frame_pointer_needed)
8493 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
8494 tmp = plus_constant (tmp, UNITS_PER_WORD);
8495 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
8497 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
8498 emit_move_insn (hard_frame_pointer_rtx, tmp);
8500 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
8501 const0_rtx, style);
8503 else
8505 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
8506 tmp = plus_constant (tmp, (frame.to_allocate
8507 + frame.nregs * UNITS_PER_WORD
8508 + frame.nsseregs * 16
8509 + frame.padding0));
8510 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
8513 else if (!frame_pointer_needed)
8514 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8515 GEN_INT (frame.to_allocate
8516 + frame.nregs * UNITS_PER_WORD
8517 + frame.nsseregs * 16
8518 + frame.padding0),
8519 style);
8520 /* If not an i386, mov & pop is faster than "leave". */
8521 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
8522 || !cfun->machine->use_fast_prologue_epilogue)
8523 emit_insn ((*ix86_gen_leave) ());
8524 else
8526 pro_epilogue_adjust_stack (stack_pointer_rtx,
8527 hard_frame_pointer_rtx,
8528 const0_rtx, style);
8530 emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8533 else
8535 /* The first step is to deallocate the stack frame so that we can
8536 pop the registers.
8538 If we realign the stack with the frame pointer, then the stack pointer
8539 can't be recovered via lea $offset(%bp), %sp, because
8540 there is a padding area between bp and sp for the realignment.
8541 "add $to_allocate, %sp" must be used instead. */
8542 if (!sp_valid)
8544 gcc_assert (frame_pointer_needed);
8545 gcc_assert (!stack_realign_fp);
8546 pro_epilogue_adjust_stack (stack_pointer_rtx,
8547 hard_frame_pointer_rtx,
8548 GEN_INT (offset), style);
8549 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8550 frame.to_allocate, style == 2);
8551 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8552 GEN_INT (frame.nsseregs * 16), style);
8554 else if (frame.to_allocate || frame.nsseregs)
8556 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8557 frame.to_allocate,
8558 style == 2);
8559 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8560 GEN_INT (frame.to_allocate
8561 + frame.nsseregs * 16
8562 + frame.padding0), style);
8565 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8566 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8567 emit_insn ((*ix86_gen_pop1) (gen_rtx_REG (Pmode, regno)));
8568 if (frame_pointer_needed)
8570 /* Leave results in shorter dependency chains on CPUs that are
8571 able to grok it fast. */
8572 if (TARGET_USE_LEAVE)
8573 emit_insn ((*ix86_gen_leave) ());
8574 else
8576 /* When stack realignment really happened, recovering the stack
8577 pointer from the hard frame pointer is a must, if not using
8578 leave. */
8579 if (stack_realign_fp)
8580 pro_epilogue_adjust_stack (stack_pointer_rtx,
8581 hard_frame_pointer_rtx,
8582 const0_rtx, style);
8583 emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8588 if (crtl->drap_reg && crtl->stack_realign_needed)
8590 int param_ptr_offset = (call_used_regs[REGNO (crtl->drap_reg)]
8591 ? 0 : UNITS_PER_WORD);
8592 gcc_assert (stack_realign_drap);
8593 emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
8594 crtl->drap_reg,
8595 GEN_INT (-(UNITS_PER_WORD
8596 + param_ptr_offset))));
8597 if (!call_used_regs[REGNO (crtl->drap_reg)])
8598 emit_insn ((*ix86_gen_pop1) (crtl->drap_reg));
8602 /* Sibcall epilogues don't want a return instruction. */
8603 if (style == 0)
8604 return;
8606 if (crtl->args.pops_args && crtl->args.size)
8608 rtx popc = GEN_INT (crtl->args.pops_args);
8610 /* i386 can only pop 64K bytes. If asked to pop more, pop
8611 return address, do explicit add, and jump indirectly to the
8612 caller. */
8614 if (crtl->args.pops_args >= 65536)
8616 rtx ecx = gen_rtx_REG (SImode, CX_REG);
8618 /* There is no "pascal" calling convention in any 64bit ABI. */
8619 gcc_assert (!TARGET_64BIT);
8621 emit_insn (gen_popsi1 (ecx));
8622 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
8623 emit_jump_insn (gen_return_indirect_internal (ecx));
8625 else
8626 emit_jump_insn (gen_return_pop_internal (popc));
8628 else
8629 emit_jump_insn (gen_return_internal ());
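/* In the common cases the logic above boils down either to a "leave"-style
   epilogue (restore registers with moves, then "leave; ret") or to a
   pop-based one such as

	add	$to_allocate, %esp
	pop	%esi
	pop	%edi
	pop	%ebp
	ret

   with "ret $N" substituted at the end when the callee pops its own
   arguments.  */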
8632 /* Reset from the function's potential modifications. */
8634 static void
8635 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8636 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8638 if (pic_offset_table_rtx)
8639 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
8640 #if TARGET_MACHO
8641 /* Mach-O doesn't support labels at the end of objects, so if
8642 it looks like we might want one, insert a NOP. */
8644 rtx insn = get_last_insn ();
8645 while (insn
8646 && NOTE_P (insn)
8647 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
8648 insn = PREV_INSN (insn);
8649 if (insn
8650 && (LABEL_P (insn)
8651 || (NOTE_P (insn)
8652 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
8653 fputs ("\tnop\n", file);
8655 #endif
8659 /* Extract the parts of an RTL expression that is a valid memory address
8660 for an instruction. Return 0 if the structure of the address is
8661 grossly off. Return -1 if the address contains ASHIFT, so it is not
8662 strictly valid, but still used for computing length of lea instruction. */
8665 ix86_decompose_address (rtx addr, struct ix86_address *out)
8667 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
8668 rtx base_reg, index_reg;
8669 HOST_WIDE_INT scale = 1;
8670 rtx scale_rtx = NULL_RTX;
8671 int retval = 1;
8672 enum ix86_address_seg seg = SEG_DEFAULT;
8674 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
8675 base = addr;
8676 else if (GET_CODE (addr) == PLUS)
8678 rtx addends[4], op;
8679 int n = 0, i;
8681 op = addr;
8684 if (n >= 4)
8685 return 0;
8686 addends[n++] = XEXP (op, 1);
8687 op = XEXP (op, 0);
8689 while (GET_CODE (op) == PLUS);
8690 if (n >= 4)
8691 return 0;
8692 addends[n] = op;
8694 for (i = n; i >= 0; --i)
8696 op = addends[i];
8697 switch (GET_CODE (op))
8699 case MULT:
8700 if (index)
8701 return 0;
8702 index = XEXP (op, 0);
8703 scale_rtx = XEXP (op, 1);
8704 break;
8706 case UNSPEC:
8707 if (XINT (op, 1) == UNSPEC_TP
8708 && TARGET_TLS_DIRECT_SEG_REFS
8709 && seg == SEG_DEFAULT)
8710 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
8711 else
8712 return 0;
8713 break;
8715 case REG:
8716 case SUBREG:
8717 if (!base)
8718 base = op;
8719 else if (!index)
8720 index = op;
8721 else
8722 return 0;
8723 break;
8725 case CONST:
8726 case CONST_INT:
8727 case SYMBOL_REF:
8728 case LABEL_REF:
8729 if (disp)
8730 return 0;
8731 disp = op;
8732 break;
8734 default:
8735 return 0;
8739 else if (GET_CODE (addr) == MULT)
8741 index = XEXP (addr, 0); /* index*scale */
8742 scale_rtx = XEXP (addr, 1);
8744 else if (GET_CODE (addr) == ASHIFT)
8746 rtx tmp;
8748 /* We're called for lea too, which implements ashift on occasion. */
8749 index = XEXP (addr, 0);
8750 tmp = XEXP (addr, 1);
8751 if (!CONST_INT_P (tmp))
8752 return 0;
8753 scale = INTVAL (tmp);
8754 if ((unsigned HOST_WIDE_INT) scale > 3)
8755 return 0;
8756 scale = 1 << scale;
8757 retval = -1;
8759 else
8760 disp = addr; /* displacement */
8762 /* Extract the integral value of scale. */
8763 if (scale_rtx)
8765 if (!CONST_INT_P (scale_rtx))
8766 return 0;
8767 scale = INTVAL (scale_rtx);
8770 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
8771 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
8773 /* Allow the arg pointer and the stack pointer as an index if there is no scaling. */
8774 if (base_reg && index_reg && scale == 1
8775 && (index_reg == arg_pointer_rtx
8776 || index_reg == frame_pointer_rtx
8777 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
8779 rtx tmp;
8780 tmp = base, base = index, index = tmp;
8781 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
8784 /* Special case: %ebp cannot be encoded as a base without a displacement. */
8785 if ((base_reg == hard_frame_pointer_rtx
8786 || base_reg == frame_pointer_rtx
8787 || base_reg == arg_pointer_rtx) && !disp)
8788 disp = const0_rtx;
8790 /* Special case: on K6, [%esi] makes the instruction vector decoded.
8791 Avoid this by transforming to [%esi+0].
8792 Reload calls address legitimization without cfun defined, so we need
8793 to test cfun for being non-NULL. */
8794 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
8795 && base_reg && !index_reg && !disp
8796 && REG_P (base_reg)
8797 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
8798 disp = const0_rtx;
8800 /* Special case: encode reg+reg instead of reg*2. */
8801 if (!base && index && scale && scale == 2)
8802 base = index, base_reg = index_reg, scale = 1;
8804 /* Special case: scaling cannot be encoded without base or displacement. */
8805 if (!base && !disp && index && scale != 1)
8806 disp = const0_rtx;
8808 out->base = base;
8809 out->index = index;
8810 out->disp = disp;
8811 out->scale = scale;
8812 out->seg = seg;
8814 return retval;
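/* For example, an address such as

	(plus:SI (plus:SI (mult:SI (reg:SI bx) (const_int 4))
			  (reg:SI si))
		 (const_int 12))

   decomposes into base = %esi, index = %ebx, scale = 4, disp = 12,
   i.e. the operand of a "12(%esi,%ebx,4)" memory reference.  */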
8817 /* Return the cost of the memory address x.
8818 For i386, it is better to use a complex address than let gcc copy
8819 the address into a reg and make a new pseudo. But not if the address
8820 requires two regs - that would mean more pseudos with longer
8821 lifetimes. */
8822 static int
8823 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
8825 struct ix86_address parts;
8826 int cost = 1;
8827 int ok = ix86_decompose_address (x, &parts);
8829 gcc_assert (ok);
8831 if (parts.base && GET_CODE (parts.base) == SUBREG)
8832 parts.base = SUBREG_REG (parts.base);
8833 if (parts.index && GET_CODE (parts.index) == SUBREG)
8834 parts.index = SUBREG_REG (parts.index);
8836 /* Attempt to minimize number of registers in the address. */
8837 if ((parts.base
8838 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
8839 || (parts.index
8840 && (!REG_P (parts.index)
8841 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
8842 cost++;
8844 if (parts.base
8845 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
8846 && parts.index
8847 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
8848 && parts.base != parts.index)
8849 cost++;
8851 /* The AMD K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
8852 since its predecode logic can't detect the length of instructions
8853 and it degenerates to vector decoding. Increase the cost of such
8854 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
8855 to split such addresses or even refuse them at all.
8857 The following addressing modes are affected:
8858 [base+scale*index]
8859 [scale*index+disp]
8860 [base+index]
8862 The first and last cases may be avoidable by explicitly coding the zero in
8863 the memory address, but I don't have an AMD K6 machine handy to check this
8864 theory. */
8866 if (TARGET_K6
8867 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
8868 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
8869 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
8870 cost += 10;
8872 return cost;
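/* E.g. "(%eax)" or "4(%esp)" keeps the base cost of 1, an address formed
   from two distinct not-yet-allocated pseudos costs 3, and on the K6 a
   displacement-less "(%eax,%ebx,2)" picks up the extra penalty of 10.  */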
8875 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
8876 this is used to form addresses to local data when -fPIC is in
8877 use. */
8879 static bool
8880 darwin_local_data_pic (rtx disp)
8882 return (GET_CODE (disp) == UNSPEC
8883 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
8886 /* Determine if a given RTX is a valid constant. We already know this
8887 satisfies CONSTANT_P. */
8889 bool
8890 legitimate_constant_p (rtx x)
8892 switch (GET_CODE (x))
8894 case CONST:
8895 x = XEXP (x, 0);
8897 if (GET_CODE (x) == PLUS)
8899 if (!CONST_INT_P (XEXP (x, 1)))
8900 return false;
8901 x = XEXP (x, 0);
8904 if (TARGET_MACHO && darwin_local_data_pic (x))
8905 return true;
8907 /* Only some unspecs are valid as "constants". */
8908 if (GET_CODE (x) == UNSPEC)
8909 switch (XINT (x, 1))
8911 case UNSPEC_GOT:
8912 case UNSPEC_GOTOFF:
8913 case UNSPEC_PLTOFF:
8914 return TARGET_64BIT;
8915 case UNSPEC_TPOFF:
8916 case UNSPEC_NTPOFF:
8917 x = XVECEXP (x, 0, 0);
8918 return (GET_CODE (x) == SYMBOL_REF
8919 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
8920 case UNSPEC_DTPOFF:
8921 x = XVECEXP (x, 0, 0);
8922 return (GET_CODE (x) == SYMBOL_REF
8923 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
8924 default:
8925 return false;
8928 /* We must have drilled down to a symbol. */
8929 if (GET_CODE (x) == LABEL_REF)
8930 return true;
8931 if (GET_CODE (x) != SYMBOL_REF)
8932 return false;
8933 /* FALLTHRU */
8935 case SYMBOL_REF:
8936 /* TLS symbols are never valid. */
8937 if (SYMBOL_REF_TLS_MODEL (x))
8938 return false;
8940 /* DLLIMPORT symbols are never valid. */
8941 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
8942 && SYMBOL_REF_DLLIMPORT_P (x))
8943 return false;
8944 break;
8946 case CONST_DOUBLE:
8947 if (GET_MODE (x) == TImode
8948 && x != CONST0_RTX (TImode)
8949 && !TARGET_64BIT)
8950 return false;
8951 break;
8953 case CONST_VECTOR:
8954 if (!standard_sse_constant_p (x))
8955 return false;
8957 default:
8958 break;
8961 /* Otherwise we handle everything else in the move patterns. */
8962 return true;
8965 /* Determine if it's legal to put X into the constant pool. This
8966 is not possible for the address of thread-local symbols, which
8967 is checked above. */
8969 static bool
8970 ix86_cannot_force_const_mem (rtx x)
8972 /* We can always put integral constants and vectors in memory. */
8973 switch (GET_CODE (x))
8975 case CONST_INT:
8976 case CONST_DOUBLE:
8977 case CONST_VECTOR:
8978 return false;
8980 default:
8981 break;
8983 return !legitimate_constant_p (x);
8986 /* Determine if a given RTX is a valid constant address. */
8988 bool
8989 constant_address_p (rtx x)
8991 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
8994 /* Nonzero if the constant value X is a legitimate general operand
8995 when generating PIC code. It is given that flag_pic is on and
8996 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
8998 bool
8999 legitimate_pic_operand_p (rtx x)
9001 rtx inner;
9003 switch (GET_CODE (x))
9005 case CONST:
9006 inner = XEXP (x, 0);
9007 if (GET_CODE (inner) == PLUS
9008 && CONST_INT_P (XEXP (inner, 1)))
9009 inner = XEXP (inner, 0);
9011 /* Only some unspecs are valid as "constants". */
9012 if (GET_CODE (inner) == UNSPEC)
9013 switch (XINT (inner, 1))
9015 case UNSPEC_GOT:
9016 case UNSPEC_GOTOFF:
9017 case UNSPEC_PLTOFF:
9018 return TARGET_64BIT;
9019 case UNSPEC_TPOFF:
9020 x = XVECEXP (inner, 0, 0);
9021 return (GET_CODE (x) == SYMBOL_REF
9022 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9023 case UNSPEC_MACHOPIC_OFFSET:
9024 return legitimate_pic_address_disp_p (x);
9025 default:
9026 return false;
9028 /* FALLTHRU */
9030 case SYMBOL_REF:
9031 case LABEL_REF:
9032 return legitimate_pic_address_disp_p (x);
9034 default:
9035 return true;
9039 /* Determine if a given CONST RTX is a valid memory displacement
9040 in PIC mode. */
9043 legitimate_pic_address_disp_p (rtx disp)
9045 bool saw_plus;
9047 /* In 64bit mode we can allow direct addresses of symbols and labels
9048 when they are not dynamic symbols. */
9049 if (TARGET_64BIT)
9051 rtx op0 = disp, op1;
9053 switch (GET_CODE (disp))
9055 case LABEL_REF:
9056 return true;
9058 case CONST:
9059 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9060 break;
9061 op0 = XEXP (XEXP (disp, 0), 0);
9062 op1 = XEXP (XEXP (disp, 0), 1);
9063 if (!CONST_INT_P (op1)
9064 || INTVAL (op1) >= 16*1024*1024
9065 || INTVAL (op1) < -16*1024*1024)
9066 break;
9067 if (GET_CODE (op0) == LABEL_REF)
9068 return true;
9069 if (GET_CODE (op0) != SYMBOL_REF)
9070 break;
9071 /* FALLTHRU */
9073 case SYMBOL_REF:
9074 /* TLS references should always be enclosed in UNSPEC. */
9075 if (SYMBOL_REF_TLS_MODEL (op0))
9076 return false;
9077 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9078 && ix86_cmodel != CM_LARGE_PIC)
9079 return true;
9080 break;
9082 default:
9083 break;
9086 if (GET_CODE (disp) != CONST)
9087 return 0;
9088 disp = XEXP (disp, 0);
9090 if (TARGET_64BIT)
9092 /* It is unsafe to allow PLUS expressions; this limits the allowed distance
9093 of GOT tables. We should not need these anyway. */
9094 if (GET_CODE (disp) != UNSPEC
9095 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9096 && XINT (disp, 1) != UNSPEC_GOTOFF
9097 && XINT (disp, 1) != UNSPEC_PLTOFF))
9098 return 0;
9100 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9101 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9102 return 0;
9103 return 1;
9106 saw_plus = false;
9107 if (GET_CODE (disp) == PLUS)
9109 if (!CONST_INT_P (XEXP (disp, 1)))
9110 return 0;
9111 disp = XEXP (disp, 0);
9112 saw_plus = true;
9115 if (TARGET_MACHO && darwin_local_data_pic (disp))
9116 return 1;
9118 if (GET_CODE (disp) != UNSPEC)
9119 return 0;
9121 switch (XINT (disp, 1))
9123 case UNSPEC_GOT:
9124 if (saw_plus)
9125 return false;
9126 /* We need to check for both symbols and labels because VxWorks loads
9127 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
9128 details. */
9129 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9130 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9131 case UNSPEC_GOTOFF:
9132 /* Refuse GOTOFF in 64-bit mode, since it is always 64 bits wide when used.
9133 The ABI also specifies a 32-bit relocation, but we don't produce it in
9134 the small PIC model at all. */
9135 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9136 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9137 && !TARGET_64BIT)
9138 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9139 return false;
9140 case UNSPEC_GOTTPOFF:
9141 case UNSPEC_GOTNTPOFF:
9142 case UNSPEC_INDNTPOFF:
9143 if (saw_plus)
9144 return false;
9145 disp = XVECEXP (disp, 0, 0);
9146 return (GET_CODE (disp) == SYMBOL_REF
9147 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9148 case UNSPEC_NTPOFF:
9149 disp = XVECEXP (disp, 0, 0);
9150 return (GET_CODE (disp) == SYMBOL_REF
9151 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9152 case UNSPEC_DTPOFF:
9153 disp = XVECEXP (disp, 0, 0);
9154 return (GET_CODE (disp) == SYMBOL_REF
9155 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9158 return 0;
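/* An illustrative, self-contained sketch (the helper name below is
   invented, not part of this file) of the +/- 16MB displacement window
   checked above for "symbol + offset" under the 64-bit small PIC model.  */
#include <stdbool.h>

static bool
small_pic_offset_in_range (long long offset)
{
  /* Valid offsets lie in [-16MB, 16MB); anything else falls back to the
     more general (and more expensive) addressing paths.  */
  return offset >= -16LL * 1024 * 1024 && offset < 16LL * 1024 * 1024;
}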
9161 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
9162 memory address for an instruction. The MODE argument is the machine mode
9163 for the MEM expression that wants to use this address.
9165 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
9166 convert common non-canonical forms to canonical form so that they will
9167 be recognized. */
9170 legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9171 rtx addr, int strict)
9173 struct ix86_address parts;
9174 rtx base, index, disp;
9175 HOST_WIDE_INT scale;
9176 const char *reason = NULL;
9177 rtx reason_rtx = NULL_RTX;
9179 if (ix86_decompose_address (addr, &parts) <= 0)
9181 reason = "decomposition failed";
9182 goto report_error;
9185 base = parts.base;
9186 index = parts.index;
9187 disp = parts.disp;
9188 scale = parts.scale;
9190 /* Validate base register.
9192 Don't allow SUBREGs that span more than a word here. They can lead to
9193 spill failures when the base is one word out of a two-word structure,
9194 which is represented internally as a DImode int. */
9196 if (base)
9198 rtx reg;
9199 reason_rtx = base;
9201 if (REG_P (base))
9202 reg = base;
9203 else if (GET_CODE (base) == SUBREG
9204 && REG_P (SUBREG_REG (base))
9205 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9206 <= UNITS_PER_WORD)
9207 reg = SUBREG_REG (base);
9208 else
9210 reason = "base is not a register";
9211 goto report_error;
9214 if (GET_MODE (base) != Pmode)
9216 reason = "base is not in Pmode";
9217 goto report_error;
9220 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9221 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9223 reason = "base is not valid";
9224 goto report_error;
9228 /* Validate index register.
9230 Don't allow SUBREGs that span more than a word here -- same as above. */
9232 if (index)
9234 rtx reg;
9235 reason_rtx = index;
9237 if (REG_P (index))
9238 reg = index;
9239 else if (GET_CODE (index) == SUBREG
9240 && REG_P (SUBREG_REG (index))
9241 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9242 <= UNITS_PER_WORD)
9243 reg = SUBREG_REG (index);
9244 else
9246 reason = "index is not a register";
9247 goto report_error;
9250 if (GET_MODE (index) != Pmode)
9252 reason = "index is not in Pmode";
9253 goto report_error;
9256 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9257 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9259 reason = "index is not valid";
9260 goto report_error;
9264 /* Validate scale factor. */
9265 if (scale != 1)
9267 reason_rtx = GEN_INT (scale);
9268 if (!index)
9270 reason = "scale without index";
9271 goto report_error;
9274 if (scale != 2 && scale != 4 && scale != 8)
9276 reason = "scale is not a valid multiplier";
9277 goto report_error;
9281 /* Validate displacement. */
9282 if (disp)
9284 reason_rtx = disp;
9286 if (GET_CODE (disp) == CONST
9287 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9288 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9289 switch (XINT (XEXP (disp, 0), 1))
9291 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit when
9292 used. The ABI also specifies 32bit relocations, but we don't produce
9293 them at all and use IP relative addressing instead. */
9294 case UNSPEC_GOT:
9295 case UNSPEC_GOTOFF:
9296 gcc_assert (flag_pic);
9297 if (!TARGET_64BIT)
9298 goto is_legitimate_pic;
9299 reason = "64bit address unspec";
9300 goto report_error;
9302 case UNSPEC_GOTPCREL:
9303 gcc_assert (flag_pic);
9304 goto is_legitimate_pic;
9306 case UNSPEC_GOTTPOFF:
9307 case UNSPEC_GOTNTPOFF:
9308 case UNSPEC_INDNTPOFF:
9309 case UNSPEC_NTPOFF:
9310 case UNSPEC_DTPOFF:
9311 break;
9313 default:
9314 reason = "invalid address unspec";
9315 goto report_error;
9318 else if (SYMBOLIC_CONST (disp)
9319 && (flag_pic
9320 || (TARGET_MACHO
9321 #if TARGET_MACHO
9322 && MACHOPIC_INDIRECT
9323 && !machopic_operand_p (disp)
9324 #endif
9328 is_legitimate_pic:
9329 if (TARGET_64BIT && (index || base))
9331 /* foo@dtpoff(%rX) is ok. */
9332 if (GET_CODE (disp) != CONST
9333 || GET_CODE (XEXP (disp, 0)) != PLUS
9334 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
9335 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
9336 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
9337 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
9339 reason = "non-constant pic memory reference";
9340 goto report_error;
9343 else if (! legitimate_pic_address_disp_p (disp))
9345 reason = "displacement is an invalid pic construct";
9346 goto report_error;
9349 /* This code used to verify that a symbolic pic displacement
9350 includes the pic_offset_table_rtx register.
9352 While this is a good idea, these constructs may unfortunately
9353 be created by the "adds using lea" optimization for incorrect
9354 code like:
9356 int a;
9357 int foo(int i)
9359 return *(&a+i);
9362 This code is nonsensical, but it results in addressing the
9363 GOT table with pic_offset_table_rtx as the base. We can't
9364 easily refuse it, since it gets matched by the "addsi3"
9365 pattern, which is later split to lea when the output
9366 register differs from the input. While this could be handled
9367 by a separate addsi pattern for this case that never results
9368 in lea, disabling this test seems to be the easier and
9369 correct fix for the crash. */
9371 else if (GET_CODE (disp) != LABEL_REF
9372 && !CONST_INT_P (disp)
9373 && (GET_CODE (disp) != CONST
9374 || !legitimate_constant_p (disp))
9375 && (GET_CODE (disp) != SYMBOL_REF
9376 || !legitimate_constant_p (disp)))
9378 reason = "displacement is not constant";
9379 goto report_error;
9381 else if (TARGET_64BIT
9382 && !x86_64_immediate_operand (disp, VOIDmode))
9384 reason = "displacement is out of range";
9385 goto report_error;
9389 /* Everything looks valid. */
9390 return TRUE;
9392 report_error:
9393 return FALSE;
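/* A self-contained sketch (struct and function names are invented for
   illustration) of the structural checks performed above: an x86 address
   is base + index*scale + disp, the scale must be 1, 2, 4 or 8, and a
   scale other than 1 only makes sense with an index register.  */
#include <stdbool.h>

struct addr_shape { bool has_index; int scale; };

static bool
addr_shape_valid (const struct addr_shape *p)
{
  if (p->scale != 1 && !p->has_index)
    return false;                 /* "scale without index" */
  return p->scale == 1 || p->scale == 2 || p->scale == 4 || p->scale == 8;
}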
9396 /* Return a unique alias set for the GOT. */
9398 static alias_set_type
9399 ix86_GOT_alias_set (void)
9401 static alias_set_type set = -1;
9402 if (set == -1)
9403 set = new_alias_set ();
9404 return set;
9407 /* Return a legitimate reference for ORIG (an address) using the
9408 register REG. If REG is 0, a new pseudo is generated.
9410 There are two types of references that must be handled:
9412 1. Global data references must load the address from the GOT, via
9413 the PIC reg. An insn is emitted to do this load, and the reg is
9414 returned.
9416 2. Static data references, constant pool addresses, and code labels
9417 compute the address as an offset from the GOT, whose base is in
9418 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
9419 differentiate them from global data objects. The returned
9420 address is the PIC reg + an unspec constant.
9422 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
9423 reg also appears in the address. */
9425 static rtx
9426 legitimize_pic_address (rtx orig, rtx reg)
9428 rtx addr = orig;
9429 rtx new_rtx = orig;
9430 rtx base;
9432 #if TARGET_MACHO
9433 if (TARGET_MACHO && !TARGET_64BIT)
9435 if (reg == 0)
9436 reg = gen_reg_rtx (Pmode);
9437 /* Use the generic Mach-O PIC machinery. */
9438 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
9440 #endif
9442 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
9443 new_rtx = addr;
9444 else if (TARGET_64BIT
9445 && ix86_cmodel != CM_SMALL_PIC
9446 && gotoff_operand (addr, Pmode))
9448 rtx tmpreg;
9449 /* This symbol may be referenced via a displacement from the PIC
9450 base address (@GOTOFF). */
9452 if (reload_in_progress)
9453 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9454 if (GET_CODE (addr) == CONST)
9455 addr = XEXP (addr, 0);
9456 if (GET_CODE (addr) == PLUS)
9458 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
9459 UNSPEC_GOTOFF);
9460 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
9462 else
9463 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
9464 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9465 if (!reg)
9466 tmpreg = gen_reg_rtx (Pmode);
9467 else
9468 tmpreg = reg;
9469 emit_move_insn (tmpreg, new_rtx);
9471 if (reg != 0)
9473 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
9474 tmpreg, 1, OPTAB_DIRECT);
9475 new_rtx = reg;
9477 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
9479 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
9481 /* This symbol may be referenced via a displacement from the PIC
9482 base address (@GOTOFF). */
9484 if (reload_in_progress)
9485 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9486 if (GET_CODE (addr) == CONST)
9487 addr = XEXP (addr, 0);
9488 if (GET_CODE (addr) == PLUS)
9490 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
9491 UNSPEC_GOTOFF);
9492 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
9494 else
9495 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
9496 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9497 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9499 if (reg != 0)
9501 emit_move_insn (reg, new_rtx);
9502 new_rtx = reg;
9505 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
9506 /* We can't use @GOTOFF for text labels on VxWorks;
9507 see gotoff_operand. */
9508 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
9510 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
9512 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
9513 return legitimize_dllimport_symbol (addr, true);
9514 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
9515 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
9516 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
9518 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
9519 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
9523 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
9525 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
9526 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9527 new_rtx = gen_const_mem (Pmode, new_rtx);
9528 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
9530 if (reg == 0)
9531 reg = gen_reg_rtx (Pmode);
9532 /* Use gen_movsi directly, otherwise the address is loaded
9533 into a register for CSE. We don't want to CSE these addresses;
9534 instead we CSE the addresses loaded from the GOT table, so skip this. */
9535 emit_insn (gen_movsi (reg, new_rtx));
9536 new_rtx = reg;
9538 else
9540 /* This symbol must be referenced via a load from the
9541 Global Offset Table (@GOT). */
9543 if (reload_in_progress)
9544 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9545 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
9546 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9547 if (TARGET_64BIT)
9548 new_rtx = force_reg (Pmode, new_rtx);
9549 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9550 new_rtx = gen_const_mem (Pmode, new_rtx);
9551 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
9553 if (reg == 0)
9554 reg = gen_reg_rtx (Pmode);
9555 emit_move_insn (reg, new_rtx);
9556 new_rtx = reg;
9559 else
9561 if (CONST_INT_P (addr)
9562 && !x86_64_immediate_operand (addr, VOIDmode))
9564 if (reg)
9566 emit_move_insn (reg, addr);
9567 new_rtx = reg;
9569 else
9570 new_rtx = force_reg (Pmode, addr);
9572 else if (GET_CODE (addr) == CONST)
9574 addr = XEXP (addr, 0);
9576 /* We must match stuff we generate before. Assume the only
9577 unspecs that can get here are ours. Not that we could do
9578 anything with them anyway.... */
9579 if (GET_CODE (addr) == UNSPEC
9580 || (GET_CODE (addr) == PLUS
9581 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
9582 return orig;
9583 gcc_assert (GET_CODE (addr) == PLUS);
9585 if (GET_CODE (addr) == PLUS)
9587 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
9589 /* Check first to see if this is a constant offset from a @GOTOFF
9590 symbol reference. */
9591 if (gotoff_operand (op0, Pmode)
9592 && CONST_INT_P (op1))
9594 if (!TARGET_64BIT)
9596 if (reload_in_progress)
9597 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9598 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
9599 UNSPEC_GOTOFF);
9600 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
9601 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9602 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9604 if (reg != 0)
9606 emit_move_insn (reg, new_rtx);
9607 new_rtx = reg;
9610 else
9612 if (INTVAL (op1) < -16*1024*1024
9613 || INTVAL (op1) >= 16*1024*1024)
9615 if (!x86_64_immediate_operand (op1, Pmode))
9616 op1 = force_reg (Pmode, op1);
9617 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
9621 else
9623 base = legitimize_pic_address (XEXP (addr, 0), reg);
9624 new_rtx = legitimize_pic_address (XEXP (addr, 1),
9625 base == reg ? NULL_RTX : reg);
9627 if (CONST_INT_P (new_rtx))
9628 new_rtx = plus_constant (base, INTVAL (new_rtx));
9629 else
9631 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
9633 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
9634 new_rtx = XEXP (new_rtx, 1);
9636 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
9641 return new_rtx;
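/* A self-contained sketch of the two 32-bit address forms built above
   (the assembly strings are illustrative; register choices are arbitrary):
   local data is addressed as an offset from the PIC register (@GOTOFF),
   while global data is loaded through its GOT slot (@GOT).  */
#include <stdio.h>

static void
show_pic_forms (const char *sym, int is_local)
{
  if (is_local)
    /* (plus pic_reg (const (unspec [sym] UNSPEC_GOTOFF)))  */
    printf ("leal %s@GOTOFF(%%ebx), %%eax\n", sym);
  else
    /* (mem (plus pic_reg (const (unspec [sym] UNSPEC_GOT))))  */
    printf ("movl %s@GOT(%%ebx), %%eax\n", sym);
}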
9644 /* Load the thread pointer. If TO_REG is true, force it into a register. */
9646 static rtx
9647 get_thread_pointer (int to_reg)
9649 rtx tp, reg, insn;
9651 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
9652 if (!to_reg)
9653 return tp;
9655 reg = gen_reg_rtx (Pmode);
9656 insn = gen_rtx_SET (VOIDmode, reg, tp);
9657 insn = emit_insn (insn);
9659 return reg;
9662 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
9663 false if we expect this to be used for a memory address and true if
9664 we expect to load the address into a register. */
9666 static rtx
9667 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
9669 rtx dest, base, off, pic, tp;
9670 int type;
9672 switch (model)
9674 case TLS_MODEL_GLOBAL_DYNAMIC:
9675 dest = gen_reg_rtx (Pmode);
9676 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
9678 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
9680 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
9682 start_sequence ();
9683 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
9684 insns = get_insns ();
9685 end_sequence ();
9687 RTL_CONST_CALL_P (insns) = 1;
9688 emit_libcall_block (insns, dest, rax, x);
9690 else if (TARGET_64BIT && TARGET_GNU2_TLS)
9691 emit_insn (gen_tls_global_dynamic_64 (dest, x));
9692 else
9693 emit_insn (gen_tls_global_dynamic_32 (dest, x));
9695 if (TARGET_GNU2_TLS)
9697 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
9699 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
9701 break;
9703 case TLS_MODEL_LOCAL_DYNAMIC:
9704 base = gen_reg_rtx (Pmode);
9705 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
9707 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
9709 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
9711 start_sequence ();
9712 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
9713 insns = get_insns ();
9714 end_sequence ();
9716 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
9717 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
9718 RTL_CONST_CALL_P (insns) = 1;
9719 emit_libcall_block (insns, base, rax, note);
9721 else if (TARGET_64BIT && TARGET_GNU2_TLS)
9722 emit_insn (gen_tls_local_dynamic_base_64 (base));
9723 else
9724 emit_insn (gen_tls_local_dynamic_base_32 (base));
9726 if (TARGET_GNU2_TLS)
9728 rtx x = ix86_tls_module_base ();
9730 set_unique_reg_note (get_last_insn (), REG_EQUIV,
9731 gen_rtx_MINUS (Pmode, x, tp));
9734 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
9735 off = gen_rtx_CONST (Pmode, off);
9737 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
9739 if (TARGET_GNU2_TLS)
9741 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
9743 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
9746 break;
9748 case TLS_MODEL_INITIAL_EXEC:
9749 if (TARGET_64BIT)
9751 pic = NULL;
9752 type = UNSPEC_GOTNTPOFF;
9754 else if (flag_pic)
9756 if (reload_in_progress)
9757 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9758 pic = pic_offset_table_rtx;
9759 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
9761 else if (!TARGET_ANY_GNU_TLS)
9763 pic = gen_reg_rtx (Pmode);
9764 emit_insn (gen_set_got (pic));
9765 type = UNSPEC_GOTTPOFF;
9767 else
9769 pic = NULL;
9770 type = UNSPEC_INDNTPOFF;
9773 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
9774 off = gen_rtx_CONST (Pmode, off);
9775 if (pic)
9776 off = gen_rtx_PLUS (Pmode, pic, off);
9777 off = gen_const_mem (Pmode, off);
9778 set_mem_alias_set (off, ix86_GOT_alias_set ());
9780 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9782 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
9783 off = force_reg (Pmode, off);
9784 return gen_rtx_PLUS (Pmode, base, off);
9786 else
9788 base = get_thread_pointer (true);
9789 dest = gen_reg_rtx (Pmode);
9790 emit_insn (gen_subsi3 (dest, base, off));
9792 break;
9794 case TLS_MODEL_LOCAL_EXEC:
9795 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
9796 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9797 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
9798 off = gen_rtx_CONST (Pmode, off);
9800 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9802 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
9803 return gen_rtx_PLUS (Pmode, base, off);
9805 else
9807 base = get_thread_pointer (true);
9808 dest = gen_reg_rtx (Pmode);
9809 emit_insn (gen_subsi3 (dest, base, off));
9811 break;
9813 default:
9814 gcc_unreachable ();
9817 return dest;
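/* A minimal standalone sketch (function and parameter names invented for
   illustration) of the exec-model arithmetic selected above: the GNU TLS
   variants simply add the variable's (negative) @NTPOFF offset to the
   thread pointer, while the traditional 32-bit form subtracts a positive
   @TPOFF value from it.  */
#include <stdint.h>

static uintptr_t
tls_exec_address (uintptr_t tp, intptr_t ntpoff, uintptr_t tpoff, int gnu_tls)
{
  return gnu_tls ? tp + ntpoff     /* tp + sym@NTPOFF */
                 : tp - tpoff;     /* tp - sym@TPOFF  */
}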
9820 /* Create or return the unique __imp_DECL dllimport symbol corresponding
9821 to symbol DECL. */
9823 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
9824 htab_t dllimport_map;
9826 static tree
9827 get_dllimport_decl (tree decl)
9829 struct tree_map *h, in;
9830 void **loc;
9831 const char *name;
9832 const char *prefix;
9833 size_t namelen, prefixlen;
9834 char *imp_name;
9835 tree to;
9836 rtx rtl;
9838 if (!dllimport_map)
9839 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
9841 in.hash = htab_hash_pointer (decl);
9842 in.base.from = decl;
9843 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
9844 h = (struct tree_map *) *loc;
9845 if (h)
9846 return h->to;
9848 *loc = h = GGC_NEW (struct tree_map);
9849 h->hash = in.hash;
9850 h->base.from = decl;
9851 h->to = to = build_decl (VAR_DECL, NULL, ptr_type_node);
9852 DECL_ARTIFICIAL (to) = 1;
9853 DECL_IGNORED_P (to) = 1;
9854 DECL_EXTERNAL (to) = 1;
9855 TREE_READONLY (to) = 1;
9857 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9858 name = targetm.strip_name_encoding (name);
9859 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
9860 ? "*__imp_" : "*__imp__";
9861 namelen = strlen (name);
9862 prefixlen = strlen (prefix);
9863 imp_name = (char *) alloca (namelen + prefixlen + 1);
9864 memcpy (imp_name, prefix, prefixlen);
9865 memcpy (imp_name + prefixlen, name, namelen + 1);
9867 name = ggc_alloc_string (imp_name, namelen + prefixlen);
9868 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
9869 SET_SYMBOL_REF_DECL (rtl, to);
9870 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
9872 rtl = gen_const_mem (Pmode, rtl);
9873 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
9875 SET_DECL_RTL (to, rtl);
9876 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
9878 return to;
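/* A self-contained sketch (separate from the GC-allocated machinery above;
   the helper name is invented) of how the dllimport symbol name is built:
   the second underscore is dropped for fastcall symbols and for targets
   whose user label prefix is empty.  */
#include <stdio.h>

static void
make_imp_name (char *buf, size_t len, const char *name, int no_underscore)
{
  const char *prefix = no_underscore ? "*__imp_" : "*__imp__";
  snprintf (buf, len, "%s%s", prefix, name);
}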
9881 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
9882 true if we require the result be a register. */
9884 static rtx
9885 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
9887 tree imp_decl;
9888 rtx x;
9890 gcc_assert (SYMBOL_REF_DECL (symbol));
9891 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
9893 x = DECL_RTL (imp_decl);
9894 if (want_reg)
9895 x = force_reg (Pmode, x);
9896 return x;
9899 /* Try machine-dependent ways of modifying an illegitimate address
9900 to be legitimate. If we find one, return the new, valid address.
9901 This macro is used in only one place: `memory_address' in explow.c.
9903 OLDX is the address as it was before break_out_memory_refs was called.
9904 In some cases it is useful to look at this to decide what needs to be done.
9906 MODE and WIN are passed so that this macro can use
9907 GO_IF_LEGITIMATE_ADDRESS.
9909 It is always safe for this macro to do nothing. It exists to recognize
9910 opportunities to optimize the output.
9912 For the 80386, we handle X+REG by loading X into a register R and
9913 using R+REG. R will go in a general reg and indexing will be used.
9914 However, if REG is a broken-out memory address or multiplication,
9915 nothing needs to be done because REG can certainly go in a general reg.
9917 When -fpic is used, special handling is needed for symbolic references.
9918 See comments by legitimize_pic_address in i386.c for details. */
9921 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
9923 int changed = 0;
9924 unsigned log;
9926 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
9927 if (log)
9928 return legitimize_tls_address (x, (enum tls_model) log, false);
9929 if (GET_CODE (x) == CONST
9930 && GET_CODE (XEXP (x, 0)) == PLUS
9931 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9932 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
9934 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
9935 (enum tls_model) log, false);
9936 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
9939 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
9941 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
9942 return legitimize_dllimport_symbol (x, true);
9943 if (GET_CODE (x) == CONST
9944 && GET_CODE (XEXP (x, 0)) == PLUS
9945 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9946 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
9948 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
9949 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
9953 if (flag_pic && SYMBOLIC_CONST (x))
9954 return legitimize_pic_address (x, 0);
9956 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
9957 if (GET_CODE (x) == ASHIFT
9958 && CONST_INT_P (XEXP (x, 1))
9959 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
9961 changed = 1;
9962 log = INTVAL (XEXP (x, 1));
9963 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
9964 GEN_INT (1 << log));
9967 if (GET_CODE (x) == PLUS)
9969 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
9971 if (GET_CODE (XEXP (x, 0)) == ASHIFT
9972 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9973 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
9975 changed = 1;
9976 log = INTVAL (XEXP (XEXP (x, 0), 1));
9977 XEXP (x, 0) = gen_rtx_MULT (Pmode,
9978 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
9979 GEN_INT (1 << log));
9982 if (GET_CODE (XEXP (x, 1)) == ASHIFT
9983 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
9984 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
9986 changed = 1;
9987 log = INTVAL (XEXP (XEXP (x, 1), 1));
9988 XEXP (x, 1) = gen_rtx_MULT (Pmode,
9989 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
9990 GEN_INT (1 << log));
9993 /* Put multiply first if it isn't already. */
9994 if (GET_CODE (XEXP (x, 1)) == MULT)
9996 rtx tmp = XEXP (x, 0);
9997 XEXP (x, 0) = XEXP (x, 1);
9998 XEXP (x, 1) = tmp;
9999 changed = 1;
10002 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10003 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10004 created by virtual register instantiation, register elimination, and
10005 similar optimizations. */
10006 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10008 changed = 1;
10009 x = gen_rtx_PLUS (Pmode,
10010 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10011 XEXP (XEXP (x, 1), 0)),
10012 XEXP (XEXP (x, 1), 1));
10015 /* Canonicalize
10016 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10017 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10018 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10019 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10020 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10021 && CONSTANT_P (XEXP (x, 1)))
10023 rtx constant;
10024 rtx other = NULL_RTX;
10026 if (CONST_INT_P (XEXP (x, 1)))
10028 constant = XEXP (x, 1);
10029 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10031 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10033 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10034 other = XEXP (x, 1);
10036 else
10037 constant = 0;
10039 if (constant)
10041 changed = 1;
10042 x = gen_rtx_PLUS (Pmode,
10043 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10044 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10045 plus_constant (other, INTVAL (constant)));
10049 if (changed && legitimate_address_p (mode, x, FALSE))
10050 return x;
10052 if (GET_CODE (XEXP (x, 0)) == MULT)
10054 changed = 1;
10055 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10058 if (GET_CODE (XEXP (x, 1)) == MULT)
10060 changed = 1;
10061 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10064 if (changed
10065 && REG_P (XEXP (x, 1))
10066 && REG_P (XEXP (x, 0)))
10067 return x;
10069 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10071 changed = 1;
10072 x = legitimize_pic_address (x, 0);
10075 if (changed && legitimate_address_p (mode, x, FALSE))
10076 return x;
10078 if (REG_P (XEXP (x, 0)))
10080 rtx temp = gen_reg_rtx (Pmode);
10081 rtx val = force_operand (XEXP (x, 1), temp);
10082 if (val != temp)
10083 emit_move_insn (temp, val);
10085 XEXP (x, 1) = temp;
10086 return x;
10089 else if (REG_P (XEXP (x, 1)))
10091 rtx temp = gen_reg_rtx (Pmode);
10092 rtx val = force_operand (XEXP (x, 0), temp);
10093 if (val != temp)
10094 emit_move_insn (temp, val);
10096 XEXP (x, 0) = temp;
10097 return x;
10101 return x;
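/* An illustrative standalone sketch (plain arithmetic, not the RTL
   transformation itself) of the canonicalization above: shifts by 0..3
   become multiplications by 1, 2, 4 or 8 so that they can later be
   matched as an index scale.  */
static unsigned long
canonical_scale_from_shift (unsigned long value, int shift_count)
{
  if (shift_count >= 0 && shift_count < 4)
    return value * (1UL << shift_count);  /* (ashift x n) -> (mult x 2^n) */
  return value << shift_count;            /* larger shifts are left alone */
}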
10104 /* Print an integer constant expression in assembler syntax. Addition
10105 and subtraction are the only arithmetic that may appear in these
10106 expressions. FILE is the stdio stream to write to, X is the rtx, and
10107 CODE is the operand print code from the output string. */
10109 static void
10110 output_pic_addr_const (FILE *file, rtx x, int code)
10112 char buf[256];
10114 switch (GET_CODE (x))
10116 case PC:
10117 gcc_assert (flag_pic);
10118 putc ('.', file);
10119 break;
10121 case SYMBOL_REF:
10122 if (! TARGET_MACHO || TARGET_64BIT)
10123 output_addr_const (file, x);
10124 else
10126 const char *name = XSTR (x, 0);
10128 /* Mark the decl as referenced so that cgraph will
10129 output the function. */
10130 if (SYMBOL_REF_DECL (x))
10131 mark_decl_referenced (SYMBOL_REF_DECL (x));
10133 #if TARGET_MACHO
10134 if (MACHOPIC_INDIRECT
10135 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10136 name = machopic_indirection_name (x, /*stub_p=*/true);
10137 #endif
10138 assemble_name (file, name);
10140 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10141 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10142 fputs ("@PLT", file);
10143 break;
10145 case LABEL_REF:
10146 x = XEXP (x, 0);
10147 /* FALLTHRU */
10148 case CODE_LABEL:
10149 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10150 assemble_name (asm_out_file, buf);
10151 break;
10153 case CONST_INT:
10154 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10155 break;
10157 case CONST:
10158 /* This used to output parentheses around the expression,
10159 but that does not work on the 386 (either ATT or BSD assembler). */
10160 output_pic_addr_const (file, XEXP (x, 0), code);
10161 break;
10163 case CONST_DOUBLE:
10164 if (GET_MODE (x) == VOIDmode)
10166 /* We can use %d if the number is <32 bits and positive. */
10167 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10168 fprintf (file, "0x%lx%08lx",
10169 (unsigned long) CONST_DOUBLE_HIGH (x),
10170 (unsigned long) CONST_DOUBLE_LOW (x));
10171 else
10172 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10174 else
10175 /* We can't handle floating point constants;
10176 PRINT_OPERAND must handle them. */
10177 output_operand_lossage ("floating constant misused");
10178 break;
10180 case PLUS:
10181 /* Some assemblers need integer constants to appear first. */
10182 if (CONST_INT_P (XEXP (x, 0)))
10184 output_pic_addr_const (file, XEXP (x, 0), code);
10185 putc ('+', file);
10186 output_pic_addr_const (file, XEXP (x, 1), code);
10188 else
10190 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10191 output_pic_addr_const (file, XEXP (x, 1), code);
10192 putc ('+', file);
10193 output_pic_addr_const (file, XEXP (x, 0), code);
10195 break;
10197 case MINUS:
10198 if (!TARGET_MACHO)
10199 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10200 output_pic_addr_const (file, XEXP (x, 0), code);
10201 putc ('-', file);
10202 output_pic_addr_const (file, XEXP (x, 1), code);
10203 if (!TARGET_MACHO)
10204 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10205 break;
10207 case UNSPEC:
10208 gcc_assert (XVECLEN (x, 0) == 1);
10209 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10210 switch (XINT (x, 1))
10212 case UNSPEC_GOT:
10213 fputs ("@GOT", file);
10214 break;
10215 case UNSPEC_GOTOFF:
10216 fputs ("@GOTOFF", file);
10217 break;
10218 case UNSPEC_PLTOFF:
10219 fputs ("@PLTOFF", file);
10220 break;
10221 case UNSPEC_GOTPCREL:
10222 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10223 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10224 break;
10225 case UNSPEC_GOTTPOFF:
10226 /* FIXME: This might be @TPOFF in Sun ld too. */
10227 fputs ("@GOTTPOFF", file);
10228 break;
10229 case UNSPEC_TPOFF:
10230 fputs ("@TPOFF", file);
10231 break;
10232 case UNSPEC_NTPOFF:
10233 if (TARGET_64BIT)
10234 fputs ("@TPOFF", file);
10235 else
10236 fputs ("@NTPOFF", file);
10237 break;
10238 case UNSPEC_DTPOFF:
10239 fputs ("@DTPOFF", file);
10240 break;
10241 case UNSPEC_GOTNTPOFF:
10242 if (TARGET_64BIT)
10243 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10244 "@GOTTPOFF(%rip)": "@GOTTPOFF[rip]", file);
10245 else
10246 fputs ("@GOTNTPOFF", file);
10247 break;
10248 case UNSPEC_INDNTPOFF:
10249 fputs ("@INDNTPOFF", file);
10250 break;
10251 #if TARGET_MACHO
10252 case UNSPEC_MACHOPIC_OFFSET:
10253 putc ('-', file);
10254 machopic_output_function_base_name (file);
10255 break;
10256 #endif
10257 default:
10258 output_operand_lossage ("invalid UNSPEC as operand");
10259 break;
10261 break;
10263 default:
10264 output_operand_lossage ("invalid expression as operand");
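/* A small standalone sketch (the enum and function are illustrative, not
   the compiler's own types) of the suffix selection performed above for a
   few common cases; the real code also varies the spelling by assembler
   dialect and target.  */
enum pic_reloc { RELOC_GOT, RELOC_GOTOFF, RELOC_PLTOFF, RELOC_GOTPCREL };

static const char *
pic_reloc_suffix (enum pic_reloc kind)
{
  switch (kind)
    {
    case RELOC_GOT:      return "@GOT";
    case RELOC_GOTOFF:   return "@GOTOFF";
    case RELOC_PLTOFF:   return "@PLTOFF";
    case RELOC_GOTPCREL: return "@GOTPCREL(%rip)";
    }
  return "";
}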
10268 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10269 We need to emit DTP-relative relocations. */
10271 static void ATTRIBUTE_UNUSED
10272 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10274 fputs (ASM_LONG, file);
10275 output_addr_const (file, x);
10276 fputs ("@DTPOFF", file);
10277 switch (size)
10279 case 4:
10280 break;
10281 case 8:
10282 fputs (", 0", file);
10283 break;
10284 default:
10285 gcc_unreachable ();
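/* A self-contained sketch of the directives the function above emits;
   the "\t.long\t" spelling of ASM_LONG is only an assumption made for
   this illustration.  */
#include <stdio.h>

static void
show_dwarf_dtprel (FILE *f, const char *sym, int size)
{
  /* 4-byte case: .long sym@DTPOFF; the 8-byte case pads with a zero word.  */
  fprintf (f, "\t.long\t%s@DTPOFF%s\n", sym, size == 8 ? ", 0" : "");
}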
10289 /* Return true if X is a representation of the PIC register. This copes
10290 with calls from ix86_find_base_term, where the register might have
10291 been replaced by a cselib value. */
10293 static bool
10294 ix86_pic_register_p (rtx x)
10296 if (GET_CODE (x) == VALUE)
10297 return (pic_offset_table_rtx
10298 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10299 else
10300 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10303 /* In the name of slightly smaller debug output, and to cater to
10304 general assembler lossage, recognize PIC+GOTOFF and turn it back
10305 into a direct symbol reference.
10307 On Darwin, this is necessary to avoid a crash, because Darwin
10308 has a different PIC label for each routine but the DWARF debugging
10309 information is not associated with any particular routine, so it's
10310 necessary to remove references to the PIC label from RTL stored by
10311 the DWARF output code. */
10313 static rtx
10314 ix86_delegitimize_address (rtx orig_x)
10316 rtx x = orig_x;
10317 /* reg_addend is NULL or a multiple of some register. */
10318 rtx reg_addend = NULL_RTX;
10319 /* const_addend is NULL or a const_int. */
10320 rtx const_addend = NULL_RTX;
10321 /* This is the result, or NULL. */
10322 rtx result = NULL_RTX;
10324 if (MEM_P (x))
10325 x = XEXP (x, 0);
10327 if (TARGET_64BIT)
10329 if (GET_CODE (x) != CONST
10330 || GET_CODE (XEXP (x, 0)) != UNSPEC
10331 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
10332 || !MEM_P (orig_x))
10333 return orig_x;
10334 return XVECEXP (XEXP (x, 0), 0, 0);
10337 if (GET_CODE (x) != PLUS
10338 || GET_CODE (XEXP (x, 1)) != CONST)
10339 return orig_x;
10341 if (ix86_pic_register_p (XEXP (x, 0)))
10342 /* %ebx + GOT/GOTOFF */
10344 else if (GET_CODE (XEXP (x, 0)) == PLUS)
10346 /* %ebx + %reg * scale + GOT/GOTOFF */
10347 reg_addend = XEXP (x, 0);
10348 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
10349 reg_addend = XEXP (reg_addend, 1);
10350 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
10351 reg_addend = XEXP (reg_addend, 0);
10352 else
10353 return orig_x;
10354 if (!REG_P (reg_addend)
10355 && GET_CODE (reg_addend) != MULT
10356 && GET_CODE (reg_addend) != ASHIFT)
10357 return orig_x;
10359 else
10360 return orig_x;
10362 x = XEXP (XEXP (x, 1), 0);
10363 if (GET_CODE (x) == PLUS
10364 && CONST_INT_P (XEXP (x, 1)))
10366 const_addend = XEXP (x, 1);
10367 x = XEXP (x, 0);
10370 if (GET_CODE (x) == UNSPEC
10371 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
10372 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
10373 result = XVECEXP (x, 0, 0);
10375 if (TARGET_MACHO && darwin_local_data_pic (x)
10376 && !MEM_P (orig_x))
10377 result = XVECEXP (x, 0, 0);
10379 if (! result)
10380 return orig_x;
10382 if (const_addend)
10383 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
10384 if (reg_addend)
10385 result = gen_rtx_PLUS (Pmode, reg_addend, result);
10386 return result;
10389 /* If X is a machine specific address (i.e. a symbol or label being
10390 referenced as a displacement from the GOT implemented using an
10391 UNSPEC), then return the base term. Otherwise return X. */
10394 ix86_find_base_term (rtx x)
10396 rtx term;
10398 if (TARGET_64BIT)
10400 if (GET_CODE (x) != CONST)
10401 return x;
10402 term = XEXP (x, 0);
10403 if (GET_CODE (term) == PLUS
10404 && (CONST_INT_P (XEXP (term, 1))
10405 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
10406 term = XEXP (term, 0);
10407 if (GET_CODE (term) != UNSPEC
10408 || XINT (term, 1) != UNSPEC_GOTPCREL)
10409 return x;
10411 return XVECEXP (term, 0, 0);
10414 return ix86_delegitimize_address (x);
10417 static void
10418 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
10419 int fp, FILE *file)
10421 const char *suffix;
10423 if (mode == CCFPmode || mode == CCFPUmode)
10425 enum rtx_code second_code, bypass_code;
10426 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
10427 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
10428 code = ix86_fp_compare_code_to_integer (code);
10429 mode = CCmode;
10431 if (reverse)
10432 code = reverse_condition (code);
10434 switch (code)
10436 case EQ:
10437 switch (mode)
10439 case CCAmode:
10440 suffix = "a";
10441 break;
10443 case CCCmode:
10444 suffix = "c";
10445 break;
10447 case CCOmode:
10448 suffix = "o";
10449 break;
10451 case CCSmode:
10452 suffix = "s";
10453 break;
10455 default:
10456 suffix = "e";
10458 break;
10459 case NE:
10460 switch (mode)
10462 case CCAmode:
10463 suffix = "na";
10464 break;
10466 case CCCmode:
10467 suffix = "nc";
10468 break;
10470 case CCOmode:
10471 suffix = "no";
10472 break;
10474 case CCSmode:
10475 suffix = "ns";
10476 break;
10478 default:
10479 suffix = "ne";
10481 break;
10482 case GT:
10483 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
10484 suffix = "g";
10485 break;
10486 case GTU:
10487 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
10488 Those same assemblers have the same but opposite lossage on cmov. */
10489 if (mode == CCmode)
10490 suffix = fp ? "nbe" : "a";
10491 else if (mode == CCCmode)
10492 suffix = "b";
10493 else
10494 gcc_unreachable ();
10495 break;
10496 case LT:
10497 switch (mode)
10499 case CCNOmode:
10500 case CCGOCmode:
10501 suffix = "s";
10502 break;
10504 case CCmode:
10505 case CCGCmode:
10506 suffix = "l";
10507 break;
10509 default:
10510 gcc_unreachable ();
10512 break;
10513 case LTU:
10514 gcc_assert (mode == CCmode || mode == CCCmode);
10515 suffix = "b";
10516 break;
10517 case GE:
10518 switch (mode)
10520 case CCNOmode:
10521 case CCGOCmode:
10522 suffix = "ns";
10523 break;
10525 case CCmode:
10526 case CCGCmode:
10527 suffix = "ge";
10528 break;
10530 default:
10531 gcc_unreachable ();
10533 break;
10534 case GEU:
10535 /* ??? As above. */
10536 gcc_assert (mode == CCmode || mode == CCCmode);
10537 suffix = fp ? "nb" : "ae";
10538 break;
10539 case LE:
10540 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
10541 suffix = "le";
10542 break;
10543 case LEU:
10544 /* ??? As above. */
10545 if (mode == CCmode)
10546 suffix = "be";
10547 else if (mode == CCCmode)
10548 suffix = fp ? "nb" : "ae";
10549 else
10550 gcc_unreachable ();
10551 break;
10552 case UNORDERED:
10553 suffix = fp ? "u" : "p";
10554 break;
10555 case ORDERED:
10556 suffix = fp ? "nu" : "np";
10557 break;
10558 default:
10559 gcc_unreachable ();
10561 fputs (suffix, file);
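/* A standalone sketch (tables and indexing invented for illustration; the
   function above additionally folds in FP comparisons and the special CC
   modes) of the basic signed vs. unsigned condition-suffix choice.  */
static const char *
int_cond_suffix (int is_unsigned, int code /* 0:eq 1:ne 2:lt 3:le 4:gt 5:ge */)
{
  static const char *const signed_sfx[]   = { "e", "ne", "l", "le", "g", "ge" };
  static const char *const unsigned_sfx[] = { "e", "ne", "b", "be", "a", "ae" };
  return (is_unsigned ? unsigned_sfx : signed_sfx)[code];
}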
10564 /* Print the name of register X to FILE based on its machine mode and number.
10565 If CODE is 'w', pretend the mode is HImode.
10566 If CODE is 'b', pretend the mode is QImode.
10567 If CODE is 'k', pretend the mode is SImode.
10568 If CODE is 'q', pretend the mode is DImode.
10569 If CODE is 'x', pretend the mode is V4SFmode.
10570 If CODE is 't', pretend the mode is V8SFmode.
10571 If CODE is 'h', pretend the reg is the 'high' byte register.
10572 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
10573 If CODE is 'd', duplicate the operand for AVX instruction.
10576 void
10577 print_reg (rtx x, int code, FILE *file)
10579 const char *reg;
10580 bool duplicated = code == 'd' && TARGET_AVX;
10582 gcc_assert (x == pc_rtx
10583 || (REGNO (x) != ARG_POINTER_REGNUM
10584 && REGNO (x) != FRAME_POINTER_REGNUM
10585 && REGNO (x) != FLAGS_REG
10586 && REGNO (x) != FPSR_REG
10587 && REGNO (x) != FPCR_REG));
10589 if (ASSEMBLER_DIALECT == ASM_ATT)
10590 putc ('%', file);
10592 if (x == pc_rtx)
10594 gcc_assert (TARGET_64BIT);
10595 fputs ("rip", file);
10596 return;
10599 if (code == 'w' || MMX_REG_P (x))
10600 code = 2;
10601 else if (code == 'b')
10602 code = 1;
10603 else if (code == 'k')
10604 code = 4;
10605 else if (code == 'q')
10606 code = 8;
10607 else if (code == 'y')
10608 code = 3;
10609 else if (code == 'h')
10610 code = 0;
10611 else if (code == 'x')
10612 code = 16;
10613 else if (code == 't')
10614 code = 32;
10615 else
10616 code = GET_MODE_SIZE (GET_MODE (x));
10618 /* Irritatingly, the AMD extended registers use a different naming
10619 convention from the normal registers. */
10620 if (REX_INT_REG_P (x))
10622 gcc_assert (TARGET_64BIT);
10623 switch (code)
10625 case 0:
10626 error ("extended registers have no high halves");
10627 break;
10628 case 1:
10629 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
10630 break;
10631 case 2:
10632 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
10633 break;
10634 case 4:
10635 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
10636 break;
10637 case 8:
10638 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
10639 break;
10640 default:
10641 error ("unsupported operand size for extended register");
10642 break;
10644 return;
10647 reg = NULL;
10648 switch (code)
10650 case 3:
10651 if (STACK_TOP_P (x))
10653 reg = "st(0)";
10654 break;
10656 /* FALLTHRU */
10657 case 8:
10658 case 4:
10659 case 12:
10660 if (! ANY_FP_REG_P (x))
10661 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
10662 /* FALLTHRU */
10663 case 16:
10664 case 2:
10665 normal:
10666 reg = hi_reg_name[REGNO (x)];
10667 break;
10668 case 1:
10669 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
10670 goto normal;
10671 reg = qi_reg_name[REGNO (x)];
10672 break;
10673 case 0:
10674 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
10675 goto normal;
10676 reg = qi_high_reg_name[REGNO (x)];
10677 break;
10678 case 32:
10679 if (SSE_REG_P (x))
10681 gcc_assert (!duplicated);
10682 putc ('y', file);
10683 fputs (hi_reg_name[REGNO (x)] + 1, file);
10684 return;
10686 break;
10687 default:
10688 gcc_unreachable ();
10691 fputs (reg, file);
10692 if (duplicated)
10694 if (ASSEMBLER_DIALECT == ASM_ATT)
10695 fprintf (file, ", %%%s", reg);
10696 else
10697 fprintf (file, ", %s", reg);
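/* A minimal standalone sketch (helper invented for illustration) of the
   naming scheme used above for the AMD64 extended integer registers:
   r8..r15 take a size suffix instead of a prefix letter.  */
#include <stdio.h>

static void
print_rex_reg (FILE *f, int regno /* 8..15 */, int size_in_bytes)
{
  const char *suffix = size_in_bytes == 1 ? "b"
                       : size_in_bytes == 2 ? "w"
                       : size_in_bytes == 4 ? "d" : "";
  fprintf (f, "r%d%s", regno, suffix);  /* e.g. "r9d" for the 32-bit part */
}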
10701 /* Locate some local-dynamic symbol still in use by this function
10702 so that we can print its name in some tls_local_dynamic_base
10703 pattern. */
10705 static int
10706 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
10708 rtx x = *px;
10710 if (GET_CODE (x) == SYMBOL_REF
10711 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
10713 cfun->machine->some_ld_name = XSTR (x, 0);
10714 return 1;
10717 return 0;
10720 static const char *
10721 get_some_local_dynamic_name (void)
10723 rtx insn;
10725 if (cfun->machine->some_ld_name)
10726 return cfun->machine->some_ld_name;
10728 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
10729 if (INSN_P (insn)
10730 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
10731 return cfun->machine->some_ld_name;
10733 gcc_unreachable ();
10736 /* Meaning of CODE:
10737 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
10738 C -- print opcode suffix for set/cmov insn.
10739 c -- like C, but print reversed condition
10740 E,e -- likewise, but for compare-and-branch fused insn.
10741 F,f -- likewise, but for floating-point.
10742 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
10743 otherwise nothing
10744 R -- print the prefix for register names.
10745 z -- print the opcode suffix for the size of the current operand.
10746 * -- print a star (in certain assembler syntax)
10747 A -- print an absolute memory reference.
10748 w -- print the operand as if it's a "word" (HImode) even if it isn't.
10749 s -- print a shift double count, followed by the assembler's argument
10750 delimiter.
10751 b -- print the QImode name of the register for the indicated operand.
10752 %b0 would print %al if operands[0] is reg 0.
10753 w -- likewise, print the HImode name of the register.
10754 k -- likewise, print the SImode name of the register.
10755 q -- likewise, print the DImode name of the register.
10756 x -- likewise, print the V4SFmode name of the register.
10757 t -- likewise, print the V8SFmode name of the register.
10758 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
10759 y -- print "st(0)" instead of "st" as a register.
10760 d -- print duplicated register operand for AVX instruction.
10761 D -- print condition for SSE cmp instruction.
10762 P -- if PIC, print an @PLT suffix.
10763 X -- don't print any sort of PIC '@' suffix for a symbol.
10764 & -- print some in-use local-dynamic symbol name.
10765 H -- print a memory address offset by 8; used for sse high-parts
10766 Y -- print condition for SSE5 com* instruction.
10767 + -- print a branch hint as 'cs' or 'ds' prefix
10768 ; -- print a semicolon (after prefixes due to bug in older gas).
10771 void
10772 print_operand (FILE *file, rtx x, int code)
10774 if (code)
10776 switch (code)
10778 case '*':
10779 if (ASSEMBLER_DIALECT == ASM_ATT)
10780 putc ('*', file);
10781 return;
10783 case '&':
10784 assemble_name (file, get_some_local_dynamic_name ());
10785 return;
10787 case 'A':
10788 switch (ASSEMBLER_DIALECT)
10790 case ASM_ATT:
10791 putc ('*', file);
10792 break;
10794 case ASM_INTEL:
10795 /* Intel syntax. For absolute addresses, registers should not
10796 be surrounded by brackets. */
10797 if (!REG_P (x))
10799 putc ('[', file);
10800 PRINT_OPERAND (file, x, 0);
10801 putc (']', file);
10802 return;
10804 break;
10806 default:
10807 gcc_unreachable ();
10810 PRINT_OPERAND (file, x, 0);
10811 return;
10814 case 'L':
10815 if (ASSEMBLER_DIALECT == ASM_ATT)
10816 putc ('l', file);
10817 return;
10819 case 'W':
10820 if (ASSEMBLER_DIALECT == ASM_ATT)
10821 putc ('w', file);
10822 return;
10824 case 'B':
10825 if (ASSEMBLER_DIALECT == ASM_ATT)
10826 putc ('b', file);
10827 return;
10829 case 'Q':
10830 if (ASSEMBLER_DIALECT == ASM_ATT)
10831 putc ('l', file);
10832 return;
10834 case 'S':
10835 if (ASSEMBLER_DIALECT == ASM_ATT)
10836 putc ('s', file);
10837 return;
10839 case 'T':
10840 if (ASSEMBLER_DIALECT == ASM_ATT)
10841 putc ('t', file);
10842 return;
10844 case 'z':
10845 /* 387 opcodes don't get size suffixes if the operands are
10846 registers. */
10847 if (STACK_REG_P (x))
10848 return;
10850 /* Likewise if using Intel opcodes. */
10851 if (ASSEMBLER_DIALECT == ASM_INTEL)
10852 return;
10854 /* Derive the opcode suffix from the size of the operand. */
10855 switch (GET_MODE_SIZE (GET_MODE (x)))
10857 case 1:
10858 putc ('b', file);
10859 return;
10861 case 2:
10862 if (MEM_P (x))
10864 #ifdef HAVE_GAS_FILDS_FISTS
10865 putc ('s', file);
10866 #endif
10867 return;
10869 else
10870 putc ('w', file);
10871 return;
10873 case 4:
10874 if (GET_MODE (x) == SFmode)
10876 putc ('s', file);
10877 return;
10879 else
10880 putc ('l', file);
10881 return;
10883 case 12:
10884 case 16:
10885 putc ('t', file);
10886 return;
10888 case 8:
10889 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
10891 if (MEM_P (x))
10893 #ifdef GAS_MNEMONICS
10894 putc ('q', file);
10895 #else
10896 putc ('l', file);
10897 putc ('l', file);
10898 #endif
10900 else
10901 putc ('q', file);
10903 else
10904 putc ('l', file);
10905 return;
10907 default:
10908 gcc_unreachable ();
10911 case 'd':
10912 case 'b':
10913 case 'w':
10914 case 'k':
10915 case 'q':
10916 case 'h':
10917 case 't':
10918 case 'y':
10919 case 'x':
10920 case 'X':
10921 case 'P':
10922 break;
10924 case 's':
10925 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
10927 PRINT_OPERAND (file, x, 0);
10928 fputs (", ", file);
10930 return;
10932 case 'D':
10933 /* A little bit of braindamage here. The SSE compare instructions
10934 use completely different names for the comparisons than the
10935 fp conditional moves do. */
10936 if (TARGET_AVX)
10938 switch (GET_CODE (x))
10940 case EQ:
10941 fputs ("eq", file);
10942 break;
10943 case UNEQ:
10944 fputs ("eq_us", file);
10945 break;
10946 case LT:
10947 fputs ("lt", file);
10948 break;
10949 case UNLT:
10950 fputs ("nge", file);
10951 break;
10952 case LE:
10953 fputs ("le", file);
10954 break;
10955 case UNLE:
10956 fputs ("ngt", file);
10957 break;
10958 case UNORDERED:
10959 fputs ("unord", file);
10960 break;
10961 case NE:
10962 fputs ("neq", file);
10963 break;
10964 case LTGT:
10965 fputs ("neq_oq", file);
10966 break;
10967 case GE:
10968 fputs ("ge", file);
10969 break;
10970 case UNGE:
10971 fputs ("nlt", file);
10972 break;
10973 case GT:
10974 fputs ("gt", file);
10975 break;
10976 case UNGT:
10977 fputs ("nle", file);
10978 break;
10979 case ORDERED:
10980 fputs ("ord", file);
10981 break;
10982 default:
10983 output_operand_lossage ("operand is not a condition code, invalid operand code 'D'");
10984 return;
10987 else
10989 switch (GET_CODE (x))
10991 case EQ:
10992 case UNEQ:
10993 fputs ("eq", file);
10994 break;
10995 case LT:
10996 case UNLT:
10997 fputs ("lt", file);
10998 break;
10999 case LE:
11000 case UNLE:
11001 fputs ("le", file);
11002 break;
11003 case UNORDERED:
11004 fputs ("unord", file);
11005 break;
11006 case NE:
11007 case LTGT:
11008 fputs ("neq", file);
11009 break;
11010 case UNGE:
11011 case GE:
11012 fputs ("nlt", file);
11013 break;
11014 case UNGT:
11015 case GT:
11016 fputs ("nle", file);
11017 break;
11018 case ORDERED:
11019 fputs ("ord", file);
11020 break;
11021 default:
11022 output_operand_lossage ("operand is not a condition code, invalid operand code 'D'");
11023 return;
11026 return;
11027 case 'O':
11028 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11029 if (ASSEMBLER_DIALECT == ASM_ATT)
11031 switch (GET_MODE (x))
11033 case HImode: putc ('w', file); break;
11034 case SImode:
11035 case SFmode: putc ('l', file); break;
11036 case DImode:
11037 case DFmode: putc ('q', file); break;
11038 default: gcc_unreachable ();
11040 putc ('.', file);
11042 #endif
11043 return;
11044 case 'C':
11045 if (!COMPARISON_P (x))
11047 output_operand_lossage ("operand is neither a constant nor a "
11048 "condition code, invalid operand code "
11049 "'C'");
11050 return;
11052 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11053 return;
11054 case 'F':
11055 if (!COMPARISON_P (x))
11057 output_operand_lossage ("operand is neither a constant nor a "
11058 "condition code, invalid operand code "
11059 "'F'");
11060 return;
11062 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11063 if (ASSEMBLER_DIALECT == ASM_ATT)
11064 putc ('.', file);
11065 #endif
11066 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11067 return;
11069 /* Like above, but reverse condition */
11070 case 'c':
11071 /* Check to see if argument to %c is really a constant
11072 and not a condition code which needs to be reversed. */
11073 if (!COMPARISON_P (x))
11075 output_operand_lossage ("operand is neither a constant nor a "
11076 "condition code, invalid operand "
11077 "code 'c'");
11078 return;
11080 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11081 return;
11082 case 'f':
11083 if (!COMPARISON_P (x))
11085 output_operand_lossage ("operand is neither a constant nor a "
11086 "condition code, invalid operand "
11087 "code 'f'");
11088 return;
11090 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11091 if (ASSEMBLER_DIALECT == ASM_ATT)
11092 putc ('.', file);
11093 #endif
11094 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11095 return;
11097 case 'E':
11098 put_condition_code (GET_CODE (x), CCmode, 0, 0, file);
11099 return;
11101 case 'e':
11102 put_condition_code (GET_CODE (x), CCmode, 1, 0, file);
11103 return;
11105 case 'H':
11106 /* It doesn't actually matter what mode we use here, as we're
11107 only going to use this for printing. */
11108 x = adjust_address_nv (x, DImode, 8);
11109 break;
11111 case '+':
11113 rtx x;
11115 if (!optimize
11116 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11117 return;
11119 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11120 if (x)
11122 int pred_val = INTVAL (XEXP (x, 0));
11124 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11125 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11127 int taken = pred_val > REG_BR_PROB_BASE / 2;
11128 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11130 /* Emit hints only when the default branch prediction
11131 heuristics would fail. */
11132 if (taken != cputaken)
11134 /* We use 3e (DS) prefix for taken branches and
11135 2e (CS) prefix for not taken branches. */
11136 if (taken)
11137 fputs ("ds ; ", file);
11138 else
11139 fputs ("cs ; ", file);
11143 return;
11146 case 'Y':
11147 switch (GET_CODE (x))
11149 case NE:
11150 fputs ("neq", file);
11151 break;
11152 case EQ:
11153 fputs ("eq", file);
11154 break;
11155 case GE:
11156 case GEU:
11157 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11158 break;
11159 case GT:
11160 case GTU:
11161 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11162 break;
11163 case LE:
11164 case LEU:
11165 fputs ("le", file);
11166 break;
11167 case LT:
11168 case LTU:
11169 fputs ("lt", file);
11170 break;
11171 case UNORDERED:
11172 fputs ("unord", file);
11173 break;
11174 case ORDERED:
11175 fputs ("ord", file);
11176 break;
11177 case UNEQ:
11178 fputs ("ueq", file);
11179 break;
11180 case UNGE:
11181 fputs ("nlt", file);
11182 break;
11183 case UNGT:
11184 fputs ("nle", file);
11185 break;
11186 case UNLE:
11187 fputs ("ule", file);
11188 break;
11189 case UNLT:
11190 fputs ("ult", file);
11191 break;
11192 case LTGT:
11193 fputs ("une", file);
11194 break;
11195 default:
11196 output_operand_lossage ("operand is not a condition code, invalid operand code 'Y'");
11197 return;
11199 return;
11201 case ';':
11202 #if TARGET_MACHO
11203 fputs (" ; ", file);
11204 #else
11205 fputc (' ', file);
11206 #endif
11207 return;
11209 default:
11210 output_operand_lossage ("invalid operand code '%c'", code);
11214 if (REG_P (x))
11215 print_reg (x, code, file);
11217 else if (MEM_P (x))
11219 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11220 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11221 && GET_MODE (x) != BLKmode)
11223 const char * size;
11224 switch (GET_MODE_SIZE (GET_MODE (x)))
11226 case 1: size = "BYTE"; break;
11227 case 2: size = "WORD"; break;
11228 case 4: size = "DWORD"; break;
11229 case 8: size = "QWORD"; break;
11230 case 12: size = "XWORD"; break;
11231 case 16:
11232 if (GET_MODE (x) == XFmode)
11233 size = "XWORD";
11234 else
11235 size = "XMMWORD";
11236 break;
11237 default:
11238 gcc_unreachable ();
11241 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11242 if (code == 'b')
11243 size = "BYTE";
11244 else if (code == 'w')
11245 size = "WORD";
11246 else if (code == 'k')
11247 size = "DWORD";
11249 fputs (size, file);
11250 fputs (" PTR ", file);
11253 x = XEXP (x, 0);
11254 /* Avoid (%rip) for call operands. */
11255 if (CONSTANT_ADDRESS_P (x) && code == 'P'
11256 && !CONST_INT_P (x))
11257 output_addr_const (file, x);
11258 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
11259 output_operand_lossage ("invalid constraints for operand");
11260 else
11261 output_address (x);
11264 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
11266 REAL_VALUE_TYPE r;
11267 long l;
11269 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11270 REAL_VALUE_TO_TARGET_SINGLE (r, l);
11272 if (ASSEMBLER_DIALECT == ASM_ATT)
11273 putc ('$', file);
11274 fprintf (file, "0x%08lx", (long unsigned int) l);
11277 /* These float cases don't actually occur as immediate operands. */
11278 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
11280 char dstr[30];
11282 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11283 fprintf (file, "%s", dstr);
11286 else if (GET_CODE (x) == CONST_DOUBLE
11287 && GET_MODE (x) == XFmode)
11289 char dstr[30];
11291 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11292 fprintf (file, "%s", dstr);
11295 else
11297 /* We have patterns that allow zero sets of memory, for instance.
11298 In 64-bit mode, we should probably support all 8-byte vectors,
11299 since we can in fact encode that into an immediate. */
11300 if (GET_CODE (x) == CONST_VECTOR)
11302 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
11303 x = const0_rtx;
11306 if (code != 'P')
11308 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
11310 if (ASSEMBLER_DIALECT == ASM_ATT)
11311 putc ('$', file);
11313 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
11314 || GET_CODE (x) == LABEL_REF)
11316 if (ASSEMBLER_DIALECT == ASM_ATT)
11317 putc ('$', file);
11318 else
11319 fputs ("OFFSET FLAT:", file);
11322 if (CONST_INT_P (x))
11323 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
11324 else if (flag_pic)
11325 output_pic_addr_const (file, x, code);
11326 else
11327 output_addr_const (file, x);
11331 /* Print a memory operand whose address is ADDR. */
11333 void
11334 print_operand_address (FILE *file, rtx addr)
11336 struct ix86_address parts;
11337 rtx base, index, disp;
11338 int scale;
11339 int ok = ix86_decompose_address (addr, &parts);
11341 gcc_assert (ok);
11343 base = parts.base;
11344 index = parts.index;
11345 disp = parts.disp;
11346 scale = parts.scale;
11348 switch (parts.seg)
11350 case SEG_DEFAULT:
11351 break;
11352 case SEG_FS:
11353 case SEG_GS:
11354 if (ASSEMBLER_DIALECT == ASM_ATT)
11355 putc ('%', file);
11356 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
11357 break;
11358 default:
11359 gcc_unreachable ();
11362 /* Use the one byte shorter RIP relative addressing in 64bit mode. */
11363 if (TARGET_64BIT && !base && !index)
11365 rtx symbol = disp;
11367 if (GET_CODE (disp) == CONST
11368 && GET_CODE (XEXP (disp, 0)) == PLUS
11369 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11370 symbol = XEXP (XEXP (disp, 0), 0);
11372 if (GET_CODE (symbol) == LABEL_REF
11373 || (GET_CODE (symbol) == SYMBOL_REF
11374 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
11375 base = pc_rtx;
11377 if (!base && !index)
11381 /* A displacement-only address requires special attention. */
11381 if (CONST_INT_P (disp))
11383 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
11384 fputs ("ds:", file);
11385 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
11387 else if (flag_pic)
11388 output_pic_addr_const (file, disp, 0);
11389 else
11390 output_addr_const (file, disp);
11392 else
11394 if (ASSEMBLER_DIALECT == ASM_ATT)
11396 if (disp)
11398 if (flag_pic)
11399 output_pic_addr_const (file, disp, 0);
11400 else if (GET_CODE (disp) == LABEL_REF)
11401 output_asm_label (disp);
11402 else
11403 output_addr_const (file, disp);
11406 putc ('(', file);
11407 if (base)
11408 print_reg (base, 0, file);
11409 if (index)
11411 putc (',', file);
11412 print_reg (index, 0, file);
11413 if (scale != 1)
11414 fprintf (file, ",%d", scale);
11416 putc (')', file);
11418 else
11420 rtx offset = NULL_RTX;
11422 if (disp)
11424 /* Pull out the offset of a symbol; print any symbol itself. */
11425 if (GET_CODE (disp) == CONST
11426 && GET_CODE (XEXP (disp, 0)) == PLUS
11427 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11429 offset = XEXP (XEXP (disp, 0), 1);
11430 disp = gen_rtx_CONST (VOIDmode,
11431 XEXP (XEXP (disp, 0), 0));
11434 if (flag_pic)
11435 output_pic_addr_const (file, disp, 0);
11436 else if (GET_CODE (disp) == LABEL_REF)
11437 output_asm_label (disp);
11438 else if (CONST_INT_P (disp))
11439 offset = disp;
11440 else
11441 output_addr_const (file, disp);
11444 putc ('[', file);
11445 if (base)
11447 print_reg (base, 0, file);
11448 if (offset)
11450 if (INTVAL (offset) >= 0)
11451 putc ('+', file);
11452 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
11455 else if (offset)
11456 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
11457 else
11458 putc ('0', file);
11460 if (index)
11462 putc ('+', file);
11463 print_reg (index, 0, file);
11464 if (scale != 1)
11465 fprintf (file, "*%d", scale);
11467 putc (']', file);
11472 bool
11473 output_addr_const_extra (FILE *file, rtx x)
11475 rtx op;
11477 if (GET_CODE (x) != UNSPEC)
11478 return false;
11480 op = XVECEXP (x, 0, 0);
11481 switch (XINT (x, 1))
11483 case UNSPEC_GOTTPOFF:
11484 output_addr_const (file, op);
11485 /* FIXME: This might be @TPOFF in Sun ld. */
11486 fputs ("@GOTTPOFF", file);
11487 break;
11488 case UNSPEC_TPOFF:
11489 output_addr_const (file, op);
11490 fputs ("@TPOFF", file);
11491 break;
11492 case UNSPEC_NTPOFF:
11493 output_addr_const (file, op);
11494 if (TARGET_64BIT)
11495 fputs ("@TPOFF", file);
11496 else
11497 fputs ("@NTPOFF", file);
11498 break;
11499 case UNSPEC_DTPOFF:
11500 output_addr_const (file, op);
11501 fputs ("@DTPOFF", file);
11502 break;
11503 case UNSPEC_GOTNTPOFF:
11504 output_addr_const (file, op);
11505 if (TARGET_64BIT)
11506 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11507 "@GOTTPOFF(%rip)" : "@GOTTPOFF[rip]", file);
11508 else
11509 fputs ("@GOTNTPOFF", file);
11510 break;
11511 case UNSPEC_INDNTPOFF:
11512 output_addr_const (file, op);
11513 fputs ("@INDNTPOFF", file);
11514 break;
11515 #if TARGET_MACHO
11516 case UNSPEC_MACHOPIC_OFFSET:
11517 output_addr_const (file, op);
11518 putc ('-', file);
11519 machopic_output_function_base_name (file);
11520 break;
11521 #endif
11523 default:
11524 return false;
11527 return true;
11530 /* Split one or more DImode RTL references into pairs of SImode
11531 references. The RTL can be REG, offsettable MEM, integer constant, or
11532 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
11533 split and "num" is its length. lo_half and hi_half are output arrays
11534 that parallel "operands". */
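/* Illustration (added note, not part of the original sources): for a
   little-endian DImode memory operand the two halves are simply the
   4-byte pieces at offsets 0 and 4, e.g.

       op      = (mem:DI (reg) ...)
       lo_half = (mem:SI (reg) ...)                       ;; offset 0
       hi_half = (mem:SI (plus (reg) (const_int 4)) ...)  ;; offset 4

   Register and constant operands are instead split with
   simplify_gen_subreg, as the code below does.  */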
11536 void
11537 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
11539 while (num--)
11541 rtx op = operands[num];
11543 /* simplify_subreg refuses to split volatile memory addresses,
11544 but we still have to handle them. */
11545 if (MEM_P (op))
11547 lo_half[num] = adjust_address (op, SImode, 0);
11548 hi_half[num] = adjust_address (op, SImode, 4);
11550 else
11552 lo_half[num] = simplify_gen_subreg (SImode, op,
11553 GET_MODE (op) == VOIDmode
11554 ? DImode : GET_MODE (op), 0);
11555 hi_half[num] = simplify_gen_subreg (SImode, op,
11556 GET_MODE (op) == VOIDmode
11557 ? DImode : GET_MODE (op), 4);
11561 /* Split one or more TImode RTL references into pairs of DImode
11562 references. The RTL can be REG, offsettable MEM, integer constant, or
11563 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
11564 split and "num" is its length. lo_half and hi_half are output arrays
11565 that parallel "operands". */
11567 void
11568 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
11570 while (num--)
11572 rtx op = operands[num];
11574 /* simplify_subreg refuses to split volatile memory addresses, but we
11575 still have to handle them. */
11576 if (MEM_P (op))
11578 lo_half[num] = adjust_address (op, DImode, 0);
11579 hi_half[num] = adjust_address (op, DImode, 8);
11581 else
11583 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
11584 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
11589 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
11590 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
11591 is the expression of the binary operation. The output may either be
11592 emitted here, or returned to the caller, like all output_* functions.
11594 There is no guarantee that the operands are the same mode, as they
11595 might be within FLOAT or FLOAT_EXTEND expressions. */
11597 #ifndef SYSV386_COMPAT
11598 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
11599 wants to fix the assemblers because that causes incompatibility
11600 with gcc. No-one wants to fix gcc because that causes
11601 incompatibility with assemblers... You can use the option of
11602 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
11603 #define SYSV386_COMPAT 1
11604 #endif
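/* Concrete effect (illustrative note): with SYSV386_COMPAT the reversed
   subtract/divide templates in output_387_binary_op below, e.g.
   "fsub{p\t%0, %2|rp\t%2, %0}", print the plain popping form for the
   AT&T dialect but the "r" (reversed) form for the Intel dialect,
   because AT&T-derived assemblers swap the meaning of fsub/fsubr and
   fdiv/fdivr when the destination is not %st(0).  */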
11606 const char *
11607 output_387_binary_op (rtx insn, rtx *operands)
11609 static char buf[40];
11610 const char *p;
11611 const char *ssep;
11612 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
11614 #ifdef ENABLE_CHECKING
11615 /* Even if we do not want to check the inputs, this documents the input
11616 constraints, which helps in understanding the following code. */
11617 if (STACK_REG_P (operands[0])
11618 && ((REG_P (operands[1])
11619 && REGNO (operands[0]) == REGNO (operands[1])
11620 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
11621 || (REG_P (operands[2])
11622 && REGNO (operands[0]) == REGNO (operands[2])
11623 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
11624 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
11625 ; /* ok */
11626 else
11627 gcc_assert (is_sse);
11628 #endif
11630 switch (GET_CODE (operands[3]))
11632 case PLUS:
11633 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11634 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11635 p = "fiadd";
11636 else
11637 p = "fadd";
11638 ssep = "vadd";
11639 break;
11641 case MINUS:
11642 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11643 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11644 p = "fisub";
11645 else
11646 p = "fsub";
11647 ssep = "vsub";
11648 break;
11650 case MULT:
11651 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11652 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11653 p = "fimul";
11654 else
11655 p = "fmul";
11656 ssep = "vmul";
11657 break;
11659 case DIV:
11660 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11661 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11662 p = "fidiv";
11663 else
11664 p = "fdiv";
11665 ssep = "vdiv";
11666 break;
11668 default:
11669 gcc_unreachable ();
11672 if (is_sse)
11674 if (TARGET_AVX)
11676 strcpy (buf, ssep);
11677 if (GET_MODE (operands[0]) == SFmode)
11678 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
11679 else
11680 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
11682 else
11684 strcpy (buf, ssep + 1);
11685 if (GET_MODE (operands[0]) == SFmode)
11686 strcat (buf, "ss\t{%2, %0|%0, %2}");
11687 else
11688 strcat (buf, "sd\t{%2, %0|%0, %2}");
11690 return buf;
11692 strcpy (buf, p);
11694 switch (GET_CODE (operands[3]))
11696 case MULT:
11697 case PLUS:
11698 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
11700 rtx temp = operands[2];
11701 operands[2] = operands[1];
11702 operands[1] = temp;
11705 /* We know operands[0] == operands[1]. */
11707 if (MEM_P (operands[2]))
11709 p = "%z2\t%2";
11710 break;
11713 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
11715 if (STACK_TOP_P (operands[0]))
11716 /* How is it that we are storing to a dead operand[2]?
11717 Well, presumably operands[1] is dead too. We can't
11718 store the result to st(0) as st(0) gets popped on this
11719 instruction. Instead store to operands[2] (which I
11720 think has to be st(1)). st(1) will be popped later.
11721 gcc <= 2.8.1 didn't have this check and generated
11722 assembly code that the Unixware assembler rejected. */
11723 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
11724 else
11725 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
11726 break;
11729 if (STACK_TOP_P (operands[0]))
11730 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
11731 else
11732 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
11733 break;
11735 case MINUS:
11736 case DIV:
11737 if (MEM_P (operands[1]))
11739 p = "r%z1\t%1";
11740 break;
11743 if (MEM_P (operands[2]))
11745 p = "%z2\t%2";
11746 break;
11749 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
11751 #if SYSV386_COMPAT
11752 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
11753 derived assemblers, confusingly reverse the direction of
11754 the operation for fsub{r} and fdiv{r} when the
11755 destination register is not st(0). The Intel assembler
11756 doesn't have this brain damage. Read !SYSV386_COMPAT to
11757 figure out what the hardware really does. */
11758 if (STACK_TOP_P (operands[0]))
11759 p = "{p\t%0, %2|rp\t%2, %0}";
11760 else
11761 p = "{rp\t%2, %0|p\t%0, %2}";
11762 #else
11763 if (STACK_TOP_P (operands[0]))
11764 /* As above for fmul/fadd, we can't store to st(0). */
11765 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
11766 else
11767 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
11768 #endif
11769 break;
11772 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
11774 #if SYSV386_COMPAT
11775 if (STACK_TOP_P (operands[0]))
11776 p = "{rp\t%0, %1|p\t%1, %0}";
11777 else
11778 p = "{p\t%1, %0|rp\t%0, %1}";
11779 #else
11780 if (STACK_TOP_P (operands[0]))
11781 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
11782 else
11783 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
11784 #endif
11785 break;
11788 if (STACK_TOP_P (operands[0]))
11790 if (STACK_TOP_P (operands[1]))
11791 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
11792 else
11793 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
11794 break;
11796 else if (STACK_TOP_P (operands[1]))
11798 #if SYSV386_COMPAT
11799 p = "{\t%1, %0|r\t%0, %1}";
11800 #else
11801 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
11802 #endif
11804 else
11806 #if SYSV386_COMPAT
11807 p = "{r\t%2, %0|\t%0, %2}";
11808 #else
11809 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
11810 #endif
11812 break;
11814 default:
11815 gcc_unreachable ();
11818 strcat (buf, p);
11819 return buf;
11822 /* Return needed mode for entity in optimize_mode_switching pass. */
11825 ix86_mode_needed (int entity, rtx insn)
11827 enum attr_i387_cw mode;
11829 /* The mode UNINITIALIZED is used to store the control word after a
11830 function call or ASM pattern. The mode ANY specifies that the function
11831 has no requirements on the control word and makes no changes in the
11832 bits we are interested in. */
11834 if (CALL_P (insn)
11835 || (NONJUMP_INSN_P (insn)
11836 && (asm_noperands (PATTERN (insn)) >= 0
11837 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
11838 return I387_CW_UNINITIALIZED;
11840 if (recog_memoized (insn) < 0)
11841 return I387_CW_ANY;
11843 mode = get_attr_i387_cw (insn);
11845 switch (entity)
11847 case I387_TRUNC:
11848 if (mode == I387_CW_TRUNC)
11849 return mode;
11850 break;
11852 case I387_FLOOR:
11853 if (mode == I387_CW_FLOOR)
11854 return mode;
11855 break;
11857 case I387_CEIL:
11858 if (mode == I387_CW_CEIL)
11859 return mode;
11860 break;
11862 case I387_MASK_PM:
11863 if (mode == I387_CW_MASK_PM)
11864 return mode;
11865 break;
11867 default:
11868 gcc_unreachable ();
11871 return I387_CW_ANY;
11874 /* Output code to initialize control word copies used by trunc?f?i and
11875 rounding patterns. CURRENT_MODE is set to the current control word,
11876 while NEW_MODE is set to the new control word. */
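/* x87 control word layout relied on below (standard FPU facts, noted
   here for reference):
     bits 11:10 (mask 0x0c00)  rounding control:
         00 round to nearest, 01 round down, 10 round up, 11 truncate
     bit  5     (mask 0x0020)  precision exception mask
   So e.g. I387_CW_TRUNC simply ORs in 0x0c00, while I387_CW_FLOOR clears
   the field and then ORs in 0x0400.  */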
11878 void
11879 emit_i387_cw_initialization (int mode)
11881 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
11882 rtx new_mode;
11884 enum ix86_stack_slot slot;
11886 rtx reg = gen_reg_rtx (HImode);
11888 emit_insn (gen_x86_fnstcw_1 (stored_mode));
11889 emit_move_insn (reg, copy_rtx (stored_mode));
11891 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
11892 || optimize_function_for_size_p (cfun))
11894 switch (mode)
11896 case I387_CW_TRUNC:
11897 /* round toward zero (truncate) */
11898 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
11899 slot = SLOT_CW_TRUNC;
11900 break;
11902 case I387_CW_FLOOR:
11903 /* round down toward -oo */
11904 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
11905 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
11906 slot = SLOT_CW_FLOOR;
11907 break;
11909 case I387_CW_CEIL:
11910 /* round up toward +oo */
11911 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
11912 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
11913 slot = SLOT_CW_CEIL;
11914 break;
11916 case I387_CW_MASK_PM:
11917 /* mask precision exception for nearbyint() */
11918 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
11919 slot = SLOT_CW_MASK_PM;
11920 break;
11922 default:
11923 gcc_unreachable ();
11926 else
11928 switch (mode)
11930 case I387_CW_TRUNC:
11931 /* round toward zero (truncate) */
11932 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
11933 slot = SLOT_CW_TRUNC;
11934 break;
11936 case I387_CW_FLOOR:
11937 /* round down toward -oo */
11938 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
11939 slot = SLOT_CW_FLOOR;
11940 break;
11942 case I387_CW_CEIL:
11943 /* round up toward +oo */
11944 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
11945 slot = SLOT_CW_CEIL;
11946 break;
11948 case I387_CW_MASK_PM:
11949 /* mask precision exception for nearbyint() */
11950 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
11951 slot = SLOT_CW_MASK_PM;
11952 break;
11954 default:
11955 gcc_unreachable ();
11959 gcc_assert (slot < MAX_386_STACK_LOCALS);
11961 new_mode = assign_386_stack_local (HImode, slot);
11962 emit_move_insn (new_mode, reg);
11965 /* Output code for INSN to convert a float to a signed int. OPERANDS
11966 are the insn operands. The output may be [HSD]Imode and the input
11967 operand may be [SDX]Fmode. */
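/* Note (informational): the SSE3 fisttp instruction always converts with
   truncation regardless of the rounding-control bits, which is why the
   fisttp path below needs no fldcw sequence, unlike plain fist/fistp.  */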
11969 const char *
11970 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
11972 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
11973 int dimode_p = GET_MODE (operands[0]) == DImode;
11974 int round_mode = get_attr_i387_cw (insn);
11976 /* Jump through a hoop or two for DImode, since the hardware has no
11977 non-popping instruction. We used to do this a different way, but
11978 that was somewhat fragile and broke with post-reload splitters. */
11979 if ((dimode_p || fisttp) && !stack_top_dies)
11980 output_asm_insn ("fld\t%y1", operands);
11982 gcc_assert (STACK_TOP_P (operands[1]));
11983 gcc_assert (MEM_P (operands[0]));
11984 gcc_assert (GET_MODE (operands[1]) != TFmode);
11986 if (fisttp)
11987 output_asm_insn ("fisttp%z0\t%0", operands);
11988 else
11990 if (round_mode != I387_CW_ANY)
11991 output_asm_insn ("fldcw\t%3", operands);
11992 if (stack_top_dies || dimode_p)
11993 output_asm_insn ("fistp%z0\t%0", operands);
11994 else
11995 output_asm_insn ("fist%z0\t%0", operands);
11996 if (round_mode != I387_CW_ANY)
11997 output_asm_insn ("fldcw\t%2", operands);
12000 return "";
12003 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12004 have the values zero or one, indicates the ffreep insn's operand
12005 from the OPERANDS array. */
12007 static const char *
12008 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12010 if (TARGET_USE_FFREEP)
12011 #if HAVE_AS_IX86_FFREEP
12012 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12013 #else
12015 static char retval[] = ".word\t0xc_df";
12016 int regno = REGNO (operands[opno]);
12018 gcc_assert (FP_REGNO_P (regno));
12020 retval[9] = '0' + (regno - FIRST_STACK_REG);
12021 return retval;
12023 #endif
12025 return opno ? "fstp\t%y1" : "fstp\t%y0";
12029 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12030 should be used. UNORDERED_P is true when fucom should be used. */
12032 const char *
12033 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12035 int stack_top_dies;
12036 rtx cmp_op0, cmp_op1;
12037 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12039 if (eflags_p)
12041 cmp_op0 = operands[0];
12042 cmp_op1 = operands[1];
12044 else
12046 cmp_op0 = operands[1];
12047 cmp_op1 = operands[2];
12050 if (is_sse)
12052 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12053 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12054 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12055 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12057 if (GET_MODE (operands[0]) == SFmode)
12058 if (unordered_p)
12059 return &ucomiss[TARGET_AVX ? 0 : 1];
12060 else
12061 return &comiss[TARGET_AVX ? 0 : 1];
12062 else
12063 if (unordered_p)
12064 return &ucomisd[TARGET_AVX ? 0 : 1];
12065 else
12066 return &comisd[TARGET_AVX ? 0 : 1];
12069 gcc_assert (STACK_TOP_P (cmp_op0));
12071 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12073 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12075 if (stack_top_dies)
12077 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12078 return output_387_ffreep (operands, 1);
12080 else
12081 return "ftst\n\tfnstsw\t%0";
12084 if (STACK_REG_P (cmp_op1)
12085 && stack_top_dies
12086 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12087 && REGNO (cmp_op1) != FIRST_STACK_REG)
12089 /* If the top of the 387 stack dies, and the other operand
12090 is also a stack register that dies, then this must be a
12091 `fcompp' float compare. */
12093 if (eflags_p)
12095 /* There is no double popping fcomi variant. Fortunately,
12096 eflags is immune from the fstp's cc clobbering. */
12097 if (unordered_p)
12098 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12099 else
12100 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12101 return output_387_ffreep (operands, 0);
12103 else
12105 if (unordered_p)
12106 return "fucompp\n\tfnstsw\t%0";
12107 else
12108 return "fcompp\n\tfnstsw\t%0";
12111 else
12113 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12115 static const char * const alt[16] =
12117 "fcom%z2\t%y2\n\tfnstsw\t%0",
12118 "fcomp%z2\t%y2\n\tfnstsw\t%0",
12119 "fucom%z2\t%y2\n\tfnstsw\t%0",
12120 "fucomp%z2\t%y2\n\tfnstsw\t%0",
12122 "ficom%z2\t%y2\n\tfnstsw\t%0",
12123 "ficomp%z2\t%y2\n\tfnstsw\t%0",
12124 NULL,
12125 NULL,
12127 "fcomi\t{%y1, %0|%0, %y1}",
12128 "fcomip\t{%y1, %0|%0, %y1}",
12129 "fucomi\t{%y1, %0|%0, %y1}",
12130 "fucomip\t{%y1, %0|%0, %y1}",
12132 NULL,
12133 NULL,
12134 NULL,
12135 NULL
12138 int mask;
12139 const char *ret;
12141 mask = eflags_p << 3;
12142 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12143 mask |= unordered_p << 1;
12144 mask |= stack_top_dies;
12146 gcc_assert (mask < 16);
12147 ret = alt[mask];
12148 gcc_assert (ret);
12150 return ret;
12154 void
12155 ix86_output_addr_vec_elt (FILE *file, int value)
12157 const char *directive = ASM_LONG;
12159 #ifdef ASM_QUAD
12160 if (TARGET_64BIT)
12161 directive = ASM_QUAD;
12162 #else
12163 gcc_assert (!TARGET_64BIT);
12164 #endif
12166 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
12169 void
12170 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12172 const char *directive = ASM_LONG;
12174 #ifdef ASM_QUAD
12175 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12176 directive = ASM_QUAD;
12177 #else
12178 gcc_assert (!TARGET_64BIT);
12179 #endif
12180 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12181 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12182 fprintf (file, "%s%s%d-%s%d\n",
12183 directive, LPREFIX, value, LPREFIX, rel);
12184 else if (HAVE_AS_GOTOFF_IN_DATA)
12185 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
12186 #if TARGET_MACHO
12187 else if (TARGET_MACHO)
12189 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
12190 machopic_output_function_base_name (file);
12191 fprintf(file, "\n");
12193 #endif
12194 else
12195 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
12196 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
12199 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
12200 for the target. */
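/* Illustration (added note): "xor %eax, %eax" is two bytes and breaks the
   dependency on the old register value, but it clobbers the flags, hence
   the CLOBBER of FLAGS_REG attached below; "mov $0, %eax" is five bytes
   and leaves the flags intact, which is why TARGET_USE_MOV0 targets may
   prefer it when not optimizing for speed.  */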
12202 void
12203 ix86_expand_clear (rtx dest)
12205 rtx tmp;
12207 /* We play register width games, which are only valid after reload. */
12208 gcc_assert (reload_completed);
12210 /* Avoid HImode and its attendant prefix byte. */
12211 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12212 dest = gen_rtx_REG (SImode, REGNO (dest));
12213 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12215 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12216 if (reload_completed && (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ()))
12218 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12219 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
12222 emit_insn (tmp);
12225 /* X is an unchanging MEM. If it is a constant pool reference, return
12226 the constant pool rtx, else NULL. */
12229 maybe_get_pool_constant (rtx x)
12231 x = ix86_delegitimize_address (XEXP (x, 0));
12233 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12234 return get_pool_constant (x);
12236 return NULL_RTX;
12239 void
12240 ix86_expand_move (enum machine_mode mode, rtx operands[])
12242 rtx op0, op1;
12243 enum tls_model model;
12245 op0 = operands[0];
12246 op1 = operands[1];
12248 if (GET_CODE (op1) == SYMBOL_REF)
12250 model = SYMBOL_REF_TLS_MODEL (op1);
12251 if (model)
12253 op1 = legitimize_tls_address (op1, model, true);
12254 op1 = force_operand (op1, op0);
12255 if (op1 == op0)
12256 return;
12258 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12259 && SYMBOL_REF_DLLIMPORT_P (op1))
12260 op1 = legitimize_dllimport_symbol (op1, false);
12262 else if (GET_CODE (op1) == CONST
12263 && GET_CODE (XEXP (op1, 0)) == PLUS
12264 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
12266 rtx addend = XEXP (XEXP (op1, 0), 1);
12267 rtx symbol = XEXP (XEXP (op1, 0), 0);
12268 rtx tmp = NULL;
12270 model = SYMBOL_REF_TLS_MODEL (symbol);
12271 if (model)
12272 tmp = legitimize_tls_address (symbol, model, true);
12273 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12274 && SYMBOL_REF_DLLIMPORT_P (symbol))
12275 tmp = legitimize_dllimport_symbol (symbol, true);
12277 if (tmp)
12279 tmp = force_operand (tmp, NULL);
12280 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
12281 op0, 1, OPTAB_DIRECT);
12282 if (tmp == op0)
12283 return;
12287 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
12289 if (TARGET_MACHO && !TARGET_64BIT)
12291 #if TARGET_MACHO
12292 if (MACHOPIC_PURE)
12294 rtx temp = ((reload_in_progress
12295 || ((op0 && REG_P (op0))
12296 && mode == Pmode))
12297 ? op0 : gen_reg_rtx (Pmode));
12298 op1 = machopic_indirect_data_reference (op1, temp);
12299 op1 = machopic_legitimize_pic_address (op1, mode,
12300 temp == op1 ? 0 : temp);
12302 else if (MACHOPIC_INDIRECT)
12303 op1 = machopic_indirect_data_reference (op1, 0);
12304 if (op0 == op1)
12305 return;
12306 #endif
12308 else
12310 if (MEM_P (op0))
12311 op1 = force_reg (Pmode, op1);
12312 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
12314 rtx reg = !can_create_pseudo_p () ? op0 : NULL_RTX;
12315 op1 = legitimize_pic_address (op1, reg);
12316 if (op0 == op1)
12317 return;
12321 else
12323 if (MEM_P (op0)
12324 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
12325 || !push_operand (op0, mode))
12326 && MEM_P (op1))
12327 op1 = force_reg (mode, op1);
12329 if (push_operand (op0, mode)
12330 && ! general_no_elim_operand (op1, mode))
12331 op1 = copy_to_mode_reg (mode, op1);
12333 /* Force large constants in 64-bit compilation into a register
12334 to get them CSEed. */
12335 if (can_create_pseudo_p ()
12336 && (mode == DImode) && TARGET_64BIT
12337 && immediate_operand (op1, mode)
12338 && !x86_64_zext_immediate_operand (op1, VOIDmode)
12339 && !register_operand (op0, mode)
12340 && optimize)
12341 op1 = copy_to_mode_reg (mode, op1);
12343 if (can_create_pseudo_p ()
12344 && FLOAT_MODE_P (mode)
12345 && GET_CODE (op1) == CONST_DOUBLE)
12347 /* If we are loading a floating point constant to a register,
12348 force the value to memory now, since we'll get better code
12349 out of the back end. */
12351 op1 = validize_mem (force_const_mem (mode, op1));
12352 if (!register_operand (op0, mode))
12354 rtx temp = gen_reg_rtx (mode);
12355 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
12356 emit_move_insn (op0, temp);
12357 return;
12362 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12365 void
12366 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
12368 rtx op0 = operands[0], op1 = operands[1];
12369 unsigned int align = GET_MODE_ALIGNMENT (mode);
12371 /* Force constants other than zero into memory. We do not know how
12372 the instructions used to build constants modify the upper 64 bits
12373 of the register; once we have that information, we may be able
12374 to handle some of them more efficiently. */
12375 if (can_create_pseudo_p ()
12376 && register_operand (op0, mode)
12377 && (CONSTANT_P (op1)
12378 || (GET_CODE (op1) == SUBREG
12379 && CONSTANT_P (SUBREG_REG (op1))))
12380 && standard_sse_constant_p (op1) <= 0)
12381 op1 = validize_mem (force_const_mem (mode, op1));
12383 /* We need to check memory alignment for SSE mode since an attribute
12384 can make operands unaligned. */
12385 if (can_create_pseudo_p ()
12386 && SSE_REG_MODE_P (mode)
12387 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
12388 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
12390 rtx tmp[2];
12392 /* ix86_expand_vector_move_misalign() does not like constants ... */
12393 if (CONSTANT_P (op1)
12394 || (GET_CODE (op1) == SUBREG
12395 && CONSTANT_P (SUBREG_REG (op1))))
12396 op1 = validize_mem (force_const_mem (mode, op1));
12398 /* ... nor both arguments in memory. */
12399 if (!register_operand (op0, mode)
12400 && !register_operand (op1, mode))
12401 op1 = force_reg (mode, op1);
12403 tmp[0] = op0; tmp[1] = op1;
12404 ix86_expand_vector_move_misalign (mode, tmp);
12405 return;
12408 /* Make operand1 a register if it isn't already. */
12409 if (can_create_pseudo_p ()
12410 && !register_operand (op0, mode)
12411 && !register_operand (op1, mode))
12413 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
12414 return;
12417 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12420 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
12421 straight to ix86_expand_vector_move. */
12422 /* Code generation for scalar reg-reg moves of single and double precision data:
12423 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
12424 movaps reg, reg
12425 else
12426 movss reg, reg
12427 if (x86_sse_partial_reg_dependency == true)
12428 movapd reg, reg
12429 else
12430 movsd reg, reg
12432 Code generation for scalar loads of double precision data:
12433 if (x86_sse_split_regs == true)
12434 movlpd mem, reg (gas syntax)
12435 else
12436 movsd mem, reg
12438 Code generation for unaligned packed loads of single precision data
12439 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
12440 if (x86_sse_unaligned_move_optimal)
12441 movups mem, reg
12443 if (x86_sse_partial_reg_dependency == true)
12445 xorps reg, reg
12446 movlps mem, reg
12447 movhps mem+8, reg
12449 else
12451 movlps mem, reg
12452 movhps mem+8, reg
12455 Code generation for unaligned packed loads of double precision data
12456 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
12457 if (x86_sse_unaligned_move_optimal)
12458 movupd mem, reg
12460 if (x86_sse_split_regs == true)
12462 movlpd mem, reg
12463 movhpd mem+8, reg
12465 else
12467 movsd mem, reg
12468 movhpd mem+8, reg
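/* Code generation for unaligned packed stores (summary of the code below,
   added for symmetry with the load cases above; AVX targets instead use
   the unaligned vmovups/vmovupd/vmovdqu forms directly):
   if (optimize_insn_for_size_p ())
     movups reg, mem
   else if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES && integer vector)
     movdqu reg, mem
   else if (V2DFmode)
     movlpd reg, mem
     movhpd reg, mem+8
   else
     movlps reg, mem
     movhps reg, mem+8  */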
12472 void
12473 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
12475 rtx op0, op1, m;
12477 op0 = operands[0];
12478 op1 = operands[1];
12480 if (TARGET_AVX)
12482 switch (GET_MODE_CLASS (mode))
12484 case MODE_VECTOR_INT:
12485 case MODE_INT:
12486 switch (GET_MODE_SIZE (mode))
12488 case 16:
12489 op0 = gen_lowpart (V16QImode, op0);
12490 op1 = gen_lowpart (V16QImode, op1);
12491 emit_insn (gen_avx_movdqu (op0, op1));
12492 break;
12493 case 32:
12494 op0 = gen_lowpart (V32QImode, op0);
12495 op1 = gen_lowpart (V32QImode, op1);
12496 emit_insn (gen_avx_movdqu256 (op0, op1));
12497 break;
12498 default:
12499 gcc_unreachable ();
12501 break;
12502 case MODE_VECTOR_FLOAT:
12503 op0 = gen_lowpart (mode, op0);
12504 op1 = gen_lowpart (mode, op1);
12506 switch (mode)
12508 case V4SFmode:
12509 emit_insn (gen_avx_movups (op0, op1));
12510 break;
12511 case V8SFmode:
12512 emit_insn (gen_avx_movups256 (op0, op1));
12513 break;
12514 case V2DFmode:
12515 emit_insn (gen_avx_movupd (op0, op1));
12516 break;
12517 case V4DFmode:
12518 emit_insn (gen_avx_movupd256 (op0, op1));
12519 break;
12520 default:
12521 gcc_unreachable ();
12523 break;
12525 default:
12526 gcc_unreachable ();
12529 return;
12532 if (MEM_P (op1))
12534 /* If we're optimizing for size, movups is the smallest. */
12535 if (optimize_insn_for_size_p ())
12537 op0 = gen_lowpart (V4SFmode, op0);
12538 op1 = gen_lowpart (V4SFmode, op1);
12539 emit_insn (gen_sse_movups (op0, op1));
12540 return;
12543 /* ??? If we have typed data, then it would appear that using
12544 movdqu is the only way to get unaligned data loaded with
12545 integer type. */
12546 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12548 op0 = gen_lowpart (V16QImode, op0);
12549 op1 = gen_lowpart (V16QImode, op1);
12550 emit_insn (gen_sse2_movdqu (op0, op1));
12551 return;
12554 if (TARGET_SSE2 && mode == V2DFmode)
12556 rtx zero;
12558 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
12560 op0 = gen_lowpart (V2DFmode, op0);
12561 op1 = gen_lowpart (V2DFmode, op1);
12562 emit_insn (gen_sse2_movupd (op0, op1));
12563 return;
12566 /* When SSE registers are split into halves, we can avoid
12567 writing to the top half twice. */
12568 if (TARGET_SSE_SPLIT_REGS)
12570 emit_clobber (op0);
12571 zero = op0;
12573 else
12575 /* ??? Not sure about the best option for the Intel chips.
12576 The following would seem to satisfy; the register is
12577 entirely cleared, breaking the dependency chain. We
12578 then store to the upper half, with a dependency depth
12579 of one. A rumor has it that Intel recommends two movsd
12580 followed by an unpacklpd, but this is unconfirmed. And
12581 given that the dependency depth of the unpacklpd would
12582 still be one, I'm not sure why this would be better. */
12583 zero = CONST0_RTX (V2DFmode);
12586 m = adjust_address (op1, DFmode, 0);
12587 emit_insn (gen_sse2_loadlpd (op0, zero, m));
12588 m = adjust_address (op1, DFmode, 8);
12589 emit_insn (gen_sse2_loadhpd (op0, op0, m));
12591 else
12593 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
12595 op0 = gen_lowpart (V4SFmode, op0);
12596 op1 = gen_lowpart (V4SFmode, op1);
12597 emit_insn (gen_sse_movups (op0, op1));
12598 return;
12601 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
12602 emit_move_insn (op0, CONST0_RTX (mode));
12603 else
12604 emit_clobber (op0);
12606 if (mode != V4SFmode)
12607 op0 = gen_lowpart (V4SFmode, op0);
12608 m = adjust_address (op1, V2SFmode, 0);
12609 emit_insn (gen_sse_loadlps (op0, op0, m));
12610 m = adjust_address (op1, V2SFmode, 8);
12611 emit_insn (gen_sse_loadhps (op0, op0, m));
12614 else if (MEM_P (op0))
12616 /* If we're optimizing for size, movups is the smallest. */
12617 if (optimize_insn_for_size_p ())
12619 op0 = gen_lowpart (V4SFmode, op0);
12620 op1 = gen_lowpart (V4SFmode, op1);
12621 emit_insn (gen_sse_movups (op0, op1));
12622 return;
12625 /* ??? Similar to above, only less clear because of quote
12626 typeless stores unquote. */
12627 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
12628 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12630 op0 = gen_lowpart (V16QImode, op0);
12631 op1 = gen_lowpart (V16QImode, op1);
12632 emit_insn (gen_sse2_movdqu (op0, op1));
12633 return;
12636 if (TARGET_SSE2 && mode == V2DFmode)
12638 m = adjust_address (op0, DFmode, 0);
12639 emit_insn (gen_sse2_storelpd (m, op1));
12640 m = adjust_address (op0, DFmode, 8);
12641 emit_insn (gen_sse2_storehpd (m, op1));
12643 else
12645 if (mode != V4SFmode)
12646 op1 = gen_lowpart (V4SFmode, op1);
12647 m = adjust_address (op0, V2SFmode, 0);
12648 emit_insn (gen_sse_storelps (m, op1));
12649 m = adjust_address (op0, V2SFmode, 8);
12650 emit_insn (gen_sse_storehps (m, op1));
12653 else
12654 gcc_unreachable ();
12657 /* Expand a push in MODE. This is some mode for which we do not support
12658 proper push instructions, at least from the registers that we expect
12659 the value to live in. */
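/* Illustration (added): for a mode of size N the expansion below is simply
       sub  $N, %esp            (or %rsp in 64-bit mode)
       <ordinary move of X to (%esp)>
   i.e. an explicit stack adjustment followed by a plain store, rather than
   a real push instruction.  */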
12661 void
12662 ix86_expand_push (enum machine_mode mode, rtx x)
12664 rtx tmp;
12666 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
12667 GEN_INT (-GET_MODE_SIZE (mode)),
12668 stack_pointer_rtx, 1, OPTAB_DIRECT);
12669 if (tmp != stack_pointer_rtx)
12670 emit_move_insn (stack_pointer_rtx, tmp);
12672 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
12674 /* When we push an operand onto the stack, it has to be aligned at least
12675 at the function argument boundary. However, since we don't have
12676 the argument type, we can't determine the actual argument
12677 boundary. */
12678 emit_move_insn (tmp, x);
12681 /* Helper function of ix86_fixup_binary_operands to canonicalize
12682 operand order. Returns true if the operands should be swapped. */
12684 static bool
12685 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
12686 rtx operands[])
12688 rtx dst = operands[0];
12689 rtx src1 = operands[1];
12690 rtx src2 = operands[2];
12692 /* If the operation is not commutative, we can't do anything. */
12693 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
12694 return false;
12696 /* Highest priority is that src1 should match dst. */
12697 if (rtx_equal_p (dst, src1))
12698 return false;
12699 if (rtx_equal_p (dst, src2))
12700 return true;
12702 /* Next highest priority is that immediate constants come second. */
12703 if (immediate_operand (src2, mode))
12704 return false;
12705 if (immediate_operand (src1, mode))
12706 return true;
12708 /* Lowest priority is that memory references should come second. */
12709 if (MEM_P (src2))
12710 return false;
12711 if (MEM_P (src1))
12712 return true;
12714 return false;
12718 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
12719 destination to use for the operation. If different from the true
12720 destination in operands[0], a copy operation will be required. */
12723 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
12724 rtx operands[])
12726 rtx dst = operands[0];
12727 rtx src1 = operands[1];
12728 rtx src2 = operands[2];
12730 /* Canonicalize operand order. */
12731 if (ix86_swap_binary_operands_p (code, mode, operands))
12733 rtx temp;
12735 /* It is invalid to swap operands of different modes. */
12736 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
12738 temp = src1;
12739 src1 = src2;
12740 src2 = temp;
12743 /* Both source operands cannot be in memory. */
12744 if (MEM_P (src1) && MEM_P (src2))
12746 /* Optimization: Only read from memory once. */
12747 if (rtx_equal_p (src1, src2))
12749 src2 = force_reg (mode, src2);
12750 src1 = src2;
12752 else
12753 src2 = force_reg (mode, src2);
12756 /* If the destination is memory, and we do not have matching source
12757 operands, do things in registers. */
12758 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
12759 dst = gen_reg_rtx (mode);
12761 /* Source 1 cannot be a constant. */
12762 if (CONSTANT_P (src1))
12763 src1 = force_reg (mode, src1);
12765 /* Source 1 cannot be a non-matching memory. */
12766 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
12767 src1 = force_reg (mode, src1);
12769 operands[1] = src1;
12770 operands[2] = src2;
12771 return dst;
12774 /* Similarly, but assume that the destination has already been
12775 set up properly. */
12777 void
12778 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
12779 enum machine_mode mode, rtx operands[])
12781 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
12782 gcc_assert (dst == operands[0]);
12785 /* Attempt to expand a binary operator. Make the expansion closer to the
12786 actual machine than just general_operand, which will allow 3 separate
12787 memory references (one output, two inputs) in a single insn. */
12789 void
12790 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
12791 rtx operands[])
12793 rtx src1, src2, dst, op, clob;
12795 dst = ix86_fixup_binary_operands (code, mode, operands);
12796 src1 = operands[1];
12797 src2 = operands[2];
12799 /* Emit the instruction. */
12801 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
12802 if (reload_in_progress)
12804 /* Reload doesn't know about the flags register, and doesn't know that
12805 it doesn't want to clobber it. We can only do this with PLUS. */
12806 gcc_assert (code == PLUS);
12807 emit_insn (op);
12809 else
12811 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12812 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
12815 /* Fix up the destination if needed. */
12816 if (dst != operands[0])
12817 emit_move_insn (operands[0], dst);
12820 /* Return TRUE or FALSE depending on whether the binary operator meets the
12821 appropriate constraints. */
12824 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
12825 rtx operands[3])
12827 rtx dst = operands[0];
12828 rtx src1 = operands[1];
12829 rtx src2 = operands[2];
12831 /* Both source operands cannot be in memory. */
12832 if (MEM_P (src1) && MEM_P (src2))
12833 return 0;
12835 /* Canonicalize operand order for commutative operators. */
12836 if (ix86_swap_binary_operands_p (code, mode, operands))
12838 rtx temp = src1;
12839 src1 = src2;
12840 src2 = temp;
12843 /* If the destination is memory, we must have a matching source operand. */
12844 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
12845 return 0;
12847 /* Source 1 cannot be a constant. */
12848 if (CONSTANT_P (src1))
12849 return 0;
12851 /* Source 1 cannot be a non-matching memory. */
12852 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
12853 return 0;
12855 return 1;
12858 /* Attempt to expand a unary operator. Make the expansion closer to the
12859 actual machine than just general_operand, which will allow 2 separate
12860 memory references (one output, one input) in a single insn. */
12862 void
12863 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
12864 rtx operands[])
12866 int matching_memory;
12867 rtx src, dst, op, clob;
12869 dst = operands[0];
12870 src = operands[1];
12872 /* If the destination is memory, and we do not have matching source
12873 operands, do things in registers. */
12874 matching_memory = 0;
12875 if (MEM_P (dst))
12877 if (rtx_equal_p (dst, src))
12878 matching_memory = 1;
12879 else
12880 dst = gen_reg_rtx (mode);
12883 /* When source operand is memory, destination must match. */
12884 if (MEM_P (src) && !matching_memory)
12885 src = force_reg (mode, src);
12887 /* Emit the instruction. */
12889 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
12890 if (reload_in_progress || code == NOT)
12892 /* Reload doesn't know about the flags register, and doesn't know that
12893 it doesn't want to clobber it. */
12894 gcc_assert (code == NOT);
12895 emit_insn (op);
12897 else
12899 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12900 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
12903 /* Fix up the destination if needed. */
12904 if (dst != operands[0])
12905 emit_move_insn (operands[0], dst);
12908 /* Return TRUE or FALSE depending on whether the unary operator meets the
12909 appropriate constraints. */
12912 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
12913 enum machine_mode mode ATTRIBUTE_UNUSED,
12914 rtx operands[2] ATTRIBUTE_UNUSED)
12916 /* If one of the operands is memory, the source and destination must match. */
12917 if ((MEM_P (operands[0])
12918 || MEM_P (operands[1]))
12919 && ! rtx_equal_p (operands[0], operands[1]))
12920 return FALSE;
12921 return TRUE;
12924 /* Post-reload splitter for converting an SF or DFmode value in an
12925 SSE register into an unsigned SImode. */
12927 void
12928 ix86_split_convert_uns_si_sse (rtx operands[])
12930 enum machine_mode vecmode;
12931 rtx value, large, zero_or_two31, input, two31, x;
12933 large = operands[1];
12934 zero_or_two31 = operands[2];
12935 input = operands[3];
12936 two31 = operands[4];
12937 vecmode = GET_MODE (large);
12938 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
12940 /* Load up the value into the low element. We must ensure that the other
12941 elements are valid floats -- zero is the easiest such value. */
12942 if (MEM_P (input))
12944 if (vecmode == V4SFmode)
12945 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
12946 else
12947 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
12949 else
12951 input = gen_rtx_REG (vecmode, REGNO (input));
12952 emit_move_insn (value, CONST0_RTX (vecmode));
12953 if (vecmode == V4SFmode)
12954 emit_insn (gen_sse_movss (value, value, input));
12955 else
12956 emit_insn (gen_sse2_movsd (value, value, input));
12959 emit_move_insn (large, two31);
12960 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
12962 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
12963 emit_insn (gen_rtx_SET (VOIDmode, large, x));
12965 x = gen_rtx_AND (vecmode, zero_or_two31, large);
12966 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
12968 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
12969 emit_insn (gen_rtx_SET (VOIDmode, value, x));
12971 large = gen_rtx_REG (V4SImode, REGNO (large));
12972 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
12974 x = gen_rtx_REG (V4SImode, REGNO (value));
12975 if (vecmode == V4SFmode)
12976 emit_insn (gen_sse2_cvttps2dq (x, value));
12977 else
12978 emit_insn (gen_sse2_cvttpd2dq (x, value));
12979 value = x;
12981 emit_insn (gen_xorv4si3 (value, value, large));
12984 /* Convert an unsigned DImode value into a DFmode, using only SSE.
12985 Expects the 64-bit DImode to be supplied in a pair of integral
12986 registers. Requires SSE2; will use SSE3 if available. For x86_32,
12987 -mfpmath=sse, !optimize_size only. */
12989 void
12990 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
12992 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
12993 rtx int_xmm, fp_xmm;
12994 rtx biases, exponents;
12995 rtx x;
12997 int_xmm = gen_reg_rtx (V4SImode);
12998 if (TARGET_INTER_UNIT_MOVES)
12999 emit_insn (gen_movdi_to_sse (int_xmm, input));
13000 else if (TARGET_SSE_SPLIT_REGS)
13002 emit_clobber (int_xmm);
13003 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
13005 else
13007 x = gen_reg_rtx (V2DImode);
13008 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
13009 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
13012 x = gen_rtx_CONST_VECTOR (V4SImode,
13013 gen_rtvec (4, GEN_INT (0x43300000UL),
13014 GEN_INT (0x45300000UL),
13015 const0_rtx, const0_rtx));
13016 exponents = validize_mem (force_const_mem (V4SImode, x));
13018 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
13019 emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, exponents));
13021 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
13022 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
13023 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
13024 (0x1.0p84 + double(fp_value_hi_xmm)).
13025 Note these exponents differ by 32. */
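  /* Worked example (illustrative): for an input whose high word is 3 and
     low word is 5, the low half becomes the double 2^52 + 5 and the high
     half becomes 2^84 + 3 * 2^32.  After subtracting the 2^52 and 2^84
     biases below and adding the two halves, the result is
     3 * 2^32 + 5 = 12884901893.0 exactly.  */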
13027 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
13029 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
13030 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
13031 real_ldexp (&bias_lo_rvt, &dconst1, 52);
13032 real_ldexp (&bias_hi_rvt, &dconst1, 84);
13033 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
13034 x = const_double_from_real_value (bias_hi_rvt, DFmode);
13035 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
13036 biases = validize_mem (force_const_mem (V2DFmode, biases));
13037 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
13039 /* Add the upper and lower DFmode values together. */
13040 if (TARGET_SSE3)
13041 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
13042 else
13044 x = copy_to_mode_reg (V2DFmode, fp_xmm);
13045 emit_insn (gen_sse2_unpckhpd (fp_xmm, fp_xmm, fp_xmm));
13046 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
13049 ix86_expand_vector_extract (false, target, fp_xmm, 0);
13052 /* Not used, but eases macroization of patterns. */
13053 void
13054 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
13055 rtx input ATTRIBUTE_UNUSED)
13057 gcc_unreachable ();
13060 /* Convert an unsigned SImode value into a DFmode. Only currently used
13061 for SSE, but applicable anywhere. */
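/* Sketch of the trick used below (added note): the input is biased by
   -2^31 with a wrapping SImode add so that it fits the signed range, is
   converted with an ordinary signed int-to-double, and then 2^31 is added
   back in DFmode.  E.g. 0xffffffff -> 0x7fffffff -> 2147483647.0
   -> 4294967295.0.  */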
13063 void
13064 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
13066 REAL_VALUE_TYPE TWO31r;
13067 rtx x, fp;
13069 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
13070 NULL, 1, OPTAB_DIRECT);
13072 fp = gen_reg_rtx (DFmode);
13073 emit_insn (gen_floatsidf2 (fp, x));
13075 real_ldexp (&TWO31r, &dconst1, 31);
13076 x = const_double_from_real_value (TWO31r, DFmode);
13078 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
13079 if (x != target)
13080 emit_move_insn (target, x);
13083 /* Convert a signed DImode value into a DFmode. Only used for SSE in
13084 32-bit mode; otherwise we have a direct convert instruction. */
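/* Illustration (added): the result is computed as
   (double) hi * 2^32 + (double) (unsigned) lo, with the unsigned low word
   handled by ix86_expand_convert_uns_sidf_sse above.  E.g. for the input
   -1, hi = -1 and lo = 0xffffffff, giving
   -4294967296.0 + 4294967295.0 = -1.0.  */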
13086 void
13087 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
13089 REAL_VALUE_TYPE TWO32r;
13090 rtx fp_lo, fp_hi, x;
13092 fp_lo = gen_reg_rtx (DFmode);
13093 fp_hi = gen_reg_rtx (DFmode);
13095 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
13097 real_ldexp (&TWO32r, &dconst1, 32);
13098 x = const_double_from_real_value (TWO32r, DFmode);
13099 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
13101 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
13103 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
13104 0, OPTAB_DIRECT);
13105 if (x != target)
13106 emit_move_insn (target, x);
13109 /* Convert an unsigned SImode value into a SFmode, using only SSE.
13110 For x86_32, -mfpmath=sse, !optimize_size only. */
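/* Illustration (added): the input is split as hi * 2^16 + lo; each 16-bit
   half converts to SFmode exactly, so only the final multiply-add can
   round.  E.g. 0xffffffff gives 65535.0 * 65536.0 + 65535.0, which rounds
   to 4294967296.0 in single precision, since 4294967295 is not exactly
   representable there.  */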
13111 void
13112 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
13114 REAL_VALUE_TYPE ONE16r;
13115 rtx fp_hi, fp_lo, int_hi, int_lo, x;
13117 real_ldexp (&ONE16r, &dconst1, 16);
13118 x = const_double_from_real_value (ONE16r, SFmode);
13119 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
13120 NULL, 0, OPTAB_DIRECT);
13121 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
13122 NULL, 0, OPTAB_DIRECT);
13123 fp_hi = gen_reg_rtx (SFmode);
13124 fp_lo = gen_reg_rtx (SFmode);
13125 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
13126 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
13127 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
13128 0, OPTAB_DIRECT);
13129 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
13130 0, OPTAB_DIRECT);
13131 if (!rtx_equal_p (target, fp_hi))
13132 emit_move_insn (target, fp_hi);
13135 /* A subroutine of ix86_build_signbit_mask_vector. If VECT is true,
13136 then replicate the value for all elements of the vector
13137 register. */
13140 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
13142 rtvec v;
13143 switch (mode)
13145 case SImode:
13146 gcc_assert (vect);
13147 v = gen_rtvec (4, value, value, value, value);
13148 return gen_rtx_CONST_VECTOR (V4SImode, v);
13150 case DImode:
13151 gcc_assert (vect);
13152 v = gen_rtvec (2, value, value);
13153 return gen_rtx_CONST_VECTOR (V2DImode, v);
13155 case SFmode:
13156 if (vect)
13157 v = gen_rtvec (4, value, value, value, value);
13158 else
13159 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
13160 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
13161 return gen_rtx_CONST_VECTOR (V4SFmode, v);
13163 case DFmode:
13164 if (vect)
13165 v = gen_rtvec (2, value, value);
13166 else
13167 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
13168 return gen_rtx_CONST_VECTOR (V2DFmode, v);
13170 default:
13171 gcc_unreachable ();
13175 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
13176 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
13177 for an SSE register. If VECT is true, then replicate the mask for
13178 all elements of the vector register. If INVERT is true, then create
13179 a mask excluding the sign bit. */
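/* Illustration (added): for SFmode the low element of the mask is
   0x80000000 (0x7fffffff when INVERT), for DFmode 0x8000000000000000.
   ix86_expand_fp_absneg_operator below then XORs with the sign-bit mask
   for NEG and ANDs with the inverted mask for ABS.  */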
13182 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
13184 enum machine_mode vec_mode, imode;
13185 HOST_WIDE_INT hi, lo;
13186 int shift = 63;
13187 rtx v;
13188 rtx mask;
13190 /* Find the sign bit, sign extended to 2*HWI. */
13191 switch (mode)
13193 case SImode:
13194 case SFmode:
13195 imode = SImode;
13196 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
13197 lo = 0x80000000, hi = lo < 0;
13198 break;
13200 case DImode:
13201 case DFmode:
13202 imode = DImode;
13203 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
13204 if (HOST_BITS_PER_WIDE_INT >= 64)
13205 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
13206 else
13207 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
13208 break;
13210 case TImode:
13211 case TFmode:
13212 vec_mode = VOIDmode;
13213 if (HOST_BITS_PER_WIDE_INT >= 64)
13215 imode = TImode;
13216 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
13218 else
13220 rtvec vec;
13222 imode = DImode;
13223 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
13225 if (invert)
13227 lo = ~lo, hi = ~hi;
13228 v = constm1_rtx;
13230 else
13231 v = const0_rtx;
13233 mask = immed_double_const (lo, hi, imode);
13235 vec = gen_rtvec (2, v, mask);
13236 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
13237 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
13239 return v;
13241 break;
13243 default:
13244 gcc_unreachable ();
13247 if (invert)
13248 lo = ~lo, hi = ~hi;
13250 /* Force this value into the low part of a fp vector constant. */
13251 mask = immed_double_const (lo, hi, imode);
13252 mask = gen_lowpart (mode, mask);
13254 if (vec_mode == VOIDmode)
13255 return force_reg (mode, mask);
13257 v = ix86_build_const_vector (mode, vect, mask);
13258 return force_reg (vec_mode, v);
13261 /* Generate code for floating point ABS or NEG. */
13263 void
13264 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
13265 rtx operands[])
13267 rtx mask, set, use, clob, dst, src;
13268 bool use_sse = false;
13269 bool vector_mode = VECTOR_MODE_P (mode);
13270 enum machine_mode elt_mode = mode;
13272 if (vector_mode)
13274 elt_mode = GET_MODE_INNER (mode);
13275 use_sse = true;
13277 else if (mode == TFmode)
13278 use_sse = true;
13279 else if (TARGET_SSE_MATH)
13280 use_sse = SSE_FLOAT_MODE_P (mode);
13282 /* NEG and ABS performed with SSE use bitwise mask operations.
13283 Create the appropriate mask now. */
13284 if (use_sse)
13285 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
13286 else
13287 mask = NULL_RTX;
13289 dst = operands[0];
13290 src = operands[1];
13292 if (vector_mode)
13294 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
13295 set = gen_rtx_SET (VOIDmode, dst, set);
13296 emit_insn (set);
13298 else
13300 set = gen_rtx_fmt_e (code, mode, src);
13301 set = gen_rtx_SET (VOIDmode, dst, set);
13302 if (mask)
13304 use = gen_rtx_USE (VOIDmode, mask);
13305 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13306 emit_insn (gen_rtx_PARALLEL (VOIDmode,
13307 gen_rtvec (3, set, use, clob)));
13309 else
13310 emit_insn (set);
13314 /* Expand a copysign operation. Special case operand 0 being a constant. */
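/* Sketch of the expansion (added note): copysign (x, y) is computed as
   (|x| & ~signmask) | (y & signmask).  When x is a constant its absolute
   value is folded at expand time, so the const split below needs only one
   AND and one IOR (ix86_split_copysign_const); a variable x needs both
   masks (ix86_split_copysign_var).  */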
13316 void
13317 ix86_expand_copysign (rtx operands[])
13319 enum machine_mode mode;
13320 rtx dest, op0, op1, mask, nmask;
13322 dest = operands[0];
13323 op0 = operands[1];
13324 op1 = operands[2];
13326 mode = GET_MODE (dest);
13328 if (GET_CODE (op0) == CONST_DOUBLE)
13330 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
13332 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
13333 op0 = simplify_unary_operation (ABS, mode, op0, mode);
13335 if (mode == SFmode || mode == DFmode)
13337 enum machine_mode vmode;
13339 vmode = mode == SFmode ? V4SFmode : V2DFmode;
13341 if (op0 == CONST0_RTX (mode))
13342 op0 = CONST0_RTX (vmode);
13343 else
13345 rtvec v;
13347 if (mode == SFmode)
13348 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
13349 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
13350 else
13351 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
13353 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
13356 else if (op0 != CONST0_RTX (mode))
13357 op0 = force_reg (mode, op0);
13359 mask = ix86_build_signbit_mask (mode, 0, 0);
13361 if (mode == SFmode)
13362 copysign_insn = gen_copysignsf3_const;
13363 else if (mode == DFmode)
13364 copysign_insn = gen_copysigndf3_const;
13365 else
13366 copysign_insn = gen_copysigntf3_const;
13368 emit_insn (copysign_insn (dest, op0, op1, mask));
13370 else
13372 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
13374 nmask = ix86_build_signbit_mask (mode, 0, 1);
13375 mask = ix86_build_signbit_mask (mode, 0, 0);
13377 if (mode == SFmode)
13378 copysign_insn = gen_copysignsf3_var;
13379 else if (mode == DFmode)
13380 copysign_insn = gen_copysigndf3_var;
13381 else
13382 copysign_insn = gen_copysigntf3_var;
13384 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
13388 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
13389 be a constant, and so has already been expanded into a vector constant. */
13391 void
13392 ix86_split_copysign_const (rtx operands[])
13394 enum machine_mode mode, vmode;
13395 rtx dest, op0, op1, mask, x;
13397 dest = operands[0];
13398 op0 = operands[1];
13399 op1 = operands[2];
13400 mask = operands[3];
13402 mode = GET_MODE (dest);
13403 vmode = GET_MODE (mask);
13405 dest = simplify_gen_subreg (vmode, dest, mode, 0);
13406 x = gen_rtx_AND (vmode, dest, mask);
13407 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13409 if (op0 != CONST0_RTX (vmode))
13411 x = gen_rtx_IOR (vmode, dest, op0);
13412 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13416 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
13417 so we have to do two masks. */
13419 void
13420 ix86_split_copysign_var (rtx operands[])
13422 enum machine_mode mode, vmode;
13423 rtx dest, scratch, op0, op1, mask, nmask, x;
13425 dest = operands[0];
13426 scratch = operands[1];
13427 op0 = operands[2];
13428 op1 = operands[3];
13429 nmask = operands[4];
13430 mask = operands[5];
13432 mode = GET_MODE (dest);
13433 vmode = GET_MODE (mask);
13435 if (rtx_equal_p (op0, op1))
13437 /* Shouldn't happen often (it's useless, obviously), but when it does
13438 we'd generate incorrect code if we continue below. */
13439 emit_move_insn (dest, op0);
13440 return;
13443 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
13445 gcc_assert (REGNO (op1) == REGNO (scratch));
13447 x = gen_rtx_AND (vmode, scratch, mask);
13448 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
13450 dest = mask;
13451 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
13452 x = gen_rtx_NOT (vmode, dest);
13453 x = gen_rtx_AND (vmode, x, op0);
13454 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13456 else
13458 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
13460 x = gen_rtx_AND (vmode, scratch, mask);
13462 else /* alternative 2,4 */
13464 gcc_assert (REGNO (mask) == REGNO (scratch));
13465 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
13466 x = gen_rtx_AND (vmode, scratch, op1);
13468 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
13470 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
13472 dest = simplify_gen_subreg (vmode, op0, mode, 0);
13473 x = gen_rtx_AND (vmode, dest, nmask);
13475 else /* alternative 3,4 */
13477 gcc_assert (REGNO (nmask) == REGNO (dest));
13478 dest = nmask;
13479 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
13480 x = gen_rtx_AND (vmode, dest, op0);
13482 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13485 x = gen_rtx_IOR (vmode, dest, scratch);
13486 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13489 /* Return TRUE or FALSE depending on whether the first SET in INSN
13490 has source and destination with matching CC modes, and whether the
13491 CC mode is at least as constrained as REQ_MODE. */
13494 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
13496 rtx set;
13497 enum machine_mode set_mode;
13499 set = PATTERN (insn);
13500 if (GET_CODE (set) == PARALLEL)
13501 set = XVECEXP (set, 0, 0);
13502 gcc_assert (GET_CODE (set) == SET);
13503 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
13505 set_mode = GET_MODE (SET_DEST (set));
13506 switch (set_mode)
13508 case CCNOmode:
13509 if (req_mode != CCNOmode
13510 && (req_mode != CCmode
13511 || XEXP (SET_SRC (set), 1) != const0_rtx))
13512 return 0;
13513 break;
13514 case CCmode:
13515 if (req_mode == CCGCmode)
13516 return 0;
13517 /* FALLTHRU */
13518 case CCGCmode:
13519 if (req_mode == CCGOCmode || req_mode == CCNOmode)
13520 return 0;
13521 /* FALLTHRU */
13522 case CCGOCmode:
13523 if (req_mode == CCZmode)
13524 return 0;
13525 /* FALLTHRU */
13526 case CCAmode:
13527 case CCCmode:
13528 case CCOmode:
13529 case CCSmode:
13530 case CCZmode:
13531 break;
13533 default:
13534 gcc_unreachable ();
13537 return (GET_MODE (SET_SRC (set)) == set_mode);
13540 /* Generate insn patterns to do an integer compare of OPERANDS. */
13542 static rtx
13543 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
13545 enum machine_mode cmpmode;
13546 rtx tmp, flags;
13548 cmpmode = SELECT_CC_MODE (code, op0, op1);
13549 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
13551 /* This is very simple, but making the interface the same as in the
13552 FP case makes the rest of the code easier. */
13553 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
13554 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
13556 /* Return the test that should be put into the flags user, i.e.
13557 the bcc, scc, or cmov instruction. */
13558 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
13561 /* Figure out whether to use ordered or unordered fp comparisons.
13562 Return the appropriate mode to use. */
13564 enum machine_mode
13565 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
13567 /* ??? In order to make all comparisons reversible, we do all comparisons
13568 non-trapping when compiling for IEEE. Once gcc is able to distinguish
13569 all forms of trapping and non-trapping comparisons, we can make inequality
13570 comparisons trapping again, since that results in better code when using
13571 FCOM based compares. */
13572 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
13575 enum machine_mode
13576 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
13578 enum machine_mode mode = GET_MODE (op0);
13580 if (SCALAR_FLOAT_MODE_P (mode))
13582 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
13583 return ix86_fp_compare_mode (code);
13586 switch (code)
13588 /* Only zero flag is needed. */
13589 case EQ: /* ZF=0 */
13590 case NE: /* ZF!=0 */
13591 return CCZmode;
13592 /* Codes needing carry flag. */
13593 case GEU: /* CF=0 */
13594 case LTU: /* CF=1 */
13595 /* Detect overflow checks. They need just the carry flag. */
13596 if (GET_CODE (op0) == PLUS
13597 && rtx_equal_p (op1, XEXP (op0, 0)))
13598 return CCCmode;
13599 else
13600 return CCmode;
13601 case GTU: /* CF=0 & ZF=0 */
13602 case LEU: /* CF=1 | ZF=1 */
13603 /* Detect overflow checks. They need just the carry flag. */
13604 if (GET_CODE (op0) == MINUS
13605 && rtx_equal_p (op1, XEXP (op0, 0)))
13606 return CCCmode;
13607 else
13608 return CCmode;
13609 /* Codes that may be handled with only the sign flag when
13610 comparing against zero. */
13611 case GE: /* SF=OF or SF=0 */
13612 case LT: /* SF<>OF or SF=1 */
13613 if (op1 == const0_rtx)
13614 return CCGOCmode;
13615 else
13616 /* For other cases Carry flag is not required. */
13617 return CCGCmode;
13618 /* Codes that can be handled with only the sign flag when comparing
13619 against zero, but we lack a jump instruction for that,
13620 so we need to use relational tests against overflow,
13621 which therefore needs to be zero. */
13622 case GT: /* ZF=0 & SF=OF */
13623 case LE: /* ZF=1 | SF<>OF */
13624 if (op1 == const0_rtx)
13625 return CCNOmode;
13626 else
13627 return CCGCmode;
13628 /* The strcmp pattern does (use flags), and combine may ask us for the
13629 proper mode. */
13630 case USE:
13631 return CCmode;
13632 default:
13633 gcc_unreachable ();
13637 /* Return the fixed registers used for condition codes. */
13639 static bool
13640 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13642 *p1 = FLAGS_REG;
13643 *p2 = FPSR_REG;
13644 return true;
13647 /* If two condition code modes are compatible, return a condition code
13648 mode which is compatible with both. Otherwise, return
13649 VOIDmode. */
13651 static enum machine_mode
13652 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
13654 if (m1 == m2)
13655 return m1;
13657 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
13658 return VOIDmode;
13660 if ((m1 == CCGCmode && m2 == CCGOCmode)
13661 || (m1 == CCGOCmode && m2 == CCGCmode))
13662 return CCGCmode;
13664 switch (m1)
13666 default:
13667 gcc_unreachable ();
13669 case CCmode:
13670 case CCGCmode:
13671 case CCGOCmode:
13672 case CCNOmode:
13673 case CCAmode:
13674 case CCCmode:
13675 case CCOmode:
13676 case CCSmode:
13677 case CCZmode:
13678 switch (m2)
13680 default:
13681 return VOIDmode;
13683 case CCmode:
13684 case CCGCmode:
13685 case CCGOCmode:
13686 case CCNOmode:
13687 case CCAmode:
13688 case CCCmode:
13689 case CCOmode:
13690 case CCSmode:
13691 case CCZmode:
13692 return CCmode;
13695 case CCFPmode:
13696 case CCFPUmode:
13697 /* These are only compatible with themselves, which we already
13698 checked above. */
13699 return VOIDmode;
13703 /* Split comparison code CODE into comparisons we can do using branch
13704 instructions. BYPASS_CODE is the comparison code for a branch that will
13705 branch around FIRST_CODE and SECOND_CODE. If one of the branches
13706 is not required, its code is set to UNKNOWN.
13707 We never require more than two branches. */
13709 void
13710 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
13711 enum rtx_code *first_code,
13712 enum rtx_code *second_code)
13714 *first_code = code;
13715 *bypass_code = UNKNOWN;
13716 *second_code = UNKNOWN;
13718 /* The fcomi comparison sets flags as follows:
13720 cmp ZF PF CF
13721 > 0 0 0
13722 < 0 0 1
13723 = 1 0 0
13724 un 1 1 1 */
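/* For example, a plain LT would just test CF=1, but CF is also set for
   an unordered result; under TARGET_IEEE_FP we therefore emit UNLT as
   the main branch and an UNORDERED branch around it (BYPASS_CODE),
   while NE uses LTGT plus a separate UNORDERED branch (SECOND_CODE)
   so that NaN operands still compare unequal.  */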
13726 switch (code)
13728 case GT: /* GTU - CF=0 & ZF=0 */
13729 case GE: /* GEU - CF=0 */
13730 case ORDERED: /* PF=0 */
13731 case UNORDERED: /* PF=1 */
13732 case UNEQ: /* EQ - ZF=1 */
13733 case UNLT: /* LTU - CF=1 */
13734 case UNLE: /* LEU - CF=1 | ZF=1 */
13735 case LTGT: /* EQ - ZF=0 */
13736 break;
13737 case LT: /* LTU - CF=1 - fails on unordered */
13738 *first_code = UNLT;
13739 *bypass_code = UNORDERED;
13740 break;
13741 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
13742 *first_code = UNLE;
13743 *bypass_code = UNORDERED;
13744 break;
13745 case EQ: /* EQ - ZF=1 - fails on unordered */
13746 *first_code = UNEQ;
13747 *bypass_code = UNORDERED;
13748 break;
13749 case NE: /* NE - ZF=0 - fails on unordered */
13750 *first_code = LTGT;
13751 *second_code = UNORDERED;
13752 break;
13753 case UNGE: /* GEU - CF=0 - fails on unordered */
13754 *first_code = GE;
13755 *second_code = UNORDERED;
13756 break;
13757 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
13758 *first_code = GT;
13759 *second_code = UNORDERED;
13760 break;
13761 default:
13762 gcc_unreachable ();
13764 if (!TARGET_IEEE_FP)
13766 *second_code = UNKNOWN;
13767 *bypass_code = UNKNOWN;
13771 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
13772 All following functions use the number of instructions as the cost metric.
13773 In the future this should be tweaked to compute bytes for optimize_size and
13774 take into account the performance of various instructions on various CPUs. */
13775 static int
13776 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
13778 if (!TARGET_IEEE_FP)
13779 return 4;
13780 /* The cost of code output by ix86_expand_fp_compare. */
13781 switch (code)
13783 case UNLE:
13784 case UNLT:
13785 case LTGT:
13786 case GT:
13787 case GE:
13788 case UNORDERED:
13789 case ORDERED:
13790 case UNEQ:
13791 return 4;
13792 break;
13793 case LT:
13794 case NE:
13795 case EQ:
13796 case UNGE:
13797 return 5;
13798 break;
13799 case LE:
13800 case UNGT:
13801 return 6;
13802 break;
13803 default:
13804 gcc_unreachable ();
13808 /* Return cost of comparison done using fcomi operation.
13809 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13810 static int
13811 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
13813 enum rtx_code bypass_code, first_code, second_code;
13814 /* Return an arbitrarily high cost when the instruction is not supported - this
13815 prevents gcc from using it. */
13816 if (!TARGET_CMOVE)
13817 return 1024;
13818 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13819 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
13822 /* Return cost of comparison done using sahf operation.
13823 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13824 static int
13825 ix86_fp_comparison_sahf_cost (enum rtx_code code)
13827 enum rtx_code bypass_code, first_code, second_code;
13828 /* Return an arbitrarily high cost when the instruction is not preferred - this
13829 prevents gcc from using it. */
13830 if (!(TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ())))
13831 return 1024;
13832 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13833 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
13836 /* Compute cost of the comparison done using any method.
13837 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13838 static int
13839 ix86_fp_comparison_cost (enum rtx_code code)
13841 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
13842 int min;
13844 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
13845 sahf_cost = ix86_fp_comparison_sahf_cost (code);
13847 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
13848 if (min > sahf_cost)
13849 min = sahf_cost;
13850 if (min > fcomi_cost)
13851 min = fcomi_cost;
13852 return min;
13855 /* Return true if we should use an FCOMI instruction for this
13856 fp comparison. */
13859 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
13861 enum rtx_code swapped_code = swap_condition (code);
13863 return ((ix86_fp_comparison_cost (code)
13864 == ix86_fp_comparison_fcomi_cost (code))
13865 || (ix86_fp_comparison_cost (swapped_code)
13866 == ix86_fp_comparison_fcomi_cost (swapped_code)));
13869 /* Swap, force into registers, or otherwise massage the two operands
13870 to a fp comparison. The operands are updated in place; the new
13871 comparison code is returned. */
13873 static enum rtx_code
13874 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
13876 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
13877 rtx op0 = *pop0, op1 = *pop1;
13878 enum machine_mode op_mode = GET_MODE (op0);
13879 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
13881 /* All of the unordered compare instructions only work on registers.
13882 The same is true of the fcomi compare instructions. The XFmode
13883 compare instructions require registers except when comparing
13884 against zero or when converting operand 1 from fixed point to
13885 floating point. */
13887 if (!is_sse
13888 && (fpcmp_mode == CCFPUmode
13889 || (op_mode == XFmode
13890 && ! (standard_80387_constant_p (op0) == 1
13891 || standard_80387_constant_p (op1) == 1)
13892 && GET_CODE (op1) != FLOAT)
13893 || ix86_use_fcomi_compare (code)))
13895 op0 = force_reg (op_mode, op0);
13896 op1 = force_reg (op_mode, op1);
13898 else
13900 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
13901 things around if that appears profitable, otherwise force op0
13902 into a register. */
13904 if (standard_80387_constant_p (op0) == 0
13905 || (MEM_P (op0)
13906 && ! (standard_80387_constant_p (op1) == 0
13907 || MEM_P (op1))))
13909 rtx tmp;
13910 tmp = op0, op0 = op1, op1 = tmp;
13911 code = swap_condition (code);
13914 if (!REG_P (op0))
13915 op0 = force_reg (op_mode, op0);
13917 if (CONSTANT_P (op1))
13919 int tmp = standard_80387_constant_p (op1);
13920 if (tmp == 0)
13921 op1 = validize_mem (force_const_mem (op_mode, op1));
13922 else if (tmp == 1)
13924 if (TARGET_CMOVE)
13925 op1 = force_reg (op_mode, op1);
13927 else
13928 op1 = force_reg (op_mode, op1);
13932 /* Try to rearrange the comparison to make it cheaper. */
13933 if (ix86_fp_comparison_cost (code)
13934 > ix86_fp_comparison_cost (swap_condition (code))
13935 && (REG_P (op1) || can_create_pseudo_p ()))
13937 rtx tmp;
13938 tmp = op0, op0 = op1, op1 = tmp;
13939 code = swap_condition (code);
13940 if (!REG_P (op0))
13941 op0 = force_reg (op_mode, op0);
13944 *pop0 = op0;
13945 *pop1 = op1;
13946 return code;
13949 /* Convert the comparison codes we use to represent an FP comparison into the
13950 integer code that will result in a proper branch. Return UNKNOWN if no such
13951 code is available. */
13953 enum rtx_code
13954 ix86_fp_compare_code_to_integer (enum rtx_code code)
13956 switch (code)
13958 case GT:
13959 return GTU;
13960 case GE:
13961 return GEU;
13962 case ORDERED:
13963 case UNORDERED:
13964 return code;
13965 break;
13966 case UNEQ:
13967 return EQ;
13968 break;
13969 case UNLT:
13970 return LTU;
13971 break;
13972 case UNLE:
13973 return LEU;
13974 break;
13975 case LTGT:
13976 return NE;
13977 break;
13978 default:
13979 return UNKNOWN;
13983 /* Generate insn patterns to do a floating point compare of OPERANDS. */
13985 static rtx
13986 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
13987 rtx *second_test, rtx *bypass_test)
13989 enum machine_mode fpcmp_mode, intcmp_mode;
13990 rtx tmp, tmp2;
13991 int cost = ix86_fp_comparison_cost (code);
13992 enum rtx_code bypass_code, first_code, second_code;
13994 fpcmp_mode = ix86_fp_compare_mode (code);
13995 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
13997 if (second_test)
13998 *second_test = NULL_RTX;
13999 if (bypass_test)
14000 *bypass_test = NULL_RTX;
14002 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
14004 /* Do fcomi/sahf based test when profitable. */
14005 if (ix86_fp_comparison_arithmetics_cost (code) > cost
14006 && (bypass_code == UNKNOWN || bypass_test)
14007 && (second_code == UNKNOWN || second_test))
14009 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14010 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14011 tmp);
14012 if (TARGET_CMOVE)
14013 emit_insn (tmp);
14014 else
14016 gcc_assert (TARGET_SAHF);
14018 if (!scratch)
14019 scratch = gen_reg_rtx (HImode);
14020 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
14022 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
14025 /* The FP codes work out to act like unsigned. */
14026 intcmp_mode = fpcmp_mode;
14027 code = first_code;
14028 if (bypass_code != UNKNOWN)
14029 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
14030 gen_rtx_REG (intcmp_mode, FLAGS_REG),
14031 const0_rtx);
14032 if (second_code != UNKNOWN)
14033 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
14034 gen_rtx_REG (intcmp_mode, FLAGS_REG),
14035 const0_rtx);
14037 else
14039 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
14040 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14041 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
14042 if (!scratch)
14043 scratch = gen_reg_rtx (HImode);
14044 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
14046 /* In the unordered case, we have to check C2 for NaNs, which
14047 doesn't happen to work out to anything nice combination-wise.
14048 So do some bit twiddling on the value we've got in AH to come
14049 up with an appropriate set of condition codes. */
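/* The high byte of the FPU status word maps C0 to 0x01, C2 to 0x04 and
   C3 to 0x40, so e.g. the mask 0x45 used below tests C0|C2|C3
   ("below, unordered or equal") and 0x04 alone tests the unordered
   case.  */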
14051 intcmp_mode = CCNOmode;
14052 switch (code)
14054 case GT:
14055 case UNGT:
14056 if (code == GT || !TARGET_IEEE_FP)
14058 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14059 code = EQ;
14061 else
14063 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14064 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14065 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
14066 intcmp_mode = CCmode;
14067 code = GEU;
14069 break;
14070 case LT:
14071 case UNLT:
14072 if (code == LT && TARGET_IEEE_FP)
14074 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14075 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
14076 intcmp_mode = CCmode;
14077 code = EQ;
14079 else
14081 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
14082 code = NE;
14084 break;
14085 case GE:
14086 case UNGE:
14087 if (code == GE || !TARGET_IEEE_FP)
14089 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
14090 code = EQ;
14092 else
14094 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14095 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
14096 GEN_INT (0x01)));
14097 code = NE;
14099 break;
14100 case LE:
14101 case UNLE:
14102 if (code == LE && TARGET_IEEE_FP)
14104 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14105 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14106 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
14107 intcmp_mode = CCmode;
14108 code = LTU;
14110 else
14112 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14113 code = NE;
14115 break;
14116 case EQ:
14117 case UNEQ:
14118 if (code == EQ && TARGET_IEEE_FP)
14120 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14121 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
14122 intcmp_mode = CCmode;
14123 code = EQ;
14125 else
14127 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
14128 code = NE;
14129 break;
14131 break;
14132 case NE:
14133 case LTGT:
14134 if (code == NE && TARGET_IEEE_FP)
14136 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14137 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
14138 GEN_INT (0x40)));
14139 code = NE;
14141 else
14143 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
14144 code = EQ;
14146 break;
14148 case UNORDERED:
14149 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
14150 code = NE;
14151 break;
14152 case ORDERED:
14153 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
14154 code = EQ;
14155 break;
14157 default:
14158 gcc_unreachable ();
14162 /* Return the test that should be put into the flags user, i.e.
14163 the bcc, scc, or cmov instruction. */
14164 return gen_rtx_fmt_ee (code, VOIDmode,
14165 gen_rtx_REG (intcmp_mode, FLAGS_REG),
14166 const0_rtx);
14170 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
14172 rtx op0, op1, ret;
14173 op0 = ix86_compare_op0;
14174 op1 = ix86_compare_op1;
14176 if (second_test)
14177 *second_test = NULL_RTX;
14178 if (bypass_test)
14179 *bypass_test = NULL_RTX;
14181 if (ix86_compare_emitted)
14183 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
14184 ix86_compare_emitted = NULL_RTX;
14186 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
14188 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
14189 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
14190 second_test, bypass_test);
14192 else
14193 ret = ix86_expand_int_compare (code, op0, op1);
14195 return ret;
14198 /* Return true if the CODE will result in a nontrivial jump sequence. */
14199 bool
14200 ix86_fp_jump_nontrivial_p (enum rtx_code code)
14202 enum rtx_code bypass_code, first_code, second_code;
14203 if (!TARGET_CMOVE)
14204 return true;
14205 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
14206 return bypass_code != UNKNOWN || second_code != UNKNOWN;
14209 void
14210 ix86_expand_branch (enum rtx_code code, rtx label)
14212 rtx tmp;
14214 /* If we have emitted a compare insn, go straight to simple.
14215 ix86_expand_compare won't emit anything if ix86_compare_emitted
14216 is non-NULL. */
14217 if (ix86_compare_emitted)
14218 goto simple;
14220 switch (GET_MODE (ix86_compare_op0))
14222 case QImode:
14223 case HImode:
14224 case SImode:
14225 simple:
14226 tmp = ix86_expand_compare (code, NULL, NULL);
14227 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14228 gen_rtx_LABEL_REF (VOIDmode, label),
14229 pc_rtx);
14230 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
14231 return;
14233 case SFmode:
14234 case DFmode:
14235 case XFmode:
14237 rtvec vec;
14238 int use_fcomi;
14239 enum rtx_code bypass_code, first_code, second_code;
14241 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
14242 &ix86_compare_op1);
14244 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
14246 /* Check whether we will use the natural sequence with one jump. If
14247 so, we can expand the jump early. Otherwise delay expansion by
14248 creating a compound insn so as not to confuse the optimizers. */
14249 if (bypass_code == UNKNOWN && second_code == UNKNOWN)
14251 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
14252 gen_rtx_LABEL_REF (VOIDmode, label),
14253 pc_rtx, NULL_RTX, NULL_RTX);
14255 else
14257 tmp = gen_rtx_fmt_ee (code, VOIDmode,
14258 ix86_compare_op0, ix86_compare_op1);
14259 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14260 gen_rtx_LABEL_REF (VOIDmode, label),
14261 pc_rtx);
14262 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
14264 use_fcomi = ix86_use_fcomi_compare (code);
14265 vec = rtvec_alloc (3 + !use_fcomi);
14266 RTVEC_ELT (vec, 0) = tmp;
14267 RTVEC_ELT (vec, 1)
14268 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FPSR_REG));
14269 RTVEC_ELT (vec, 2)
14270 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FLAGS_REG));
14271 if (! use_fcomi)
14272 RTVEC_ELT (vec, 3)
14273 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
14275 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
14277 return;
14280 case DImode:
14281 if (TARGET_64BIT)
14282 goto simple;
14283 case TImode:
14284 /* Expand DImode branch into multiple compare+branch. */
14286 rtx lo[2], hi[2], label2;
14287 enum rtx_code code1, code2, code3;
14288 enum machine_mode submode;
14290 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
14292 tmp = ix86_compare_op0;
14293 ix86_compare_op0 = ix86_compare_op1;
14294 ix86_compare_op1 = tmp;
14295 code = swap_condition (code);
14297 if (GET_MODE (ix86_compare_op0) == DImode)
14299 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
14300 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
14301 submode = SImode;
14303 else
14305 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
14306 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
14307 submode = DImode;
14310 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
14311 avoid two branches. This costs one extra insn, so disable when
14312 optimizing for size. */
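/* For example, on 32-bit a DImode a == b becomes roughly:
     xorl  hi(b), hi(a)
     xorl  lo(b), lo(a)
     orl   hi(a), lo(a)
     je    label          ; or jne for a != b
   instead of two separate compare-and-branch pairs.  */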
14314 if ((code == EQ || code == NE)
14315 && (!optimize_insn_for_size_p ()
14316 || hi[1] == const0_rtx || lo[1] == const0_rtx))
14318 rtx xor0, xor1;
14320 xor1 = hi[0];
14321 if (hi[1] != const0_rtx)
14322 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
14323 NULL_RTX, 0, OPTAB_WIDEN);
14325 xor0 = lo[0];
14326 if (lo[1] != const0_rtx)
14327 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
14328 NULL_RTX, 0, OPTAB_WIDEN);
14330 tmp = expand_binop (submode, ior_optab, xor1, xor0,
14331 NULL_RTX, 0, OPTAB_WIDEN);
14333 ix86_compare_op0 = tmp;
14334 ix86_compare_op1 = const0_rtx;
14335 ix86_expand_branch (code, label);
14336 return;
14339 /* Otherwise, if we are doing a less-than or greater-or-equal-than
14340 comparison, op1 is a constant and the low word is zero, then we can
14341 just examine the high word. Similarly for a low word of -1 and
14342 less-or-equal-than or greater-than. */
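/* E.g. for an unsigned a < 0x500000000 the low word of the constant is
   zero, so it is enough to test hi(a) < 5; likewise a <= 0x4ffffffff
   (low word all ones) reduces to hi(a) <= 4.  */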
14344 if (CONST_INT_P (hi[1]))
14345 switch (code)
14347 case LT: case LTU: case GE: case GEU:
14348 if (lo[1] == const0_rtx)
14350 ix86_compare_op0 = hi[0];
14351 ix86_compare_op1 = hi[1];
14352 ix86_expand_branch (code, label);
14353 return;
14355 break;
14356 case LE: case LEU: case GT: case GTU:
14357 if (lo[1] == constm1_rtx)
14359 ix86_compare_op0 = hi[0];
14360 ix86_compare_op1 = hi[1];
14361 ix86_expand_branch (code, label);
14362 return;
14364 break;
14365 default:
14366 break;
14369 /* Otherwise, we need two or three jumps. */
14371 label2 = gen_label_rtx ();
14373 code1 = code;
14374 code2 = swap_condition (code);
14375 code3 = unsigned_condition (code);
14377 switch (code)
14379 case LT: case GT: case LTU: case GTU:
14380 break;
14382 case LE: code1 = LT; code2 = GT; break;
14383 case GE: code1 = GT; code2 = LT; break;
14384 case LEU: code1 = LTU; code2 = GTU; break;
14385 case GEU: code1 = GTU; code2 = LTU; break;
14387 case EQ: code1 = UNKNOWN; code2 = NE; break;
14388 case NE: code2 = UNKNOWN; break;
14390 default:
14391 gcc_unreachable ();
14395 * a < b =>
14396 * if (hi(a) < hi(b)) goto true;
14397 * if (hi(a) > hi(b)) goto false;
14398 * if (lo(a) < lo(b)) goto true;
14399 * false:
14402 ix86_compare_op0 = hi[0];
14403 ix86_compare_op1 = hi[1];
14405 if (code1 != UNKNOWN)
14406 ix86_expand_branch (code1, label);
14407 if (code2 != UNKNOWN)
14408 ix86_expand_branch (code2, label2);
14410 ix86_compare_op0 = lo[0];
14411 ix86_compare_op1 = lo[1];
14412 ix86_expand_branch (code3, label);
14414 if (code2 != UNKNOWN)
14415 emit_label (label2);
14416 return;
14419 default:
14420 gcc_unreachable ();
14424 /* Split branch based on floating point condition. */
14425 void
14426 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
14427 rtx target1, rtx target2, rtx tmp, rtx pushed)
14429 rtx second, bypass;
14430 rtx label = NULL_RTX;
14431 rtx condition;
14432 int bypass_probability = -1, second_probability = -1, probability = -1;
14433 rtx i;
14435 if (target2 != pc_rtx)
14437 rtx tmp = target2;
14438 code = reverse_condition_maybe_unordered (code);
14439 target2 = target1;
14440 target1 = tmp;
14443 condition = ix86_expand_fp_compare (code, op1, op2,
14444 tmp, &second, &bypass);
14446 /* Remove pushed operand from stack. */
14447 if (pushed)
14448 ix86_free_from_memory (GET_MODE (pushed));
14450 if (split_branch_probability >= 0)
14452 /* Distribute the probabilities across the jumps.
14453 Assume that the BYPASS and SECOND branches always test
14454 for UNORDERED. */
14455 probability = split_branch_probability;
14457 /* A value of 1 is low enough that there is no need for the probability
14458 to be updated. Later we may run some experiments and see
14459 whether unordered values are more frequent in practice. */
14460 if (bypass)
14461 bypass_probability = 1;
14462 if (second)
14463 second_probability = 1;
14465 if (bypass != NULL_RTX)
14467 label = gen_label_rtx ();
14468 i = emit_jump_insn (gen_rtx_SET
14469 (VOIDmode, pc_rtx,
14470 gen_rtx_IF_THEN_ELSE (VOIDmode,
14471 bypass,
14472 gen_rtx_LABEL_REF (VOIDmode,
14473 label),
14474 pc_rtx)));
14475 if (bypass_probability >= 0)
14476 REG_NOTES (i)
14477 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14478 GEN_INT (bypass_probability),
14479 REG_NOTES (i));
14481 i = emit_jump_insn (gen_rtx_SET
14482 (VOIDmode, pc_rtx,
14483 gen_rtx_IF_THEN_ELSE (VOIDmode,
14484 condition, target1, target2)));
14485 if (probability >= 0)
14486 REG_NOTES (i)
14487 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14488 GEN_INT (probability),
14489 REG_NOTES (i));
14490 if (second != NULL_RTX)
14492 i = emit_jump_insn (gen_rtx_SET
14493 (VOIDmode, pc_rtx,
14494 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
14495 target2)));
14496 if (second_probability >= 0)
14497 REG_NOTES (i)
14498 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14499 GEN_INT (second_probability),
14500 REG_NOTES (i));
14502 if (label != NULL_RTX)
14503 emit_label (label);
14507 ix86_expand_setcc (enum rtx_code code, rtx dest)
14509 rtx ret, tmp, tmpreg, equiv;
14510 rtx second_test, bypass_test;
14512 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
14513 return 0; /* FAIL */
14515 gcc_assert (GET_MODE (dest) == QImode);
14517 ret = ix86_expand_compare (code, &second_test, &bypass_test);
14518 PUT_MODE (ret, QImode);
14520 tmp = dest;
14521 tmpreg = dest;
14523 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
14524 if (bypass_test || second_test)
14526 rtx test = second_test;
14527 int bypass = 0;
14528 rtx tmp2 = gen_reg_rtx (QImode);
14529 if (bypass_test)
14531 gcc_assert (!second_test);
14532 test = bypass_test;
14533 bypass = 1;
14534 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
14536 PUT_MODE (test, QImode);
14537 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
14539 if (bypass)
14540 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
14541 else
14542 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
14545 /* Attach a REG_EQUAL note describing the comparison result. */
14546 if (ix86_compare_op0 && ix86_compare_op1)
14548 equiv = simplify_gen_relational (code, QImode,
14549 GET_MODE (ix86_compare_op0),
14550 ix86_compare_op0, ix86_compare_op1);
14551 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
14554 return 1; /* DONE */
14557 /* Expand a comparison setting or clearing the carry flag. Return true when
14558 successful and set *POP to the comparison operation. */
14559 static bool
14560 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
14562 enum machine_mode mode =
14563 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
14565 /* Do not handle DImode compares that go through special path. */
14566 if (mode == (TARGET_64BIT ? TImode : DImode))
14567 return false;
14569 if (SCALAR_FLOAT_MODE_P (mode))
14571 rtx second_test = NULL, bypass_test = NULL;
14572 rtx compare_op, compare_seq;
14574 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14576 /* Shortcut: the following common codes never translate
14577 into carry flag compares. */
14578 if (code == EQ || code == NE || code == UNEQ || code == LTGT
14579 || code == ORDERED || code == UNORDERED)
14580 return false;
14582 /* These comparisons require the zero flag; swap the operands so they won't. */
14583 if ((code == GT || code == UNLE || code == LE || code == UNGT)
14584 && !TARGET_IEEE_FP)
14586 rtx tmp = op0;
14587 op0 = op1;
14588 op1 = tmp;
14589 code = swap_condition (code);
14592 /* Try to expand the comparison and verify that we end up with a
14593 carry flag based comparison. This fails to be true only when
14594 we decide to expand the comparison using arithmetic, which is not
14595 a common scenario. */
14596 start_sequence ();
14597 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
14598 &second_test, &bypass_test);
14599 compare_seq = get_insns ();
14600 end_sequence ();
14602 if (second_test || bypass_test)
14603 return false;
14605 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
14606 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
14607 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
14608 else
14609 code = GET_CODE (compare_op);
14611 if (code != LTU && code != GEU)
14612 return false;
14614 emit_insn (compare_seq);
14615 *pop = compare_op;
14616 return true;
14619 if (!INTEGRAL_MODE_P (mode))
14620 return false;
14622 switch (code)
14624 case LTU:
14625 case GEU:
14626 break;
14628 /* Convert a==0 into (unsigned)a<1. */
14629 case EQ:
14630 case NE:
14631 if (op1 != const0_rtx)
14632 return false;
14633 op1 = const1_rtx;
14634 code = (code == EQ ? LTU : GEU);
14635 break;
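/* E.g. a == 0 becomes the unsigned test a < 1, which sets the carry
   flag exactly when a is zero, so the caller can materialize the
   result with sbb rather than setcc.  */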
14637 /* Convert a>b into b<a or a>=b+1. */
14638 case GTU:
14639 case LEU:
14640 if (CONST_INT_P (op1))
14642 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
14643 /* Bail out on overflow. We could still swap the operands, but that
14644 would force loading of the constant into a register. */
14645 if (op1 == const0_rtx
14646 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
14647 return false;
14648 code = (code == GTU ? GEU : LTU);
14650 else
14652 rtx tmp = op1;
14653 op1 = op0;
14654 op0 = tmp;
14655 code = (code == GTU ? LTU : GEU);
14657 break;
14659 /* Convert a>=0 into (unsigned)a<0x80000000. */
14660 case LT:
14661 case GE:
14662 if (mode == DImode || op1 != const0_rtx)
14663 return false;
14664 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
14665 code = (code == LT ? GEU : LTU);
14666 break;
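/* E.g. a >= 0 (signed) is the same as (unsigned) a < 0x80000000, so the
   sign test becomes a carry-flag compare against the sign-bit constant;
   the LE/GT case below handles a <= -1 and a > -1 the same way.  */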
14667 case LE:
14668 case GT:
14669 if (mode == DImode || op1 != constm1_rtx)
14670 return false;
14671 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
14672 code = (code == LE ? GEU : LTU);
14673 break;
14675 default:
14676 return false;
14678 /* Swapping operands may cause a constant to appear as the first operand. */
14679 if (!nonimmediate_operand (op0, VOIDmode))
14681 if (!can_create_pseudo_p ())
14682 return false;
14683 op0 = force_reg (mode, op0);
14685 ix86_compare_op0 = op0;
14686 ix86_compare_op1 = op1;
14687 *pop = ix86_expand_compare (code, NULL, NULL);
14688 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
14689 return true;
14693 ix86_expand_int_movcc (rtx operands[])
14695 enum rtx_code code = GET_CODE (operands[1]), compare_code;
14696 rtx compare_seq, compare_op;
14697 rtx second_test, bypass_test;
14698 enum machine_mode mode = GET_MODE (operands[0]);
14699 bool sign_bit_compare_p = false;
14701 start_sequence ();
14702 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
14703 compare_seq = get_insns ();
14704 end_sequence ();
14706 compare_code = GET_CODE (compare_op);
14708 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
14709 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
14710 sign_bit_compare_p = true;
14712 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
14713 HImode insns, we'd be swallowed in word prefix ops. */
14715 if ((mode != HImode || TARGET_FAST_PREFIX)
14716 && (mode != (TARGET_64BIT ? TImode : DImode))
14717 && CONST_INT_P (operands[2])
14718 && CONST_INT_P (operands[3]))
14720 rtx out = operands[0];
14721 HOST_WIDE_INT ct = INTVAL (operands[2]);
14722 HOST_WIDE_INT cf = INTVAL (operands[3]);
14723 HOST_WIDE_INT diff;
14725 diff = ct - cf;
14726 /* Sign bit compares are better done using shifts than by using
14727 sbb. */
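/* For a sign-bit test the 0/-1 mask can be produced directly with an
   arithmetic shift, e.g. (x < 0 ? -1 : 0) is just sarl $31, x; the
   add/or/and adjustments below then build other ct/cf pairs on top of
   that mask.  */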
14728 if (sign_bit_compare_p
14729 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
14730 ix86_compare_op1, &compare_op))
14732 /* Detect overlap between destination and compare sources. */
14733 rtx tmp = out;
14735 if (!sign_bit_compare_p)
14737 bool fpcmp = false;
14739 compare_code = GET_CODE (compare_op);
14741 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
14742 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
14744 fpcmp = true;
14745 compare_code = ix86_fp_compare_code_to_integer (compare_code);
14748 /* To simplify the rest of the code, restrict to the GEU case. */
14749 if (compare_code == LTU)
14751 HOST_WIDE_INT tmp = ct;
14752 ct = cf;
14753 cf = tmp;
14754 compare_code = reverse_condition (compare_code);
14755 code = reverse_condition (code);
14757 else
14759 if (fpcmp)
14760 PUT_CODE (compare_op,
14761 reverse_condition_maybe_unordered
14762 (GET_CODE (compare_op)));
14763 else
14764 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
14766 diff = ct - cf;
14768 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
14769 || reg_overlap_mentioned_p (out, ix86_compare_op1))
14770 tmp = gen_reg_rtx (mode);
14772 if (mode == DImode)
14773 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
14774 else
14775 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
14777 else
14779 if (code == GT || code == GE)
14780 code = reverse_condition (code);
14781 else
14783 HOST_WIDE_INT tmp = ct;
14784 ct = cf;
14785 cf = tmp;
14786 diff = ct - cf;
14788 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
14789 ix86_compare_op1, VOIDmode, 0, -1);
14792 if (diff == 1)
14795 * cmpl op0,op1
14796 * sbbl dest,dest
14797 * [addl dest, ct]
14799 * Size 5 - 8.
14801 if (ct)
14802 tmp = expand_simple_binop (mode, PLUS,
14803 tmp, GEN_INT (ct),
14804 copy_rtx (tmp), 1, OPTAB_DIRECT);
14806 else if (cf == -1)
14809 * cmpl op0,op1
14810 * sbbl dest,dest
14811 * orl $ct, dest
14813 * Size 8.
14815 tmp = expand_simple_binop (mode, IOR,
14816 tmp, GEN_INT (ct),
14817 copy_rtx (tmp), 1, OPTAB_DIRECT);
14819 else if (diff == -1 && ct)
14822 * cmpl op0,op1
14823 * sbbl dest,dest
14824 * notl dest
14825 * [addl dest, cf]
14827 * Size 8 - 11.
14829 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
14830 if (cf)
14831 tmp = expand_simple_binop (mode, PLUS,
14832 copy_rtx (tmp), GEN_INT (cf),
14833 copy_rtx (tmp), 1, OPTAB_DIRECT);
14835 else
14838 * cmpl op0,op1
14839 * sbbl dest,dest
14840 * [notl dest]
14841 * andl cf - ct, dest
14842 * [addl dest, ct]
14844 * Size 8 - 11.
14847 if (cf == 0)
14849 cf = ct;
14850 ct = 0;
14851 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
14854 tmp = expand_simple_binop (mode, AND,
14855 copy_rtx (tmp),
14856 gen_int_mode (cf - ct, mode),
14857 copy_rtx (tmp), 1, OPTAB_DIRECT);
14858 if (ct)
14859 tmp = expand_simple_binop (mode, PLUS,
14860 copy_rtx (tmp), GEN_INT (ct),
14861 copy_rtx (tmp), 1, OPTAB_DIRECT);
14864 if (!rtx_equal_p (tmp, out))
14865 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
14867 return 1; /* DONE */
14870 if (diff < 0)
14872 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
14874 HOST_WIDE_INT tmp;
14875 tmp = ct, ct = cf, cf = tmp;
14876 diff = -diff;
14878 if (SCALAR_FLOAT_MODE_P (cmp_mode))
14880 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
14882 /* We may be reversing an unordered compare to a normal compare, which
14883 is not valid in general (we may convert a non-trapping condition
14884 to a trapping one); however, on i386 we currently emit all
14885 comparisons unordered. */
14886 compare_code = reverse_condition_maybe_unordered (compare_code);
14887 code = reverse_condition_maybe_unordered (code);
14889 else
14891 compare_code = reverse_condition (compare_code);
14892 code = reverse_condition (code);
14896 compare_code = UNKNOWN;
14897 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
14898 && CONST_INT_P (ix86_compare_op1))
14900 if (ix86_compare_op1 == const0_rtx
14901 && (code == LT || code == GE))
14902 compare_code = code;
14903 else if (ix86_compare_op1 == constm1_rtx)
14905 if (code == LE)
14906 compare_code = LT;
14907 else if (code == GT)
14908 compare_code = GE;
14912 /* Optimize dest = (op0 < 0) ? -1 : cf. */
14913 if (compare_code != UNKNOWN
14914 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
14915 && (cf == -1 || ct == -1))
14917 /* If the lea code below could be used, only optimize
14918 if it results in a 2-insn sequence. */
14920 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
14921 || diff == 3 || diff == 5 || diff == 9)
14922 || (compare_code == LT && ct == -1)
14923 || (compare_code == GE && cf == -1))
14926 * notl op1 (if necessary)
14927 * sarl $31, op1
14928 * orl cf, op1
14930 if (ct != -1)
14932 cf = ct;
14933 ct = -1;
14934 code = reverse_condition (code);
14937 out = emit_store_flag (out, code, ix86_compare_op0,
14938 ix86_compare_op1, VOIDmode, 0, -1);
14940 out = expand_simple_binop (mode, IOR,
14941 out, GEN_INT (cf),
14942 out, 1, OPTAB_DIRECT);
14943 if (out != operands[0])
14944 emit_move_insn (operands[0], out);
14946 return 1; /* DONE */
14951 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
14952 || diff == 3 || diff == 5 || diff == 9)
14953 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
14954 && (mode != DImode
14955 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
14958 * xorl dest,dest
14959 * cmpl op1,op2
14960 * setcc dest
14961 * lea cf(dest*(ct-cf)),dest
14963 * Size 14.
14965 * This also catches the degenerate setcc-only case.
14968 rtx tmp;
14969 int nops;
14971 out = emit_store_flag (out, code, ix86_compare_op0,
14972 ix86_compare_op1, VOIDmode, 0, 1);
14974 nops = 0;
14975 /* On x86_64 the lea instruction operates on Pmode, so we need
14976 to get the arithmetic done in the proper mode to match. */
14977 if (diff == 1)
14978 tmp = copy_rtx (out);
14979 else
14981 rtx out1;
14982 out1 = copy_rtx (out);
14983 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
14984 nops++;
14985 if (diff & 1)
14987 tmp = gen_rtx_PLUS (mode, tmp, out1);
14988 nops++;
14991 if (cf != 0)
14993 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
14994 nops++;
14996 if (!rtx_equal_p (tmp, out))
14998 if (nops == 1)
14999 out = force_operand (tmp, copy_rtx (out));
15000 else
15001 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15003 if (!rtx_equal_p (out, operands[0]))
15004 emit_move_insn (operands[0], copy_rtx (out));
15006 return 1; /* DONE */
15010 * General case: Jumpful:
15011 * xorl dest,dest cmpl op1, op2
15012 * cmpl op1, op2 movl ct, dest
15013 * setcc dest jcc 1f
15014 * decl dest movl cf, dest
15015 * andl (cf-ct),dest 1:
15016 * addl ct,dest
15018 * Size 20. Size 14.
15020 * This is reasonably steep, but branch mispredict costs are
15021 * high on modern cpus, so consider failing only if optimizing
15022 * for space.
15025 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15026 && BRANCH_COST (optimize_insn_for_speed_p (),
15027 false) >= 2)
15029 if (cf == 0)
15031 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15033 cf = ct;
15034 ct = 0;
15036 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15038 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15040 /* We may be reversing an unordered compare to a normal compare,
15041 which is not valid in general (we may convert a non-trapping
15042 condition to a trapping one); however, on i386 we currently
15043 emit all comparisons unordered. */
15044 code = reverse_condition_maybe_unordered (code);
15046 else
15048 code = reverse_condition (code);
15049 if (compare_code != UNKNOWN)
15050 compare_code = reverse_condition (compare_code);
15054 if (compare_code != UNKNOWN)
15056 /* notl op1 (if needed)
15057 sarl $31, op1
15058 andl (cf-ct), op1
15059 addl ct, op1
15061 For x < 0 (resp. x <= -1) there will be no notl,
15062 so if possible swap the constants to get rid of the
15063 complement.
15064 True/false will be -1/0 while code below (store flag
15065 followed by decrement) is 0/-1, so the constants need
15066 to be exchanged once more. */
15068 if (compare_code == GE || !cf)
15070 code = reverse_condition (code);
15071 compare_code = LT;
15073 else
15075 HOST_WIDE_INT tmp = cf;
15076 cf = ct;
15077 ct = tmp;
15080 out = emit_store_flag (out, code, ix86_compare_op0,
15081 ix86_compare_op1, VOIDmode, 0, -1);
15083 else
15085 out = emit_store_flag (out, code, ix86_compare_op0,
15086 ix86_compare_op1, VOIDmode, 0, 1);
15088 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15089 copy_rtx (out), 1, OPTAB_DIRECT);
15092 out = expand_simple_binop (mode, AND, copy_rtx (out),
15093 gen_int_mode (cf - ct, mode),
15094 copy_rtx (out), 1, OPTAB_DIRECT);
15095 if (ct)
15096 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15097 copy_rtx (out), 1, OPTAB_DIRECT);
15098 if (!rtx_equal_p (out, operands[0]))
15099 emit_move_insn (operands[0], copy_rtx (out));
15101 return 1; /* DONE */
15105 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15107 /* Try a few things more with specific constants and a variable. */
15109 optab op;
15110 rtx var, orig_out, out, tmp;
15112 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15113 return 0; /* FAIL */
15115 /* If one of the two operands is an interesting constant, load a
15116 constant with the above and mask it in with a logical operation. */
15118 if (CONST_INT_P (operands[2]))
15120 var = operands[3];
15121 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15122 operands[3] = constm1_rtx, op = and_optab;
15123 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15124 operands[3] = const0_rtx, op = ior_optab;
15125 else
15126 return 0; /* FAIL */
15128 else if (CONST_INT_P (operands[3]))
15130 var = operands[2];
15131 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15132 operands[2] = constm1_rtx, op = and_optab;
15133 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15134 operands[2] = const0_rtx, op = ior_optab;
15135 else
15136 return 0; /* FAIL */
15138 else
15139 return 0; /* FAIL */
15141 orig_out = operands[0];
15142 tmp = gen_reg_rtx (mode);
15143 operands[0] = tmp;
15145 /* Recurse to get the constant loaded. */
15146 if (ix86_expand_int_movcc (operands) == 0)
15147 return 0; /* FAIL */
15149 /* Mask in the interesting variable. */
15150 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15151 OPTAB_WIDEN);
15152 if (!rtx_equal_p (out, orig_out))
15153 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15155 return 1; /* DONE */
15159 * For comparison with above,
15161 * movl cf,dest
15162 * movl ct,tmp
15163 * cmpl op1,op2
15164 * cmovcc tmp,dest
15166 * Size 15.
15169 if (! nonimmediate_operand (operands[2], mode))
15170 operands[2] = force_reg (mode, operands[2]);
15171 if (! nonimmediate_operand (operands[3], mode))
15172 operands[3] = force_reg (mode, operands[3]);
15174 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
15176 rtx tmp = gen_reg_rtx (mode);
15177 emit_move_insn (tmp, operands[3]);
15178 operands[3] = tmp;
15180 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
15182 rtx tmp = gen_reg_rtx (mode);
15183 emit_move_insn (tmp, operands[2]);
15184 operands[2] = tmp;
15187 if (! register_operand (operands[2], VOIDmode)
15188 && (mode == QImode
15189 || ! register_operand (operands[3], VOIDmode)))
15190 operands[2] = force_reg (mode, operands[2]);
15192 if (mode == QImode
15193 && ! register_operand (operands[3], VOIDmode))
15194 operands[3] = force_reg (mode, operands[3]);
15196 emit_insn (compare_seq);
15197 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15198 gen_rtx_IF_THEN_ELSE (mode,
15199 compare_op, operands[2],
15200 operands[3])));
15201 if (bypass_test)
15202 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
15203 gen_rtx_IF_THEN_ELSE (mode,
15204 bypass_test,
15205 copy_rtx (operands[3]),
15206 copy_rtx (operands[0]))));
15207 if (second_test)
15208 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
15209 gen_rtx_IF_THEN_ELSE (mode,
15210 second_test,
15211 copy_rtx (operands[2]),
15212 copy_rtx (operands[0]))));
15214 return 1; /* DONE */
15217 /* Swap, force into registers, or otherwise massage the two operands
15218 to an sse comparison with a mask result. Thus we differ a bit from
15219 ix86_prepare_fp_compare_args which expects to produce a flags result.
15221 The DEST operand exists to help determine whether to commute commutative
15222 operators. The POP0/POP1 operands are updated in place. The new
15223 comparison code is returned, or UNKNOWN if not implementable. */
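/* For illustration: cmpps/cmpsd only provide the EQ, LT, LE, UNORD, NEQ,
   NLT (i.e. UNGE), NLE (UNGT) and ORD predicates, so a request for e.g.
   GT is turned into LT with the operands swapped below.  */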
15225 static enum rtx_code
15226 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
15227 rtx *pop0, rtx *pop1)
15229 rtx tmp;
15231 switch (code)
15233 case LTGT:
15234 case UNEQ:
15235 /* We have no LTGT as an operator. We could implement it with
15236 NE & ORDERED, but this requires an extra temporary. It's
15237 not clear that it's worth it. */
15238 return UNKNOWN;
15240 case LT:
15241 case LE:
15242 case UNGT:
15243 case UNGE:
15244 /* These are supported directly. */
15245 break;
15247 case EQ:
15248 case NE:
15249 case UNORDERED:
15250 case ORDERED:
15251 /* For commutative operators, try to canonicalize the destination
15252 operand to be first in the comparison - this helps reload to
15253 avoid extra moves. */
15254 if (!dest || !rtx_equal_p (dest, *pop1))
15255 break;
15256 /* FALLTHRU */
15258 case GE:
15259 case GT:
15260 case UNLE:
15261 case UNLT:
15262 /* These are not supported directly. Swap the comparison operands
15263 to transform into something that is supported. */
15264 tmp = *pop0;
15265 *pop0 = *pop1;
15266 *pop1 = tmp;
15267 code = swap_condition (code);
15268 break;
15270 default:
15271 gcc_unreachable ();
15274 return code;
15277 /* Detect conditional moves that exactly match min/max operational
15278 semantics. Note that this is IEEE safe, as long as we don't
15279 interchange the operands.
15281 Returns FALSE if this conditional move doesn't match a MIN/MAX,
15282 and TRUE if the operation is successful and instructions are emitted. */
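/* For example, (a < b ? a : b) maps onto minss/minps with a as the first
   operand, while swapping IF_TRUE and IF_FALSE would change the result
   for NaNs and for -0.0 vs +0.0, since the SSE min/max instructions
   return their second operand in those cases.  */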
15284 static bool
15285 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
15286 rtx cmp_op1, rtx if_true, rtx if_false)
15288 enum machine_mode mode;
15289 bool is_min;
15290 rtx tmp;
15292 if (code == LT)
15294 else if (code == UNGE)
15296 tmp = if_true;
15297 if_true = if_false;
15298 if_false = tmp;
15300 else
15301 return false;
15303 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
15304 is_min = true;
15305 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
15306 is_min = false;
15307 else
15308 return false;
15310 mode = GET_MODE (dest);
15312 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
15313 but MODE may be a vector mode and thus not appropriate. */
15314 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
15316 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
15317 rtvec v;
15319 if_true = force_reg (mode, if_true);
15320 v = gen_rtvec (2, if_true, if_false);
15321 tmp = gen_rtx_UNSPEC (mode, v, u);
15323 else
15325 code = is_min ? SMIN : SMAX;
15326 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
15329 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
15330 return true;
15333 /* Expand an sse vector comparison. Return the register with the result. */
15335 static rtx
15336 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
15337 rtx op_true, rtx op_false)
15339 enum machine_mode mode = GET_MODE (dest);
15340 rtx x;
15342 cmp_op0 = force_reg (mode, cmp_op0);
15343 if (!nonimmediate_operand (cmp_op1, mode))
15344 cmp_op1 = force_reg (mode, cmp_op1);
15346 if (optimize
15347 || reg_overlap_mentioned_p (dest, op_true)
15348 || reg_overlap_mentioned_p (dest, op_false))
15349 dest = gen_reg_rtx (mode);
15351 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
15352 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15354 return dest;
15357 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
15358 operations. This is used for both scalar and vector conditional moves. */
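/* In the general case below the select is computed with the usual mask
   trick: dest = (cmp & op_true) | (~cmp & op_false), where CMP is an
   all-ones or all-zeros mask per element; the special cases skip work
   when one arm is zero, and SSE5 can use a single pcmov instead.  */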
15360 static void
15361 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
15363 enum machine_mode mode = GET_MODE (dest);
15364 rtx t2, t3, x;
15366 if (op_false == CONST0_RTX (mode))
15368 op_true = force_reg (mode, op_true);
15369 x = gen_rtx_AND (mode, cmp, op_true);
15370 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15372 else if (op_true == CONST0_RTX (mode))
15374 op_false = force_reg (mode, op_false);
15375 x = gen_rtx_NOT (mode, cmp);
15376 x = gen_rtx_AND (mode, x, op_false);
15377 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15379 else if (TARGET_SSE5)
15381 rtx pcmov = gen_rtx_SET (mode, dest,
15382 gen_rtx_IF_THEN_ELSE (mode, cmp,
15383 op_true,
15384 op_false));
15385 emit_insn (pcmov);
15387 else
15389 op_true = force_reg (mode, op_true);
15390 op_false = force_reg (mode, op_false);
15392 t2 = gen_reg_rtx (mode);
15393 if (optimize)
15394 t3 = gen_reg_rtx (mode);
15395 else
15396 t3 = dest;
15398 x = gen_rtx_AND (mode, op_true, cmp);
15399 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
15401 x = gen_rtx_NOT (mode, cmp);
15402 x = gen_rtx_AND (mode, x, op_false);
15403 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
15405 x = gen_rtx_IOR (mode, t3, t2);
15406 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15410 /* Expand a floating-point conditional move. Return true if successful. */
15413 ix86_expand_fp_movcc (rtx operands[])
15415 enum machine_mode mode = GET_MODE (operands[0]);
15416 enum rtx_code code = GET_CODE (operands[1]);
15417 rtx tmp, compare_op, second_test, bypass_test;
15419 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
15421 enum machine_mode cmode;
15423 /* Since we have no cmove for sse registers, don't force bad register
15424 allocation just to gain access to it. Deny movcc when the
15425 comparison mode doesn't match the move mode. */
15426 cmode = GET_MODE (ix86_compare_op0);
15427 if (cmode == VOIDmode)
15428 cmode = GET_MODE (ix86_compare_op1);
15429 if (cmode != mode)
15430 return 0;
15432 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
15433 &ix86_compare_op0,
15434 &ix86_compare_op1);
15435 if (code == UNKNOWN)
15436 return 0;
15438 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
15439 ix86_compare_op1, operands[2],
15440 operands[3]))
15441 return 1;
15443 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
15444 ix86_compare_op1, operands[2], operands[3]);
15445 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
15446 return 1;
15449 /* The floating point conditional move instructions don't directly
15450 support conditions resulting from a signed integer comparison. */
15452 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
15454 /* The floating point conditional move instructions don't directly
15455 support signed integer comparisons. */
15457 if (!fcmov_comparison_operator (compare_op, VOIDmode))
15459 gcc_assert (!second_test && !bypass_test);
15460 tmp = gen_reg_rtx (QImode);
15461 ix86_expand_setcc (code, tmp);
15462 code = NE;
15463 ix86_compare_op0 = tmp;
15464 ix86_compare_op1 = const0_rtx;
15465 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
15467 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
15469 tmp = gen_reg_rtx (mode);
15470 emit_move_insn (tmp, operands[3]);
15471 operands[3] = tmp;
15473 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
15475 tmp = gen_reg_rtx (mode);
15476 emit_move_insn (tmp, operands[2]);
15477 operands[2] = tmp;
15480 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15481 gen_rtx_IF_THEN_ELSE (mode, compare_op,
15482 operands[2], operands[3])));
15483 if (bypass_test)
15484 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15485 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
15486 operands[3], operands[0])));
15487 if (second_test)
15488 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15489 gen_rtx_IF_THEN_ELSE (mode, second_test,
15490 operands[2], operands[0])));
15492 return 1;
15495 /* Expand a floating-point vector conditional move; a vcond operation
15496 rather than a movcc operation. */
15498 bool
15499 ix86_expand_fp_vcond (rtx operands[])
15501 enum rtx_code code = GET_CODE (operands[3]);
15502 rtx cmp;
15504 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
15505 &operands[4], &operands[5]);
15506 if (code == UNKNOWN)
15507 return false;
15509 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
15510 operands[5], operands[1], operands[2]))
15511 return true;
15513 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
15514 operands[1], operands[2]);
15515 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
15516 return true;
15519 /* Expand a signed/unsigned integral vector conditional move. */
15521 bool
15522 ix86_expand_int_vcond (rtx operands[])
15524 enum machine_mode mode = GET_MODE (operands[0]);
15525 enum rtx_code code = GET_CODE (operands[3]);
15526 bool negate = false;
15527 rtx x, cop0, cop1;
15529 cop0 = operands[4];
15530 cop1 = operands[5];
15532 /* SSE5 supports all of the comparisons on all vector int types. */
15533 if (!TARGET_SSE5)
15535 /* Canonicalize the comparison to EQ, GT, GTU. */
15536 switch (code)
15538 case EQ:
15539 case GT:
15540 case GTU:
15541 break;
15543 case NE:
15544 case LE:
15545 case LEU:
15546 code = reverse_condition (code);
15547 negate = true;
15548 break;
15550 case GE:
15551 case GEU:
15552 code = reverse_condition (code);
15553 negate = true;
15554 /* FALLTHRU */
15556 case LT:
15557 case LTU:
15558 code = swap_condition (code);
15559 x = cop0, cop0 = cop1, cop1 = x;
15560 break;
15562 default:
15563 gcc_unreachable ();
15566 /* Only SSE4.1/SSE4.2 supports V2DImode. */
15567 if (mode == V2DImode)
15569 switch (code)
15571 case EQ:
15572 /* SSE4.1 supports EQ. */
15573 if (!TARGET_SSE4_1)
15574 return false;
15575 break;
15577 case GT:
15578 case GTU:
15579 /* SSE4.2 supports GT/GTU. */
15580 if (!TARGET_SSE4_2)
15581 return false;
15582 break;
15584 default:
15585 gcc_unreachable ();
15589 /* Unsigned parallel compare is not supported by the hardware. Play some
15590 tricks to turn this into a signed comparison against 0. */
15591 if (code == GTU)
15593 cop0 = force_reg (mode, cop0);
15595 switch (mode)
15597 case V4SImode:
15598 case V2DImode:
15600 rtx t1, t2, mask;
15602 /* Perform a parallel modulo subtraction. */
15603 t1 = gen_reg_rtx (mode);
15604 emit_insn ((mode == V4SImode
15605 ? gen_subv4si3
15606 : gen_subv2di3) (t1, cop0, cop1));
15608 /* Extract the original sign bit of op0. */
15609 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
15610 true, false);
15611 t2 = gen_reg_rtx (mode);
15612 emit_insn ((mode == V4SImode
15613 ? gen_andv4si3
15614 : gen_andv2di3) (t2, cop0, mask));
15616 /* XOR it back into the result of the subtraction. This results
15617 in the sign bit set iff we saw unsigned underflow. */
15618 x = gen_reg_rtx (mode);
15619 emit_insn ((mode == V4SImode
15620 ? gen_xorv4si3
15621 : gen_xorv2di3) (x, t1, t2));
15623 code = GT;
15625 break;
15627 case V16QImode:
15628 case V8HImode:
15629 /* Perform a parallel unsigned saturating subtraction. */
15630 x = gen_reg_rtx (mode);
15631 emit_insn (gen_rtx_SET (VOIDmode, x,
15632 gen_rtx_US_MINUS (mode, cop0, cop1)));
15634 code = EQ;
15635 negate = !negate;
15636 break;
15638 default:
15639 gcc_unreachable ();
15642 cop0 = x;
15643 cop1 = CONST0_RTX (mode);
15647 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
15648 operands[1+negate], operands[2-negate]);
15650 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
15651 operands[2-negate]);
15652 return true;
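/* Added illustrative note, not part of the original source: for the
   V16QImode/V8HImode cases above the unsigned comparison relies on the
   saturating-subtraction identity, e.g. for bytes (intrinsics as a sketch):

     a >u b   <==>   _mm_subs_epu8 (a, b) != 0             psubusb

   so an EQ-against-zero compare is emitted and NEGATE is flipped; the
   negation itself costs nothing because it merely swaps which of the two
   value operands the final conditional move selects.  */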
15655 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
15656 true if we should do zero extension, else sign extension. HIGH_P is
15657 true if we want the N/2 high elements, else the low elements. */
15659 void
15660 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15662 enum machine_mode imode = GET_MODE (operands[1]);
15663 rtx (*unpack)(rtx, rtx, rtx);
15664 rtx se, dest;
15666 switch (imode)
15668 case V16QImode:
15669 if (high_p)
15670 unpack = gen_vec_interleave_highv16qi;
15671 else
15672 unpack = gen_vec_interleave_lowv16qi;
15673 break;
15674 case V8HImode:
15675 if (high_p)
15676 unpack = gen_vec_interleave_highv8hi;
15677 else
15678 unpack = gen_vec_interleave_lowv8hi;
15679 break;
15680 case V4SImode:
15681 if (high_p)
15682 unpack = gen_vec_interleave_highv4si;
15683 else
15684 unpack = gen_vec_interleave_lowv4si;
15685 break;
15686 default:
15687 gcc_unreachable ();
15690 dest = gen_lowpart (imode, operands[0]);
15692 if (unsigned_p)
15693 se = force_reg (imode, CONST0_RTX (imode));
15694 else
15695 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
15696 operands[1], pc_rtx, pc_rtx);
15698 emit_insn (unpack (dest, operands[1], se));
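/* Added illustrative sketch, not part of the original source: without
   SSE4.1, sign extension is done by interleaving the source with a vector
   of per-element sign masks.  For the V8HImode -> V4SImode low-half case
   the rough SSE2 equivalent is:

     se   = _mm_cmpgt_epi16 (_mm_setzero_si128 (), op1);   0 > op1
     dest = _mm_unpacklo_epi16 (op1, se);                  punpcklwd

   For zero extension SE is simply the zero vector forced into a register.  */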
15701 /* This function performs the same task as ix86_expand_sse_unpack,
15702 but with SSE4.1 instructions. */
15704 void
15705 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15707 enum machine_mode imode = GET_MODE (operands[1]);
15708 rtx (*unpack)(rtx, rtx);
15709 rtx src, dest;
15711 switch (imode)
15713 case V16QImode:
15714 if (unsigned_p)
15715 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
15716 else
15717 unpack = gen_sse4_1_extendv8qiv8hi2;
15718 break;
15719 case V8HImode:
15720 if (unsigned_p)
15721 unpack = gen_sse4_1_zero_extendv4hiv4si2;
15722 else
15723 unpack = gen_sse4_1_extendv4hiv4si2;
15724 break;
15725 case V4SImode:
15726 if (unsigned_p)
15727 unpack = gen_sse4_1_zero_extendv2siv2di2;
15728 else
15729 unpack = gen_sse4_1_extendv2siv2di2;
15730 break;
15731 default:
15732 gcc_unreachable ();
15735 dest = operands[0];
15736 if (high_p)
15738 /* Shift higher 8 bytes to lower 8 bytes. */
15739 src = gen_reg_rtx (imode);
15740 emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, src),
15741 gen_lowpart (TImode, operands[1]),
15742 GEN_INT (64)));
15744 else
15745 src = operands[1];
15747 emit_insn (unpack (dest, src));
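/* Added illustrative note, not part of the original source: with SSE4.1
   the extension becomes a single PMOVSX/PMOVZX instruction, e.g. the
   signed V8HImode -> V4SImode low-half case is roughly

     dest = _mm_cvtepi16_epi32 (op1);                      pmovsxwd

   and the high half is first shifted down by 8 bytes (psrldq $8, as
   emitted above) because PMOVSX/PMOVZX only read the low elements.  */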
15750 /* This function performs the same task as ix86_expand_sse_unpack,
15751 but with sse5 instructions. */
15753 void
15754 ix86_expand_sse5_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15756 enum machine_mode imode = GET_MODE (operands[1]);
15757 int pperm_bytes[16];
15758 int i;
15759 int h = (high_p) ? 8 : 0;
15760 int h2;
15761 int sign_extend;
15762 rtvec v = rtvec_alloc (16);
15763 rtvec vs;
15764 rtx x, p;
15765 rtx op0 = operands[0], op1 = operands[1];
15767 switch (imode)
15769 case V16QImode:
15770 vs = rtvec_alloc (8);
15771 h2 = (high_p) ? 8 : 0;
15772 for (i = 0; i < 8; i++)
15774 pperm_bytes[2*i+0] = PPERM_SRC | PPERM_SRC2 | i | h;
15775 pperm_bytes[2*i+1] = ((unsigned_p)
15776 ? PPERM_ZERO
15777 : PPERM_SIGN | PPERM_SRC2 | i | h);
15780 for (i = 0; i < 16; i++)
15781 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15783 for (i = 0; i < 8; i++)
15784 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15786 p = gen_rtx_PARALLEL (VOIDmode, vs);
15787 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15788 if (unsigned_p)
15789 emit_insn (gen_sse5_pperm_zero_v16qi_v8hi (op0, op1, p, x));
15790 else
15791 emit_insn (gen_sse5_pperm_sign_v16qi_v8hi (op0, op1, p, x));
15792 break;
15794 case V8HImode:
15795 vs = rtvec_alloc (4);
15796 h2 = (high_p) ? 4 : 0;
15797 for (i = 0; i < 4; i++)
15799 sign_extend = ((unsigned_p)
15800 ? PPERM_ZERO
15801 : PPERM_SIGN | PPERM_SRC2 | ((2*i) + 1 + h));
15802 pperm_bytes[4*i+0] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 0 + h);
15803 pperm_bytes[4*i+1] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 1 + h);
15804 pperm_bytes[4*i+2] = sign_extend;
15805 pperm_bytes[4*i+3] = sign_extend;
15808 for (i = 0; i < 16; i++)
15809 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15811 for (i = 0; i < 4; i++)
15812 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15814 p = gen_rtx_PARALLEL (VOIDmode, vs);
15815 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15816 if (unsigned_p)
15817 emit_insn (gen_sse5_pperm_zero_v8hi_v4si (op0, op1, p, x));
15818 else
15819 emit_insn (gen_sse5_pperm_sign_v8hi_v4si (op0, op1, p, x));
15820 break;
15822 case V4SImode:
15823 vs = rtvec_alloc (2);
15824 h2 = (high_p) ? 2 : 0;
15825 for (i = 0; i < 2; i++)
15827 sign_extend = ((unsigned_p)
15828 ? PPERM_ZERO
15829 : PPERM_SIGN | PPERM_SRC2 | ((4*i) + 3 + h));
15830 pperm_bytes[8*i+0] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 0 + h);
15831 pperm_bytes[8*i+1] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 1 + h);
15832 pperm_bytes[8*i+2] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 2 + h);
15833 pperm_bytes[8*i+3] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 3 + h);
15834 pperm_bytes[8*i+4] = sign_extend;
15835 pperm_bytes[8*i+5] = sign_extend;
15836 pperm_bytes[8*i+6] = sign_extend;
15837 pperm_bytes[8*i+7] = sign_extend;
15840 for (i = 0; i < 16; i++)
15841 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15843 for (i = 0; i < 2; i++)
15844 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15846 p = gen_rtx_PARALLEL (VOIDmode, vs);
15847 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15848 if (unsigned_p)
15849 emit_insn (gen_sse5_pperm_zero_v4si_v2di (op0, op1, p, x));
15850 else
15851 emit_insn (gen_sse5_pperm_sign_v4si_v2di (op0, op1, p, x));
15852 break;
15854 default:
15855 gcc_unreachable ();
15858 return;
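/* Added illustrative note, not part of the original source: each PPERM
   selector byte names one source byte plus an operation.  For the
   V16QImode, unsigned, low-half case the selector built above is roughly
   { byte 0, ZERO, byte 1, ZERO, ..., byte 7, ZERO }, i.e. every low-half
   source byte followed by a forced zero byte, which is exactly a
   byte-to-word zero extension; the signed variant replaces each ZERO with
   a PPERM_SIGN replication of the same source byte.  */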
15861 /* Pack the high bits from OPERANDS[1] and low bits from OPERANDS[2] into the
15862 next narrower integer vector type */
15863 void
15864 ix86_expand_sse5_pack (rtx operands[3])
15866 enum machine_mode imode = GET_MODE (operands[0]);
15867 int pperm_bytes[16];
15868 int i;
15869 rtvec v = rtvec_alloc (16);
15870 rtx x;
15871 rtx op0 = operands[0];
15872 rtx op1 = operands[1];
15873 rtx op2 = operands[2];
15875 switch (imode)
15877 case V16QImode:
15878 for (i = 0; i < 8; i++)
15880 pperm_bytes[i+0] = PPERM_SRC | PPERM_SRC1 | (i*2);
15881 pperm_bytes[i+8] = PPERM_SRC | PPERM_SRC2 | (i*2);
15884 for (i = 0; i < 16; i++)
15885 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15887 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15888 emit_insn (gen_sse5_pperm_pack_v8hi_v16qi (op0, op1, op2, x));
15889 break;
15891 case V8HImode:
15892 for (i = 0; i < 4; i++)
15894 pperm_bytes[(2*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 0);
15895 pperm_bytes[(2*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 1);
15896 pperm_bytes[(2*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 0);
15897 pperm_bytes[(2*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 1);
15900 for (i = 0; i < 16; i++)
15901 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15903 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15904 emit_insn (gen_sse5_pperm_pack_v4si_v8hi (op0, op1, op2, x));
15905 break;
15907 case V4SImode:
15908 for (i = 0; i < 2; i++)
15910 pperm_bytes[(4*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 0);
15911 pperm_bytes[(4*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 1);
15912 pperm_bytes[(4*i)+2] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 2);
15913 pperm_bytes[(4*i)+3] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 3);
15914 pperm_bytes[(4*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 0);
15915 pperm_bytes[(4*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 1);
15916 pperm_bytes[(4*i)+10] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 2);
15917 pperm_bytes[(4*i)+11] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 3);
15920 for (i = 0; i < 16; i++)
15921 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15923 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15924 emit_insn (gen_sse5_pperm_pack_v2di_v4si (op0, op1, op2, x));
15925 break;
15927 default:
15928 gcc_unreachable ();
15931 return;
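/* Added illustrative note, not part of the original source: the pack
   selectors above keep only the low half of every element.  For the
   V8HImode -> V16QImode case the result is { even bytes 0,2,...,14 of
   OPERANDS[1], even bytes 0,2,...,14 of OPERANDS[2] }, i.e. a truncating
   pack rather than a saturating PACKSS/PACKUS style pack.  */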
15934 /* Expand conditional increment or decrement using adc/sbb instructions.
15935 The default case using setcc followed by the conditional move can be
15936 done by generic code. */
15938 ix86_expand_int_addcc (rtx operands[])
15940 enum rtx_code code = GET_CODE (operands[1]);
15941 rtx compare_op;
15942 rtx val = const0_rtx;
15943 bool fpcmp = false;
15944 enum machine_mode mode = GET_MODE (operands[0]);
15946 if (operands[3] != const1_rtx
15947 && operands[3] != constm1_rtx)
15948 return 0;
15949 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15950 ix86_compare_op1, &compare_op))
15951 return 0;
15952 code = GET_CODE (compare_op);
15954 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15955 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15957 fpcmp = true;
15958 code = ix86_fp_compare_code_to_integer (code);
15961 if (code != LTU)
15963 val = constm1_rtx;
15964 if (fpcmp)
15965 PUT_CODE (compare_op,
15966 reverse_condition_maybe_unordered
15967 (GET_CODE (compare_op)));
15968 else
15969 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
15971 PUT_MODE (compare_op, mode);
15973 /* Construct either adc or sbb insn. */
15974 if ((code == LTU) == (operands[3] == constm1_rtx))
15976 switch (GET_MODE (operands[0]))
15978 case QImode:
15979 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
15980 break;
15981 case HImode:
15982 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
15983 break;
15984 case SImode:
15985 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
15986 break;
15987 case DImode:
15988 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
15989 break;
15990 default:
15991 gcc_unreachable ();
15994 else
15996 switch (GET_MODE (operands[0]))
15998 case QImode:
15999 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
16000 break;
16001 case HImode:
16002 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
16003 break;
16004 case SImode:
16005 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
16006 break;
16007 case DImode:
16008 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
16009 break;
16010 default:
16011 gcc_unreachable ();
16014 return 1; /* DONE */
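/* Added illustrative sketch, not part of the original source: a
   conditional increment such as

     if (a < b) x++;        unsigned a, b

   is expanded by the function above into a carry-flag compare followed by
   an add-with-carry, roughly:

     cmpl %esi, %edi        CF set iff a < b (unsigned)
     adcl $0, %eax          x += CF

   the sbb variant covers the decrement and reversed-condition cases.  */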
16018 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16019 works for floating point parameters and non-offsettable memories.
16020 For pushes, it returns just stack offsets; the values will be saved
16021 in the right order. At most four parts are generated. */
16023 static int
16024 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16026 int size;
16028 if (!TARGET_64BIT)
16029 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16030 else
16031 size = (GET_MODE_SIZE (mode) + 4) / 8;
16033 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16034 gcc_assert (size >= 2 && size <= 4);
16036 /* Optimize constant pool reference to immediates. This is used by fp
16037 moves, that force all constants to memory to allow combining. */
16038 if (MEM_P (operand) && MEM_READONLY_P (operand))
16040 rtx tmp = maybe_get_pool_constant (operand);
16041 if (tmp)
16042 operand = tmp;
16045 if (MEM_P (operand) && !offsettable_memref_p (operand))
16047 /* The only non-offsetable memories we handle are pushes. */
16048 int ok = push_operand (operand, VOIDmode);
16050 gcc_assert (ok);
16052 operand = copy_rtx (operand);
16053 PUT_MODE (operand, Pmode);
16054 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16055 return size;
16058 if (GET_CODE (operand) == CONST_VECTOR)
16060 enum machine_mode imode = int_mode_for_mode (mode);
16061 /* Caution: if we looked through a constant pool memory above,
16062 the operand may actually have a different mode now. That's
16063 ok, since we want to pun this all the way back to an integer. */
16064 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16065 gcc_assert (operand != NULL);
16066 mode = imode;
16069 if (!TARGET_64BIT)
16071 if (mode == DImode)
16072 split_di (&operand, 1, &parts[0], &parts[1]);
16073 else
16075 int i;
16077 if (REG_P (operand))
16079 gcc_assert (reload_completed);
16080 for (i = 0; i < size; i++)
16081 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16083 else if (offsettable_memref_p (operand))
16085 operand = adjust_address (operand, SImode, 0);
16086 parts[0] = operand;
16087 for (i = 1; i < size; i++)
16088 parts[i] = adjust_address (operand, SImode, 4 * i);
16090 else if (GET_CODE (operand) == CONST_DOUBLE)
16092 REAL_VALUE_TYPE r;
16093 long l[4];
16095 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16096 switch (mode)
16098 case TFmode:
16099 real_to_target (l, &r, mode);
16100 parts[3] = gen_int_mode (l[3], SImode);
16101 parts[2] = gen_int_mode (l[2], SImode);
16102 break;
16103 case XFmode:
16104 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16105 parts[2] = gen_int_mode (l[2], SImode);
16106 break;
16107 case DFmode:
16108 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16109 break;
16110 default:
16111 gcc_unreachable ();
16113 parts[1] = gen_int_mode (l[1], SImode);
16114 parts[0] = gen_int_mode (l[0], SImode);
16116 else
16117 gcc_unreachable ();
16120 else
16122 if (mode == TImode)
16123 split_ti (&operand, 1, &parts[0], &parts[1]);
16124 if (mode == XFmode || mode == TFmode)
16126 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
16127 if (REG_P (operand))
16129 gcc_assert (reload_completed);
16130 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16131 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16133 else if (offsettable_memref_p (operand))
16135 operand = adjust_address (operand, DImode, 0);
16136 parts[0] = operand;
16137 parts[1] = adjust_address (operand, upper_mode, 8);
16139 else if (GET_CODE (operand) == CONST_DOUBLE)
16141 REAL_VALUE_TYPE r;
16142 long l[4];
16144 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16145 real_to_target (l, &r, mode);
16147 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16148 if (HOST_BITS_PER_WIDE_INT >= 64)
16149 parts[0]
16150 = gen_int_mode
16151 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16152 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16153 DImode);
16154 else
16155 parts[0] = immed_double_const (l[0], l[1], DImode);
16157 if (upper_mode == SImode)
16158 parts[1] = gen_int_mode (l[2], SImode);
16159 else if (HOST_BITS_PER_WIDE_INT >= 64)
16160 parts[1]
16161 = gen_int_mode
16162 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16163 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16164 DImode);
16165 else
16166 parts[1] = immed_double_const (l[2], l[3], DImode);
16168 else
16169 gcc_unreachable ();
16173 return size;
16176 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16177 All required insns are emitted here, so the caller never needs to
16178 emit normal moves afterwards. Operands 2-4 contain the input values
16179 in the correct order; operands 5-7 contain the output values. */
16181 void
16182 ix86_split_long_move (rtx operands[])
16184 rtx part[2][4];
16185 int nparts, i, j;
16186 int push = 0;
16187 int collisions = 0;
16188 enum machine_mode mode = GET_MODE (operands[0]);
16189 bool collisionparts[4];
16191 /* The DFmode expanders may ask us to move a double.
16192 For a 64-bit target this is a single move. By hiding that fact
16193 here we simplify the i386.md splitters. */
16194 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16196 /* Optimize constant pool reference to immediates. This is used by
16197 fp moves, that force all constants to memory to allow combining. */
16199 if (MEM_P (operands[1])
16200 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16201 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16202 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16203 if (push_operand (operands[0], VOIDmode))
16205 operands[0] = copy_rtx (operands[0]);
16206 PUT_MODE (operands[0], Pmode);
16208 else
16209 operands[0] = gen_lowpart (DImode, operands[0]);
16210 operands[1] = gen_lowpart (DImode, operands[1]);
16211 emit_move_insn (operands[0], operands[1]);
16212 return;
16215 /* The only non-offsettable memory we handle is push. */
16216 if (push_operand (operands[0], VOIDmode))
16217 push = 1;
16218 else
16219 gcc_assert (!MEM_P (operands[0])
16220 || offsettable_memref_p (operands[0]));
16222 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16223 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16225 /* When emitting a push, take care with source operands on the stack. */
16226 if (push && MEM_P (operands[1])
16227 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16228 for (i = 0; i < nparts - 1; i++)
16229 part[1][i] = change_address (part[1][i],
16230 GET_MODE (part[1][i]),
16231 XEXP (part[1][i + 1], 0));
16233 /* We need to do the copy in the right order in case an address register
16234 of the source overlaps the destination. */
16235 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16237 rtx tmp;
16239 for (i = 0; i < nparts; i++)
16241 collisionparts[i]
16242 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16243 if (collisionparts[i])
16244 collisions++;
16247 /* Collision in the middle part can be handled by reordering. */
16248 if (collisions == 1 && nparts == 3 && collisionparts [1])
16250 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16251 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16253 else if (collisions == 1
16254 && nparts == 4
16255 && (collisionparts [1] || collisionparts [2]))
16257 if (collisionparts [1])
16259 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16260 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16262 else
16264 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16265 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16269 /* If there are more collisions, we can't handle them by reordering.
16270 Do an lea into the last part and use only one colliding move. */
16271 else if (collisions > 1)
16273 rtx base;
16275 collisions = 1;
16277 base = part[0][nparts - 1];
16279 /* Handle the case when the last part isn't valid for lea.
16280 Happens in 64-bit mode storing the 12-byte XFmode. */
16281 if (GET_MODE (base) != Pmode)
16282 base = gen_rtx_REG (Pmode, REGNO (base));
16284 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16285 part[1][0] = replace_equiv_address (part[1][0], base);
16286 for (i = 1; i < nparts; i++)
16288 tmp = plus_constant (base, UNITS_PER_WORD * i);
16289 part[1][i] = replace_equiv_address (part[1][i], tmp);
16294 if (push)
16296 if (!TARGET_64BIT)
16298 if (nparts == 3)
16300 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16301 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
16302 emit_move_insn (part[0][2], part[1][2]);
16304 else if (nparts == 4)
16306 emit_move_insn (part[0][3], part[1][3]);
16307 emit_move_insn (part[0][2], part[1][2]);
16310 else
16312 /* In 64-bit mode we don't have a 32-bit push available. If the part is
16313 a register, that is OK - we will just use the larger counterpart. We also
16314 retype memory - these come from an attempt to avoid a REX prefix on
16315 moving the second half of a TFmode value. */
16316 if (GET_MODE (part[1][1]) == SImode)
16318 switch (GET_CODE (part[1][1]))
16320 case MEM:
16321 part[1][1] = adjust_address (part[1][1], DImode, 0);
16322 break;
16324 case REG:
16325 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16326 break;
16328 default:
16329 gcc_unreachable ();
16332 if (GET_MODE (part[1][0]) == SImode)
16333 part[1][0] = part[1][1];
16336 emit_move_insn (part[0][1], part[1][1]);
16337 emit_move_insn (part[0][0], part[1][0]);
16338 return;
16341 /* Choose the correct order so we do not overwrite the source before it is copied. */
16342 if ((REG_P (part[0][0])
16343 && REG_P (part[1][1])
16344 && (REGNO (part[0][0]) == REGNO (part[1][1])
16345 || (nparts == 3
16346 && REGNO (part[0][0]) == REGNO (part[1][2]))
16347 || (nparts == 4
16348 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16349 || (collisions > 0
16350 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16352 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16354 operands[2 + i] = part[0][j];
16355 operands[6 + i] = part[1][j];
16358 else
16360 for (i = 0; i < nparts; i++)
16362 operands[2 + i] = part[0][i];
16363 operands[6 + i] = part[1][i];
16367 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16368 if (optimize_insn_for_size_p ())
16370 for (j = 0; j < nparts - 1; j++)
16371 if (CONST_INT_P (operands[6 + j])
16372 && operands[6 + j] != const0_rtx
16373 && REG_P (operands[2 + j]))
16374 for (i = j; i < nparts - 1; i++)
16375 if (CONST_INT_P (operands[7 + i])
16376 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16377 operands[7 + i] = operands[2 + j];
16380 for (i = 0; i < nparts; i++)
16381 emit_move_insn (operands[2 + i], operands[6 + i]);
16383 return;
16386 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16387 left shift by a constant, either using a single shift or
16388 a sequence of add instructions. */
16390 static void
16391 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16393 if (count == 1)
16395 emit_insn ((mode == DImode
16396 ? gen_addsi3
16397 : gen_adddi3) (operand, operand, operand));
16399 else if (!optimize_insn_for_size_p ()
16400 && count * ix86_cost->add <= ix86_cost->shift_const)
16402 int i;
16403 for (i=0; i<count; i++)
16405 emit_insn ((mode == DImode
16406 ? gen_addsi3
16407 : gen_adddi3) (operand, operand, operand));
16410 else
16411 emit_insn ((mode == DImode
16412 ? gen_ashlsi3
16413 : gen_ashldi3) (operand, operand, GEN_INT (count)));
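/* Added illustrative note, not part of the original source: e.g. a left
   shift by 2 may come out as

     addl %eax, %eax
     addl %eax, %eax

   when two adds are no more costly than one immediate shift on the tuned
   CPU (and we are not optimizing for size); otherwise a single
   "sall $2, %eax" is emitted.  */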
16416 void
16417 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
16419 rtx low[2], high[2];
16420 int count;
16421 const int single_width = mode == DImode ? 32 : 64;
16423 if (CONST_INT_P (operands[2]))
16425 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16426 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16428 if (count >= single_width)
16430 emit_move_insn (high[0], low[1]);
16431 emit_move_insn (low[0], const0_rtx);
16433 if (count > single_width)
16434 ix86_expand_ashl_const (high[0], count - single_width, mode);
16436 else
16438 if (!rtx_equal_p (operands[0], operands[1]))
16439 emit_move_insn (operands[0], operands[1]);
16440 emit_insn ((mode == DImode
16441 ? gen_x86_shld
16442 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
16443 ix86_expand_ashl_const (low[0], count, mode);
16445 return;
16448 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16450 if (operands[1] == const1_rtx)
16452 /* Assuming we've chosen QImode-capable registers, 1 << N
16453 can be done with two 32/64-bit shifts, no branches, no cmoves. */
16454 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
16456 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
16458 ix86_expand_clear (low[0]);
16459 ix86_expand_clear (high[0]);
16460 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
16462 d = gen_lowpart (QImode, low[0]);
16463 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16464 s = gen_rtx_EQ (QImode, flags, const0_rtx);
16465 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16467 d = gen_lowpart (QImode, high[0]);
16468 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16469 s = gen_rtx_NE (QImode, flags, const0_rtx);
16470 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16473 /* Otherwise, we can get the same results by manually performing
16474 a bit extract operation on bit 5/6, and then performing the two
16475 shifts. The two methods of getting 0/1 into low/high are exactly
16476 the same size. Avoiding the shift in the bit extract case helps
16477 pentium4 a bit; no one else seems to care much either way. */
16478 else
16480 rtx x;
16482 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
16483 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
16484 else
16485 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
16486 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
16488 emit_insn ((mode == DImode
16489 ? gen_lshrsi3
16490 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
16491 emit_insn ((mode == DImode
16492 ? gen_andsi3
16493 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
16494 emit_move_insn (low[0], high[0]);
16495 emit_insn ((mode == DImode
16496 ? gen_xorsi3
16497 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
16500 emit_insn ((mode == DImode
16501 ? gen_ashlsi3
16502 : gen_ashldi3) (low[0], low[0], operands[2]));
16503 emit_insn ((mode == DImode
16504 ? gen_ashlsi3
16505 : gen_ashldi3) (high[0], high[0], operands[2]));
16506 return;
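/* Added illustrative sketch, not part of the original source: for
   "1 << n" with variable n on a 32-bit target, the QImode-register path
   above emits roughly

     xorl  %eax, %eax       low  = 0
     xorl  %edx, %edx       high = 0
     testb $32, %cl
     sete  %al              low  = (n & 32) == 0
     setne %dl              high = (n & 32) != 0
     sall  %cl, %eax        hardware shift counts are taken mod 32
     sall  %cl, %edx

   so exactly one of low/high ends up holding 1 << (n & 31).  */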
16509 if (operands[1] == constm1_rtx)
16511 /* For -1 << N, we can avoid the shld instruction, because we
16512 know that we're shifting 0...31/63 ones into a -1. */
16513 emit_move_insn (low[0], constm1_rtx);
16514 if (optimize_insn_for_size_p ())
16515 emit_move_insn (high[0], low[0]);
16516 else
16517 emit_move_insn (high[0], constm1_rtx);
16519 else
16521 if (!rtx_equal_p (operands[0], operands[1]))
16522 emit_move_insn (operands[0], operands[1]);
16524 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16525 emit_insn ((mode == DImode
16526 ? gen_x86_shld
16527 : gen_x86_64_shld) (high[0], low[0], operands[2]));
16530 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
16532 if (TARGET_CMOVE && scratch)
16534 ix86_expand_clear (scratch);
16535 emit_insn ((mode == DImode
16536 ? gen_x86_shift_adj_1
16537 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
16538 scratch));
16540 else
16541 emit_insn ((mode == DImode
16542 ? gen_x86_shift_adj_2
16543 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
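/* Added illustrative sketch, not part of the original source: the general
   variable DImode shift-left on a 32-bit target built above is roughly

     shldl %cl, %eax, %edx      high = high:low << (n & 31)
     sall  %cl, %eax            low <<= (n & 31)

   followed by the "adjust" pattern handling n >= 32, i.e.
   if (n & 32) { high = low; low = 0; }, done either with cmov and a
   cleared scratch (x86_shift_adj_1) or a short branch (x86_shift_adj_2).  */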
16546 void
16547 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
16549 rtx low[2], high[2];
16550 int count;
16551 const int single_width = mode == DImode ? 32 : 64;
16553 if (CONST_INT_P (operands[2]))
16555 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16556 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16558 if (count == single_width * 2 - 1)
16560 emit_move_insn (high[0], high[1]);
16561 emit_insn ((mode == DImode
16562 ? gen_ashrsi3
16563 : gen_ashrdi3) (high[0], high[0],
16564 GEN_INT (single_width - 1)));
16565 emit_move_insn (low[0], high[0]);
16568 else if (count >= single_width)
16570 emit_move_insn (low[0], high[1]);
16571 emit_move_insn (high[0], low[0]);
16572 emit_insn ((mode == DImode
16573 ? gen_ashrsi3
16574 : gen_ashrdi3) (high[0], high[0],
16575 GEN_INT (single_width - 1)));
16576 if (count > single_width)
16577 emit_insn ((mode == DImode
16578 ? gen_ashrsi3
16579 : gen_ashrdi3) (low[0], low[0],
16580 GEN_INT (count - single_width)));
16582 else
16584 if (!rtx_equal_p (operands[0], operands[1]))
16585 emit_move_insn (operands[0], operands[1]);
16586 emit_insn ((mode == DImode
16587 ? gen_x86_shrd
16588 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
16589 emit_insn ((mode == DImode
16590 ? gen_ashrsi3
16591 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
16594 else
16596 if (!rtx_equal_p (operands[0], operands[1]))
16597 emit_move_insn (operands[0], operands[1]);
16599 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16601 emit_insn ((mode == DImode
16602 ? gen_x86_shrd
16603 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
16604 emit_insn ((mode == DImode
16605 ? gen_ashrsi3
16606 : gen_ashrdi3) (high[0], high[0], operands[2]));
16608 if (TARGET_CMOVE && scratch)
16610 emit_move_insn (scratch, high[0]);
16611 emit_insn ((mode == DImode
16612 ? gen_ashrsi3
16613 : gen_ashrdi3) (scratch, scratch,
16614 GEN_INT (single_width - 1)));
16615 emit_insn ((mode == DImode
16616 ? gen_x86_shift_adj_1
16617 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
16618 scratch));
16620 else
16621 emit_insn ((mode == DImode
16622 ? gen_x86_shift_adj_3
16623 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
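/* Added illustrative note, not part of the original source: the
   arithmetic-right split differs from the shift-left split only in the
   n >= 32 adjustment, where the high word must be refilled with copies of
   the sign bit; the cmov path pre-computes "sarl $31" of the high word
   into SCRATCH, while x86_shift_adj_3 does the equivalent with a branch.  */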
16627 void
16628 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
16630 rtx low[2], high[2];
16631 int count;
16632 const int single_width = mode == DImode ? 32 : 64;
16634 if (CONST_INT_P (operands[2]))
16636 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16637 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16639 if (count >= single_width)
16641 emit_move_insn (low[0], high[1]);
16642 ix86_expand_clear (high[0]);
16644 if (count > single_width)
16645 emit_insn ((mode == DImode
16646 ? gen_lshrsi3
16647 : gen_lshrdi3) (low[0], low[0],
16648 GEN_INT (count - single_width)));
16650 else
16652 if (!rtx_equal_p (operands[0], operands[1]))
16653 emit_move_insn (operands[0], operands[1]);
16654 emit_insn ((mode == DImode
16655 ? gen_x86_shrd
16656 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
16657 emit_insn ((mode == DImode
16658 ? gen_lshrsi3
16659 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
16662 else
16664 if (!rtx_equal_p (operands[0], operands[1]))
16665 emit_move_insn (operands[0], operands[1]);
16667 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16669 emit_insn ((mode == DImode
16670 ? gen_x86_shrd
16671 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
16672 emit_insn ((mode == DImode
16673 ? gen_lshrsi3
16674 : gen_lshrdi3) (high[0], high[0], operands[2]));
16676 /* Heh. By reversing the arguments, we can reuse this pattern. */
16677 if (TARGET_CMOVE && scratch)
16679 ix86_expand_clear (scratch);
16680 emit_insn ((mode == DImode
16681 ? gen_x86_shift_adj_1
16682 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
16683 scratch));
16685 else
16686 emit_insn ((mode == DImode
16687 ? gen_x86_shift_adj_2
16688 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
16692 /* Predict just emitted jump instruction to be taken with probability PROB. */
16693 static void
16694 predict_jump (int prob)
16696 rtx insn = get_last_insn ();
16697 gcc_assert (JUMP_P (insn));
16698 REG_NOTES (insn)
16699 = gen_rtx_EXPR_LIST (REG_BR_PROB,
16700 GEN_INT (prob),
16701 REG_NOTES (insn));
16704 /* Helper function for the string operations below. Test VARIABLE to see
16705 whether it is aligned to VALUE bytes. If so, jump to the returned label. */
16706 static rtx
16707 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
16709 rtx label = gen_label_rtx ();
16710 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
16711 if (GET_MODE (variable) == DImode)
16712 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
16713 else
16714 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
16715 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
16716 1, label);
16717 if (epilogue)
16718 predict_jump (REG_BR_PROB_BASE * 50 / 100);
16719 else
16720 predict_jump (REG_BR_PROB_BASE * 90 / 100);
16721 return label;
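/* Added illustrative usage note, not part of the original source; the
   typical calling pattern in the epilogues below is

     rtx label = ix86_expand_aligntest (count, 4, true);
     ... emit code for the (count & 4) != 0 case ...
     emit_label (label);
     LABEL_NUSES (label) = 1;

   i.e. the code between the call and the label runs only when
   (count & 4) != 0; the probability note makes the skip likely (90%)
   for prologue tests and a coin flip (50%) for epilogue tests.  */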
16724 /* Decrease COUNTREG by VALUE. */
16725 static void
16726 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
16728 if (GET_MODE (countreg) == DImode)
16729 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
16730 else
16731 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
16734 /* Zero extend possibly SImode EXP to Pmode register. */
16736 ix86_zero_extend_to_Pmode (rtx exp)
16738 rtx r;
16739 if (GET_MODE (exp) == VOIDmode)
16740 return force_reg (Pmode, exp);
16741 if (GET_MODE (exp) == Pmode)
16742 return copy_to_mode_reg (Pmode, exp);
16743 r = gen_reg_rtx (Pmode);
16744 emit_insn (gen_zero_extendsidi2 (r, exp));
16745 return r;
16748 /* Divide COUNTREG by SCALE. */
16749 static rtx
16750 scale_counter (rtx countreg, int scale)
16752 rtx sc;
16753 rtx piece_size_mask;
16755 if (scale == 1)
16756 return countreg;
16757 if (CONST_INT_P (countreg))
16758 return GEN_INT (INTVAL (countreg) / scale);
16759 gcc_assert (REG_P (countreg));
16761 piece_size_mask = GEN_INT (scale - 1);
16762 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
16763 GEN_INT (exact_log2 (scale)),
16764 NULL, 1, OPTAB_DIRECT);
16765 return sc;
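/* Added illustrative note, not part of the original source: e.g. a byte
   count that will be copied in SImode chunks (SCALE == 4) becomes

     shrl $2, %ecx

   while a compile-time constant count is simply divided here.  */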
16768 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
16769 DImode for constant loop counts. */
16771 static enum machine_mode
16772 counter_mode (rtx count_exp)
16774 if (GET_MODE (count_exp) != VOIDmode)
16775 return GET_MODE (count_exp);
16776 if (GET_CODE (count_exp) != CONST_INT)
16777 return Pmode;
16778 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
16779 return DImode;
16780 return SImode;
16783 /* When SRCPTR is non-NULL, output a simple loop to move memory
16784 pointed to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
16785 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
16786 an equivalent loop that sets memory to VALUE (assumed to be in MODE).
16788 The size is rounded down to a whole number of chunks moved at once.
16789 SRCMEM and DESTMEM provide MEM rtx'es to feed proper aliasing info. */
16792 static void
16793 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
16794 rtx destptr, rtx srcptr, rtx value,
16795 rtx count, enum machine_mode mode, int unroll,
16796 int expected_size)
16798 rtx out_label, top_label, iter, tmp;
16799 enum machine_mode iter_mode = counter_mode (count);
16800 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
16801 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
16802 rtx size;
16803 rtx x_addr;
16804 rtx y_addr;
16805 int i;
16807 top_label = gen_label_rtx ();
16808 out_label = gen_label_rtx ();
16809 iter = gen_reg_rtx (iter_mode);
16811 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
16812 NULL, 1, OPTAB_DIRECT);
16813 /* Those two should combine. */
16814 if (piece_size == const1_rtx)
16816 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
16817 true, out_label);
16818 predict_jump (REG_BR_PROB_BASE * 10 / 100);
16820 emit_move_insn (iter, const0_rtx);
16822 emit_label (top_label);
16824 tmp = convert_modes (Pmode, iter_mode, iter, true);
16825 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
16826 destmem = change_address (destmem, mode, x_addr);
16828 if (srcmem)
16830 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
16831 srcmem = change_address (srcmem, mode, y_addr);
16833 /* When unrolling for chips that reorder memory reads and writes,
16834 we can save registers by using a single temporary.
16835 Also, using 4 temporaries is overkill in 32-bit mode. */
16836 if (!TARGET_64BIT && 0)
16838 for (i = 0; i < unroll; i++)
16840 if (i)
16842 destmem =
16843 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16844 srcmem =
16845 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
16847 emit_move_insn (destmem, srcmem);
16850 else
16852 rtx tmpreg[4];
16853 gcc_assert (unroll <= 4);
16854 for (i = 0; i < unroll; i++)
16856 tmpreg[i] = gen_reg_rtx (mode);
16857 if (i)
16859 srcmem =
16860 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
16862 emit_move_insn (tmpreg[i], srcmem);
16864 for (i = 0; i < unroll; i++)
16866 if (i)
16868 destmem =
16869 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16871 emit_move_insn (destmem, tmpreg[i]);
16875 else
16876 for (i = 0; i < unroll; i++)
16878 if (i)
16879 destmem =
16880 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16881 emit_move_insn (destmem, value);
16884 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
16885 true, OPTAB_LIB_WIDEN);
16886 if (tmp != iter)
16887 emit_move_insn (iter, tmp);
16889 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
16890 true, top_label);
16891 if (expected_size != -1)
16893 expected_size /= GET_MODE_SIZE (mode) * unroll;
16894 if (expected_size == 0)
16895 predict_jump (0);
16896 else if (expected_size > REG_BR_PROB_BASE)
16897 predict_jump (REG_BR_PROB_BASE - 1);
16898 else
16899 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
16901 else
16902 predict_jump (REG_BR_PROB_BASE * 80 / 100);
16903 iter = ix86_zero_extend_to_Pmode (iter);
16904 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
16905 true, OPTAB_LIB_WIDEN);
16906 if (tmp != destptr)
16907 emit_move_insn (destptr, tmp);
16908 if (srcptr)
16910 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
16911 true, OPTAB_LIB_WIDEN);
16912 if (tmp != srcptr)
16913 emit_move_insn (srcptr, tmp);
16915 emit_label (out_label);
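/* Added illustrative sketch, not part of the original source: for a copy
   with MODE == SImode and UNROLL == 2 the emitted RTL is roughly
   equivalent to

     size = count & ~7;                          whole 8-byte chunks only
     for (iter = 0; iter < size; iter += 8)
       {
         ((int *) (dest + iter))[0] = ((int *) (src + iter))[0];
         ((int *) (dest + iter))[1] = ((int *) (src + iter))[1];
       }
     dest += iter; src += iter;                  tail handled by the caller

   with all loads grouped before the stores when temporaries are used.  */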
16918 /* Output a "rep; mov" instruction.
16919 Arguments have the same meaning as for the previous function. */
16920 static void
16921 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
16922 rtx destptr, rtx srcptr,
16923 rtx count,
16924 enum machine_mode mode)
16926 rtx destexp;
16927 rtx srcexp;
16928 rtx countreg;
16930 /* If the size is known, it is shorter to use rep movs. */
16931 if (mode == QImode && CONST_INT_P (count)
16932 && !(INTVAL (count) & 3))
16933 mode = SImode;
16935 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
16936 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
16937 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
16938 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
16939 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
16940 if (mode != QImode)
16942 destexp = gen_rtx_ASHIFT (Pmode, countreg,
16943 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
16944 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
16945 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
16946 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
16947 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
16949 else
16951 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
16952 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
16954 if (CONST_INT_P (count))
16956 count = GEN_INT (INTVAL (count)
16957 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
16958 destmem = shallow_copy_rtx (destmem);
16959 srcmem = shallow_copy_rtx (srcmem);
16960 set_mem_size (destmem, count);
16961 set_mem_size (srcmem, count);
16963 else
16965 if (MEM_SIZE (destmem))
16966 set_mem_size (destmem, NULL_RTX);
16967 if (MEM_SIZE (srcmem))
16968 set_mem_size (srcmem, NULL_RTX);
16970 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
16971 destexp, srcexp));
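/* Added illustrative note, not part of the original source: for
   MODE == SImode this boils down to

     shrl $2, %ecx
     rep movsl

   where DESTEXP/SRCEXP describe the final %edi/%esi values so that the
   rep insn's effect on the pointers is visible to the RTL optimizers.  */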
16974 /* Output a "rep; stos" instruction.
16975 Arguments have the same meaning as for the previous function. */
16976 static void
16977 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
16978 rtx count, enum machine_mode mode,
16979 rtx orig_value)
16981 rtx destexp;
16982 rtx countreg;
16984 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
16985 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
16986 value = force_reg (mode, gen_lowpart (mode, value));
16987 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
16988 if (mode != QImode)
16990 destexp = gen_rtx_ASHIFT (Pmode, countreg,
16991 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
16992 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
16994 else
16995 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
16996 if (orig_value == const0_rtx && CONST_INT_P (count))
16998 count = GEN_INT (INTVAL (count)
16999 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17000 destmem = shallow_copy_rtx (destmem);
17001 set_mem_size (destmem, count);
17003 else if (MEM_SIZE (destmem))
17004 set_mem_size (destmem, NULL_RTX);
17005 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17008 static void
17009 emit_strmov (rtx destmem, rtx srcmem,
17010 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17012 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17013 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17014 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17017 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17018 static void
17019 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17020 rtx destptr, rtx srcptr, rtx count, int max_size)
17022 rtx src, dest;
17023 if (CONST_INT_P (count))
17025 HOST_WIDE_INT countval = INTVAL (count);
17026 int offset = 0;
17028 if ((countval & 0x10) && max_size > 16)
17030 if (TARGET_64BIT)
17032 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17033 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17035 else
17036 gcc_unreachable ();
17037 offset += 16;
17039 if ((countval & 0x08) && max_size > 8)
17041 if (TARGET_64BIT)
17042 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17043 else
17045 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17046 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17048 offset += 8;
17050 if ((countval & 0x04) && max_size > 4)
17052 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17053 offset += 4;
17055 if ((countval & 0x02) && max_size > 2)
17057 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17058 offset += 2;
17060 if ((countval & 0x01) && max_size > 1)
17062 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17063 offset += 1;
17065 return;
17067 if (max_size > 8)
17069 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17070 count, 1, OPTAB_DIRECT);
17071 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17072 count, QImode, 1, 4);
17073 return;
17076 /* When single-instruction stringops are available, we can cheaply advance
17077 the dest and src pointers. Otherwise we save code size by maintaining an
17078 offset (zero comes free from the preceding rep operation) and using x86 addressing modes.
17080 if (TARGET_SINGLE_STRINGOP)
17082 if (max_size > 4)
17084 rtx label = ix86_expand_aligntest (count, 4, true);
17085 src = change_address (srcmem, SImode, srcptr);
17086 dest = change_address (destmem, SImode, destptr);
17087 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17088 emit_label (label);
17089 LABEL_NUSES (label) = 1;
17091 if (max_size > 2)
17093 rtx label = ix86_expand_aligntest (count, 2, true);
17094 src = change_address (srcmem, HImode, srcptr);
17095 dest = change_address (destmem, HImode, destptr);
17096 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17097 emit_label (label);
17098 LABEL_NUSES (label) = 1;
17100 if (max_size > 1)
17102 rtx label = ix86_expand_aligntest (count, 1, true);
17103 src = change_address (srcmem, QImode, srcptr);
17104 dest = change_address (destmem, QImode, destptr);
17105 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17106 emit_label (label);
17107 LABEL_NUSES (label) = 1;
17110 else
17112 rtx offset = force_reg (Pmode, const0_rtx);
17113 rtx tmp;
17115 if (max_size > 4)
17117 rtx label = ix86_expand_aligntest (count, 4, true);
17118 src = change_address (srcmem, SImode, srcptr);
17119 dest = change_address (destmem, SImode, destptr);
17120 emit_move_insn (dest, src);
17121 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17122 true, OPTAB_LIB_WIDEN);
17123 if (tmp != offset)
17124 emit_move_insn (offset, tmp);
17125 emit_label (label);
17126 LABEL_NUSES (label) = 1;
17128 if (max_size > 2)
17130 rtx label = ix86_expand_aligntest (count, 2, true);
17131 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17132 src = change_address (srcmem, HImode, tmp);
17133 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17134 dest = change_address (destmem, HImode, tmp);
17135 emit_move_insn (dest, src);
17136 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17137 true, OPTAB_LIB_WIDEN);
17138 if (tmp != offset)
17139 emit_move_insn (offset, tmp);
17140 emit_label (label);
17141 LABEL_NUSES (label) = 1;
17143 if (max_size > 1)
17145 rtx label = ix86_expand_aligntest (count, 1, true);
17146 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17147 src = change_address (srcmem, QImode, tmp);
17148 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17149 dest = change_address (destmem, QImode, tmp);
17150 emit_move_insn (dest, src);
17151 emit_label (label);
17152 LABEL_NUSES (label) = 1;
17157 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17158 static void
17159 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17160 rtx count, int max_size)
17162 count =
17163 expand_simple_binop (counter_mode (count), AND, count,
17164 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17165 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17166 gen_lowpart (QImode, value), count, QImode,
17167 1, max_size / 2);
17170 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17171 static void
17172 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17174 rtx dest;
17176 if (CONST_INT_P (count))
17178 HOST_WIDE_INT countval = INTVAL (count);
17179 int offset = 0;
17181 if ((countval & 0x10) && max_size > 16)
17183 if (TARGET_64BIT)
17185 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17186 emit_insn (gen_strset (destptr, dest, value));
17187 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17188 emit_insn (gen_strset (destptr, dest, value));
17190 else
17191 gcc_unreachable ();
17192 offset += 16;
17194 if ((countval & 0x08) && max_size > 8)
17196 if (TARGET_64BIT)
17198 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17199 emit_insn (gen_strset (destptr, dest, value));
17201 else
17203 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17204 emit_insn (gen_strset (destptr, dest, value));
17205 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17206 emit_insn (gen_strset (destptr, dest, value));
17208 offset += 8;
17210 if ((countval & 0x04) && max_size > 4)
17212 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17213 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17214 offset += 4;
17216 if ((countval & 0x02) && max_size > 2)
17218 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17219 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17220 offset += 2;
17222 if ((countval & 0x01) && max_size > 1)
17224 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17225 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17226 offset += 1;
17228 return;
17230 if (max_size > 32)
17232 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17233 return;
17235 if (max_size > 16)
17237 rtx label = ix86_expand_aligntest (count, 16, true);
17238 if (TARGET_64BIT)
17240 dest = change_address (destmem, DImode, destptr);
17241 emit_insn (gen_strset (destptr, dest, value));
17242 emit_insn (gen_strset (destptr, dest, value));
17244 else
17246 dest = change_address (destmem, SImode, destptr);
17247 emit_insn (gen_strset (destptr, dest, value));
17248 emit_insn (gen_strset (destptr, dest, value));
17249 emit_insn (gen_strset (destptr, dest, value));
17250 emit_insn (gen_strset (destptr, dest, value));
17252 emit_label (label);
17253 LABEL_NUSES (label) = 1;
17255 if (max_size > 8)
17257 rtx label = ix86_expand_aligntest (count, 8, true);
17258 if (TARGET_64BIT)
17260 dest = change_address (destmem, DImode, destptr);
17261 emit_insn (gen_strset (destptr, dest, value));
17263 else
17265 dest = change_address (destmem, SImode, destptr);
17266 emit_insn (gen_strset (destptr, dest, value));
17267 emit_insn (gen_strset (destptr, dest, value));
17269 emit_label (label);
17270 LABEL_NUSES (label) = 1;
17272 if (max_size > 4)
17274 rtx label = ix86_expand_aligntest (count, 4, true);
17275 dest = change_address (destmem, SImode, destptr);
17276 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17277 emit_label (label);
17278 LABEL_NUSES (label) = 1;
17280 if (max_size > 2)
17282 rtx label = ix86_expand_aligntest (count, 2, true);
17283 dest = change_address (destmem, HImode, destptr);
17284 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17285 emit_label (label);
17286 LABEL_NUSES (label) = 1;
17288 if (max_size > 1)
17290 rtx label = ix86_expand_aligntest (count, 1, true);
17291 dest = change_address (destmem, QImode, destptr);
17292 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17293 emit_label (label);
17294 LABEL_NUSES (label) = 1;
17298 /* Copy enough from SRC to DEST to align DEST, which is known to be aligned
17299 by ALIGN, to DESIRED_ALIGNMENT. */
17300 static void
17301 expand_movmem_prologue (rtx destmem, rtx srcmem,
17302 rtx destptr, rtx srcptr, rtx count,
17303 int align, int desired_alignment)
17305 if (align <= 1 && desired_alignment > 1)
17307 rtx label = ix86_expand_aligntest (destptr, 1, false);
17308 srcmem = change_address (srcmem, QImode, srcptr);
17309 destmem = change_address (destmem, QImode, destptr);
17310 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17311 ix86_adjust_counter (count, 1);
17312 emit_label (label);
17313 LABEL_NUSES (label) = 1;
17315 if (align <= 2 && desired_alignment > 2)
17317 rtx label = ix86_expand_aligntest (destptr, 2, false);
17318 srcmem = change_address (srcmem, HImode, srcptr);
17319 destmem = change_address (destmem, HImode, destptr);
17320 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17321 ix86_adjust_counter (count, 2);
17322 emit_label (label);
17323 LABEL_NUSES (label) = 1;
17325 if (align <= 4 && desired_alignment > 4)
17327 rtx label = ix86_expand_aligntest (destptr, 4, false);
17328 srcmem = change_address (srcmem, SImode, srcptr);
17329 destmem = change_address (destmem, SImode, destptr);
17330 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17331 ix86_adjust_counter (count, 4);
17332 emit_label (label);
17333 LABEL_NUSES (label) = 1;
17335 gcc_assert (desired_alignment <= 8);
17338 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17339 ALIGN_BYTES is how many bytes need to be copied. */
17340 static rtx
17341 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17342 int desired_align, int align_bytes)
17344 rtx src = *srcp;
17345 rtx src_size, dst_size;
17346 int off = 0;
17347 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17348 if (src_align_bytes >= 0)
17349 src_align_bytes = desired_align - src_align_bytes;
17350 src_size = MEM_SIZE (src);
17351 dst_size = MEM_SIZE (dst);
17352 if (align_bytes & 1)
17354 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17355 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17356 off = 1;
17357 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17359 if (align_bytes & 2)
17361 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17362 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17363 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17364 set_mem_align (dst, 2 * BITS_PER_UNIT);
17365 if (src_align_bytes >= 0
17366 && (src_align_bytes & 1) == (align_bytes & 1)
17367 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17368 set_mem_align (src, 2 * BITS_PER_UNIT);
17369 off = 2;
17370 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17372 if (align_bytes & 4)
17374 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17375 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17376 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17377 set_mem_align (dst, 4 * BITS_PER_UNIT);
17378 if (src_align_bytes >= 0)
17380 unsigned int src_align = 0;
17381 if ((src_align_bytes & 3) == (align_bytes & 3))
17382 src_align = 4;
17383 else if ((src_align_bytes & 1) == (align_bytes & 1))
17384 src_align = 2;
17385 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17386 set_mem_align (src, src_align * BITS_PER_UNIT);
17388 off = 4;
17389 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17391 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17392 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17393 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17394 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17395 if (src_align_bytes >= 0)
17397 unsigned int src_align = 0;
17398 if ((src_align_bytes & 7) == (align_bytes & 7))
17399 src_align = 8;
17400 else if ((src_align_bytes & 3) == (align_bytes & 3))
17401 src_align = 4;
17402 else if ((src_align_bytes & 1) == (align_bytes & 1))
17403 src_align = 2;
17404 if (src_align > (unsigned int) desired_align)
17405 src_align = desired_align;
17406 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17407 set_mem_align (src, src_align * BITS_PER_UNIT);
17409 if (dst_size)
17410 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17411 if (src_size)
17412 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
17413 *srcp = src;
17414 return dst;
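/* Added illustrative note, not part of the original source: e.g. for a
   destination known to be 1 byte past an 8-byte boundary and
   DESIRED_ALIGN == 8, ALIGN_BYTES is 7, so the code above emits one
   QImode, one HImode and one SImode string move (bits 1, 2 and 4 of
   ALIGN_BYTES) and then records the improved MEM_ALIGN and reduced
   MEM_SIZE on the adjusted BLKmode references.  */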
17417 /* Store enough into DEST to align DEST, which is known to be aligned
17418 by ALIGN, to DESIRED_ALIGNMENT. */
17419 static void
17420 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
17421 int align, int desired_alignment)
17423 if (align <= 1 && desired_alignment > 1)
17425 rtx label = ix86_expand_aligntest (destptr, 1, false);
17426 destmem = change_address (destmem, QImode, destptr);
17427 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
17428 ix86_adjust_counter (count, 1);
17429 emit_label (label);
17430 LABEL_NUSES (label) = 1;
17432 if (align <= 2 && desired_alignment > 2)
17434 rtx label = ix86_expand_aligntest (destptr, 2, false);
17435 destmem = change_address (destmem, HImode, destptr);
17436 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
17437 ix86_adjust_counter (count, 2);
17438 emit_label (label);
17439 LABEL_NUSES (label) = 1;
17441 if (align <= 4 && desired_alignment > 4)
17443 rtx label = ix86_expand_aligntest (destptr, 4, false);
17444 destmem = change_address (destmem, SImode, destptr);
17445 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
17446 ix86_adjust_counter (count, 4);
17447 emit_label (label);
17448 LABEL_NUSES (label) = 1;
17450 gcc_assert (desired_alignment <= 8);
17453 /* Store enough into DST to align DST, which is known to be aligned by ALIGN,
17454 to DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
17455 static rtx
17456 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
17457 int desired_align, int align_bytes)
17459 int off = 0;
17460 rtx dst_size = MEM_SIZE (dst);
17461 if (align_bytes & 1)
17463 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17464 off = 1;
17465 emit_insn (gen_strset (destreg, dst,
17466 gen_lowpart (QImode, value)));
17468 if (align_bytes & 2)
17470 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17471 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17472 set_mem_align (dst, 2 * BITS_PER_UNIT);
17473 off = 2;
17474 emit_insn (gen_strset (destreg, dst,
17475 gen_lowpart (HImode, value)));
17477 if (align_bytes & 4)
17479 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17480 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17481 set_mem_align (dst, 4 * BITS_PER_UNIT);
17482 off = 4;
17483 emit_insn (gen_strset (destreg, dst,
17484 gen_lowpart (SImode, value)));
17486 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17487 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17488 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17489 if (dst_size)
17490 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17491 return dst;
17494 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
17495 static enum stringop_alg
17496 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
17497 int *dynamic_check)
17499 const struct stringop_algs * algs;
17500 bool optimize_for_speed;
17501 /* Algorithms using the rep prefix want at least edi and ecx;
17502 additionally, memset wants eax and memcpy wants esi. Don't
17503 consider such algorithms if the user has appropriated those
17504 registers for their own purposes. */
17505 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
17506 || (memset
17507 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
17509 #define ALG_USABLE_P(alg) (rep_prefix_usable \
17510 || (alg != rep_prefix_1_byte \
17511 && alg != rep_prefix_4_byte \
17512 && alg != rep_prefix_8_byte))
17513 const struct processor_costs *cost;
17515 /* Even if the string operation call is cold, we still might spend a lot
17516 of time processing large blocks. */
17517 if (optimize_function_for_size_p (cfun)
17518 || (optimize_insn_for_size_p ()
17519 && expected_size != -1 && expected_size < 256))
17520 optimize_for_speed = false;
17521 else
17522 optimize_for_speed = true;
17524 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
17526 *dynamic_check = -1;
17527 if (memset)
17528 algs = &cost->memset[TARGET_64BIT != 0];
17529 else
17530 algs = &cost->memcpy[TARGET_64BIT != 0];
17531 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
17532 return stringop_alg;
17533 /* rep; movq or rep; movl is the smallest variant. */
17534 else if (!optimize_for_speed)
17536 if (!count || (count & 3))
17537 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
17538 else
17539 return rep_prefix_usable ? rep_prefix_4_byte : loop;
17541 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
17543 else if (expected_size != -1 && expected_size < 4)
17544 return loop_1_byte;
17545 else if (expected_size != -1)
17547 unsigned int i;
17548 enum stringop_alg alg = libcall;
17549 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
17551 /* We get here if the algorithms that were not libcall-based
17552 were rep-prefix based and we are unable to use rep prefixes
17553 based on global register usage. Break out of the loop and
17554 use the heuristic below. */
17555 if (algs->size[i].max == 0)
17556 break;
17557 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
17559 enum stringop_alg candidate = algs->size[i].alg;
17561 if (candidate != libcall && ALG_USABLE_P (candidate))
17562 alg = candidate;
17563 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
17564 last non-libcall inline algorithm. */
17565 if (TARGET_INLINE_ALL_STRINGOPS)
17567 /* When the current size is best to be copied by a libcall,
17568 but we are still forced to inline, run the heuristic below
17569 that will pick code for medium sized blocks. */
17570 if (alg != libcall)
17571 return alg;
17572 break;
17574 else if (ALG_USABLE_P (candidate))
17575 return candidate;
17578 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
17580 /* When asked to inline the call anyway, try to pick meaningful choice.
17581 We look for maximal size of block that is faster to copy by hand and
17582 take blocks of at most that size, guessing that the average size will
17583 be roughly half of the block.
17585 If this turns out to be bad, we might simply specify the preferred
17586 choice in ix86_costs. */
17587 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17588 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
17590 int max = -1;
17591 enum stringop_alg alg;
17592 int i;
17593 bool any_alg_usable_p = true;
17595 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
17597 enum stringop_alg candidate = algs->size[i].alg;
17598 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
17600 if (candidate != libcall && candidate
17601 && ALG_USABLE_P (candidate))
17602 max = algs->size[i].max;
17604 /* If there aren't any usable algorithms, then recursing on
17605 smaller sizes isn't going to find anything. Just return the
17606 simple byte-at-a-time copy loop. */
17607 if (!any_alg_usable_p)
17609 /* Pick something reasonable. */
17610 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17611 *dynamic_check = 128;
17612 return loop_1_byte;
17614 if (max == -1)
17615 max = 4096;
17616 alg = decide_alg (count, max / 2, memset, dynamic_check);
17617 gcc_assert (*dynamic_check == -1);
17618 gcc_assert (alg != libcall);
17619 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17620 *dynamic_check = max;
17621 return alg;
17623 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
17624 #undef ALG_USABLE_P
17627 /* Decide on alignment. We know that the operand is already aligned to ALIGN
17628 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
17629 static int
17630 decide_alignment (int align,
17631 enum stringop_alg alg,
17632 int expected_size)
17634 int desired_align = 0;
17635 switch (alg)
17637 case no_stringop:
17638 gcc_unreachable ();
17639 case loop:
17640 case unrolled_loop:
17641 desired_align = GET_MODE_SIZE (Pmode);
17642 break;
17643 case rep_prefix_8_byte:
17644 desired_align = 8;
17645 break;
17646 case rep_prefix_4_byte:
17647 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
17648 copying a whole cache line at once. */
17649 if (TARGET_PENTIUMPRO)
17650 desired_align = 8;
17651 else
17652 desired_align = 4;
17653 break;
17654 case rep_prefix_1_byte:
17655 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
17656 copying a whole cache line at once. */
17657 if (TARGET_PENTIUMPRO)
17658 desired_align = 8;
17659 else
17660 desired_align = 1;
17661 break;
17662 case loop_1_byte:
17663 desired_align = 1;
17664 break;
17665 case libcall:
17666 return 0;
17669 if (optimize_size)
17670 desired_align = 1;
17671 if (desired_align < align)
17672 desired_align = align;
17673 if (expected_size != -1 && expected_size < 4)
17674 desired_align = align;
17675 return desired_align;
17678 /* Return the smallest power of 2 greater than VAL. */
17679 static int
17680 smallest_pow2_greater_than (int val)
17682 int ret = 1;
17683 while (ret <= val)
17684 ret <<= 1;
17685 return ret;
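/* A few illustrative values (note "greater than", not "greater or equal"):
     smallest_pow2_greater_than (0) == 1
     smallest_pow2_greater_than (7) == 8
     smallest_pow2_greater_than (8) == 16
   which is why the epilogue sizing code below feeds it size_needed - 1
   rather than size_needed.  */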
17688 /* Expand string move (memcpy) operation. Use i386 string operations when
17689 profitable. expand_setmem contains similar code. The code depends upon
17690 architecture, block size and alignment, but always has the same
17691 overall structure:
17693 1) Prologue guard: Conditional that jumps up to epilogues for small
17694 blocks that can be handled by epilogue alone. This is faster but
17695 also needed for correctness, since the prologue assumes the block is larger
17696 than the desired alignment.
17698 Optional dynamic check for size and libcall for large
17699 blocks is emitted here too, with -minline-stringops-dynamically.
17701 2) Prologue: copy first few bytes in order to get destination aligned
17702 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
17703 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
17704 We emit either a jump tree on power of two sized blocks, or a byte loop.
17706 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
17707 with specified algorithm.
17709 4) Epilogue: code copying tail of the block that is too small to be
17710 handled by main body (or up to size guarded by prologue guard). */
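/* A rough C-level sketch of the code this expansion produces (illustrative
   only; the real code is emitted as RTL and its exact shape depends on the
   algorithm chosen by decide_alg):

     if (count < epilogue_size_needed)               // 1) prologue guard
       goto epilogue;
     while ((uintptr_t) dst & (desired_align - 1))   // 2) alignment prologue
       *dst++ = *src++, count--;
     while (count >= size_needed)                    // 3) main body
       {
         copy SIZE_NEEDED bytes from src to dst;
         src += size_needed, dst += size_needed, count -= size_needed;
       }
   epilogue:                                         // 4) epilogue
     copy the remaining count & (epilogue_size_needed - 1) bytes;  */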
17713 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
17714 rtx expected_align_exp, rtx expected_size_exp)
17716 rtx destreg;
17717 rtx srcreg;
17718 rtx label = NULL;
17719 rtx tmp;
17720 rtx jump_around_label = NULL;
17721 HOST_WIDE_INT align = 1;
17722 unsigned HOST_WIDE_INT count = 0;
17723 HOST_WIDE_INT expected_size = -1;
17724 int size_needed = 0, epilogue_size_needed;
17725 int desired_align = 0, align_bytes = 0;
17726 enum stringop_alg alg;
17727 int dynamic_check;
17728 bool need_zero_guard = false;
17730 if (CONST_INT_P (align_exp))
17731 align = INTVAL (align_exp);
17732 /* i386 can do misaligned access at a reasonably increased cost. */
17733 if (CONST_INT_P (expected_align_exp)
17734 && INTVAL (expected_align_exp) > align)
17735 align = INTVAL (expected_align_exp);
17736 /* ALIGN is the minimum of destination and source alignment, but we care here
17737 just about destination alignment. */
17738 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
17739 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
17741 if (CONST_INT_P (count_exp))
17742 count = expected_size = INTVAL (count_exp);
17743 if (CONST_INT_P (expected_size_exp) && count == 0)
17744 expected_size = INTVAL (expected_size_exp);
17746 /* Make sure we don't need to care about overflow later on. */
17747 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
17748 return 0;
17750 /* Step 0: Decide on preferred algorithm, desired alignment and
17751 size of chunks to be copied by main loop. */
17753 alg = decide_alg (count, expected_size, false, &dynamic_check);
17754 desired_align = decide_alignment (align, alg, expected_size);
17756 if (!TARGET_ALIGN_STRINGOPS)
17757 align = desired_align;
17759 if (alg == libcall)
17760 return 0;
17761 gcc_assert (alg != no_stringop);
17762 if (!count)
17763 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
17764 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
17765 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
17766 switch (alg)
17768 case libcall:
17769 case no_stringop:
17770 gcc_unreachable ();
17771 case loop:
17772 need_zero_guard = true;
17773 size_needed = GET_MODE_SIZE (Pmode);
17774 break;
17775 case unrolled_loop:
17776 need_zero_guard = true;
17777 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
17778 break;
17779 case rep_prefix_8_byte:
17780 size_needed = 8;
17781 break;
17782 case rep_prefix_4_byte:
17783 size_needed = 4;
17784 break;
17785 case rep_prefix_1_byte:
17786 size_needed = 1;
17787 break;
17788 case loop_1_byte:
17789 need_zero_guard = true;
17790 size_needed = 1;
17791 break;
17794 epilogue_size_needed = size_needed;
17796 /* Step 1: Prologue guard. */
17798 /* Alignment code needs count to be in register. */
17799 if (CONST_INT_P (count_exp) && desired_align > align)
17801 if (INTVAL (count_exp) > desired_align
17802 && INTVAL (count_exp) > size_needed)
17804 align_bytes
17805 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
17806 if (align_bytes <= 0)
17807 align_bytes = 0;
17808 else
17809 align_bytes = desired_align - align_bytes;
17811 if (align_bytes == 0)
17812 count_exp = force_reg (counter_mode (count_exp), count_exp);
17814 gcc_assert (desired_align >= 1 && align >= 1);
17816 /* Ensure that alignment prologue won't copy past end of block. */
17817 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
17819 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
17820 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
17821 Make sure it is a power of 2. */
17822 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
17824 if (count)
17826 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
17828 /* If main algorithm works on QImode, no epilogue is needed.
17829 For small sizes just don't align anything. */
17830 if (size_needed == 1)
17831 desired_align = align;
17832 else
17833 goto epilogue;
17836 else
17838 label = gen_label_rtx ();
17839 emit_cmp_and_jump_insns (count_exp,
17840 GEN_INT (epilogue_size_needed),
17841 LTU, 0, counter_mode (count_exp), 1, label);
17842 if (expected_size == -1 || expected_size < epilogue_size_needed)
17843 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17844 else
17845 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17849 /* Emit code to decide at runtime whether a library call or inline code should be
17850 used. */
17851 if (dynamic_check != -1)
17853 if (CONST_INT_P (count_exp))
17855 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
17857 emit_block_move_via_libcall (dst, src, count_exp, false);
17858 count_exp = const0_rtx;
17859 goto epilogue;
17862 else
17864 rtx hot_label = gen_label_rtx ();
17865 jump_around_label = gen_label_rtx ();
17866 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
17867 LEU, 0, GET_MODE (count_exp), 1, hot_label);
17868 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17869 emit_block_move_via_libcall (dst, src, count_exp, false);
17870 emit_jump (jump_around_label);
17871 emit_label (hot_label);
17875 /* Step 2: Alignment prologue. */
17877 if (desired_align > align)
17879 if (align_bytes == 0)
17881 /* Except for the first move in epilogue, we no longer know
17882 constant offset in aliasing info. It doesn't seem worth
17883 the pain to maintain it for the first move, so throw away
17884 the info early. */
17885 src = change_address (src, BLKmode, srcreg);
17886 dst = change_address (dst, BLKmode, destreg);
17887 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
17888 desired_align);
17890 else
17892 /* If we know how many bytes need to be stored before dst is
17893 sufficiently aligned, maintain aliasing info accurately. */
17894 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
17895 desired_align, align_bytes);
17896 count_exp = plus_constant (count_exp, -align_bytes);
17897 count -= align_bytes;
17899 if (need_zero_guard
17900 && (count < (unsigned HOST_WIDE_INT) size_needed
17901 || (align_bytes == 0
17902 && count < ((unsigned HOST_WIDE_INT) size_needed
17903 + desired_align - align))))
17905 /* It is possible that we copied enough so the main loop will not
17906 execute. */
17907 gcc_assert (size_needed > 1);
17908 if (label == NULL_RTX)
17909 label = gen_label_rtx ();
17910 emit_cmp_and_jump_insns (count_exp,
17911 GEN_INT (size_needed),
17912 LTU, 0, counter_mode (count_exp), 1, label);
17913 if (expected_size == -1
17914 || expected_size < (desired_align - align) / 2 + size_needed)
17915 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17916 else
17917 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17920 if (label && size_needed == 1)
17922 emit_label (label);
17923 LABEL_NUSES (label) = 1;
17924 label = NULL;
17925 epilogue_size_needed = 1;
17927 else if (label == NULL_RTX)
17928 epilogue_size_needed = size_needed;
17930 /* Step 3: Main loop. */
17932 switch (alg)
17934 case libcall:
17935 case no_stringop:
17936 gcc_unreachable ();
17937 case loop_1_byte:
17938 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
17939 count_exp, QImode, 1, expected_size);
17940 break;
17941 case loop:
17942 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
17943 count_exp, Pmode, 1, expected_size);
17944 break;
17945 case unrolled_loop:
17946 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
17947 registers for 4 temporaries anyway. */
17948 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
17949 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
17950 expected_size);
17951 break;
17952 case rep_prefix_8_byte:
17953 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
17954 DImode);
17955 break;
17956 case rep_prefix_4_byte:
17957 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
17958 SImode);
17959 break;
17960 case rep_prefix_1_byte:
17961 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
17962 QImode);
17963 break;
17965 /* Properly adjust the offsets of src and dest memory for aliasing. */
17966 if (CONST_INT_P (count_exp))
17968 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
17969 (count / size_needed) * size_needed);
17970 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
17971 (count / size_needed) * size_needed);
17973 else
17975 src = change_address (src, BLKmode, srcreg);
17976 dst = change_address (dst, BLKmode, destreg);
17979 /* Step 4: Epilogue to copy the remaining bytes. */
17980 epilogue:
17981 if (label)
17983 /* When the main loop is done, COUNT_EXP might hold original count,
17984 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
17985 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
17986 bytes. Compensate if needed. */
17988 if (size_needed < epilogue_size_needed)
17990 tmp =
17991 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
17992 GEN_INT (size_needed - 1), count_exp, 1,
17993 OPTAB_DIRECT);
17994 if (tmp != count_exp)
17995 emit_move_insn (count_exp, tmp);
17997 emit_label (label);
17998 LABEL_NUSES (label) = 1;
18001 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18002 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18003 epilogue_size_needed);
18004 if (jump_around_label)
18005 emit_label (jump_around_label);
18006 return 1;
18009 /* Helper function for memset. For a QImode value 0xXY produce
18010 0xXYXYXYXY of the width specified by MODE. This is essentially
18011 a * 0x10101010, but we can do slightly better than
18012 synth_mult by unwinding the sequence by hand on CPUs with
18013 slow multiply. */
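/* For example, promoting the QImode constant 0xAB to SImode yields the
   immediate 0xABABABAB; the constant case below folds this at compile time:

     v = 0xAB;
     v |= v << 8;             // 0xABAB
     v |= v << 16;            // 0xABABABAB
     v |= (v << 16) << 16;    // 0xABABABABABABABAB, DImode only

   For a non-constant value the same result is obtained either by a multiply
   with 0x01010101 (0x0101010101010101 for DImode) or by the shift/or
   sequence further below, whichever the cost tables say is cheaper.  */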
18014 static rtx
18015 promote_duplicated_reg (enum machine_mode mode, rtx val)
18017 enum machine_mode valmode = GET_MODE (val);
18018 rtx tmp;
18019 int nops = mode == DImode ? 3 : 2;
18021 gcc_assert (mode == SImode || mode == DImode);
18022 if (val == const0_rtx)
18023 return copy_to_mode_reg (mode, const0_rtx);
18024 if (CONST_INT_P (val))
18026 HOST_WIDE_INT v = INTVAL (val) & 255;
18028 v |= v << 8;
18029 v |= v << 16;
18030 if (mode == DImode)
18031 v |= (v << 16) << 16;
18032 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18035 if (valmode == VOIDmode)
18036 valmode = QImode;
18037 if (valmode != QImode)
18038 val = gen_lowpart (QImode, val);
18039 if (mode == QImode)
18040 return val;
18041 if (!TARGET_PARTIAL_REG_STALL)
18042 nops--;
18043 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18044 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18045 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18046 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18048 rtx reg = convert_modes (mode, QImode, val, true);
18049 tmp = promote_duplicated_reg (mode, const1_rtx);
18050 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18051 OPTAB_DIRECT);
18053 else
18055 rtx reg = convert_modes (mode, QImode, val, true);
18057 if (!TARGET_PARTIAL_REG_STALL)
18058 if (mode == SImode)
18059 emit_insn (gen_movsi_insv_1 (reg, reg));
18060 else
18061 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18062 else
18064 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18065 NULL, 1, OPTAB_DIRECT);
18066 reg =
18067 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18069 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18070 NULL, 1, OPTAB_DIRECT);
18071 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18072 if (mode == SImode)
18073 return reg;
18074 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18075 NULL, 1, OPTAB_DIRECT);
18076 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18077 return reg;
18081 /* Duplicate the value VAL using promote_duplicated_reg into the maximal size that
18082 will be needed by the main loop storing SIZE_NEEDED chunks and by the prologue
18083 getting alignment from ALIGN to DESIRED_ALIGN. */
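/* Illustrative width choices (values assumed, not taken from any target):
     TARGET_64BIT, size_needed == 8                   -> DImode promotion
     size_needed == 4                                 -> SImode promotion
     size_needed == 1, desired_align == 2, align == 1 -> HImode promotion
     size_needed == 1, desired_align == align         -> VAL used as-is.  */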
18084 static rtx
18085 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18087 rtx promoted_val;
18089 if (TARGET_64BIT
18090 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18091 promoted_val = promote_duplicated_reg (DImode, val);
18092 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18093 promoted_val = promote_duplicated_reg (SImode, val);
18094 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18095 promoted_val = promote_duplicated_reg (HImode, val);
18096 else
18097 promoted_val = val;
18099 return promoted_val;
18102 /* Expand string clear operation (bzero). Use i386 string operations when
18103 profitable. See expand_movmem comment for explanation of individual
18104 steps performed. */
18106 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18107 rtx expected_align_exp, rtx expected_size_exp)
18109 rtx destreg;
18110 rtx label = NULL;
18111 rtx tmp;
18112 rtx jump_around_label = NULL;
18113 HOST_WIDE_INT align = 1;
18114 unsigned HOST_WIDE_INT count = 0;
18115 HOST_WIDE_INT expected_size = -1;
18116 int size_needed = 0, epilogue_size_needed;
18117 int desired_align = 0, align_bytes = 0;
18118 enum stringop_alg alg;
18119 rtx promoted_val = NULL;
18120 bool force_loopy_epilogue = false;
18121 int dynamic_check;
18122 bool need_zero_guard = false;
18124 if (CONST_INT_P (align_exp))
18125 align = INTVAL (align_exp);
18126 /* i386 can do misaligned access at a reasonably increased cost. */
18127 if (CONST_INT_P (expected_align_exp)
18128 && INTVAL (expected_align_exp) > align)
18129 align = INTVAL (expected_align_exp);
18130 if (CONST_INT_P (count_exp))
18131 count = expected_size = INTVAL (count_exp);
18132 if (CONST_INT_P (expected_size_exp) && count == 0)
18133 expected_size = INTVAL (expected_size_exp);
18135 /* Make sure we don't need to care about overflow later on. */
18136 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18137 return 0;
18139 /* Step 0: Decide on preferred algorithm, desired alignment and
18140 size of chunks to be copied by main loop. */
18142 alg = decide_alg (count, expected_size, true, &dynamic_check);
18143 desired_align = decide_alignment (align, alg, expected_size);
18145 if (!TARGET_ALIGN_STRINGOPS)
18146 align = desired_align;
18148 if (alg == libcall)
18149 return 0;
18150 gcc_assert (alg != no_stringop);
18151 if (!count)
18152 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18153 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18154 switch (alg)
18156 case libcall:
18157 case no_stringop:
18158 gcc_unreachable ();
18159 case loop:
18160 need_zero_guard = true;
18161 size_needed = GET_MODE_SIZE (Pmode);
18162 break;
18163 case unrolled_loop:
18164 need_zero_guard = true;
18165 size_needed = GET_MODE_SIZE (Pmode) * 4;
18166 break;
18167 case rep_prefix_8_byte:
18168 size_needed = 8;
18169 break;
18170 case rep_prefix_4_byte:
18171 size_needed = 4;
18172 break;
18173 case rep_prefix_1_byte:
18174 size_needed = 1;
18175 break;
18176 case loop_1_byte:
18177 need_zero_guard = true;
18178 size_needed = 1;
18179 break;
18181 epilogue_size_needed = size_needed;
18183 /* Step 1: Prologue guard. */
18185 /* Alignment code needs count to be in register. */
18186 if (CONST_INT_P (count_exp) && desired_align > align)
18188 if (INTVAL (count_exp) > desired_align
18189 && INTVAL (count_exp) > size_needed)
18191 align_bytes
18192 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18193 if (align_bytes <= 0)
18194 align_bytes = 0;
18195 else
18196 align_bytes = desired_align - align_bytes;
18198 if (align_bytes == 0)
18200 enum machine_mode mode = SImode;
18201 if (TARGET_64BIT && (count & ~0xffffffff))
18202 mode = DImode;
18203 count_exp = force_reg (mode, count_exp);
18206 /* Do the cheap promotion to allow better CSE across the
18207 main loop and epilogue (i.e. one load of the big constant in
18208 front of all the code). */
18209 if (CONST_INT_P (val_exp))
18210 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18211 desired_align, align);
18212 /* Ensure that alignment prologue won't copy past end of block. */
18213 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18215 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18216 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18217 Make sure it is a power of 2. */
18218 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18220 /* To improve performance of small blocks, we jump around the VAL
18221 promoting code. This means that if the promoted VAL is not constant,
18222 we might not use it in the epilogue and have to use the byte
18223 loop variant. */
18224 if (epilogue_size_needed > 2 && !promoted_val)
18225 force_loopy_epilogue = true;
18226 if (count)
18228 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18230 /* If main algorithm works on QImode, no epilogue is needed.
18231 For small sizes just don't align anything. */
18232 if (size_needed == 1)
18233 desired_align = align;
18234 else
18235 goto epilogue;
18238 else
18240 label = gen_label_rtx ();
18241 emit_cmp_and_jump_insns (count_exp,
18242 GEN_INT (epilogue_size_needed),
18243 LTU, 0, counter_mode (count_exp), 1, label);
18244 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18245 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18246 else
18247 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18250 if (dynamic_check != -1)
18252 rtx hot_label = gen_label_rtx ();
18253 jump_around_label = gen_label_rtx ();
18254 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18255 LEU, 0, counter_mode (count_exp), 1, hot_label);
18256 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18257 set_storage_via_libcall (dst, count_exp, val_exp, false);
18258 emit_jump (jump_around_label);
18259 emit_label (hot_label);
18262 /* Step 2: Alignment prologue. */
18264 /* Do the expensive promotion once we branched off the small blocks. */
18265 if (!promoted_val)
18266 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18267 desired_align, align);
18268 gcc_assert (desired_align >= 1 && align >= 1);
18270 if (desired_align > align)
18272 if (align_bytes == 0)
18274 /* Except for the first move in epilogue, we no longer know
18275 constant offset in aliasing info. It doesn't seem worth
18276 the pain to maintain it for the first move, so throw away
18277 the info early. */
18278 dst = change_address (dst, BLKmode, destreg);
18279 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18280 desired_align);
18282 else
18284 /* If we know how many bytes need to be stored before dst is
18285 sufficiently aligned, maintain aliasing info accurately. */
18286 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18287 desired_align, align_bytes);
18288 count_exp = plus_constant (count_exp, -align_bytes);
18289 count -= align_bytes;
18291 if (need_zero_guard
18292 && (count < (unsigned HOST_WIDE_INT) size_needed
18293 || (align_bytes == 0
18294 && count < ((unsigned HOST_WIDE_INT) size_needed
18295 + desired_align - align))))
18297 /* It is possible that we copied enough so the main loop will not
18298 execute. */
18299 gcc_assert (size_needed > 1);
18300 if (label == NULL_RTX)
18301 label = gen_label_rtx ();
18302 emit_cmp_and_jump_insns (count_exp,
18303 GEN_INT (size_needed),
18304 LTU, 0, counter_mode (count_exp), 1, label);
18305 if (expected_size == -1
18306 || expected_size < (desired_align - align) / 2 + size_needed)
18307 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18308 else
18309 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18312 if (label && size_needed == 1)
18314 emit_label (label);
18315 LABEL_NUSES (label) = 1;
18316 label = NULL;
18317 promoted_val = val_exp;
18318 epilogue_size_needed = 1;
18320 else if (label == NULL_RTX)
18321 epilogue_size_needed = size_needed;
18323 /* Step 3: Main loop. */
18325 switch (alg)
18327 case libcall:
18328 case no_stringop:
18329 gcc_unreachable ();
18330 case loop_1_byte:
18331 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18332 count_exp, QImode, 1, expected_size);
18333 break;
18334 case loop:
18335 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18336 count_exp, Pmode, 1, expected_size);
18337 break;
18338 case unrolled_loop:
18339 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18340 count_exp, Pmode, 4, expected_size);
18341 break;
18342 case rep_prefix_8_byte:
18343 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18344 DImode, val_exp);
18345 break;
18346 case rep_prefix_4_byte:
18347 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18348 SImode, val_exp);
18349 break;
18350 case rep_prefix_1_byte:
18351 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18352 QImode, val_exp);
18353 break;
18355 /* Properly adjust the offset of the destination memory for aliasing. */
18356 if (CONST_INT_P (count_exp))
18357 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18358 (count / size_needed) * size_needed);
18359 else
18360 dst = change_address (dst, BLKmode, destreg);
18362 /* Step 4: Epilogue to copy the remaining bytes. */
18364 if (label)
18366 /* When the main loop is done, COUNT_EXP might hold original count,
18367 while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18368 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18369 bytes. Compensate if needed. */
18371 if (size_needed < epilogue_size_needed)
18373 tmp =
18374 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18375 GEN_INT (size_needed - 1), count_exp, 1,
18376 OPTAB_DIRECT);
18377 if (tmp != count_exp)
18378 emit_move_insn (count_exp, tmp);
18380 emit_label (label);
18381 LABEL_NUSES (label) = 1;
18383 epilogue:
18384 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18386 if (force_loopy_epilogue)
18387 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18388 epilogue_size_needed);
18389 else
18390 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18391 epilogue_size_needed);
18393 if (jump_around_label)
18394 emit_label (jump_around_label);
18395 return 1;
18398 /* Expand the appropriate insns for doing strlen if not just doing
18399 repnz; scasb
18401 out = result, initialized with the start address
18402 align_rtx = alignment of the address.
18403 scratch = scratch register, initialized with the start address when
18404 not aligned, otherwise undefined
18406 This is just the body. It needs the initializations mentioned above and
18407 some address computing at the end. These things are done in i386.md. */
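/* A C-level sketch of what the emitted RTL does (illustrative only):

     char *p = out;                      // OUT is preloaded with the start
     while ((uintptr_t) p & 3)           // check up to 3 unaligned bytes
       {
         if (*p == 0)
           goto done;
         p++;
       }
     for (;;)                            // then 4 bytes per iteration
       {
         unsigned int w = *(unsigned int *) p;
         p += 4;
         if ((w - 0x01010101) & ~w & 0x80808080)   // some byte of W is zero
           break;
       }
     back P up and step it to the zero byte inside W;
   done:
     the caller (ix86_expand_strlen / i386.md) turns P into a length.  */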
18409 static void
18410 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
18412 int align;
18413 rtx tmp;
18414 rtx align_2_label = NULL_RTX;
18415 rtx align_3_label = NULL_RTX;
18416 rtx align_4_label = gen_label_rtx ();
18417 rtx end_0_label = gen_label_rtx ();
18418 rtx mem;
18419 rtx tmpreg = gen_reg_rtx (SImode);
18420 rtx scratch = gen_reg_rtx (SImode);
18421 rtx cmp;
18423 align = 0;
18424 if (CONST_INT_P (align_rtx))
18425 align = INTVAL (align_rtx);
18427 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
18429 /* Is there a known alignment and is it less than 4? */
18430 if (align < 4)
18432 rtx scratch1 = gen_reg_rtx (Pmode);
18433 emit_move_insn (scratch1, out);
18434 /* Is there a known alignment and is it not 2? */
18435 if (align != 2)
18437 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
18438 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
18440 /* Leave just the 3 lower bits. */
18441 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
18442 NULL_RTX, 0, OPTAB_WIDEN);
18444 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18445 Pmode, 1, align_4_label);
18446 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
18447 Pmode, 1, align_2_label);
18448 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
18449 Pmode, 1, align_3_label);
18451 else
18453 /* Since the alignment is 2, we have to check 2 or 0 bytes;
18454 check whether it is aligned to a 4-byte boundary. */
18456 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
18457 NULL_RTX, 0, OPTAB_WIDEN);
18459 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18460 Pmode, 1, align_4_label);
18463 mem = change_address (src, QImode, out);
18465 /* Now compare the bytes. */
18467 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
18468 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
18469 QImode, 1, end_0_label);
18471 /* Increment the address. */
18472 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18474 /* Not needed with an alignment of 2 */
18475 if (align != 2)
18477 emit_label (align_2_label);
18479 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
18480 end_0_label);
18482 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18484 emit_label (align_3_label);
18487 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
18488 end_0_label);
18490 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18493 /* Generate loop to check 4 bytes at a time. It is not a good idea to
18494 align this loop. It only makes the program bigger and does not help
18495 speed. */
18496 emit_label (align_4_label);
18498 mem = change_address (src, SImode, out);
18499 emit_move_insn (scratch, mem);
18500 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
18502 /* This formula yields a nonzero result iff one of the bytes is zero.
18503 This saves three branches inside the loop and many cycles. */
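/* Scalar equivalent of the four instructions emitted below (illustrative):

     tmp = (w - 0x01010101) & ~w & 0x80808080;

   TMP is nonzero iff at least one byte of W is zero: subtracting 0x01 from
   a zero byte wraps it to 0xff (or 0xfe with an incoming borrow), setting
   its top bit, while the ~w term clears the top bit of every byte that
   already had it set in W.  */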
18505 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
18506 emit_insn (gen_one_cmplsi2 (scratch, scratch));
18507 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
18508 emit_insn (gen_andsi3 (tmpreg, tmpreg,
18509 gen_int_mode (0x80808080, SImode)));
18510 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
18511 align_4_label);
18513 if (TARGET_CMOVE)
18515 rtx reg = gen_reg_rtx (SImode);
18516 rtx reg2 = gen_reg_rtx (Pmode);
18517 emit_move_insn (reg, tmpreg);
18518 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
18520 /* If zero is not in the first two bytes, move two bytes forward. */
18521 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
18522 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18523 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
18524 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
18525 gen_rtx_IF_THEN_ELSE (SImode, tmp,
18526 reg,
18527 tmpreg)));
18528 /* Emit lea manually to avoid clobbering of flags. */
18529 emit_insn (gen_rtx_SET (SImode, reg2,
18530 gen_rtx_PLUS (Pmode, out, const2_rtx)));
18532 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18533 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
18534 emit_insn (gen_rtx_SET (VOIDmode, out,
18535 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
18536 reg2,
18537 out)));
18540 else
18542 rtx end_2_label = gen_label_rtx ();
18543 /* Is zero in the first two bytes? */
18545 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
18546 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18547 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
18548 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
18549 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
18550 pc_rtx);
18551 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
18552 JUMP_LABEL (tmp) = end_2_label;
18554 /* Not in the first two. Move two bytes forward. */
18555 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
18556 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
18558 emit_label (end_2_label);
18562 /* Avoid branch in fixing the byte. */
18563 tmpreg = gen_lowpart (QImode, tmpreg);
18564 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
18565 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
18566 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), cmp));
18568 emit_label (end_0_label);
18571 /* Expand strlen. */
18574 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
18576 rtx addr, scratch1, scratch2, scratch3, scratch4;
18578 /* The generic case of the strlen expander is long. Avoid expanding
18579 it unless TARGET_INLINE_ALL_STRINGOPS. */
18581 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
18582 && !TARGET_INLINE_ALL_STRINGOPS
18583 && !optimize_insn_for_size_p ()
18584 && (!CONST_INT_P (align) || INTVAL (align) < 4))
18585 return 0;
18587 addr = force_reg (Pmode, XEXP (src, 0));
18588 scratch1 = gen_reg_rtx (Pmode);
18590 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
18591 && !optimize_insn_for_size_p ())
18593 /* Well it seems that some optimizer does not combine a call like
18594 foo(strlen(bar), strlen(bar));
18595 when the move and the subtraction are done here. It does calculate
18596 the length just once when these instructions are done inside of
18597 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
18598 often used and I use one fewer register for the lifetime of
18599 output_strlen_unroll() this is better. */
18601 emit_move_insn (out, addr);
18603 ix86_expand_strlensi_unroll_1 (out, src, align);
18605 /* strlensi_unroll_1 returns the address of the zero at the end of
18606 the string, like memchr(), so compute the length by subtracting
18607 the start address. */
18608 emit_insn ((*ix86_gen_sub3) (out, out, addr));
18610 else
18612 rtx unspec;
18614 /* Can't use this if the user has appropriated eax, ecx, or edi. */
18615 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
18616 return false;
18618 scratch2 = gen_reg_rtx (Pmode);
18619 scratch3 = gen_reg_rtx (Pmode);
18620 scratch4 = force_reg (Pmode, constm1_rtx);
18622 emit_move_insn (scratch3, addr);
18623 eoschar = force_reg (QImode, eoschar);
18625 src = replace_equiv_address_nv (src, scratch3);
18627 /* If .md starts supporting :P, this can be done in .md. */
18628 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
18629 scratch4), UNSPEC_SCAS);
18630 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
18631 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
18632 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
18634 return 1;
18637 /* For a given symbol (function), construct code to compute the address of its PLT
18638 entry in the large x86-64 PIC model. */
18640 construct_plt_address (rtx symbol)
18642 rtx tmp = gen_reg_rtx (Pmode);
18643 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
18645 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
18646 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
18648 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
18649 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
18650 return tmp;
18653 void
18654 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
18655 rtx callarg2,
18656 rtx pop, int sibcall)
18658 rtx use = NULL, call;
18660 if (pop == const0_rtx)
18661 pop = NULL;
18662 gcc_assert (!TARGET_64BIT || !pop);
18664 if (TARGET_MACHO && !TARGET_64BIT)
18666 #if TARGET_MACHO
18667 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
18668 fnaddr = machopic_indirect_call_target (fnaddr);
18669 #endif
18671 else
18673 /* Static functions and indirect calls don't need the pic register. */
18674 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
18675 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
18676 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
18677 use_reg (&use, pic_offset_table_rtx);
18680 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
18682 rtx al = gen_rtx_REG (QImode, AX_REG);
18683 emit_move_insn (al, callarg2);
18684 use_reg (&use, al);
18687 if (ix86_cmodel == CM_LARGE_PIC
18688 && GET_CODE (fnaddr) == MEM
18689 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
18690 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
18691 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
18692 else if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
18694 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
18695 fnaddr = gen_rtx_MEM (QImode, fnaddr);
18697 if (sibcall && TARGET_64BIT
18698 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
18700 rtx addr;
18701 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
18702 fnaddr = gen_rtx_REG (Pmode, R11_REG);
18703 emit_move_insn (fnaddr, addr);
18704 fnaddr = gen_rtx_MEM (QImode, fnaddr);
18707 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
18708 if (retval)
18709 call = gen_rtx_SET (VOIDmode, retval, call);
18710 if (pop)
18712 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
18713 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
18714 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
18716 if (TARGET_64BIT
18717 && ix86_cfun_abi () == MS_ABI
18718 && (!callarg2 || INTVAL (callarg2) != -2))
18720 /* We need to represent that SI and DI registers are clobbered
18721 by SYSV calls. */
18722 static int clobbered_registers[] = {
18723 XMM6_REG, XMM7_REG, XMM8_REG,
18724 XMM9_REG, XMM10_REG, XMM11_REG,
18725 XMM12_REG, XMM13_REG, XMM14_REG,
18726 XMM15_REG, SI_REG, DI_REG
18728 unsigned int i;
18729 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
18730 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
18731 UNSPEC_MS_TO_SYSV_CALL);
18733 vec[0] = call;
18734 vec[1] = unspec;
18735 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
18736 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
18737 ? TImode : DImode,
18738 gen_rtx_REG
18739 (SSE_REGNO_P (clobbered_registers[i])
18740 ? TImode : DImode,
18741 clobbered_registers[i]));
18743 call = gen_rtx_PARALLEL (VOIDmode,
18744 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
18745 + 2, vec));
18748 call = emit_call_insn (call);
18749 if (use)
18750 CALL_INSN_FUNCTION_USAGE (call) = use;
18754 /* Clear stack slot assignments remembered from previous functions.
18755 This is called from INIT_EXPANDERS once before RTL is emitted for each
18756 function. */
18758 static struct machine_function *
18759 ix86_init_machine_status (void)
18761 struct machine_function *f;
18763 f = GGC_CNEW (struct machine_function);
18764 f->use_fast_prologue_epilogue_nregs = -1;
18765 f->tls_descriptor_call_expanded_p = 0;
18766 f->call_abi = ix86_abi;
18768 return f;
18771 /* Return a MEM corresponding to a stack slot with mode MODE.
18772 Allocate a new slot if necessary.
18774 The RTL for a function can have several slots available: N is
18775 which slot to use. */
18778 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
18780 struct stack_local_entry *s;
18782 gcc_assert (n < MAX_386_STACK_LOCALS);
18784 /* Virtual slot is valid only before vregs are instantiated. */
18785 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
18787 for (s = ix86_stack_locals; s; s = s->next)
18788 if (s->mode == mode && s->n == n)
18789 return copy_rtx (s->rtl);
18791 s = (struct stack_local_entry *)
18792 ggc_alloc (sizeof (struct stack_local_entry));
18793 s->n = n;
18794 s->mode = mode;
18795 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
18797 s->next = ix86_stack_locals;
18798 ix86_stack_locals = s;
18799 return s->rtl;
18802 /* Construct the SYMBOL_REF for the tls_get_addr function. */
18804 static GTY(()) rtx ix86_tls_symbol;
18806 ix86_tls_get_addr (void)
18809 if (!ix86_tls_symbol)
18811 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
18812 (TARGET_ANY_GNU_TLS
18813 && !TARGET_64BIT)
18814 ? "___tls_get_addr"
18815 : "__tls_get_addr");
18818 return ix86_tls_symbol;
18821 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
18823 static GTY(()) rtx ix86_tls_module_base_symbol;
18825 ix86_tls_module_base (void)
18828 if (!ix86_tls_module_base_symbol)
18830 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
18831 "_TLS_MODULE_BASE_");
18832 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
18833 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
18836 return ix86_tls_module_base_symbol;
18839 /* Calculate the length of the memory address in the instruction
18840 encoding. Does not include the one-byte modrm, opcode, or prefix. */
18843 memory_address_length (rtx addr)
18845 struct ix86_address parts;
18846 rtx base, index, disp;
18847 int len;
18848 int ok;
18850 if (GET_CODE (addr) == PRE_DEC
18851 || GET_CODE (addr) == POST_INC
18852 || GET_CODE (addr) == PRE_MODIFY
18853 || GET_CODE (addr) == POST_MODIFY)
18854 return 0;
18856 ok = ix86_decompose_address (addr, &parts);
18857 gcc_assert (ok);
18859 if (parts.base && GET_CODE (parts.base) == SUBREG)
18860 parts.base = SUBREG_REG (parts.base);
18861 if (parts.index && GET_CODE (parts.index) == SUBREG)
18862 parts.index = SUBREG_REG (parts.index);
18864 base = parts.base;
18865 index = parts.index;
18866 disp = parts.disp;
18867 len = 0;
18869 /* Rule of thumb:
18870 - esp as the base always wants an index,
18871 - ebp as the base always wants a displacement. */
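/* For example (standard IA-32 ModRM/SIB encoding, illustrative):
     mov (%ebx), %eax   -> ModRM only, no extra bytes      (len 0 here)
     mov (%esp), %eax   -> ModRM plus a SIB byte           (len 1)
     mov (%ebp), %eax   -> ModRM plus a one-byte zero disp (len 1)
   which is why the stack and frame pointer cases below are charged one
   extra byte.  */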
18873 /* Register Indirect. */
18874 if (base && !index && !disp)
18876 /* esp (for its index) and ebp (for its displacement) need
18877 the two-byte modrm form. */
18878 if (addr == stack_pointer_rtx
18879 || addr == arg_pointer_rtx
18880 || addr == frame_pointer_rtx
18881 || addr == hard_frame_pointer_rtx)
18882 len = 1;
18885 /* Direct Addressing. */
18886 else if (disp && !base && !index)
18887 len = 4;
18889 else
18891 /* Find the length of the displacement constant. */
18892 if (disp)
18894 if (base && satisfies_constraint_K (disp))
18895 len = 1;
18896 else
18897 len = 4;
18899 /* ebp always wants a displacement. */
18900 else if (base == hard_frame_pointer_rtx)
18901 len = 1;
18903 /* An index requires the two-byte modrm form.... */
18904 if (index
18905 /* ...like esp, which always wants an index. */
18906 || base == stack_pointer_rtx
18907 || base == arg_pointer_rtx
18908 || base == frame_pointer_rtx)
18909 len += 1;
18912 return len;
18915 /* Compute default value for "length_immediate" attribute. When SHORTFORM
18916 is set, expect that the insn has an 8-bit immediate alternative. */
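/* For example (illustrative):
     addl $0x12345678, %eax   -> 4 immediate bytes (MODE_SI)
     addl $4, %eax            -> 1 immediate byte when the insn pattern has
                                 an 8-bit immediate alternative (SHORTFORM,
                                 satisfied via constraint K)
   MODE_DI also counts 4 bytes because x86-64 immediates are encoded as
   32-bit sign-extended values.  */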
18918 ix86_attr_length_immediate_default (rtx insn, int shortform)
18920 int len = 0;
18921 int i;
18922 extract_insn_cached (insn);
18923 for (i = recog_data.n_operands - 1; i >= 0; --i)
18924 if (CONSTANT_P (recog_data.operand[i]))
18926 gcc_assert (!len);
18927 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
18928 len = 1;
18929 else
18931 switch (get_attr_mode (insn))
18933 case MODE_QI:
18934 len+=1;
18935 break;
18936 case MODE_HI:
18937 len+=2;
18938 break;
18939 case MODE_SI:
18940 len+=4;
18941 break;
18942 /* Immediates for DImode instructions are encoded as 32bit sign extended values. */
18943 case MODE_DI:
18944 len+=4;
18945 break;
18946 default:
18947 fatal_insn ("unknown insn mode", insn);
18951 return len;
18953 /* Compute default value for "length_address" attribute. */
18955 ix86_attr_length_address_default (rtx insn)
18957 int i;
18959 if (get_attr_type (insn) == TYPE_LEA)
18961 rtx set = PATTERN (insn);
18963 if (GET_CODE (set) == PARALLEL)
18964 set = XVECEXP (set, 0, 0);
18966 gcc_assert (GET_CODE (set) == SET);
18968 return memory_address_length (SET_SRC (set));
18971 extract_insn_cached (insn);
18972 for (i = recog_data.n_operands - 1; i >= 0; --i)
18973 if (MEM_P (recog_data.operand[i]))
18975 return memory_address_length (XEXP (recog_data.operand[i], 0));
18976 break;
18978 return 0;
18981 /* Compute default value for "length_vex" attribute. It includes
18982 2 or 3 byte VEX prefix and 1 opcode byte. */
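/* For example (illustrative):
     vaddps %xmm2, %xmm1, %xmm0   -> 2-byte VEX prefix + opcode = 3
     the same operation with VEX.W set, or with %xmm8..%xmm15 used as the
     base or index of a memory operand (needing REX.X/REX.B)
                                  -> 3-byte VEX prefix + opcode = 4
   An extended register used only in the reg field (REX.R) still fits the
   2-byte form.  */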
18985 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
18986 int has_vex_w)
18988 int i;
18990 /* Only the 0f opcode can use the 2 byte VEX prefix, and the VEX W bit needs the
18991 3 byte VEX prefix. */
18992 if (!has_0f_opcode || has_vex_w)
18993 return 3 + 1;
18995 /* We can always use 2 byte VEX prefix in 32bit. */
18996 if (!TARGET_64BIT)
18997 return 2 + 1;
18999 extract_insn_cached (insn);
19001 for (i = recog_data.n_operands - 1; i >= 0; --i)
19002 if (REG_P (recog_data.operand[i]))
19004 /* REX.W bit uses 3 byte VEX prefix. */
19005 if (GET_MODE (recog_data.operand[i]) == DImode)
19006 return 3 + 1;
19008 else
19010 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19011 if (MEM_P (recog_data.operand[i])
19012 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19013 return 3 + 1;
19016 return 2 + 1;
19019 /* Return the maximum number of instructions a cpu can issue. */
19021 static int
19022 ix86_issue_rate (void)
19024 switch (ix86_tune)
19026 case PROCESSOR_PENTIUM:
19027 case PROCESSOR_K6:
19028 return 2;
19030 case PROCESSOR_PENTIUMPRO:
19031 case PROCESSOR_PENTIUM4:
19032 case PROCESSOR_ATHLON:
19033 case PROCESSOR_K8:
19034 case PROCESSOR_AMDFAM10:
19035 case PROCESSOR_NOCONA:
19036 case PROCESSOR_GENERIC32:
19037 case PROCESSOR_GENERIC64:
19038 return 3;
19040 case PROCESSOR_CORE2:
19041 return 4;
19043 default:
19044 return 1;
19048 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags set
19049 by DEP_INSN and nothing else set by DEP_INSN. */
19051 static int
19052 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19054 rtx set, set2;
19056 /* Simplify the test for uninteresting insns. */
19057 if (insn_type != TYPE_SETCC
19058 && insn_type != TYPE_ICMOV
19059 && insn_type != TYPE_FCMOV
19060 && insn_type != TYPE_IBR)
19061 return 0;
19063 if ((set = single_set (dep_insn)) != 0)
19065 set = SET_DEST (set);
19066 set2 = NULL_RTX;
19068 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19069 && XVECLEN (PATTERN (dep_insn), 0) == 2
19070 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19071 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19073 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19074 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19076 else
19077 return 0;
19079 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19080 return 0;
19082 /* This test is true if the dependent insn reads the flags but
19083 not any other potentially set register. */
19084 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19085 return 0;
19087 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19088 return 0;
19090 return 1;
19093 /* Return true iff USE_INSN has a memory address with operands set by
19094 SET_INSN. */
19096 bool
19097 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19099 int i;
19100 extract_insn_cached (use_insn);
19101 for (i = recog_data.n_operands - 1; i >= 0; --i)
19102 if (MEM_P (recog_data.operand[i]))
19104 rtx addr = XEXP (recog_data.operand[i], 0);
19105 return modified_in_p (addr, set_insn) != 0;
19107 return false;
19110 static int
19111 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19113 enum attr_type insn_type, dep_insn_type;
19114 enum attr_memory memory;
19115 rtx set, set2;
19116 int dep_insn_code_number;
19118 /* Anti and output dependencies have zero cost on all CPUs. */
19119 if (REG_NOTE_KIND (link) != 0)
19120 return 0;
19122 dep_insn_code_number = recog_memoized (dep_insn);
19124 /* If we can't recognize the insns, we can't really do anything. */
19125 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19126 return cost;
19128 insn_type = get_attr_type (insn);
19129 dep_insn_type = get_attr_type (dep_insn);
19131 switch (ix86_tune)
19133 case PROCESSOR_PENTIUM:
19134 /* Address Generation Interlock adds a cycle of latency. */
19135 if (insn_type == TYPE_LEA)
19137 rtx addr = PATTERN (insn);
19139 if (GET_CODE (addr) == PARALLEL)
19140 addr = XVECEXP (addr, 0, 0);
19142 gcc_assert (GET_CODE (addr) == SET);
19144 addr = SET_SRC (addr);
19145 if (modified_in_p (addr, dep_insn))
19146 cost += 1;
19148 else if (ix86_agi_dependent (dep_insn, insn))
19149 cost += 1;
19151 /* ??? Compares pair with jump/setcc. */
19152 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19153 cost = 0;
19155 /* Floating point stores require value to be ready one cycle earlier. */
19156 if (insn_type == TYPE_FMOV
19157 && get_attr_memory (insn) == MEMORY_STORE
19158 && !ix86_agi_dependent (dep_insn, insn))
19159 cost += 1;
19160 break;
19162 case PROCESSOR_PENTIUMPRO:
19163 memory = get_attr_memory (insn);
19165 /* INT->FP conversion is expensive. */
19166 if (get_attr_fp_int_src (dep_insn))
19167 cost += 5;
19169 /* There is one cycle extra latency between an FP op and a store. */
19170 if (insn_type == TYPE_FMOV
19171 && (set = single_set (dep_insn)) != NULL_RTX
19172 && (set2 = single_set (insn)) != NULL_RTX
19173 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19174 && MEM_P (SET_DEST (set2)))
19175 cost += 1;
19177 /* Show the ability of the reorder buffer to hide the latency of a load by
19178 executing it in parallel with the previous instruction when the
19179 previous instruction is not needed to compute the address. */
19180 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19181 && !ix86_agi_dependent (dep_insn, insn))
19183 /* Claim moves to take one cycle, as core can issue one load
19184 at a time and the next load can start a cycle later. */
19185 if (dep_insn_type == TYPE_IMOV
19186 || dep_insn_type == TYPE_FMOV)
19187 cost = 1;
19188 else if (cost > 1)
19189 cost--;
19191 break;
19193 case PROCESSOR_K6:
19194 memory = get_attr_memory (insn);
19196 /* The esp dependency is resolved before the instruction is really
19197 finished. */
19198 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19199 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19200 return 1;
19202 /* INT->FP conversion is expensive. */
19203 if (get_attr_fp_int_src (dep_insn))
19204 cost += 5;
19206 /* Show the ability of the reorder buffer to hide the latency of a load by
19207 executing it in parallel with the previous instruction when the
19208 previous instruction is not needed to compute the address. */
19209 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19210 && !ix86_agi_dependent (dep_insn, insn))
19212 /* Claim moves to take one cycle, as core can issue one load
19213 at a time and the next load can start a cycle later. */
19214 if (dep_insn_type == TYPE_IMOV
19215 || dep_insn_type == TYPE_FMOV)
19216 cost = 1;
19217 else if (cost > 2)
19218 cost -= 2;
19219 else
19220 cost = 1;
19222 break;
19224 case PROCESSOR_ATHLON:
19225 case PROCESSOR_K8:
19226 case PROCESSOR_AMDFAM10:
19227 case PROCESSOR_GENERIC32:
19228 case PROCESSOR_GENERIC64:
19229 memory = get_attr_memory (insn);
19231 /* Show the ability of the reorder buffer to hide the latency of a load by
19232 executing it in parallel with the previous instruction when the
19233 previous instruction is not needed to compute the address. */
19234 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19235 && !ix86_agi_dependent (dep_insn, insn))
19237 enum attr_unit unit = get_attr_unit (insn);
19238 int loadcost = 3;
19240 /* Because of the difference between the length of integer and
19241 floating unit pipeline preparation stages, the memory operands
19242 for floating point are cheaper.
19244 ??? For Athlon the difference is most probably 2. */
19245 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19246 loadcost = 3;
19247 else
19248 loadcost = TARGET_ATHLON ? 2 : 0;
19250 if (cost >= loadcost)
19251 cost -= loadcost;
19252 else
19253 cost = 0;
19256 default:
19257 break;
19260 return cost;
19263 /* How many alternative schedules to try. This should be as wide as the
19264 scheduling freedom in the DFA, but no wider. Making this value too
19265 large results in extra work for the scheduler. */
19267 static int
19268 ia32_multipass_dfa_lookahead (void)
19270 switch (ix86_tune)
19272 case PROCESSOR_PENTIUM:
19273 return 2;
19275 case PROCESSOR_PENTIUMPRO:
19276 case PROCESSOR_K6:
19277 return 1;
19279 default:
19280 return 0;
19285 /* Compute the alignment given to a constant that is being placed in memory.
19286 EXP is the constant and ALIGN is the alignment that the object would
19287 ordinarily have.
19288 The value of this function is used instead of that alignment to align
19289 the object. */
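/* Worked examples of the rules below (illustrative): a DFmode constant whose
   ordinary alignment is only 32 bits is bumped to 64, a constant whose mode
   satisfies ALIGN_MODE_128 is bumped to 128, and, unless optimizing for
   size, a string constant of 31 or more characters gets at least
   BITS_PER_WORD alignment.  */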
19292 ix86_constant_alignment (tree exp, int align)
19294 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19295 || TREE_CODE (exp) == INTEGER_CST)
19297 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19298 return 64;
19299 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19300 return 128;
19302 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19303 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19304 return BITS_PER_WORD;
19306 return align;
19309 /* Compute the alignment for a static variable.
19310 TYPE is the data type, and ALIGN is the alignment that
19311 the object would ordinarily have. The value of this function is used
19312 instead of that alignment to align the object. */
19315 ix86_data_alignment (tree type, int align)
19317 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
19319 if (AGGREGATE_TYPE_P (type)
19320 && TYPE_SIZE (type)
19321 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19322 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
19323 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
19324 && align < max_align)
19325 align = max_align;
19327 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
19328 to a 16-byte boundary. */
19329 if (TARGET_64BIT)
19331 if (AGGREGATE_TYPE_P (type)
19332 && TYPE_SIZE (type)
19333 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19334 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
19335 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19336 return 128;
19339 if (TREE_CODE (type) == ARRAY_TYPE)
19341 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19342 return 64;
19343 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19344 return 128;
19346 else if (TREE_CODE (type) == COMPLEX_TYPE)
19349 if (TYPE_MODE (type) == DCmode && align < 64)
19350 return 64;
19351 if ((TYPE_MODE (type) == XCmode
19352 || TYPE_MODE (type) == TCmode) && align < 128)
19353 return 128;
19355 else if ((TREE_CODE (type) == RECORD_TYPE
19356 || TREE_CODE (type) == UNION_TYPE
19357 || TREE_CODE (type) == QUAL_UNION_TYPE)
19358 && TYPE_FIELDS (type))
19360 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
19361 return 64;
19362 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
19363 return 128;
19365 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
19366 || TREE_CODE (type) == INTEGER_TYPE)
19368 if (TYPE_MODE (type) == DFmode && align < 64)
19369 return 64;
19370 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
19371 return 128;
19374 return align;
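/* Worked example (hypothetical declarations): for a static "double d[4]"
   on x86-64 the object is 32 bytes (256 bits), so the first aggregate
   test raises its alignment to max_align; when compiling with -Os,
   max_align is only BITS_PER_WORD, but the x86-64 rule above still
   returns 128 because the array is larger than 16 bytes.  A scalar
   "double" keeps its natural 64-bit alignment.  */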
19377 /* Compute the alignment for a local variable or a stack slot. EXP is
19378 the data type or decl itself, MODE is the widest mode available and
19379 ALIGN is the alignment that the object would ordinarily have. The
19380 value of this macro is used instead of that alignment to align the
19381 object. */
19383 unsigned int
19384 ix86_local_alignment (tree exp, enum machine_mode mode,
19385 unsigned int align)
19387 tree type, decl;
19389 if (exp && DECL_P (exp))
19391 type = TREE_TYPE (exp);
19392 decl = exp;
19394 else
19396 type = exp;
19397 decl = NULL;
19400 /* Don't do dynamic stack realignment for long long objects with
19401 -mpreferred-stack-boundary=2. */
19402 if (!TARGET_64BIT
19403 && align == 64
19404 && ix86_preferred_stack_boundary < 64
19405 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
19406 && (!type || !TYPE_USER_ALIGN (type))
19407 && (!decl || !DECL_USER_ALIGN (decl)))
19408 align = 32;
19410 /* If TYPE is NULL, we are allocating a stack slot for a caller-save
19411 register in MODE. We will return the largest alignment of XF
19412 and DF. */
19413 if (!type)
19415 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
19416 align = GET_MODE_ALIGNMENT (DFmode);
19417 return align;
19420 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
19421 to a 16-byte boundary. */
19422 if (TARGET_64BIT)
19424 if (AGGREGATE_TYPE_P (type)
19425 && TYPE_SIZE (type)
19426 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19427 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
19428 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19429 return 128;
19431 if (TREE_CODE (type) == ARRAY_TYPE)
19433 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19434 return 64;
19435 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19436 return 128;
19438 else if (TREE_CODE (type) == COMPLEX_TYPE)
19440 if (TYPE_MODE (type) == DCmode && align < 64)
19441 return 64;
19442 if ((TYPE_MODE (type) == XCmode
19443 || TYPE_MODE (type) == TCmode) && align < 128)
19444 return 128;
19446 else if ((TREE_CODE (type) == RECORD_TYPE
19447 || TREE_CODE (type) == UNION_TYPE
19448 || TREE_CODE (type) == QUAL_UNION_TYPE)
19449 && TYPE_FIELDS (type))
19451 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
19452 return 64;
19453 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
19454 return 128;
19456 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
19457 || TREE_CODE (type) == INTEGER_TYPE)
19460 if (TYPE_MODE (type) == DFmode && align < 64)
19461 return 64;
19462 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
19463 return 128;
19465 return align;
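/* Worked example (hypothetical source): with -m32
   -mpreferred-stack-boundary=2, a local "long long" would normally ask
   for 64-bit alignment and force dynamic stack realignment; the check at
   the top of this function drops it back to 32 bits instead, since
   neither the type nor the decl requested that alignment explicitly.  */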
19468 /* Emit RTL insns to initialize the variable parts of a trampoline.
19469 FNADDR is an RTX for the address of the function's pure code.
19470 CXT is an RTX for the static chain value for the function. */
19471 void
19472 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
19474 if (!TARGET_64BIT)
19476 /* Compute offset from the end of the jmp to the target function. */
19477 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
19478 plus_constant (tramp, 10),
19479 NULL_RTX, 1, OPTAB_DIRECT);
19480 emit_move_insn (gen_rtx_MEM (QImode, tramp),
19481 gen_int_mode (0xb9, QImode));
19482 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
19483 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
19484 gen_int_mode (0xe9, QImode));
19485 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
19487 else
19489 int offset = 0;
19490 /* Try to load the address using the shorter movl instead of movabs.
19491 We may want to support movq for kernel mode, but the kernel does not
19492 use trampolines at the moment. */
19493 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
19495 fnaddr = copy_to_mode_reg (DImode, fnaddr);
19496 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19497 gen_int_mode (0xbb41, HImode));
19498 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
19499 gen_lowpart (SImode, fnaddr));
19500 offset += 6;
19502 else
19504 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19505 gen_int_mode (0xbb49, HImode));
19506 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
19507 fnaddr);
19508 offset += 10;
19510 /* Load static chain using movabs to r10. */
19511 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19512 gen_int_mode (0xba49, HImode));
19513 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
19514 cxt);
19515 offset += 10;
19516 /* Jump to r11. */
19517 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19518 gen_int_mode (0xff49, HImode));
19519 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
19520 gen_int_mode (0xe3, QImode));
19521 offset += 3;
19522 gcc_assert (offset <= TRAMPOLINE_SIZE);
19525 #ifdef ENABLE_EXECUTE_STACK
19526 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
19527 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
19528 #endif
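/* Byte-level sketch of the trampolines built above (offsets as computed
   at run time; immediates are filled in from FNADDR and CXT):

   32-bit:   0: b9 <imm32>       movl   $cxt, %ecx
             5: e9 <rel32>       jmp    fnaddr              (10 bytes)

   64-bit:   0: 49 bb <imm64>    movabs $fnaddr, %r11       (or the
                                  6-byte 41 bb <imm32> form when
                                  FNADDR fits in 32 bits)
            10: 49 ba <imm64>    movabs $cxt, %r10
            20: 49 ff e3         jmp    *%r11               (23 bytes)  */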
19531 /* Codes for all the SSE/MMX builtins. */
19532 enum ix86_builtins
19534 IX86_BUILTIN_ADDPS,
19535 IX86_BUILTIN_ADDSS,
19536 IX86_BUILTIN_DIVPS,
19537 IX86_BUILTIN_DIVSS,
19538 IX86_BUILTIN_MULPS,
19539 IX86_BUILTIN_MULSS,
19540 IX86_BUILTIN_SUBPS,
19541 IX86_BUILTIN_SUBSS,
19543 IX86_BUILTIN_CMPEQPS,
19544 IX86_BUILTIN_CMPLTPS,
19545 IX86_BUILTIN_CMPLEPS,
19546 IX86_BUILTIN_CMPGTPS,
19547 IX86_BUILTIN_CMPGEPS,
19548 IX86_BUILTIN_CMPNEQPS,
19549 IX86_BUILTIN_CMPNLTPS,
19550 IX86_BUILTIN_CMPNLEPS,
19551 IX86_BUILTIN_CMPNGTPS,
19552 IX86_BUILTIN_CMPNGEPS,
19553 IX86_BUILTIN_CMPORDPS,
19554 IX86_BUILTIN_CMPUNORDPS,
19555 IX86_BUILTIN_CMPEQSS,
19556 IX86_BUILTIN_CMPLTSS,
19557 IX86_BUILTIN_CMPLESS,
19558 IX86_BUILTIN_CMPNEQSS,
19559 IX86_BUILTIN_CMPNLTSS,
19560 IX86_BUILTIN_CMPNLESS,
19561 IX86_BUILTIN_CMPNGTSS,
19562 IX86_BUILTIN_CMPNGESS,
19563 IX86_BUILTIN_CMPORDSS,
19564 IX86_BUILTIN_CMPUNORDSS,
19566 IX86_BUILTIN_COMIEQSS,
19567 IX86_BUILTIN_COMILTSS,
19568 IX86_BUILTIN_COMILESS,
19569 IX86_BUILTIN_COMIGTSS,
19570 IX86_BUILTIN_COMIGESS,
19571 IX86_BUILTIN_COMINEQSS,
19572 IX86_BUILTIN_UCOMIEQSS,
19573 IX86_BUILTIN_UCOMILTSS,
19574 IX86_BUILTIN_UCOMILESS,
19575 IX86_BUILTIN_UCOMIGTSS,
19576 IX86_BUILTIN_UCOMIGESS,
19577 IX86_BUILTIN_UCOMINEQSS,
19579 IX86_BUILTIN_CVTPI2PS,
19580 IX86_BUILTIN_CVTPS2PI,
19581 IX86_BUILTIN_CVTSI2SS,
19582 IX86_BUILTIN_CVTSI642SS,
19583 IX86_BUILTIN_CVTSS2SI,
19584 IX86_BUILTIN_CVTSS2SI64,
19585 IX86_BUILTIN_CVTTPS2PI,
19586 IX86_BUILTIN_CVTTSS2SI,
19587 IX86_BUILTIN_CVTTSS2SI64,
19589 IX86_BUILTIN_MAXPS,
19590 IX86_BUILTIN_MAXSS,
19591 IX86_BUILTIN_MINPS,
19592 IX86_BUILTIN_MINSS,
19594 IX86_BUILTIN_LOADUPS,
19595 IX86_BUILTIN_STOREUPS,
19596 IX86_BUILTIN_MOVSS,
19598 IX86_BUILTIN_MOVHLPS,
19599 IX86_BUILTIN_MOVLHPS,
19600 IX86_BUILTIN_LOADHPS,
19601 IX86_BUILTIN_LOADLPS,
19602 IX86_BUILTIN_STOREHPS,
19603 IX86_BUILTIN_STORELPS,
19605 IX86_BUILTIN_MASKMOVQ,
19606 IX86_BUILTIN_MOVMSKPS,
19607 IX86_BUILTIN_PMOVMSKB,
19609 IX86_BUILTIN_MOVNTPS,
19610 IX86_BUILTIN_MOVNTQ,
19612 IX86_BUILTIN_LOADDQU,
19613 IX86_BUILTIN_STOREDQU,
19615 IX86_BUILTIN_PACKSSWB,
19616 IX86_BUILTIN_PACKSSDW,
19617 IX86_BUILTIN_PACKUSWB,
19619 IX86_BUILTIN_PADDB,
19620 IX86_BUILTIN_PADDW,
19621 IX86_BUILTIN_PADDD,
19622 IX86_BUILTIN_PADDQ,
19623 IX86_BUILTIN_PADDSB,
19624 IX86_BUILTIN_PADDSW,
19625 IX86_BUILTIN_PADDUSB,
19626 IX86_BUILTIN_PADDUSW,
19627 IX86_BUILTIN_PSUBB,
19628 IX86_BUILTIN_PSUBW,
19629 IX86_BUILTIN_PSUBD,
19630 IX86_BUILTIN_PSUBQ,
19631 IX86_BUILTIN_PSUBSB,
19632 IX86_BUILTIN_PSUBSW,
19633 IX86_BUILTIN_PSUBUSB,
19634 IX86_BUILTIN_PSUBUSW,
19636 IX86_BUILTIN_PAND,
19637 IX86_BUILTIN_PANDN,
19638 IX86_BUILTIN_POR,
19639 IX86_BUILTIN_PXOR,
19641 IX86_BUILTIN_PAVGB,
19642 IX86_BUILTIN_PAVGW,
19644 IX86_BUILTIN_PCMPEQB,
19645 IX86_BUILTIN_PCMPEQW,
19646 IX86_BUILTIN_PCMPEQD,
19647 IX86_BUILTIN_PCMPGTB,
19648 IX86_BUILTIN_PCMPGTW,
19649 IX86_BUILTIN_PCMPGTD,
19651 IX86_BUILTIN_PMADDWD,
19653 IX86_BUILTIN_PMAXSW,
19654 IX86_BUILTIN_PMAXUB,
19655 IX86_BUILTIN_PMINSW,
19656 IX86_BUILTIN_PMINUB,
19658 IX86_BUILTIN_PMULHUW,
19659 IX86_BUILTIN_PMULHW,
19660 IX86_BUILTIN_PMULLW,
19662 IX86_BUILTIN_PSADBW,
19663 IX86_BUILTIN_PSHUFW,
19665 IX86_BUILTIN_PSLLW,
19666 IX86_BUILTIN_PSLLD,
19667 IX86_BUILTIN_PSLLQ,
19668 IX86_BUILTIN_PSRAW,
19669 IX86_BUILTIN_PSRAD,
19670 IX86_BUILTIN_PSRLW,
19671 IX86_BUILTIN_PSRLD,
19672 IX86_BUILTIN_PSRLQ,
19673 IX86_BUILTIN_PSLLWI,
19674 IX86_BUILTIN_PSLLDI,
19675 IX86_BUILTIN_PSLLQI,
19676 IX86_BUILTIN_PSRAWI,
19677 IX86_BUILTIN_PSRADI,
19678 IX86_BUILTIN_PSRLWI,
19679 IX86_BUILTIN_PSRLDI,
19680 IX86_BUILTIN_PSRLQI,
19682 IX86_BUILTIN_PUNPCKHBW,
19683 IX86_BUILTIN_PUNPCKHWD,
19684 IX86_BUILTIN_PUNPCKHDQ,
19685 IX86_BUILTIN_PUNPCKLBW,
19686 IX86_BUILTIN_PUNPCKLWD,
19687 IX86_BUILTIN_PUNPCKLDQ,
19689 IX86_BUILTIN_SHUFPS,
19691 IX86_BUILTIN_RCPPS,
19692 IX86_BUILTIN_RCPSS,
19693 IX86_BUILTIN_RSQRTPS,
19694 IX86_BUILTIN_RSQRTPS_NR,
19695 IX86_BUILTIN_RSQRTSS,
19696 IX86_BUILTIN_RSQRTF,
19697 IX86_BUILTIN_SQRTPS,
19698 IX86_BUILTIN_SQRTPS_NR,
19699 IX86_BUILTIN_SQRTSS,
19701 IX86_BUILTIN_UNPCKHPS,
19702 IX86_BUILTIN_UNPCKLPS,
19704 IX86_BUILTIN_ANDPS,
19705 IX86_BUILTIN_ANDNPS,
19706 IX86_BUILTIN_ORPS,
19707 IX86_BUILTIN_XORPS,
19709 IX86_BUILTIN_EMMS,
19710 IX86_BUILTIN_LDMXCSR,
19711 IX86_BUILTIN_STMXCSR,
19712 IX86_BUILTIN_SFENCE,
19714 /* 3DNow! Original */
19715 IX86_BUILTIN_FEMMS,
19716 IX86_BUILTIN_PAVGUSB,
19717 IX86_BUILTIN_PF2ID,
19718 IX86_BUILTIN_PFACC,
19719 IX86_BUILTIN_PFADD,
19720 IX86_BUILTIN_PFCMPEQ,
19721 IX86_BUILTIN_PFCMPGE,
19722 IX86_BUILTIN_PFCMPGT,
19723 IX86_BUILTIN_PFMAX,
19724 IX86_BUILTIN_PFMIN,
19725 IX86_BUILTIN_PFMUL,
19726 IX86_BUILTIN_PFRCP,
19727 IX86_BUILTIN_PFRCPIT1,
19728 IX86_BUILTIN_PFRCPIT2,
19729 IX86_BUILTIN_PFRSQIT1,
19730 IX86_BUILTIN_PFRSQRT,
19731 IX86_BUILTIN_PFSUB,
19732 IX86_BUILTIN_PFSUBR,
19733 IX86_BUILTIN_PI2FD,
19734 IX86_BUILTIN_PMULHRW,
19736 /* 3DNow! Athlon Extensions */
19737 IX86_BUILTIN_PF2IW,
19738 IX86_BUILTIN_PFNACC,
19739 IX86_BUILTIN_PFPNACC,
19740 IX86_BUILTIN_PI2FW,
19741 IX86_BUILTIN_PSWAPDSI,
19742 IX86_BUILTIN_PSWAPDSF,
19744 /* SSE2 */
19745 IX86_BUILTIN_ADDPD,
19746 IX86_BUILTIN_ADDSD,
19747 IX86_BUILTIN_DIVPD,
19748 IX86_BUILTIN_DIVSD,
19749 IX86_BUILTIN_MULPD,
19750 IX86_BUILTIN_MULSD,
19751 IX86_BUILTIN_SUBPD,
19752 IX86_BUILTIN_SUBSD,
19754 IX86_BUILTIN_CMPEQPD,
19755 IX86_BUILTIN_CMPLTPD,
19756 IX86_BUILTIN_CMPLEPD,
19757 IX86_BUILTIN_CMPGTPD,
19758 IX86_BUILTIN_CMPGEPD,
19759 IX86_BUILTIN_CMPNEQPD,
19760 IX86_BUILTIN_CMPNLTPD,
19761 IX86_BUILTIN_CMPNLEPD,
19762 IX86_BUILTIN_CMPNGTPD,
19763 IX86_BUILTIN_CMPNGEPD,
19764 IX86_BUILTIN_CMPORDPD,
19765 IX86_BUILTIN_CMPUNORDPD,
19766 IX86_BUILTIN_CMPEQSD,
19767 IX86_BUILTIN_CMPLTSD,
19768 IX86_BUILTIN_CMPLESD,
19769 IX86_BUILTIN_CMPNEQSD,
19770 IX86_BUILTIN_CMPNLTSD,
19771 IX86_BUILTIN_CMPNLESD,
19772 IX86_BUILTIN_CMPORDSD,
19773 IX86_BUILTIN_CMPUNORDSD,
19775 IX86_BUILTIN_COMIEQSD,
19776 IX86_BUILTIN_COMILTSD,
19777 IX86_BUILTIN_COMILESD,
19778 IX86_BUILTIN_COMIGTSD,
19779 IX86_BUILTIN_COMIGESD,
19780 IX86_BUILTIN_COMINEQSD,
19781 IX86_BUILTIN_UCOMIEQSD,
19782 IX86_BUILTIN_UCOMILTSD,
19783 IX86_BUILTIN_UCOMILESD,
19784 IX86_BUILTIN_UCOMIGTSD,
19785 IX86_BUILTIN_UCOMIGESD,
19786 IX86_BUILTIN_UCOMINEQSD,
19788 IX86_BUILTIN_MAXPD,
19789 IX86_BUILTIN_MAXSD,
19790 IX86_BUILTIN_MINPD,
19791 IX86_BUILTIN_MINSD,
19793 IX86_BUILTIN_ANDPD,
19794 IX86_BUILTIN_ANDNPD,
19795 IX86_BUILTIN_ORPD,
19796 IX86_BUILTIN_XORPD,
19798 IX86_BUILTIN_SQRTPD,
19799 IX86_BUILTIN_SQRTSD,
19801 IX86_BUILTIN_UNPCKHPD,
19802 IX86_BUILTIN_UNPCKLPD,
19804 IX86_BUILTIN_SHUFPD,
19806 IX86_BUILTIN_LOADUPD,
19807 IX86_BUILTIN_STOREUPD,
19808 IX86_BUILTIN_MOVSD,
19810 IX86_BUILTIN_LOADHPD,
19811 IX86_BUILTIN_LOADLPD,
19813 IX86_BUILTIN_CVTDQ2PD,
19814 IX86_BUILTIN_CVTDQ2PS,
19816 IX86_BUILTIN_CVTPD2DQ,
19817 IX86_BUILTIN_CVTPD2PI,
19818 IX86_BUILTIN_CVTPD2PS,
19819 IX86_BUILTIN_CVTTPD2DQ,
19820 IX86_BUILTIN_CVTTPD2PI,
19822 IX86_BUILTIN_CVTPI2PD,
19823 IX86_BUILTIN_CVTSI2SD,
19824 IX86_BUILTIN_CVTSI642SD,
19826 IX86_BUILTIN_CVTSD2SI,
19827 IX86_BUILTIN_CVTSD2SI64,
19828 IX86_BUILTIN_CVTSD2SS,
19829 IX86_BUILTIN_CVTSS2SD,
19830 IX86_BUILTIN_CVTTSD2SI,
19831 IX86_BUILTIN_CVTTSD2SI64,
19833 IX86_BUILTIN_CVTPS2DQ,
19834 IX86_BUILTIN_CVTPS2PD,
19835 IX86_BUILTIN_CVTTPS2DQ,
19837 IX86_BUILTIN_MOVNTI,
19838 IX86_BUILTIN_MOVNTPD,
19839 IX86_BUILTIN_MOVNTDQ,
19841 IX86_BUILTIN_MOVQ128,
19843 /* SSE2 MMX */
19844 IX86_BUILTIN_MASKMOVDQU,
19845 IX86_BUILTIN_MOVMSKPD,
19846 IX86_BUILTIN_PMOVMSKB128,
19848 IX86_BUILTIN_PACKSSWB128,
19849 IX86_BUILTIN_PACKSSDW128,
19850 IX86_BUILTIN_PACKUSWB128,
19852 IX86_BUILTIN_PADDB128,
19853 IX86_BUILTIN_PADDW128,
19854 IX86_BUILTIN_PADDD128,
19855 IX86_BUILTIN_PADDQ128,
19856 IX86_BUILTIN_PADDSB128,
19857 IX86_BUILTIN_PADDSW128,
19858 IX86_BUILTIN_PADDUSB128,
19859 IX86_BUILTIN_PADDUSW128,
19860 IX86_BUILTIN_PSUBB128,
19861 IX86_BUILTIN_PSUBW128,
19862 IX86_BUILTIN_PSUBD128,
19863 IX86_BUILTIN_PSUBQ128,
19864 IX86_BUILTIN_PSUBSB128,
19865 IX86_BUILTIN_PSUBSW128,
19866 IX86_BUILTIN_PSUBUSB128,
19867 IX86_BUILTIN_PSUBUSW128,
19869 IX86_BUILTIN_PAND128,
19870 IX86_BUILTIN_PANDN128,
19871 IX86_BUILTIN_POR128,
19872 IX86_BUILTIN_PXOR128,
19874 IX86_BUILTIN_PAVGB128,
19875 IX86_BUILTIN_PAVGW128,
19877 IX86_BUILTIN_PCMPEQB128,
19878 IX86_BUILTIN_PCMPEQW128,
19879 IX86_BUILTIN_PCMPEQD128,
19880 IX86_BUILTIN_PCMPGTB128,
19881 IX86_BUILTIN_PCMPGTW128,
19882 IX86_BUILTIN_PCMPGTD128,
19884 IX86_BUILTIN_PMADDWD128,
19886 IX86_BUILTIN_PMAXSW128,
19887 IX86_BUILTIN_PMAXUB128,
19888 IX86_BUILTIN_PMINSW128,
19889 IX86_BUILTIN_PMINUB128,
19891 IX86_BUILTIN_PMULUDQ,
19892 IX86_BUILTIN_PMULUDQ128,
19893 IX86_BUILTIN_PMULHUW128,
19894 IX86_BUILTIN_PMULHW128,
19895 IX86_BUILTIN_PMULLW128,
19897 IX86_BUILTIN_PSADBW128,
19898 IX86_BUILTIN_PSHUFHW,
19899 IX86_BUILTIN_PSHUFLW,
19900 IX86_BUILTIN_PSHUFD,
19902 IX86_BUILTIN_PSLLDQI128,
19903 IX86_BUILTIN_PSLLWI128,
19904 IX86_BUILTIN_PSLLDI128,
19905 IX86_BUILTIN_PSLLQI128,
19906 IX86_BUILTIN_PSRAWI128,
19907 IX86_BUILTIN_PSRADI128,
19908 IX86_BUILTIN_PSRLDQI128,
19909 IX86_BUILTIN_PSRLWI128,
19910 IX86_BUILTIN_PSRLDI128,
19911 IX86_BUILTIN_PSRLQI128,
19913 IX86_BUILTIN_PSLLDQ128,
19914 IX86_BUILTIN_PSLLW128,
19915 IX86_BUILTIN_PSLLD128,
19916 IX86_BUILTIN_PSLLQ128,
19917 IX86_BUILTIN_PSRAW128,
19918 IX86_BUILTIN_PSRAD128,
19919 IX86_BUILTIN_PSRLW128,
19920 IX86_BUILTIN_PSRLD128,
19921 IX86_BUILTIN_PSRLQ128,
19923 IX86_BUILTIN_PUNPCKHBW128,
19924 IX86_BUILTIN_PUNPCKHWD128,
19925 IX86_BUILTIN_PUNPCKHDQ128,
19926 IX86_BUILTIN_PUNPCKHQDQ128,
19927 IX86_BUILTIN_PUNPCKLBW128,
19928 IX86_BUILTIN_PUNPCKLWD128,
19929 IX86_BUILTIN_PUNPCKLDQ128,
19930 IX86_BUILTIN_PUNPCKLQDQ128,
19932 IX86_BUILTIN_CLFLUSH,
19933 IX86_BUILTIN_MFENCE,
19934 IX86_BUILTIN_LFENCE,
19936 /* SSE3. */
19937 IX86_BUILTIN_ADDSUBPS,
19938 IX86_BUILTIN_HADDPS,
19939 IX86_BUILTIN_HSUBPS,
19940 IX86_BUILTIN_MOVSHDUP,
19941 IX86_BUILTIN_MOVSLDUP,
19942 IX86_BUILTIN_ADDSUBPD,
19943 IX86_BUILTIN_HADDPD,
19944 IX86_BUILTIN_HSUBPD,
19945 IX86_BUILTIN_LDDQU,
19947 IX86_BUILTIN_MONITOR,
19948 IX86_BUILTIN_MWAIT,
19950 /* SSSE3. */
19951 IX86_BUILTIN_PHADDW,
19952 IX86_BUILTIN_PHADDD,
19953 IX86_BUILTIN_PHADDSW,
19954 IX86_BUILTIN_PHSUBW,
19955 IX86_BUILTIN_PHSUBD,
19956 IX86_BUILTIN_PHSUBSW,
19957 IX86_BUILTIN_PMADDUBSW,
19958 IX86_BUILTIN_PMULHRSW,
19959 IX86_BUILTIN_PSHUFB,
19960 IX86_BUILTIN_PSIGNB,
19961 IX86_BUILTIN_PSIGNW,
19962 IX86_BUILTIN_PSIGND,
19963 IX86_BUILTIN_PALIGNR,
19964 IX86_BUILTIN_PABSB,
19965 IX86_BUILTIN_PABSW,
19966 IX86_BUILTIN_PABSD,
19968 IX86_BUILTIN_PHADDW128,
19969 IX86_BUILTIN_PHADDD128,
19970 IX86_BUILTIN_PHADDSW128,
19971 IX86_BUILTIN_PHSUBW128,
19972 IX86_BUILTIN_PHSUBD128,
19973 IX86_BUILTIN_PHSUBSW128,
19974 IX86_BUILTIN_PMADDUBSW128,
19975 IX86_BUILTIN_PMULHRSW128,
19976 IX86_BUILTIN_PSHUFB128,
19977 IX86_BUILTIN_PSIGNB128,
19978 IX86_BUILTIN_PSIGNW128,
19979 IX86_BUILTIN_PSIGND128,
19980 IX86_BUILTIN_PALIGNR128,
19981 IX86_BUILTIN_PABSB128,
19982 IX86_BUILTIN_PABSW128,
19983 IX86_BUILTIN_PABSD128,
19985 /* AMDFAM10 - SSE4A New Instructions. */
19986 IX86_BUILTIN_MOVNTSD,
19987 IX86_BUILTIN_MOVNTSS,
19988 IX86_BUILTIN_EXTRQI,
19989 IX86_BUILTIN_EXTRQ,
19990 IX86_BUILTIN_INSERTQI,
19991 IX86_BUILTIN_INSERTQ,
19993 /* SSE4.1. */
19994 IX86_BUILTIN_BLENDPD,
19995 IX86_BUILTIN_BLENDPS,
19996 IX86_BUILTIN_BLENDVPD,
19997 IX86_BUILTIN_BLENDVPS,
19998 IX86_BUILTIN_PBLENDVB128,
19999 IX86_BUILTIN_PBLENDW128,
20001 IX86_BUILTIN_DPPD,
20002 IX86_BUILTIN_DPPS,
20004 IX86_BUILTIN_INSERTPS128,
20006 IX86_BUILTIN_MOVNTDQA,
20007 IX86_BUILTIN_MPSADBW128,
20008 IX86_BUILTIN_PACKUSDW128,
20009 IX86_BUILTIN_PCMPEQQ,
20010 IX86_BUILTIN_PHMINPOSUW128,
20012 IX86_BUILTIN_PMAXSB128,
20013 IX86_BUILTIN_PMAXSD128,
20014 IX86_BUILTIN_PMAXUD128,
20015 IX86_BUILTIN_PMAXUW128,
20017 IX86_BUILTIN_PMINSB128,
20018 IX86_BUILTIN_PMINSD128,
20019 IX86_BUILTIN_PMINUD128,
20020 IX86_BUILTIN_PMINUW128,
20022 IX86_BUILTIN_PMOVSXBW128,
20023 IX86_BUILTIN_PMOVSXBD128,
20024 IX86_BUILTIN_PMOVSXBQ128,
20025 IX86_BUILTIN_PMOVSXWD128,
20026 IX86_BUILTIN_PMOVSXWQ128,
20027 IX86_BUILTIN_PMOVSXDQ128,
20029 IX86_BUILTIN_PMOVZXBW128,
20030 IX86_BUILTIN_PMOVZXBD128,
20031 IX86_BUILTIN_PMOVZXBQ128,
20032 IX86_BUILTIN_PMOVZXWD128,
20033 IX86_BUILTIN_PMOVZXWQ128,
20034 IX86_BUILTIN_PMOVZXDQ128,
20036 IX86_BUILTIN_PMULDQ128,
20037 IX86_BUILTIN_PMULLD128,
20039 IX86_BUILTIN_ROUNDPD,
20040 IX86_BUILTIN_ROUNDPS,
20041 IX86_BUILTIN_ROUNDSD,
20042 IX86_BUILTIN_ROUNDSS,
20044 IX86_BUILTIN_PTESTZ,
20045 IX86_BUILTIN_PTESTC,
20046 IX86_BUILTIN_PTESTNZC,
20048 IX86_BUILTIN_VEC_INIT_V2SI,
20049 IX86_BUILTIN_VEC_INIT_V4HI,
20050 IX86_BUILTIN_VEC_INIT_V8QI,
20051 IX86_BUILTIN_VEC_EXT_V2DF,
20052 IX86_BUILTIN_VEC_EXT_V2DI,
20053 IX86_BUILTIN_VEC_EXT_V4SF,
20054 IX86_BUILTIN_VEC_EXT_V4SI,
20055 IX86_BUILTIN_VEC_EXT_V8HI,
20056 IX86_BUILTIN_VEC_EXT_V2SI,
20057 IX86_BUILTIN_VEC_EXT_V4HI,
20058 IX86_BUILTIN_VEC_EXT_V16QI,
20059 IX86_BUILTIN_VEC_SET_V2DI,
20060 IX86_BUILTIN_VEC_SET_V4SF,
20061 IX86_BUILTIN_VEC_SET_V4SI,
20062 IX86_BUILTIN_VEC_SET_V8HI,
20063 IX86_BUILTIN_VEC_SET_V4HI,
20064 IX86_BUILTIN_VEC_SET_V16QI,
20066 IX86_BUILTIN_VEC_PACK_SFIX,
20068 /* SSE4.2. */
20069 IX86_BUILTIN_CRC32QI,
20070 IX86_BUILTIN_CRC32HI,
20071 IX86_BUILTIN_CRC32SI,
20072 IX86_BUILTIN_CRC32DI,
20074 IX86_BUILTIN_PCMPESTRI128,
20075 IX86_BUILTIN_PCMPESTRM128,
20076 IX86_BUILTIN_PCMPESTRA128,
20077 IX86_BUILTIN_PCMPESTRC128,
20078 IX86_BUILTIN_PCMPESTRO128,
20079 IX86_BUILTIN_PCMPESTRS128,
20080 IX86_BUILTIN_PCMPESTRZ128,
20081 IX86_BUILTIN_PCMPISTRI128,
20082 IX86_BUILTIN_PCMPISTRM128,
20083 IX86_BUILTIN_PCMPISTRA128,
20084 IX86_BUILTIN_PCMPISTRC128,
20085 IX86_BUILTIN_PCMPISTRO128,
20086 IX86_BUILTIN_PCMPISTRS128,
20087 IX86_BUILTIN_PCMPISTRZ128,
20089 IX86_BUILTIN_PCMPGTQ,
20091 /* AES instructions */
20092 IX86_BUILTIN_AESENC128,
20093 IX86_BUILTIN_AESENCLAST128,
20094 IX86_BUILTIN_AESDEC128,
20095 IX86_BUILTIN_AESDECLAST128,
20096 IX86_BUILTIN_AESIMC128,
20097 IX86_BUILTIN_AESKEYGENASSIST128,
20099 /* PCLMUL instruction */
20100 IX86_BUILTIN_PCLMULQDQ128,
20102 /* AVX */
20103 IX86_BUILTIN_ADDPD256,
20104 IX86_BUILTIN_ADDPS256,
20105 IX86_BUILTIN_ADDSUBPD256,
20106 IX86_BUILTIN_ADDSUBPS256,
20107 IX86_BUILTIN_ANDPD256,
20108 IX86_BUILTIN_ANDPS256,
20109 IX86_BUILTIN_ANDNPD256,
20110 IX86_BUILTIN_ANDNPS256,
20111 IX86_BUILTIN_BLENDPD256,
20112 IX86_BUILTIN_BLENDPS256,
20113 IX86_BUILTIN_BLENDVPD256,
20114 IX86_BUILTIN_BLENDVPS256,
20115 IX86_BUILTIN_DIVPD256,
20116 IX86_BUILTIN_DIVPS256,
20117 IX86_BUILTIN_DPPS256,
20118 IX86_BUILTIN_HADDPD256,
20119 IX86_BUILTIN_HADDPS256,
20120 IX86_BUILTIN_HSUBPD256,
20121 IX86_BUILTIN_HSUBPS256,
20122 IX86_BUILTIN_MAXPD256,
20123 IX86_BUILTIN_MAXPS256,
20124 IX86_BUILTIN_MINPD256,
20125 IX86_BUILTIN_MINPS256,
20126 IX86_BUILTIN_MULPD256,
20127 IX86_BUILTIN_MULPS256,
20128 IX86_BUILTIN_ORPD256,
20129 IX86_BUILTIN_ORPS256,
20130 IX86_BUILTIN_SHUFPD256,
20131 IX86_BUILTIN_SHUFPS256,
20132 IX86_BUILTIN_SUBPD256,
20133 IX86_BUILTIN_SUBPS256,
20134 IX86_BUILTIN_XORPD256,
20135 IX86_BUILTIN_XORPS256,
20136 IX86_BUILTIN_CMPSD,
20137 IX86_BUILTIN_CMPSS,
20138 IX86_BUILTIN_CMPPD,
20139 IX86_BUILTIN_CMPPS,
20140 IX86_BUILTIN_CMPPD256,
20141 IX86_BUILTIN_CMPPS256,
20142 IX86_BUILTIN_CVTDQ2PD256,
20143 IX86_BUILTIN_CVTDQ2PS256,
20144 IX86_BUILTIN_CVTPD2PS256,
20145 IX86_BUILTIN_CVTPS2DQ256,
20146 IX86_BUILTIN_CVTPS2PD256,
20147 IX86_BUILTIN_CVTTPD2DQ256,
20148 IX86_BUILTIN_CVTPD2DQ256,
20149 IX86_BUILTIN_CVTTPS2DQ256,
20150 IX86_BUILTIN_EXTRACTF128PD256,
20151 IX86_BUILTIN_EXTRACTF128PS256,
20152 IX86_BUILTIN_EXTRACTF128SI256,
20153 IX86_BUILTIN_VZEROALL,
20154 IX86_BUILTIN_VZEROUPPER,
20155 IX86_BUILTIN_VZEROUPPER_REX64,
20156 IX86_BUILTIN_VPERMILVARPD,
20157 IX86_BUILTIN_VPERMILVARPS,
20158 IX86_BUILTIN_VPERMILVARPD256,
20159 IX86_BUILTIN_VPERMILVARPS256,
20160 IX86_BUILTIN_VPERMILPD,
20161 IX86_BUILTIN_VPERMILPS,
20162 IX86_BUILTIN_VPERMILPD256,
20163 IX86_BUILTIN_VPERMILPS256,
20164 IX86_BUILTIN_VPERM2F128PD256,
20165 IX86_BUILTIN_VPERM2F128PS256,
20166 IX86_BUILTIN_VPERM2F128SI256,
20167 IX86_BUILTIN_VBROADCASTSS,
20168 IX86_BUILTIN_VBROADCASTSD256,
20169 IX86_BUILTIN_VBROADCASTSS256,
20170 IX86_BUILTIN_VBROADCASTPD256,
20171 IX86_BUILTIN_VBROADCASTPS256,
20172 IX86_BUILTIN_VINSERTF128PD256,
20173 IX86_BUILTIN_VINSERTF128PS256,
20174 IX86_BUILTIN_VINSERTF128SI256,
20175 IX86_BUILTIN_LOADUPD256,
20176 IX86_BUILTIN_LOADUPS256,
20177 IX86_BUILTIN_STOREUPD256,
20178 IX86_BUILTIN_STOREUPS256,
20179 IX86_BUILTIN_LDDQU256,
20180 IX86_BUILTIN_MOVNTDQ256,
20181 IX86_BUILTIN_MOVNTPD256,
20182 IX86_BUILTIN_MOVNTPS256,
20183 IX86_BUILTIN_LOADDQU256,
20184 IX86_BUILTIN_STOREDQU256,
20185 IX86_BUILTIN_MASKLOADPD,
20186 IX86_BUILTIN_MASKLOADPS,
20187 IX86_BUILTIN_MASKSTOREPD,
20188 IX86_BUILTIN_MASKSTOREPS,
20189 IX86_BUILTIN_MASKLOADPD256,
20190 IX86_BUILTIN_MASKLOADPS256,
20191 IX86_BUILTIN_MASKSTOREPD256,
20192 IX86_BUILTIN_MASKSTOREPS256,
20193 IX86_BUILTIN_MOVSHDUP256,
20194 IX86_BUILTIN_MOVSLDUP256,
20195 IX86_BUILTIN_MOVDDUP256,
20197 IX86_BUILTIN_SQRTPD256,
20198 IX86_BUILTIN_SQRTPS256,
20199 IX86_BUILTIN_SQRTPS_NR256,
20200 IX86_BUILTIN_RSQRTPS256,
20201 IX86_BUILTIN_RSQRTPS_NR256,
20203 IX86_BUILTIN_RCPPS256,
20205 IX86_BUILTIN_ROUNDPD256,
20206 IX86_BUILTIN_ROUNDPS256,
20208 IX86_BUILTIN_UNPCKHPD256,
20209 IX86_BUILTIN_UNPCKLPD256,
20210 IX86_BUILTIN_UNPCKHPS256,
20211 IX86_BUILTIN_UNPCKLPS256,
20213 IX86_BUILTIN_SI256_SI,
20214 IX86_BUILTIN_PS256_PS,
20215 IX86_BUILTIN_PD256_PD,
20216 IX86_BUILTIN_SI_SI256,
20217 IX86_BUILTIN_PS_PS256,
20218 IX86_BUILTIN_PD_PD256,
20220 IX86_BUILTIN_VTESTZPD,
20221 IX86_BUILTIN_VTESTCPD,
20222 IX86_BUILTIN_VTESTNZCPD,
20223 IX86_BUILTIN_VTESTZPS,
20224 IX86_BUILTIN_VTESTCPS,
20225 IX86_BUILTIN_VTESTNZCPS,
20226 IX86_BUILTIN_VTESTZPD256,
20227 IX86_BUILTIN_VTESTCPD256,
20228 IX86_BUILTIN_VTESTNZCPD256,
20229 IX86_BUILTIN_VTESTZPS256,
20230 IX86_BUILTIN_VTESTCPS256,
20231 IX86_BUILTIN_VTESTNZCPS256,
20232 IX86_BUILTIN_PTESTZ256,
20233 IX86_BUILTIN_PTESTC256,
20234 IX86_BUILTIN_PTESTNZC256,
20236 IX86_BUILTIN_MOVMSKPD256,
20237 IX86_BUILTIN_MOVMSKPS256,
20239 /* TFmode support builtins. */
20240 IX86_BUILTIN_INFQ,
20241 IX86_BUILTIN_HUGE_VALQ,
20242 IX86_BUILTIN_FABSQ,
20243 IX86_BUILTIN_COPYSIGNQ,
20245 /* SSE5 instructions */
20246 IX86_BUILTIN_FMADDSS,
20247 IX86_BUILTIN_FMADDSD,
20248 IX86_BUILTIN_FMADDPS,
20249 IX86_BUILTIN_FMADDPD,
20250 IX86_BUILTIN_FMSUBSS,
20251 IX86_BUILTIN_FMSUBSD,
20252 IX86_BUILTIN_FMSUBPS,
20253 IX86_BUILTIN_FMSUBPD,
20254 IX86_BUILTIN_FNMADDSS,
20255 IX86_BUILTIN_FNMADDSD,
20256 IX86_BUILTIN_FNMADDPS,
20257 IX86_BUILTIN_FNMADDPD,
20258 IX86_BUILTIN_FNMSUBSS,
20259 IX86_BUILTIN_FNMSUBSD,
20260 IX86_BUILTIN_FNMSUBPS,
20261 IX86_BUILTIN_FNMSUBPD,
20262 IX86_BUILTIN_PCMOV,
20263 IX86_BUILTIN_PCMOV_V2DI,
20264 IX86_BUILTIN_PCMOV_V4SI,
20265 IX86_BUILTIN_PCMOV_V8HI,
20266 IX86_BUILTIN_PCMOV_V16QI,
20267 IX86_BUILTIN_PCMOV_V4SF,
20268 IX86_BUILTIN_PCMOV_V2DF,
20269 IX86_BUILTIN_PPERM,
20270 IX86_BUILTIN_PERMPS,
20271 IX86_BUILTIN_PERMPD,
20272 IX86_BUILTIN_PMACSSWW,
20273 IX86_BUILTIN_PMACSWW,
20274 IX86_BUILTIN_PMACSSWD,
20275 IX86_BUILTIN_PMACSWD,
20276 IX86_BUILTIN_PMACSSDD,
20277 IX86_BUILTIN_PMACSDD,
20278 IX86_BUILTIN_PMACSSDQL,
20279 IX86_BUILTIN_PMACSSDQH,
20280 IX86_BUILTIN_PMACSDQL,
20281 IX86_BUILTIN_PMACSDQH,
20282 IX86_BUILTIN_PMADCSSWD,
20283 IX86_BUILTIN_PMADCSWD,
20284 IX86_BUILTIN_PHADDBW,
20285 IX86_BUILTIN_PHADDBD,
20286 IX86_BUILTIN_PHADDBQ,
20287 IX86_BUILTIN_PHADDWD,
20288 IX86_BUILTIN_PHADDWQ,
20289 IX86_BUILTIN_PHADDDQ,
20290 IX86_BUILTIN_PHADDUBW,
20291 IX86_BUILTIN_PHADDUBD,
20292 IX86_BUILTIN_PHADDUBQ,
20293 IX86_BUILTIN_PHADDUWD,
20294 IX86_BUILTIN_PHADDUWQ,
20295 IX86_BUILTIN_PHADDUDQ,
20296 IX86_BUILTIN_PHSUBBW,
20297 IX86_BUILTIN_PHSUBWD,
20298 IX86_BUILTIN_PHSUBDQ,
20299 IX86_BUILTIN_PROTB,
20300 IX86_BUILTIN_PROTW,
20301 IX86_BUILTIN_PROTD,
20302 IX86_BUILTIN_PROTQ,
20303 IX86_BUILTIN_PROTB_IMM,
20304 IX86_BUILTIN_PROTW_IMM,
20305 IX86_BUILTIN_PROTD_IMM,
20306 IX86_BUILTIN_PROTQ_IMM,
20307 IX86_BUILTIN_PSHLB,
20308 IX86_BUILTIN_PSHLW,
20309 IX86_BUILTIN_PSHLD,
20310 IX86_BUILTIN_PSHLQ,
20311 IX86_BUILTIN_PSHAB,
20312 IX86_BUILTIN_PSHAW,
20313 IX86_BUILTIN_PSHAD,
20314 IX86_BUILTIN_PSHAQ,
20315 IX86_BUILTIN_FRCZSS,
20316 IX86_BUILTIN_FRCZSD,
20317 IX86_BUILTIN_FRCZPS,
20318 IX86_BUILTIN_FRCZPD,
20319 IX86_BUILTIN_CVTPH2PS,
20320 IX86_BUILTIN_CVTPS2PH,
20322 IX86_BUILTIN_COMEQSS,
20323 IX86_BUILTIN_COMNESS,
20324 IX86_BUILTIN_COMLTSS,
20325 IX86_BUILTIN_COMLESS,
20326 IX86_BUILTIN_COMGTSS,
20327 IX86_BUILTIN_COMGESS,
20328 IX86_BUILTIN_COMUEQSS,
20329 IX86_BUILTIN_COMUNESS,
20330 IX86_BUILTIN_COMULTSS,
20331 IX86_BUILTIN_COMULESS,
20332 IX86_BUILTIN_COMUGTSS,
20333 IX86_BUILTIN_COMUGESS,
20334 IX86_BUILTIN_COMORDSS,
20335 IX86_BUILTIN_COMUNORDSS,
20336 IX86_BUILTIN_COMFALSESS,
20337 IX86_BUILTIN_COMTRUESS,
20339 IX86_BUILTIN_COMEQSD,
20340 IX86_BUILTIN_COMNESD,
20341 IX86_BUILTIN_COMLTSD,
20342 IX86_BUILTIN_COMLESD,
20343 IX86_BUILTIN_COMGTSD,
20344 IX86_BUILTIN_COMGESD,
20345 IX86_BUILTIN_COMUEQSD,
20346 IX86_BUILTIN_COMUNESD,
20347 IX86_BUILTIN_COMULTSD,
20348 IX86_BUILTIN_COMULESD,
20349 IX86_BUILTIN_COMUGTSD,
20350 IX86_BUILTIN_COMUGESD,
20351 IX86_BUILTIN_COMORDSD,
20352 IX86_BUILTIN_COMUNORDSD,
20353 IX86_BUILTIN_COMFALSESD,
20354 IX86_BUILTIN_COMTRUESD,
20356 IX86_BUILTIN_COMEQPS,
20357 IX86_BUILTIN_COMNEPS,
20358 IX86_BUILTIN_COMLTPS,
20359 IX86_BUILTIN_COMLEPS,
20360 IX86_BUILTIN_COMGTPS,
20361 IX86_BUILTIN_COMGEPS,
20362 IX86_BUILTIN_COMUEQPS,
20363 IX86_BUILTIN_COMUNEPS,
20364 IX86_BUILTIN_COMULTPS,
20365 IX86_BUILTIN_COMULEPS,
20366 IX86_BUILTIN_COMUGTPS,
20367 IX86_BUILTIN_COMUGEPS,
20368 IX86_BUILTIN_COMORDPS,
20369 IX86_BUILTIN_COMUNORDPS,
20370 IX86_BUILTIN_COMFALSEPS,
20371 IX86_BUILTIN_COMTRUEPS,
20373 IX86_BUILTIN_COMEQPD,
20374 IX86_BUILTIN_COMNEPD,
20375 IX86_BUILTIN_COMLTPD,
20376 IX86_BUILTIN_COMLEPD,
20377 IX86_BUILTIN_COMGTPD,
20378 IX86_BUILTIN_COMGEPD,
20379 IX86_BUILTIN_COMUEQPD,
20380 IX86_BUILTIN_COMUNEPD,
20381 IX86_BUILTIN_COMULTPD,
20382 IX86_BUILTIN_COMULEPD,
20383 IX86_BUILTIN_COMUGTPD,
20384 IX86_BUILTIN_COMUGEPD,
20385 IX86_BUILTIN_COMORDPD,
20386 IX86_BUILTIN_COMUNORDPD,
20387 IX86_BUILTIN_COMFALSEPD,
20388 IX86_BUILTIN_COMTRUEPD,
20390 IX86_BUILTIN_PCOMEQUB,
20391 IX86_BUILTIN_PCOMNEUB,
20392 IX86_BUILTIN_PCOMLTUB,
20393 IX86_BUILTIN_PCOMLEUB,
20394 IX86_BUILTIN_PCOMGTUB,
20395 IX86_BUILTIN_PCOMGEUB,
20396 IX86_BUILTIN_PCOMFALSEUB,
20397 IX86_BUILTIN_PCOMTRUEUB,
20398 IX86_BUILTIN_PCOMEQUW,
20399 IX86_BUILTIN_PCOMNEUW,
20400 IX86_BUILTIN_PCOMLTUW,
20401 IX86_BUILTIN_PCOMLEUW,
20402 IX86_BUILTIN_PCOMGTUW,
20403 IX86_BUILTIN_PCOMGEUW,
20404 IX86_BUILTIN_PCOMFALSEUW,
20405 IX86_BUILTIN_PCOMTRUEUW,
20406 IX86_BUILTIN_PCOMEQUD,
20407 IX86_BUILTIN_PCOMNEUD,
20408 IX86_BUILTIN_PCOMLTUD,
20409 IX86_BUILTIN_PCOMLEUD,
20410 IX86_BUILTIN_PCOMGTUD,
20411 IX86_BUILTIN_PCOMGEUD,
20412 IX86_BUILTIN_PCOMFALSEUD,
20413 IX86_BUILTIN_PCOMTRUEUD,
20414 IX86_BUILTIN_PCOMEQUQ,
20415 IX86_BUILTIN_PCOMNEUQ,
20416 IX86_BUILTIN_PCOMLTUQ,
20417 IX86_BUILTIN_PCOMLEUQ,
20418 IX86_BUILTIN_PCOMGTUQ,
20419 IX86_BUILTIN_PCOMGEUQ,
20420 IX86_BUILTIN_PCOMFALSEUQ,
20421 IX86_BUILTIN_PCOMTRUEUQ,
20423 IX86_BUILTIN_PCOMEQB,
20424 IX86_BUILTIN_PCOMNEB,
20425 IX86_BUILTIN_PCOMLTB,
20426 IX86_BUILTIN_PCOMLEB,
20427 IX86_BUILTIN_PCOMGTB,
20428 IX86_BUILTIN_PCOMGEB,
20429 IX86_BUILTIN_PCOMFALSEB,
20430 IX86_BUILTIN_PCOMTRUEB,
20431 IX86_BUILTIN_PCOMEQW,
20432 IX86_BUILTIN_PCOMNEW,
20433 IX86_BUILTIN_PCOMLTW,
20434 IX86_BUILTIN_PCOMLEW,
20435 IX86_BUILTIN_PCOMGTW,
20436 IX86_BUILTIN_PCOMGEW,
20437 IX86_BUILTIN_PCOMFALSEW,
20438 IX86_BUILTIN_PCOMTRUEW,
20439 IX86_BUILTIN_PCOMEQD,
20440 IX86_BUILTIN_PCOMNED,
20441 IX86_BUILTIN_PCOMLTD,
20442 IX86_BUILTIN_PCOMLED,
20443 IX86_BUILTIN_PCOMGTD,
20444 IX86_BUILTIN_PCOMGED,
20445 IX86_BUILTIN_PCOMFALSED,
20446 IX86_BUILTIN_PCOMTRUED,
20447 IX86_BUILTIN_PCOMEQQ,
20448 IX86_BUILTIN_PCOMNEQ,
20449 IX86_BUILTIN_PCOMLTQ,
20450 IX86_BUILTIN_PCOMLEQ,
20451 IX86_BUILTIN_PCOMGTQ,
20452 IX86_BUILTIN_PCOMGEQ,
20453 IX86_BUILTIN_PCOMFALSEQ,
20454 IX86_BUILTIN_PCOMTRUEQ,
20456 IX86_BUILTIN_MAX
20459 /* Table for the ix86 builtin decls. */
20460 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
20462 /* Table of all of the builtin functions that are possible with different ISAs
20463 but are waiting to be built until a function is declared to use that
20464 ISA. */
20465 struct builtin_isa GTY(())
20467 tree type; /* builtin type to use in the declaration */
20468 const char *name; /* function name */
20469 int isa; /* isa_flags this builtin is defined for */
20470 bool const_p; /* true if the declaration is constant */
20473 static GTY(()) struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
20476 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
20477 * of which isa_flags to use in the ix86_builtins_isa array. Stores the
20478 * function decl in the ix86_builtins array. Returns the function decl,
20479 * or NULL_TREE if the builtin was not added.
20481 * If the front end has a special hook for builtin functions, delay adding
20482 * builtin functions that aren't in the current ISA until the ISA is changed
20483 * with function specific optimization. Doing so can save about 300K for the
20484 * default compiler. When the builtin is expanded, check at that time whether
20485 * it is valid.
20487 * If the front end doesn't have a special hook, record all builtins, even
20488 * those that aren't in the current ISA, in case the user uses function
20489 * specific options for a different ISA, so that we don't get scope
20490 * errors if a builtin is added in the middle of a function scope. */
20492 static inline tree
20493 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
20495 tree decl = NULL_TREE;
20497 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
20499 ix86_builtins_isa[(int) code].isa = mask;
20501 if ((mask & ix86_isa_flags) != 0
20502 || (lang_hooks.builtin_function
20503 == lang_hooks.builtin_function_ext_scope))
20506 decl = add_builtin_function (name, type, code, BUILT_IN_MD, NULL,
20507 NULL_TREE);
20508 ix86_builtins[(int) code] = decl;
20509 ix86_builtins_isa[(int) code].type = NULL_TREE;
20511 else
20513 ix86_builtins[(int) code] = NULL_TREE;
20514 ix86_builtins_isa[(int) code].const_p = false;
20515 ix86_builtins_isa[(int) code].type = type;
20516 ix86_builtins_isa[(int) code].name = name;
20520 return decl;
20523 /* Like def_builtin, but also marks the function decl "const". */
20525 static inline tree
20526 def_builtin_const (int mask, const char *name, tree type,
20527 enum ix86_builtins code)
20529 tree decl = def_builtin (mask, name, type, code);
20530 if (decl)
20531 TREE_READONLY (decl) = 1;
20532 else
20533 ix86_builtins_isa[(int) code].const_p = true;
20535 return decl;
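/* Usage sketch for the two helpers above.  The type tree and the name in
   this example are hypothetical placeholders, not a builtin this file
   registers at this point:

     decl = def_builtin_const (OPTION_MASK_ISA_SSE2,
                               "__builtin_ia32_example_addpd",
                               v2df_ftype_v2df_v2df,
                               IX86_BUILTIN_ADDPD);

   If SSE2 is enabled (or the front end supports extended-scope
   declarations), DECL is the freshly added function decl; otherwise the
   name, type and ISA mask are recorded in ix86_builtins_isa so the
   declaration can be materialized later by ix86_add_new_builtins.  */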
20538 /* Add any new builtin functions for a given ISA that may not have been
20539 declared. This saves a bit of space compared to adding all of the
20540 declarations to the tree up front, whether or not they are used. */
20542 static void
20543 ix86_add_new_builtins (int isa)
20545 int i;
20546 tree decl;
20548 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
20550 if ((ix86_builtins_isa[i].isa & isa) != 0
20551 && ix86_builtins_isa[i].type != NULL_TREE)
20553 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
20554 ix86_builtins_isa[i].type,
20555 i, BUILT_IN_MD, NULL,
20556 NULL_TREE);
20558 ix86_builtins[i] = decl;
20559 ix86_builtins_isa[i].type = NULL_TREE;
20560 if (ix86_builtins_isa[i].const_p)
20561 TREE_READONLY (decl) = 1;
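/* Sketch of the deferred-declaration flow implied by the comments above
   (the ISA mask below is only an example):

     def_builtin (OPTION_MASK_ISA_SSE4_2, ...);   recorded but not yet
                                                  declared when SSE4.2 is
                                                  outside ix86_isa_flags
     ...
     ix86_add_new_builtins (isa_with_sse4_2);     materializes the pending
                                                  declarations once code
                                                  opts into the larger ISA
                                                  via function specific
                                                  options.  */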
20566 /* Bits for builtin_description.flag. */
20568 /* Set when we don't support the comparison natively, and should
20569 swap_comparison in order to support it. */
20570 #define BUILTIN_DESC_SWAP_OPERANDS 1
20572 struct builtin_description
20574 const unsigned int mask;
20575 const enum insn_code icode;
20576 const char *const name;
20577 const enum ix86_builtins code;
20578 const enum rtx_code comparison;
20579 const int flag;
20582 static const struct builtin_description bdesc_comi[] =
20584 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
20585 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
20586 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
20587 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
20588 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
20589 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
20590 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
20591 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
20592 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
20593 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
20594 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
20595 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
20596 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
20597 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
20598 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
20599 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
20600 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
20601 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
20602 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
20603 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
20604 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
20605 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
20606 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
20607 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
20610 static const struct builtin_description bdesc_pcmpestr[] =
20612 /* SSE4.2 */
20613 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
20614 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
20615 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
20616 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
20617 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
20618 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
20619 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
20622 static const struct builtin_description bdesc_pcmpistr[] =
20624 /* SSE4.2 */
20625 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
20626 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
20627 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
20628 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
20629 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
20630 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
20631 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
20634 /* Special builtin types */
20635 enum ix86_special_builtin_type
20637 SPECIAL_FTYPE_UNKNOWN,
20638 VOID_FTYPE_VOID,
20639 V32QI_FTYPE_PCCHAR,
20640 V16QI_FTYPE_PCCHAR,
20641 V8SF_FTYPE_PCV4SF,
20642 V8SF_FTYPE_PCFLOAT,
20643 V4DF_FTYPE_PCV2DF,
20644 V4DF_FTYPE_PCDOUBLE,
20645 V4SF_FTYPE_PCFLOAT,
20646 V2DF_FTYPE_PCDOUBLE,
20647 V8SF_FTYPE_PCV8SF_V8SF,
20648 V4DF_FTYPE_PCV4DF_V4DF,
20649 V4SF_FTYPE_V4SF_PCV2SF,
20650 V4SF_FTYPE_PCV4SF_V4SF,
20651 V2DF_FTYPE_V2DF_PCDOUBLE,
20652 V2DF_FTYPE_PCV2DF_V2DF,
20653 V2DI_FTYPE_PV2DI,
20654 VOID_FTYPE_PV2SF_V4SF,
20655 VOID_FTYPE_PV4DI_V4DI,
20656 VOID_FTYPE_PV2DI_V2DI,
20657 VOID_FTYPE_PCHAR_V32QI,
20658 VOID_FTYPE_PCHAR_V16QI,
20659 VOID_FTYPE_PFLOAT_V8SF,
20660 VOID_FTYPE_PFLOAT_V4SF,
20661 VOID_FTYPE_PDOUBLE_V4DF,
20662 VOID_FTYPE_PDOUBLE_V2DF,
20663 VOID_FTYPE_PDI_DI,
20664 VOID_FTYPE_PINT_INT,
20665 VOID_FTYPE_PV8SF_V8SF_V8SF,
20666 VOID_FTYPE_PV4DF_V4DF_V4DF,
20667 VOID_FTYPE_PV4SF_V4SF_V4SF,
20668 VOID_FTYPE_PV2DF_V2DF_V2DF
20671 /* Builtin types */
20672 enum ix86_builtin_type
20674 FTYPE_UNKNOWN,
20675 FLOAT128_FTYPE_FLOAT128,
20676 FLOAT_FTYPE_FLOAT,
20677 FLOAT128_FTYPE_FLOAT128_FLOAT128,
20678 INT_FTYPE_V8SF_V8SF_PTEST,
20679 INT_FTYPE_V4DI_V4DI_PTEST,
20680 INT_FTYPE_V4DF_V4DF_PTEST,
20681 INT_FTYPE_V4SF_V4SF_PTEST,
20682 INT_FTYPE_V2DI_V2DI_PTEST,
20683 INT_FTYPE_V2DF_V2DF_PTEST,
20684 INT64_FTYPE_V4SF,
20685 INT64_FTYPE_V2DF,
20686 INT_FTYPE_V16QI,
20687 INT_FTYPE_V8QI,
20688 INT_FTYPE_V8SF,
20689 INT_FTYPE_V4DF,
20690 INT_FTYPE_V4SF,
20691 INT_FTYPE_V2DF,
20692 V16QI_FTYPE_V16QI,
20693 V8SI_FTYPE_V8SF,
20694 V8SI_FTYPE_V4SI,
20695 V8HI_FTYPE_V8HI,
20696 V8HI_FTYPE_V16QI,
20697 V8QI_FTYPE_V8QI,
20698 V8SF_FTYPE_V8SF,
20699 V8SF_FTYPE_V8SI,
20700 V8SF_FTYPE_V4SF,
20701 V4SI_FTYPE_V4SI,
20702 V4SI_FTYPE_V16QI,
20703 V4SI_FTYPE_V8SI,
20704 V4SI_FTYPE_V8HI,
20705 V4SI_FTYPE_V4DF,
20706 V4SI_FTYPE_V4SF,
20707 V4SI_FTYPE_V2DF,
20708 V4HI_FTYPE_V4HI,
20709 V4DF_FTYPE_V4DF,
20710 V4DF_FTYPE_V4SI,
20711 V4DF_FTYPE_V4SF,
20712 V4DF_FTYPE_V2DF,
20713 V4SF_FTYPE_V4DF,
20714 V4SF_FTYPE_V4SF,
20715 V4SF_FTYPE_V4SF_VEC_MERGE,
20716 V4SF_FTYPE_V8SF,
20717 V4SF_FTYPE_V4SI,
20718 V4SF_FTYPE_V2DF,
20719 V2DI_FTYPE_V2DI,
20720 V2DI_FTYPE_V16QI,
20721 V2DI_FTYPE_V8HI,
20722 V2DI_FTYPE_V4SI,
20723 V2DF_FTYPE_V2DF,
20724 V2DF_FTYPE_V2DF_VEC_MERGE,
20725 V2DF_FTYPE_V4SI,
20726 V2DF_FTYPE_V4DF,
20727 V2DF_FTYPE_V4SF,
20728 V2DF_FTYPE_V2SI,
20729 V2SI_FTYPE_V2SI,
20730 V2SI_FTYPE_V4SF,
20731 V2SI_FTYPE_V2SF,
20732 V2SI_FTYPE_V2DF,
20733 V2SF_FTYPE_V2SF,
20734 V2SF_FTYPE_V2SI,
20735 V16QI_FTYPE_V16QI_V16QI,
20736 V16QI_FTYPE_V8HI_V8HI,
20737 V8QI_FTYPE_V8QI_V8QI,
20738 V8QI_FTYPE_V4HI_V4HI,
20739 V8HI_FTYPE_V8HI_V8HI,
20740 V8HI_FTYPE_V8HI_V8HI_COUNT,
20741 V8HI_FTYPE_V16QI_V16QI,
20742 V8HI_FTYPE_V4SI_V4SI,
20743 V8HI_FTYPE_V8HI_SI_COUNT,
20744 V8SF_FTYPE_V8SF_V8SF,
20745 V8SF_FTYPE_V8SF_V8SI,
20746 V4SI_FTYPE_V4SI_V4SI,
20747 V4SI_FTYPE_V4SI_V4SI_COUNT,
20748 V4SI_FTYPE_V8HI_V8HI,
20749 V4SI_FTYPE_V4SF_V4SF,
20750 V4SI_FTYPE_V2DF_V2DF,
20751 V4SI_FTYPE_V4SI_SI_COUNT,
20752 V4HI_FTYPE_V4HI_V4HI,
20753 V4HI_FTYPE_V4HI_V4HI_COUNT,
20754 V4HI_FTYPE_V8QI_V8QI,
20755 V4HI_FTYPE_V2SI_V2SI,
20756 V4HI_FTYPE_V4HI_SI_COUNT,
20757 V4DF_FTYPE_V4DF_V4DF,
20758 V4DF_FTYPE_V4DF_V4DI,
20759 V4SF_FTYPE_V4SF_V4SF,
20760 V4SF_FTYPE_V4SF_V4SF_SWAP,
20761 V4SF_FTYPE_V4SF_V4SI,
20762 V4SF_FTYPE_V4SF_V2SI,
20763 V4SF_FTYPE_V4SF_V2DF,
20764 V4SF_FTYPE_V4SF_DI,
20765 V4SF_FTYPE_V4SF_SI,
20766 V2DI_FTYPE_V2DI_V2DI,
20767 V2DI_FTYPE_V2DI_V2DI_COUNT,
20768 V2DI_FTYPE_V16QI_V16QI,
20769 V2DI_FTYPE_V4SI_V4SI,
20770 V2DI_FTYPE_V2DI_V16QI,
20771 V2DI_FTYPE_V2DF_V2DF,
20772 V2DI_FTYPE_V2DI_SI_COUNT,
20773 V2SI_FTYPE_V2SI_V2SI,
20774 V2SI_FTYPE_V2SI_V2SI_COUNT,
20775 V2SI_FTYPE_V4HI_V4HI,
20776 V2SI_FTYPE_V2SF_V2SF,
20777 V2SI_FTYPE_V2SI_SI_COUNT,
20778 V2DF_FTYPE_V2DF_V2DF,
20779 V2DF_FTYPE_V2DF_V2DF_SWAP,
20780 V2DF_FTYPE_V2DF_V4SF,
20781 V2DF_FTYPE_V2DF_V2DI,
20782 V2DF_FTYPE_V2DF_DI,
20783 V2DF_FTYPE_V2DF_SI,
20784 V2SF_FTYPE_V2SF_V2SF,
20785 V1DI_FTYPE_V1DI_V1DI,
20786 V1DI_FTYPE_V1DI_V1DI_COUNT,
20787 V1DI_FTYPE_V8QI_V8QI,
20788 V1DI_FTYPE_V2SI_V2SI,
20789 V1DI_FTYPE_V1DI_SI_COUNT,
20790 UINT64_FTYPE_UINT64_UINT64,
20791 UINT_FTYPE_UINT_UINT,
20792 UINT_FTYPE_UINT_USHORT,
20793 UINT_FTYPE_UINT_UCHAR,
20794 V8HI_FTYPE_V8HI_INT,
20795 V4SI_FTYPE_V4SI_INT,
20796 V4HI_FTYPE_V4HI_INT,
20797 V8SF_FTYPE_V8SF_INT,
20798 V4SI_FTYPE_V8SI_INT,
20799 V4SF_FTYPE_V8SF_INT,
20800 V2DF_FTYPE_V4DF_INT,
20801 V4DF_FTYPE_V4DF_INT,
20802 V4SF_FTYPE_V4SF_INT,
20803 V2DI_FTYPE_V2DI_INT,
20804 V2DI2TI_FTYPE_V2DI_INT,
20805 V2DF_FTYPE_V2DF_INT,
20806 V16QI_FTYPE_V16QI_V16QI_V16QI,
20807 V8SF_FTYPE_V8SF_V8SF_V8SF,
20808 V4DF_FTYPE_V4DF_V4DF_V4DF,
20809 V4SF_FTYPE_V4SF_V4SF_V4SF,
20810 V2DF_FTYPE_V2DF_V2DF_V2DF,
20811 V16QI_FTYPE_V16QI_V16QI_INT,
20812 V8SI_FTYPE_V8SI_V8SI_INT,
20813 V8SI_FTYPE_V8SI_V4SI_INT,
20814 V8HI_FTYPE_V8HI_V8HI_INT,
20815 V8SF_FTYPE_V8SF_V8SF_INT,
20816 V8SF_FTYPE_V8SF_V4SF_INT,
20817 V4SI_FTYPE_V4SI_V4SI_INT,
20818 V4DF_FTYPE_V4DF_V4DF_INT,
20819 V4DF_FTYPE_V4DF_V2DF_INT,
20820 V4SF_FTYPE_V4SF_V4SF_INT,
20821 V2DI_FTYPE_V2DI_V2DI_INT,
20822 V2DI2TI_FTYPE_V2DI_V2DI_INT,
20823 V1DI2DI_FTYPE_V1DI_V1DI_INT,
20824 V2DF_FTYPE_V2DF_V2DF_INT,
20825 V2DI_FTYPE_V2DI_UINT_UINT,
20826 V2DI_FTYPE_V2DI_V2DI_UINT_UINT
20829 /* Special builtins with variable number of arguments. */
20830 static const struct builtin_description bdesc_special_args[] =
20832 /* MMX */
20833 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
20835 /* 3DNow! */
20836 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
20838 /* SSE */
20839 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20840 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20841 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
20843 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
20844 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
20845 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
20846 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
20848 /* SSE or 3DNow!A */
20849 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20850 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PDI_DI },
20852 /* SSE2 */
20853 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20854 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20855 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20856 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
20857 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20858 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
20859 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
20860 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
20861 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
20863 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
20864 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
20866 /* SSE3 */
20867 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
20869 /* SSE4.1 */
20870 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
20872 /* SSE4A */
20873 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20874 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20876 /* AVX */
20877 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
20878 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, 0, IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
20879 { OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_64BIT, CODE_FOR_avx_vzeroupper_rex64, 0, IX86_BUILTIN_VZEROUPPER_REX64, UNKNOWN, (int) VOID_FTYPE_VOID },
20881 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastss, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
20882 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastsd256, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
20883 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastss256, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
20884 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_pd256, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
20885 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_ps256, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
20887 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
20888 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
20889 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
20890 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
20891 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
20892 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
20893 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
20895 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
20896 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
20897 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
20899 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
20900 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
20901 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
20902 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
20903 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
20904 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
20905 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
20906 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
20909 /* Builtins with variable number of arguments. */
20910 static const struct builtin_description bdesc_args[] =
20912 /* MMX */
20913 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20914 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20915 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20916 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20917 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20918 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20920 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20921 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20922 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20923 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20924 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20925 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20926 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20927 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20929 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20930 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20932 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20933 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20934 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20935 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20937 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20938 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20939 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20940 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20941 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20942 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20944 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20945 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20946 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20947 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20948 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20949 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20951 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
20952 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
20953 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
20955 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
20957 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
20958 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
20959 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
20960 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
20961 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
20962 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
20964 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
20965 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
20966 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
20967 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
20968 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
20969 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
20971 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
20972 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
20973 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
20974 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
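  /* Each initializer in this table fills one builtin_description record:
     { ISA mask, insn pattern, user-visible name, IX86_BUILTIN code,
       rtx comparison code (UNKNOWN when unused), function-type index }.
     A hedged sketch of the record these braces are assumed to fill (the
     authoritative definition lives earlier in this file):

	 struct builtin_description
	 {
	   const unsigned int mask;          // OPTION_MASK_ISA_*
	   const enum insn_code icode;       // CODE_FOR_* pattern to expand to
	   const char *const name;           // "__builtin_ia32_*", or 0
	   const enum ix86_builtins code;    // IX86_BUILTIN_*
	   const enum rtx_code comparison;   // e.g. EQ, LT; UNKNOWN if unused
	   const int flag;                   // *_FTYPE_* signature index
	 };  */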
20976 /* 3DNow! */
20977 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
20978 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
20979 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
20980 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
20982 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20983 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20984 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20985 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
20986 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
20987 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
20988 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20989 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20990 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20991 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20992 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20993 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20994 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20995 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20996 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20998 /* 3DNow!A */
20999 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21000 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21001 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21002 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21003 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21004 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21006 /* SSE */
21007 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
21008 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21009 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21010 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21011 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21012 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21013 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21014 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21015 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21016 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21017 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21018 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21020 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21022 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21023 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21024 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21025 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21026 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21027 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21028 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21029 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21031 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21032 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21033 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21034 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21035 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21036 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21037 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21038 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21039 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21040 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21041 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21042 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21043 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21044 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21045 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21046 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21047 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21048 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21049 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21050 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21051 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21052 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
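  /* Note on the compare rows above: there is no separate "greater" compare
     pattern.  The cmpgt and cmpge builtins reuse the LT and LE rows, and
     cmpngt and cmpnge reuse the UNGE and UNGT rows, through the _SWAP
     function-type variants, which are read here as telling the expander to
     exchange the two operands before emitting the insn.  */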
21054 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21055 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21056 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21057 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21059 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21060 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21061 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21062 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21064 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21065 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21066 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21067 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21068 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21070 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
21071 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
21072 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
21074 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
21076 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21077 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21078 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
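  /* The "__builtin_ia32_*" names above are what the <xmmintrin.h> wrappers
     expand to.  An illustrative sketch (modelled on the xmmintrin.h style,
     not copied from it) of how the addv4sf3 row is reached from user code:

	 extern __inline __m128
	 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
	 _mm_add_ps (__m128 __A, __m128 __B)
	 {
	   return (__m128) __builtin_ia32_addps ((__v4sf) __A, (__v4sf) __B);
	 }

     so the CODE_FOR_addv4sf3 entry is what ultimately emits addps for that
     intrinsic.  */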
21080 /* SSE MMX or 3DNow!A */
21081 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21082 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21083 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21085 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21086 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21087 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21088 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21090 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
21091 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
21093 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
21095 /* SSE2 */
21096 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21098 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
21099 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
21100 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
21101 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
21102 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21104 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21105 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21106 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
21107 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21108 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21110 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
21112 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21113 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21114 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21115 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21117 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21118 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
21119 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21121 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21122 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21123 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21124 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21125 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21126 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21127 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21128 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21130 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21131 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21132 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21133 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21134 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21135 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21136 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21137 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21138 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21139 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21140 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21141 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21142 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21143 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21144 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21145 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21146 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21147 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21148 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21149 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21151 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21152 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21153 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21154 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21156 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21157 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21158 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21159 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21161 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21162 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpckhpd_exp, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21163 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpcklpd_exp, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21165 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
21167 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21168 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21169 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21170 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21171 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21172 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21173 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21174 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21176 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21177 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21178 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21179 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21180 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21181 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21182 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21183 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21185 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21186 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21188 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21189 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21190 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21191 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21193 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21194 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21196 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21197 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21198 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21199 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21200 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21201 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21203 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21204 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21205 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21206 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21208 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21209 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21210 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21211 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21212 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21213 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21214 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21215 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21217 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21218 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
21219 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21221 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21222 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
21224 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
21225 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
21227 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
21229 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
21230 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
21231 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
21232 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
21234 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
21235 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21236 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21237 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
21238 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21239 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21240 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
21242 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
21243 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21244 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21245 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
21246 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21247 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21248 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
21250 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21251 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21252 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21253 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
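  /* The shift rows above come in two flavors per operation: the *_SI_COUNT
     types take a scalar (immediate) shift count, while the vector _COUNT
     variants take the count in an xmm operand, mirroring the two forms of
     the hardware shift instructions.  The pslldqi128 and psrldqi128 rows are
     different: they shift the full 128-bit value as one TImode quantity via
     the ashlti3 and lshrti3 patterns.  */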
21255 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
21256 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
21257 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
21259 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
21261 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
21262 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
21264 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
21266 /* SSE2 MMX */
21267 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
21268 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
21270 /* SSE3 */
21271 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21272 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21274 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21275 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21276 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21277 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21278 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21279 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21281 /* SSSE3 */
21282 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
21283 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
21284 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
21285 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
21286 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
21287 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21289 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21290 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21291 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21292 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21293 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21294 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21295 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21296 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21297 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21298 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21299 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21300 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21301 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
21302 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
21303 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21304 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21305 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21306 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21307 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21308 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21309 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21310 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21311 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21312 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21314 /* SSSE3. */
21315 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_V2DI_INT },
21316 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI2DI_FTYPE_V1DI_V1DI_INT },
21318 /* SSE4.1 */
21319 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21320 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21321 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
21322 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
21323 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21324 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21325 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21326 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
21327 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
21328 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
21330 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
21331 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
21332 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
21333 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
21334 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
21335 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
21336 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
21337 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
21338 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
21339 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
21340 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
21341 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
21342 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
21344 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
21345 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21346 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21347 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21348 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21349 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21350 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21351 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21352 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21353 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21354 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
21355 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21357 /* SSE4.1 and SSE5 */
21358 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
21359 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
21360 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21361 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21363 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
21364 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
21365 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
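  /* The three ptest rows share one insn pattern; the comparison code picks
     which flag of the PTEST result is consumed: EQ for ZF (ptestz), LTU for
     CF (ptestc) and GTU for "neither flag set" (ptestnzc); at least, that is
     the natural reading of the EQ, LTU and GTU codes recorded here.  */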
21367 /* SSE4.2 */
21368 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21369 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
21370 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
21371 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
21372 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
21374 /* SSE4A */
21375 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
21376 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
21377 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
21378 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21380 /* AES */
21381 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
21382 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
21384 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21385 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21386 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21387 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21389 /* PCLMUL */
21390 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
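  /* The AES and PCLMUL rows above (like the FABSQ and COPYSIGNQ rows earlier)
     carry 0 in the name slot: the user-visible builtin names are presumably
     registered elsewhere, keyed on the IX86_BUILTIN_* code, so these entries
     only supply the expansion pattern and signature, with the mask reflecting
     the baseline SSE2 requirement rather than the AES or PCLMUL option bits.  */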
21392 /* AVX */
21393 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21394 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21395 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21396 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21397 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21398 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21399 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21400 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21401 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21402 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21403 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21404 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21405 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21406 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21407 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21408 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21409 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21410 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21411 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21412 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21413 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21414 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21415 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21416 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21417 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21418 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21420 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
21421 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
21422 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
21423 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
21425 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21426 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21427 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
21428 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
21429 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21430 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21431 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21432 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21433 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21434 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21435 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21436 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21437 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21438 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
21439 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
21440 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
21441 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
21442 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
21443 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
21444 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
21445 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
21446 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
21447 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
21448 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
21449 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21450 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21451 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
21452 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
21453 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
21454 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
21455 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
21456 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
21457 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
21458 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
21460 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21461 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21462 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
21464 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
21465 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21466 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21467 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21468 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21470 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21472 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
21473 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
21475 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21476 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21477 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21478 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21480 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
21481 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
21482 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
21483 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
21484 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
21485 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
21487 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
21488 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
21489 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
21490 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
21491 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
21492 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
21493 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
21494 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
21495 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
21496 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
21497 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
21498 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
21499 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
21500 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
21501 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
21503 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
21504 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
21505 };
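/* Illustrative sketch (not part of i386.c): how user code reaches two of the
   builtins registered in the table above.  The avxintrin.h wrappers
   _mm256_max_ps and _mm256_min_ps expand to __builtin_ia32_maxps256 and
   __builtin_ia32_minps256, i.e. the CODE_FOR_smaxv8sf3 and CODE_FOR_sminv8sf3
   patterns; compile with -mavx.  The helper name clamp8f is made up for the
   example.  */
#include <immintrin.h>

static __m256
clamp8f (__m256 x, __m256 lo, __m256 hi)
{
  /* maxps256 then minps256 clamp each of the eight floats into [lo, hi].  */
  return _mm256_min_ps (_mm256_max_ps (x, lo), hi);
}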
21507 /* SSE5 */
21508 enum multi_arg_type {
21509 MULTI_ARG_UNKNOWN,
21510 MULTI_ARG_3_SF,
21511 MULTI_ARG_3_DF,
21512 MULTI_ARG_3_DI,
21513 MULTI_ARG_3_SI,
21514 MULTI_ARG_3_SI_DI,
21515 MULTI_ARG_3_HI,
21516 MULTI_ARG_3_HI_SI,
21517 MULTI_ARG_3_QI,
21518 MULTI_ARG_3_PERMPS,
21519 MULTI_ARG_3_PERMPD,
21520 MULTI_ARG_2_SF,
21521 MULTI_ARG_2_DF,
21522 MULTI_ARG_2_DI,
21523 MULTI_ARG_2_SI,
21524 MULTI_ARG_2_HI,
21525 MULTI_ARG_2_QI,
21526 MULTI_ARG_2_DI_IMM,
21527 MULTI_ARG_2_SI_IMM,
21528 MULTI_ARG_2_HI_IMM,
21529 MULTI_ARG_2_QI_IMM,
21530 MULTI_ARG_2_SF_CMP,
21531 MULTI_ARG_2_DF_CMP,
21532 MULTI_ARG_2_DI_CMP,
21533 MULTI_ARG_2_SI_CMP,
21534 MULTI_ARG_2_HI_CMP,
21535 MULTI_ARG_2_QI_CMP,
21536 MULTI_ARG_2_DI_TF,
21537 MULTI_ARG_2_SI_TF,
21538 MULTI_ARG_2_HI_TF,
21539 MULTI_ARG_2_QI_TF,
21540 MULTI_ARG_2_SF_TF,
21541 MULTI_ARG_2_DF_TF,
21542 MULTI_ARG_1_SF,
21543 MULTI_ARG_1_DF,
21544 MULTI_ARG_1_DI,
21545 MULTI_ARG_1_SI,
21546 MULTI_ARG_1_HI,
21547 MULTI_ARG_1_QI,
21548 MULTI_ARG_1_SI_DI,
21549 MULTI_ARG_1_HI_DI,
21550 MULTI_ARG_1_HI_SI,
21551 MULTI_ARG_1_QI_DI,
21552 MULTI_ARG_1_QI_SI,
21553 MULTI_ARG_1_QI_HI,
21554 MULTI_ARG_1_PH2PS,
21555 MULTI_ARG_1_PS2PH
21556 };
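/* Illustrative note (not part of i386.c): each MULTI_ARG_* value above encodes
   the operand count and vector mode of an SSE5 builtin, e.g. an entry tagged
   MULTI_ARG_3_SF is registered with a V4SF (V4SF, V4SF, V4SF) signature, while
   the *_CMP and *_TF variants additionally carry an rtx comparison code or a
   COM_*/PCOM_* sub-code in the comparison field of the table entries below.  */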
21558 static const struct builtin_description bdesc_multi_arg[] =
21559 {
21560 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv4sf4, "__builtin_ia32_fmaddss", IX86_BUILTIN_FMADDSS, 0, (int)MULTI_ARG_3_SF },
21561 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv2df4, "__builtin_ia32_fmaddsd", IX86_BUILTIN_FMADDSD, 0, (int)MULTI_ARG_3_DF },
21562 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv4sf4, "__builtin_ia32_fmaddps", IX86_BUILTIN_FMADDPS, 0, (int)MULTI_ARG_3_SF },
21563 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv2df4, "__builtin_ia32_fmaddpd", IX86_BUILTIN_FMADDPD, 0, (int)MULTI_ARG_3_DF },
21564 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv4sf4, "__builtin_ia32_fmsubss", IX86_BUILTIN_FMSUBSS, 0, (int)MULTI_ARG_3_SF },
21565 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv2df4, "__builtin_ia32_fmsubsd", IX86_BUILTIN_FMSUBSD, 0, (int)MULTI_ARG_3_DF },
21566 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv4sf4, "__builtin_ia32_fmsubps", IX86_BUILTIN_FMSUBPS, 0, (int)MULTI_ARG_3_SF },
21567 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv2df4, "__builtin_ia32_fmsubpd", IX86_BUILTIN_FMSUBPD, 0, (int)MULTI_ARG_3_DF },
21568 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv4sf4, "__builtin_ia32_fnmaddss", IX86_BUILTIN_FNMADDSS, 0, (int)MULTI_ARG_3_SF },
21569 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv2df4, "__builtin_ia32_fnmaddsd", IX86_BUILTIN_FNMADDSD, 0, (int)MULTI_ARG_3_DF },
21570 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv4sf4, "__builtin_ia32_fnmaddps", IX86_BUILTIN_FNMADDPS, 0, (int)MULTI_ARG_3_SF },
21571 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv2df4, "__builtin_ia32_fnmaddpd", IX86_BUILTIN_FNMADDPD, 0, (int)MULTI_ARG_3_DF },
21572 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv4sf4, "__builtin_ia32_fnmsubss", IX86_BUILTIN_FNMSUBSS, 0, (int)MULTI_ARG_3_SF },
21573 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv2df4, "__builtin_ia32_fnmsubsd", IX86_BUILTIN_FNMSUBSD, 0, (int)MULTI_ARG_3_DF },
21574 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv4sf4, "__builtin_ia32_fnmsubps", IX86_BUILTIN_FNMSUBPS, 0, (int)MULTI_ARG_3_SF },
21575 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv2df4, "__builtin_ia32_fnmsubpd", IX86_BUILTIN_FNMSUBPD, 0, (int)MULTI_ARG_3_DF },
21576 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov", IX86_BUILTIN_PCMOV, 0, (int)MULTI_ARG_3_DI },
21577 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov_v2di", IX86_BUILTIN_PCMOV_V2DI, 0, (int)MULTI_ARG_3_DI },
21578 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4si, "__builtin_ia32_pcmov_v4si", IX86_BUILTIN_PCMOV_V4SI, 0, (int)MULTI_ARG_3_SI },
21579 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v8hi, "__builtin_ia32_pcmov_v8hi", IX86_BUILTIN_PCMOV_V8HI, 0, (int)MULTI_ARG_3_HI },
21580 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v16qi, "__builtin_ia32_pcmov_v16qi",IX86_BUILTIN_PCMOV_V16QI,0, (int)MULTI_ARG_3_QI },
21581 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2df, "__builtin_ia32_pcmov_v2df", IX86_BUILTIN_PCMOV_V2DF, 0, (int)MULTI_ARG_3_DF },
21582 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4sf, "__builtin_ia32_pcmov_v4sf", IX86_BUILTIN_PCMOV_V4SF, 0, (int)MULTI_ARG_3_SF },
21583 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pperm, "__builtin_ia32_pperm", IX86_BUILTIN_PPERM, 0, (int)MULTI_ARG_3_QI },
21584 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv4sf, "__builtin_ia32_permps", IX86_BUILTIN_PERMPS, 0, (int)MULTI_ARG_3_PERMPS },
21585 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv2df, "__builtin_ia32_permpd", IX86_BUILTIN_PERMPD, 0, (int)MULTI_ARG_3_PERMPD },
21586 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssww, "__builtin_ia32_pmacssww", IX86_BUILTIN_PMACSSWW, 0, (int)MULTI_ARG_3_HI },
21587 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsww, "__builtin_ia32_pmacsww", IX86_BUILTIN_PMACSWW, 0, (int)MULTI_ARG_3_HI },
21588 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsswd, "__builtin_ia32_pmacsswd", IX86_BUILTIN_PMACSSWD, 0, (int)MULTI_ARG_3_HI_SI },
21589 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacswd, "__builtin_ia32_pmacswd", IX86_BUILTIN_PMACSWD, 0, (int)MULTI_ARG_3_HI_SI },
21590 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdd, "__builtin_ia32_pmacssdd", IX86_BUILTIN_PMACSSDD, 0, (int)MULTI_ARG_3_SI },
21591 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdd, "__builtin_ia32_pmacsdd", IX86_BUILTIN_PMACSDD, 0, (int)MULTI_ARG_3_SI },
21592 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdql, "__builtin_ia32_pmacssdql", IX86_BUILTIN_PMACSSDQL, 0, (int)MULTI_ARG_3_SI_DI },
21593 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdqh, "__builtin_ia32_pmacssdqh", IX86_BUILTIN_PMACSSDQH, 0, (int)MULTI_ARG_3_SI_DI },
21594 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdql, "__builtin_ia32_pmacsdql", IX86_BUILTIN_PMACSDQL, 0, (int)MULTI_ARG_3_SI_DI },
21595 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdqh, "__builtin_ia32_pmacsdqh", IX86_BUILTIN_PMACSDQH, 0, (int)MULTI_ARG_3_SI_DI },
21596 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcsswd, "__builtin_ia32_pmadcsswd", IX86_BUILTIN_PMADCSSWD, 0, (int)MULTI_ARG_3_HI_SI },
21597 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcswd, "__builtin_ia32_pmadcswd", IX86_BUILTIN_PMADCSWD, 0, (int)MULTI_ARG_3_HI_SI },
21598 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv2di3, "__builtin_ia32_protq", IX86_BUILTIN_PROTQ, 0, (int)MULTI_ARG_2_DI },
21599 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv4si3, "__builtin_ia32_protd", IX86_BUILTIN_PROTD, 0, (int)MULTI_ARG_2_SI },
21600 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv8hi3, "__builtin_ia32_protw", IX86_BUILTIN_PROTW, 0, (int)MULTI_ARG_2_HI },
21601 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv16qi3, "__builtin_ia32_protb", IX86_BUILTIN_PROTB, 0, (int)MULTI_ARG_2_QI },
21602 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv2di3, "__builtin_ia32_protqi", IX86_BUILTIN_PROTQ_IMM, 0, (int)MULTI_ARG_2_DI_IMM },
21603 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv4si3, "__builtin_ia32_protdi", IX86_BUILTIN_PROTD_IMM, 0, (int)MULTI_ARG_2_SI_IMM },
21604 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv8hi3, "__builtin_ia32_protwi", IX86_BUILTIN_PROTW_IMM, 0, (int)MULTI_ARG_2_HI_IMM },
21605 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv16qi3, "__builtin_ia32_protbi", IX86_BUILTIN_PROTB_IMM, 0, (int)MULTI_ARG_2_QI_IMM },
21606 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv2di3, "__builtin_ia32_pshaq", IX86_BUILTIN_PSHAQ, 0, (int)MULTI_ARG_2_DI },
21607 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv4si3, "__builtin_ia32_pshad", IX86_BUILTIN_PSHAD, 0, (int)MULTI_ARG_2_SI },
21608 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv8hi3, "__builtin_ia32_pshaw", IX86_BUILTIN_PSHAW, 0, (int)MULTI_ARG_2_HI },
21609 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv16qi3, "__builtin_ia32_pshab", IX86_BUILTIN_PSHAB, 0, (int)MULTI_ARG_2_QI },
21610 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv2di3, "__builtin_ia32_pshlq", IX86_BUILTIN_PSHLQ, 0, (int)MULTI_ARG_2_DI },
21611 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv4si3, "__builtin_ia32_pshld", IX86_BUILTIN_PSHLD, 0, (int)MULTI_ARG_2_SI },
21612 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv8hi3, "__builtin_ia32_pshlw", IX86_BUILTIN_PSHLW, 0, (int)MULTI_ARG_2_HI },
21613 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv16qi3, "__builtin_ia32_pshlb", IX86_BUILTIN_PSHLB, 0, (int)MULTI_ARG_2_QI },
21614 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv4sf2, "__builtin_ia32_frczss", IX86_BUILTIN_FRCZSS, 0, (int)MULTI_ARG_2_SF },
21615 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv2df2, "__builtin_ia32_frczsd", IX86_BUILTIN_FRCZSD, 0, (int)MULTI_ARG_2_DF },
21616 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv4sf2, "__builtin_ia32_frczps", IX86_BUILTIN_FRCZPS, 0, (int)MULTI_ARG_1_SF },
21617 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv2df2, "__builtin_ia32_frczpd", IX86_BUILTIN_FRCZPD, 0, (int)MULTI_ARG_1_DF },
21618 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtph2ps, "__builtin_ia32_cvtph2ps", IX86_BUILTIN_CVTPH2PS, 0, (int)MULTI_ARG_1_PH2PS },
21619 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtps2ph, "__builtin_ia32_cvtps2ph", IX86_BUILTIN_CVTPS2PH, 0, (int)MULTI_ARG_1_PS2PH },
21620 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbw, "__builtin_ia32_phaddbw", IX86_BUILTIN_PHADDBW, 0, (int)MULTI_ARG_1_QI_HI },
21621 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbd, "__builtin_ia32_phaddbd", IX86_BUILTIN_PHADDBD, 0, (int)MULTI_ARG_1_QI_SI },
21622 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbq, "__builtin_ia32_phaddbq", IX86_BUILTIN_PHADDBQ, 0, (int)MULTI_ARG_1_QI_DI },
21623 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwd, "__builtin_ia32_phaddwd", IX86_BUILTIN_PHADDWD, 0, (int)MULTI_ARG_1_HI_SI },
21624 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwq, "__builtin_ia32_phaddwq", IX86_BUILTIN_PHADDWQ, 0, (int)MULTI_ARG_1_HI_DI },
21625 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadddq, "__builtin_ia32_phadddq", IX86_BUILTIN_PHADDDQ, 0, (int)MULTI_ARG_1_SI_DI },
21626 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubw, "__builtin_ia32_phaddubw", IX86_BUILTIN_PHADDUBW, 0, (int)MULTI_ARG_1_QI_HI },
21627 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubd, "__builtin_ia32_phaddubd", IX86_BUILTIN_PHADDUBD, 0, (int)MULTI_ARG_1_QI_SI },
21628 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubq, "__builtin_ia32_phaddubq", IX86_BUILTIN_PHADDUBQ, 0, (int)MULTI_ARG_1_QI_DI },
21629 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwd, "__builtin_ia32_phadduwd", IX86_BUILTIN_PHADDUWD, 0, (int)MULTI_ARG_1_HI_SI },
21630 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwq, "__builtin_ia32_phadduwq", IX86_BUILTIN_PHADDUWQ, 0, (int)MULTI_ARG_1_HI_DI },
21631 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddudq, "__builtin_ia32_phaddudq", IX86_BUILTIN_PHADDUDQ, 0, (int)MULTI_ARG_1_SI_DI },
21632 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubbw, "__builtin_ia32_phsubbw", IX86_BUILTIN_PHSUBBW, 0, (int)MULTI_ARG_1_QI_HI },
21633 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubwd, "__builtin_ia32_phsubwd", IX86_BUILTIN_PHSUBWD, 0, (int)MULTI_ARG_1_HI_SI },
21634 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubdq, "__builtin_ia32_phsubdq", IX86_BUILTIN_PHSUBDQ, 0, (int)MULTI_ARG_1_SI_DI },
21636 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comeqss", IX86_BUILTIN_COMEQSS, EQ, (int)MULTI_ARG_2_SF_CMP },
21637 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comness", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
21638 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comneqss", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
21639 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comltss", IX86_BUILTIN_COMLTSS, LT, (int)MULTI_ARG_2_SF_CMP },
21640 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comless", IX86_BUILTIN_COMLESS, LE, (int)MULTI_ARG_2_SF_CMP },
21641 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgtss", IX86_BUILTIN_COMGTSS, GT, (int)MULTI_ARG_2_SF_CMP },
21642 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgess", IX86_BUILTIN_COMGESS, GE, (int)MULTI_ARG_2_SF_CMP },
21643 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comueqss", IX86_BUILTIN_COMUEQSS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
21644 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuness", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21645 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuneqss", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21646 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunltss", IX86_BUILTIN_COMULTSS, UNLT, (int)MULTI_ARG_2_SF_CMP },
21647 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunless", IX86_BUILTIN_COMULESS, UNLE, (int)MULTI_ARG_2_SF_CMP },
21648 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungtss", IX86_BUILTIN_COMUGTSS, UNGT, (int)MULTI_ARG_2_SF_CMP },
21649 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungess", IX86_BUILTIN_COMUGESS, UNGE, (int)MULTI_ARG_2_SF_CMP },
21650 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comordss", IX86_BUILTIN_COMORDSS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
21651 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunordss", IX86_BUILTIN_COMUNORDSS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
21653 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comeqsd", IX86_BUILTIN_COMEQSD, EQ, (int)MULTI_ARG_2_DF_CMP },
21654 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comnesd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
21655 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comneqsd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
21656 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comltsd", IX86_BUILTIN_COMLTSD, LT, (int)MULTI_ARG_2_DF_CMP },
21657 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comlesd", IX86_BUILTIN_COMLESD, LE, (int)MULTI_ARG_2_DF_CMP },
21658 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgtsd", IX86_BUILTIN_COMGTSD, GT, (int)MULTI_ARG_2_DF_CMP },
21659 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgesd", IX86_BUILTIN_COMGESD, GE, (int)MULTI_ARG_2_DF_CMP },
21660 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comueqsd", IX86_BUILTIN_COMUEQSD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
21661 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunesd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21662 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comuneqsd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21663 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunltsd", IX86_BUILTIN_COMULTSD, UNLT, (int)MULTI_ARG_2_DF_CMP },
21664 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunlesd", IX86_BUILTIN_COMULESD, UNLE, (int)MULTI_ARG_2_DF_CMP },
21665 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungtsd", IX86_BUILTIN_COMUGTSD, UNGT, (int)MULTI_ARG_2_DF_CMP },
21666 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungesd", IX86_BUILTIN_COMUGESD, UNGE, (int)MULTI_ARG_2_DF_CMP },
21667 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comordsd", IX86_BUILTIN_COMORDSD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
21668 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunordsd", IX86_BUILTIN_COMUNORDSD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
21670 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comeqps", IX86_BUILTIN_COMEQPS, EQ, (int)MULTI_ARG_2_SF_CMP },
21671 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
21672 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneqps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
21673 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comltps", IX86_BUILTIN_COMLTPS, LT, (int)MULTI_ARG_2_SF_CMP },
21674 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comleps", IX86_BUILTIN_COMLEPS, LE, (int)MULTI_ARG_2_SF_CMP },
21675 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgtps", IX86_BUILTIN_COMGTPS, GT, (int)MULTI_ARG_2_SF_CMP },
21676 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgeps", IX86_BUILTIN_COMGEPS, GE, (int)MULTI_ARG_2_SF_CMP },
21677 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comueqps", IX86_BUILTIN_COMUEQPS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
21678 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21679 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneqps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21680 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunltps", IX86_BUILTIN_COMULTPS, UNLT, (int)MULTI_ARG_2_SF_CMP },
21681 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunleps", IX86_BUILTIN_COMULEPS, UNLE, (int)MULTI_ARG_2_SF_CMP },
21682 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungtps", IX86_BUILTIN_COMUGTPS, UNGT, (int)MULTI_ARG_2_SF_CMP },
21683 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungeps", IX86_BUILTIN_COMUGEPS, UNGE, (int)MULTI_ARG_2_SF_CMP },
21684 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comordps", IX86_BUILTIN_COMORDPS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
21685 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunordps", IX86_BUILTIN_COMUNORDPS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
21687 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comeqpd", IX86_BUILTIN_COMEQPD, EQ, (int)MULTI_ARG_2_DF_CMP },
21688 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comnepd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
21689 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comneqpd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
21690 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comltpd", IX86_BUILTIN_COMLTPD, LT, (int)MULTI_ARG_2_DF_CMP },
21691 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comlepd", IX86_BUILTIN_COMLEPD, LE, (int)MULTI_ARG_2_DF_CMP },
21692 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgtpd", IX86_BUILTIN_COMGTPD, GT, (int)MULTI_ARG_2_DF_CMP },
21693 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgepd", IX86_BUILTIN_COMGEPD, GE, (int)MULTI_ARG_2_DF_CMP },
21694 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comueqpd", IX86_BUILTIN_COMUEQPD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
21695 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunepd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21696 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comuneqpd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21697 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunltpd", IX86_BUILTIN_COMULTPD, UNLT, (int)MULTI_ARG_2_DF_CMP },
21698 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunlepd", IX86_BUILTIN_COMULEPD, UNLE, (int)MULTI_ARG_2_DF_CMP },
21699 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungtpd", IX86_BUILTIN_COMUGTPD, UNGT, (int)MULTI_ARG_2_DF_CMP },
21700 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungepd", IX86_BUILTIN_COMUGEPD, UNGE, (int)MULTI_ARG_2_DF_CMP },
21701 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comordpd", IX86_BUILTIN_COMORDPD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
21702 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunordpd", IX86_BUILTIN_COMUNORDPD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
21704 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomeqb", IX86_BUILTIN_PCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
21705 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
21706 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneqb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
21707 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomltb", IX86_BUILTIN_PCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
21708 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomleb", IX86_BUILTIN_PCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
21709 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgtb", IX86_BUILTIN_PCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
21710 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgeb", IX86_BUILTIN_PCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
21712 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomeqw", IX86_BUILTIN_PCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
21713 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomnew", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
21714 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomneqw", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
21715 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomltw", IX86_BUILTIN_PCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
21716 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomlew", IX86_BUILTIN_PCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
21717 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgtw", IX86_BUILTIN_PCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
21718 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgew", IX86_BUILTIN_PCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
21720 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomeqd", IX86_BUILTIN_PCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
21721 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomned", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
21722 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomneqd", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
21723 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomltd", IX86_BUILTIN_PCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
21724 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomled", IX86_BUILTIN_PCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
21725 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomgtd", IX86_BUILTIN_PCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
21726 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomged", IX86_BUILTIN_PCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
21728 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomeqq", IX86_BUILTIN_PCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
21729 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
21730 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneqq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
21731 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomltq", IX86_BUILTIN_PCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
21732 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomleq", IX86_BUILTIN_PCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
21733 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgtq", IX86_BUILTIN_PCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
21734 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgeq", IX86_BUILTIN_PCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
21736 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomequb", IX86_BUILTIN_PCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
21737 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomneub", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
21738 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomnequb", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
21739 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomltub", IX86_BUILTIN_PCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
21740 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomleub", IX86_BUILTIN_PCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
21741 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgtub", IX86_BUILTIN_PCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
21742 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgeub", IX86_BUILTIN_PCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
21744 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomequw", IX86_BUILTIN_PCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
21745 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomneuw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
21746 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomnequw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
21747 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomltuw", IX86_BUILTIN_PCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
21748 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomleuw", IX86_BUILTIN_PCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
21749 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgtuw", IX86_BUILTIN_PCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
21750 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgeuw", IX86_BUILTIN_PCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
21752 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomequd", IX86_BUILTIN_PCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
21753 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomneud", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
21754 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomnequd", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
21755 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomltud", IX86_BUILTIN_PCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
21756 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomleud", IX86_BUILTIN_PCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
21757 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgtud", IX86_BUILTIN_PCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
21758 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgeud", IX86_BUILTIN_PCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
21760 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomequq", IX86_BUILTIN_PCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
21761 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomneuq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
21762 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomnequq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
21763 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomltuq", IX86_BUILTIN_PCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
21764 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomleuq", IX86_BUILTIN_PCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
21765 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgtuq", IX86_BUILTIN_PCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
21766 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgeuq", IX86_BUILTIN_PCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
21768 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalsess", IX86_BUILTIN_COMFALSESS, COM_FALSE_S, (int)MULTI_ARG_2_SF_TF },
21769 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtruess", IX86_BUILTIN_COMTRUESS, COM_TRUE_S, (int)MULTI_ARG_2_SF_TF },
21770 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalseps", IX86_BUILTIN_COMFALSEPS, COM_FALSE_P, (int)MULTI_ARG_2_SF_TF },
21771 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtrueps", IX86_BUILTIN_COMTRUEPS, COM_TRUE_P, (int)MULTI_ARG_2_SF_TF },
21772 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsesd", IX86_BUILTIN_COMFALSESD, COM_FALSE_S, (int)MULTI_ARG_2_DF_TF },
21773 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruesd", IX86_BUILTIN_COMTRUESD, COM_TRUE_S, (int)MULTI_ARG_2_DF_TF },
21774 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsepd", IX86_BUILTIN_COMFALSEPD, COM_FALSE_P, (int)MULTI_ARG_2_DF_TF },
21775 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruepd", IX86_BUILTIN_COMTRUEPD, COM_TRUE_P, (int)MULTI_ARG_2_DF_TF },
21777 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseb", IX86_BUILTIN_PCOMFALSEB, PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
21778 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalsew", IX86_BUILTIN_PCOMFALSEW, PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
21779 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalsed", IX86_BUILTIN_PCOMFALSED, PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
21780 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseq", IX86_BUILTIN_PCOMFALSEQ, PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
21781 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseub",IX86_BUILTIN_PCOMFALSEUB,PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
21782 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalseuw",IX86_BUILTIN_PCOMFALSEUW,PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
21783 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalseud",IX86_BUILTIN_PCOMFALSEUD,PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
21784 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseuq",IX86_BUILTIN_PCOMFALSEUQ,PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
21786 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueb", IX86_BUILTIN_PCOMTRUEB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
21787 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtruew", IX86_BUILTIN_PCOMTRUEW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
21788 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrued", IX86_BUILTIN_PCOMTRUED, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
21789 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueq", IX86_BUILTIN_PCOMTRUEQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
21790 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueub", IX86_BUILTIN_PCOMTRUEUB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
21791 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtrueuw", IX86_BUILTIN_PCOMTRUEUW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
21792 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrueud", IX86_BUILTIN_PCOMTRUEUD, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
21793 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueuq", IX86_BUILTIN_PCOMTRUEUQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
21794 };
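/* Illustrative sketch (not part of i386.c): direct use of one of the SSE5
   builtins registered in bdesc_multi_arg above.  __builtin_ia32_fmaddps is
   tagged MULTI_ARG_3_SF, i.e. it takes three V4SF operands and computes
   a * b + c through the CODE_FOR_sse5i_fmaddv4sf4 pattern; compile with
   -msse5.  The typedef and helper name are made up for the example.  */
typedef float v4sf_t __attribute__ ((__vector_size__ (16)));

static v4sf_t
fmadd4f (v4sf_t a, v4sf_t b, v4sf_t c)
{
  /* One fused multiply-add per vector lane.  */
  return __builtin_ia32_fmaddps (a, b, c);
}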
21796 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
21797 not in the current target ISA, so that the user can compile particular
21798 modules with target-specific options that differ from the command-line
21799 options; see the illustrative sketch below.  */
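/* Illustrative sketch (not part of i386.c) of what this unconditional
   registration enables: a module compiled without -mavx can still provide an
   AVX code path selected per function, assuming the function-specific
   target ("...") attribute support present in this GCC version.  The typedef
   and helper name are made up for the example.  */
typedef double v4df_t __attribute__ ((__vector_size__ (32)));

__attribute__ ((target ("avx")))
static v4df_t
vmax4d (v4df_t a, v4df_t b)
{
  /* __builtin_ia32_maxpd256 is registered even when AVX is not enabled on
     the command line, so it is usable inside this AVX-targeted function.  */
  return __builtin_ia32_maxpd256 (a, b);
}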
21800 static void
21801 ix86_init_mmx_sse_builtins (void)
21802 {
21803 const struct builtin_description * d;
21804 size_t i;
21806 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
21807 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
21808 tree V1DI_type_node
21809 = build_vector_type_for_mode (long_long_integer_type_node, V1DImode);
21810 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
21811 tree V2DI_type_node
21812 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
21813 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
21814 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
21815 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
21816 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
21817 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
21818 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
21820 tree pchar_type_node = build_pointer_type (char_type_node);
21821 tree pcchar_type_node
21822 = build_pointer_type (build_type_variant (char_type_node, 1, 0));
21823 tree pfloat_type_node = build_pointer_type (float_type_node);
21824 tree pcfloat_type_node
21825 = build_pointer_type (build_type_variant (float_type_node, 1, 0));
21826 tree pv2sf_type_node = build_pointer_type (V2SF_type_node);
21827 tree pcv2sf_type_node
21828 = build_pointer_type (build_type_variant (V2SF_type_node, 1, 0));
21829 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
21830 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
21832 /* Comparisons. */
21833 tree int_ftype_v4sf_v4sf
21834 = build_function_type_list (integer_type_node,
21835 V4SF_type_node, V4SF_type_node, NULL_TREE);
21836 tree v4si_ftype_v4sf_v4sf
21837 = build_function_type_list (V4SI_type_node,
21838 V4SF_type_node, V4SF_type_node, NULL_TREE);
21839 /* MMX/SSE/integer conversions. */
21840 tree int_ftype_v4sf
21841 = build_function_type_list (integer_type_node,
21842 V4SF_type_node, NULL_TREE);
21843 tree int64_ftype_v4sf
21844 = build_function_type_list (long_long_integer_type_node,
21845 V4SF_type_node, NULL_TREE);
21846 tree int_ftype_v8qi
21847 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
21848 tree v4sf_ftype_v4sf_int
21849 = build_function_type_list (V4SF_type_node,
21850 V4SF_type_node, integer_type_node, NULL_TREE);
21851 tree v4sf_ftype_v4sf_int64
21852 = build_function_type_list (V4SF_type_node,
21853 V4SF_type_node, long_long_integer_type_node,
21854 NULL_TREE);
21855 tree v4sf_ftype_v4sf_v2si
21856 = build_function_type_list (V4SF_type_node,
21857 V4SF_type_node, V2SI_type_node, NULL_TREE);
21859 /* Miscellaneous. */
21860 tree v8qi_ftype_v4hi_v4hi
21861 = build_function_type_list (V8QI_type_node,
21862 V4HI_type_node, V4HI_type_node, NULL_TREE);
21863 tree v4hi_ftype_v2si_v2si
21864 = build_function_type_list (V4HI_type_node,
21865 V2SI_type_node, V2SI_type_node, NULL_TREE);
21866 tree v4sf_ftype_v4sf_v4sf_int
21867 = build_function_type_list (V4SF_type_node,
21868 V4SF_type_node, V4SF_type_node,
21869 integer_type_node, NULL_TREE);
21870 tree v2si_ftype_v4hi_v4hi
21871 = build_function_type_list (V2SI_type_node,
21872 V4HI_type_node, V4HI_type_node, NULL_TREE);
21873 tree v4hi_ftype_v4hi_int
21874 = build_function_type_list (V4HI_type_node,
21875 V4HI_type_node, integer_type_node, NULL_TREE);
21876 tree v2si_ftype_v2si_int
21877 = build_function_type_list (V2SI_type_node,
21878 V2SI_type_node, integer_type_node, NULL_TREE);
21879 tree v1di_ftype_v1di_int
21880 = build_function_type_list (V1DI_type_node,
21881 V1DI_type_node, integer_type_node, NULL_TREE);
21883 tree void_ftype_void
21884 = build_function_type (void_type_node, void_list_node);
21885 tree void_ftype_unsigned
21886 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
21887 tree void_ftype_unsigned_unsigned
21888 = build_function_type_list (void_type_node, unsigned_type_node,
21889 unsigned_type_node, NULL_TREE);
21890 tree void_ftype_pcvoid_unsigned_unsigned
21891 = build_function_type_list (void_type_node, const_ptr_type_node,
21892 unsigned_type_node, unsigned_type_node,
21893 NULL_TREE);
21894 tree unsigned_ftype_void
21895 = build_function_type (unsigned_type_node, void_list_node);
21896 tree v2si_ftype_v4sf
21897 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
21898 /* Loads/stores. */
21899 tree void_ftype_v8qi_v8qi_pchar
21900 = build_function_type_list (void_type_node,
21901 V8QI_type_node, V8QI_type_node,
21902 pchar_type_node, NULL_TREE);
21903 tree v4sf_ftype_pcfloat
21904 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
21905 tree v4sf_ftype_v4sf_pcv2sf
21906 = build_function_type_list (V4SF_type_node,
21907 V4SF_type_node, pcv2sf_type_node, NULL_TREE);
21908 tree void_ftype_pv2sf_v4sf
21909 = build_function_type_list (void_type_node,
21910 pv2sf_type_node, V4SF_type_node, NULL_TREE);
21911 tree void_ftype_pfloat_v4sf
21912 = build_function_type_list (void_type_node,
21913 pfloat_type_node, V4SF_type_node, NULL_TREE);
21914 tree void_ftype_pdi_di
21915 = build_function_type_list (void_type_node,
21916 pdi_type_node, long_long_unsigned_type_node,
21917 NULL_TREE);
21918 tree void_ftype_pv2di_v2di
21919 = build_function_type_list (void_type_node,
21920 pv2di_type_node, V2DI_type_node, NULL_TREE);
21921 /* Normal vector unops. */
21922 tree v4sf_ftype_v4sf
21923 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
21924 tree v16qi_ftype_v16qi
21925 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
21926 tree v8hi_ftype_v8hi
21927 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
21928 tree v4si_ftype_v4si
21929 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
21930 tree v8qi_ftype_v8qi
21931 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
21932 tree v4hi_ftype_v4hi
21933 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
21935 /* Normal vector binops. */
21936 tree v4sf_ftype_v4sf_v4sf
21937 = build_function_type_list (V4SF_type_node,
21938 V4SF_type_node, V4SF_type_node, NULL_TREE);
21939 tree v8qi_ftype_v8qi_v8qi
21940 = build_function_type_list (V8QI_type_node,
21941 V8QI_type_node, V8QI_type_node, NULL_TREE);
21942 tree v4hi_ftype_v4hi_v4hi
21943 = build_function_type_list (V4HI_type_node,
21944 V4HI_type_node, V4HI_type_node, NULL_TREE);
21945 tree v2si_ftype_v2si_v2si
21946 = build_function_type_list (V2SI_type_node,
21947 V2SI_type_node, V2SI_type_node, NULL_TREE);
21948 tree v1di_ftype_v1di_v1di
21949 = build_function_type_list (V1DI_type_node,
21950 V1DI_type_node, V1DI_type_node, NULL_TREE);
21951 tree v1di_ftype_v1di_v1di_int
21952 = build_function_type_list (V1DI_type_node,
21953 V1DI_type_node, V1DI_type_node,
21954 integer_type_node, NULL_TREE);
21955 tree v2si_ftype_v2sf
21956 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
21957 tree v2sf_ftype_v2si
21958 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
21959 tree v2si_ftype_v2si
21960 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
21961 tree v2sf_ftype_v2sf
21962 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
21963 tree v2sf_ftype_v2sf_v2sf
21964 = build_function_type_list (V2SF_type_node,
21965 V2SF_type_node, V2SF_type_node, NULL_TREE);
21966 tree v2si_ftype_v2sf_v2sf
21967 = build_function_type_list (V2SI_type_node,
21968 V2SF_type_node, V2SF_type_node, NULL_TREE);
21969 tree pint_type_node = build_pointer_type (integer_type_node);
21970 tree pdouble_type_node = build_pointer_type (double_type_node);
21971 tree pcdouble_type_node = build_pointer_type (
21972 build_type_variant (double_type_node, 1, 0));
21973 tree int_ftype_v2df_v2df
21974 = build_function_type_list (integer_type_node,
21975 V2DF_type_node, V2DF_type_node, NULL_TREE);
21977 tree void_ftype_pcvoid
21978 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
21979 tree v4sf_ftype_v4si
21980 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
21981 tree v4si_ftype_v4sf
21982 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
21983 tree v2df_ftype_v4si
21984 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
21985 tree v4si_ftype_v2df
21986 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
21987 tree v4si_ftype_v2df_v2df
21988 = build_function_type_list (V4SI_type_node,
21989 V2DF_type_node, V2DF_type_node, NULL_TREE);
21990 tree v2si_ftype_v2df
21991 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
21992 tree v4sf_ftype_v2df
21993 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
21994 tree v2df_ftype_v2si
21995 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
21996 tree v2df_ftype_v4sf
21997 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
21998 tree int_ftype_v2df
21999 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
22000 tree int64_ftype_v2df
22001 = build_function_type_list (long_long_integer_type_node,
22002 V2DF_type_node, NULL_TREE);
22003 tree v2df_ftype_v2df_int
22004 = build_function_type_list (V2DF_type_node,
22005 V2DF_type_node, integer_type_node, NULL_TREE);
22006 tree v2df_ftype_v2df_int64
22007 = build_function_type_list (V2DF_type_node,
22008 V2DF_type_node, long_long_integer_type_node,
22009 NULL_TREE);
22010 tree v4sf_ftype_v4sf_v2df
22011 = build_function_type_list (V4SF_type_node,
22012 V4SF_type_node, V2DF_type_node, NULL_TREE);
22013 tree v2df_ftype_v2df_v4sf
22014 = build_function_type_list (V2DF_type_node,
22015 V2DF_type_node, V4SF_type_node, NULL_TREE);
22016 tree v2df_ftype_v2df_v2df_int
22017 = build_function_type_list (V2DF_type_node,
22018 V2DF_type_node, V2DF_type_node,
22019 integer_type_node,
22020 NULL_TREE);
22021 tree v2df_ftype_v2df_pcdouble
22022 = build_function_type_list (V2DF_type_node,
22023 V2DF_type_node, pcdouble_type_node, NULL_TREE);
22024 tree void_ftype_pdouble_v2df
22025 = build_function_type_list (void_type_node,
22026 pdouble_type_node, V2DF_type_node, NULL_TREE);
22027 tree void_ftype_pint_int
22028 = build_function_type_list (void_type_node,
22029 pint_type_node, integer_type_node, NULL_TREE);
22030 tree void_ftype_v16qi_v16qi_pchar
22031 = build_function_type_list (void_type_node,
22032 V16QI_type_node, V16QI_type_node,
22033 pchar_type_node, NULL_TREE);
22034 tree v2df_ftype_pcdouble
22035 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
22036 tree v2df_ftype_v2df_v2df
22037 = build_function_type_list (V2DF_type_node,
22038 V2DF_type_node, V2DF_type_node, NULL_TREE);
22039 tree v16qi_ftype_v16qi_v16qi
22040 = build_function_type_list (V16QI_type_node,
22041 V16QI_type_node, V16QI_type_node, NULL_TREE);
22042 tree v8hi_ftype_v8hi_v8hi
22043 = build_function_type_list (V8HI_type_node,
22044 V8HI_type_node, V8HI_type_node, NULL_TREE);
22045 tree v4si_ftype_v4si_v4si
22046 = build_function_type_list (V4SI_type_node,
22047 V4SI_type_node, V4SI_type_node, NULL_TREE);
22048 tree v2di_ftype_v2di_v2di
22049 = build_function_type_list (V2DI_type_node,
22050 V2DI_type_node, V2DI_type_node, NULL_TREE);
22051 tree v2di_ftype_v2df_v2df
22052 = build_function_type_list (V2DI_type_node,
22053 V2DF_type_node, V2DF_type_node, NULL_TREE);
22054 tree v2df_ftype_v2df
22055 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
22056 tree v2di_ftype_v2di_int
22057 = build_function_type_list (V2DI_type_node,
22058 V2DI_type_node, integer_type_node, NULL_TREE);
22059 tree v2di_ftype_v2di_v2di_int
22060 = build_function_type_list (V2DI_type_node, V2DI_type_node,
22061 V2DI_type_node, integer_type_node, NULL_TREE);
22062 tree v4si_ftype_v4si_int
22063 = build_function_type_list (V4SI_type_node,
22064 V4SI_type_node, integer_type_node, NULL_TREE);
22065 tree v8hi_ftype_v8hi_int
22066 = build_function_type_list (V8HI_type_node,
22067 V8HI_type_node, integer_type_node, NULL_TREE);
22068 tree v4si_ftype_v8hi_v8hi
22069 = build_function_type_list (V4SI_type_node,
22070 V8HI_type_node, V8HI_type_node, NULL_TREE);
22071 tree v1di_ftype_v8qi_v8qi
22072 = build_function_type_list (V1DI_type_node,
22073 V8QI_type_node, V8QI_type_node, NULL_TREE);
22074 tree v1di_ftype_v2si_v2si
22075 = build_function_type_list (V1DI_type_node,
22076 V2SI_type_node, V2SI_type_node, NULL_TREE);
22077 tree v2di_ftype_v16qi_v16qi
22078 = build_function_type_list (V2DI_type_node,
22079 V16QI_type_node, V16QI_type_node, NULL_TREE);
22080 tree v2di_ftype_v4si_v4si
22081 = build_function_type_list (V2DI_type_node,
22082 V4SI_type_node, V4SI_type_node, NULL_TREE);
22083 tree int_ftype_v16qi
22084 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
22085 tree v16qi_ftype_pcchar
22086 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
22087 tree void_ftype_pchar_v16qi
22088 = build_function_type_list (void_type_node,
22089 pchar_type_node, V16QI_type_node, NULL_TREE);
22091 tree v2di_ftype_v2di_unsigned_unsigned
22092 = build_function_type_list (V2DI_type_node, V2DI_type_node,
22093 unsigned_type_node, unsigned_type_node,
22094 NULL_TREE);
22095 tree v2di_ftype_v2di_v2di_unsigned_unsigned
22096 = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
22097 unsigned_type_node, unsigned_type_node,
22098 NULL_TREE);
22099 tree v2di_ftype_v2di_v16qi
22100 = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
22101 NULL_TREE);
22102 tree v2df_ftype_v2df_v2df_v2df
22103 = build_function_type_list (V2DF_type_node,
22104 V2DF_type_node, V2DF_type_node,
22105 V2DF_type_node, NULL_TREE);
22106 tree v4sf_ftype_v4sf_v4sf_v4sf
22107 = build_function_type_list (V4SF_type_node,
22108 V4SF_type_node, V4SF_type_node,
22109 V4SF_type_node, NULL_TREE);
22110 tree v8hi_ftype_v16qi
22111 = build_function_type_list (V8HI_type_node, V16QI_type_node,
22112 NULL_TREE);
22113 tree v4si_ftype_v16qi
22114 = build_function_type_list (V4SI_type_node, V16QI_type_node,
22115 NULL_TREE);
22116 tree v2di_ftype_v16qi
22117 = build_function_type_list (V2DI_type_node, V16QI_type_node,
22118 NULL_TREE);
22119 tree v4si_ftype_v8hi
22120 = build_function_type_list (V4SI_type_node, V8HI_type_node,
22121 NULL_TREE);
22122 tree v2di_ftype_v8hi
22123 = build_function_type_list (V2DI_type_node, V8HI_type_node,
22124 NULL_TREE);
22125 tree v2di_ftype_v4si
22126 = build_function_type_list (V2DI_type_node, V4SI_type_node,
22127 NULL_TREE);
22128 tree v2di_ftype_pv2di
22129 = build_function_type_list (V2DI_type_node, pv2di_type_node,
22130 NULL_TREE);
22131 tree v16qi_ftype_v16qi_v16qi_int
22132 = build_function_type_list (V16QI_type_node, V16QI_type_node,
22133 V16QI_type_node, integer_type_node,
22134 NULL_TREE);
22135 tree v16qi_ftype_v16qi_v16qi_v16qi
22136 = build_function_type_list (V16QI_type_node, V16QI_type_node,
22137 V16QI_type_node, V16QI_type_node,
22138 NULL_TREE);
22139 tree v8hi_ftype_v8hi_v8hi_int
22140 = build_function_type_list (V8HI_type_node, V8HI_type_node,
22141 V8HI_type_node, integer_type_node,
22142 NULL_TREE);
22143 tree v4si_ftype_v4si_v4si_int
22144 = build_function_type_list (V4SI_type_node, V4SI_type_node,
22145 V4SI_type_node, integer_type_node,
22146 NULL_TREE);
22147 tree int_ftype_v2di_v2di
22148 = build_function_type_list (integer_type_node,
22149 V2DI_type_node, V2DI_type_node,
22150 NULL_TREE);
22151 tree int_ftype_v16qi_int_v16qi_int_int
22152 = build_function_type_list (integer_type_node,
22153 V16QI_type_node,
22154 integer_type_node,
22155 V16QI_type_node,
22156 integer_type_node,
22157 integer_type_node,
22158 NULL_TREE);
22159 tree v16qi_ftype_v16qi_int_v16qi_int_int
22160 = build_function_type_list (V16QI_type_node,
22161 V16QI_type_node,
22162 integer_type_node,
22163 V16QI_type_node,
22164 integer_type_node,
22165 integer_type_node,
22166 NULL_TREE);
22167 tree int_ftype_v16qi_v16qi_int
22168 = build_function_type_list (integer_type_node,
22169 V16QI_type_node,
22170 V16QI_type_node,
22171 integer_type_node,
22172 NULL_TREE);
22174 /* SSE5 instructions */
22175 tree v2di_ftype_v2di_v2di_v2di
22176 = build_function_type_list (V2DI_type_node,
22177 V2DI_type_node,
22178 V2DI_type_node,
22179 V2DI_type_node,
22180 NULL_TREE);
22182 tree v4si_ftype_v4si_v4si_v4si
22183 = build_function_type_list (V4SI_type_node,
22184 V4SI_type_node,
22185 V4SI_type_node,
22186 V4SI_type_node,
22187 NULL_TREE);
22189 tree v4si_ftype_v4si_v4si_v2di
22190 = build_function_type_list (V4SI_type_node,
22191 V4SI_type_node,
22192 V4SI_type_node,
22193 V2DI_type_node,
22194 NULL_TREE);
22196 tree v8hi_ftype_v8hi_v8hi_v8hi
22197 = build_function_type_list (V8HI_type_node,
22198 V8HI_type_node,
22199 V8HI_type_node,
22200 V8HI_type_node,
22201 NULL_TREE);
22203 tree v8hi_ftype_v8hi_v8hi_v4si
22204 = build_function_type_list (V8HI_type_node,
22205 V8HI_type_node,
22206 V8HI_type_node,
22207 V4SI_type_node,
22208 NULL_TREE);
22210 tree v2df_ftype_v2df_v2df_v16qi
22211 = build_function_type_list (V2DF_type_node,
22212 V2DF_type_node,
22213 V2DF_type_node,
22214 V16QI_type_node,
22215 NULL_TREE);
22217 tree v4sf_ftype_v4sf_v4sf_v16qi
22218 = build_function_type_list (V4SF_type_node,
22219 V4SF_type_node,
22220 V4SF_type_node,
22221 V16QI_type_node,
22222 NULL_TREE);
22224 tree v2di_ftype_v2di_si
22225 = build_function_type_list (V2DI_type_node,
22226 V2DI_type_node,
22227 integer_type_node,
22228 NULL_TREE);
22230 tree v4si_ftype_v4si_si
22231 = build_function_type_list (V4SI_type_node,
22232 V4SI_type_node,
22233 integer_type_node,
22234 NULL_TREE);
22236 tree v8hi_ftype_v8hi_si
22237 = build_function_type_list (V8HI_type_node,
22238 V8HI_type_node,
22239 integer_type_node,
22240 NULL_TREE);
22242 tree v16qi_ftype_v16qi_si
22243 = build_function_type_list (V16QI_type_node,
22244 V16QI_type_node,
22245 integer_type_node,
22246 NULL_TREE);
22247 tree v4sf_ftype_v4hi
22248 = build_function_type_list (V4SF_type_node,
22249 V4HI_type_node,
22250 NULL_TREE);
22252 tree v4hi_ftype_v4sf
22253 = build_function_type_list (V4HI_type_node,
22254 V4SF_type_node,
22255 NULL_TREE);
22257 tree v2di_ftype_v2di
22258 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
22260 tree v16qi_ftype_v8hi_v8hi
22261 = build_function_type_list (V16QI_type_node,
22262 V8HI_type_node, V8HI_type_node,
22263 NULL_TREE);
22264 tree v8hi_ftype_v4si_v4si
22265 = build_function_type_list (V8HI_type_node,
22266 V4SI_type_node, V4SI_type_node,
22267 NULL_TREE);
22268 tree v8hi_ftype_v16qi_v16qi
22269 = build_function_type_list (V8HI_type_node,
22270 V16QI_type_node, V16QI_type_node,
22271 NULL_TREE);
22272 tree v4hi_ftype_v8qi_v8qi
22273 = build_function_type_list (V4HI_type_node,
22274 V8QI_type_node, V8QI_type_node,
22275 NULL_TREE);
22276 tree unsigned_ftype_unsigned_uchar
22277 = build_function_type_list (unsigned_type_node,
22278 unsigned_type_node,
22279 unsigned_char_type_node,
22280 NULL_TREE);
22281 tree unsigned_ftype_unsigned_ushort
22282 = build_function_type_list (unsigned_type_node,
22283 unsigned_type_node,
22284 short_unsigned_type_node,
22285 NULL_TREE);
22286 tree unsigned_ftype_unsigned_unsigned
22287 = build_function_type_list (unsigned_type_node,
22288 unsigned_type_node,
22289 unsigned_type_node,
22290 NULL_TREE);
22291 tree uint64_ftype_uint64_uint64
22292 = build_function_type_list (long_long_unsigned_type_node,
22293 long_long_unsigned_type_node,
22294 long_long_unsigned_type_node,
22295 NULL_TREE);
22296 tree float_ftype_float
22297 = build_function_type_list (float_type_node,
22298 float_type_node,
22299 NULL_TREE);
22301 /* AVX builtins */
22302 tree V32QI_type_node = build_vector_type_for_mode (char_type_node,
22303 V32QImode);
22304 tree V8SI_type_node = build_vector_type_for_mode (intSI_type_node,
22305 V8SImode);
22306 tree V8SF_type_node = build_vector_type_for_mode (float_type_node,
22307 V8SFmode);
22308 tree V4DI_type_node = build_vector_type_for_mode (long_long_integer_type_node,
22309 V4DImode);
22310 tree V4DF_type_node = build_vector_type_for_mode (double_type_node,
22311 V4DFmode);
22312 tree v8sf_ftype_v8sf
22313 = build_function_type_list (V8SF_type_node,
22314 V8SF_type_node,
22315 NULL_TREE);
22316 tree v8si_ftype_v8sf
22317 = build_function_type_list (V8SI_type_node,
22318 V8SF_type_node,
22319 NULL_TREE);
22320 tree v8sf_ftype_v8si
22321 = build_function_type_list (V8SF_type_node,
22322 V8SI_type_node,
22323 NULL_TREE);
22324 tree v4si_ftype_v4df
22325 = build_function_type_list (V4SI_type_node,
22326 V4DF_type_node,
22327 NULL_TREE);
22328 tree v4df_ftype_v4df
22329 = build_function_type_list (V4DF_type_node,
22330 V4DF_type_node,
22331 NULL_TREE);
22332 tree v4df_ftype_v4si
22333 = build_function_type_list (V4DF_type_node,
22334 V4SI_type_node,
22335 NULL_TREE);
22336 tree v4df_ftype_v4sf
22337 = build_function_type_list (V4DF_type_node,
22338 V4SF_type_node,
22339 NULL_TREE);
22340 tree v4sf_ftype_v4df
22341 = build_function_type_list (V4SF_type_node,
22342 V4DF_type_node,
22343 NULL_TREE);
22344 tree v8sf_ftype_v8sf_v8sf
22345 = build_function_type_list (V8SF_type_node,
22346 V8SF_type_node, V8SF_type_node,
22347 NULL_TREE);
22348 tree v4df_ftype_v4df_v4df
22349 = build_function_type_list (V4DF_type_node,
22350 V4DF_type_node, V4DF_type_node,
22351 NULL_TREE);
22352 tree v8sf_ftype_v8sf_int
22353 = build_function_type_list (V8SF_type_node,
22354 V8SF_type_node, integer_type_node,
22355 NULL_TREE);
22356 tree v4si_ftype_v8si_int
22357 = build_function_type_list (V4SI_type_node,
22358 V8SI_type_node, integer_type_node,
22359 NULL_TREE);
22360 tree v4df_ftype_v4df_int
22361 = build_function_type_list (V4DF_type_node,
22362 V4DF_type_node, integer_type_node,
22363 NULL_TREE);
22364 tree v4sf_ftype_v8sf_int
22365 = build_function_type_list (V4SF_type_node,
22366 V8SF_type_node, integer_type_node,
22367 NULL_TREE);
22368 tree v2df_ftype_v4df_int
22369 = build_function_type_list (V2DF_type_node,
22370 V4DF_type_node, integer_type_node,
22371 NULL_TREE);
22372 tree v8sf_ftype_v8sf_v8sf_int
22373 = build_function_type_list (V8SF_type_node,
22374 V8SF_type_node, V8SF_type_node,
22375 integer_type_node,
22376 NULL_TREE);
22377 tree v8sf_ftype_v8sf_v8sf_v8sf
22378 = build_function_type_list (V8SF_type_node,
22379 V8SF_type_node, V8SF_type_node,
22380 V8SF_type_node,
22381 NULL_TREE);
22382 tree v4df_ftype_v4df_v4df_v4df
22383 = build_function_type_list (V4DF_type_node,
22384 V4DF_type_node, V4DF_type_node,
22385 V4DF_type_node,
22386 NULL_TREE);
22387 tree v8si_ftype_v8si_v8si_int
22388 = build_function_type_list (V8SI_type_node,
22389 V8SI_type_node, V8SI_type_node,
22390 integer_type_node,
22391 NULL_TREE);
22392 tree v4df_ftype_v4df_v4df_int
22393 = build_function_type_list (V4DF_type_node,
22394 V4DF_type_node, V4DF_type_node,
22395 integer_type_node,
22396 NULL_TREE);
22397 tree v8sf_ftype_pcfloat
22398 = build_function_type_list (V8SF_type_node,
22399 pcfloat_type_node,
22400 NULL_TREE);
22401 tree v4df_ftype_pcdouble
22402 = build_function_type_list (V4DF_type_node,
22403 pcdouble_type_node,
22404 NULL_TREE);
22405 tree pcv4sf_type_node
22406 = build_pointer_type (build_type_variant (V4SF_type_node, 1, 0));
22407 tree pcv2df_type_node
22408 = build_pointer_type (build_type_variant (V2DF_type_node, 1, 0));
22409 tree v8sf_ftype_pcv4sf
22410 = build_function_type_list (V8SF_type_node,
22411 pcv4sf_type_node,
22412 NULL_TREE);
22413 tree v4df_ftype_pcv2df
22414 = build_function_type_list (V4DF_type_node,
22415 pcv2df_type_node,
22416 NULL_TREE);
22417 tree v32qi_ftype_pcchar
22418 = build_function_type_list (V32QI_type_node,
22419 pcchar_type_node,
22420 NULL_TREE);
22421 tree void_ftype_pchar_v32qi
22422 = build_function_type_list (void_type_node,
22423 pchar_type_node, V32QI_type_node,
22424 NULL_TREE);
22425 tree v8si_ftype_v8si_v4si_int
22426 = build_function_type_list (V8SI_type_node,
22427 V8SI_type_node, V4SI_type_node,
22428 integer_type_node,
22429 NULL_TREE);
22430 tree pv4di_type_node = build_pointer_type (V4DI_type_node);
22431 tree void_ftype_pv4di_v4di
22432 = build_function_type_list (void_type_node,
22433 pv4di_type_node, V4DI_type_node,
22434 NULL_TREE);
22435 tree v8sf_ftype_v8sf_v4sf_int
22436 = build_function_type_list (V8SF_type_node,
22437 V8SF_type_node, V4SF_type_node,
22438 integer_type_node,
22439 NULL_TREE);
22440 tree v4df_ftype_v4df_v2df_int
22441 = build_function_type_list (V4DF_type_node,
22442 V4DF_type_node, V2DF_type_node,
22443 integer_type_node,
22444 NULL_TREE);
22445 tree void_ftype_pfloat_v8sf
22446 = build_function_type_list (void_type_node,
22447 pfloat_type_node, V8SF_type_node,
22448 NULL_TREE);
22449 tree void_ftype_pdouble_v4df
22450 = build_function_type_list (void_type_node,
22451 pdouble_type_node, V4DF_type_node,
22452 NULL_TREE);
22453 tree pv8sf_type_node = build_pointer_type (V8SF_type_node);
22454 tree pv4sf_type_node = build_pointer_type (V4SF_type_node);
22455 tree pv4df_type_node = build_pointer_type (V4DF_type_node);
22456 tree pv2df_type_node = build_pointer_type (V2DF_type_node);
22457 tree pcv8sf_type_node
22458 = build_pointer_type (build_type_variant (V8SF_type_node, 1, 0));
22459 tree pcv4df_type_node
22460 = build_pointer_type (build_type_variant (V4DF_type_node, 1, 0));
22461 tree v8sf_ftype_pcv8sf_v8sf
22462 = build_function_type_list (V8SF_type_node,
22463 pcv8sf_type_node, V8SF_type_node,
22464 NULL_TREE);
22465 tree v4df_ftype_pcv4df_v4df
22466 = build_function_type_list (V4DF_type_node,
22467 pcv4df_type_node, V4DF_type_node,
22468 NULL_TREE);
22469 tree v4sf_ftype_pcv4sf_v4sf
22470 = build_function_type_list (V4SF_type_node,
22471 pcv4sf_type_node, V4SF_type_node,
22472 NULL_TREE);
22473 tree v2df_ftype_pcv2df_v2df
22474 = build_function_type_list (V2DF_type_node,
22475 pcv2df_type_node, V2DF_type_node,
22476 NULL_TREE);
22477 tree void_ftype_pv8sf_v8sf_v8sf
22478 = build_function_type_list (void_type_node,
22479 pv8sf_type_node, V8SF_type_node,
22480 V8SF_type_node,
22481 NULL_TREE);
22482 tree void_ftype_pv4df_v4df_v4df
22483 = build_function_type_list (void_type_node,
22484 pv4df_type_node, V4DF_type_node,
22485 V4DF_type_node,
22486 NULL_TREE);
22487 tree void_ftype_pv4sf_v4sf_v4sf
22488 = build_function_type_list (void_type_node,
22489 pv4sf_type_node, V4SF_type_node,
22490 V4SF_type_node,
22491 NULL_TREE);
22492 tree void_ftype_pv2df_v2df_v2df
22493 = build_function_type_list (void_type_node,
22494 pv2df_type_node, V2DF_type_node,
22495 V2DF_type_node,
22496 NULL_TREE);
22497 tree v4df_ftype_v2df
22498 = build_function_type_list (V4DF_type_node,
22499 V2DF_type_node,
22500 NULL_TREE);
22501 tree v8sf_ftype_v4sf
22502 = build_function_type_list (V8SF_type_node,
22503 V4SF_type_node,
22504 NULL_TREE);
22505 tree v8si_ftype_v4si
22506 = build_function_type_list (V8SI_type_node,
22507 V4SI_type_node,
22508 NULL_TREE);
22509 tree v2df_ftype_v4df
22510 = build_function_type_list (V2DF_type_node,
22511 V4DF_type_node,
22512 NULL_TREE);
22513 tree v4sf_ftype_v8sf
22514 = build_function_type_list (V4SF_type_node,
22515 V8SF_type_node,
22516 NULL_TREE);
22517 tree v4si_ftype_v8si
22518 = build_function_type_list (V4SI_type_node,
22519 V8SI_type_node,
22520 NULL_TREE);
22521 tree int_ftype_v4df
22522 = build_function_type_list (integer_type_node,
22523 V4DF_type_node,
22524 NULL_TREE);
22525 tree int_ftype_v8sf
22526 = build_function_type_list (integer_type_node,
22527 V8SF_type_node,
22528 NULL_TREE);
22529 tree int_ftype_v8sf_v8sf
22530 = build_function_type_list (integer_type_node,
22531 V8SF_type_node, V8SF_type_node,
22532 NULL_TREE);
22533 tree int_ftype_v4di_v4di
22534 = build_function_type_list (integer_type_node,
22535 V4DI_type_node, V4DI_type_node,
22536 NULL_TREE);
22537 tree int_ftype_v4df_v4df
22538 = build_function_type_list (integer_type_node,
22539 V4DF_type_node, V4DF_type_node,
22540 NULL_TREE);
22541 tree v8sf_ftype_v8sf_v8si
22542 = build_function_type_list (V8SF_type_node,
22543 V8SF_type_node, V8SI_type_node,
22544 NULL_TREE);
22545 tree v4df_ftype_v4df_v4di
22546 = build_function_type_list (V4DF_type_node,
22547 V4DF_type_node, V4DI_type_node,
22548 NULL_TREE);
22549 tree v4sf_ftype_v4sf_v4si
22550 = build_function_type_list (V4SF_type_node,
22551 V4SF_type_node, V4SI_type_node, NULL_TREE);
22552 tree v2df_ftype_v2df_v2di
22553 = build_function_type_list (V2DF_type_node,
22554 V2DF_type_node, V2DI_type_node, NULL_TREE);
22556 tree ftype;
22559 /* Add all special builtins with a variable number of operands. */
22559 for (i = 0, d = bdesc_special_args;
22560 i < ARRAY_SIZE (bdesc_special_args);
22561 i++, d++)
22563 tree type;
22565 if (d->name == 0)
22566 continue;
22568 switch ((enum ix86_special_builtin_type) d->flag)
22570 case VOID_FTYPE_VOID:
22571 type = void_ftype_void;
22572 break;
22573 case V32QI_FTYPE_PCCHAR:
22574 type = v32qi_ftype_pcchar;
22575 break;
22576 case V16QI_FTYPE_PCCHAR:
22577 type = v16qi_ftype_pcchar;
22578 break;
22579 case V8SF_FTYPE_PCV4SF:
22580 type = v8sf_ftype_pcv4sf;
22581 break;
22582 case V8SF_FTYPE_PCFLOAT:
22583 type = v8sf_ftype_pcfloat;
22584 break;
22585 case V4DF_FTYPE_PCV2DF:
22586 type = v4df_ftype_pcv2df;
22587 break;
22588 case V4DF_FTYPE_PCDOUBLE:
22589 type = v4df_ftype_pcdouble;
22590 break;
22591 case V4SF_FTYPE_PCFLOAT:
22592 type = v4sf_ftype_pcfloat;
22593 break;
22594 case V2DI_FTYPE_PV2DI:
22595 type = v2di_ftype_pv2di;
22596 break;
22597 case V2DF_FTYPE_PCDOUBLE:
22598 type = v2df_ftype_pcdouble;
22599 break;
22600 case V8SF_FTYPE_PCV8SF_V8SF:
22601 type = v8sf_ftype_pcv8sf_v8sf;
22602 break;
22603 case V4DF_FTYPE_PCV4DF_V4DF:
22604 type = v4df_ftype_pcv4df_v4df;
22605 break;
22606 case V4SF_FTYPE_V4SF_PCV2SF:
22607 type = v4sf_ftype_v4sf_pcv2sf;
22608 break;
22609 case V4SF_FTYPE_PCV4SF_V4SF:
22610 type = v4sf_ftype_pcv4sf_v4sf;
22611 break;
22612 case V2DF_FTYPE_V2DF_PCDOUBLE:
22613 type = v2df_ftype_v2df_pcdouble;
22614 break;
22615 case V2DF_FTYPE_PCV2DF_V2DF:
22616 type = v2df_ftype_pcv2df_v2df;
22617 break;
22618 case VOID_FTYPE_PV2SF_V4SF:
22619 type = void_ftype_pv2sf_v4sf;
22620 break;
22621 case VOID_FTYPE_PV4DI_V4DI:
22622 type = void_ftype_pv4di_v4di;
22623 break;
22624 case VOID_FTYPE_PV2DI_V2DI:
22625 type = void_ftype_pv2di_v2di;
22626 break;
22627 case VOID_FTYPE_PCHAR_V32QI:
22628 type = void_ftype_pchar_v32qi;
22629 break;
22630 case VOID_FTYPE_PCHAR_V16QI:
22631 type = void_ftype_pchar_v16qi;
22632 break;
22633 case VOID_FTYPE_PFLOAT_V8SF:
22634 type = void_ftype_pfloat_v8sf;
22635 break;
22636 case VOID_FTYPE_PFLOAT_V4SF:
22637 type = void_ftype_pfloat_v4sf;
22638 break;
22639 case VOID_FTYPE_PDOUBLE_V4DF:
22640 type = void_ftype_pdouble_v4df;
22641 break;
22642 case VOID_FTYPE_PDOUBLE_V2DF:
22643 type = void_ftype_pdouble_v2df;
22644 break;
22645 case VOID_FTYPE_PDI_DI:
22646 type = void_ftype_pdi_di;
22647 break;
22648 case VOID_FTYPE_PINT_INT:
22649 type = void_ftype_pint_int;
22650 break;
22651 case VOID_FTYPE_PV8SF_V8SF_V8SF:
22652 type = void_ftype_pv8sf_v8sf_v8sf;
22653 break;
22654 case VOID_FTYPE_PV4DF_V4DF_V4DF:
22655 type = void_ftype_pv4df_v4df_v4df;
22656 break;
22657 case VOID_FTYPE_PV4SF_V4SF_V4SF:
22658 type = void_ftype_pv4sf_v4sf_v4sf;
22659 break;
22660 case VOID_FTYPE_PV2DF_V2DF_V2DF:
22661 type = void_ftype_pv2df_v2df_v2df;
22662 break;
22663 default:
22664 gcc_unreachable ();
22667 def_builtin (d->mask, d->name, type, d->code);
22670 /* Add all builtins with a variable number of operands. */
22671 for (i = 0, d = bdesc_args;
22672 i < ARRAY_SIZE (bdesc_args);
22673 i++, d++)
22675 tree type;
22677 if (d->name == 0)
22678 continue;
22680 switch ((enum ix86_builtin_type) d->flag)
22682 case FLOAT_FTYPE_FLOAT:
22683 type = float_ftype_float;
22684 break;
22685 case INT_FTYPE_V8SF_V8SF_PTEST:
22686 type = int_ftype_v8sf_v8sf;
22687 break;
22688 case INT_FTYPE_V4DI_V4DI_PTEST:
22689 type = int_ftype_v4di_v4di;
22690 break;
22691 case INT_FTYPE_V4DF_V4DF_PTEST:
22692 type = int_ftype_v4df_v4df;
22693 break;
22694 case INT_FTYPE_V4SF_V4SF_PTEST:
22695 type = int_ftype_v4sf_v4sf;
22696 break;
22697 case INT_FTYPE_V2DI_V2DI_PTEST:
22698 type = int_ftype_v2di_v2di;
22699 break;
22700 case INT_FTYPE_V2DF_V2DF_PTEST:
22701 type = int_ftype_v2df_v2df;
22702 break;
22703 case INT64_FTYPE_V4SF:
22704 type = int64_ftype_v4sf;
22705 break;
22706 case INT64_FTYPE_V2DF:
22707 type = int64_ftype_v2df;
22708 break;
22709 case INT_FTYPE_V16QI:
22710 type = int_ftype_v16qi;
22711 break;
22712 case INT_FTYPE_V8QI:
22713 type = int_ftype_v8qi;
22714 break;
22715 case INT_FTYPE_V8SF:
22716 type = int_ftype_v8sf;
22717 break;
22718 case INT_FTYPE_V4DF:
22719 type = int_ftype_v4df;
22720 break;
22721 case INT_FTYPE_V4SF:
22722 type = int_ftype_v4sf;
22723 break;
22724 case INT_FTYPE_V2DF:
22725 type = int_ftype_v2df;
22726 break;
22727 case V16QI_FTYPE_V16QI:
22728 type = v16qi_ftype_v16qi;
22729 break;
22730 case V8SI_FTYPE_V8SF:
22731 type = v8si_ftype_v8sf;
22732 break;
22733 case V8SI_FTYPE_V4SI:
22734 type = v8si_ftype_v4si;
22735 break;
22736 case V8HI_FTYPE_V8HI:
22737 type = v8hi_ftype_v8hi;
22738 break;
22739 case V8HI_FTYPE_V16QI:
22740 type = v8hi_ftype_v16qi;
22741 break;
22742 case V8QI_FTYPE_V8QI:
22743 type = v8qi_ftype_v8qi;
22744 break;
22745 case V8SF_FTYPE_V8SF:
22746 type = v8sf_ftype_v8sf;
22747 break;
22748 case V8SF_FTYPE_V8SI:
22749 type = v8sf_ftype_v8si;
22750 break;
22751 case V8SF_FTYPE_V4SF:
22752 type = v8sf_ftype_v4sf;
22753 break;
22754 case V4SI_FTYPE_V4DF:
22755 type = v4si_ftype_v4df;
22756 break;
22757 case V4SI_FTYPE_V4SI:
22758 type = v4si_ftype_v4si;
22759 break;
22760 case V4SI_FTYPE_V16QI:
22761 type = v4si_ftype_v16qi;
22762 break;
22763 case V4SI_FTYPE_V8SI:
22764 type = v4si_ftype_v8si;
22765 break;
22766 case V4SI_FTYPE_V8HI:
22767 type = v4si_ftype_v8hi;
22768 break;
22769 case V4SI_FTYPE_V4SF:
22770 type = v4si_ftype_v4sf;
22771 break;
22772 case V4SI_FTYPE_V2DF:
22773 type = v4si_ftype_v2df;
22774 break;
22775 case V4HI_FTYPE_V4HI:
22776 type = v4hi_ftype_v4hi;
22777 break;
22778 case V4DF_FTYPE_V4DF:
22779 type = v4df_ftype_v4df;
22780 break;
22781 case V4DF_FTYPE_V4SI:
22782 type = v4df_ftype_v4si;
22783 break;
22784 case V4DF_FTYPE_V4SF:
22785 type = v4df_ftype_v4sf;
22786 break;
22787 case V4DF_FTYPE_V2DF:
22788 type = v4df_ftype_v2df;
22789 break;
22790 case V4SF_FTYPE_V4SF:
22791 case V4SF_FTYPE_V4SF_VEC_MERGE:
22792 type = v4sf_ftype_v4sf;
22793 break;
22794 case V4SF_FTYPE_V8SF:
22795 type = v4sf_ftype_v8sf;
22796 break;
22797 case V4SF_FTYPE_V4SI:
22798 type = v4sf_ftype_v4si;
22799 break;
22800 case V4SF_FTYPE_V4DF:
22801 type = v4sf_ftype_v4df;
22802 break;
22803 case V4SF_FTYPE_V2DF:
22804 type = v4sf_ftype_v2df;
22805 break;
22806 case V2DI_FTYPE_V2DI:
22807 type = v2di_ftype_v2di;
22808 break;
22809 case V2DI_FTYPE_V16QI:
22810 type = v2di_ftype_v16qi;
22811 break;
22812 case V2DI_FTYPE_V8HI:
22813 type = v2di_ftype_v8hi;
22814 break;
22815 case V2DI_FTYPE_V4SI:
22816 type = v2di_ftype_v4si;
22817 break;
22818 case V2SI_FTYPE_V2SI:
22819 type = v2si_ftype_v2si;
22820 break;
22821 case V2SI_FTYPE_V4SF:
22822 type = v2si_ftype_v4sf;
22823 break;
22824 case V2SI_FTYPE_V2DF:
22825 type = v2si_ftype_v2df;
22826 break;
22827 case V2SI_FTYPE_V2SF:
22828 type = v2si_ftype_v2sf;
22829 break;
22830 case V2DF_FTYPE_V4DF:
22831 type = v2df_ftype_v4df;
22832 break;
22833 case V2DF_FTYPE_V4SF:
22834 type = v2df_ftype_v4sf;
22835 break;
22836 case V2DF_FTYPE_V2DF:
22837 case V2DF_FTYPE_V2DF_VEC_MERGE:
22838 type = v2df_ftype_v2df;
22839 break;
22840 case V2DF_FTYPE_V2SI:
22841 type = v2df_ftype_v2si;
22842 break;
22843 case V2DF_FTYPE_V4SI:
22844 type = v2df_ftype_v4si;
22845 break;
22846 case V2SF_FTYPE_V2SF:
22847 type = v2sf_ftype_v2sf;
22848 break;
22849 case V2SF_FTYPE_V2SI:
22850 type = v2sf_ftype_v2si;
22851 break;
22852 case V16QI_FTYPE_V16QI_V16QI:
22853 type = v16qi_ftype_v16qi_v16qi;
22854 break;
22855 case V16QI_FTYPE_V8HI_V8HI:
22856 type = v16qi_ftype_v8hi_v8hi;
22857 break;
22858 case V8QI_FTYPE_V8QI_V8QI:
22859 type = v8qi_ftype_v8qi_v8qi;
22860 break;
22861 case V8QI_FTYPE_V4HI_V4HI:
22862 type = v8qi_ftype_v4hi_v4hi;
22863 break;
22864 case V8HI_FTYPE_V8HI_V8HI:
22865 case V8HI_FTYPE_V8HI_V8HI_COUNT:
22866 type = v8hi_ftype_v8hi_v8hi;
22867 break;
22868 case V8HI_FTYPE_V16QI_V16QI:
22869 type = v8hi_ftype_v16qi_v16qi;
22870 break;
22871 case V8HI_FTYPE_V4SI_V4SI:
22872 type = v8hi_ftype_v4si_v4si;
22873 break;
22874 case V8HI_FTYPE_V8HI_SI_COUNT:
22875 type = v8hi_ftype_v8hi_int;
22876 break;
22877 case V8SF_FTYPE_V8SF_V8SF:
22878 type = v8sf_ftype_v8sf_v8sf;
22879 break;
22880 case V8SF_FTYPE_V8SF_V8SI:
22881 type = v8sf_ftype_v8sf_v8si;
22882 break;
22883 case V4SI_FTYPE_V4SI_V4SI:
22884 case V4SI_FTYPE_V4SI_V4SI_COUNT:
22885 type = v4si_ftype_v4si_v4si;
22886 break;
22887 case V4SI_FTYPE_V8HI_V8HI:
22888 type = v4si_ftype_v8hi_v8hi;
22889 break;
22890 case V4SI_FTYPE_V4SF_V4SF:
22891 type = v4si_ftype_v4sf_v4sf;
22892 break;
22893 case V4SI_FTYPE_V2DF_V2DF:
22894 type = v4si_ftype_v2df_v2df;
22895 break;
22896 case V4SI_FTYPE_V4SI_SI_COUNT:
22897 type = v4si_ftype_v4si_int;
22898 break;
22899 case V4HI_FTYPE_V4HI_V4HI:
22900 case V4HI_FTYPE_V4HI_V4HI_COUNT:
22901 type = v4hi_ftype_v4hi_v4hi;
22902 break;
22903 case V4HI_FTYPE_V8QI_V8QI:
22904 type = v4hi_ftype_v8qi_v8qi;
22905 break;
22906 case V4HI_FTYPE_V2SI_V2SI:
22907 type = v4hi_ftype_v2si_v2si;
22908 break;
22909 case V4HI_FTYPE_V4HI_SI_COUNT:
22910 type = v4hi_ftype_v4hi_int;
22911 break;
22912 case V4DF_FTYPE_V4DF_V4DF:
22913 type = v4df_ftype_v4df_v4df;
22914 break;
22915 case V4DF_FTYPE_V4DF_V4DI:
22916 type = v4df_ftype_v4df_v4di;
22917 break;
22918 case V4SF_FTYPE_V4SF_V4SF:
22919 case V4SF_FTYPE_V4SF_V4SF_SWAP:
22920 type = v4sf_ftype_v4sf_v4sf;
22921 break;
22922 case V4SF_FTYPE_V4SF_V4SI:
22923 type = v4sf_ftype_v4sf_v4si;
22924 break;
22925 case V4SF_FTYPE_V4SF_V2SI:
22926 type = v4sf_ftype_v4sf_v2si;
22927 break;
22928 case V4SF_FTYPE_V4SF_V2DF:
22929 type = v4sf_ftype_v4sf_v2df;
22930 break;
22931 case V4SF_FTYPE_V4SF_DI:
22932 type = v4sf_ftype_v4sf_int64;
22933 break;
22934 case V4SF_FTYPE_V4SF_SI:
22935 type = v4sf_ftype_v4sf_int;
22936 break;
22937 case V2DI_FTYPE_V2DI_V2DI:
22938 case V2DI_FTYPE_V2DI_V2DI_COUNT:
22939 type = v2di_ftype_v2di_v2di;
22940 break;
22941 case V2DI_FTYPE_V16QI_V16QI:
22942 type = v2di_ftype_v16qi_v16qi;
22943 break;
22944 case V2DI_FTYPE_V4SI_V4SI:
22945 type = v2di_ftype_v4si_v4si;
22946 break;
22947 case V2DI_FTYPE_V2DI_V16QI:
22948 type = v2di_ftype_v2di_v16qi;
22949 break;
22950 case V2DI_FTYPE_V2DF_V2DF:
22951 type = v2di_ftype_v2df_v2df;
22952 break;
22953 case V2DI_FTYPE_V2DI_SI_COUNT:
22954 type = v2di_ftype_v2di_int;
22955 break;
22956 case V2SI_FTYPE_V2SI_V2SI:
22957 case V2SI_FTYPE_V2SI_V2SI_COUNT:
22958 type = v2si_ftype_v2si_v2si;
22959 break;
22960 case V2SI_FTYPE_V4HI_V4HI:
22961 type = v2si_ftype_v4hi_v4hi;
22962 break;
22963 case V2SI_FTYPE_V2SF_V2SF:
22964 type = v2si_ftype_v2sf_v2sf;
22965 break;
22966 case V2SI_FTYPE_V2SI_SI_COUNT:
22967 type = v2si_ftype_v2si_int;
22968 break;
22969 case V2DF_FTYPE_V2DF_V2DF:
22970 case V2DF_FTYPE_V2DF_V2DF_SWAP:
22971 type = v2df_ftype_v2df_v2df;
22972 break;
22973 case V2DF_FTYPE_V2DF_V4SF:
22974 type = v2df_ftype_v2df_v4sf;
22975 break;
22976 case V2DF_FTYPE_V2DF_V2DI:
22977 type = v2df_ftype_v2df_v2di;
22978 break;
22979 case V2DF_FTYPE_V2DF_DI:
22980 type = v2df_ftype_v2df_int64;
22981 break;
22982 case V2DF_FTYPE_V2DF_SI:
22983 type = v2df_ftype_v2df_int;
22984 break;
22985 case V2SF_FTYPE_V2SF_V2SF:
22986 type = v2sf_ftype_v2sf_v2sf;
22987 break;
22988 case V1DI_FTYPE_V1DI_V1DI:
22989 case V1DI_FTYPE_V1DI_V1DI_COUNT:
22990 type = v1di_ftype_v1di_v1di;
22991 break;
22992 case V1DI_FTYPE_V8QI_V8QI:
22993 type = v1di_ftype_v8qi_v8qi;
22994 break;
22995 case V1DI_FTYPE_V2SI_V2SI:
22996 type = v1di_ftype_v2si_v2si;
22997 break;
22998 case V1DI_FTYPE_V1DI_SI_COUNT:
22999 type = v1di_ftype_v1di_int;
23000 break;
23001 case UINT64_FTYPE_UINT64_UINT64:
23002 type = uint64_ftype_uint64_uint64;
23003 break;
23004 case UINT_FTYPE_UINT_UINT:
23005 type = unsigned_ftype_unsigned_unsigned;
23006 break;
23007 case UINT_FTYPE_UINT_USHORT:
23008 type = unsigned_ftype_unsigned_ushort;
23009 break;
23010 case UINT_FTYPE_UINT_UCHAR:
23011 type = unsigned_ftype_unsigned_uchar;
23012 break;
23013 case V8HI_FTYPE_V8HI_INT:
23014 type = v8hi_ftype_v8hi_int;
23015 break;
23016 case V8SF_FTYPE_V8SF_INT:
23017 type = v8sf_ftype_v8sf_int;
23018 break;
23019 case V4SI_FTYPE_V4SI_INT:
23020 type = v4si_ftype_v4si_int;
23021 break;
23022 case V4SI_FTYPE_V8SI_INT:
23023 type = v4si_ftype_v8si_int;
23024 break;
23025 case V4HI_FTYPE_V4HI_INT:
23026 type = v4hi_ftype_v4hi_int;
23027 break;
23028 case V4DF_FTYPE_V4DF_INT:
23029 type = v4df_ftype_v4df_int;
23030 break;
23031 case V4SF_FTYPE_V4SF_INT:
23032 type = v4sf_ftype_v4sf_int;
23033 break;
23034 case V4SF_FTYPE_V8SF_INT:
23035 type = v4sf_ftype_v8sf_int;
23036 break;
23037 case V2DI_FTYPE_V2DI_INT:
23038 case V2DI2TI_FTYPE_V2DI_INT:
23039 type = v2di_ftype_v2di_int;
23040 break;
23041 case V2DF_FTYPE_V2DF_INT:
23042 type = v2df_ftype_v2df_int;
23043 break;
23044 case V2DF_FTYPE_V4DF_INT:
23045 type = v2df_ftype_v4df_int;
23046 break;
23047 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23048 type = v16qi_ftype_v16qi_v16qi_v16qi;
23049 break;
23050 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23051 type = v8sf_ftype_v8sf_v8sf_v8sf;
23052 break;
23053 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23054 type = v4df_ftype_v4df_v4df_v4df;
23055 break;
23056 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23057 type = v4sf_ftype_v4sf_v4sf_v4sf;
23058 break;
23059 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23060 type = v2df_ftype_v2df_v2df_v2df;
23061 break;
23062 case V16QI_FTYPE_V16QI_V16QI_INT:
23063 type = v16qi_ftype_v16qi_v16qi_int;
23064 break;
23065 case V8SI_FTYPE_V8SI_V8SI_INT:
23066 type = v8si_ftype_v8si_v8si_int;
23067 break;
23068 case V8SI_FTYPE_V8SI_V4SI_INT:
23069 type = v8si_ftype_v8si_v4si_int;
23070 break;
23071 case V8HI_FTYPE_V8HI_V8HI_INT:
23072 type = v8hi_ftype_v8hi_v8hi_int;
23073 break;
23074 case V8SF_FTYPE_V8SF_V8SF_INT:
23075 type = v8sf_ftype_v8sf_v8sf_int;
23076 break;
23077 case V8SF_FTYPE_V8SF_V4SF_INT:
23078 type = v8sf_ftype_v8sf_v4sf_int;
23079 break;
23080 case V4SI_FTYPE_V4SI_V4SI_INT:
23081 type = v4si_ftype_v4si_v4si_int;
23082 break;
23083 case V4DF_FTYPE_V4DF_V4DF_INT:
23084 type = v4df_ftype_v4df_v4df_int;
23085 break;
23086 case V4DF_FTYPE_V4DF_V2DF_INT:
23087 type = v4df_ftype_v4df_v2df_int;
23088 break;
23089 case V4SF_FTYPE_V4SF_V4SF_INT:
23090 type = v4sf_ftype_v4sf_v4sf_int;
23091 break;
23092 case V2DI_FTYPE_V2DI_V2DI_INT:
23093 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
23094 type = v2di_ftype_v2di_v2di_int;
23095 break;
23096 case V2DF_FTYPE_V2DF_V2DF_INT:
23097 type = v2df_ftype_v2df_v2df_int;
23098 break;
23099 case V2DI_FTYPE_V2DI_UINT_UINT:
23100 type = v2di_ftype_v2di_unsigned_unsigned;
23101 break;
23102 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23103 type = v2di_ftype_v2di_v2di_unsigned_unsigned;
23104 break;
23105 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
23106 type = v1di_ftype_v1di_v1di_int;
23107 break;
23108 default:
23109 gcc_unreachable ();
23112 def_builtin_const (d->mask, d->name, type, d->code);
23115 /* pcmpestr[im] insns. */
23116 for (i = 0, d = bdesc_pcmpestr;
23117 i < ARRAY_SIZE (bdesc_pcmpestr);
23118 i++, d++)
23120 if (d->code == IX86_BUILTIN_PCMPESTRM128)
23121 ftype = v16qi_ftype_v16qi_int_v16qi_int_int;
23122 else
23123 ftype = int_ftype_v16qi_int_v16qi_int_int;
23124 def_builtin_const (d->mask, d->name, ftype, d->code);
23127 /* pcmpistr[im] insns. */
23128 for (i = 0, d = bdesc_pcmpistr;
23129 i < ARRAY_SIZE (bdesc_pcmpistr);
23130 i++, d++)
23132 if (d->code == IX86_BUILTIN_PCMPISTRM128)
23133 ftype = v16qi_ftype_v16qi_v16qi_int;
23134 else
23135 ftype = int_ftype_v16qi_v16qi_int;
23136 def_builtin_const (d->mask, d->name, ftype, d->code);
23139 /* comi/ucomi insns. */
23140 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
23141 if (d->mask == OPTION_MASK_ISA_SSE2)
23142 def_builtin_const (d->mask, d->name, int_ftype_v2df_v2df, d->code);
23143 else
23144 def_builtin_const (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
23146 /* SSE */
23147 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
23148 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
23150 /* SSE or 3DNow!A */
23151 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
23153 /* SSE2 */
23154 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
23156 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
23157 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
23159 /* SSE3. */
23160 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor", void_ftype_pcvoid_unsigned_unsigned, IX86_BUILTIN_MONITOR);
23161 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait", void_ftype_unsigned_unsigned, IX86_BUILTIN_MWAIT);
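  /* Illustrative only (not part of the original source): user code reaches
     these builtins through the corresponding MONITOR/MWAIT intrinsics,
     roughly

	__builtin_ia32_monitor (addr, 0, 0);
	__builtin_ia32_mwait (0, 0);

     matching the void_ftype_pcvoid_unsigned_unsigned and
     void_ftype_unsigned_unsigned signatures registered just above.  */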
23163 /* AES */
23164 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENC128);
23165 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENCLAST128);
23166 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDEC128);
23167 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDECLAST128);
23168 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128", v2di_ftype_v2di, IX86_BUILTIN_AESIMC128);
23169 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128", v2di_ftype_v2di_int, IX86_BUILTIN_AESKEYGENASSIST128);
23171 /* PCLMUL */
23172 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PCLMULQDQ128);
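  /* Illustrative only (not part of the original source): with the
     v2di_ftype_v2di_v2di_int signature above, a carry-less multiply of the
     low quadwords looks roughly like

	__v2di r = __builtin_ia32_pclmulqdq128 (a, b, 0x00);

     where the immediate selects which quadword of each operand is used.  */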
23174 /* AVX */
23175 def_builtin (OPTION_MASK_ISA_AVX, "__builtin_ia32_vzeroupper", void_ftype_void,
23176 TARGET_64BIT ? IX86_BUILTIN_VZEROUPPER_REX64 : IX86_BUILTIN_VZEROUPPER);
23178 /* Access to the vec_init patterns. */
23179 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
23180 integer_type_node, NULL_TREE);
23181 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si", ftype, IX86_BUILTIN_VEC_INIT_V2SI);
23183 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
23184 short_integer_type_node,
23185 short_integer_type_node,
23186 short_integer_type_node, NULL_TREE);
23187 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi", ftype, IX86_BUILTIN_VEC_INIT_V4HI);
23189 ftype = build_function_type_list (V8QI_type_node, char_type_node,
23190 char_type_node, char_type_node,
23191 char_type_node, char_type_node,
23192 char_type_node, char_type_node,
23193 char_type_node, NULL_TREE);
23194 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi", ftype, IX86_BUILTIN_VEC_INIT_V8QI);
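  /* Illustrative only (not part of the original source): the MMX set
     intrinsics in mmintrin.h are typically built on these vec_init builtins,
     e.g. _mm_set_pi32 (i1, i0) expands to roughly

	__builtin_ia32_vec_init_v2si (i0, i1);

     with element 0 passed first.  */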
23196 /* Access to the vec_extract patterns. */
23197 ftype = build_function_type_list (double_type_node, V2DF_type_node,
23198 integer_type_node, NULL_TREE);
23199 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df", ftype, IX86_BUILTIN_VEC_EXT_V2DF);
23201 ftype = build_function_type_list (long_long_integer_type_node,
23202 V2DI_type_node, integer_type_node,
23203 NULL_TREE);
23204 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di", ftype, IX86_BUILTIN_VEC_EXT_V2DI);
23206 ftype = build_function_type_list (float_type_node, V4SF_type_node,
23207 integer_type_node, NULL_TREE);
23208 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf", ftype, IX86_BUILTIN_VEC_EXT_V4SF);
23210 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
23211 integer_type_node, NULL_TREE);
23212 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si", ftype, IX86_BUILTIN_VEC_EXT_V4SI);
23214 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
23215 integer_type_node, NULL_TREE);
23216 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi", ftype, IX86_BUILTIN_VEC_EXT_V8HI);
23218 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
23219 integer_type_node, NULL_TREE);
23220 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_ext_v4hi", ftype, IX86_BUILTIN_VEC_EXT_V4HI);
23222 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
23223 integer_type_node, NULL_TREE);
23224 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si", ftype, IX86_BUILTIN_VEC_EXT_V2SI);
23226 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
23227 integer_type_node, NULL_TREE);
23228 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi", ftype, IX86_BUILTIN_VEC_EXT_V16QI);
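  /* Illustrative only (not part of the original source): these back the
     element-extract intrinsics, e.g. _mm_extract_epi16 in emmintrin.h is
     typically implemented as roughly

	(unsigned short) __builtin_ia32_vec_ext_v8hi ((__v8hi) a, n);  */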
23230 /* Access to the vec_set patterns. */
23231 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
23232 intDI_type_node,
23233 integer_type_node, NULL_TREE);
23234 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_vec_set_v2di", ftype, IX86_BUILTIN_VEC_SET_V2DI);
23236 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
23237 float_type_node,
23238 integer_type_node, NULL_TREE);
23239 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf", ftype, IX86_BUILTIN_VEC_SET_V4SF);
23241 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
23242 intSI_type_node,
23243 integer_type_node, NULL_TREE);
23244 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si", ftype, IX86_BUILTIN_VEC_SET_V4SI);
23246 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
23247 intHI_type_node,
23248 integer_type_node, NULL_TREE);
23249 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi", ftype, IX86_BUILTIN_VEC_SET_V8HI);
23251 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
23252 intHI_type_node,
23253 integer_type_node, NULL_TREE);
23254 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_set_v4hi", ftype, IX86_BUILTIN_VEC_SET_V4HI);
23256 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
23257 intQI_type_node,
23258 integer_type_node, NULL_TREE);
23259 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi", ftype, IX86_BUILTIN_VEC_SET_V16QI);
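  /* Illustrative only (not part of the original source): likewise the
     element-insert intrinsics, e.g. _mm_insert_epi16 is typically roughly

	(__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi) a, d, n);  */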
23262 /* Add the SSE5 multi-arg instructions */
23262 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
23264 tree mtype = NULL_TREE;
23266 if (d->name == 0)
23267 continue;
23269 switch ((enum multi_arg_type)d->flag)
23271 case MULTI_ARG_3_SF: mtype = v4sf_ftype_v4sf_v4sf_v4sf; break;
23272 case MULTI_ARG_3_DF: mtype = v2df_ftype_v2df_v2df_v2df; break;
23273 case MULTI_ARG_3_DI: mtype = v2di_ftype_v2di_v2di_v2di; break;
23274 case MULTI_ARG_3_SI: mtype = v4si_ftype_v4si_v4si_v4si; break;
23275 case MULTI_ARG_3_SI_DI: mtype = v4si_ftype_v4si_v4si_v2di; break;
23276 case MULTI_ARG_3_HI: mtype = v8hi_ftype_v8hi_v8hi_v8hi; break;
23277 case MULTI_ARG_3_HI_SI: mtype = v8hi_ftype_v8hi_v8hi_v4si; break;
23278 case MULTI_ARG_3_QI: mtype = v16qi_ftype_v16qi_v16qi_v16qi; break;
23279 case MULTI_ARG_3_PERMPS: mtype = v4sf_ftype_v4sf_v4sf_v16qi; break;
23280 case MULTI_ARG_3_PERMPD: mtype = v2df_ftype_v2df_v2df_v16qi; break;
23281 case MULTI_ARG_2_SF: mtype = v4sf_ftype_v4sf_v4sf; break;
23282 case MULTI_ARG_2_DF: mtype = v2df_ftype_v2df_v2df; break;
23283 case MULTI_ARG_2_DI: mtype = v2di_ftype_v2di_v2di; break;
23284 case MULTI_ARG_2_SI: mtype = v4si_ftype_v4si_v4si; break;
23285 case MULTI_ARG_2_HI: mtype = v8hi_ftype_v8hi_v8hi; break;
23286 case MULTI_ARG_2_QI: mtype = v16qi_ftype_v16qi_v16qi; break;
23287 case MULTI_ARG_2_DI_IMM: mtype = v2di_ftype_v2di_si; break;
23288 case MULTI_ARG_2_SI_IMM: mtype = v4si_ftype_v4si_si; break;
23289 case MULTI_ARG_2_HI_IMM: mtype = v8hi_ftype_v8hi_si; break;
23290 case MULTI_ARG_2_QI_IMM: mtype = v16qi_ftype_v16qi_si; break;
23291 case MULTI_ARG_2_SF_CMP: mtype = v4sf_ftype_v4sf_v4sf; break;
23292 case MULTI_ARG_2_DF_CMP: mtype = v2df_ftype_v2df_v2df; break;
23293 case MULTI_ARG_2_DI_CMP: mtype = v2di_ftype_v2di_v2di; break;
23294 case MULTI_ARG_2_SI_CMP: mtype = v4si_ftype_v4si_v4si; break;
23295 case MULTI_ARG_2_HI_CMP: mtype = v8hi_ftype_v8hi_v8hi; break;
23296 case MULTI_ARG_2_QI_CMP: mtype = v16qi_ftype_v16qi_v16qi; break;
23297 case MULTI_ARG_2_SF_TF: mtype = v4sf_ftype_v4sf_v4sf; break;
23298 case MULTI_ARG_2_DF_TF: mtype = v2df_ftype_v2df_v2df; break;
23299 case MULTI_ARG_2_DI_TF: mtype = v2di_ftype_v2di_v2di; break;
23300 case MULTI_ARG_2_SI_TF: mtype = v4si_ftype_v4si_v4si; break;
23301 case MULTI_ARG_2_HI_TF: mtype = v8hi_ftype_v8hi_v8hi; break;
23302 case MULTI_ARG_2_QI_TF: mtype = v16qi_ftype_v16qi_v16qi; break;
23303 case MULTI_ARG_1_SF: mtype = v4sf_ftype_v4sf; break;
23304 case MULTI_ARG_1_DF: mtype = v2df_ftype_v2df; break;
23305 case MULTI_ARG_1_DI: mtype = v2di_ftype_v2di; break;
23306 case MULTI_ARG_1_SI: mtype = v4si_ftype_v4si; break;
23307 case MULTI_ARG_1_HI: mtype = v8hi_ftype_v8hi; break;
23308 case MULTI_ARG_1_QI: mtype = v16qi_ftype_v16qi; break;
23309 case MULTI_ARG_1_SI_DI: mtype = v2di_ftype_v4si; break;
23310 case MULTI_ARG_1_HI_DI: mtype = v2di_ftype_v8hi; break;
23311 case MULTI_ARG_1_HI_SI: mtype = v4si_ftype_v8hi; break;
23312 case MULTI_ARG_1_QI_DI: mtype = v2di_ftype_v16qi; break;
23313 case MULTI_ARG_1_QI_SI: mtype = v4si_ftype_v16qi; break;
23314 case MULTI_ARG_1_QI_HI: mtype = v8hi_ftype_v16qi; break;
23315 case MULTI_ARG_1_PH2PS: mtype = v4sf_ftype_v4hi; break;
23316 case MULTI_ARG_1_PS2PH: mtype = v4hi_ftype_v4sf; break;
23317 case MULTI_ARG_UNKNOWN:
23318 default:
23319 gcc_unreachable ();
23322 if (mtype)
23323 def_builtin_const (d->mask, d->name, mtype, d->code);
23327 /* Internal subroutine of ix86_init_builtins. */
23329 static void
23330 ix86_init_builtins_va_builtins_abi (void)
23332 tree ms_va_ref, sysv_va_ref;
23333 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
23334 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
23335 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
23336 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
23338 if (!TARGET_64BIT)
23339 return;
23340 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
23341 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
23342 ms_va_ref = build_reference_type (ms_va_list_type_node);
23343 sysv_va_ref =
23344 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
23346 fnvoid_va_end_ms =
23347 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
23348 fnvoid_va_start_ms =
23349 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
23350 fnvoid_va_end_sysv =
23351 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
23352 fnvoid_va_start_sysv =
23353 build_varargs_function_type_list (void_type_node, sysv_va_ref,
23354 NULL_TREE);
23355 fnvoid_va_copy_ms =
23356 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
23357 NULL_TREE);
23358 fnvoid_va_copy_sysv =
23359 build_function_type_list (void_type_node, sysv_va_ref,
23360 sysv_va_ref, NULL_TREE);
23362 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
23363 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
23364 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
23365 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
23366 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
23367 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
23368 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
23369 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
23370 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
23371 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
23372 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
23373 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
23376 static void
23377 ix86_init_builtins (void)
23379 tree float128_type_node = make_node (REAL_TYPE);
23380 tree ftype, decl;
23382 /* The __float80 type. */
23383 if (TYPE_MODE (long_double_type_node) == XFmode)
23384 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
23385 "__float80");
23386 else
23388 /* The __float80 type. */
23389 tree float80_type_node = make_node (REAL_TYPE);
23391 TYPE_PRECISION (float80_type_node) = 80;
23392 layout_type (float80_type_node);
23393 (*lang_hooks.types.register_builtin_type) (float80_type_node,
23394 "__float80");
23397 /* The __float128 type. */
23398 TYPE_PRECISION (float128_type_node) = 128;
23399 layout_type (float128_type_node);
23400 (*lang_hooks.types.register_builtin_type) (float128_type_node,
23401 "__float128");
23403 /* TFmode support builtins. */
23404 ftype = build_function_type (float128_type_node, void_list_node);
23405 decl = add_builtin_function ("__builtin_infq", ftype,
23406 IX86_BUILTIN_INFQ, BUILT_IN_MD,
23407 NULL, NULL_TREE);
23408 ix86_builtins[(int) IX86_BUILTIN_INFQ] = decl;
23410 decl = add_builtin_function ("__builtin_huge_valq", ftype,
23411 IX86_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
23412 NULL, NULL_TREE);
23413 ix86_builtins[(int) IX86_BUILTIN_HUGE_VALQ] = decl;
23415 /* We will expand them to a normal call if SSE2 isn't available, since
23416 they are used by libgcc. */
23417 ftype = build_function_type_list (float128_type_node,
23418 float128_type_node,
23419 NULL_TREE);
23420 decl = add_builtin_function ("__builtin_fabsq", ftype,
23421 IX86_BUILTIN_FABSQ, BUILT_IN_MD,
23422 "__fabstf2", NULL_TREE);
23423 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = decl;
23424 TREE_READONLY (decl) = 1;
23426 ftype = build_function_type_list (float128_type_node,
23427 float128_type_node,
23428 float128_type_node,
23429 NULL_TREE);
23430 decl = add_builtin_function ("__builtin_copysignq", ftype,
23431 IX86_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
23432 "__copysigntf3", NULL_TREE);
23433 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = decl;
23434 TREE_READONLY (decl) = 1;
23436 ix86_init_mmx_sse_builtins ();
23437 if (TARGET_64BIT)
23438 ix86_init_builtins_va_builtins_abi ();
23441 /* Errors in the source file can cause expand_expr to return const0_rtx
23442 where we expect a vector. To avoid crashing, use one of the vector
23443 clear instructions. */
23444 static rtx
23445 safe_vector_operand (rtx x, enum machine_mode mode)
23447 if (x == const0_rtx)
23448 x = CONST0_RTX (mode);
23449 return x;
23452 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
23454 static rtx
23455 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
23457 rtx pat;
23458 tree arg0 = CALL_EXPR_ARG (exp, 0);
23459 tree arg1 = CALL_EXPR_ARG (exp, 1);
23460 rtx op0 = expand_normal (arg0);
23461 rtx op1 = expand_normal (arg1);
23462 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23463 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23464 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
23466 if (VECTOR_MODE_P (mode0))
23467 op0 = safe_vector_operand (op0, mode0);
23468 if (VECTOR_MODE_P (mode1))
23469 op1 = safe_vector_operand (op1, mode1);
23471 if (optimize || !target
23472 || GET_MODE (target) != tmode
23473 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23474 target = gen_reg_rtx (tmode);
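  /* If the insn wants a TImode operand (e.g. a full-register shift count)
     but the argument was expanded in SImode, load it into the low element
     of a V4SImode register (movd) and reinterpret the low part as TImode.  */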
23476 if (GET_MODE (op1) == SImode && mode1 == TImode)
23478 rtx x = gen_reg_rtx (V4SImode);
23479 emit_insn (gen_sse2_loadd (x, op1));
23480 op1 = gen_lowpart (TImode, x);
23483 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
23484 op0 = copy_to_mode_reg (mode0, op0);
23485 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
23486 op1 = copy_to_mode_reg (mode1, op1);
23488 pat = GEN_FCN (icode) (target, op0, op1);
23489 if (! pat)
23490 return 0;
23492 emit_insn (pat);
23494 return target;
23497 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
23499 static rtx
23500 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
23501 enum multi_arg_type m_type,
23502 enum insn_code sub_code)
23504 rtx pat;
23505 int i;
23506 int nargs;
23507 bool comparison_p = false;
23508 bool tf_p = false;
23509 bool last_arg_constant = false;
23510 int num_memory = 0;
23511 struct {
23512 rtx op;
23513 enum machine_mode mode;
23514 } args[4];
23516 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23518 switch (m_type)
23520 case MULTI_ARG_3_SF:
23521 case MULTI_ARG_3_DF:
23522 case MULTI_ARG_3_DI:
23523 case MULTI_ARG_3_SI:
23524 case MULTI_ARG_3_SI_DI:
23525 case MULTI_ARG_3_HI:
23526 case MULTI_ARG_3_HI_SI:
23527 case MULTI_ARG_3_QI:
23528 case MULTI_ARG_3_PERMPS:
23529 case MULTI_ARG_3_PERMPD:
23530 nargs = 3;
23531 break;
23533 case MULTI_ARG_2_SF:
23534 case MULTI_ARG_2_DF:
23535 case MULTI_ARG_2_DI:
23536 case MULTI_ARG_2_SI:
23537 case MULTI_ARG_2_HI:
23538 case MULTI_ARG_2_QI:
23539 nargs = 2;
23540 break;
23542 case MULTI_ARG_2_DI_IMM:
23543 case MULTI_ARG_2_SI_IMM:
23544 case MULTI_ARG_2_HI_IMM:
23545 case MULTI_ARG_2_QI_IMM:
23546 nargs = 2;
23547 last_arg_constant = true;
23548 break;
23550 case MULTI_ARG_1_SF:
23551 case MULTI_ARG_1_DF:
23552 case MULTI_ARG_1_DI:
23553 case MULTI_ARG_1_SI:
23554 case MULTI_ARG_1_HI:
23555 case MULTI_ARG_1_QI:
23556 case MULTI_ARG_1_SI_DI:
23557 case MULTI_ARG_1_HI_DI:
23558 case MULTI_ARG_1_HI_SI:
23559 case MULTI_ARG_1_QI_DI:
23560 case MULTI_ARG_1_QI_SI:
23561 case MULTI_ARG_1_QI_HI:
23562 case MULTI_ARG_1_PH2PS:
23563 case MULTI_ARG_1_PS2PH:
23564 nargs = 1;
23565 break;
23567 case MULTI_ARG_2_SF_CMP:
23568 case MULTI_ARG_2_DF_CMP:
23569 case MULTI_ARG_2_DI_CMP:
23570 case MULTI_ARG_2_SI_CMP:
23571 case MULTI_ARG_2_HI_CMP:
23572 case MULTI_ARG_2_QI_CMP:
23573 nargs = 2;
23574 comparison_p = true;
23575 break;
23577 case MULTI_ARG_2_SF_TF:
23578 case MULTI_ARG_2_DF_TF:
23579 case MULTI_ARG_2_DI_TF:
23580 case MULTI_ARG_2_SI_TF:
23581 case MULTI_ARG_2_HI_TF:
23582 case MULTI_ARG_2_QI_TF:
23583 nargs = 2;
23584 tf_p = true;
23585 break;
23587 case MULTI_ARG_UNKNOWN:
23588 default:
23589 gcc_unreachable ();
23592 if (optimize || !target
23593 || GET_MODE (target) != tmode
23594 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23595 target = gen_reg_rtx (tmode);
23597 gcc_assert (nargs <= 4);
23599 for (i = 0; i < nargs; i++)
23601 tree arg = CALL_EXPR_ARG (exp, i);
23602 rtx op = expand_normal (arg);
23603 int adjust = (comparison_p) ? 1 : 0;
23604 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
23606 if (last_arg_constant && i == nargs-1)
23608 if (GET_CODE (op) != CONST_INT)
23610 error ("last argument must be an immediate");
23611 return gen_reg_rtx (tmode);
23614 else
23616 if (VECTOR_MODE_P (mode))
23617 op = safe_vector_operand (op, mode);
23619 /* If we aren't optimizing, only allow one memory operand to be
23620 generated. */
23621 if (memory_operand (op, mode))
23622 num_memory++;
23624 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
23626 if (optimize
23627 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
23628 || num_memory > 1)
23629 op = force_reg (mode, op);
23632 args[i].op = op;
23633 args[i].mode = mode;
23636 switch (nargs)
23638 case 1:
23639 pat = GEN_FCN (icode) (target, args[0].op);
23640 break;
23642 case 2:
23643 if (tf_p)
23644 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23645 GEN_INT ((int)sub_code));
23646 else if (! comparison_p)
23647 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23648 else
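	/* For comparisons the condition is passed as an extra rtx operand
	   built from SUB_CODE, alongside the two input operands.  */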
23650 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23651 args[0].op,
23652 args[1].op);
23654 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23656 break;
23658 case 3:
23659 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23660 break;
23662 default:
23663 gcc_unreachable ();
23666 if (! pat)
23667 return 0;
23669 emit_insn (pat);
23670 return target;
23673 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23674 insns with vec_merge. */
23676 static rtx
23677 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23678 rtx target)
23680 rtx pat;
23681 tree arg0 = CALL_EXPR_ARG (exp, 0);
23682 rtx op1, op0 = expand_normal (arg0);
23683 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23684 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23686 if (optimize || !target
23687 || GET_MODE (target) != tmode
23688 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23689 target = gen_reg_rtx (tmode);
23691 if (VECTOR_MODE_P (mode0))
23692 op0 = safe_vector_operand (op0, mode0);
23694 if ((optimize && !register_operand (op0, mode0))
23695 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23696 op0 = copy_to_mode_reg (mode0, op0);
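  /* vec_merge patterns take the source twice: once as the operand of the
     scalar operation and once to supply the untouched upper elements.  */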
23698 op1 = op0;
23699 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23700 op1 = copy_to_mode_reg (mode0, op1);
23702 pat = GEN_FCN (icode) (target, op0, op1);
23703 if (! pat)
23704 return 0;
23705 emit_insn (pat);
23706 return target;
23709 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23711 static rtx
23712 ix86_expand_sse_compare (const struct builtin_description *d,
23713 tree exp, rtx target, bool swap)
23715 rtx pat;
23716 tree arg0 = CALL_EXPR_ARG (exp, 0);
23717 tree arg1 = CALL_EXPR_ARG (exp, 1);
23718 rtx op0 = expand_normal (arg0);
23719 rtx op1 = expand_normal (arg1);
23720 rtx op2;
23721 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23722 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23723 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23724 enum rtx_code comparison = d->comparison;
23726 if (VECTOR_MODE_P (mode0))
23727 op0 = safe_vector_operand (op0, mode0);
23728 if (VECTOR_MODE_P (mode1))
23729 op1 = safe_vector_operand (op1, mode1);
23731 /* Swap operands if we have a comparison that isn't available in
23732 hardware. */
23733 if (swap)
23735 rtx tmp = gen_reg_rtx (mode1);
23736 emit_move_insn (tmp, op1);
23737 op1 = op0;
23738 op0 = tmp;
23741 if (optimize || !target
23742 || GET_MODE (target) != tmode
23743 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23744 target = gen_reg_rtx (tmode);
23746 if ((optimize && !register_operand (op0, mode0))
23747 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23748 op0 = copy_to_mode_reg (mode0, op0);
23749 if ((optimize && !register_operand (op1, mode1))
23750 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23751 op1 = copy_to_mode_reg (mode1, op1);
23753 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23754 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23755 if (! pat)
23756 return 0;
23757 emit_insn (pat);
23758 return target;
23761 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23763 static rtx
23764 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23765 rtx target)
23767 rtx pat;
23768 tree arg0 = CALL_EXPR_ARG (exp, 0);
23769 tree arg1 = CALL_EXPR_ARG (exp, 1);
23770 rtx op0 = expand_normal (arg0);
23771 rtx op1 = expand_normal (arg1);
23772 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23773 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23774 enum rtx_code comparison = d->comparison;
23776 if (VECTOR_MODE_P (mode0))
23777 op0 = safe_vector_operand (op0, mode0);
23778 if (VECTOR_MODE_P (mode1))
23779 op1 = safe_vector_operand (op1, mode1);
23781 /* Swap operands if we have a comparison that isn't available in
23782 hardware. */
23783 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23785 rtx tmp = op1;
23786 op1 = op0;
23787 op0 = tmp;
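  /* Materialize the boolean result: clear an SImode pseudo and set its low
     byte from the condition computed by the comi/ucomi pattern.  */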
23790 target = gen_reg_rtx (SImode);
23791 emit_move_insn (target, const0_rtx);
23792 target = gen_rtx_SUBREG (QImode, target, 0);
23794 if ((optimize && !register_operand (op0, mode0))
23795 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23796 op0 = copy_to_mode_reg (mode0, op0);
23797 if ((optimize && !register_operand (op1, mode1))
23798 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23799 op1 = copy_to_mode_reg (mode1, op1);
23801 pat = GEN_FCN (d->icode) (op0, op1);
23802 if (! pat)
23803 return 0;
23804 emit_insn (pat);
23805 emit_insn (gen_rtx_SET (VOIDmode,
23806 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23807 gen_rtx_fmt_ee (comparison, QImode,
23808 SET_DEST (pat),
23809 const0_rtx)));
23811 return SUBREG_REG (target);
23814 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23816 static rtx
23817 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23818 rtx target)
23820 rtx pat;
23821 tree arg0 = CALL_EXPR_ARG (exp, 0);
23822 tree arg1 = CALL_EXPR_ARG (exp, 1);
23823 rtx op0 = expand_normal (arg0);
23824 rtx op1 = expand_normal (arg1);
23825 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23826 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23827 enum rtx_code comparison = d->comparison;
23829 if (VECTOR_MODE_P (mode0))
23830 op0 = safe_vector_operand (op0, mode0);
23831 if (VECTOR_MODE_P (mode1))
23832 op1 = safe_vector_operand (op1, mode1);
23834 target = gen_reg_rtx (SImode);
23835 emit_move_insn (target, const0_rtx);
23836 target = gen_rtx_SUBREG (QImode, target, 0);
23838 if ((optimize && !register_operand (op0, mode0))
23839 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23840 op0 = copy_to_mode_reg (mode0, op0);
23841 if ((optimize && !register_operand (op1, mode1))
23842 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23843 op1 = copy_to_mode_reg (mode1, op1);
23845 pat = GEN_FCN (d->icode) (op0, op1);
23846 if (! pat)
23847 return 0;
23848 emit_insn (pat);
23849 emit_insn (gen_rtx_SET (VOIDmode,
23850 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23851 gen_rtx_fmt_ee (comparison, QImode,
23852 SET_DEST (pat),
23853 const0_rtx)));
23855 return SUBREG_REG (target);
23858 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23860 static rtx
23861 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23862 tree exp, rtx target)
23864 rtx pat;
23865 tree arg0 = CALL_EXPR_ARG (exp, 0);
23866 tree arg1 = CALL_EXPR_ARG (exp, 1);
23867 tree arg2 = CALL_EXPR_ARG (exp, 2);
23868 tree arg3 = CALL_EXPR_ARG (exp, 3);
23869 tree arg4 = CALL_EXPR_ARG (exp, 4);
23870 rtx scratch0, scratch1;
23871 rtx op0 = expand_normal (arg0);
23872 rtx op1 = expand_normal (arg1);
23873 rtx op2 = expand_normal (arg2);
23874 rtx op3 = expand_normal (arg3);
23875 rtx op4 = expand_normal (arg4);
23876 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23878 tmode0 = insn_data[d->icode].operand[0].mode;
23879 tmode1 = insn_data[d->icode].operand[1].mode;
23880 modev2 = insn_data[d->icode].operand[2].mode;
23881 modei3 = insn_data[d->icode].operand[3].mode;
23882 modev4 = insn_data[d->icode].operand[4].mode;
23883 modei5 = insn_data[d->icode].operand[5].mode;
23884 modeimm = insn_data[d->icode].operand[6].mode;
23886 if (VECTOR_MODE_P (modev2))
23887 op0 = safe_vector_operand (op0, modev2);
23888 if (VECTOR_MODE_P (modev4))
23889 op2 = safe_vector_operand (op2, modev4);
23891 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23892 op0 = copy_to_mode_reg (modev2, op0);
23893 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23894 op1 = copy_to_mode_reg (modei3, op1);
23895 if ((optimize && !register_operand (op2, modev4))
23896 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23897 op2 = copy_to_mode_reg (modev4, op2);
23898 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23899 op3 = copy_to_mode_reg (modei5, op3);
23901 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23903 error ("the fifth argument must be a 8-bit immediate");
23904 return const0_rtx;
23907 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23909 if (optimize || !target
23910 || GET_MODE (target) != tmode0
23911 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23912 target = gen_reg_rtx (tmode0);
23914 scratch1 = gen_reg_rtx (tmode1);
23916 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23918 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23920 if (optimize || !target
23921 || GET_MODE (target) != tmode1
23922 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23923 target = gen_reg_rtx (tmode1);
23925 scratch0 = gen_reg_rtx (tmode0);
23927 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23929 else
23931 gcc_assert (d->flag);
23933 scratch0 = gen_reg_rtx (tmode0);
23934 scratch1 = gen_reg_rtx (tmode1);
23936 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23939 if (! pat)
23940 return 0;
23942 emit_insn (pat);
23944 if (d->flag)
23946 target = gen_reg_rtx (SImode);
23947 emit_move_insn (target, const0_rtx);
23948 target = gen_rtx_SUBREG (QImode, target, 0);
23950 emit_insn
23951 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23952 gen_rtx_fmt_ee (EQ, QImode,
23953 gen_rtx_REG ((enum machine_mode) d->flag,
23954 FLAGS_REG),
23955 const0_rtx)));
23956 return SUBREG_REG (target);
23958 else
23959 return target;
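/* Illustration (not part of the original source): these explicit-length
   builtins back the <smmintrin.h> intrinsics, e.g.

     int     idx = _mm_cmpestri (a, la, b, lb, _SIDD_CMP_EQUAL_EACH);
     __m128i msk = _mm_cmpestrm (a, la, b, lb, _SIDD_CMP_EQUAL_EACH);
     int      cf = _mm_cmpestrc (a, la, b, lb, _SIDD_CMP_EQUAL_EACH);

   The index and mask forms return the instruction's primary result; the
   flag-testing forms go through the d->flag path above, which copies the
   requested FLAGS_REG bit into a 0/1 value.  The fifth argument must be
   a compile-time 8-bit constant or an error is issued.  */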
23963 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23965 static rtx
23966 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23967 tree exp, rtx target)
23969 rtx pat;
23970 tree arg0 = CALL_EXPR_ARG (exp, 0);
23971 tree arg1 = CALL_EXPR_ARG (exp, 1);
23972 tree arg2 = CALL_EXPR_ARG (exp, 2);
23973 rtx scratch0, scratch1;
23974 rtx op0 = expand_normal (arg0);
23975 rtx op1 = expand_normal (arg1);
23976 rtx op2 = expand_normal (arg2);
23977 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23979 tmode0 = insn_data[d->icode].operand[0].mode;
23980 tmode1 = insn_data[d->icode].operand[1].mode;
23981 modev2 = insn_data[d->icode].operand[2].mode;
23982 modev3 = insn_data[d->icode].operand[3].mode;
23983 modeimm = insn_data[d->icode].operand[4].mode;
23985 if (VECTOR_MODE_P (modev2))
23986 op0 = safe_vector_operand (op0, modev2);
23987 if (VECTOR_MODE_P (modev3))
23988 op1 = safe_vector_operand (op1, modev3);
23990 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23991 op0 = copy_to_mode_reg (modev2, op0);
23992 if ((optimize && !register_operand (op1, modev3))
23993 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23994 op1 = copy_to_mode_reg (modev3, op1);
23996 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23998 error ("the third argument must be a 8-bit immediate");
23999 return const0_rtx;
24002 if (d->code == IX86_BUILTIN_PCMPISTRI128)
24004 if (optimize || !target
24005 || GET_MODE (target) != tmode0
24006 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
24007 target = gen_reg_rtx (tmode0);
24009 scratch1 = gen_reg_rtx (tmode1);
24011 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
24013 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
24015 if (optimize || !target
24016 || GET_MODE (target) != tmode1
24017 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
24018 target = gen_reg_rtx (tmode1);
24020 scratch0 = gen_reg_rtx (tmode0);
24022 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
24024 else
24026 gcc_assert (d->flag);
24028 scratch0 = gen_reg_rtx (tmode0);
24029 scratch1 = gen_reg_rtx (tmode1);
24031 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
24034 if (! pat)
24035 return 0;
24037 emit_insn (pat);
24039 if (d->flag)
24041 target = gen_reg_rtx (SImode);
24042 emit_move_insn (target, const0_rtx);
24043 target = gen_rtx_SUBREG (QImode, target, 0);
24045 emit_insn
24046 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
24047 gen_rtx_fmt_ee (EQ, QImode,
24048 gen_rtx_REG ((enum machine_mode) d->flag,
24049 FLAGS_REG),
24050 const0_rtx)));
24051 return SUBREG_REG (target);
24053 else
24054 return target;
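/* Illustration (not part of the original source): the implicit-length
   variants take only the two vectors and the immediate, e.g.

     int idx = _mm_cmpistri (a, b, _SIDD_CMP_EQUAL_ORDERED);

   String lengths are implied by zero terminators inside the operands,
   so there are no length operands to validate here.  */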
24057 /* Subroutine of ix86_expand_builtin to take care of insns with
24058 variable number of operands. */
24060 static rtx
24061 ix86_expand_args_builtin (const struct builtin_description *d,
24062 tree exp, rtx target)
24064 rtx pat, real_target;
24065 unsigned int i, nargs;
24066 unsigned int nargs_constant = 0;
24067 int num_memory = 0;
24068 struct
24070 rtx op;
24071 enum machine_mode mode;
24072 } args[4];
24073 bool last_arg_count = false;
24074 enum insn_code icode = d->icode;
24075 const struct insn_data *insn_p = &insn_data[icode];
24076 enum machine_mode tmode = insn_p->operand[0].mode;
24077 enum machine_mode rmode = VOIDmode;
24078 bool swap = false;
24079 enum rtx_code comparison = d->comparison;
24081 switch ((enum ix86_builtin_type) d->flag)
24083 case INT_FTYPE_V8SF_V8SF_PTEST:
24084 case INT_FTYPE_V4DI_V4DI_PTEST:
24085 case INT_FTYPE_V4DF_V4DF_PTEST:
24086 case INT_FTYPE_V4SF_V4SF_PTEST:
24087 case INT_FTYPE_V2DI_V2DI_PTEST:
24088 case INT_FTYPE_V2DF_V2DF_PTEST:
24089 return ix86_expand_sse_ptest (d, exp, target);
24090 case FLOAT128_FTYPE_FLOAT128:
24091 case FLOAT_FTYPE_FLOAT:
24092 case INT64_FTYPE_V4SF:
24093 case INT64_FTYPE_V2DF:
24094 case INT_FTYPE_V16QI:
24095 case INT_FTYPE_V8QI:
24096 case INT_FTYPE_V8SF:
24097 case INT_FTYPE_V4DF:
24098 case INT_FTYPE_V4SF:
24099 case INT_FTYPE_V2DF:
24100 case V16QI_FTYPE_V16QI:
24101 case V8SI_FTYPE_V8SF:
24102 case V8SI_FTYPE_V4SI:
24103 case V8HI_FTYPE_V8HI:
24104 case V8HI_FTYPE_V16QI:
24105 case V8QI_FTYPE_V8QI:
24106 case V8SF_FTYPE_V8SF:
24107 case V8SF_FTYPE_V8SI:
24108 case V8SF_FTYPE_V4SF:
24109 case V4SI_FTYPE_V4SI:
24110 case V4SI_FTYPE_V16QI:
24111 case V4SI_FTYPE_V4SF:
24112 case V4SI_FTYPE_V8SI:
24113 case V4SI_FTYPE_V8HI:
24114 case V4SI_FTYPE_V4DF:
24115 case V4SI_FTYPE_V2DF:
24116 case V4HI_FTYPE_V4HI:
24117 case V4DF_FTYPE_V4DF:
24118 case V4DF_FTYPE_V4SI:
24119 case V4DF_FTYPE_V4SF:
24120 case V4DF_FTYPE_V2DF:
24121 case V4SF_FTYPE_V4SF:
24122 case V4SF_FTYPE_V4SI:
24123 case V4SF_FTYPE_V8SF:
24124 case V4SF_FTYPE_V4DF:
24125 case V4SF_FTYPE_V2DF:
24126 case V2DI_FTYPE_V2DI:
24127 case V2DI_FTYPE_V16QI:
24128 case V2DI_FTYPE_V8HI:
24129 case V2DI_FTYPE_V4SI:
24130 case V2DF_FTYPE_V2DF:
24131 case V2DF_FTYPE_V4SI:
24132 case V2DF_FTYPE_V4DF:
24133 case V2DF_FTYPE_V4SF:
24134 case V2DF_FTYPE_V2SI:
24135 case V2SI_FTYPE_V2SI:
24136 case V2SI_FTYPE_V4SF:
24137 case V2SI_FTYPE_V2SF:
24138 case V2SI_FTYPE_V2DF:
24139 case V2SF_FTYPE_V2SF:
24140 case V2SF_FTYPE_V2SI:
24141 nargs = 1;
24142 break;
24143 case V4SF_FTYPE_V4SF_VEC_MERGE:
24144 case V2DF_FTYPE_V2DF_VEC_MERGE:
24145 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
24146 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
24147 case V16QI_FTYPE_V16QI_V16QI:
24148 case V16QI_FTYPE_V8HI_V8HI:
24149 case V8QI_FTYPE_V8QI_V8QI:
24150 case V8QI_FTYPE_V4HI_V4HI:
24151 case V8HI_FTYPE_V8HI_V8HI:
24152 case V8HI_FTYPE_V16QI_V16QI:
24153 case V8HI_FTYPE_V4SI_V4SI:
24154 case V8SF_FTYPE_V8SF_V8SF:
24155 case V8SF_FTYPE_V8SF_V8SI:
24156 case V4SI_FTYPE_V4SI_V4SI:
24157 case V4SI_FTYPE_V8HI_V8HI:
24158 case V4SI_FTYPE_V4SF_V4SF:
24159 case V4SI_FTYPE_V2DF_V2DF:
24160 case V4HI_FTYPE_V4HI_V4HI:
24161 case V4HI_FTYPE_V8QI_V8QI:
24162 case V4HI_FTYPE_V2SI_V2SI:
24163 case V4DF_FTYPE_V4DF_V4DF:
24164 case V4DF_FTYPE_V4DF_V4DI:
24165 case V4SF_FTYPE_V4SF_V4SF:
24166 case V4SF_FTYPE_V4SF_V4SI:
24167 case V4SF_FTYPE_V4SF_V2SI:
24168 case V4SF_FTYPE_V4SF_V2DF:
24169 case V4SF_FTYPE_V4SF_DI:
24170 case V4SF_FTYPE_V4SF_SI:
24171 case V2DI_FTYPE_V2DI_V2DI:
24172 case V2DI_FTYPE_V16QI_V16QI:
24173 case V2DI_FTYPE_V4SI_V4SI:
24174 case V2DI_FTYPE_V2DI_V16QI:
24175 case V2DI_FTYPE_V2DF_V2DF:
24176 case V2SI_FTYPE_V2SI_V2SI:
24177 case V2SI_FTYPE_V4HI_V4HI:
24178 case V2SI_FTYPE_V2SF_V2SF:
24179 case V2DF_FTYPE_V2DF_V2DF:
24180 case V2DF_FTYPE_V2DF_V4SF:
24181 case V2DF_FTYPE_V2DF_V2DI:
24182 case V2DF_FTYPE_V2DF_DI:
24183 case V2DF_FTYPE_V2DF_SI:
24184 case V2SF_FTYPE_V2SF_V2SF:
24185 case V1DI_FTYPE_V1DI_V1DI:
24186 case V1DI_FTYPE_V8QI_V8QI:
24187 case V1DI_FTYPE_V2SI_V2SI:
24188 if (comparison == UNKNOWN)
24189 return ix86_expand_binop_builtin (icode, exp, target);
24190 nargs = 2;
24191 break;
24192 case V4SF_FTYPE_V4SF_V4SF_SWAP:
24193 case V2DF_FTYPE_V2DF_V2DF_SWAP:
24194 gcc_assert (comparison != UNKNOWN);
24195 nargs = 2;
24196 swap = true;
24197 break;
24198 case V8HI_FTYPE_V8HI_V8HI_COUNT:
24199 case V8HI_FTYPE_V8HI_SI_COUNT:
24200 case V4SI_FTYPE_V4SI_V4SI_COUNT:
24201 case V4SI_FTYPE_V4SI_SI_COUNT:
24202 case V4HI_FTYPE_V4HI_V4HI_COUNT:
24203 case V4HI_FTYPE_V4HI_SI_COUNT:
24204 case V2DI_FTYPE_V2DI_V2DI_COUNT:
24205 case V2DI_FTYPE_V2DI_SI_COUNT:
24206 case V2SI_FTYPE_V2SI_V2SI_COUNT:
24207 case V2SI_FTYPE_V2SI_SI_COUNT:
24208 case V1DI_FTYPE_V1DI_V1DI_COUNT:
24209 case V1DI_FTYPE_V1DI_SI_COUNT:
24210 nargs = 2;
24211 last_arg_count = true;
24212 break;
24213 case UINT64_FTYPE_UINT64_UINT64:
24214 case UINT_FTYPE_UINT_UINT:
24215 case UINT_FTYPE_UINT_USHORT:
24216 case UINT_FTYPE_UINT_UCHAR:
24217 nargs = 2;
24218 break;
24219 case V2DI2TI_FTYPE_V2DI_INT:
24220 nargs = 2;
24221 rmode = V2DImode;
24222 nargs_constant = 1;
24223 break;
24224 case V8HI_FTYPE_V8HI_INT:
24225 case V8SF_FTYPE_V8SF_INT:
24226 case V4SI_FTYPE_V4SI_INT:
24227 case V4SI_FTYPE_V8SI_INT:
24228 case V4HI_FTYPE_V4HI_INT:
24229 case V4DF_FTYPE_V4DF_INT:
24230 case V4SF_FTYPE_V4SF_INT:
24231 case V4SF_FTYPE_V8SF_INT:
24232 case V2DI_FTYPE_V2DI_INT:
24233 case V2DF_FTYPE_V2DF_INT:
24234 case V2DF_FTYPE_V4DF_INT:
24235 nargs = 2;
24236 nargs_constant = 1;
24237 break;
24238 case V16QI_FTYPE_V16QI_V16QI_V16QI:
24239 case V8SF_FTYPE_V8SF_V8SF_V8SF:
24240 case V4DF_FTYPE_V4DF_V4DF_V4DF:
24241 case V4SF_FTYPE_V4SF_V4SF_V4SF:
24242 case V2DF_FTYPE_V2DF_V2DF_V2DF:
24243 nargs = 3;
24244 break;
24245 case V16QI_FTYPE_V16QI_V16QI_INT:
24246 case V8HI_FTYPE_V8HI_V8HI_INT:
24247 case V8SI_FTYPE_V8SI_V8SI_INT:
24248 case V8SI_FTYPE_V8SI_V4SI_INT:
24249 case V8SF_FTYPE_V8SF_V8SF_INT:
24250 case V8SF_FTYPE_V8SF_V4SF_INT:
24251 case V4SI_FTYPE_V4SI_V4SI_INT:
24252 case V4DF_FTYPE_V4DF_V4DF_INT:
24253 case V4DF_FTYPE_V4DF_V2DF_INT:
24254 case V4SF_FTYPE_V4SF_V4SF_INT:
24255 case V2DI_FTYPE_V2DI_V2DI_INT:
24256 case V2DF_FTYPE_V2DF_V2DF_INT:
24257 nargs = 3;
24258 nargs_constant = 1;
24259 break;
24260 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
24261 nargs = 3;
24262 rmode = V2DImode;
24263 nargs_constant = 1;
24264 break;
24265 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
24266 nargs = 3;
24267 rmode = DImode;
24268 nargs_constant = 1;
24269 break;
24270 case V2DI_FTYPE_V2DI_UINT_UINT:
24271 nargs = 3;
24272 nargs_constant = 2;
24273 break;
24274 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
24275 nargs = 4;
24276 nargs_constant = 2;
24277 break;
24278 default:
24279 gcc_unreachable ();
24282 gcc_assert (nargs <= ARRAY_SIZE (args));
24284 if (comparison != UNKNOWN)
24286 gcc_assert (nargs == 2);
24287 return ix86_expand_sse_compare (d, exp, target, swap);
24290 if (rmode == VOIDmode || rmode == tmode)
24292 if (optimize
24293 || target == 0
24294 || GET_MODE (target) != tmode
24295 || ! (*insn_p->operand[0].predicate) (target, tmode))
24296 target = gen_reg_rtx (tmode);
24297 real_target = target;
24299 else
24301 target = gen_reg_rtx (rmode);
24302 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
24305 for (i = 0; i < nargs; i++)
24307 tree arg = CALL_EXPR_ARG (exp, i);
24308 rtx op = expand_normal (arg);
24309 enum machine_mode mode = insn_p->operand[i + 1].mode;
24310 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
24312 if (last_arg_count && (i + 1) == nargs)
24314 /* SIMD shift insns take either an 8-bit immediate or a
24315 register as the count, but the builtin functions take an int.
24316 If the count doesn't match, we put it in a register. */
24317 if (!match)
24319 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
24320 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
24321 op = copy_to_reg (op);
24324 else if ((nargs - i) <= nargs_constant)
24326 if (!match)
24327 switch (icode)
24329 case CODE_FOR_sse4_1_roundpd:
24330 case CODE_FOR_sse4_1_roundps:
24331 case CODE_FOR_sse4_1_roundsd:
24332 case CODE_FOR_sse4_1_roundss:
24333 case CODE_FOR_sse4_1_blendps:
24334 case CODE_FOR_avx_blendpd256:
24335 case CODE_FOR_avx_vpermilv4df:
24336 case CODE_FOR_avx_roundpd256:
24337 case CODE_FOR_avx_roundps256:
24338 error ("the last argument must be a 4-bit immediate");
24339 return const0_rtx;
24341 case CODE_FOR_sse4_1_blendpd:
24342 case CODE_FOR_avx_vpermilv2df:
24343 error ("the last argument must be a 2-bit immediate");
24344 return const0_rtx;
24346 case CODE_FOR_avx_vextractf128v4df:
24347 case CODE_FOR_avx_vextractf128v8sf:
24348 case CODE_FOR_avx_vextractf128v8si:
24349 case CODE_FOR_avx_vinsertf128v4df:
24350 case CODE_FOR_avx_vinsertf128v8sf:
24351 case CODE_FOR_avx_vinsertf128v8si:
24352 error ("the last argument must be a 1-bit immediate");
24353 return const0_rtx;
24355 case CODE_FOR_avx_cmpsdv2df3:
24356 case CODE_FOR_avx_cmpssv4sf3:
24357 case CODE_FOR_avx_cmppdv2df3:
24358 case CODE_FOR_avx_cmppsv4sf3:
24359 case CODE_FOR_avx_cmppdv4df3:
24360 case CODE_FOR_avx_cmppsv8sf3:
24361 error ("the last argument must be a 5-bit immediate");
24362 return const0_rtx;
24364 default:
24365 switch (nargs_constant)
24367 case 2:
24368 if ((nargs - i) == nargs_constant)
24370 error ("the next to last argument must be an 8-bit immediate");
24371 break;
24373 case 1:
24374 error ("the last argument must be an 8-bit immediate");
24375 break;
24376 default:
24377 gcc_unreachable ();
24379 return const0_rtx;
24382 else
24384 if (VECTOR_MODE_P (mode))
24385 op = safe_vector_operand (op, mode);
24387 /* If we aren't optimizing, only allow one memory operand to
24388 be generated. */
24389 if (memory_operand (op, mode))
24390 num_memory++;
24392 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
24394 if (optimize || !match || num_memory > 1)
24395 op = copy_to_mode_reg (mode, op);
24397 else
24399 op = copy_to_reg (op);
24400 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
24404 args[i].op = op;
24405 args[i].mode = mode;
24408 switch (nargs)
24410 case 1:
24411 pat = GEN_FCN (icode) (real_target, args[0].op);
24412 break;
24413 case 2:
24414 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
24415 break;
24416 case 3:
24417 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24418 args[2].op);
24419 break;
24420 case 4:
24421 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24422 args[2].op, args[3].op);
24423 break;
24424 default:
24425 gcc_unreachable ();
24428 if (! pat)
24429 return 0;
24431 emit_insn (pat);
24432 return target;
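/* Illustration (not part of the original source): two concrete mappings
   for the classification in ix86_expand_args_builtin, as a sketch:

     V4SF_FTYPE_V4SF_V4SF_INT  (e.g. __builtin_ia32_shufps)
       -> nargs = 3, nargs_constant = 1; the last operand must be an
          8-bit immediate, the first two are ordinary vector operands.
     V8HI_FTYPE_V8HI_SI_COUNT  (e.g. __builtin_ia32_psllwi128)
       -> nargs = 2, last_arg_count = true; a non-immediate shift count
          is copied into a register rather than rejected.  */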
24435 /* Subroutine of ix86_expand_builtin to take care of special insns
24436 with variable number of operands. */
24438 static rtx
24439 ix86_expand_special_args_builtin (const struct builtin_description *d,
24440 tree exp, rtx target)
24442 tree arg;
24443 rtx pat, op;
24444 unsigned int i, nargs, arg_adjust, memory;
24445 struct
24447 rtx op;
24448 enum machine_mode mode;
24449 } args[2];
24450 enum insn_code icode = d->icode;
24451 bool last_arg_constant = false;
24452 const struct insn_data *insn_p = &insn_data[icode];
24453 enum machine_mode tmode = insn_p->operand[0].mode;
24454 enum { load, store } klass;
24456 switch ((enum ix86_special_builtin_type) d->flag)
24458 case VOID_FTYPE_VOID:
24459 emit_insn (GEN_FCN (icode) (target));
24460 return 0;
24461 case V2DI_FTYPE_PV2DI:
24462 case V32QI_FTYPE_PCCHAR:
24463 case V16QI_FTYPE_PCCHAR:
24464 case V8SF_FTYPE_PCV4SF:
24465 case V8SF_FTYPE_PCFLOAT:
24466 case V4SF_FTYPE_PCFLOAT:
24467 case V4DF_FTYPE_PCV2DF:
24468 case V4DF_FTYPE_PCDOUBLE:
24469 case V2DF_FTYPE_PCDOUBLE:
24470 nargs = 1;
24471 klass = load;
24472 memory = 0;
24473 break;
24474 case VOID_FTYPE_PV2SF_V4SF:
24475 case VOID_FTYPE_PV4DI_V4DI:
24476 case VOID_FTYPE_PV2DI_V2DI:
24477 case VOID_FTYPE_PCHAR_V32QI:
24478 case VOID_FTYPE_PCHAR_V16QI:
24479 case VOID_FTYPE_PFLOAT_V8SF:
24480 case VOID_FTYPE_PFLOAT_V4SF:
24481 case VOID_FTYPE_PDOUBLE_V4DF:
24482 case VOID_FTYPE_PDOUBLE_V2DF:
24483 case VOID_FTYPE_PDI_DI:
24484 case VOID_FTYPE_PINT_INT:
24485 nargs = 1;
24486 klass = store;
24487 /* Reserve memory operand for target. */
24488 memory = ARRAY_SIZE (args);
24489 break;
24490 case V4SF_FTYPE_V4SF_PCV2SF:
24491 case V2DF_FTYPE_V2DF_PCDOUBLE:
24492 nargs = 2;
24493 klass = load;
24494 memory = 1;
24495 break;
24496 case V8SF_FTYPE_PCV8SF_V8SF:
24497 case V4DF_FTYPE_PCV4DF_V4DF:
24498 case V4SF_FTYPE_PCV4SF_V4SF:
24499 case V2DF_FTYPE_PCV2DF_V2DF:
24500 nargs = 2;
24501 klass = load;
24502 memory = 0;
24503 break;
24504 case VOID_FTYPE_PV8SF_V8SF_V8SF:
24505 case VOID_FTYPE_PV4DF_V4DF_V4DF:
24506 case VOID_FTYPE_PV4SF_V4SF_V4SF:
24507 case VOID_FTYPE_PV2DF_V2DF_V2DF:
24508 nargs = 2;
24509 klass = store;
24510 /* Reserve memory operand for target. */
24511 memory = ARRAY_SIZE (args);
24512 break;
24513 default:
24514 gcc_unreachable ();
24517 gcc_assert (nargs <= ARRAY_SIZE (args));
24519 if (klass == store)
24521 arg = CALL_EXPR_ARG (exp, 0);
24522 op = expand_normal (arg);
24523 gcc_assert (target == 0);
24524 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
24525 arg_adjust = 1;
24527 else
24529 arg_adjust = 0;
24530 if (optimize
24531 || target == 0
24532 || GET_MODE (target) != tmode
24533 || ! (*insn_p->operand[0].predicate) (target, tmode))
24534 target = gen_reg_rtx (tmode);
24537 for (i = 0; i < nargs; i++)
24539 enum machine_mode mode = insn_p->operand[i + 1].mode;
24540 bool match;
24542 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
24543 op = expand_normal (arg);
24544 match = (*insn_p->operand[i + 1].predicate) (op, mode);
24546 if (last_arg_constant && (i + 1) == nargs)
24548 if (!match)
24549 switch (icode)
24551 default:
24552 error ("the last argument must be an 8-bit immediate");
24553 return const0_rtx;
24556 else
24558 if (i == memory)
24560 /* This must be the memory operand. */
24561 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24562 gcc_assert (GET_MODE (op) == mode
24563 || GET_MODE (op) == VOIDmode);
24565 else
24567 /* This must be a register. */
24568 if (VECTOR_MODE_P (mode))
24569 op = safe_vector_operand (op, mode);
24571 gcc_assert (GET_MODE (op) == mode
24572 || GET_MODE (op) == VOIDmode);
24573 op = copy_to_mode_reg (mode, op);
24577 args[i].op = op;
24578 args[i].mode = mode;
24581 switch (nargs)
24583 case 1:
24584 pat = GEN_FCN (icode) (target, args[0].op);
24585 break;
24586 case 2:
24587 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24588 break;
24589 default:
24590 gcc_unreachable ();
24593 if (! pat)
24594 return 0;
24595 emit_insn (pat);
24596 return klass == store ? 0 : target;
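/* Illustration (not part of the original source): the special-args path
   covers loads and stores, for example:

     V4SF_FTYPE_PCFLOAT     (e.g. __builtin_ia32_loadups)
       -> klass = load; the pointer argument becomes the MEM operand.
     VOID_FTYPE_PFLOAT_V4SF (e.g. __builtin_ia32_storeups)
       -> klass = store; a MEM built from the first argument becomes the
          target and the expander returns 0, as there is no value.  */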
24599 /* Return the integer constant in ARG. Constrain it to be in the range
24600 of the subparts of VEC_TYPE; issue an error if not. */
24602 static int
24603 get_element_number (tree vec_type, tree arg)
24605 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24607 if (!host_integerp (arg, 1)
24608 || (elt = tree_low_cst (arg, 1), elt > max))
24610 error ("selector must be an integer constant in the range 0..%wi", max);
24611 return 0;
24614 return elt;
24617 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24618 ix86_expand_vector_init. We DO have language-level syntax for this, in
24619 the form of (type){ init-list }. Except that since we can't place emms
24620 instructions from inside the compiler, we can't allow the use of MMX
24621 registers unless the user explicitly asks for it. So we do *not* define
24622 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24623 we have builtins invoked by mmintrin.h that gives us license to emit
24624 these sorts of instructions. */
24626 static rtx
24627 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24629 enum machine_mode tmode = TYPE_MODE (type);
24630 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24631 int i, n_elt = GET_MODE_NUNITS (tmode);
24632 rtvec v = rtvec_alloc (n_elt);
24634 gcc_assert (VECTOR_MODE_P (tmode));
24635 gcc_assert (call_expr_nargs (exp) == n_elt);
24637 for (i = 0; i < n_elt; ++i)
24639 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24640 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24643 if (!target || !register_operand (target, tmode))
24644 target = gen_reg_rtx (tmode);
24646 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24647 return target;
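/* Illustration (not part of the original source): a call that reaches
   this expander, assuming the usual <mmintrin.h> definitions:

     __m64 v = _mm_setr_pi16 (1, 2, 3, 4);
       -- implemented on top of __builtin_ia32_vec_init_v4hi

   Each argument is expanded, narrowed to the inner mode with
   gen_lowpart, and handed to ix86_expand_vector_init as a PARALLEL.  */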
24650 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24651 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24652 had a language-level syntax for referencing vector elements. */
24654 static rtx
24655 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24657 enum machine_mode tmode, mode0;
24658 tree arg0, arg1;
24659 int elt;
24660 rtx op0;
24662 arg0 = CALL_EXPR_ARG (exp, 0);
24663 arg1 = CALL_EXPR_ARG (exp, 1);
24665 op0 = expand_normal (arg0);
24666 elt = get_element_number (TREE_TYPE (arg0), arg1);
24668 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24669 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24670 gcc_assert (VECTOR_MODE_P (mode0));
24672 op0 = force_reg (mode0, op0);
24674 if (optimize || !target || !register_operand (target, tmode))
24675 target = gen_reg_rtx (tmode);
24677 ix86_expand_vector_extract (true, target, op0, elt);
24679 return target;
24682 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24683 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24684 a language-level syntax for referencing vector elements. */
24686 static rtx
24687 ix86_expand_vec_set_builtin (tree exp)
24689 enum machine_mode tmode, mode1;
24690 tree arg0, arg1, arg2;
24691 int elt;
24692 rtx op0, op1, target;
24694 arg0 = CALL_EXPR_ARG (exp, 0);
24695 arg1 = CALL_EXPR_ARG (exp, 1);
24696 arg2 = CALL_EXPR_ARG (exp, 2);
24698 tmode = TYPE_MODE (TREE_TYPE (arg0));
24699 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24700 gcc_assert (VECTOR_MODE_P (tmode));
24702 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24703 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24704 elt = get_element_number (TREE_TYPE (arg0), arg2);
24706 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24707 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24709 op0 = force_reg (tmode, op0);
24710 op1 = force_reg (mode1, op1);
24712 /* OP0 is the source of these builtin functions and shouldn't be
24713 modified. Create a copy, use it and return it as target. */
24714 target = gen_reg_rtx (tmode);
24715 emit_move_insn (target, op0);
24716 ix86_expand_vector_set (true, target, op1, elt);
24718 return target;
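/* Illustration (not part of the original source): for a call such as

     __m128i r = _mm_insert_epi32 (v, 42, 3);   -- SSE4.1, <smmintrin.h>

   the expander above copies V into a fresh register, rewrites element 3
   in the copy and returns the copy, so the source vector is never
   modified in place.  */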
24721 /* Expand an expression EXP that calls a built-in function,
24722 with result going to TARGET if that's convenient
24723 (and in mode MODE if that's convenient).
24724 SUBTARGET may be used as the target for computing one of EXP's operands.
24725 IGNORE is nonzero if the value is to be ignored. */
24727 static rtx
24728 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24729 enum machine_mode mode ATTRIBUTE_UNUSED,
24730 int ignore ATTRIBUTE_UNUSED)
24732 const struct builtin_description *d;
24733 size_t i;
24734 enum insn_code icode;
24735 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24736 tree arg0, arg1, arg2;
24737 rtx op0, op1, op2, pat;
24738 enum machine_mode mode0, mode1, mode2;
24739 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24741 /* Determine whether the builtin function is available under the current ISA.
24742 Originally the builtin was not created if it wasn't applicable to the
24743 current ISA based on the command line switches. With function specific
24744 options, we need to check in the context of the function making the call
24745 whether it is supported. */
24746 if (ix86_builtins_isa[fcode].isa
24747 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24749 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24750 NULL, NULL, false);
24752 if (!opts)
24753 error ("%qE needs unknown isa option", fndecl);
24754 else
24756 gcc_assert (opts != NULL);
24757 error ("%qE needs isa option %s", fndecl, opts);
24758 free (opts);
24760 return const0_rtx;
24763 switch (fcode)
24765 case IX86_BUILTIN_MASKMOVQ:
24766 case IX86_BUILTIN_MASKMOVDQU:
24767 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24768 ? CODE_FOR_mmx_maskmovq
24769 : CODE_FOR_sse2_maskmovdqu);
24770 /* Note the arg order is different from the operand order. */
24771 arg1 = CALL_EXPR_ARG (exp, 0);
24772 arg2 = CALL_EXPR_ARG (exp, 1);
24773 arg0 = CALL_EXPR_ARG (exp, 2);
24774 op0 = expand_normal (arg0);
24775 op1 = expand_normal (arg1);
24776 op2 = expand_normal (arg2);
24777 mode0 = insn_data[icode].operand[0].mode;
24778 mode1 = insn_data[icode].operand[1].mode;
24779 mode2 = insn_data[icode].operand[2].mode;
24781 op0 = force_reg (Pmode, op0);
24782 op0 = gen_rtx_MEM (mode1, op0);
24784 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24785 op0 = copy_to_mode_reg (mode0, op0);
24786 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24787 op1 = copy_to_mode_reg (mode1, op1);
24788 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24789 op2 = copy_to_mode_reg (mode2, op2);
24790 pat = GEN_FCN (icode) (op0, op1, op2);
24791 if (! pat)
24792 return 0;
24793 emit_insn (pat);
24794 return 0;
24796 case IX86_BUILTIN_LDMXCSR:
24797 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24798 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24799 emit_move_insn (target, op0);
24800 emit_insn (gen_sse_ldmxcsr (target));
24801 return 0;
24803 case IX86_BUILTIN_STMXCSR:
24804 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24805 emit_insn (gen_sse_stmxcsr (target));
24806 return copy_to_mode_reg (SImode, target);
24808 case IX86_BUILTIN_CLFLUSH:
24809 arg0 = CALL_EXPR_ARG (exp, 0);
24810 op0 = expand_normal (arg0);
24811 icode = CODE_FOR_sse2_clflush;
24812 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24813 op0 = copy_to_mode_reg (Pmode, op0);
24815 emit_insn (gen_sse2_clflush (op0));
24816 return 0;
24818 case IX86_BUILTIN_MONITOR:
24819 arg0 = CALL_EXPR_ARG (exp, 0);
24820 arg1 = CALL_EXPR_ARG (exp, 1);
24821 arg2 = CALL_EXPR_ARG (exp, 2);
24822 op0 = expand_normal (arg0);
24823 op1 = expand_normal (arg1);
24824 op2 = expand_normal (arg2);
24825 if (!REG_P (op0))
24826 op0 = copy_to_mode_reg (Pmode, op0);
24827 if (!REG_P (op1))
24828 op1 = copy_to_mode_reg (SImode, op1);
24829 if (!REG_P (op2))
24830 op2 = copy_to_mode_reg (SImode, op2);
24831 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24832 return 0;
24834 case IX86_BUILTIN_MWAIT:
24835 arg0 = CALL_EXPR_ARG (exp, 0);
24836 arg1 = CALL_EXPR_ARG (exp, 1);
24837 op0 = expand_normal (arg0);
24838 op1 = expand_normal (arg1);
24839 if (!REG_P (op0))
24840 op0 = copy_to_mode_reg (SImode, op0);
24841 if (!REG_P (op1))
24842 op1 = copy_to_mode_reg (SImode, op1);
24843 emit_insn (gen_sse3_mwait (op0, op1));
24844 return 0;
24846 case IX86_BUILTIN_VEC_INIT_V2SI:
24847 case IX86_BUILTIN_VEC_INIT_V4HI:
24848 case IX86_BUILTIN_VEC_INIT_V8QI:
24849 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24851 case IX86_BUILTIN_VEC_EXT_V2DF:
24852 case IX86_BUILTIN_VEC_EXT_V2DI:
24853 case IX86_BUILTIN_VEC_EXT_V4SF:
24854 case IX86_BUILTIN_VEC_EXT_V4SI:
24855 case IX86_BUILTIN_VEC_EXT_V8HI:
24856 case IX86_BUILTIN_VEC_EXT_V2SI:
24857 case IX86_BUILTIN_VEC_EXT_V4HI:
24858 case IX86_BUILTIN_VEC_EXT_V16QI:
24859 return ix86_expand_vec_ext_builtin (exp, target);
24861 case IX86_BUILTIN_VEC_SET_V2DI:
24862 case IX86_BUILTIN_VEC_SET_V4SF:
24863 case IX86_BUILTIN_VEC_SET_V4SI:
24864 case IX86_BUILTIN_VEC_SET_V8HI:
24865 case IX86_BUILTIN_VEC_SET_V4HI:
24866 case IX86_BUILTIN_VEC_SET_V16QI:
24867 return ix86_expand_vec_set_builtin (exp);
24869 case IX86_BUILTIN_INFQ:
24870 case IX86_BUILTIN_HUGE_VALQ:
24872 REAL_VALUE_TYPE inf;
24873 rtx tmp;
24875 real_inf (&inf);
24876 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24878 tmp = validize_mem (force_const_mem (mode, tmp));
24880 if (target == 0)
24881 target = gen_reg_rtx (mode);
24883 emit_move_insn (target, tmp);
24884 return target;
24887 default:
24888 break;
24891 for (i = 0, d = bdesc_special_args;
24892 i < ARRAY_SIZE (bdesc_special_args);
24893 i++, d++)
24894 if (d->code == fcode)
24895 return ix86_expand_special_args_builtin (d, exp, target);
24897 for (i = 0, d = bdesc_args;
24898 i < ARRAY_SIZE (bdesc_args);
24899 i++, d++)
24900 if (d->code == fcode)
24901 switch (fcode)
24903 case IX86_BUILTIN_FABSQ:
24904 case IX86_BUILTIN_COPYSIGNQ:
24905 if (!TARGET_SSE2)
24906 /* Emit a normal call if SSE2 isn't available. */
24907 return expand_call (exp, target, ignore);
24908 default:
24909 return ix86_expand_args_builtin (d, exp, target);
24912 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24913 if (d->code == fcode)
24914 return ix86_expand_sse_comi (d, exp, target);
24916 for (i = 0, d = bdesc_pcmpestr;
24917 i < ARRAY_SIZE (bdesc_pcmpestr);
24918 i++, d++)
24919 if (d->code == fcode)
24920 return ix86_expand_sse_pcmpestr (d, exp, target);
24922 for (i = 0, d = bdesc_pcmpistr;
24923 i < ARRAY_SIZE (bdesc_pcmpistr);
24924 i++, d++)
24925 if (d->code == fcode)
24926 return ix86_expand_sse_pcmpistr (d, exp, target);
24928 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24929 if (d->code == fcode)
24930 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24931 (enum multi_arg_type)d->flag,
24932 d->comparison);
24934 gcc_unreachable ();
24937 /* Returns a function decl for a vectorized version of the builtin function
24938 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24939 if it is not available. */
24941 static tree
24942 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
24943 tree type_in)
24945 enum machine_mode in_mode, out_mode;
24946 int in_n, out_n;
24948 if (TREE_CODE (type_out) != VECTOR_TYPE
24949 || TREE_CODE (type_in) != VECTOR_TYPE)
24950 return NULL_TREE;
24952 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24953 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24954 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24955 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24957 switch (fn)
24959 case BUILT_IN_SQRT:
24960 if (out_mode == DFmode && out_n == 2
24961 && in_mode == DFmode && in_n == 2)
24962 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24963 break;
24965 case BUILT_IN_SQRTF:
24966 if (out_mode == SFmode && out_n == 4
24967 && in_mode == SFmode && in_n == 4)
24968 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24969 break;
24971 case BUILT_IN_LRINT:
24972 if (out_mode == SImode && out_n == 4
24973 && in_mode == DFmode && in_n == 2)
24974 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24975 break;
24977 case BUILT_IN_LRINTF:
24978 if (out_mode == SImode && out_n == 4
24979 && in_mode == SFmode && in_n == 4)
24980 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24981 break;
24983 default:
24987 /* Dispatch to a handler for a vectorization library. */
24988 if (ix86_veclib_handler)
24989 return (*ix86_veclib_handler)(fn, type_out, type_in);
24991 return NULL_TREE;
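/* Illustration (not part of the original source): a loop the vectorizer
   can map through this hook, e.g. with -O3 -msse2 -ffast-math:

     void f (double *a, const double *b, int n)
     {
       int i;
       for (i = 0; i < n; i++)
         a[i] = __builtin_sqrt (b[i]);
     }

   BUILT_IN_SQRT with V2DF in and out yields IX86_BUILTIN_SQRTPD, so
   each vectorized iteration performs one sqrtpd on two doubles.  */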
24994 /* Handler for an SVML-style interface to
24995 a library with vectorized intrinsics. */
24997 static tree
24998 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
25000 char name[20];
25001 tree fntype, new_fndecl, args;
25002 unsigned arity;
25003 const char *bname;
25004 enum machine_mode el_mode, in_mode;
25005 int n, in_n;
25007 /* The SVML is suitable for unsafe math only. */
25008 if (!flag_unsafe_math_optimizations)
25009 return NULL_TREE;
25011 el_mode = TYPE_MODE (TREE_TYPE (type_out));
25012 n = TYPE_VECTOR_SUBPARTS (type_out);
25013 in_mode = TYPE_MODE (TREE_TYPE (type_in));
25014 in_n = TYPE_VECTOR_SUBPARTS (type_in);
25015 if (el_mode != in_mode
25016 || n != in_n)
25017 return NULL_TREE;
25019 switch (fn)
25021 case BUILT_IN_EXP:
25022 case BUILT_IN_LOG:
25023 case BUILT_IN_LOG10:
25024 case BUILT_IN_POW:
25025 case BUILT_IN_TANH:
25026 case BUILT_IN_TAN:
25027 case BUILT_IN_ATAN:
25028 case BUILT_IN_ATAN2:
25029 case BUILT_IN_ATANH:
25030 case BUILT_IN_CBRT:
25031 case BUILT_IN_SINH:
25032 case BUILT_IN_SIN:
25033 case BUILT_IN_ASINH:
25034 case BUILT_IN_ASIN:
25035 case BUILT_IN_COSH:
25036 case BUILT_IN_COS:
25037 case BUILT_IN_ACOSH:
25038 case BUILT_IN_ACOS:
25039 if (el_mode != DFmode || n != 2)
25040 return NULL_TREE;
25041 break;
25043 case BUILT_IN_EXPF:
25044 case BUILT_IN_LOGF:
25045 case BUILT_IN_LOG10F:
25046 case BUILT_IN_POWF:
25047 case BUILT_IN_TANHF:
25048 case BUILT_IN_TANF:
25049 case BUILT_IN_ATANF:
25050 case BUILT_IN_ATAN2F:
25051 case BUILT_IN_ATANHF:
25052 case BUILT_IN_CBRTF:
25053 case BUILT_IN_SINHF:
25054 case BUILT_IN_SINF:
25055 case BUILT_IN_ASINHF:
25056 case BUILT_IN_ASINF:
25057 case BUILT_IN_COSHF:
25058 case BUILT_IN_COSF:
25059 case BUILT_IN_ACOSHF:
25060 case BUILT_IN_ACOSF:
25061 if (el_mode != SFmode || n != 4)
25062 return NULL_TREE;
25063 break;
25065 default:
25066 return NULL_TREE;
25069 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
25071 if (fn == BUILT_IN_LOGF)
25072 strcpy (name, "vmlsLn4");
25073 else if (fn == BUILT_IN_LOG)
25074 strcpy (name, "vmldLn2");
25075 else if (n == 4)
25077 sprintf (name, "vmls%s", bname+10);
25078 name[strlen (name)-1] = '4';
25080 else
25081 sprintf (name, "vmld%s2", bname+10);
25083 /* Convert to uppercase. */
25084 name[4] &= ~0x20;
25086 arity = 0;
25087 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
25088 args = TREE_CHAIN (args))
25089 arity++;
25091 if (arity == 1)
25092 fntype = build_function_type_list (type_out, type_in, NULL);
25093 else
25094 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
25096 /* Build a function declaration for the vectorized function. */
25097 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
25098 TREE_PUBLIC (new_fndecl) = 1;
25099 DECL_EXTERNAL (new_fndecl) = 1;
25100 DECL_IS_NOVOPS (new_fndecl) = 1;
25101 TREE_READONLY (new_fndecl) = 1;
25103 return new_fndecl;
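/* Illustration (not part of the original source): the SVML name mangling
   above, worked through for a few cases:

     BUILT_IN_SINF, 4 x SFmode -> "vmls" + "sinf", last char replaced by
                                  '4', name[4] uppercased -> "vmlsSin4"
     BUILT_IN_SIN,  2 x DFmode -> "vmld" + "sin" + "2"    -> "vmldSin2"
     BUILT_IN_LOGF / BUILT_IN_LOG are special-cased       -> "vmlsLn4",
                                                             "vmldLn2"  */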
25106 /* Handler for an ACML-style interface to
25107 a library with vectorized intrinsics. */
25109 static tree
25110 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
25112 char name[20] = "__vr.._";
25113 tree fntype, new_fndecl, args;
25114 unsigned arity;
25115 const char *bname;
25116 enum machine_mode el_mode, in_mode;
25117 int n, in_n;
25119 /* The ACML is 64-bit only and suitable for unsafe math only, as
25120 it does not correctly support parts of IEEE arithmetic, such as
25121 denormals, with the required precision. */
25122 if (!TARGET_64BIT
25123 || !flag_unsafe_math_optimizations)
25124 return NULL_TREE;
25126 el_mode = TYPE_MODE (TREE_TYPE (type_out));
25127 n = TYPE_VECTOR_SUBPARTS (type_out);
25128 in_mode = TYPE_MODE (TREE_TYPE (type_in));
25129 in_n = TYPE_VECTOR_SUBPARTS (type_in);
25130 if (el_mode != in_mode
25131 || n != in_n)
25132 return NULL_TREE;
25134 switch (fn)
25136 case BUILT_IN_SIN:
25137 case BUILT_IN_COS:
25138 case BUILT_IN_EXP:
25139 case BUILT_IN_LOG:
25140 case BUILT_IN_LOG2:
25141 case BUILT_IN_LOG10:
25142 name[4] = 'd';
25143 name[5] = '2';
25144 if (el_mode != DFmode
25145 || n != 2)
25146 return NULL_TREE;
25147 break;
25149 case BUILT_IN_SINF:
25150 case BUILT_IN_COSF:
25151 case BUILT_IN_EXPF:
25152 case BUILT_IN_POWF:
25153 case BUILT_IN_LOGF:
25154 case BUILT_IN_LOG2F:
25155 case BUILT_IN_LOG10F:
25156 name[4] = 's';
25157 name[5] = '4';
25158 if (el_mode != SFmode
25159 || n != 4)
25160 return NULL_TREE;
25161 break;
25163 default:
25164 return NULL_TREE;
25167 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
25168 sprintf (name + 7, "%s", bname+10);
25170 arity = 0;
25171 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
25172 args = TREE_CHAIN (args))
25173 arity++;
25175 if (arity == 1)
25176 fntype = build_function_type_list (type_out, type_in, NULL);
25177 else
25178 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
25180 /* Build a function declaration for the vectorized function. */
25181 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
25182 TREE_PUBLIC (new_fndecl) = 1;
25183 DECL_EXTERNAL (new_fndecl) = 1;
25184 DECL_IS_NOVOPS (new_fndecl) = 1;
25185 TREE_READONLY (new_fndecl) = 1;
25187 return new_fndecl;
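/* Illustration (not part of the original source): the ACML template
   "__vr.._" is filled in above as follows:

     BUILT_IN_SIN,  2 x DFmode -> name[4] = 'd', name[5] = '2',
                                  suffix "sin"  -> "__vrd2_sin"
     BUILT_IN_COSF, 4 x SFmode -> "__vrs4_cosf"  */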
25191 /* Returns a decl of a function that implements conversion of an integer vector
25192 into a floating-point vector, or vice-versa. TYPE is the type of the integer
25193 side of the conversion.
25194 Return NULL_TREE if it is not available. */
25196 static tree
25197 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
25199 if (TREE_CODE (type) != VECTOR_TYPE)
25200 return NULL_TREE;
25202 switch (code)
25204 case FLOAT_EXPR:
25205 switch (TYPE_MODE (type))
25207 case V4SImode:
25208 return ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
25209 default:
25210 return NULL_TREE;
25213 case FIX_TRUNC_EXPR:
25214 switch (TYPE_MODE (type))
25216 case V4SImode:
25217 return ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
25218 default:
25219 return NULL_TREE;
25221 default:
25222 return NULL_TREE;
25227 /* Returns a decl for a target-specific builtin that implements the
25228 reciprocal of the function, or NULL_TREE if not available. */
25230 static tree
25231 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
25232 bool sqrt ATTRIBUTE_UNUSED)
25234 if (! (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
25235 && flag_finite_math_only && !flag_trapping_math
25236 && flag_unsafe_math_optimizations))
25237 return NULL_TREE;
25239 if (md_fn)
25240 /* Machine dependent builtins. */
25241 switch (fn)
25243 /* Vectorized version of sqrt to rsqrt conversion. */
25244 case IX86_BUILTIN_SQRTPS_NR:
25245 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
25247 default:
25248 return NULL_TREE;
25250 else
25251 /* Normal builtins. */
25252 switch (fn)
25254 /* Sqrt to rsqrt conversion. */
25255 case BUILT_IN_SQRTF:
25256 return ix86_builtins[IX86_BUILTIN_RSQRTF];
25258 default:
25259 return NULL_TREE;
25263 /* Store OPERAND to memory after reload is completed. This means
25264 that we can't easily use assign_stack_local. */
25265 rtx
25266 ix86_force_to_memory (enum machine_mode mode, rtx operand)
25268 rtx result;
25270 gcc_assert (reload_completed);
25271 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
25273 result = gen_rtx_MEM (mode,
25274 gen_rtx_PLUS (Pmode,
25275 stack_pointer_rtx,
25276 GEN_INT (-RED_ZONE_SIZE)));
25277 emit_move_insn (result, operand);
25279 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
25281 switch (mode)
25283 case HImode:
25284 case SImode:
25285 operand = gen_lowpart (DImode, operand);
25286 /* FALLTHRU */
25287 case DImode:
25288 emit_insn (
25289 gen_rtx_SET (VOIDmode,
25290 gen_rtx_MEM (DImode,
25291 gen_rtx_PRE_DEC (DImode,
25292 stack_pointer_rtx)),
25293 operand));
25294 break;
25295 default:
25296 gcc_unreachable ();
25298 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25300 else
25302 switch (mode)
25304 case DImode:
25306 rtx operands[2];
25307 split_di (&operand, 1, operands, operands + 1);
25308 emit_insn (
25309 gen_rtx_SET (VOIDmode,
25310 gen_rtx_MEM (SImode,
25311 gen_rtx_PRE_DEC (Pmode,
25312 stack_pointer_rtx)),
25313 operands[1]));
25314 emit_insn (
25315 gen_rtx_SET (VOIDmode,
25316 gen_rtx_MEM (SImode,
25317 gen_rtx_PRE_DEC (Pmode,
25318 stack_pointer_rtx)),
25319 operands[0]));
25321 break;
25322 case HImode:
25323 /* Store HImodes as SImodes. */
25324 operand = gen_lowpart (SImode, operand);
25325 /* FALLTHRU */
25326 case SImode:
25327 emit_insn (
25328 gen_rtx_SET (VOIDmode,
25329 gen_rtx_MEM (GET_MODE (operand),
25330 gen_rtx_PRE_DEC (SImode,
25331 stack_pointer_rtx)),
25332 operand));
25333 break;
25334 default:
25335 gcc_unreachable ();
25337 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25339 return result;
25342 /* Free operand from the memory. */
25343 void
25344 ix86_free_from_memory (enum machine_mode mode)
25346 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
25348 int size;
25350 if (mode == DImode || TARGET_64BIT)
25351 size = 8;
25352 else
25353 size = 4;
25354 /* Use LEA to deallocate stack space. In peephole2 it will be converted
25355 to a pop or add instruction if registers are available. */
25356 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
25357 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25358 GEN_INT (size))));
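/* Illustration (not part of the original source): ix86_force_to_memory
   and ix86_free_from_memory are meant to be used as a post-reload pair,
   roughly:

     rtx mem = ix86_force_to_memory (DImode, operand);
     ... emit insns that use MEM as a memory operand ...
     ix86_free_from_memory (DImode);

   With a 64-bit red zone the slot lives below the stack pointer and no
   deallocation is needed; otherwise the operand is pushed and the stack
   pointer is adjusted back again afterwards.  */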
25362 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25363 QImode must go into class Q_REGS.
25364 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
25365 movdf to do mem-to-mem moves through integer regs. */
25366 enum reg_class
25367 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25369 enum machine_mode mode = GET_MODE (x);
25371 /* We're only allowed to return a subclass of CLASS. Many of the
25372 following checks fail for NO_REGS, so eliminate that early. */
25373 if (regclass == NO_REGS)
25374 return NO_REGS;
25376 /* All classes can load zeros. */
25377 if (x == CONST0_RTX (mode))
25378 return regclass;
25380 /* Force constants into memory if we are loading a (nonzero) constant into
25381 an MMX or SSE register. This is because there are no MMX/SSE instructions
25382 to load from a constant. */
25383 if (CONSTANT_P (x)
25384 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25385 return NO_REGS;
25387 /* Prefer SSE regs only, if we can use them for math. */
25388 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25389 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25391 /* Floating-point constants need more complex checks. */
25392 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25394 /* General regs can load everything. */
25395 if (reg_class_subset_p (regclass, GENERAL_REGS))
25396 return regclass;
25398 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25399 zero above. We only want to wind up preferring 80387 registers if
25400 we plan on doing computation with them. */
25401 if (TARGET_80387
25402 && standard_80387_constant_p (x))
25404 /* Limit class to non-sse. */
25405 if (regclass == FLOAT_SSE_REGS)
25406 return FLOAT_REGS;
25407 if (regclass == FP_TOP_SSE_REGS)
25408 return FP_TOP_REG;
25409 if (regclass == FP_SECOND_SSE_REGS)
25410 return FP_SECOND_REG;
25411 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25412 return regclass;
25415 return NO_REGS;
25418 /* Generally when we see PLUS here, it's the function invariant
25419 (plus soft-fp const_int), which can only be computed into general
25420 regs. */
25421 if (GET_CODE (x) == PLUS)
25422 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25424 /* QImode constants are easy to load, but non-constant QImode data
25425 must go into Q_REGS. */
25426 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25428 if (reg_class_subset_p (regclass, Q_REGS))
25429 return regclass;
25430 if (reg_class_subset_p (Q_REGS, regclass))
25431 return Q_REGS;
25432 return NO_REGS;
25435 return regclass;
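/* Illustration (not part of the original source): the CONSTANT_P check
   above is what sends vector constants to the constant pool, e.g.

     __m128i x = _mm_set1_epi8 (0x55);

   A nonzero CONST_VECTOR headed for an SSE class gets NO_REGS, so
   reload materializes it with a load from memory instead of trying to
   build it in a register; only CONST0_RTX is allowed everywhere.  */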
25438 /* Discourage putting floating-point values in SSE registers unless
25439 SSE math is being used, and likewise for the 387 registers. */
25440 enum reg_class
25441 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25443 enum machine_mode mode = GET_MODE (x);
25445 /* Restrict the output reload class to the register bank that we are doing
25446 math on. If we would like not to return a subset of CLASS, reject this
25447 alternative: if reload cannot do this, it will still use its choice. */
25448 mode = GET_MODE (x);
25449 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25450 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25452 if (X87_FLOAT_MODE_P (mode))
25454 if (regclass == FP_TOP_SSE_REGS)
25455 return FP_TOP_REG;
25456 else if (regclass == FP_SECOND_SSE_REGS)
25457 return FP_SECOND_REG;
25458 else
25459 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25462 return regclass;
25465 static enum reg_class
25466 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25467 enum machine_mode mode,
25468 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25470 /* QImode spills from non-QI registers require
25471 an intermediate register on 32-bit targets. */
25472 if (!in_p && mode == QImode && !TARGET_64BIT
25473 && (rclass == GENERAL_REGS
25474 || rclass == LEGACY_REGS
25475 || rclass == INDEX_REGS))
25477 int regno;
25479 if (REG_P (x))
25480 regno = REGNO (x);
25481 else
25482 regno = -1;
25484 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25485 regno = true_regnum (x);
25487 /* Return Q_REGS if the operand is in memory. */
25488 if (regno == -1)
25489 return Q_REGS;
25492 return NO_REGS;
25495 /* If we are copying between general and FP registers, we need a memory
25496 location. The same is true for SSE and MMX registers.
25498 To optimize register_move_cost performance, allow an inline variant.
25500 The macro can't work reliably when one of the CLASSES is a class containing
25501 registers from multiple units (SSE, MMX, integer). We avoid this by never
25502 combining those units in a single alternative in the machine description.
25503 Ensure that this constraint holds to avoid unexpected surprises.
25505 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25506 enforce these sanity checks. */
25508 static inline int
25509 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25510 enum machine_mode mode, int strict)
25512 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25513 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25514 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25515 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25516 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25517 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25519 gcc_assert (!strict);
25520 return true;
25523 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25524 return true;
25526 /* ??? This is a lie. We do have moves between mmx/general and between
25527 mmx/sse2. But by saying we need secondary memory we discourage the
25528 register allocator from using the mmx registers unless needed. */
25529 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25530 return true;
25532 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25534 /* SSE1 doesn't have any direct moves from other classes. */
25535 if (!TARGET_SSE2)
25536 return true;
25538 /* If the target says that inter-unit moves are more expensive
25539 than moving through memory, then don't generate them. */
25540 if (!TARGET_INTER_UNIT_MOVES)
25541 return true;
25543 /* Between SSE and general, we have moves no larger than word size. */
25544 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25545 return true;
25548 return false;
25551 int
25552 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25553 enum machine_mode mode, int strict)
25555 return inline_secondary_memory_needed (class1, class2, mode, strict);
25558 /* Return true if the registers in CLASS cannot represent the change from
25559 modes FROM to TO. */
25561 bool
25562 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25563 enum reg_class regclass)
25565 if (from == to)
25566 return false;
25568 /* x87 registers can't do subreg at all, as all values are reformatted
25569 to extended precision. */
25570 if (MAYBE_FLOAT_CLASS_P (regclass))
25571 return true;
25573 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25575 /* Vector registers do not support QI or HImode loads. If we don't
25576 disallow a change to these modes, reload will assume it's ok to
25577 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25578 the vec_dupv4hi pattern. */
25579 if (GET_MODE_SIZE (from) < 4)
25580 return true;
25582 /* Vector registers do not support subreg with nonzero offsets, which
25583 are otherwise valid for integer registers. Since we can't see
25584 whether we have a nonzero offset from here, prohibit all
25585 nonparadoxical subregs changing size. */
25586 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25587 return true;
25590 return false;
25593 /* Return the cost of moving data of mode M between a
25594 register and memory. A value of 2 is the default; this cost is
25595 relative to those in `REGISTER_MOVE_COST'.
25597 This function is used extensively by register_move_cost, which is used to
25598 build tables at startup. Make it inline in this case.
25599 When IN is 2, return the maximum of the in and out move costs.
25601 If moving between registers and memory is more expensive than
25602 between two registers, you should define this macro to express the
25603 relative cost.
25605 Also model the increased cost of moving QImode registers in
25606 non-Q_REGS classes.  */
25608 static inline int
25609 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25610 int in)
25612 int cost;
25613 if (FLOAT_CLASS_P (regclass))
25615 int index;
25616 switch (mode)
25618 case SFmode:
25619 index = 0;
25620 break;
25621 case DFmode:
25622 index = 1;
25623 break;
25624 case XFmode:
25625 index = 2;
25626 break;
25627 default:
25628 return 100;
25630 if (in == 2)
25631 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25632 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25634 if (SSE_CLASS_P (regclass))
25636 int index;
25637 switch (GET_MODE_SIZE (mode))
25639 case 4:
25640 index = 0;
25641 break;
25642 case 8:
25643 index = 1;
25644 break;
25645 case 16:
25646 index = 2;
25647 break;
25648 default:
25649 return 100;
25651 if (in == 2)
25652 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25653 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25655 if (MMX_CLASS_P (regclass))
25657 int index;
25658 switch (GET_MODE_SIZE (mode))
25660 case 4:
25661 index = 0;
25662 break;
25663 case 8:
25664 index = 1;
25665 break;
25666 default:
25667 return 100;
25669 if (in == 2)
25670 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25671 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25673 switch (GET_MODE_SIZE (mode))
25675 case 1:
25676 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25678 if (!in)
25679 return ix86_cost->int_store[0];
25680 if (TARGET_PARTIAL_REG_DEPENDENCY
25681 && optimize_function_for_speed_p (cfun))
25682 cost = ix86_cost->movzbl_load;
25683 else
25684 cost = ix86_cost->int_load[0];
25685 if (in == 2)
25686 return MAX (cost, ix86_cost->int_store[0]);
25687 return cost;
25689 else
25691 if (in == 2)
25692 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25693 if (in)
25694 return ix86_cost->movzbl_load;
25695 else
25696 return ix86_cost->int_store[0] + 4;
25698 break;
25699 case 2:
25700 if (in == 2)
25701 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25702 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25703 default:
25704 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
25705 if (mode == TFmode)
25706 mode = XFmode;
25707 if (in == 2)
25708 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25709 else if (in)
25710 cost = ix86_cost->int_load[2];
25711 else
25712 cost = ix86_cost->int_store[2];
25713 return (cost * (((int) GET_MODE_SIZE (mode)
25714 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25718 int
25719 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25721 return inline_memory_move_cost (mode, regclass, in);
25725 /* Return the cost of moving data from a register in class CLASS1 to
25726 one in class CLASS2.
25728 It is not required that the cost always equal 2 when FROM is the same as TO;
25729 on some machines it is expensive to move between registers if they are not
25730 general registers. */
25732 int
25733 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25734 enum reg_class class2)
25736 /* In case we require secondary memory, compute cost of the store followed
25737 by load. In order to avoid bad register allocation choices, we need
25738 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25740 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25742 int cost = 1;
25744 cost += inline_memory_move_cost (mode, class1, 2);
25745 cost += inline_memory_move_cost (mode, class2, 2);
25747 /* When copying from a general purpose register we may emit multiple
25748 stores followed by a single load, causing a memory size mismatch stall.
25749 Count this as an arbitrarily high cost of 20. */
25750 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25751 cost += 20;
25753 /* In the case of FP/MMX moves, the registers actually overlap, and we
25754 have to switch modes in order to treat them differently. */
25755 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25756 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25757 cost += 20;
25759 return cost;
25762 /* Moves between SSE/MMX and integer unit are expensive. */
25763 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25764 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25766 /* ??? By keeping the returned value relatively high, we limit the number
25767 of moves between integer and MMX/SSE registers for all targets.
25768 Additionally, a high value prevents a problem with x86_modes_tieable_p(),
25769 where integer modes in MMX/SSE registers are not tieable
25770 because of missing QImode and HImode moves to, from or between
25771 MMX/SSE registers. */
25772 return MAX (8, ix86_cost->mmxsse_to_integer);
25774 if (MAYBE_FLOAT_CLASS_P (class1))
25775 return ix86_cost->fp_move;
25776 if (MAYBE_SSE_CLASS_P (class1))
25777 return ix86_cost->sse_move;
25778 if (MAYBE_MMX_CLASS_P (class1))
25779 return ix86_cost->mmx_move;
25780 return 2;
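/* Illustration (not part of the original source): when secondary memory
   is needed, the cost computed above is, symbolically,

     1 + inline_memory_move_cost (mode, class1, 2)
       + inline_memory_move_cost (mode, class2, 2)
       (+ 20 if class1 needs more hard regs than class2,
        + 20 more for overlapping FP/MMX register moves)

   which keeps such register-to-register "moves" at least as expensive
   as the equivalent store/load pair.  */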
25783 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25785 bool
25786 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25788 /* Flags, and only flags, can hold CCmode values. */
25789 if (CC_REGNO_P (regno))
25790 return GET_MODE_CLASS (mode) == MODE_CC;
25791 if (GET_MODE_CLASS (mode) == MODE_CC
25792 || GET_MODE_CLASS (mode) == MODE_RANDOM
25793 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25794 return 0;
25795 if (FP_REGNO_P (regno))
25796 return VALID_FP_MODE_P (mode);
25797 if (SSE_REGNO_P (regno))
25799 /* We implement the move patterns for all vector modes into and
25800 out of SSE registers, even when no operation instructions
25801 are available. OImode move is available only when AVX is
25802 enabled. */
25803 return ((TARGET_AVX && mode == OImode)
25804 || VALID_AVX256_REG_MODE (mode)
25805 || VALID_SSE_REG_MODE (mode)
25806 || VALID_SSE2_REG_MODE (mode)
25807 || VALID_MMX_REG_MODE (mode)
25808 || VALID_MMX_REG_MODE_3DNOW (mode));
25810 if (MMX_REGNO_P (regno))
25812 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25813 so if the register is available at all, then we can move data of
25814 the given mode into or out of it. */
25815 return (VALID_MMX_REG_MODE (mode)
25816 || VALID_MMX_REG_MODE_3DNOW (mode));
25819 if (mode == QImode)
25821 /* Take care with QImode values - they can be in non-QI regs,
25822 but then they cause partial register stalls. */
25823 if (regno <= BX_REG || TARGET_64BIT)
25824 return 1;
25825 if (!TARGET_PARTIAL_REG_STALL)
25826 return 1;
25827 return reload_in_progress || reload_completed;
25829 /* We handle both integer and floats in the general purpose registers. */
25830 else if (VALID_INT_MODE_P (mode))
25831 return 1;
25832 else if (VALID_FP_MODE_P (mode))
25833 return 1;
25834 else if (VALID_DFP_MODE_P (mode))
25835 return 1;
25836 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25837 on to use that value in smaller contexts, this can easily force a
25838 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25839 supporting DImode, allow it. */
25840 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25841 return 1;
25843 return 0;
25846 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25847 tieable integer mode. */
25849 static bool
25850 ix86_tieable_integer_mode_p (enum machine_mode mode)
25852 switch (mode)
25854 case HImode:
25855 case SImode:
25856 return true;
25858 case QImode:
25859 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25861 case DImode:
25862 return TARGET_64BIT;
25864 default:
25865 return false;
25869 /* Return true if MODE1 is accessible in a register that can hold MODE2
25870 without copying. That is, all register classes that can hold MODE2
25871 can also hold MODE1. */
25873 bool
25874 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25876 if (mode1 == mode2)
25877 return true;
25879 if (ix86_tieable_integer_mode_p (mode1)
25880 && ix86_tieable_integer_mode_p (mode2))
25881 return true;
25883 /* MODE2 being XFmode implies fp stack or general regs, which means we
25884 can tie any smaller floating point modes to it. Note that we do not
25885 tie this with TFmode. */
25886 if (mode2 == XFmode)
25887 return mode1 == SFmode || mode1 == DFmode;
25889 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25890 that we can tie it with SFmode. */
25891 if (mode2 == DFmode)
25892 return mode1 == SFmode;
25894 /* If MODE2 is only appropriate for an SSE register, then tie with
25895 any other mode acceptable to SSE registers. */
25896 if (GET_MODE_SIZE (mode2) == 16
25897 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25898 return (GET_MODE_SIZE (mode1) == 16
25899 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25901 /* If MODE2 is appropriate for an MMX register, then tie
25902 with any other mode acceptable to MMX registers. */
25903 if (GET_MODE_SIZE (mode2) == 8
25904 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25905 return (GET_MODE_SIZE (mode1) == 8
25906 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25908 return false;
25911 /* Compute a (partial) cost for rtx X. Return true if the complete
25912 cost has been computed, and false if subexpressions should be
25913 scanned. In either case, *TOTAL contains the cost result. */
25915 static bool
25916 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25918 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25919 enum machine_mode mode = GET_MODE (x);
25920 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25922 switch (code)
25924 case CONST_INT:
25925 case CONST:
25926 case LABEL_REF:
25927 case SYMBOL_REF:
25928 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25929 *total = 3;
25930 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25931 *total = 2;
25932 else if (flag_pic && SYMBOLIC_CONST (x)
25933 && (!TARGET_64BIT
25934 || (GET_CODE (x) != LABEL_REF
25935 && (GET_CODE (x) != SYMBOL_REF
25936 || !SYMBOL_REF_LOCAL_P (x)))))
25937 *total = 1;
25938 else
25939 *total = 0;
25940 return true;
25942 case CONST_DOUBLE:
25943 if (mode == VOIDmode)
25944 *total = 0;
25945 else
25946 switch (standard_80387_constant_p (x))
25948 case 1: /* 0.0 */
25949 *total = 1;
25950 break;
25951 default: /* Other constants */
25952 *total = 2;
25953 break;
25954 case 0:
25955 case -1:
25956 /* Start with (MEM (SYMBOL_REF)), since that's where
25957 it'll probably end up. Add a penalty for size. */
25958 *total = (COSTS_N_INSNS (1)
25959 + (flag_pic != 0 && !TARGET_64BIT)
25960 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25961 break;
25963 return true;
25965 case ZERO_EXTEND:
25966 /* The zero extension is often completely free on x86_64, so make
25967 it as cheap as possible. */
25968 if (TARGET_64BIT && mode == DImode
25969 && GET_MODE (XEXP (x, 0)) == SImode)
25970 *total = 1;
25971 else if (TARGET_ZERO_EXTEND_WITH_AND)
25972 *total = cost->add;
25973 else
25974 *total = cost->movzx;
25975 return false;
25977 case SIGN_EXTEND:
25978 *total = cost->movsx;
25979 return false;
25981 case ASHIFT:
25982 if (CONST_INT_P (XEXP (x, 1))
25983 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25985 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
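/* A shift by 1 is costed as an add; shifts by 2 or 3 can be done with
   a lea when lea is no more expensive than a constant shift.  */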
25986 if (value == 1)
25988 *total = cost->add;
25989 return false;
25991 if ((value == 2 || value == 3)
25992 && cost->lea <= cost->shift_const)
25994 *total = cost->lea;
25995 return false;
25998 /* FALLTHRU */
26000 case ROTATE:
26001 case ASHIFTRT:
26002 case LSHIFTRT:
26003 case ROTATERT:
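/* Shifts and rotates of a DImode value on 32-bit targets take roughly a
   pair of SImode shifts; variable counts and counts above 32 need extra
   insns, which the costs below reflect.  */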
26004 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
26006 if (CONST_INT_P (XEXP (x, 1)))
26008 if (INTVAL (XEXP (x, 1)) > 32)
26009 *total = cost->shift_const + COSTS_N_INSNS (2);
26010 else
26011 *total = cost->shift_const * 2;
26013 else
26015 if (GET_CODE (XEXP (x, 1)) == AND)
26016 *total = cost->shift_var * 2;
26017 else
26018 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
26021 else
26023 if (CONST_INT_P (XEXP (x, 1)))
26024 *total = cost->shift_const;
26025 else
26026 *total = cost->shift_var;
26028 return false;
26030 case MULT:
26031 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26033 /* ??? SSE scalar cost should be used here. */
26034 *total = cost->fmul;
26035 return false;
26037 else if (X87_FLOAT_MODE_P (mode))
26039 *total = cost->fmul;
26040 return false;
26042 else if (FLOAT_MODE_P (mode))
26044 /* ??? SSE vector cost should be used here. */
26045 *total = cost->fmul;
26046 return false;
26048 else
26050 rtx op0 = XEXP (x, 0);
26051 rtx op1 = XEXP (x, 1);
26052 int nbits;
26053 if (CONST_INT_P (XEXP (x, 1)))
26055 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
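/* Count the set bits in the constant: value &= value - 1 clears the
   lowest set bit on each iteration, so NBITS ends up as the population
   count used to scale the per-bit multiply cost below.  */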
26056 for (nbits = 0; value != 0; value &= value - 1)
26057 nbits++;
26059 else
26060 /* This is arbitrary. */
26061 nbits = 7;
26063 /* Compute costs correctly for widening multiplication. */
26064 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
26065 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
26066 == GET_MODE_SIZE (mode))
26068 int is_mulwiden = 0;
26069 enum machine_mode inner_mode = GET_MODE (op0);
26071 if (GET_CODE (op0) == GET_CODE (op1))
26072 is_mulwiden = 1, op1 = XEXP (op1, 0);
26073 else if (CONST_INT_P (op1))
26075 if (GET_CODE (op0) == SIGN_EXTEND)
26076 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
26077 == INTVAL (op1);
26078 else
26079 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
26082 if (is_mulwiden)
26083 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
26086 *total = (cost->mult_init[MODE_INDEX (mode)]
26087 + nbits * cost->mult_bit
26088 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
26090 return true;
26093 case DIV:
26094 case UDIV:
26095 case MOD:
26096 case UMOD:
26097 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26098 /* ??? SSE cost should be used here. */
26099 *total = cost->fdiv;
26100 else if (X87_FLOAT_MODE_P (mode))
26101 *total = cost->fdiv;
26102 else if (FLOAT_MODE_P (mode))
26103 /* ??? SSE vector cost should be used here. */
26104 *total = cost->fdiv;
26105 else
26106 *total = cost->divide[MODE_INDEX (mode)];
26107 return false;
26109 case PLUS:
26110 if (GET_MODE_CLASS (mode) == MODE_INT
26111 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
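/* The base + index*scale + displacement shapes matched below can be
   computed with a single lea, so cost them as one lea plus the costs
   of their operands.  */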
26113 if (GET_CODE (XEXP (x, 0)) == PLUS
26114 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
26115 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
26116 && CONSTANT_P (XEXP (x, 1)))
26118 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
26119 if (val == 2 || val == 4 || val == 8)
26121 *total = cost->lea;
26122 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26123 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
26124 outer_code, speed);
26125 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26126 return true;
26129 else if (GET_CODE (XEXP (x, 0)) == MULT
26130 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
26132 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
26133 if (val == 2 || val == 4 || val == 8)
26135 *total = cost->lea;
26136 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26137 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26138 return true;
26141 else if (GET_CODE (XEXP (x, 0)) == PLUS)
26143 *total = cost->lea;
26144 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26145 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26146 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26147 return true;
26150 /* FALLTHRU */
26152 case MINUS:
26153 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26155 /* ??? SSE cost should be used here. */
26156 *total = cost->fadd;
26157 return false;
26159 else if (X87_FLOAT_MODE_P (mode))
26161 *total = cost->fadd;
26162 return false;
26164 else if (FLOAT_MODE_P (mode))
26166 /* ??? SSE vector cost should be used here. */
26167 *total = cost->fadd;
26168 return false;
26170 /* FALLTHRU */
26172 case AND:
26173 case IOR:
26174 case XOR:
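/* A DImode logical operation on a 32-bit target is carried out as two
   SImode operations; operands that are not already DImode are costed
   twice since both halves need them.  */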
26175 if (!TARGET_64BIT && mode == DImode)
26177 *total = (cost->add * 2
26178 + (rtx_cost (XEXP (x, 0), outer_code, speed)
26179 << (GET_MODE (XEXP (x, 0)) != DImode))
26180 + (rtx_cost (XEXP (x, 1), outer_code, speed)
26181 << (GET_MODE (XEXP (x, 1)) != DImode)));
26182 return true;
26184 /* FALLTHRU */
26186 case NEG:
26187 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26189 /* ??? SSE cost should be used here. */
26190 *total = cost->fchs;
26191 return false;
26193 else if (X87_FLOAT_MODE_P (mode))
26195 *total = cost->fchs;
26196 return false;
26198 else if (FLOAT_MODE_P (mode))
26200 /* ??? SSE vector cost should be used here. */
26201 *total = cost->fchs;
26202 return false;
26204 /* FALLTHRU */
26206 case NOT:
26207 if (!TARGET_64BIT && mode == DImode)
26208 *total = cost->add * 2;
26209 else
26210 *total = cost->add;
26211 return false;
26213 case COMPARE:
26214 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
26215 && XEXP (XEXP (x, 0), 1) == const1_rtx
26216 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
26217 && XEXP (x, 1) == const0_rtx)
26219 /* This kind of construct is implemented using test[bwl].
26220 Treat it as if we had an AND. */
26221 *total = (cost->add
26222 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
26223 + rtx_cost (const1_rtx, outer_code, speed));
26224 return true;
26226 return false;
26228 case FLOAT_EXTEND:
26229 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
26230 *total = 0;
26231 return false;
26233 case ABS:
26234 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26235 /* ??? SSE cost should be used here. */
26236 *total = cost->fabs;
26237 else if (X87_FLOAT_MODE_P (mode))
26238 *total = cost->fabs;
26239 else if (FLOAT_MODE_P (mode))
26240 /* ??? SSE vector cost should be used here. */
26241 *total = cost->fabs;
26242 return false;
26244 case SQRT:
26245 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26246 /* ??? SSE cost should be used here. */
26247 *total = cost->fsqrt;
26248 else if (X87_FLOAT_MODE_P (mode))
26249 *total = cost->fsqrt;
26250 else if (FLOAT_MODE_P (mode))
26251 /* ??? SSE vector cost should be used here. */
26252 *total = cost->fsqrt;
26253 return false;
26255 case UNSPEC:
26256 if (XINT (x, 1) == UNSPEC_TP)
26257 *total = 0;
26258 return false;
26260 default:
26261 return false;
26265 #if TARGET_MACHO
26267 static int current_machopic_label_num;
26269 /* Given a symbol name and its associated stub, write out the
26270 definition of the stub. */
26272 void
26273 machopic_output_stub (FILE *file, const char *symb, const char *stub)
26275 unsigned int length;
26276 char *binder_name, *symbol_name, lazy_ptr_name[32];
26277 int label = ++current_machopic_label_num;
26279 /* For 64-bit we shouldn't get here. */
26280 gcc_assert (!TARGET_64BIT);
26282 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
26283 symb = (*targetm.strip_name_encoding) (symb);
26285 length = strlen (stub);
26286 binder_name = XALLOCAVEC (char, length + 32);
26287 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26289 length = strlen (symb);
26290 symbol_name = XALLOCAVEC (char, length + 32);
26291 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26293 sprintf (lazy_ptr_name, "L%d$lz", label);
26295 if (MACHOPIC_PURE)
26296 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26297 else
26298 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26300 fprintf (file, "%s:\n", stub);
26301 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26303 if (MACHOPIC_PURE)
26305 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26306 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26307 fprintf (file, "\tjmp\t*%%edx\n");
26309 else
26310 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26312 fprintf (file, "%s:\n", binder_name);
26314 if (MACHOPIC_PURE)
26316 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26317 fprintf (file, "\tpushl\t%%eax\n");
26319 else
26320 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26322 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
26324 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26325 fprintf (file, "%s:\n", lazy_ptr_name);
26326 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26327 fprintf (file, "\t.long %s\n", binder_name);
26330 void
26331 darwin_x86_file_end (void)
26333 darwin_file_end ();
26334 ix86_file_end ();
26336 #endif /* TARGET_MACHO */
26338 /* Order the registers for register allocator. */
26340 void
26341 x86_order_regs_for_local_alloc (void)
26343 int pos = 0;
26344 int i;
26346 /* First allocate the local general purpose registers. */
26347 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26348 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26349 reg_alloc_order [pos++] = i;
26351 /* Global general purpose registers. */
26352 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26353 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26354 reg_alloc_order [pos++] = i;
26356 /* x87 registers come first in case we are doing FP math
26357 using them. */
26358 if (!TARGET_SSE_MATH)
26359 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26360 reg_alloc_order [pos++] = i;
26362 /* SSE registers. */
26363 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26364 reg_alloc_order [pos++] = i;
26365 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26366 reg_alloc_order [pos++] = i;
26368 /* x87 registers. */
26369 if (TARGET_SSE_MATH)
26370 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26371 reg_alloc_order [pos++] = i;
26373 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26374 reg_alloc_order [pos++] = i;
26376 /* Initialize the rest of the array as we do not allocate some registers
26377 at all. */
26378 while (pos < FIRST_PSEUDO_REGISTER)
26379 reg_alloc_order [pos++] = 0;
26382 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
26383 struct attribute_spec.handler. */
26384 static tree
26385 ix86_handle_abi_attribute (tree *node, tree name,
26386 tree args ATTRIBUTE_UNUSED,
26387 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26389 if (TREE_CODE (*node) != FUNCTION_TYPE
26390 && TREE_CODE (*node) != METHOD_TYPE
26391 && TREE_CODE (*node) != FIELD_DECL
26392 && TREE_CODE (*node) != TYPE_DECL)
26394 warning (OPT_Wattributes, "%qs attribute only applies to functions",
26395 IDENTIFIER_POINTER (name));
26396 *no_add_attrs = true;
26397 return NULL_TREE;
26399 if (!TARGET_64BIT)
26401 warning (OPT_Wattributes, "%qs attribute only available for 64-bit",
26402 IDENTIFIER_POINTER (name));
26403 *no_add_attrs = true;
26404 return NULL_TREE;
26407 /* Can combine regparm with all attributes but fastcall. */
26408 if (is_attribute_p ("ms_abi", name))
26410 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26412 error ("ms_abi and sysv_abi attributes are not compatible");
26415 return NULL_TREE;
26417 else if (is_attribute_p ("sysv_abi", name))
26419 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26421 error ("ms_abi and sysv_abi attributes are not compatible");
26424 return NULL_TREE;
26427 return NULL_TREE;
26430 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26431 struct attribute_spec.handler. */
26432 static tree
26433 ix86_handle_struct_attribute (tree *node, tree name,
26434 tree args ATTRIBUTE_UNUSED,
26435 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26437 tree *type = NULL;
26438 if (DECL_P (*node))
26440 if (TREE_CODE (*node) == TYPE_DECL)
26441 type = &TREE_TYPE (*node);
26443 else
26444 type = node;
26446 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26447 || TREE_CODE (*type) == UNION_TYPE)))
26449 warning (OPT_Wattributes, "%qs attribute ignored",
26450 IDENTIFIER_POINTER (name));
26451 *no_add_attrs = true;
26454 else if ((is_attribute_p ("ms_struct", name)
26455 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26456 || ((is_attribute_p ("gcc_struct", name)
26457 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26459 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
26460 IDENTIFIER_POINTER (name));
26461 *no_add_attrs = true;
26464 return NULL_TREE;
26467 static bool
26468 ix86_ms_bitfield_layout_p (const_tree record_type)
26470 return (TARGET_MS_BITFIELD_LAYOUT &&
26471 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26472 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26475 /* Returns an expression indicating where the this parameter is
26476 located on entry to the FUNCTION. */
26478 static rtx
26479 x86_this_parameter (tree function)
26481 tree type = TREE_TYPE (function);
26482 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26483 int nregs;
26485 if (TARGET_64BIT)
26487 const int *parm_regs;
26489 if (ix86_function_type_abi (type) == MS_ABI)
26490 parm_regs = x86_64_ms_abi_int_parameter_registers;
26491 else
26492 parm_regs = x86_64_int_parameter_registers;
26493 return gen_rtx_REG (DImode, parm_regs[aggr]);
26496 nregs = ix86_function_regparm (type, function);
26498 if (nregs > 0 && !stdarg_p (type))
26500 int regno;
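/* With fastcall, THIS is in ECX, or in EDX when a hidden aggregate-return
   pointer occupies ECX.  Otherwise THIS is in EAX, moving to EDX for an
   aggregate return; if only one register is available it is passed on
   the stack at 4(%esp) instead.  */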
26502 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26503 regno = aggr ? DX_REG : CX_REG;
26504 else
26506 regno = AX_REG;
26507 if (aggr)
26509 regno = DX_REG;
26510 if (nregs == 1)
26511 return gen_rtx_MEM (SImode,
26512 plus_constant (stack_pointer_rtx, 4));
26515 return gen_rtx_REG (SImode, regno);
26518 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26521 /* Determine whether x86_output_mi_thunk can succeed. */
26523 static bool
26524 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26525 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26526 HOST_WIDE_INT vcall_offset, const_tree function)
26528 /* 64-bit can handle anything. */
26529 if (TARGET_64BIT)
26530 return true;
26532 /* For 32-bit, everything's fine if we have one free register. */
26533 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26534 return true;
26536 /* Need a free register for vcall_offset. */
26537 if (vcall_offset)
26538 return false;
26540 /* Need a free register for GOT references. */
26541 if (flag_pic && !(*targetm.binds_local_p) (function))
26542 return false;
26544 /* Otherwise ok. */
26545 return true;
26548 /* Output the assembler code for a thunk function. THUNK_DECL is the
26549 declaration for the thunk function itself, FUNCTION is the decl for
26550 the target function. DELTA is an immediate constant offset to be
26551 added to THIS. If VCALL_OFFSET is nonzero, the word at
26552 *(*this + vcall_offset) should be added to THIS. */
26554 static void
26555 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
26556 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26557 HOST_WIDE_INT vcall_offset, tree function)
26559 rtx xops[3];
26560 rtx this_param = x86_this_parameter (function);
26561 rtx this_reg, tmp;
26563 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26564 pull it in now and let DELTA benefit. */
26565 if (REG_P (this_param))
26566 this_reg = this_param;
26567 else if (vcall_offset)
26569 /* Put the this parameter into %eax. */
26570 xops[0] = this_param;
26571 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26572 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26574 else
26575 this_reg = NULL_RTX;
26577 /* Adjust the this parameter by a fixed constant. */
26578 if (delta)
26580 xops[0] = GEN_INT (delta);
26581 xops[1] = this_reg ? this_reg : this_param;
26582 if (TARGET_64BIT)
26584 if (!x86_64_general_operand (xops[0], DImode))
26586 tmp = gen_rtx_REG (DImode, R10_REG);
26587 xops[1] = tmp;
26588 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26589 xops[0] = tmp;
26590 xops[1] = this_param;
26592 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26594 else
26595 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26598 /* Adjust the this parameter by a value stored in the vtable. */
26599 if (vcall_offset)
26601 if (TARGET_64BIT)
26602 tmp = gen_rtx_REG (DImode, R10_REG);
26603 else
26605 int tmp_regno = CX_REG;
26606 if (lookup_attribute ("fastcall",
26607 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26608 tmp_regno = AX_REG;
26609 tmp = gen_rtx_REG (SImode, tmp_regno);
26612 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26613 xops[1] = tmp;
26614 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26616 /* Adjust the this parameter. */
26617 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26618 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26620 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26621 xops[0] = GEN_INT (vcall_offset);
26622 xops[1] = tmp2;
26623 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26624 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26626 xops[1] = this_reg;
26627 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26630 /* If necessary, drop THIS back to its stack slot. */
26631 if (this_reg && this_reg != this_param)
26633 xops[0] = this_reg;
26634 xops[1] = this_param;
26635 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26638 xops[0] = XEXP (DECL_RTL (function), 0);
26639 if (TARGET_64BIT)
26641 if (!flag_pic || (*targetm.binds_local_p) (function))
26642 output_asm_insn ("jmp\t%P0", xops);
26643 /* All thunks should be in the same object as their target,
26644 and thus binds_local_p should be true. */
26645 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26646 gcc_unreachable ();
26647 else
26649 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26650 tmp = gen_rtx_CONST (Pmode, tmp);
26651 tmp = gen_rtx_MEM (QImode, tmp);
26652 xops[0] = tmp;
26653 output_asm_insn ("jmp\t%A0", xops);
26656 else
26658 if (!flag_pic || (*targetm.binds_local_p) (function))
26659 output_asm_insn ("jmp\t%P0", xops);
26660 else
26661 #if TARGET_MACHO
26662 if (TARGET_MACHO)
26664 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26665 tmp = (gen_rtx_SYMBOL_REF
26666 (Pmode,
26667 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26668 tmp = gen_rtx_MEM (QImode, tmp);
26669 xops[0] = tmp;
26670 output_asm_insn ("jmp\t%0", xops);
26672 else
26673 #endif /* TARGET_MACHO */
26675 tmp = gen_rtx_REG (SImode, CX_REG);
26676 output_set_got (tmp, NULL_RTX);
26678 xops[1] = tmp;
26679 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26680 output_asm_insn ("jmp\t{*}%1", xops);
26685 static void
26686 x86_file_start (void)
26688 default_file_start ();
26689 #if TARGET_MACHO
26690 darwin_file_start ();
26691 #endif
26692 if (X86_FILE_START_VERSION_DIRECTIVE)
26693 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26694 if (X86_FILE_START_FLTUSED)
26695 fputs ("\t.global\t__fltused\n", asm_out_file);
26696 if (ix86_asm_dialect == ASM_INTEL)
26697 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26701 x86_field_alignment (tree field, int computed)
26703 enum machine_mode mode;
26704 tree type = TREE_TYPE (field);
26706 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26707 return computed;
26708 mode = TYPE_MODE (strip_array_types (type));
26709 if (mode == DFmode || mode == DCmode
26710 || GET_MODE_CLASS (mode) == MODE_INT
26711 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26712 return MIN (32, computed);
26713 return computed;
26716 /* Output assembler code to FILE to increment profiler label # LABELNO
26717 for profiling a function entry. */
26718 void
26719 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26721 if (TARGET_64BIT)
26723 #ifndef NO_PROFILE_COUNTERS
26724 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
26725 #endif
26727 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26728 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
26729 else
26730 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
26732 else if (flag_pic)
26734 #ifndef NO_PROFILE_COUNTERS
26735 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
26736 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
26737 #endif
26738 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
26740 else
26742 #ifndef NO_PROFILE_COUNTERS
26743 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
26744 PROFILE_COUNT_REGISTER);
26745 #endif
26746 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
26750 /* We don't have exact information about the insn sizes, but we may assume
26751 quite safely that we are informed about all 1 byte insns and memory
26752 address sizes. This is enough to eliminate unnecessary padding in
26753 99% of cases. */
26755 static int
26756 min_insn_size (rtx insn)
26758 int l = 0;
26760 if (!INSN_P (insn) || !active_insn_p (insn))
26761 return 0;
26763 /* Discard alignments we've emitted and jump instructions. */
26764 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26765 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26766 return 0;
26767 if (JUMP_P (insn)
26768 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
26769 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
26770 return 0;
26772 /* Important case - calls are always 5 bytes.
26773 It is common to have many calls in a row. */
26774 if (CALL_P (insn)
26775 && symbolic_reference_mentioned_p (PATTERN (insn))
26776 && !SIBLING_CALL_P (insn))
26777 return 5;
26778 if (get_attr_length (insn) <= 1)
26779 return 1;
26781 /* For normal instructions we may rely on the sizes of addresses
26782 and the presence of a symbol to require 4 bytes of encoding.
26783 This is not the case for jumps where references are PC relative. */
26784 if (!JUMP_P (insn))
26786 l = get_attr_length_address (insn);
26787 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26788 l = 4;
26790 if (l)
26791 return 1+l;
26792 else
26793 return 2;
26796 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
26797 window. */
26799 static void
26800 ix86_avoid_jump_misspredicts (void)
26802 rtx insn, start = get_insns ();
26803 int nbytes = 0, njumps = 0;
26804 int isjump = 0;
26806 /* Look for all minimal intervals of instructions containing 4 jumps.
26807 The intervals are bounded by START and INSN. NBYTES is the total
26808 size of instructions in the interval including INSN and not including
26809 START. When NBYTES is smaller than 16 bytes, it is possible
26810 that the end of START and INSN end up in the same 16byte page.
26812 The smallest offset in the page at which INSN can start is the case where START
26813 ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
26814 We add p2align to 16byte window with maxskip 17 - NBYTES + sizeof (INSN).
26816 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
26819 nbytes += min_insn_size (insn);
26820 if (dump_file)
26821 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
26822 INSN_UID (insn), min_insn_size (insn));
26823 if ((JUMP_P (insn)
26824 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26825 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26826 || CALL_P (insn))
26827 njumps++;
26828 else
26829 continue;
26831 while (njumps > 3)
26833 start = NEXT_INSN (start);
26834 if ((JUMP_P (start)
26835 && GET_CODE (PATTERN (start)) != ADDR_VEC
26836 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26837 || CALL_P (start))
26838 njumps--, isjump = 1;
26839 else
26840 isjump = 0;
26841 nbytes -= min_insn_size (start);
26843 gcc_assert (njumps >= 0);
26844 if (dump_file)
26845 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26846 INSN_UID (start), INSN_UID (insn), nbytes);
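/* INSN would be the fourth jump in a single 16-byte window; pad just
   before it so that it starts in the next window.  */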
26848 if (njumps == 3 && isjump && nbytes < 16)
26850 int padsize = 15 - nbytes + min_insn_size (insn);
26852 if (dump_file)
26853 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26854 INSN_UID (insn), padsize);
26855 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
26860 /* AMD Athlon works faster
26861 when RET is not the destination of a conditional jump or directly preceded
26862 by another jump instruction. We avoid the penalty by inserting a NOP just
26863 before the RET instructions in such cases. */
26864 static void
26865 ix86_pad_returns (void)
26867 edge e;
26868 edge_iterator ei;
26870 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26872 basic_block bb = e->src;
26873 rtx ret = BB_END (bb);
26874 rtx prev;
26875 bool replace = false;
26877 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26878 || optimize_bb_for_size_p (bb))
26879 continue;
26880 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26881 if (active_insn_p (prev) || LABEL_P (prev))
26882 break;
26883 if (prev && LABEL_P (prev))
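/* The insn before RET is a label: replace RET only when some executed,
   non-fallthrough edge branches directly to it.  */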
26885 edge e;
26886 edge_iterator ei;
26888 FOR_EACH_EDGE (e, ei, bb->preds)
26889 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26890 && !(e->flags & EDGE_FALLTHRU))
26891 replace = true;
26893 if (!replace)
26895 prev = prev_active_insn (ret);
26896 if (prev
26897 && ((JUMP_P (prev) && any_condjump_p (prev))
26898 || CALL_P (prev)))
26899 replace = true;
26900 /* Empty functions get a branch mispredict even when the jump destination
26901 is not visible to us. */
26902 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
26903 replace = true;
26905 if (replace)
26907 emit_insn_before (gen_return_internal_long (), ret);
26908 delete_insn (ret);
26913 /* Implement machine specific optimizations. We implement padding of returns
26914 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
26915 static void
26916 ix86_reorg (void)
26918 if (TARGET_PAD_RETURNS && optimize
26919 && optimize_function_for_speed_p (cfun))
26920 ix86_pad_returns ();
26921 if (TARGET_FOUR_JUMP_LIMIT && optimize
26922 && optimize_function_for_speed_p (cfun))
26923 ix86_avoid_jump_misspredicts ();
26926 /* Return nonzero when a QImode register that must be represented via a REX prefix
26927 is used. */
26928 bool
26929 x86_extended_QIreg_mentioned_p (rtx insn)
26931 int i;
26932 extract_insn_cached (insn);
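/* QImode registers other than AL, CL, DL and BL (i.e. SPL, BPL, SIL, DIL
   and R8B-R15B) can only be accessed with a REX prefix.  */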
26933 for (i = 0; i < recog_data.n_operands; i++)
26934 if (REG_P (recog_data.operand[i])
26935 && REGNO (recog_data.operand[i]) > BX_REG)
26936 return true;
26937 return false;
26940 /* Return nonzero when P points to a register encoded via a REX prefix.
26941 Called via for_each_rtx. */
26942 static int
26943 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26945 unsigned int regno;
26946 if (!REG_P (*p))
26947 return 0;
26948 regno = REGNO (*p);
26949 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26952 /* Return true when INSN mentions a register that must be encoded using a REX
26953 prefix. */
26954 bool
26955 x86_extended_reg_mentioned_p (rtx insn)
26957 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26958 extended_reg_mentioned_1, NULL);
26961 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26962 optabs would emit if we didn't have TFmode patterns. */
26964 void
26965 x86_emit_floatuns (rtx operands[2])
26967 rtx neglab, donelab, i0, i1, f0, in, out;
26968 enum machine_mode mode, inmode;
26970 inmode = GET_MODE (operands[1]);
26971 gcc_assert (inmode == SImode || inmode == DImode);
26973 out = operands[0];
26974 in = force_reg (inmode, operands[1]);
26975 mode = GET_MODE (out);
26976 neglab = gen_label_rtx ();
26977 donelab = gen_label_rtx ();
26978 f0 = gen_reg_rtx (mode);
26980 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26982 expand_float (out, in, 0);
26984 emit_jump_insn (gen_jump (donelab));
26985 emit_barrier ();
26987 emit_label (neglab);
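/* The input has its high bit set and cannot be converted as a signed
   value.  Shift it right by one, OR the discarded low bit back in so
   rounding is unaffected, convert, and double the result.  */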
26989 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26990 1, OPTAB_DIRECT);
26991 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26992 1, OPTAB_DIRECT);
26993 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26995 expand_float (f0, i0, 0);
26997 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26999 emit_label (donelab);
27002 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27003 with all elements equal to VAR. Return true if successful. */
27005 static bool
27006 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
27007 rtx target, rtx val)
27009 enum machine_mode hmode, smode, wsmode, wvmode;
27010 rtx x;
27012 switch (mode)
27014 case V2SImode:
27015 case V2SFmode:
27016 if (!mmx_ok)
27017 return false;
27018 /* FALLTHRU */
27020 case V2DFmode:
27021 case V2DImode:
27022 case V4SFmode:
27023 case V4SImode:
27024 val = force_reg (GET_MODE_INNER (mode), val);
27025 x = gen_rtx_VEC_DUPLICATE (mode, val);
27026 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27027 return true;
27029 case V4HImode:
27030 if (!mmx_ok)
27031 return false;
27032 if (TARGET_SSE || TARGET_3DNOW_A)
27034 val = gen_lowpart (SImode, val);
27035 x = gen_rtx_TRUNCATE (HImode, val);
27036 x = gen_rtx_VEC_DUPLICATE (mode, x);
27037 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27038 return true;
27040 else
27042 smode = HImode;
27043 wsmode = SImode;
27044 wvmode = V2SImode;
27045 goto widen;
27048 case V8QImode:
27049 if (!mmx_ok)
27050 return false;
27051 smode = QImode;
27052 wsmode = HImode;
27053 wvmode = V4HImode;
27054 goto widen;
27055 case V8HImode:
27056 if (TARGET_SSE2)
27058 rtx tmp1, tmp2;
27059 /* Extend HImode to SImode using a paradoxical SUBREG. */
27060 tmp1 = gen_reg_rtx (SImode);
27061 emit_move_insn (tmp1, gen_lowpart (SImode, val));
27062 /* Insert the SImode value as low element of V4SImode vector. */
27063 tmp2 = gen_reg_rtx (V4SImode);
27064 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
27065 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
27066 CONST0_RTX (V4SImode),
27067 const1_rtx);
27068 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
27069 /* Cast the V4SImode vector back to a V8HImode vector. */
27070 tmp1 = gen_reg_rtx (V8HImode);
27071 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
27072 /* Duplicate the low short through the whole low SImode word. */
27073 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
27074 /* Cast the V8HImode vector back to a V4SImode vector. */
27075 tmp2 = gen_reg_rtx (V4SImode);
27076 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
27077 /* Replicate the low element of the V4SImode vector. */
27078 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
27079 /* Cast the V4SImode vector back to V8HImode, and store in target. */
27080 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
27081 return true;
27083 smode = HImode;
27084 wsmode = SImode;
27085 wvmode = V4SImode;
27086 goto widen;
27087 case V16QImode:
27088 if (TARGET_SSE2)
27090 rtx tmp1, tmp2;
27091 /* Extend QImode to SImode using a paradoxical SUBREG. */
27092 tmp1 = gen_reg_rtx (SImode);
27093 emit_move_insn (tmp1, gen_lowpart (SImode, val));
27094 /* Insert the SImode value as low element of V4SImode vector. */
27095 tmp2 = gen_reg_rtx (V4SImode);
27096 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
27097 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
27098 CONST0_RTX (V4SImode),
27099 const1_rtx);
27100 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
27101 /* Cast the V4SImode vector back to a V16QImode vector. */
27102 tmp1 = gen_reg_rtx (V16QImode);
27103 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
27104 /* Duplicate the low byte through the whole low SImode word. */
27105 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
27106 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
27107 /* Cast the V16QImode vector back to a V4SImode vector. */
27108 tmp2 = gen_reg_rtx (V4SImode);
27109 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
27110 /* Replicate the low element of the V4SImode vector. */
27111 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
27112 /* Cast the V4SImode vector back to V16QImode, and store in target. */
27113 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
27114 return true;
27116 smode = QImode;
27117 wsmode = HImode;
27118 wvmode = V8HImode;
27119 goto widen;
27120 widen:
27121 /* Replicate the value once into the next wider mode and recurse. */
27122 val = convert_modes (wsmode, smode, val, true);
27123 x = expand_simple_binop (wsmode, ASHIFT, val,
27124 GEN_INT (GET_MODE_BITSIZE (smode)),
27125 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27126 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27128 x = gen_reg_rtx (wvmode);
27129 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
27130 gcc_unreachable ();
27131 emit_move_insn (target, gen_lowpart (mode, x));
27132 return true;
27134 case V4DFmode:
27135 hmode = V2DFmode;
27136 goto half;
27137 case V4DImode:
27138 hmode = V2DImode;
27139 goto half;
27140 case V8SFmode:
27141 hmode = V4SFmode;
27142 goto half;
27143 case V8SImode:
27144 hmode = V4SImode;
27145 goto half;
27146 case V16HImode:
27147 hmode = V8HImode;
27148 goto half;
27149 case V32QImode:
27150 hmode = V16QImode;
27151 goto half;
27152 half:
27154 rtx tmp = gen_reg_rtx (hmode);
27155 ix86_expand_vector_init_duplicate (mmx_ok, hmode, tmp, val);
27156 emit_insn (gen_rtx_SET (VOIDmode, target,
27157 gen_rtx_VEC_CONCAT (mode, tmp, tmp)));
27159 return true;
27161 default:
27162 return false;
27166 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27167 whose ONE_VAR element is VAR, and other elements are zero. Return true
27168 if successful. */
27170 static bool
27171 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27172 rtx target, rtx var, int one_var)
27174 enum machine_mode vsimode;
27175 rtx new_target;
27176 rtx x, tmp;
27177 bool use_vector_set = false;
27179 switch (mode)
27181 case V2DImode:
27182 /* For SSE4.1, we normally use vector set. But if the second
27183 element is zero and inter-unit moves are OK, we use movq
27184 instead. */
27185 use_vector_set = (TARGET_64BIT
27186 && TARGET_SSE4_1
27187 && !(TARGET_INTER_UNIT_MOVES
27188 && one_var == 0));
27189 break;
27190 case V16QImode:
27191 case V4SImode:
27192 case V4SFmode:
27193 use_vector_set = TARGET_SSE4_1;
27194 break;
27195 case V8HImode:
27196 use_vector_set = TARGET_SSE2;
27197 break;
27198 case V4HImode:
27199 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27200 break;
27201 case V32QImode:
27202 case V16HImode:
27203 case V8SImode:
27204 case V8SFmode:
27205 case V4DFmode:
27206 use_vector_set = TARGET_AVX;
27207 break;
27208 case V4DImode:
27209 /* Use ix86_expand_vector_set in 64bit mode only. */
27210 use_vector_set = TARGET_AVX && TARGET_64BIT;
27211 break;
27212 default:
27213 break;
27216 if (use_vector_set)
27218 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27219 var = force_reg (GET_MODE_INNER (mode), var);
27220 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27221 return true;
27224 switch (mode)
27226 case V2SFmode:
27227 case V2SImode:
27228 if (!mmx_ok)
27229 return false;
27230 /* FALLTHRU */
27232 case V2DFmode:
27233 case V2DImode:
27234 if (one_var != 0)
27235 return false;
27236 var = force_reg (GET_MODE_INNER (mode), var);
27237 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27238 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27239 return true;
27241 case V4SFmode:
27242 case V4SImode:
27243 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27244 new_target = gen_reg_rtx (mode);
27245 else
27246 new_target = target;
27247 var = force_reg (GET_MODE_INNER (mode), var);
27248 x = gen_rtx_VEC_DUPLICATE (mode, var);
27249 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27250 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27251 if (one_var != 0)
27253 /* We need to shuffle the value to the correct position, so
27254 create a new pseudo to store the intermediate result. */
27256 /* With SSE2, we can use the integer shuffle insns. */
27257 if (mode != V4SFmode && TARGET_SSE2)
27259 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27260 GEN_INT (1),
27261 GEN_INT (one_var == 1 ? 0 : 1),
27262 GEN_INT (one_var == 2 ? 0 : 1),
27263 GEN_INT (one_var == 3 ? 0 : 1)));
27264 if (target != new_target)
27265 emit_move_insn (target, new_target);
27266 return true;
27269 /* Otherwise convert the intermediate result to V4SFmode and
27270 use the SSE1 shuffle instructions. */
27271 if (mode != V4SFmode)
27273 tmp = gen_reg_rtx (V4SFmode);
27274 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27276 else
27277 tmp = new_target;
27279 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27280 GEN_INT (1),
27281 GEN_INT (one_var == 1 ? 0 : 1),
27282 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27283 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27285 if (mode != V4SFmode)
27286 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27287 else if (tmp != target)
27288 emit_move_insn (target, tmp);
27290 else if (target != new_target)
27291 emit_move_insn (target, new_target);
27292 return true;
27294 case V8HImode:
27295 case V16QImode:
27296 vsimode = V4SImode;
27297 goto widen;
27298 case V4HImode:
27299 case V8QImode:
27300 if (!mmx_ok)
27301 return false;
27302 vsimode = V2SImode;
27303 goto widen;
27304 widen:
27305 if (one_var != 0)
27306 return false;
27308 /* Zero extend the variable element to SImode and recurse. */
27309 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27311 x = gen_reg_rtx (vsimode);
27312 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27313 var, one_var))
27314 gcc_unreachable ();
27316 emit_move_insn (target, gen_lowpart (mode, x));
27317 return true;
27319 default:
27320 return false;
27324 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27325 consisting of the values in VALS. It is known that all elements
27326 except ONE_VAR are constants. Return true if successful. */
27328 static bool
27329 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27330 rtx target, rtx vals, int one_var)
27332 rtx var = XVECEXP (vals, 0, one_var);
27333 enum machine_mode wmode;
27334 rtx const_vec, x;
27336 const_vec = copy_rtx (vals);
27337 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27338 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27340 switch (mode)
27342 case V2DFmode:
27343 case V2DImode:
27344 case V2SFmode:
27345 case V2SImode:
27346 /* For the two element vectors, it's just as easy to use
27347 the general case. */
27348 return false;
27350 case V4DImode:
27351 /* Use ix86_expand_vector_set in 64bit mode only. */
27352 if (!TARGET_64BIT)
27353 return false;
27354 case V4DFmode:
27355 case V8SFmode:
27356 case V8SImode:
27357 case V16HImode:
27358 case V32QImode:
27359 case V4SFmode:
27360 case V4SImode:
27361 case V8HImode:
27362 case V4HImode:
27363 break;
27365 case V16QImode:
27366 if (TARGET_SSE4_1)
27367 break;
27368 wmode = V8HImode;
27369 goto widen;
27370 case V8QImode:
27371 wmode = V4HImode;
27372 goto widen;
27373 widen:
27374 /* There's no way to set one QImode entry easily. Combine
27375 the variable value with its adjacent constant value, and
27376 promote to an HImode set. */
27377 x = XVECEXP (vals, 0, one_var ^ 1);
27378 if (one_var & 1)
27380 var = convert_modes (HImode, QImode, var, true);
27381 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27382 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27383 x = GEN_INT (INTVAL (x) & 0xff);
27385 else
27387 var = convert_modes (HImode, QImode, var, true);
27388 x = gen_int_mode (INTVAL (x) << 8, HImode);
27390 if (x != const0_rtx)
27391 var = expand_simple_binop (HImode, IOR, var, x, var,
27392 1, OPTAB_LIB_WIDEN);
27394 x = gen_reg_rtx (wmode);
27395 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27396 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27398 emit_move_insn (target, gen_lowpart (mode, x));
27399 return true;
27401 default:
27402 return false;
27405 emit_move_insn (target, const_vec);
27406 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27407 return true;
27410 /* A subroutine of ix86_expand_vector_init_general. Use vector
27411 concatenate to handle the most general case: all values variable,
27412 and none identical. */
27414 static void
27415 ix86_expand_vector_init_concat (enum machine_mode mode,
27416 rtx target, rtx *ops, int n)
27418 enum machine_mode cmode, hmode = VOIDmode;
27419 rtx first[8], second[4];
27420 rtvec v;
27421 int i, j;
27423 switch (n)
27425 case 2:
27426 switch (mode)
27428 case V8SImode:
27429 cmode = V4SImode;
27430 break;
27431 case V8SFmode:
27432 cmode = V4SFmode;
27433 break;
27434 case V4DImode:
27435 cmode = V2DImode;
27436 break;
27437 case V4DFmode:
27438 cmode = V2DFmode;
27439 break;
27440 case V4SImode:
27441 cmode = V2SImode;
27442 break;
27443 case V4SFmode:
27444 cmode = V2SFmode;
27445 break;
27446 case V2DImode:
27447 cmode = DImode;
27448 break;
27449 case V2SImode:
27450 cmode = SImode;
27451 break;
27452 case V2DFmode:
27453 cmode = DFmode;
27454 break;
27455 case V2SFmode:
27456 cmode = SFmode;
27457 break;
27458 default:
27459 gcc_unreachable ();
27462 if (!register_operand (ops[1], cmode))
27463 ops[1] = force_reg (cmode, ops[1]);
27464 if (!register_operand (ops[0], cmode))
27465 ops[0] = force_reg (cmode, ops[0]);
27466 emit_insn (gen_rtx_SET (VOIDmode, target,
27467 gen_rtx_VEC_CONCAT (mode, ops[0],
27468 ops[1])));
27469 break;
27471 case 4:
27472 switch (mode)
27474 case V4DImode:
27475 cmode = V2DImode;
27476 break;
27477 case V4DFmode:
27478 cmode = V2DFmode;
27479 break;
27480 case V4SImode:
27481 cmode = V2SImode;
27482 break;
27483 case V4SFmode:
27484 cmode = V2SFmode;
27485 break;
27486 default:
27487 gcc_unreachable ();
27489 goto half;
27491 case 8:
27492 switch (mode)
27494 case V8SImode:
27495 cmode = V2SImode;
27496 hmode = V4SImode;
27497 break;
27498 case V8SFmode:
27499 cmode = V2SFmode;
27500 hmode = V4SFmode;
27501 break;
27502 default:
27503 gcc_unreachable ();
27505 goto half;
27507 half:
27508 /* FIXME: We process inputs backward to help RA. PR 36222. */
27509 i = n - 1;
27510 j = (n >> 1) - 1;
27511 for (; i > 0; i -= 2, j--)
27513 first[j] = gen_reg_rtx (cmode);
27514 v = gen_rtvec (2, ops[i - 1], ops[i]);
27515 ix86_expand_vector_init (false, first[j],
27516 gen_rtx_PARALLEL (cmode, v));
27519 n >>= 1;
27520 if (n > 2)
27522 gcc_assert (hmode != VOIDmode);
27523 for (i = j = 0; i < n; i += 2, j++)
27525 second[j] = gen_reg_rtx (hmode);
27526 ix86_expand_vector_init_concat (hmode, second [j],
27527 &first [i], 2);
27529 n >>= 1;
27530 ix86_expand_vector_init_concat (mode, target, second, n);
27532 else
27533 ix86_expand_vector_init_concat (mode, target, first, n);
27534 break;
27536 default:
27537 gcc_unreachable ();
27541 /* A subroutine of ix86_expand_vector_init_general. Use vector
27542 interleave to handle the most general case: all values variable,
27543 and none identical. */
27545 static void
27546 ix86_expand_vector_init_interleave (enum machine_mode mode,
27547 rtx target, rtx *ops, int n)
27549 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27550 int i, j;
27551 rtx op0, op1;
27552 rtx (*gen_load_even) (rtx, rtx, rtx);
27553 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27554 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27556 switch (mode)
27558 case V8HImode:
27559 gen_load_even = gen_vec_setv8hi;
27560 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27561 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27562 inner_mode = HImode;
27563 first_imode = V4SImode;
27564 second_imode = V2DImode;
27565 third_imode = VOIDmode;
27566 break;
27567 case V16QImode:
27568 gen_load_even = gen_vec_setv16qi;
27569 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27570 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27571 inner_mode = QImode;
27572 first_imode = V8HImode;
27573 second_imode = V4SImode;
27574 third_imode = V2DImode;
27575 break;
27576 default:
27577 gcc_unreachable ();
27580 for (i = 0; i < n; i++)
27582 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27583 op0 = gen_reg_rtx (SImode);
27584 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27586 /* Insert the SImode value as low element of V4SImode vector. */
27587 op1 = gen_reg_rtx (V4SImode);
27588 op0 = gen_rtx_VEC_MERGE (V4SImode,
27589 gen_rtx_VEC_DUPLICATE (V4SImode,
27590 op0),
27591 CONST0_RTX (V4SImode),
27592 const1_rtx);
27593 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27595 /* Cast the V4SImode vector back to a vector in the original mode. */
27596 op0 = gen_reg_rtx (mode);
27597 emit_move_insn (op0, gen_lowpart (mode, op1));
27599 /* Load even elements into the second position. */
27600 emit_insn ((*gen_load_even) (op0,
27601 force_reg (inner_mode,
27602 ops [i + i + 1]),
27603 const1_rtx));
27605 /* Cast vector to FIRST_IMODE vector. */
27606 ops[i] = gen_reg_rtx (first_imode);
27607 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27610 /* Interleave low FIRST_IMODE vectors. */
27611 for (i = j = 0; i < n; i += 2, j++)
27613 op0 = gen_reg_rtx (first_imode);
27614 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27616 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27617 ops[j] = gen_reg_rtx (second_imode);
27618 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27621 /* Interleave low SECOND_IMODE vectors. */
27622 switch (second_imode)
27624 case V4SImode:
27625 for (i = j = 0; i < n / 2; i += 2, j++)
27627 op0 = gen_reg_rtx (second_imode);
27628 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27629 ops[i + 1]));
27631 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27632 vector. */
27633 ops[j] = gen_reg_rtx (third_imode);
27634 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27636 second_imode = V2DImode;
27637 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27638 /* FALLTHRU */
27640 case V2DImode:
27641 op0 = gen_reg_rtx (second_imode);
27642 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27643 ops[1]));
27645 /* Cast the SECOND_IMODE vector back to a vector in the original
27646 mode. */
27647 emit_insn (gen_rtx_SET (VOIDmode, target,
27648 gen_lowpart (mode, op0)));
27649 break;
27651 default:
27652 gcc_unreachable ();
27656 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27657 all values variable, and none identical. */
27659 static void
27660 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27661 rtx target, rtx vals)
27663 rtx ops[32], op0, op1;
27664 enum machine_mode half_mode = VOIDmode;
27665 int n, i;
27667 switch (mode)
27669 case V2SFmode:
27670 case V2SImode:
27671 if (!mmx_ok && !TARGET_SSE)
27672 break;
27673 /* FALLTHRU */
27675 case V8SFmode:
27676 case V8SImode:
27677 case V4DFmode:
27678 case V4DImode:
27679 case V4SFmode:
27680 case V4SImode:
27681 case V2DFmode:
27682 case V2DImode:
27683 n = GET_MODE_NUNITS (mode);
27684 for (i = 0; i < n; i++)
27685 ops[i] = XVECEXP (vals, 0, i);
27686 ix86_expand_vector_init_concat (mode, target, ops, n);
27687 return;
27689 case V32QImode:
27690 half_mode = V16QImode;
27691 goto half;
27693 case V16HImode:
27694 half_mode = V8HImode;
27695 goto half;
27697 half:
27698 n = GET_MODE_NUNITS (mode);
27699 for (i = 0; i < n; i++)
27700 ops[i] = XVECEXP (vals, 0, i);
27701 op0 = gen_reg_rtx (half_mode);
27702 op1 = gen_reg_rtx (half_mode);
27703 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27704 n >> 2);
27705 ix86_expand_vector_init_interleave (half_mode, op1,
27706 &ops [n >> 1], n >> 2);
27707 emit_insn (gen_rtx_SET (VOIDmode, target,
27708 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27709 return;
27711 case V16QImode:
27712 if (!TARGET_SSE4_1)
27713 break;
27714 /* FALLTHRU */
27716 case V8HImode:
27717 if (!TARGET_SSE2)
27718 break;
27720 /* Don't use ix86_expand_vector_init_interleave if we can't
27721 move from GPR to SSE register directly. */
27722 if (!TARGET_INTER_UNIT_MOVES)
27723 break;
27725 n = GET_MODE_NUNITS (mode);
27726 for (i = 0; i < n; i++)
27727 ops[i] = XVECEXP (vals, 0, i);
27728 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27729 return;
27731 case V4HImode:
27732 case V8QImode:
27733 break;
27735 default:
27736 gcc_unreachable ();
27740 int i, j, n_elts, n_words, n_elt_per_word;
27741 enum machine_mode inner_mode;
27742 rtx words[4], shift;
27744 inner_mode = GET_MODE_INNER (mode);
27745 n_elts = GET_MODE_NUNITS (mode);
27746 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27747 n_elt_per_word = n_elts / n_words;
27748 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
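/* Build each word-sized chunk starting from its highest-numbered element,
   shifting left as the lower elements are ORed in; the words are then
   assembled into the vector below.  */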
27750 for (i = 0; i < n_words; ++i)
27752 rtx word = NULL_RTX;
27754 for (j = 0; j < n_elt_per_word; ++j)
27756 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27757 elt = convert_modes (word_mode, inner_mode, elt, true);
27759 if (j == 0)
27760 word = elt;
27761 else
27763 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27764 word, 1, OPTAB_LIB_WIDEN);
27765 word = expand_simple_binop (word_mode, IOR, word, elt,
27766 word, 1, OPTAB_LIB_WIDEN);
27770 words[i] = word;
27773 if (n_words == 1)
27774 emit_move_insn (target, gen_lowpart (mode, words[0]));
27775 else if (n_words == 2)
27777 rtx tmp = gen_reg_rtx (mode);
27778 emit_clobber (tmp);
27779 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27780 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27781 emit_move_insn (target, tmp);
27783 else if (n_words == 4)
27785 rtx tmp = gen_reg_rtx (V4SImode);
27786 gcc_assert (word_mode == SImode);
27787 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27788 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27789 emit_move_insn (target, gen_lowpart (mode, tmp));
27791 else
27792 gcc_unreachable ();
27796 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27797 instructions unless MMX_OK is true. */
27799 void
27800 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27802 enum machine_mode mode = GET_MODE (target);
27803 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27804 int n_elts = GET_MODE_NUNITS (mode);
27805 int n_var = 0, one_var = -1;
27806 bool all_same = true, all_const_zero = true;
27807 int i;
27808 rtx x;
27810 for (i = 0; i < n_elts; ++i)
27812 x = XVECEXP (vals, 0, i);
27813 if (!(CONST_INT_P (x)
27814 || GET_CODE (x) == CONST_DOUBLE
27815 || GET_CODE (x) == CONST_FIXED))
27816 n_var++, one_var = i;
27817 else if (x != CONST0_RTX (inner_mode))
27818 all_const_zero = false;
27819 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27820 all_same = false;
27823 /* Constants are best loaded from the constant pool. */
27824 if (n_var == 0)
27826 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27827 return;
27830 /* If all values are identical, broadcast the value. */
27831 if (all_same
27832 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27833 XVECEXP (vals, 0, 0)))
27834 return;
27836 /* Values where only one field is non-constant are best loaded from
27837 the pool and overwritten via move later. */
27838 if (n_var == 1)
27840 if (all_const_zero
27841 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27842 XVECEXP (vals, 0, one_var),
27843 one_var))
27844 return;
27846 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27847 return;
27850 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27853 void
27854 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27856 enum machine_mode mode = GET_MODE (target);
27857 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27858 enum machine_mode half_mode;
27859 bool use_vec_merge = false;
27860 rtx tmp;
27861 static rtx (*gen_extract[6][2]) (rtx, rtx)
27863 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27864 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27865 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27866 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27867 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27868 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27870 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27872 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27873 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27874 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27875 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27876 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27877 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27879 int i, j, n;
27881 switch (mode)
27883 case V2SFmode:
27884 case V2SImode:
27885 if (mmx_ok)
27887 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27888 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27889 if (elt == 0)
27890 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27891 else
27892 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27893 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27894 return;
27896 break;
27898 case V2DImode:
27899 use_vec_merge = TARGET_SSE4_1;
27900 if (use_vec_merge)
27901 break;
27903 case V2DFmode:
27905 rtx op0, op1;
27907 /* For the two element vectors, we implement a VEC_CONCAT with
27908 the extraction of the other element. */
27910 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27911 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27913 if (elt == 0)
27914 op0 = val, op1 = tmp;
27915 else
27916 op0 = tmp, op1 = val;
27918 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27919 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27921 return;
27923 case V4SFmode:
27924 use_vec_merge = TARGET_SSE4_1;
27925 if (use_vec_merge)
27926 break;
27928 switch (elt)
27930 case 0:
27931 use_vec_merge = true;
27932 break;
27934 case 1:
27935 /* tmp = target = A B C D */
27936 tmp = copy_to_reg (target);
27937 /* target = A A B B */
27938 emit_insn (gen_sse_unpcklps (target, target, target));
27939 /* target = X A B B */
27940 ix86_expand_vector_set (false, target, val, 0);
27941 /* target = A X C D */
27942 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27943 GEN_INT (1), GEN_INT (0),
27944 GEN_INT (2+4), GEN_INT (3+4)));
27945 return;
27947 case 2:
27948 /* tmp = target = A B C D */
27949 tmp = copy_to_reg (target);
27950 /* tmp = X B C D */
27951 ix86_expand_vector_set (false, tmp, val, 0);
27952 /* target = A B X D */
27953 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27954 GEN_INT (0), GEN_INT (1),
27955 GEN_INT (0+4), GEN_INT (3+4)));
27956 return;
27958 case 3:
27959 /* tmp = target = A B C D */
27960 tmp = copy_to_reg (target);
27961 /* tmp = X B C D */
27962 ix86_expand_vector_set (false, tmp, val, 0);
27963 /* target = A B C X */
27964 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27965 GEN_INT (0), GEN_INT (1),
27966 GEN_INT (2+4), GEN_INT (0+4)));
27967 return;
27969 default:
27970 gcc_unreachable ();
27972 break;
27974 case V4SImode:
27975 use_vec_merge = TARGET_SSE4_1;
27976 if (use_vec_merge)
27977 break;
27979 /* Element 0 handled by vec_merge below. */
27980 if (elt == 0)
27982 use_vec_merge = true;
27983 break;
27986 if (TARGET_SSE2)
27988 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27989 store into element 0, then shuffle them back. */
27991 rtx order[4];
27993 order[0] = GEN_INT (elt);
27994 order[1] = const1_rtx;
27995 order[2] = const2_rtx;
27996 order[3] = GEN_INT (3);
27997 order[elt] = const0_rtx;
27999 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28000 order[1], order[2], order[3]));
28002 ix86_expand_vector_set (false, target, val, 0);
28004 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28005 order[1], order[2], order[3]));
28007 else
28009 /* For SSE1, we have to reuse the V4SF code. */
28010 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
28011 gen_lowpart (SFmode, val), elt);
28013 return;
28015 case V8HImode:
28016 use_vec_merge = TARGET_SSE2;
28017 break;
28018 case V4HImode:
28019 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28020 break;
28022 case V16QImode:
28023 use_vec_merge = TARGET_SSE4_1;
28024 break;
28026 case V8QImode:
28027 break;
28029 case V32QImode:
28030 half_mode = V16QImode;
28031 j = 0;
28032 n = 16;
28033 goto half;
28035 case V16HImode:
28036 half_mode = V8HImode;
28037 j = 1;
28038 n = 8;
28039 goto half;
28041 case V8SImode:
28042 half_mode = V4SImode;
28043 j = 2;
28044 n = 4;
28045 goto half;
28047 case V4DImode:
28048 half_mode = V2DImode;
28049 j = 3;
28050 n = 2;
28051 goto half;
28053 case V8SFmode:
28054 half_mode = V4SFmode;
28055 j = 4;
28056 n = 4;
28057 goto half;
28059 case V4DFmode:
28060 half_mode = V2DFmode;
28061 j = 5;
28062 n = 2;
28063 goto half;
28065 half:
28066 /* Compute offset. */
28067 i = elt / n;
28068 elt %= n;
28070 gcc_assert (i <= 1);
28072 /* Extract the half. */
28073 tmp = gen_reg_rtx (half_mode);
28074 emit_insn ((*gen_extract[j][i]) (tmp, target));
28076 /* Put val in tmp at elt. */
28077 ix86_expand_vector_set (false, tmp, val, elt);
28079 /* Put it back. */
28080 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
28081 return;
28083 default:
28084 break;
28087 if (use_vec_merge)
28089 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
28090 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
28091 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28093 else
28095 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28097 emit_move_insn (mem, target);
28099 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28100 emit_move_insn (tmp, val);
28102 emit_move_insn (target, mem);
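/* A minimal scalar sketch of the stack-slot fallback above (the extract
   routine below uses the same trick in reverse), assuming a four-element
   float vector; the helper name is purely illustrative.  */
#if 0
static void
vector_set_sketch (float v[4], float val, int elt)
{
  float mem[4];                            /* assign_stack_temp */

  __builtin_memcpy (mem, v, sizeof mem);   /* emit_move_insn (mem, target) */
  mem[elt] = val;                          /* adjust_address + emit_move_insn */
  __builtin_memcpy (v, mem, sizeof mem);   /* emit_move_insn (target, mem) */
}
#endif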
28106 void
28107 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
28109 enum machine_mode mode = GET_MODE (vec);
28110 enum machine_mode inner_mode = GET_MODE_INNER (mode);
28111 bool use_vec_extr = false;
28112 rtx tmp;
28114 switch (mode)
28116 case V2SImode:
28117 case V2SFmode:
28118 if (!mmx_ok)
28119 break;
28120 /* FALLTHRU */
28122 case V2DFmode:
28123 case V2DImode:
28124 use_vec_extr = true;
28125 break;
28127 case V4SFmode:
28128 use_vec_extr = TARGET_SSE4_1;
28129 if (use_vec_extr)
28130 break;
28132 switch (elt)
28134 case 0:
28135 tmp = vec;
28136 break;
28138 case 1:
28139 case 3:
28140 tmp = gen_reg_rtx (mode);
28141 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28142 GEN_INT (elt), GEN_INT (elt),
28143 GEN_INT (elt+4), GEN_INT (elt+4)));
28144 break;
28146 case 2:
28147 tmp = gen_reg_rtx (mode);
28148 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
28149 break;
28151 default:
28152 gcc_unreachable ();
28154 vec = tmp;
28155 use_vec_extr = true;
28156 elt = 0;
28157 break;
28159 case V4SImode:
28160 use_vec_extr = TARGET_SSE4_1;
28161 if (use_vec_extr)
28162 break;
28164 if (TARGET_SSE2)
28166 switch (elt)
28168 case 0:
28169 tmp = vec;
28170 break;
28172 case 1:
28173 case 3:
28174 tmp = gen_reg_rtx (mode);
28175 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28176 GEN_INT (elt), GEN_INT (elt),
28177 GEN_INT (elt), GEN_INT (elt)));
28178 break;
28180 case 2:
28181 tmp = gen_reg_rtx (mode);
28182 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
28183 break;
28185 default:
28186 gcc_unreachable ();
28188 vec = tmp;
28189 use_vec_extr = true;
28190 elt = 0;
28192 else
28194 /* For SSE1, we have to reuse the V4SF code. */
28195 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28196 gen_lowpart (V4SFmode, vec), elt);
28197 return;
28199 break;
28201 case V8HImode:
28202 use_vec_extr = TARGET_SSE2;
28203 break;
28204 case V4HImode:
28205 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28206 break;
28208 case V16QImode:
28209 use_vec_extr = TARGET_SSE4_1;
28210 break;
28212 case V8QImode:
28213 /* ??? Could extract the appropriate HImode element and shift. */
28214 default:
28215 break;
28218 if (use_vec_extr)
28220 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28221 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28223 /* Let the rtl optimizers know about the zero extension performed. */
28224 if (inner_mode == QImode || inner_mode == HImode)
28226 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28227 target = gen_lowpart (SImode, target);
28230 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28232 else
28234 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28236 emit_move_insn (mem, vec);
28238 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28239 emit_move_insn (target, tmp);
28243 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28244 pattern to reduce; DEST is the destination; IN is the input vector. */
28246 void
28247 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28249 rtx tmp1, tmp2, tmp3;
28251 tmp1 = gen_reg_rtx (V4SFmode);
28252 tmp2 = gen_reg_rtx (V4SFmode);
28253 tmp3 = gen_reg_rtx (V4SFmode);
28255 emit_insn (gen_sse_movhlps (tmp1, in, in));
28256 emit_insn (fn (tmp2, tmp1, in));
28258 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28259 GEN_INT (1), GEN_INT (1),
28260 GEN_INT (1+4), GEN_INT (1+4)));
28261 emit_insn (fn (dest, tmp2, tmp3));
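/* A scalar model of the three-insn reduction above, assuming FN is
   associative and commutative (e.g. addition); only lane 0 of DEST is
   meaningful afterwards, and the helper name is illustrative only.  */
#if 0
static float
reduc_v4sf_sketch (float (*fn) (float, float), const float in[4])
{
  float t0, t1;

  t0 = fn (in[2], in[0]);   /* movhlps folds lanes 2/3 onto lanes 0/1, then FN */
  t1 = fn (in[3], in[1]);
  return fn (t0, t1);       /* shufps broadcasts lane 1, then FN again */
}
#endif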
28264 /* Target hook for scalar_mode_supported_p. */
28265 static bool
28266 ix86_scalar_mode_supported_p (enum machine_mode mode)
28268 if (DECIMAL_FLOAT_MODE_P (mode))
28269 return true;
28270 else if (mode == TFmode)
28271 return true;
28272 else
28273 return default_scalar_mode_supported_p (mode);
28276 /* Implements target hook vector_mode_supported_p. */
28277 static bool
28278 ix86_vector_mode_supported_p (enum machine_mode mode)
28280 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28281 return true;
28282 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28283 return true;
28284 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28285 return true;
28286 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28287 return true;
28288 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28289 return true;
28290 return false;
28293 /* Target hook for c_mode_for_suffix. */
28294 static enum machine_mode
28295 ix86_c_mode_for_suffix (char suffix)
28297 if (suffix == 'q')
28298 return TFmode;
28299 if (suffix == 'w')
28300 return XFmode;
28302 return VOIDmode;
28305 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28307 We do this in the new i386 backend to maintain source compatibility
28308 with the old cc0-based compiler. */
28310 static tree
28311 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28312 tree inputs ATTRIBUTE_UNUSED,
28313 tree clobbers)
28315 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28316 clobbers);
28317 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28318 clobbers);
28319 return clobbers;
28322 /* Implements target vector targetm.asm.encode_section_info. This
28323 is not used by NetWare. */
28325 static void ATTRIBUTE_UNUSED
28326 ix86_encode_section_info (tree decl, rtx rtl, int first)
28328 default_encode_section_info (decl, rtl, first);
28330 if (TREE_CODE (decl) == VAR_DECL
28331 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28332 && ix86_in_large_data_p (decl))
28333 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28336 /* Worker function for REVERSE_CONDITION. */
28338 enum rtx_code
28339 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28341 return (mode != CCFPmode && mode != CCFPUmode
28342 ? reverse_condition (code)
28343 : reverse_condition_maybe_unordered (code));
28346 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28347 to OPERANDS[0]. */
28349 const char *
28350 output_387_reg_move (rtx insn, rtx *operands)
28352 if (REG_P (operands[0]))
28354 if (REG_P (operands[1])
28355 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28357 if (REGNO (operands[0]) == FIRST_STACK_REG)
28358 return output_387_ffreep (operands, 0);
28359 return "fstp\t%y0";
28361 if (STACK_TOP_P (operands[0]))
28362 return "fld%z1\t%y1";
28363 return "fst\t%y0";
28365 else if (MEM_P (operands[0]))
28367 gcc_assert (REG_P (operands[1]));
28368 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28369 return "fstp%z0\t%y0";
28370 else
28372 /* There is no non-popping store to memory for XFmode.
28373 So if we need one, follow the store with a load. */
28374 if (GET_MODE (operands[0]) == XFmode)
28375 return "fstp%z0\t%y0\n\tfld%z0\t%y0";
28376 else
28377 return "fst%z0\t%y0";
28380 else
28381 gcc_unreachable();
28384 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
28385 the FP status register is set. */
28387 void
28388 ix86_emit_fp_unordered_jump (rtx label)
28390 rtx reg = gen_reg_rtx (HImode);
28391 rtx temp;
28393 emit_insn (gen_x86_fnstsw_1 (reg));
28395 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28397 emit_insn (gen_x86_sahf_1 (reg));
28399 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28400 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28402 else
28404 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28406 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28407 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28410 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28411 gen_rtx_LABEL_REF (VOIDmode, label),
28412 pc_rtx);
28413 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28415 emit_jump_insn (temp);
28416 predict_jump (REG_BR_PROB_BASE * 10 / 100);
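/* How the two paths above read C2, assuming the usual x87 status word
   layout: fnstsw copies SW bits 8-15 into %ah, so C2 (SW bit 10) lands in
   bit 2 of %ah.  The sahf path transfers %ah into EFLAGS, where bit 2 is
   PF, hence the UNORDERED test on CCmode; the fallback simply tests
   %ah & 0x04.  */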
28419 /* Output code to perform a log1p XFmode calculation. */
28421 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28423 rtx label1 = gen_label_rtx ();
28424 rtx label2 = gen_label_rtx ();
28426 rtx tmp = gen_reg_rtx (XFmode);
28427 rtx tmp2 = gen_reg_rtx (XFmode);
28429 emit_insn (gen_absxf2 (tmp, op1));
28430 emit_insn (gen_cmpxf (tmp,
28431 CONST_DOUBLE_FROM_REAL_VALUE (
28432 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28433 XFmode)));
28434 emit_jump_insn (gen_bge (label1));
28436 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28437 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28438 emit_jump (label2);
28440 emit_label (label1);
28441 emit_move_insn (tmp, CONST1_RTX (XFmode));
28442 emit_insn (gen_addxf3 (tmp, op1, tmp));
28443 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28444 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28446 emit_label (label2);
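/* A scalar sketch of the expansion above, assuming <math.h>; log2_1p ()
   is a made-up stand-in for fyl2xp1 (it computes y * log2 (x + 1) directly
   from x, avoiding the cancellation in forming 1 + x), and M_LN2 plays the
   role of the fldln2 constant.  */
#if 0
static long double
log1p_sketch (long double x)
{
  /* fyl2xp1 only accepts |x| < 1 - sqrt(2)/2 ~= 0.2929, hence the test.  */
  if (fabsl (x) < 0.29289321881345247561810596348408353L)
    return M_LN2 * log2_1p (x);        /* fyl2xp1 path, accurate for small x */
  return M_LN2 * log2l (1.0L + x);     /* fyl2x on 1 + x */
}
#endif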
28449 /* Output code to perform a Newton-Raphson approximation of a single precision
28450 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28452 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28454 rtx x0, x1, e0, e1, two;
28456 x0 = gen_reg_rtx (mode);
28457 e0 = gen_reg_rtx (mode);
28458 e1 = gen_reg_rtx (mode);
28459 x1 = gen_reg_rtx (mode);
28461 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28463 if (VECTOR_MODE_P (mode))
28464 two = ix86_build_const_vector (SFmode, true, two);
28466 two = force_reg (mode, two);
28468 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28470 /* x0 = rcp(b) estimate */
28471 emit_insn (gen_rtx_SET (VOIDmode, x0,
28472 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28473 UNSPEC_RCP)));
28474 /* e0 = x0 * b */
28475 emit_insn (gen_rtx_SET (VOIDmode, e0,
28476 gen_rtx_MULT (mode, x0, b)));
28477 /* e1 = 2. - e0 */
28478 emit_insn (gen_rtx_SET (VOIDmode, e1,
28479 gen_rtx_MINUS (mode, two, e0)));
28480 /* x1 = x0 * e1 */
28481 emit_insn (gen_rtx_SET (VOIDmode, x1,
28482 gen_rtx_MULT (mode, x0, e1)));
28483 /* res = a * x1 */
28484 emit_insn (gen_rtx_SET (VOIDmode, res,
28485 gen_rtx_MULT (mode, a, x1)));
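/* A scalar model of the reciprocal refinement above, assuming rcpss
   returns roughly 12 correct bits of 1/b; the single Newton-Raphson step
   below approximately doubles that.  Illustrative helper only.  */
#if 0
static float
swdiv_sketch (float a, float b)
{
  float x0, e0, e1, x1;

  x0 = 1.0f / b;       /* rcp (b) estimate (UNSPEC_RCP) */
  e0 = x0 * b;         /* e0 = x0 * b   */
  e1 = 2.0f - e0;      /* e1 = 2.0 - e0 */
  x1 = x0 * e1;        /* x1 = x0 * e1: refined reciprocal */
  return a * x1;       /* res = a * x1  */
}
#endif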
28488 /* Output code to perform a Newton-Raphson approximation of a
28489 single precision floating point [reciprocal] square root. */
28491 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28492 bool recip)
28494 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28495 REAL_VALUE_TYPE r;
28497 x0 = gen_reg_rtx (mode);
28498 e0 = gen_reg_rtx (mode);
28499 e1 = gen_reg_rtx (mode);
28500 e2 = gen_reg_rtx (mode);
28501 e3 = gen_reg_rtx (mode);
28503 real_from_integer (&r, VOIDmode, -3, -1, 0);
28504 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28506 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28507 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28509 if (VECTOR_MODE_P (mode))
28511 mthree = ix86_build_const_vector (SFmode, true, mthree);
28512 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28515 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28516 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28518 /* x0 = rsqrt(a) estimate */
28519 emit_insn (gen_rtx_SET (VOIDmode, x0,
28520 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28521 UNSPEC_RSQRT)));
28523 /* If a == 0.0, filter out the infinite estimate to prevent NaN for sqrt (0.0). */
28524 if (!recip)
28526 rtx zero, mask;
28528 zero = gen_reg_rtx (mode);
28529 mask = gen_reg_rtx (mode);
28531 zero = force_reg (mode, CONST0_RTX(mode));
28532 emit_insn (gen_rtx_SET (VOIDmode, mask,
28533 gen_rtx_NE (mode, zero, a)));
28535 emit_insn (gen_rtx_SET (VOIDmode, x0,
28536 gen_rtx_AND (mode, x0, mask)));
28539 /* e0 = x0 * a */
28540 emit_insn (gen_rtx_SET (VOIDmode, e0,
28541 gen_rtx_MULT (mode, x0, a)));
28542 /* e1 = e0 * x0 */
28543 emit_insn (gen_rtx_SET (VOIDmode, e1,
28544 gen_rtx_MULT (mode, e0, x0)));
28546 /* e2 = e1 - 3. */
28547 mthree = force_reg (mode, mthree);
28548 emit_insn (gen_rtx_SET (VOIDmode, e2,
28549 gen_rtx_PLUS (mode, e1, mthree)));
28551 mhalf = force_reg (mode, mhalf);
28552 if (recip)
28553 /* e3 = -.5 * x0 */
28554 emit_insn (gen_rtx_SET (VOIDmode, e3,
28555 gen_rtx_MULT (mode, x0, mhalf)));
28556 else
28557 /* e3 = -.5 * e0 */
28558 emit_insn (gen_rtx_SET (VOIDmode, e3,
28559 gen_rtx_MULT (mode, e0, mhalf)));
28560 /* ret = e2 * e3 */
28561 emit_insn (gen_rtx_SET (VOIDmode, res,
28562 gen_rtx_MULT (mode, e2, e3)));
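/* A scalar model of the sequence above, assuming rsqrtss returns roughly
   12 correct bits of 1/sqrt(a); both results come from the Newton-Raphson
   step 0.5 * x0 * (3 - a * x0 * x0), folded into the -0.5/-3.0 constants.
   Illustrative helper only.  */
#if 0
static float
swsqrt_sketch (float a, int recip)
{
  float x0, e0, e1, e2, e3;

  x0 = 1.0f / __builtin_sqrtf (a);   /* rsqrt (a) estimate (UNSPEC_RSQRT) */
  if (!recip && a == 0.0f)
    x0 = 0.0f;                       /* the mask above: avoid inf * 0 = NaN */
  e0 = x0 * a;                       /* e0 = x0 * a       */
  e1 = e0 * x0;                      /* e1 = a * x0 * x0  */
  e2 = e1 - 3.0f;                    /* e2 = e1 + (-3.0)  */
  e3 = (recip ? x0 : e0) * -0.5f;    /* e3 = -0.5 * x0 or -0.5 * e0 */
  return e2 * e3;                    /* res = e2 * e3     */
}
#endif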
28565 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28567 static void ATTRIBUTE_UNUSED
28568 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28569 tree decl)
28571 /* With Binutils 2.15, the "@unwind" marker must be specified on
28572 every occurrence of the ".eh_frame" section, not just the first
28573 one. */
28574 if (TARGET_64BIT
28575 && strcmp (name, ".eh_frame") == 0)
28577 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28578 flags & SECTION_WRITE ? "aw" : "a");
28579 return;
28581 default_elf_asm_named_section (name, flags, decl);
28584 /* Return the mangling of TYPE if it is an extended fundamental type. */
28586 static const char *
28587 ix86_mangle_type (const_tree type)
28589 type = TYPE_MAIN_VARIANT (type);
28591 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28592 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28593 return NULL;
28595 switch (TYPE_MODE (type))
28597 case TFmode:
28598 /* __float128 is "g". */
28599 return "g";
28600 case XFmode:
28601 /* "long double" or __float80 is "e". */
28602 return "e";
28603 default:
28604 return NULL;
28608 /* For 32-bit code we can save PIC register setup by using
28609 the __stack_chk_fail_local hidden function instead of calling
28610 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28611 register, so it is better to call __stack_chk_fail directly. */
28613 static tree
28614 ix86_stack_protect_fail (void)
28616 return TARGET_64BIT
28617 ? default_external_stack_protect_fail ()
28618 : default_hidden_stack_protect_fail ();
28621 /* Select a format to encode pointers in exception handling data. CODE
28622 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28623 true if the symbol may be affected by dynamic relocations.
28625 ??? All x86 object file formats are capable of representing this.
28626 After all, the relocation needed is the same as for the call insn.
28627 Whether or not a particular assembler allows us to enter such, I
28628 guess we'll have to see. */
28630 asm_preferred_eh_data_format (int code, int global)
28632 if (flag_pic)
28634 int type = DW_EH_PE_sdata8;
28635 if (!TARGET_64BIT
28636 || ix86_cmodel == CM_SMALL_PIC
28637 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28638 type = DW_EH_PE_sdata4;
28639 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28641 if (ix86_cmodel == CM_SMALL
28642 || (ix86_cmodel == CM_MEDIUM && code))
28643 return DW_EH_PE_udata4;
28644 return DW_EH_PE_absptr;
28647 /* Expand copysign from SIGN to the positive value ABS_VALUE
28648 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
28649 the sign-bit. */
28650 static void
28651 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28653 enum machine_mode mode = GET_MODE (sign);
28654 rtx sgn = gen_reg_rtx (mode);
28655 if (mask == NULL_RTX)
28657 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28658 if (!VECTOR_MODE_P (mode))
28660 /* We need to generate a scalar mode mask in this case. */
28661 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28662 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28663 mask = gen_reg_rtx (mode);
28664 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28667 else
28668 mask = gen_rtx_NOT (mode, mask);
28669 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28670 gen_rtx_AND (mode, mask, sign)));
28671 emit_insn (gen_rtx_SET (VOIDmode, result,
28672 gen_rtx_IOR (mode, abs_value, sgn)));
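/* A scalar model of the AND/IOR pair above, assuming ABS_VALUE already has
   its sign bit clear (that is what "to positive" means) and using a union
   for the bit-level view; purely illustrative.  */
#if 0
static double
copysign_to_positive_sketch (double abs_value, double sign)
{
  union { double d; unsigned long long i; } a, s;

  a.d = abs_value;
  s.d = sign;
  a.i |= s.i & 0x8000000000000000ULL;   /* sgn = sign & signbit; result = abs | sgn */
  return a.d;
}
#endif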
28675 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28676 mask for masking out the sign-bit is stored in *SMASK, if that is
28677 non-null. */
28678 static rtx
28679 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28681 enum machine_mode mode = GET_MODE (op0);
28682 rtx xa, mask;
28684 xa = gen_reg_rtx (mode);
28685 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28686 if (!VECTOR_MODE_P (mode))
28688 /* We need to generate a scalar mode mask in this case. */
28689 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28690 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28691 mask = gen_reg_rtx (mode);
28692 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28694 emit_insn (gen_rtx_SET (VOIDmode, xa,
28695 gen_rtx_AND (mode, op0, mask)));
28697 if (smask)
28698 *smask = mask;
28700 return xa;
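/* The same idea in scalar form: fabs is an AND that clears the sign bit;
   the mask handed back through *SMASK is what the copysign helper above
   inverts to reinsert the sign later.  Purely illustrative.  */
#if 0
static double
sse_fabs_sketch (double x)
{
  union { double d; unsigned long long i; } u;

  u.d = x;
  u.i &= 0x7fffffffffffffffULL;   /* clear the sign bit */
  return u.d;
}
#endif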
28703 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28704 swapping the operands if SWAP_OPERANDS is true. The expanded
28705 code is a forward jump to a newly created label in case the
28706 comparison is true. The generated label rtx is returned. */
28707 static rtx
28708 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28709 bool swap_operands)
28711 rtx label, tmp;
28713 if (swap_operands)
28715 tmp = op0;
28716 op0 = op1;
28717 op1 = tmp;
28720 label = gen_label_rtx ();
28721 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28722 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28723 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28724 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28725 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28726 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28727 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28728 JUMP_LABEL (tmp) = label;
28730 return label;
28733 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28734 using comparison code CODE. Operands are swapped for the comparison if
28735 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28736 static rtx
28737 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28738 bool swap_operands)
28740 enum machine_mode mode = GET_MODE (op0);
28741 rtx mask = gen_reg_rtx (mode);
28743 if (swap_operands)
28745 rtx tmp = op0;
28746 op0 = op1;
28747 op1 = tmp;
28750 if (mode == DFmode)
28751 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28752 gen_rtx_fmt_ee (code, mode, op0, op1)));
28753 else
28754 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28755 gen_rtx_fmt_ee (code, mode, op0, op1)));
28757 return mask;
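/* Note on how this mask is consumed by the expanders below: the maskcmp
   patterns yield all-one bits when the predicate holds and all-zero bits
   otherwise, so ANDing the mask with the constant 1.0 produces exactly the
   0.0/1.0 compensation term, e.g. (x2 > x ? 1.0 : 0.0) in the floor
   expansion.  */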
28760 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28761 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28762 static rtx
28763 ix86_gen_TWO52 (enum machine_mode mode)
28765 REAL_VALUE_TYPE TWO52r;
28766 rtx TWO52;
28768 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28769 TWO52 = const_double_from_real_value (TWO52r, mode);
28770 TWO52 = force_reg (mode, TWO52);
28772 return TWO52;
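/* Why 2**52 (2**23 for SFmode): once a magnitude below 2**52 is added to
   it, the mantissa has no bits left for a fraction, so the addition rounds
   to an integer and the subtraction recovers it.  A sketch, assuming the
   default round-to-nearest mode:  */
#if 0
static double
round_via_two52_sketch (double x)
{
  const double two52 = 4503599627370496.0;   /* 2**52 */

  return (x + two52) - two52;   /* x rounded to an integer, for |x| < 2**52 */
}
#endif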
28775 /* Expand SSE sequence for computing lround from OP1 storing
28776 into OP0. */
28777 void
28778 ix86_expand_lround (rtx op0, rtx op1)
28780 /* C code for the stuff we're doing below:
28781 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28782 return (long)tmp;
28784 enum machine_mode mode = GET_MODE (op1);
28785 const struct real_format *fmt;
28786 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28787 rtx adj;
28789 /* load nextafter (0.5, 0.0) */
28790 fmt = REAL_MODE_FORMAT (mode);
28791 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28792 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28794 /* adj = copysign (0.5, op1) */
28795 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28796 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28798 /* adj = op1 + adj */
28799 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28801 /* op0 = (imode)adj */
28802 expand_fix (op0, adj, 0);
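/* Why nextafter (0.5, 0.0) rather than 0.5 (sketch, round-to-nearest
   assumed): with a plain 0.5 the largest double below one half would be
   rounded up, since 0.49999999999999994 + 0.5 rounds to exactly 1.0;
   adding the slightly smaller constant keeps the sum below 1.0, so the
   truncation in expand_fix yields 0, as lround requires.  */
#if 0
static long
lround_sketch (double x)
{
  double adj;

  /* 0.49999999999999994 is nextafter (0.5, 0.0), i.e. 0.5 - 2**-54.  */
  adj = __builtin_copysign (0.49999999999999994, x);
  return (long) (x + adj);      /* expand_fix: truncating conversion */
}
#endif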
28805 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
28806 into OPERAND0. */
28807 void
28808 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28810 /* C code for the stuff we're doing below (for do_floor):
28811 xi = (long)op1;
28812 xi -= (double)xi > op1 ? 1 : 0;
28813 return xi;
28815 enum machine_mode fmode = GET_MODE (op1);
28816 enum machine_mode imode = GET_MODE (op0);
28817 rtx ireg, freg, label, tmp;
28819 /* reg = (long)op1 */
28820 ireg = gen_reg_rtx (imode);
28821 expand_fix (ireg, op1, 0);
28823 /* freg = (double)reg */
28824 freg = gen_reg_rtx (fmode);
28825 expand_float (freg, ireg, 0);
28827 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28828 label = ix86_expand_sse_compare_and_jump (UNLE,
28829 freg, op1, !do_floor);
28830 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28831 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28832 emit_move_insn (ireg, tmp);
28834 emit_label (label);
28835 LABEL_NUSES (label) = 1;
28837 emit_move_insn (op0, ireg);
28840 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28841 result in OPERAND0. */
28842 void
28843 ix86_expand_rint (rtx operand0, rtx operand1)
28845 /* C code for the stuff we're doing below:
28846 xa = fabs (operand1);
28847 if (!isless (xa, 2**52))
28848 return operand1;
28849 xa = xa + 2**52 - 2**52;
28850 return copysign (xa, operand1);
28852 enum machine_mode mode = GET_MODE (operand0);
28853 rtx res, xa, label, TWO52, mask;
28855 res = gen_reg_rtx (mode);
28856 emit_move_insn (res, operand1);
28858 /* xa = abs (operand1) */
28859 xa = ix86_expand_sse_fabs (res, &mask);
28861 /* if (!isless (xa, TWO52)) goto label; */
28862 TWO52 = ix86_gen_TWO52 (mode);
28863 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28865 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28866 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28868 ix86_sse_copysign_to_positive (res, xa, res, mask);
28870 emit_label (label);
28871 LABEL_NUSES (label) = 1;
28873 emit_move_insn (operand0, res);
28876 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28877 into OPERAND0. */
28878 void
28879 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28881 /* C code for the stuff we expand below.
28882 double xa = fabs (x), x2;
28883 if (!isless (xa, TWO52))
28884 return x;
28885 xa = xa + TWO52 - TWO52;
28886 x2 = copysign (xa, x);
28887 Compensate. Floor:
28888 if (x2 > x)
28889 x2 -= 1;
28890 Compensate. Ceil:
28891 if (x2 < x)
28892 x2 -= -1;
28893 return x2;
28895 enum machine_mode mode = GET_MODE (operand0);
28896 rtx xa, TWO52, tmp, label, one, res, mask;
28898 TWO52 = ix86_gen_TWO52 (mode);
28900 /* Temporary for holding the result, initialized to the input
28901 operand to ease control flow. */
28902 res = gen_reg_rtx (mode);
28903 emit_move_insn (res, operand1);
28905 /* xa = abs (operand1) */
28906 xa = ix86_expand_sse_fabs (res, &mask);
28908 /* if (!isless (xa, TWO52)) goto label; */
28909 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28911 /* xa = xa + TWO52 - TWO52; */
28912 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28913 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28915 /* xa = copysign (xa, operand1) */
28916 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28918 /* generate 1.0 or -1.0 */
28919 one = force_reg (mode,
28920 const_double_from_real_value (do_floor
28921 ? dconst1 : dconstm1, mode));
28923 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28924 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28925 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28926 gen_rtx_AND (mode, one, tmp)));
28927 /* We always need to subtract here to preserve signed zero. */
28928 tmp = expand_simple_binop (mode, MINUS,
28929 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28930 emit_move_insn (res, tmp);
28932 emit_label (label);
28933 LABEL_NUSES (label) = 1;
28935 emit_move_insn (operand0, res);
28938 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28939 into OPERAND0. */
28940 void
28941 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28943 /* C code for the stuff we expand below.
28944 double xa = fabs (x), x2;
28945 if (!isless (xa, TWO52))
28946 return x;
28947 x2 = (double)(long)x;
28948 Compensate. Floor:
28949 if (x2 > x)
28950 x2 -= 1;
28951 Compensate. Ceil:
28952 if (x2 < x)
28953 x2 += 1;
28954 if (HONOR_SIGNED_ZEROS (mode))
28955 return copysign (x2, x);
28956 return x2;
28958 enum machine_mode mode = GET_MODE (operand0);
28959 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28961 TWO52 = ix86_gen_TWO52 (mode);
28963 /* Temporary for holding the result, initialized to the input
28964 operand to ease control flow. */
28965 res = gen_reg_rtx (mode);
28966 emit_move_insn (res, operand1);
28968 /* xa = abs (operand1) */
28969 xa = ix86_expand_sse_fabs (res, &mask);
28971 /* if (!isless (xa, TWO52)) goto label; */
28972 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28974 /* xa = (double)(long)x */
28975 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28976 expand_fix (xi, res, 0);
28977 expand_float (xa, xi, 0);
28979 /* generate 1.0 */
28980 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28982 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28983 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28984 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28985 gen_rtx_AND (mode, one, tmp)));
28986 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28987 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28988 emit_move_insn (res, tmp);
28990 if (HONOR_SIGNED_ZEROS (mode))
28991 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28993 emit_label (label);
28994 LABEL_NUSES (label) = 1;
28996 emit_move_insn (operand0, res);
28999 /* Expand SSE sequence for computing round from OPERAND1 storing
29000 into OPERAND0. The sequence works without relying on DImode truncation
29001 via cvttsd2siq, which is only available on 64-bit targets. */
29002 void
29003 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
29005 /* C code for the stuff we expand below.
29006 double xa = fabs (x), xa2, x2;
29007 if (!isless (xa, TWO52))
29008 return x;
29009 Using the absolute value and copying back sign makes
29010 -0.0 -> -0.0 correct.
29011 xa2 = xa + TWO52 - TWO52;
29012 Compensate.
29013 dxa = xa2 - xa;
29014 if (dxa <= -0.5)
29015 xa2 += 1;
29016 else if (dxa > 0.5)
29017 xa2 -= 1;
29018 x2 = copysign (xa2, x);
29019 return x2;
29021 enum machine_mode mode = GET_MODE (operand0);
29022 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
29024 TWO52 = ix86_gen_TWO52 (mode);
29026 /* Temporary for holding the result, initialized to the input
29027 operand to ease control flow. */
29028 res = gen_reg_rtx (mode);
29029 emit_move_insn (res, operand1);
29031 /* xa = abs (operand1) */
29032 xa = ix86_expand_sse_fabs (res, &mask);
29034 /* if (!isless (xa, TWO52)) goto label; */
29035 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29037 /* xa2 = xa + TWO52 - TWO52; */
29038 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29039 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
29041 /* dxa = xa2 - xa; */
29042 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
29044 /* generate 0.5, 1.0 and -0.5 */
29045 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
29046 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
29047 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
29048 0, OPTAB_DIRECT);
29050 /* Compensate. */
29051 tmp = gen_reg_rtx (mode);
29052 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
29053 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
29054 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29055 gen_rtx_AND (mode, one, tmp)));
29056 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29057 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
29058 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
29059 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29060 gen_rtx_AND (mode, one, tmp)));
29061 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29063 /* res = copysign (xa2, operand1) */
29064 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
29066 emit_label (label);
29067 LABEL_NUSES (label) = 1;
29069 emit_move_insn (operand0, res);
29072 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29073 into OPERAND0. */
29074 void
29075 ix86_expand_trunc (rtx operand0, rtx operand1)
29077 /* C code for SSE variant we expand below.
29078 double xa = fabs (x), x2;
29079 if (!isless (xa, TWO52))
29080 return x;
29081 x2 = (double)(long)x;
29082 if (HONOR_SIGNED_ZEROS (mode))
29083 return copysign (x2, x);
29084 return x2;
29086 enum machine_mode mode = GET_MODE (operand0);
29087 rtx xa, xi, TWO52, label, res, mask;
29089 TWO52 = ix86_gen_TWO52 (mode);
29091 /* Temporary for holding the result, initialized to the input
29092 operand to ease control flow. */
29093 res = gen_reg_rtx (mode);
29094 emit_move_insn (res, operand1);
29096 /* xa = abs (operand1) */
29097 xa = ix86_expand_sse_fabs (res, &mask);
29099 /* if (!isless (xa, TWO52)) goto label; */
29100 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29102 /* x = (double)(long)x */
29103 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29104 expand_fix (xi, res, 0);
29105 expand_float (res, xi, 0);
29107 if (HONOR_SIGNED_ZEROS (mode))
29108 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29110 emit_label (label);
29111 LABEL_NUSES (label) = 1;
29113 emit_move_insn (operand0, res);
29116 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29117 into OPERAND0. */
29118 void
29119 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
29121 enum machine_mode mode = GET_MODE (operand0);
29122 rtx xa, mask, TWO52, label, one, res, smask, tmp;
29124 /* C code for SSE variant we expand below.
29125 double xa = fabs (x), x2;
29126 if (!isless (xa, TWO52))
29127 return x;
29128 xa2 = xa + TWO52 - TWO52;
29129 Compensate:
29130 if (xa2 > xa)
29131 xa2 -= 1.0;
29132 x2 = copysign (xa2, x);
29133 return x2;
29136 TWO52 = ix86_gen_TWO52 (mode);
29138 /* Temporary for holding the result, initialized to the input
29139 operand to ease control flow. */
29140 res = gen_reg_rtx (mode);
29141 emit_move_insn (res, operand1);
29143 /* xa = abs (operand1) */
29144 xa = ix86_expand_sse_fabs (res, &smask);
29146 /* if (!isless (xa, TWO52)) goto label; */
29147 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29149 /* res = xa + TWO52 - TWO52; */
29150 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29151 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29152 emit_move_insn (res, tmp);
29154 /* generate 1.0 */
29155 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29157 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29158 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29159 emit_insn (gen_rtx_SET (VOIDmode, mask,
29160 gen_rtx_AND (mode, mask, one)));
29161 tmp = expand_simple_binop (mode, MINUS,
29162 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29163 emit_move_insn (res, tmp);
29165 /* res = copysign (res, operand1) */
29166 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29168 emit_label (label);
29169 LABEL_NUSES (label) = 1;
29171 emit_move_insn (operand0, res);
29174 /* Expand SSE sequence for computing round from OPERAND1 storing
29175 into OPERAND0. */
29176 void
29177 ix86_expand_round (rtx operand0, rtx operand1)
29179 /* C code for the stuff we're doing below:
29180 double xa = fabs (x);
29181 if (!isless (xa, TWO52))
29182 return x;
29183 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29184 return copysign (xa, x);
29186 enum machine_mode mode = GET_MODE (operand0);
29187 rtx res, TWO52, xa, label, xi, half, mask;
29188 const struct real_format *fmt;
29189 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29191 /* Temporary for holding the result, initialized to the input
29192 operand to ease control flow. */
29193 res = gen_reg_rtx (mode);
29194 emit_move_insn (res, operand1);
29196 TWO52 = ix86_gen_TWO52 (mode);
29197 xa = ix86_expand_sse_fabs (res, &mask);
29198 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29200 /* load nextafter (0.5, 0.0) */
29201 fmt = REAL_MODE_FORMAT (mode);
29202 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29203 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29205 /* xa = xa + 0.5 */
29206 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29207 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29209 /* xa = (double)(int64_t)xa */
29210 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29211 expand_fix (xi, xa, 0);
29212 expand_float (xa, xi, 0);
29214 /* res = copysign (xa, operand1) */
29215 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29217 emit_label (label);
29218 LABEL_NUSES (label) = 1;
29220 emit_move_insn (operand0, res);
29224 /* Validate whether an SSE5 instruction's operand combination is valid.
29225 OPERANDS is the array of operands.
29226 NUM is the number of operands.
29227 USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
29228 NUM_MEMORY is the maximum number of memory operands to accept.
29229 When COMMUTATIVE is set, operands 1 and 2 can be swapped. */
29231 bool
29232 ix86_sse5_valid_op_p (rtx operands[], rtx insn ATTRIBUTE_UNUSED, int num,
29233 bool uses_oc0, int num_memory, bool commutative)
29235 int mem_mask;
29236 int mem_count;
29237 int i;
29239 /* Count the number of memory arguments */
29240 mem_mask = 0;
29241 mem_count = 0;
29242 for (i = 0; i < num; i++)
29244 enum machine_mode mode = GET_MODE (operands[i]);
29245 if (register_operand (operands[i], mode))
29248 else if (memory_operand (operands[i], mode))
29250 mem_mask |= (1 << i);
29251 mem_count++;
29254 else
29256 rtx pattern = PATTERN (insn);
29258 /* allow 0 for pcmov */
29259 if (GET_CODE (pattern) != SET
29260 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE
29261 || i < 2
29262 || operands[i] != CONST0_RTX (mode))
29263 return false;
29267 /* Special case pmacsdq{l,h} where we allow the 3rd argument to be
29268 a memory operation. */
29269 if (num_memory < 0)
29271 num_memory = -num_memory;
29272 if ((mem_mask & (1 << (num-1))) != 0)
29274 mem_mask &= ~(1 << (num-1));
29275 mem_count--;
29279 /* If there were no memory operations, allow the insn */
29280 if (mem_mask == 0)
29281 return true;
29283 /* Do not allow the destination register to be a memory operand. */
29284 else if (mem_mask & (1 << 0))
29285 return false;
29287 /* If there are too many memory operations, disallow the instruction. While
29288 the hardware only allows one memory reference, before register allocation
29289 we sometimes allow two memory operations for some insns so that
29290 code like the following can be optimized:
29292 float fmadd (float *a, float *b, float *c) { return (*a * *b) + *c; }
29294 or similar cases that are vectorized into using the fmaddss
29295 instruction. */
29296 else if (mem_count > num_memory)
29297 return false;
29299 /* Don't allow more than one memory operation if not optimizing. */
29300 else if (mem_count > 1 && !optimize)
29301 return false;
29303 else if (num == 4 && mem_count == 1)
29305 /* formats (destination is the first argument), example fmaddss:
29306 xmm1, xmm1, xmm2, xmm3/mem
29307 xmm1, xmm1, xmm2/mem, xmm3
29308 xmm1, xmm2, xmm3/mem, xmm1
29309 xmm1, xmm2/mem, xmm3, xmm1 */
29310 if (uses_oc0)
29311 return ((mem_mask == (1 << 1))
29312 || (mem_mask == (1 << 2))
29313 || (mem_mask == (1 << 3)));
29315 /* format, example pmacsdd:
29316 xmm1, xmm2, xmm3/mem, xmm1 */
29317 if (commutative)
29318 return (mem_mask == (1 << 2) || mem_mask == (1 << 1));
29319 else
29320 return (mem_mask == (1 << 2));
29323 else if (num == 4 && num_memory == 2)
29325 /* If there are two memory operations, we can load one of the memory ops
29326 into the destination register. This is for optimizing the
29327 multiply/add ops, for which the combiner has given both the multiply
29328 and the add insns a memory operand. We have to be careful
29329 that the destination doesn't overlap with the inputs. */
29330 rtx op0 = operands[0];
29332 if (reg_mentioned_p (op0, operands[1])
29333 || reg_mentioned_p (op0, operands[2])
29334 || reg_mentioned_p (op0, operands[3]))
29335 return false;
29337 /* formats (destination is the first argument), example fmaddss:
29338 xmm1, xmm1, xmm2, xmm3/mem
29339 xmm1, xmm1, xmm2/mem, xmm3
29340 xmm1, xmm2, xmm3/mem, xmm1
29341 xmm1, xmm2/mem, xmm3, xmm1
29343 For the oc0 case, we will load either operands[1] or operands[3] into
29344 operands[0], so any combination of 2 memory operands is ok. */
29345 if (uses_oc0)
29346 return true;
29348 /* format, example pmacsdd:
29349 xmm1, xmm2, xmm3/mem, xmm1
29351 For the integer multiply/add instructions be more restrictive and
29352 require operands[2] and operands[3] to be the memory operands. */
29353 if (commutative)
29354 return (mem_mask == ((1 << 1) | (1 << 3)) || mem_mask == ((1 << 2) | (1 << 3)));
29355 else
29356 return (mem_mask == ((1 << 2) | (1 << 3)));
29359 else if (num == 3 && num_memory == 1)
29361 /* formats, example protb:
29362 xmm1, xmm2, xmm3/mem
29363 xmm1, xmm2/mem, xmm3 */
29364 if (uses_oc0)
29365 return ((mem_mask == (1 << 1)) || (mem_mask == (1 << 2)));
29367 /* format, example comeq:
29368 xmm1, xmm2, xmm3/mem */
29369 else
29370 return (mem_mask == (1 << 2));
29373 else
29374 gcc_unreachable ();
29376 return false;
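/* Example of the encoding used above: for fmaddss with operands
   xmm1, xmm1, xmm2, mem (NUM == 4), only operands[3] is a memory
   reference, so mem_count == 1 and mem_mask == (1 << 3), which the
   uses_oc0 case accepts.  */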
29380 /* Fix up an SSE5 instruction that has 2 memory input references into a form the
29381 hardware will allow by using the destination register to load one of the
29382 memory operations. Presently this is used by the multiply/add routines to
29383 allow 2 memory references. */
29385 void
29386 ix86_expand_sse5_multiple_memory (rtx operands[],
29387 int num,
29388 enum machine_mode mode)
29390 rtx op0 = operands[0];
29391 if (num != 4
29392 || memory_operand (op0, mode)
29393 || reg_mentioned_p (op0, operands[1])
29394 || reg_mentioned_p (op0, operands[2])
29395 || reg_mentioned_p (op0, operands[3]))
29396 gcc_unreachable ();
29398 /* For 2 memory operands, pick either operands[1] or operands[3] to move into
29399 the destination register. */
29400 if (memory_operand (operands[1], mode))
29402 emit_move_insn (op0, operands[1]);
29403 operands[1] = op0;
29405 else if (memory_operand (operands[3], mode))
29407 emit_move_insn (op0, operands[3]);
29408 operands[3] = op0;
29410 else
29411 gcc_unreachable ();
29413 return;
29417 /* Table of valid machine attributes. */
29418 static const struct attribute_spec ix86_attribute_table[] =
29420 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29421 /* Stdcall attribute says callee is responsible for popping arguments
29422 if they are not variable. */
29423 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29424 /* Fastcall attribute says callee is responsible for popping arguments
29425 if they are not variable. */
29426 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29427 /* Cdecl attribute says the callee is a normal C declaration */
29428 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29429 /* Regparm attribute specifies how many integer arguments are to be
29430 passed in registers. */
29431 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29432 /* Sseregparm attribute says we are using x86_64 calling conventions
29433 for FP arguments. */
29434 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29435 /* force_align_arg_pointer says this function realigns the stack at entry. */
29436 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29437 false, true, true, ix86_handle_cconv_attribute },
29438 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29439 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29440 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29441 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29442 #endif
29443 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29444 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29445 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29446 SUBTARGET_ATTRIBUTE_TABLE,
29447 #endif
29448 /* ms_abi and sysv_abi calling convention function attributes. */
29449 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29450 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29451 /* End element. */
29452 { NULL, 0, 0, false, false, false, NULL }
29455 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29456 static int
29457 x86_builtin_vectorization_cost (bool runtime_test)
29459 /* If the branch of the runtime test is taken - i.e. the vectorized
29460 version is skipped - this incurs a misprediction cost (because the
29461 vectorized version is expected to be the fall-through). So we subtract
29462 the latency of a mispredicted branch from the costs that are incurred
29463 when the vectorized version is executed.
29465 TODO: The values in individual target tables have to be tuned or new
29466 fields may be needed. For example, on K8 the default branch path is the
29467 not-taken path. If the taken path is predicted correctly, the minimum
29468 penalty of going down the taken path is 1 cycle. If the taken path is
29469 not predicted correctly, then the minimum penalty is 10 cycles. */
29471 if (runtime_test)
29473 return (-(ix86_cost->cond_taken_branch_cost));
29475 else
29476 return 0;
29479 /* This function returns the calling-ABI-specific va_list type node.
29480 It returns the FNDECL-specific va_list type. */
29482 tree
29483 ix86_fn_abi_va_list (tree fndecl)
29485 if (!TARGET_64BIT)
29486 return va_list_type_node;
29487 gcc_assert (fndecl != NULL_TREE);
29489 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
29490 return ms_va_list_type_node;
29491 else
29492 return sysv_va_list_type_node;
29495 /* Returns the canonical va_list type specified by TYPE. If there
29496 is no valid TYPE provided, it returns NULL_TREE. */
29498 tree
29499 ix86_canonical_va_list_type (tree type)
29501 tree wtype, htype;
29503 /* Resolve references and pointers to va_list type. */
29504 if (INDIRECT_REF_P (type))
29505 type = TREE_TYPE (type);
29506 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
29507 type = TREE_TYPE (type);
29509 if (TARGET_64BIT)
29511 wtype = va_list_type_node;
29512 gcc_assert (wtype != NULL_TREE);
29513 htype = type;
29514 if (TREE_CODE (wtype) == ARRAY_TYPE)
29516 /* If va_list is an array type, the argument may have decayed
29517 to a pointer type, e.g. by being passed to another function.
29518 In that case, unwrap both types so that we can compare the
29519 underlying records. */
29520 if (TREE_CODE (htype) == ARRAY_TYPE
29521 || POINTER_TYPE_P (htype))
29523 wtype = TREE_TYPE (wtype);
29524 htype = TREE_TYPE (htype);
29527 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
29528 return va_list_type_node;
29529 wtype = sysv_va_list_type_node;
29530 gcc_assert (wtype != NULL_TREE);
29531 htype = type;
29532 if (TREE_CODE (wtype) == ARRAY_TYPE)
29534 /* If va_list is an array type, the argument may have decayed
29535 to a pointer type, e.g. by being passed to another function.
29536 In that case, unwrap both types so that we can compare the
29537 underlying records. */
29538 if (TREE_CODE (htype) == ARRAY_TYPE
29539 || POINTER_TYPE_P (htype))
29541 wtype = TREE_TYPE (wtype);
29542 htype = TREE_TYPE (htype);
29545 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
29546 return sysv_va_list_type_node;
29547 wtype = ms_va_list_type_node;
29548 gcc_assert (wtype != NULL_TREE);
29549 htype = type;
29550 if (TREE_CODE (wtype) == ARRAY_TYPE)
29552 /* If va_list is an array type, the argument may have decayed
29553 to a pointer type, e.g. by being passed to another function.
29554 In that case, unwrap both types so that we can compare the
29555 underlying records. */
29556 if (TREE_CODE (htype) == ARRAY_TYPE
29557 || POINTER_TYPE_P (htype))
29559 wtype = TREE_TYPE (wtype);
29560 htype = TREE_TYPE (htype);
29563 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
29564 return ms_va_list_type_node;
29565 return NULL_TREE;
29567 return std_canonical_va_list_type (type);
29570 /* Iterate through the target-specific builtin types for va_list.
29571 IDX denotes the iterator, *PTREE is set to the result type of
29572 the va_list builtin, and *PNAME to its internal name.
29573 Returns zero if there is no element for this index, otherwise
29574 IDX should be increased upon the next call.
29575 Note, do not iterate a base builtin's name like __builtin_va_list.
29576 Used from c_common_nodes_and_builtins. */
29579 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
29581 if (!TARGET_64BIT)
29582 return 0;
29583 switch (idx) {
29584 case 0:
29585 *ptree = ms_va_list_type_node;
29586 *pname = "__builtin_ms_va_list";
29587 break;
29588 case 1:
29589 *ptree = sysv_va_list_type_node;
29590 *pname = "__builtin_sysv_va_list";
29591 break;
29592 default:
29593 return 0;
29595 return 1;
29598 /* Initialize the GCC target structure. */
29599 #undef TARGET_RETURN_IN_MEMORY
29600 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
29602 #undef TARGET_ATTRIBUTE_TABLE
29603 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
29604 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29605 # undef TARGET_MERGE_DECL_ATTRIBUTES
29606 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
29607 #endif
29609 #undef TARGET_COMP_TYPE_ATTRIBUTES
29610 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
29612 #undef TARGET_INIT_BUILTINS
29613 #define TARGET_INIT_BUILTINS ix86_init_builtins
29614 #undef TARGET_EXPAND_BUILTIN
29615 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
29617 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
29618 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
29619 ix86_builtin_vectorized_function
29621 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
29622 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
29624 #undef TARGET_BUILTIN_RECIPROCAL
29625 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
29627 #undef TARGET_ASM_FUNCTION_EPILOGUE
29628 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
29630 #undef TARGET_ENCODE_SECTION_INFO
29631 #ifndef SUBTARGET_ENCODE_SECTION_INFO
29632 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
29633 #else
29634 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
29635 #endif
29637 #undef TARGET_ASM_OPEN_PAREN
29638 #define TARGET_ASM_OPEN_PAREN ""
29639 #undef TARGET_ASM_CLOSE_PAREN
29640 #define TARGET_ASM_CLOSE_PAREN ""
29642 #undef TARGET_ASM_ALIGNED_HI_OP
29643 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
29644 #undef TARGET_ASM_ALIGNED_SI_OP
29645 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
29646 #ifdef ASM_QUAD
29647 #undef TARGET_ASM_ALIGNED_DI_OP
29648 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
29649 #endif
29651 #undef TARGET_ASM_UNALIGNED_HI_OP
29652 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
29653 #undef TARGET_ASM_UNALIGNED_SI_OP
29654 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
29655 #undef TARGET_ASM_UNALIGNED_DI_OP
29656 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
29658 #undef TARGET_SCHED_ADJUST_COST
29659 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
29660 #undef TARGET_SCHED_ISSUE_RATE
29661 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
29662 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
29663 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
29664 ia32_multipass_dfa_lookahead
29666 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
29667 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
29669 #ifdef HAVE_AS_TLS
29670 #undef TARGET_HAVE_TLS
29671 #define TARGET_HAVE_TLS true
29672 #endif
29673 #undef TARGET_CANNOT_FORCE_CONST_MEM
29674 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
29675 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
29676 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
29678 #undef TARGET_DELEGITIMIZE_ADDRESS
29679 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
29681 #undef TARGET_MS_BITFIELD_LAYOUT_P
29682 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
29684 #if TARGET_MACHO
29685 #undef TARGET_BINDS_LOCAL_P
29686 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
29687 #endif
29688 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29689 #undef TARGET_BINDS_LOCAL_P
29690 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
29691 #endif
29693 #undef TARGET_ASM_OUTPUT_MI_THUNK
29694 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
29695 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
29696 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
29698 #undef TARGET_ASM_FILE_START
29699 #define TARGET_ASM_FILE_START x86_file_start
29701 #undef TARGET_DEFAULT_TARGET_FLAGS
29702 #define TARGET_DEFAULT_TARGET_FLAGS \
29703 (TARGET_DEFAULT \
29704 | TARGET_SUBTARGET_DEFAULT \
29705 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
29707 #undef TARGET_HANDLE_OPTION
29708 #define TARGET_HANDLE_OPTION ix86_handle_option
29710 #undef TARGET_RTX_COSTS
29711 #define TARGET_RTX_COSTS ix86_rtx_costs
29712 #undef TARGET_ADDRESS_COST
29713 #define TARGET_ADDRESS_COST ix86_address_cost
29715 #undef TARGET_FIXED_CONDITION_CODE_REGS
29716 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
29717 #undef TARGET_CC_MODES_COMPATIBLE
29718 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
29720 #undef TARGET_MACHINE_DEPENDENT_REORG
29721 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
29723 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
29724 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
29726 #undef TARGET_BUILD_BUILTIN_VA_LIST
29727 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
29729 #undef TARGET_FN_ABI_VA_LIST
29730 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
29732 #undef TARGET_CANONICAL_VA_LIST_TYPE
29733 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
29735 #undef TARGET_EXPAND_BUILTIN_VA_START
29736 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
29738 #undef TARGET_MD_ASM_CLOBBERS
29739 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
29741 #undef TARGET_PROMOTE_PROTOTYPES
29742 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
29743 #undef TARGET_STRUCT_VALUE_RTX
29744 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
29745 #undef TARGET_SETUP_INCOMING_VARARGS
29746 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
29747 #undef TARGET_MUST_PASS_IN_STACK
29748 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
29749 #undef TARGET_PASS_BY_REFERENCE
29750 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
29751 #undef TARGET_INTERNAL_ARG_POINTER
29752 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
29753 #undef TARGET_UPDATE_STACK_BOUNDARY
29754 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
29755 #undef TARGET_GET_DRAP_RTX
29756 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
29757 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
29758 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
29759 #undef TARGET_STRICT_ARGUMENT_NAMING
29760 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
29762 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
29763 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
29765 #undef TARGET_SCALAR_MODE_SUPPORTED_P
29766 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
29768 #undef TARGET_VECTOR_MODE_SUPPORTED_P
29769 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
29771 #undef TARGET_C_MODE_FOR_SUFFIX
29772 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
29774 #ifdef HAVE_AS_TLS
29775 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
29776 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
29777 #endif
29779 #ifdef SUBTARGET_INSERT_ATTRIBUTES
29780 #undef TARGET_INSERT_ATTRIBUTES
29781 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
29782 #endif
29784 #undef TARGET_MANGLE_TYPE
29785 #define TARGET_MANGLE_TYPE ix86_mangle_type
29787 #undef TARGET_STACK_PROTECT_FAIL
29788 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
29790 #undef TARGET_FUNCTION_VALUE
29791 #define TARGET_FUNCTION_VALUE ix86_function_value
29793 #undef TARGET_SECONDARY_RELOAD
29794 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
29796 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
29797 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST x86_builtin_vectorization_cost
29799 #undef TARGET_SET_CURRENT_FUNCTION
29800 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
29802 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
29803 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
29805 #undef TARGET_OPTION_SAVE
29806 #define TARGET_OPTION_SAVE ix86_function_specific_save
29808 #undef TARGET_OPTION_RESTORE
29809 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
29811 #undef TARGET_OPTION_PRINT
29812 #define TARGET_OPTION_PRINT ix86_function_specific_print
29814 #undef TARGET_OPTION_CAN_INLINE_P
29815 #define TARGET_OPTION_CAN_INLINE_P ix86_can_inline_p
29817 #undef TARGET_EXPAND_TO_RTL_HOOK
29818 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
29820 struct gcc_target targetm = TARGET_INITIALIZER;
29822 #include "gt-i386.h"