/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "c-common.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
#include "params.h"
#include "cselib.h"

static int x86_builtin_vectorization_cost (bool);
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return the index of the given mode in the mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
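
/* Illustrative example (a sketch, not from the original file): the
   multiply-start cost for SImode on the active tuning would be looked
   up as

     ix86_cost->mult_init[MODE_INDEX (SImode)]

   with any mode wider than DImode falling into the last ("other")
   slot, index 4.  */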

/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
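
/* Under that assumption, a plain 2-byte add costs
   COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1), so byte-based and
   insn-based cost tables stay on the same scale.  */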

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
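
/* How to read the stringop entries in the cost tables below (an
   explanatory sketch): each stringop_algs value names the algorithm
   used for unknown block sizes, followed by {max, alg} pairs for known
   sizes, where max == -1 covers all remaining sizes.  For instance

     {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}}

   means: unknown size -> libcall, size <= 256 -> rep_prefix_4_byte,
   anything larger -> libcall.  Each cost table carries one such pair of
   entries for memcpy and one for memset; the first member of each pair
   is used for 32bit code and the second for 64bit, with
   DUMMY_STRINGOP_ALGS filling the unused half.  */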

const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),            /* cost of an add instruction */
  COSTS_N_BYTES (3),            /* cost of a lea instruction */
  COSTS_N_BYTES (2),            /* variable shift costs */
  COSTS_N_BYTES (3),            /* constant shift costs */
  {COSTS_N_BYTES (3),           /* cost of starting multiply for QI */
   COSTS_N_BYTES (3),           /* HI */
   COSTS_N_BYTES (3),           /* SI */
   COSTS_N_BYTES (3),           /* DI */
   COSTS_N_BYTES (5)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),           /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),           /* HI */
   COSTS_N_BYTES (3),           /* SI */
   COSTS_N_BYTES (3),           /* DI */
   COSTS_N_BYTES (5)},          /* other */
  COSTS_N_BYTES (3),            /* cost of movsx */
  COSTS_N_BYTES (3),            /* cost of movzx */
  0,                            /* "large" insn */
  2,                            /* MOVE_RATIO */
  2,                            /* cost for loading QImode using movzbl */
  {2, 2, 2},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 2, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {2, 2, 2},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {2, 2, 2},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  3,                            /* cost of moving MMX register */
  {3, 3},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {3, 3},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  3,                            /* cost of moving SSE register */
  {3, 3, 3},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {3, 3, 3},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  0,                            /* size of l1 cache */
  0,                            /* size of l2 cache */
  0,                            /* size of prefetch block */
  0,                            /* number of parallel prefetches */
  2,                            /* Branch cost */
  COSTS_N_BYTES (2),            /* cost of FADD and FSUB insns. */
  COSTS_N_BYTES (2),            /* cost of FMUL instruction. */
  COSTS_N_BYTES (2),            /* cost of FDIV instruction. */
  COSTS_N_BYTES (2),            /* cost of FABS instruction. */
  COSTS_N_BYTES (2),            /* cost of FCHS instruction. */
  COSTS_N_BYTES (2),            /* cost of FSQRT instruction. */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  1,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  1,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {    /* 386 specific costs */
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (3),            /* variable shift costs */
  COSTS_N_INSNS (2),            /* constant shift costs */
  {COSTS_N_INSNS (6),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (6),           /* HI */
   COSTS_N_INSNS (6),           /* SI */
   COSTS_N_INSNS (6),           /* DI */
   COSTS_N_INSNS (6)},          /* other */
  COSTS_N_INSNS (1),            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),          /* HI */
   COSTS_N_INSNS (23),          /* SI */
   COSTS_N_INSNS (23),          /* DI */
   COSTS_N_INSNS (23)},         /* other */
  COSTS_N_INSNS (3),            /* cost of movsx */
  COSTS_N_INSNS (2),            /* cost of movzx */
  15,                           /* "large" insn */
  3,                            /* MOVE_RATIO */
  4,                            /* cost for loading QImode using movzbl */
  {2, 4, 2},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 4, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {8, 8, 8},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {8, 8, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {4, 8},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 8},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 8, 16},                   /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 8, 16},                   /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  0,                            /* size of l1 cache */
  0,                            /* size of l2 cache */
  0,                            /* size of prefetch block */
  0,                            /* number of parallel prefetches */
  1,                            /* Branch cost */
  COSTS_N_INSNS (23),           /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (27),           /* cost of FMUL instruction. */
  COSTS_N_INSNS (88),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (22),           /* cost of FABS instruction. */
  COSTS_N_INSNS (24),           /* cost of FCHS instruction. */
  COSTS_N_INSNS (122),          /* cost of FSQRT instruction. */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs i486_cost = {    /* 486 specific costs */
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (3),            /* variable shift costs */
  COSTS_N_INSNS (2),            /* constant shift costs */
  {COSTS_N_INSNS (12),          /* cost of starting multiply for QI */
   COSTS_N_INSNS (12),          /* HI */
   COSTS_N_INSNS (12),          /* SI */
   COSTS_N_INSNS (12),          /* DI */
   COSTS_N_INSNS (12)},         /* other */
  1,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),          /* HI */
   COSTS_N_INSNS (40),          /* SI */
   COSTS_N_INSNS (40),          /* DI */
   COSTS_N_INSNS (40)},         /* other */
  COSTS_N_INSNS (3),            /* cost of movsx */
  COSTS_N_INSNS (2),            /* cost of movzx */
  15,                           /* "large" insn */
  3,                            /* MOVE_RATIO */
  4,                            /* cost for loading QImode using movzbl */
  {2, 4, 2},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 4, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {8, 8, 8},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {8, 8, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {4, 8},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 8},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 8, 16},                   /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 8, 16},                   /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  4,                            /* size of l1 cache.  486 has 8kB cache
                                   shared for code and data, so 4kB is
                                   not really precise.  */
  4,                            /* size of l2 cache */
  0,                            /* size of prefetch block */
  0,                            /* number of parallel prefetches */
  1,                            /* Branch cost */
  COSTS_N_INSNS (8),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (16),           /* cost of FMUL instruction. */
  COSTS_N_INSNS (73),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (3),            /* cost of FABS instruction. */
  COSTS_N_INSNS (3),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (83),           /* cost of FSQRT instruction. */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (4),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (11),          /* cost of starting multiply for QI */
   COSTS_N_INSNS (11),          /* HI */
   COSTS_N_INSNS (11),          /* SI */
   COSTS_N_INSNS (11),          /* DI */
   COSTS_N_INSNS (11)},         /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),          /* HI */
   COSTS_N_INSNS (25),          /* SI */
   COSTS_N_INSNS (25),          /* DI */
   COSTS_N_INSNS (25)},         /* other */
  COSTS_N_INSNS (3),            /* cost of movsx */
  COSTS_N_INSNS (2),            /* cost of movzx */
  8,                            /* "large" insn */
  6,                            /* MOVE_RATIO */
  6,                            /* cost for loading QImode using movzbl */
  {2, 4, 2},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 4, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {2, 2, 6},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 6},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  8,                            /* cost of moving MMX register */
  {8, 8},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {8, 8},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 8, 16},                   /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 8, 16},                   /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  8,                            /* size of l1 cache. */
  8,                            /* size of l2 cache */
  0,                            /* size of prefetch block */
  0,                            /* number of parallel prefetches */
  2,                            /* Branch cost */
  COSTS_N_INSNS (3),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (3),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (39),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (1),            /* cost of FABS instruction. */
  COSTS_N_INSNS (1),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (70),           /* cost of FSQRT instruction. */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (4),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (4),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (4)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),          /* HI */
   COSTS_N_INSNS (17),          /* SI */
   COSTS_N_INSNS (17),          /* DI */
   COSTS_N_INSNS (17)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  6,                            /* MOVE_RATIO */
  2,                            /* cost for loading QImode using movzbl */
  {4, 4, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 2, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {2, 2, 6},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 6},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {2, 2},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {2, 2},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {2, 2, 8},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {2, 2, 8},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  8,                            /* size of l1 cache. */
  256,                          /* size of l2 cache */
  32,                           /* size of prefetch block */
  6,                            /* number of parallel prefetches */
  2,                            /* Branch cost */
  COSTS_N_INSNS (3),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (5),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (56),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),            /* cost of FABS instruction. */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (56),           /* cost of FSQRT instruction. */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb has apparently more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (2),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (7),           /* SI */
   COSTS_N_INSNS (7),           /* DI */
   COSTS_N_INSNS (7)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),          /* HI */
   COSTS_N_INSNS (39),          /* SI */
   COSTS_N_INSNS (39),          /* DI */
   COSTS_N_INSNS (39)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  4,                            /* MOVE_RATIO */
  1,                            /* cost for loading QImode using movzbl */
  {1, 1, 1},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {1, 1, 1},                    /* cost of storing integer registers */
  1,                            /* cost of reg,reg fld/fst */
  {1, 1, 1},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 6, 6},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  1,                            /* cost of moving MMX register */
  {1, 1},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {1, 1},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  1,                            /* cost of moving SSE register */
  {1, 1, 1},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {1, 1, 1},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  1,                            /* MMX or SSE register to integer */
  64,                           /* size of l1 cache. */
  128,                          /* size of l2 cache. */
  32,                           /* size of prefetch block */
  1,                            /* number of parallel prefetches */
  1,                            /* Branch cost */
  COSTS_N_INSNS (6),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (11),           /* cost of FMUL instruction. */
  COSTS_N_INSNS (47),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (1),            /* cost of FABS instruction. */
  COSTS_N_INSNS (1),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (54),           /* cost of FSQRT instruction. */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (2),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (3),           /* DI */
   COSTS_N_INSNS (3)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),          /* HI */
   COSTS_N_INSNS (18),          /* SI */
   COSTS_N_INSNS (18),          /* DI */
   COSTS_N_INSNS (18)},         /* other */
  COSTS_N_INSNS (2),            /* cost of movsx */
  COSTS_N_INSNS (2),            /* cost of movzx */
  8,                            /* "large" insn */
  4,                            /* MOVE_RATIO */
  3,                            /* cost for loading QImode using movzbl */
  {4, 5, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 3, 2},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {6, 6, 6},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 4},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {2, 2},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {2, 2},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {2, 2, 8},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {2, 2, 8},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  6,                            /* MMX or SSE register to integer */
  32,                           /* size of l1 cache. */
  32,                           /* size of l2 cache.  Some models
                                   have integrated l2 cache, but
                                   optimizing for k6 is not important
                                   enough to worry about that.  */
  32,                           /* size of prefetch block */
  1,                            /* number of parallel prefetches */
  1,                            /* Branch cost */
  COSTS_N_INSNS (2),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (2),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (56),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),            /* cost of FABS instruction. */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (56),           /* cost of FSQRT instruction. */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (2),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (5),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (5),           /* HI */
   COSTS_N_INSNS (5),           /* SI */
   COSTS_N_INSNS (5),           /* DI */
   COSTS_N_INSNS (5)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),          /* HI */
   COSTS_N_INSNS (42),          /* SI */
   COSTS_N_INSNS (74),          /* DI */
   COSTS_N_INSNS (74)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  9,                            /* MOVE_RATIO */
  4,                            /* cost for loading QImode using movzbl */
  {3, 4, 3},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {3, 4, 3},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {4, 4, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {4, 4},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 4, 6},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 5},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  5,                            /* MMX or SSE register to integer */
  64,                           /* size of l1 cache. */
  256,                          /* size of l2 cache. */
  64,                           /* size of prefetch block */
  6,                            /* number of parallel prefetches */
  5,                            /* Branch cost */
  COSTS_N_INSNS (4),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (4),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (24),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),            /* cost of FABS instruction. */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (35),           /* cost of FSQRT instruction. */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (2),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (5)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),          /* HI */
   COSTS_N_INSNS (42),          /* SI */
   COSTS_N_INSNS (74),          /* DI */
   COSTS_N_INSNS (74)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  9,                            /* MOVE_RATIO */
  4,                            /* cost for loading QImode using movzbl */
  {3, 4, 3},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {3, 4, 3},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {4, 4, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {3, 3},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 3, 6},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 5},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  5,                            /* MMX or SSE register to integer */
  64,                           /* size of l1 cache. */
  512,                          /* size of l2 cache. */
  64,                           /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,                          /* number of parallel prefetches */
  3,                            /* Branch cost */
  COSTS_N_INSNS (4),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (4),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (19),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),            /* cost of FABS instruction. */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (35),           /* cost of FSQRT instruction. */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use a loop.  For large blocks, a libcall
     can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                            /* scalar_stmt_cost. */
  2,                            /* scalar load_cost. */
  2,                            /* scalar_store_cost. */
  5,                            /* vec_stmt_cost. */
  0,                            /* vec_to_scalar_cost. */
  2,                            /* scalar_to_vec_cost. */
  2,                            /* vec_align_load_cost. */
  3,                            /* vec_unalign_load_cost. */
  3,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  2,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (2),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (5)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),          /* HI */
   COSTS_N_INSNS (51),          /* SI */
   COSTS_N_INSNS (83),          /* DI */
   COSTS_N_INSNS (83)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  9,                            /* MOVE_RATIO */
  4,                            /* cost for loading QImode using movzbl */
  {3, 4, 3},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {3, 4, 3},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {4, 4, 12},                   /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {3, 3},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {4, 4, 3},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 5},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  3,                            /* MMX or SSE register to integer */
  /* On K8:
       MOVD reg64, xmmreg   Double   FSTORE 4
       MOVD reg32, xmmreg   Double   FSTORE 4
     On AMDFAM10:
       MOVD reg64, xmmreg   Double   FADD 3
                                     1/1  1/1
       MOVD reg32, xmmreg   Double   FADD 3
                                     1/1  1/1  */
  64,                           /* size of l1 cache. */
  512,                          /* size of l2 cache. */
  64,                           /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,                          /* number of parallel prefetches */
  2,                            /* Branch cost */
  COSTS_N_INSNS (4),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (4),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (19),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),            /* cost of FABS instruction. */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (35),           /* cost of FSQRT instruction. */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                            /* scalar_stmt_cost. */
  2,                            /* scalar load_cost. */
  2,                            /* scalar_store_cost. */
  6,                            /* vec_stmt_cost. */
  0,                            /* vec_to_scalar_cost. */
  2,                            /* scalar_to_vec_cost. */
  2,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  2,                            /* vec_store_cost. */
  2,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (3),            /* cost of a lea instruction */
  COSTS_N_INSNS (4),            /* variable shift costs */
  COSTS_N_INSNS (4),            /* constant shift costs */
  {COSTS_N_INSNS (15),          /* cost of starting multiply for QI */
   COSTS_N_INSNS (15),          /* HI */
   COSTS_N_INSNS (15),          /* SI */
   COSTS_N_INSNS (15),          /* DI */
   COSTS_N_INSNS (15)},         /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),          /* HI */
   COSTS_N_INSNS (56),          /* SI */
   COSTS_N_INSNS (56),          /* DI */
   COSTS_N_INSNS (56)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  16,                           /* "large" insn */
  6,                            /* MOVE_RATIO */
  2,                            /* cost for loading QImode using movzbl */
  {4, 5, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {2, 3, 2},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {2, 2, 6},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 6},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {2, 2},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {2, 2},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  12,                           /* cost of moving SSE register */
  {12, 12, 12},                 /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {2, 2, 8},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  10,                           /* MMX or SSE register to integer */
  8,                            /* size of l1 cache. */
  256,                          /* size of l2 cache. */
  64,                           /* size of prefetch block */
  6,                            /* number of parallel prefetches */
  2,                            /* Branch cost */
  COSTS_N_INSNS (5),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (7),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (43),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (2),            /* cost of FABS instruction. */
  COSTS_N_INSNS (2),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (43),           /* cost of FSQRT instruction. */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1),            /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (10),          /* cost of starting multiply for QI */
   COSTS_N_INSNS (10),          /* HI */
   COSTS_N_INSNS (10),          /* SI */
   COSTS_N_INSNS (10),          /* DI */
   COSTS_N_INSNS (10)},         /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),          /* HI */
   COSTS_N_INSNS (66),          /* SI */
   COSTS_N_INSNS (66),          /* DI */
   COSTS_N_INSNS (66)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  16,                           /* "large" insn */
  17,                           /* MOVE_RATIO */
  4,                            /* cost for loading QImode using movzbl */
  {4, 4, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {4, 4, 4},                    /* cost of storing integer registers */
  3,                            /* cost of reg,reg fld/fst */
  {12, 12, 12},                 /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 4},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  6,                            /* cost of moving MMX register */
  {12, 12},                     /* cost of loading MMX registers
                                   in SImode and DImode */
  {12, 12},                     /* cost of storing MMX registers
                                   in SImode and DImode */
  6,                            /* cost of moving SSE register */
  {12, 12, 12},                 /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {12, 12, 12},                 /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  8,                            /* MMX or SSE register to integer */
  8,                            /* size of l1 cache. */
  1024,                         /* size of l2 cache. */
  128,                          /* size of prefetch block */
  8,                            /* number of parallel prefetches */
  1,                            /* Branch cost */
  COSTS_N_INSNS (6),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (8),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (40),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (3),            /* cost of FABS instruction. */
  COSTS_N_INSNS (3),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (44),           /* cost of FSQRT instruction. */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
              {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

static const
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,        /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (3),           /* DI */
   COSTS_N_INSNS (3)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),          /* HI */
   COSTS_N_INSNS (22),          /* SI */
   COSTS_N_INSNS (22),          /* DI */
   COSTS_N_INSNS (22)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  16,                           /* MOVE_RATIO */
  2,                            /* cost for loading QImode using movzbl */
  {6, 6, 6},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {4, 4, 4},                    /* cost of storing integer registers */
  2,                            /* cost of reg,reg fld/fst */
  {6, 6, 6},                    /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {4, 4, 4},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {6, 6},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {4, 4},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {6, 6, 6},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {4, 4, 4},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  2,                            /* MMX or SSE register to integer */
  32,                           /* size of l1 cache. */
  2048,                         /* size of l2 cache. */
  128,                          /* size of prefetch block */
  8,                            /* number of parallel prefetches */
  3,                            /* Branch cost */
  COSTS_N_INSNS (3),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (5),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (32),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (1),            /* cost of FABS instruction. */
  COSTS_N_INSNS (1),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (58),           /* cost of FSQRT instruction. */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

/* Generic64 should produce code tuned for Nocona and K8.  */
static const
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  /* On all chips taken into consideration lea is 2 cycles and more.  With
     this cost however our current implementation of synth_mult results in
     use of unnecessary temporary registers causing regression on several
     SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,        /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (2)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),          /* HI */
   COSTS_N_INSNS (42),          /* SI */
   COSTS_N_INSNS (74),          /* DI */
   COSTS_N_INSNS (74)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  17,                           /* MOVE_RATIO */
  4,                            /* cost for loading QImode using movzbl */
  {4, 4, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {4, 4, 4},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {12, 12, 12},                 /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {8, 8},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {8, 8},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {8, 8, 8},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {8, 8, 8},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  5,                            /* MMX or SSE register to integer */
  32,                           /* size of l1 cache. */
  512,                          /* size of l2 cache. */
  64,                           /* size of prefetch block */
  6,                            /* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to the perhaps more appropriate value of 5.  */
  3,                            /* Branch cost */
  COSTS_N_INSNS (8),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (8),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (20),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (8),            /* cost of FABS instruction. */
  COSTS_N_INSNS (8),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (40),           /* cost of FSQRT instruction. */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */
static const
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),            /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,        /* cost of a lea instruction */
  COSTS_N_INSNS (1),            /* variable shift costs */
  COSTS_N_INSNS (1),            /* constant shift costs */
  {COSTS_N_INSNS (3),           /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),           /* HI */
   COSTS_N_INSNS (3),           /* SI */
   COSTS_N_INSNS (4),           /* DI */
   COSTS_N_INSNS (2)},          /* other */
  0,                            /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),          /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),          /* HI */
   COSTS_N_INSNS (42),          /* SI */
   COSTS_N_INSNS (74),          /* DI */
   COSTS_N_INSNS (74)},         /* other */
  COSTS_N_INSNS (1),            /* cost of movsx */
  COSTS_N_INSNS (1),            /* cost of movzx */
  8,                            /* "large" insn */
  17,                           /* MOVE_RATIO */
  4,                            /* cost for loading QImode using movzbl */
  {4, 4, 4},                    /* cost of loading integer registers
                                   in QImode, HImode and SImode.
                                   Relative to reg-reg move (2).  */
  {4, 4, 4},                    /* cost of storing integer registers */
  4,                            /* cost of reg,reg fld/fst */
  {12, 12, 12},                 /* cost of loading fp registers
                                   in SFmode, DFmode and XFmode */
  {6, 6, 8},                    /* cost of storing fp registers
                                   in SFmode, DFmode and XFmode */
  2,                            /* cost of moving MMX register */
  {8, 8},                       /* cost of loading MMX registers
                                   in SImode and DImode */
  {8, 8},                       /* cost of storing MMX registers
                                   in SImode and DImode */
  2,                            /* cost of moving SSE register */
  {8, 8, 8},                    /* cost of loading SSE registers
                                   in SImode, DImode and TImode */
  {8, 8, 8},                    /* cost of storing SSE registers
                                   in SImode, DImode and TImode */
  5,                            /* MMX or SSE register to integer */
  32,                           /* size of l1 cache. */
  256,                          /* size of l2 cache. */
  64,                           /* size of prefetch block */
  6,                            /* number of parallel prefetches */
  3,                            /* Branch cost */
  COSTS_N_INSNS (8),            /* cost of FADD and FSUB insns. */
  COSTS_N_INSNS (8),            /* cost of FMUL instruction. */
  COSTS_N_INSNS (20),           /* cost of FDIV instruction. */
  COSTS_N_INSNS (8),            /* cost of FABS instruction. */
  COSTS_N_INSNS (8),            /* cost of FCHS instruction. */
  COSTS_N_INSNS (40),           /* cost of FSQRT instruction. */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                            /* scalar_stmt_cost. */
  1,                            /* scalar load_cost. */
  1,                            /* scalar_store_cost. */
  1,                            /* vec_stmt_cost. */
  1,                            /* vec_to_scalar_cost. */
  1,                            /* scalar_to_vec_cost. */
  1,                            /* vec_align_load_cost. */
  2,                            /* vec_unalign_load_cost. */
  1,                            /* vec_store_cost. */
  3,                            /* cond_taken_branch_cost. */
  1,                            /* cond_not_taken_branch_cost. */
};

const struct processor_costs *ix86_cost = &pentium_cost;

/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
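
/* Illustrative sketch (not part of the original excerpt): once the active
   processor is known, each mask below is tested against it when the tuning
   flags are computed, roughly

     ix86_tune_features[i]
       = !!(initial_ix86_tune_features[i] & (1 << ix86_tune));

   and the results are then consumed through the TARGET_* tuning macros.  */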

/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];

/* Feature tests against the various tunings used to create ix86_tune_features
   based on the processor mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling it for Generic64 seems like a good code size
     tradeoff.  We can't enable it for 32bit generic because it does not
     work well with PPro base chips.  */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,

  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ZERO_EXTEND_WITH_AND */
  m_486 | m_PENT,

  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_PPRO | m_AMD_MULTIPLE | m_K6 | m_CORE2 | m_GENERIC,

  /* X86_TUNE_DEEP_BRANCH_PREDICTION */
  m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,

  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
     on simulation result.  But after P4 was made, no performance benefit
     was observed with branch hints.  It also increases the code size.
     As a result, icc never generates branch hints.  */
  0,

  /* X86_TUNE_DOUBLE_WITH_ADD */
  ~m_386,

  /* X86_TUNE_USE_SAHF */
  m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,

  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on Generic32 compilation setting as well.  However
     in current implementation the partial register stalls are not eliminated
     very well - they can be introduced via subregs synthesized by combine
     and can happen in caller/callee saving sequences.  Because this option
     pays back little on PPro based chips and is in conflict with partial reg
     dependencies used by Athlon/P4 based chips, it is better to leave it off
     for generic32 for now.  */
  m_PPRO,

  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_MOV0 */
  m_K6,

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
  m_PENT4,

  /* X86_TUNE_SPLIT_LONG_MOVES */
  m_PPRO,

  /* X86_TUNE_READ_MODIFY_WRITE */
  ~m_PENT,

  /* X86_TUNE_READ_MODIFY */
  ~(m_PENT | m_PPRO),

  /* X86_TUNE_PROMOTE_QIMODE */
  m_K6_GEODE | m_PENT | m_386 | m_486 | m_AMD_MULTIPLE | m_CORE2
  | m_GENERIC /* | m_PENT4 ? */,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_PENT | m_486 | m_386),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_PENT4 | m_NOCONA,

  /* X86_TUNE_QIMODE_MATH */
  ~0,

  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls was more effective.  */
  ~m_PPRO,

  /* X86_TUNE_PROMOTE_QI_REGS */
  0,

  /* X86_TUNE_PROMOTE_HI_REGS */
  m_PPRO,

  /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop.  */
  m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ADD_ESP_8 */
  m_AMD_MULTIPLE | m_PPRO | m_K6_GEODE | m_386
  | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_4 */
  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_8 */
  m_AMD_MULTIPLE | m_PPRO | m_386 | m_486
  | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
     for DFmode copies */
  ~(m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC | m_GEODE),

  /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
     conflict here in between PPro/Pentium4 based chips that treat 128bit
     SSE registers as single units versus K8 based chips that divide SSE
     registers to two 64bit halves.  This knob promotes all store destinations
     to be 128bit to allow register renaming on 128bit SSE units, but usually
     results in one extra microop on 64bit SSE units.  Experimental results
     show that disabling this option on P4 brings over 20% SPECfp regression,
     while enabling it on K8 brings roughly 2.4% regression that can be partly
     masked by careful scheduling of moves.  */
  m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_AMDFAM10,

  /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
  m_AMDFAM10,

  /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
     are resolved on SSE register parts instead of whole registers, so we may
     maintain just lower part of scalar values in proper format leaving the
     upper part undefined.  */
  m_ATHLON_K8,

  /* X86_TUNE_SSE_TYPELESS_STORES */
  m_AMD_MULTIPLE,

  /* X86_TUNE_SSE_LOAD0_BY_PXOR */
  m_PPRO | m_PENT4 | m_NOCONA,

  /* X86_TUNE_MEMORY_MISMATCH_STALL */
  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_PROLOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EPILOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHIFT1 */
  ~m_486,

  /* X86_TUNE_USE_FFREEP */
  m_AMD_MULTIPLE,

  /* X86_TUNE_INTER_UNIT_MOVES */
  ~(m_AMD_MULTIPLE | m_GENERIC),

  /* X86_TUNE_INTER_UNIT_CONVERSIONS */
  ~(m_AMDFAM10),

  /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
     than 4 branch instructions in the 16 byte window.  */
  m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SCHEDULE */
  m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_BT */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_INCDEC */
  ~(m_PENT4 | m_NOCONA | m_GENERIC),

  /* X86_TUNE_PAD_RETURNS */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EXT_80387_CONSTANTS */
  m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHORTEN_X87_SSE */
  ~m_K8,

  /* X86_TUNE_AVOID_VECTOR_DECODE */
  m_K8 | m_GENERIC64,

  /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
     HImode and SImode multiply, but 386 and 486 do HImode multiply faster.  */
  ~(m_386 | m_486),

  /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
     vector path on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
     machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
     than a MOV.  */
  m_PENT,

  /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
     but one byte longer.  */
  m_PENT,

  /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
     operand that cannot be represented using a modRM byte.  The XOR
     replacement is long decoded, so this split helps here as well.  */
  m_K6,

  /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
     from FP to FP.  */
  m_AMDFAM10 | m_GENERIC,

  /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
     from integer to FP.  */
  m_AMDFAM10,

  /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
     with a subsequent conditional jump instruction into a single
     compare-and-branch uop.  */
  m_CORE2,
};

/* Feature tests against the various architecture variations.  */
unsigned char ix86_arch_features[X86_ARCH_LAST];

/* Feature tests against the various architecture variations, used to create
   ix86_arch_features based on the processor mask.  */
1457 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1458 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1459 ~(m_386 | m_486 | m_PENT | m_K6),
1461 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1462 ~m_386,
1464 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1465 ~(m_386 | m_486),
1467 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1468 ~m_386,
1470 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1471 ~m_386,
1472 };
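/* Editorial sketch (not part of GCC): how the m_* masks above become
   per-feature booleans.  Each table entry is a bitmask of processors, and
   override_options tests the bit for the selected processor, as in
   ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask).
   The demo masks and names below are made up for illustration.  */
#if 0
#include <stdio.h>

#define m_386_demo  (1 << 0)
#define m_486_demo  (1 << 1)
#define m_PENT_demo (1 << 2)

static const unsigned int demo_arch_features[] = {
  /* DEMO_CMOVE: everything newer than the Pentium.  */
  ~(m_386_demo | m_486_demo | m_PENT_demo)
};

int
main (void)
{
  unsigned int arch = 3;               /* index of the selected processor */
  unsigned int arch_mask = 1u << arch; /* same trick override_options uses */
  printf ("cmove: %d\n", !!(demo_arch_features[0] & arch_mask)); /* prints 1 */
  return 0;
}
#endif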
1474 static const unsigned int x86_accumulate_outgoing_args
1475 = m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1477 static const unsigned int x86_arch_always_fancy_math_387
1478 = m_PENT | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1479 | m_NOCONA | m_CORE2 | m_GENERIC;
1481 static enum stringop_alg stringop_alg = no_stringop;
1483 /* In case the average insn count for single function invocation is
1484 lower than this constant, emit fast (but longer) prologue and
1485 epilogue code. */
1486 #define FAST_PROLOGUE_INSN_COUNT 20
1488 /* Names for 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1489 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1490 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1491 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1493 /* Array of the smallest class containing reg number REGNO, indexed by
1494 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1496 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1497 {
1498 /* ax, dx, cx, bx */
1499 AREG, DREG, CREG, BREG,
1500 /* si, di, bp, sp */
1501 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1502 /* FP registers */
1503 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1504 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1505 /* arg pointer */
1506 NON_Q_REGS,
1507 /* flags, fpsr, fpcr, frame */
1508 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1509 /* SSE registers */
1510 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1511 SSE_REGS, SSE_REGS,
1512 /* MMX registers */
1513 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1514 MMX_REGS, MMX_REGS,
1515 /* REX registers */
1516 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1517 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1518 /* SSE REX registers */
1519 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1520 SSE_REGS, SSE_REGS,
1521 };
1523 /* The "default" register map used in 32bit mode. */
1525 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1526 {
1527 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1528 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1529 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1530 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1531 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1532 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1533 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1534 };
1536 static int const x86_64_int_parameter_registers[6] =
1537 {
1538 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
1539 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1540 };
1542 static int const x86_64_ms_abi_int_parameter_registers[4] =
1543 {
1544 2 /*RCX*/, 1 /*RDX*/,
1545 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1546 };
1548 static int const x86_64_int_return_registers[4] =
1549 {
1550 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
1551 };
1553 /* The "default" register map used in 64bit mode. */
1554 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1555 {
1556 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1557 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1558 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1559 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1560 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1561 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
1562 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1563 };
1565 /* Define the register numbers to be used in Dwarf debugging information.
1566 The SVR4 reference port C compiler uses the following register numbers
1567 in its Dwarf output code:
1568 0 for %eax (gcc regno = 0)
1569 1 for %ecx (gcc regno = 2)
1570 2 for %edx (gcc regno = 1)
1571 3 for %ebx (gcc regno = 3)
1572 4 for %esp (gcc regno = 7)
1573 5 for %ebp (gcc regno = 6)
1574 6 for %esi (gcc regno = 4)
1575 7 for %edi (gcc regno = 5)
1576 The following three DWARF register numbers are never generated by
1577 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1578 believes these numbers have these meanings.
1579 8 for %eip (no gcc equivalent)
1580 9 for %eflags (gcc regno = 17)
1581 10 for %trapno (no gcc equivalent)
1582 It is not at all clear how we should number the FP stack registers
1583 for the x86 architecture. If the version of SDB on x86/svr4 were
1584 a bit less brain dead with respect to floating-point then we would
1585 have a precedent to follow with respect to DWARF register numbers
1586 for x86 FP registers, but the SDB on x86/svr4 is so completely
1587 broken with respect to FP registers that it is hardly worth thinking
1588 of it as something to strive for compatibility with.
1589 The version of x86/svr4 SDB I have at the moment does (partially)
1590 seem to believe that DWARF register number 11 is associated with
1591 the x86 register %st(0), but that's about all. Higher DWARF
1592 register numbers don't seem to be associated with anything in
1593 particular, and even for DWARF regno 11, SDB only seems to under-
1594 stand that it should say that a variable lives in %st(0) (when
1595 asked via an `=' command) if we said it was in DWARF regno 11,
1596 but SDB still prints garbage when asked for the value of the
1597 variable in question (via a `/' command).
1598 (Also note that the labels SDB prints for various FP stack regs
1599 when doing an `x' command are all wrong.)
1600 Note that these problems generally don't affect the native SVR4
1601 C compiler because it doesn't allow the use of -O with -g and
1602 because when it is *not* optimizing, it allocates a memory
1603 location for each floating-point variable, and the memory
1604 location is what gets described in the DWARF AT_location
1605 attribute for the variable in question.
1606 Regardless of the severe mental illness of the x86/svr4 SDB, we
1607 do something sensible here and we use the following DWARF
1608 register numbers. Note that these are all stack-top-relative
1609 numbers.
1610 11 for %st(0) (gcc regno = 8)
1611 12 for %st(1) (gcc regno = 9)
1612 13 for %st(2) (gcc regno = 10)
1613 14 for %st(3) (gcc regno = 11)
1614 15 for %st(4) (gcc regno = 12)
1615 16 for %st(5) (gcc regno = 13)
1616 17 for %st(6) (gcc regno = 14)
1617 18 for %st(7) (gcc regno = 15)
1618 */
1619 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1620 {
1621 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1622 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1623 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1624 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1625 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1626 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1627 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1628 };
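/* Editorial note (illustration, not part of GCC): the maps are indexed by
   GCC's internal register number and yield the debugger's register number.
   Reading the SVR4 table above together with the comment preceding it:  */
#if 0
  int dwarf_edx = svr4_dbx_register_map[1]; /* %edx: gcc regno 1 -> DWARF 2 */
  int dwarf_st0 = svr4_dbx_register_map[8]; /* %st(0): gcc regno 8 -> DWARF 11 */
#endif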
1630 /* Test and compare insns in i386.md store the information needed to
1631 generate branch and scc insns here. */
1633 rtx ix86_compare_op0 = NULL_RTX;
1634 rtx ix86_compare_op1 = NULL_RTX;
1635 rtx ix86_compare_emitted = NULL_RTX;
1637 /* Define the structure for the machine field in struct function. */
1639 struct stack_local_entry GTY(())
1640 {
1641 unsigned short mode;
1642 unsigned short n;
1643 rtx rtl;
1644 struct stack_local_entry *next;
1645 };
1647 /* Structure describing stack frame layout.
1648 Stack grows downward:
1650 [arguments]
1651 <- ARG_POINTER
1652 saved pc
1654 saved frame pointer if frame_pointer_needed
1655 <- HARD_FRAME_POINTER
1656 [saved regs]
1658 [padding1] \
1660 [va_arg registers] (
1661 > to_allocate <- FRAME_POINTER
1662 [frame] (
1664 [padding2] /
1665 */
1666 struct ix86_frame
1667 {
1668 int nregs;
1669 int padding1;
1670 int va_arg_size;
1671 HOST_WIDE_INT frame;
1672 int padding2;
1673 int outgoing_arguments_size;
1674 int red_zone_size;
1676 HOST_WIDE_INT to_allocate;
1677 /* The offsets relative to ARG_POINTER. */
1678 HOST_WIDE_INT frame_pointer_offset;
1679 HOST_WIDE_INT hard_frame_pointer_offset;
1680 HOST_WIDE_INT stack_pointer_offset;
1682 /* When save_regs_using_mov is set, emit prologue using
1683 move instead of push instructions. */
1684 bool save_regs_using_mov;
1685 };
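/* Editorial sketch, an assumption read off the diagram above rather than
   the actual ix86_compute_frame_layout code: the brace in the diagram marks
   to_allocate as covering padding1, the va_arg save area, the frame proper
   and padding2, i.e. roughly  */
#if 0
  frame->to_allocate = frame->padding1 + frame->va_arg_size
		       + frame->frame + frame->padding2;
#endif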
1687 /* Code model option. */
1688 enum cmodel ix86_cmodel;
1689 /* Asm dialect. */
1690 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1691 /* TLS dialects. */
1692 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1694 /* Which unit we are generating floating point math for. */
1695 enum fpmath_unit ix86_fpmath;
1697 /* Which cpu are we scheduling for. */
1698 enum attr_cpu ix86_schedule;
1700 /* Which cpu are we optimizing for. */
1701 enum processor_type ix86_tune;
1703 /* Which instruction set architecture to use. */
1704 enum processor_type ix86_arch;
1706 /* True if the SSE prefetch instruction is not a NOP. */
1707 int x86_prefetch_sse;
1709 /* ix86_regparm_string as a number */
1710 static int ix86_regparm;
1712 /* -mstackrealign option */
1713 extern int ix86_force_align_arg_pointer;
1714 static const char ix86_force_align_arg_pointer_string[]
1715 = "force_align_arg_pointer";
1717 static rtx (*ix86_gen_leave) (void);
1718 static rtx (*ix86_gen_pop1) (rtx);
1719 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1720 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1721 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx);
1722 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1723 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1724 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1726 /* Preferred alignment for stack boundary in bits. */
1727 unsigned int ix86_preferred_stack_boundary;
1729 /* Alignment for incoming stack boundary in bits specified at
1730 command line. */
1731 static unsigned int ix86_user_incoming_stack_boundary;
1733 /* Default alignment for incoming stack boundary in bits. */
1734 static unsigned int ix86_default_incoming_stack_boundary;
1736 /* Alignment for incoming stack boundary in bits. */
1737 unsigned int ix86_incoming_stack_boundary;
1739 /* Values 1-5: see jump.c */
1740 int ix86_branch_cost;
1742 /* Calling-ABI-specific va_list type nodes. */
1743 static GTY(()) tree sysv_va_list_type_node;
1744 static GTY(()) tree ms_va_list_type_node;
1746 /* Variables which are this size or smaller are put in the data/bss
1747 or ldata/lbss sections. */
1749 int ix86_section_threshold = 65536;
1751 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1752 char internal_label_prefix[16];
1753 int internal_label_prefix_len;
1755 /* Fence to use after loop using movnt. */
1756 tree x86_mfence;
1758 /* Register class used for passing a given 64bit part of the argument.
1759 These represent classes as documented by the psABI, with the exception
1760 of the SSESF and SSEDF classes, which are basically the SSE class; gcc
1761 will just use SFmode or DFmode moves instead of DImode to avoid reformatting penalties.
1763 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1764 whenever possible (the upper half does contain padding). */
1765 enum x86_64_reg_class
1766 {
1767 X86_64_NO_CLASS,
1768 X86_64_INTEGER_CLASS,
1769 X86_64_INTEGERSI_CLASS,
1770 X86_64_AVX_CLASS,
1771 X86_64_SSE_CLASS,
1772 X86_64_SSESF_CLASS,
1773 X86_64_SSEDF_CLASS,
1774 X86_64_SSEUP_CLASS,
1775 X86_64_X87_CLASS,
1776 X86_64_X87UP_CLASS,
1777 X86_64_COMPLEX_X87_CLASS,
1778 X86_64_MEMORY_CLASS
1779 };
1780 static const char * const x86_64_reg_class_name[] =
1781 {
1782 "no", "integer", "integerSI", "avx", "sse", "sseSF", "sseDF",
1783 "sseup", "x87", "x87up", "cplx87", "no"
1784 };
1786 #define MAX_CLASSES 4
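/* Editorial example of the classification in practice, assuming the usual
   x86-64 psABI rules: a 16-byte aggregate is split into two eightbytes and
   each is classified independently.  */
#if 0
  struct s { double d; int i; };
  /* First eightbyte (the double)     -> X86_64_SSEDF_CLASS,
     second eightbyte (int + padding) -> X86_64_INTEGERSI_CLASS,
     so the struct travels in one SSE and one integer register.  */
  static const enum x86_64_reg_class s_classes[2]
    = { X86_64_SSEDF_CLASS, X86_64_INTEGERSI_CLASS };
#endif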
1788 /* Table of constants used by fldpi, fldln2, etc.... */
1789 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1790 static bool ext_80387_constants_init = 0;
1793 static struct machine_function * ix86_init_machine_status (void);
1794 static rtx ix86_function_value (const_tree, const_tree, bool);
1795 static int ix86_function_regparm (const_tree, const_tree);
1796 static void ix86_compute_frame_layout (struct ix86_frame *);
1797 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1798 rtx, rtx, int);
1799 static void ix86_add_new_builtins (int);
1801 enum ix86_function_specific_strings
1802 {
1803 IX86_FUNCTION_SPECIFIC_ARCH,
1804 IX86_FUNCTION_SPECIFIC_TUNE,
1805 IX86_FUNCTION_SPECIFIC_FPMATH,
1806 IX86_FUNCTION_SPECIFIC_MAX
1807 };
1809 static char *ix86_target_string (int, int, const char *, const char *,
1810 const char *, bool);
1811 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1812 static void ix86_function_specific_save (struct cl_target_option *);
1813 static void ix86_function_specific_restore (struct cl_target_option *);
1814 static void ix86_function_specific_print (FILE *, int,
1815 struct cl_target_option *);
1816 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1817 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1818 static bool ix86_can_inline_p (tree, tree);
1819 static void ix86_set_current_function (tree);
1822 /* The svr4 ABI for the i386 says that records and unions are returned
1823 in memory. */
1824 #ifndef DEFAULT_PCC_STRUCT_RETURN
1825 #define DEFAULT_PCC_STRUCT_RETURN 1
1826 #endif
1828 /* Whether -mtune= or -march= were specified */
1829 static int ix86_tune_defaulted;
1830 static int ix86_arch_specified;
1832 /* Bit flags that specify the ISA we are compiling for. */
1833 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1835 /* A mask of ix86_isa_flags that includes bit X if X
1836 was set or cleared on the command line. */
1837 static int ix86_isa_flags_explicit;
1839 /* Define a set of ISAs which are available when a given ISA is
1840 enabled. MMX and SSE ISAs are handled separately. */
1842 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1843 #define OPTION_MASK_ISA_3DNOW_SET \
1844 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1846 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1847 #define OPTION_MASK_ISA_SSE2_SET \
1848 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1849 #define OPTION_MASK_ISA_SSE3_SET \
1850 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1851 #define OPTION_MASK_ISA_SSSE3_SET \
1852 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1853 #define OPTION_MASK_ISA_SSE4_1_SET \
1854 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1855 #define OPTION_MASK_ISA_SSE4_2_SET \
1856 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1857 #define OPTION_MASK_ISA_AVX_SET \
1858 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1859 #define OPTION_MASK_ISA_FMA_SET \
1860 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1862 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1863 as -msse4.2. */
1864 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1866 #define OPTION_MASK_ISA_SSE4A_SET \
1867 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1868 #define OPTION_MASK_ISA_SSE5_SET \
1869 (OPTION_MASK_ISA_SSE5 | OPTION_MASK_ISA_SSE4A_SET)
1871 /* AES and PCLMUL need SSE2 because they use xmm registers */
1872 #define OPTION_MASK_ISA_AES_SET \
1873 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1874 #define OPTION_MASK_ISA_PCLMUL_SET \
1875 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1877 #define OPTION_MASK_ISA_ABM_SET \
1878 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1879 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1880 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1881 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
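/* Editorial note: because each _SET macro ORs in the _SET of the ISA below
   it, the macros expand to the full prerequisite chain.  For example
   OPTION_MASK_ISA_SSE3_SET is SSE3 | SSE2 | SSE, so a single

     ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;

   enables SSE, SSE2 and SSE3 together.  */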
1883 /* Define a set of ISAs which aren't available when a given ISA is
1884 disabled. MMX and SSE ISAs are handled separately. */
1886 #define OPTION_MASK_ISA_MMX_UNSET \
1887 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1888 #define OPTION_MASK_ISA_3DNOW_UNSET \
1889 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1890 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1892 #define OPTION_MASK_ISA_SSE_UNSET \
1893 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1894 #define OPTION_MASK_ISA_SSE2_UNSET \
1895 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
1896 #define OPTION_MASK_ISA_SSE3_UNSET \
1897 (OPTION_MASK_ISA_SSE3 \
1898 | OPTION_MASK_ISA_SSSE3_UNSET \
1899 | OPTION_MASK_ISA_SSE4A_UNSET )
1900 #define OPTION_MASK_ISA_SSSE3_UNSET \
1901 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
1902 #define OPTION_MASK_ISA_SSE4_1_UNSET \
1903 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
1904 #define OPTION_MASK_ISA_SSE4_2_UNSET \
1905 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
1906 #define OPTION_MASK_ISA_AVX_UNSET \
1907 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET)
1908 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
1910 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
1911 as -mno-sse4.1. */
1912 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
1914 #define OPTION_MASK_ISA_SSE4A_UNSET \
1915 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE5_UNSET)
1916 #define OPTION_MASK_ISA_SSE5_UNSET OPTION_MASK_ISA_SSE5
1917 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
1918 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
1919 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
1920 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
1921 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
1922 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
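/* Editorial note, the mirror image of the _SET chains: the _UNSET macros
   cascade upward, so OPTION_MASK_ISA_SSE2_UNSET expands to SSE2 | SSE3
   | SSSE3 | SSE4.1 | SSE4.2 | AVX | FMA | SSE4A | SSE5, and

     ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;

   disables SSE2 and everything built on it in one statement.  */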
1924 /* Vectorization library interface and handlers. */
1925 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
1926 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
1927 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
1929 /* Processor target table, indexed by processor number */
1930 struct ptt
1931 {
1932 const struct processor_costs *cost; /* Processor costs */
1933 const int align_loop; /* Default alignments. */
1934 const int align_loop_max_skip;
1935 const int align_jump;
1936 const int align_jump_max_skip;
1937 const int align_func;
1938 };
1940 static const struct ptt processor_target_table[PROCESSOR_max] =
1941 {
1942 {&i386_cost, 4, 3, 4, 3, 4},
1943 {&i486_cost, 16, 15, 16, 15, 16},
1944 {&pentium_cost, 16, 7, 16, 7, 16},
1945 {&pentiumpro_cost, 16, 15, 16, 10, 16},
1946 {&geode_cost, 0, 0, 0, 0, 0},
1947 {&k6_cost, 32, 7, 32, 7, 32},
1948 {&athlon_cost, 16, 7, 16, 7, 16},
1949 {&pentium4_cost, 0, 0, 0, 0, 0},
1950 {&k8_cost, 16, 7, 16, 7, 16},
1951 {&nocona_cost, 0, 0, 0, 0, 0},
1952 {&core2_cost, 16, 10, 16, 10, 16},
1953 {&generic32_cost, 16, 7, 16, 7, 16},
1954 {&generic64_cost, 16, 10, 16, 10, 16},
1955 {&amdfam10_cost, 32, 24, 32, 7, 32}
1956 };
1958 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
1959 {
1960 "generic",
1961 "i386",
1962 "i486",
1963 "pentium",
1964 "pentium-mmx",
1965 "pentiumpro",
1966 "pentium2",
1967 "pentium3",
1968 "pentium4",
1969 "pentium-m",
1970 "prescott",
1971 "nocona",
1972 "core2",
1973 "geode",
1974 "k6",
1975 "k6-2",
1976 "k6-3",
1977 "athlon",
1978 "athlon-4",
1979 "k8",
1980 "amdfam10"
1983 /* Implement TARGET_HANDLE_OPTION. */
1985 static bool
1986 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1988 switch (code)
1990 case OPT_mmmx:
1991 if (value)
1993 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
1994 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
1996 else
1998 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
1999 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2001 return true;
2003 case OPT_m3dnow:
2004 if (value)
2006 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2007 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2009 else
2011 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2012 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2014 return true;
2016 case OPT_m3dnowa:
2017 return false;
2019 case OPT_msse:
2020 if (value)
2022 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2023 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2025 else
2027 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2028 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2030 return true;
2032 case OPT_msse2:
2033 if (value)
2035 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2036 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2038 else
2040 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2041 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2043 return true;
2045 case OPT_msse3:
2046 if (value)
2048 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2049 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2051 else
2053 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2054 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2056 return true;
2058 case OPT_mssse3:
2059 if (value)
2061 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2062 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2064 else
2066 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2067 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2069 return true;
2071 case OPT_msse4_1:
2072 if (value)
2074 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2075 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2077 else
2079 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2080 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2082 return true;
2084 case OPT_msse4_2:
2085 if (value)
2087 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2088 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2090 else
2092 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2093 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2095 return true;
2097 case OPT_mavx:
2098 if (value)
2100 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2101 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2103 else
2105 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2106 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2108 return true;
2110 case OPT_mfma:
2111 if (value)
2113 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2114 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2116 else
2118 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2119 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2121 return true;
2123 case OPT_msse4:
2124 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2125 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2126 return true;
2128 case OPT_mno_sse4:
2129 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2130 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2131 return true;
2133 case OPT_msse4a:
2134 if (value)
2136 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2137 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2139 else
2141 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2142 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2144 return true;
2146 case OPT_msse5:
2147 if (value)
2149 ix86_isa_flags |= OPTION_MASK_ISA_SSE5_SET;
2150 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_SET;
2152 else
2154 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE5_UNSET;
2155 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_UNSET;
2157 return true;
2159 case OPT_mabm:
2160 if (value)
2162 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2163 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2165 else
2167 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2168 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2170 return true;
2172 case OPT_mpopcnt:
2173 if (value)
2175 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2176 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2178 else
2180 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2181 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2183 return true;
2185 case OPT_msahf:
2186 if (value)
2188 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2189 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2191 else
2193 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2194 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2196 return true;
2198 case OPT_mcx16:
2199 if (value)
2201 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2202 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2204 else
2206 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2207 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2209 return true;
2211 case OPT_maes:
2212 if (value)
2214 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2215 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2217 else
2219 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2220 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2222 return true;
2224 case OPT_mpclmul:
2225 if (value)
2227 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2228 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2230 else
2232 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2233 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2235 return true;
2237 default:
2238 return true;
2239 }
2240 }
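/* Editorial sketch of the net effect of the handler above: every option
   applies its _SET or _UNSET mask and records the touched bits in
   ix86_isa_flags_explicit, so later defaulting code leaves them alone.  */
#if 0
  /* Command line "-msse3 -mno-sse2", processed left to right: */
  ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;    /* sets SSE | SSE2 | SSE3 */
  ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET; /* clears SSE2, SSE3, ... */
  /* Of the three, only OPTION_MASK_ISA_SSE remains enabled.  */
#endif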
2242 /* Return a string that documents the current -m options. The caller is
2243 responsible for freeing the string. */
2245 static char *
2246 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2247 const char *fpmath, bool add_nl_p)
2249 struct ix86_target_opts
2250 {
2251 const char *option; /* option string */
2252 int mask; /* isa mask options */
2253 };
2255 /* This table is ordered so that options like -msse5 or -msse4.2, which
2256 imply preceding options, match first. */
2257 static struct ix86_target_opts isa_opts[] =
2258 {
2259 { "-m64", OPTION_MASK_ISA_64BIT },
2260 { "-msse5", OPTION_MASK_ISA_SSE5 },
2261 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2262 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2263 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2264 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2265 { "-msse3", OPTION_MASK_ISA_SSE3 },
2266 { "-msse2", OPTION_MASK_ISA_SSE2 },
2267 { "-msse", OPTION_MASK_ISA_SSE },
2268 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2269 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2270 { "-mmmx", OPTION_MASK_ISA_MMX },
2271 { "-mabm", OPTION_MASK_ISA_ABM },
2272 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2273 { "-maes", OPTION_MASK_ISA_AES },
2274 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2277 /* Flag options. */
2278 static struct ix86_target_opts flag_opts[] =
2279 {
2280 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2281 { "-m80387", MASK_80387 },
2282 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2283 { "-malign-double", MASK_ALIGN_DOUBLE },
2284 { "-mcld", MASK_CLD },
2285 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2286 { "-mieee-fp", MASK_IEEE_FP },
2287 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2288 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2289 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2290 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2291 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2292 { "-mno-fused-madd", MASK_NO_FUSED_MADD },
2293 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2294 { "-mno-red-zone", MASK_NO_RED_ZONE },
2295 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2296 { "-mrecip", MASK_RECIP },
2297 { "-mrtd", MASK_RTD },
2298 { "-msseregparm", MASK_SSEREGPARM },
2299 { "-mstack-arg-probe", MASK_STACK_PROBE },
2300 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2303 const char *opts[ (sizeof (isa_opts) / sizeof (isa_opts[0])
2304 + sizeof (flag_opts) / sizeof (flag_opts[0])
2305 + 6)][2];
2307 char isa_other[40];
2308 char target_other[40];
2309 unsigned num = 0;
2310 unsigned i, j;
2311 char *ret;
2312 char *ptr;
2313 size_t len;
2314 size_t line_len;
2315 size_t sep_len;
2317 memset (opts, '\0', sizeof (opts));
2319 /* Add -march= option. */
2320 if (arch)
2322 opts[num][0] = "-march=";
2323 opts[num++][1] = arch;
2326 /* Add -mtune= option. */
2327 if (tune)
2329 opts[num][0] = "-mtune=";
2330 opts[num++][1] = tune;
2333 /* Pick out the options in isa options. */
2334 for (i = 0; i < sizeof (isa_opts) / sizeof (isa_opts[0]); i++)
2336 if ((isa & isa_opts[i].mask) != 0)
2338 opts[num++][0] = isa_opts[i].option;
2339 isa &= ~ isa_opts[i].mask;
2343 if (isa && add_nl_p)
2345 opts[num++][0] = isa_other;
2346 sprintf (isa_other, "(other isa: 0x%x)", isa);
2349 /* Add flag options. */
2350 for (i = 0; i < sizeof (flag_opts) / sizeof (flag_opts[0]); i++)
2352 if ((flags & flag_opts[i].mask) != 0)
2354 opts[num++][0] = flag_opts[i].option;
2355 flags &= ~ flag_opts[i].mask;
2359 if (flags && add_nl_p)
2361 opts[num++][0] = target_other;
2362 sprintf (target_other, "(other flags: 0x%x)", flags);
2365 /* Add -fpmath= option. */
2366 if (fpmath)
2368 opts[num][0] = "-mfpmath=";
2369 opts[num++][1] = fpmath;
2372 /* Any options? */
2373 if (num == 0)
2374 return NULL;
2376 gcc_assert (num < sizeof (opts) / sizeof (opts[0]));
2378 /* Size the string. */
2379 len = 0;
2380 sep_len = (add_nl_p) ? 3 : 1;
2381 for (i = 0; i < num; i++)
2383 len += sep_len;
2384 for (j = 0; j < 2; j++)
2385 if (opts[i][j])
2386 len += strlen (opts[i][j]);
2389 /* Build the string. */
2390 ret = ptr = (char *) xmalloc (len);
2391 line_len = 0;
2393 for (i = 0; i < num; i++)
2395 size_t len2[2];
2397 for (j = 0; j < 2; j++)
2398 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2400 if (i != 0)
2402 *ptr++ = ' ';
2403 line_len++;
2405 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2407 *ptr++ = '\\';
2408 *ptr++ = '\n';
2409 line_len = 0;
2413 for (j = 0; j < 2; j++)
2414 if (opts[i][j])
2416 memcpy (ptr, opts[i][j], len2[j]);
2417 ptr += len2[j];
2418 line_len += len2[j];
2422 *ptr = '\0';
2423 gcc_assert (ret + len >= ptr);
2425 return ret;
2428 /* Function that is callable from the debugger to print the current
2429 options. */
2430 void
2431 ix86_debug_options (void)
2433 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2434 ix86_arch_string, ix86_tune_string,
2435 ix86_fpmath_string, true);
2437 if (opts)
2439 fprintf (stderr, "%s\n\n", opts);
2440 free (opts);
2442 else
2443 fprintf (stderr, "<no options>\n\n");
2445 return;
2446 }
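/* Editorial note: for a hypothetical 64-bit "-march=core2" compile the
   string printed above would look roughly like

     -march=core2 -mtune=core2 -m64 -mssse3 -msse3 -msse2 -msse -mmmx ...

   i.e. -march=/-mtune= first, then the ISA options in table order; the
   exact contents depend on the defaults in effect.  */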
2448 /* Sometimes certain combinations of command options do not make
2449 sense on a particular target machine. You can define a macro
2450 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2451 defined, is executed once just after all the command options have
2452 been parsed.
2454 Don't use this macro to turn on various extra optimizations for
2455 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2457 void
2458 override_options (bool main_args_p)
2460 int i;
2461 unsigned int ix86_arch_mask, ix86_tune_mask;
2462 const char *prefix;
2463 const char *suffix;
2464 const char *sw;
2466 /* Comes from final.c -- no real reason to change it. */
2467 #define MAX_CODE_ALIGN 16
2469 enum pta_flags
2470 {
2471 PTA_SSE = 1 << 0,
2472 PTA_SSE2 = 1 << 1,
2473 PTA_SSE3 = 1 << 2,
2474 PTA_MMX = 1 << 3,
2475 PTA_PREFETCH_SSE = 1 << 4,
2476 PTA_3DNOW = 1 << 5,
2477 PTA_3DNOW_A = 1 << 6,
2478 PTA_64BIT = 1 << 7,
2479 PTA_SSSE3 = 1 << 8,
2480 PTA_CX16 = 1 << 9,
2481 PTA_POPCNT = 1 << 10,
2482 PTA_ABM = 1 << 11,
2483 PTA_SSE4A = 1 << 12,
2484 PTA_NO_SAHF = 1 << 13,
2485 PTA_SSE4_1 = 1 << 14,
2486 PTA_SSE4_2 = 1 << 15,
2487 PTA_SSE5 = 1 << 16,
2488 PTA_AES = 1 << 17,
2489 PTA_PCLMUL = 1 << 18,
2490 PTA_AVX = 1 << 19,
2491 PTA_FMA = 1 << 20
2492 };
2494 static struct pta
2495 {
2496 const char *const name; /* processor name or nickname. */
2497 const enum processor_type processor;
2498 const enum attr_cpu schedule;
2499 const unsigned /*enum pta_flags*/ flags;
2500 }
2501 const processor_alias_table[] =
2502 {
2503 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2504 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2505 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2506 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2507 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2508 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2509 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2510 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2511 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2512 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2513 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2514 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2515 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2516 PTA_MMX | PTA_SSE},
2517 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2518 PTA_MMX | PTA_SSE},
2519 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2520 PTA_MMX | PTA_SSE | PTA_SSE2},
2521 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2522 PTA_MMX | PTA_SSE | PTA_SSE2},
2523 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2524 PTA_MMX | PTA_SSE | PTA_SSE2},
2525 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2526 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2527 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2528 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2529 | PTA_CX16 | PTA_NO_SAHF},
2530 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2531 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2532 | PTA_SSSE3 | PTA_CX16},
2533 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2534 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2535 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2536 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2537 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2538 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2539 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2540 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2541 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2542 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2543 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2544 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2545 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2546 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2547 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2548 {"x86-64", PROCESSOR_K8, CPU_K8,
2549 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2550 {"k8", PROCESSOR_K8, CPU_K8,
2551 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2552 | PTA_SSE2 | PTA_NO_SAHF},
2553 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2554 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2555 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2556 {"opteron", PROCESSOR_K8, CPU_K8,
2557 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2558 | PTA_SSE2 | PTA_NO_SAHF},
2559 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2560 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2561 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2562 {"athlon64", PROCESSOR_K8, CPU_K8,
2563 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2564 | PTA_SSE2 | PTA_NO_SAHF},
2565 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2566 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2567 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2568 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2569 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2570 | PTA_SSE2 | PTA_NO_SAHF},
2571 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2572 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2573 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2574 {"barcelona", PROCESSOR_AMDFAM10, PROCESSOR_AMDFAM10,
2575 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2576 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2577 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2578 0 /* flags are only used for -march switch. */ },
2579 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2580 PTA_64BIT /* flags are only used for -march switch. */ },
2581 };
2583 int const pta_size = ARRAY_SIZE (processor_alias_table);
2585 /* Set up prefix/suffix so the error messages refer to either the command
2586 line argument, or the attribute(target). */
2587 if (main_args_p)
2589 prefix = "-m";
2590 suffix = "";
2591 sw = "switch";
2593 else
2595 prefix = "option(\"";
2596 suffix = "\")";
2597 sw = "attribute";
2600 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2601 SUBTARGET_OVERRIDE_OPTIONS;
2602 #endif
2604 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2605 SUBSUBTARGET_OVERRIDE_OPTIONS;
2606 #endif
2608 /* -fPIC is the default for x86_64. */
2609 if (TARGET_MACHO && TARGET_64BIT)
2610 flag_pic = 2;
2612 /* Set the default values for switches whose default depends on TARGET_64BIT
2613 in case they weren't overwritten by command line options. */
2614 if (TARGET_64BIT)
2616 /* Mach-O doesn't support omitting the frame pointer for now. */
2617 if (flag_omit_frame_pointer == 2)
2618 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2619 if (flag_asynchronous_unwind_tables == 2)
2620 flag_asynchronous_unwind_tables = 1;
2621 if (flag_pcc_struct_return == 2)
2622 flag_pcc_struct_return = 0;
2624 else
2626 if (flag_omit_frame_pointer == 2)
2627 flag_omit_frame_pointer = 0;
2628 if (flag_asynchronous_unwind_tables == 2)
2629 flag_asynchronous_unwind_tables = 0;
2630 if (flag_pcc_struct_return == 2)
2631 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2634 /* Need to check -mtune=generic first. */
2635 if (ix86_tune_string)
2637 if (!strcmp (ix86_tune_string, "generic")
2638 || !strcmp (ix86_tune_string, "i686")
2639 /* As special support for cross compilers we read -mtune=native
2640 as -mtune=generic. With native compilers we won't see the
2641 -mtune=native, as it was changed by the driver. */
2642 || !strcmp (ix86_tune_string, "native"))
2644 if (TARGET_64BIT)
2645 ix86_tune_string = "generic64";
2646 else
2647 ix86_tune_string = "generic32";
2649 /* If this call is for setting the option attribute, allow the
2650 generic32/generic64 that was previously set. */
2651 else if (!main_args_p
2652 && (!strcmp (ix86_tune_string, "generic32")
2653 || !strcmp (ix86_tune_string, "generic64")))
2655 else if (!strncmp (ix86_tune_string, "generic", 7))
2656 error ("bad value (%s) for %stune=%s %s",
2657 ix86_tune_string, prefix, suffix, sw);
2659 else
2661 if (ix86_arch_string)
2662 ix86_tune_string = ix86_arch_string;
2663 if (!ix86_tune_string)
2665 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2666 ix86_tune_defaulted = 1;
2669 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2670 need to use a sensible tune option. */
2671 if (!strcmp (ix86_tune_string, "generic")
2672 || !strcmp (ix86_tune_string, "x86-64")
2673 || !strcmp (ix86_tune_string, "i686"))
2675 if (TARGET_64BIT)
2676 ix86_tune_string = "generic64";
2677 else
2678 ix86_tune_string = "generic32";
2681 if (ix86_stringop_string)
2683 if (!strcmp (ix86_stringop_string, "rep_byte"))
2684 stringop_alg = rep_prefix_1_byte;
2685 else if (!strcmp (ix86_stringop_string, "libcall"))
2686 stringop_alg = libcall;
2687 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2688 stringop_alg = rep_prefix_4_byte;
2689 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
2690 stringop_alg = rep_prefix_8_byte;
2691 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2692 stringop_alg = loop_1_byte;
2693 else if (!strcmp (ix86_stringop_string, "loop"))
2694 stringop_alg = loop;
2695 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2696 stringop_alg = unrolled_loop;
2697 else
2698 error ("bad value (%s) for %sstringop-strategy=%s %s",
2699 ix86_stringop_string, prefix, suffix, sw);
2701 if (!strcmp (ix86_tune_string, "x86-64"))
2702 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2703 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2704 prefix, suffix, prefix, suffix, prefix, suffix);
2706 if (!ix86_arch_string)
2707 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
2708 else
2709 ix86_arch_specified = 1;
2711 if (!strcmp (ix86_arch_string, "generic"))
2712 error ("generic CPU can be used only for %stune=%s %s",
2713 prefix, suffix, sw);
2714 if (!strncmp (ix86_arch_string, "generic", 7))
2715 error ("bad value (%s) for %sarch=%s %s",
2716 ix86_arch_string, prefix, suffix, sw);
2718 if (ix86_cmodel_string != 0)
2720 if (!strcmp (ix86_cmodel_string, "small"))
2721 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2722 else if (!strcmp (ix86_cmodel_string, "medium"))
2723 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2724 else if (!strcmp (ix86_cmodel_string, "large"))
2725 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2726 else if (flag_pic)
2727 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2728 else if (!strcmp (ix86_cmodel_string, "32"))
2729 ix86_cmodel = CM_32;
2730 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2731 ix86_cmodel = CM_KERNEL;
2732 else
2733 error ("bad value (%s) for %scmodel=%s %s",
2734 ix86_cmodel_string, prefix, suffix, sw);
2736 else
2738 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2739 use of rip-relative addressing. This eliminates fixups that
2740 would otherwise be needed if this object is to be placed in a
2741 DLL, and is essentially just as efficient as direct addressing. */
2742 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2743 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2744 else if (TARGET_64BIT)
2745 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2746 else
2747 ix86_cmodel = CM_32;
2749 if (ix86_asm_string != 0)
2751 if (! TARGET_MACHO
2752 && !strcmp (ix86_asm_string, "intel"))
2753 ix86_asm_dialect = ASM_INTEL;
2754 else if (!strcmp (ix86_asm_string, "att"))
2755 ix86_asm_dialect = ASM_ATT;
2756 else
2757 error ("bad value (%s) for %sasm=%s %s",
2758 ix86_asm_string, prefix, suffix, sw);
2760 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2761 error ("code model %qs not supported in the %s bit mode",
2762 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2763 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2764 sorry ("%i-bit mode not compiled in",
2765 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2767 for (i = 0; i < pta_size; i++)
2768 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2770 ix86_schedule = processor_alias_table[i].schedule;
2771 ix86_arch = processor_alias_table[i].processor;
2772 /* Default cpu tuning to the architecture. */
2773 ix86_tune = ix86_arch;
2775 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2776 error ("CPU you selected does not support x86-64 "
2777 "instruction set");
2779 if (processor_alias_table[i].flags & PTA_MMX
2780 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2781 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2782 if (processor_alias_table[i].flags & PTA_3DNOW
2783 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2784 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2785 if (processor_alias_table[i].flags & PTA_3DNOW_A
2786 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2787 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2788 if (processor_alias_table[i].flags & PTA_SSE
2789 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2790 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2791 if (processor_alias_table[i].flags & PTA_SSE2
2792 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2793 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2794 if (processor_alias_table[i].flags & PTA_SSE3
2795 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2796 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2797 if (processor_alias_table[i].flags & PTA_SSSE3
2798 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2799 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2800 if (processor_alias_table[i].flags & PTA_SSE4_1
2801 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2802 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2803 if (processor_alias_table[i].flags & PTA_SSE4_2
2804 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2805 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2806 if (processor_alias_table[i].flags & PTA_AVX
2807 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2808 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2809 if (processor_alias_table[i].flags & PTA_FMA
2810 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2811 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2812 if (processor_alias_table[i].flags & PTA_SSE4A
2813 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
2814 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
2815 if (processor_alias_table[i].flags & PTA_SSE5
2816 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE5))
2817 ix86_isa_flags |= OPTION_MASK_ISA_SSE5;
2818 if (processor_alias_table[i].flags & PTA_ABM
2819 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
2820 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
2821 if (processor_alias_table[i].flags & PTA_CX16
2822 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
2823 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
2824 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
2825 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
2826 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
2827 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
2828 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
2829 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
2830 if (processor_alias_table[i].flags & PTA_AES
2831 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
2832 ix86_isa_flags |= OPTION_MASK_ISA_AES;
2833 if (processor_alias_table[i].flags & PTA_PCLMUL
2834 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
2835 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
2836 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
2837 x86_prefetch_sse = true;
2839 break;
2842 if (i == pta_size)
2843 error ("bad value (%s) for %sarch=%s %s",
2844 ix86_arch_string, prefix, suffix, sw);
2846 ix86_arch_mask = 1u << ix86_arch;
2847 for (i = 0; i < X86_ARCH_LAST; ++i)
2848 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
2850 for (i = 0; i < pta_size; i++)
2851 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
2853 ix86_schedule = processor_alias_table[i].schedule;
2854 ix86_tune = processor_alias_table[i].processor;
2855 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2857 if (ix86_tune_defaulted)
2859 ix86_tune_string = "x86-64";
2860 for (i = 0; i < pta_size; i++)
2861 if (! strcmp (ix86_tune_string,
2862 processor_alias_table[i].name))
2863 break;
2864 ix86_schedule = processor_alias_table[i].schedule;
2865 ix86_tune = processor_alias_table[i].processor;
2867 else
2868 error ("CPU you selected does not support x86-64 "
2869 "instruction set");
2871 /* Intel CPUs have always interpreted SSE prefetch instructions as
2872 NOPs; so, we can enable SSE prefetch instructions even when
2873 -mtune (rather than -march) points us to a processor that has them.
2874 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
2875 higher processors. */
2876 if (TARGET_CMOVE
2877 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
2878 x86_prefetch_sse = true;
2879 break;
2881 if (i == pta_size)
2882 error ("bad value (%s) for %stune=%s %s",
2883 ix86_tune_string, prefix, suffix, sw);
2885 ix86_tune_mask = 1u << ix86_tune;
2886 for (i = 0; i < X86_TUNE_LAST; ++i)
2887 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
2889 if (optimize_size)
2890 ix86_cost = &ix86_size_cost;
2891 else
2892 ix86_cost = processor_target_table[ix86_tune].cost;
2894 /* Arrange to set up i386_stack_locals for all functions. */
2895 init_machine_status = ix86_init_machine_status;
2897 /* Validate -mregparm= value. */
2898 if (ix86_regparm_string)
2900 if (TARGET_64BIT)
2901 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
2902 i = atoi (ix86_regparm_string);
2903 if (i < 0 || i > REGPARM_MAX)
2904 error ("%sregparm=%d%s is not between 0 and %d",
2905 prefix, i, suffix, REGPARM_MAX);
2906 else
2907 ix86_regparm = i;
2909 if (TARGET_64BIT)
2910 ix86_regparm = REGPARM_MAX;
2912 /* If the user has provided any of the -malign-* options,
2913 warn and use that value only if -falign-* is not set.
2914 Remove this code in GCC 3.2 or later. */
2915 if (ix86_align_loops_string)
2917 warning (0, "%salign-loops%s is obsolete, use %salign-loops%s",
2918 prefix, suffix, prefix, suffix);
2919 if (align_loops == 0)
2921 i = atoi (ix86_align_loops_string);
2922 if (i < 0 || i > MAX_CODE_ALIGN)
2923 error ("%salign-loops=%d%s is not between 0 and %d",
2924 prefix, i, suffix, MAX_CODE_ALIGN);
2925 else
2926 align_loops = 1 << i;
2930 if (ix86_align_jumps_string)
2932 warning (0, "%salign-jumps%s is obsolete, use %salign-jumps%s",
2933 prefix, suffix, prefix, suffix);
2934 if (align_jumps == 0)
2936 i = atoi (ix86_align_jumps_string);
2937 if (i < 0 || i > MAX_CODE_ALIGN)
2938 error ("%salign-loops=%d%s is not between 0 and %d",
2939 prefix, i, suffix, MAX_CODE_ALIGN);
2940 else
2941 align_jumps = 1 << i;
2945 if (ix86_align_funcs_string)
2947 warning (0, "%salign-functions%s is obsolete, use %salign-functions%s",
2948 prefix, suffix, prefix, suffix);
2949 if (align_functions == 0)
2951 i = atoi (ix86_align_funcs_string);
2952 if (i < 0 || i > MAX_CODE_ALIGN)
2953 error ("%salign-loops=%d%s is not between 0 and %d",
2954 prefix, i, suffix, MAX_CODE_ALIGN);
2955 else
2956 align_functions = 1 << i;
2957 }
2958 }
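/* Editorial note: the -malign-* values are exponents, not byte counts;
   e.g. a value of 4 yields an alignment of 1 << 4 = 16 bytes, matching
   the align_func column most processor_target_table entries use.  */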
2960 /* Default align_* from the processor table. */
2961 if (align_loops == 0)
2963 align_loops = processor_target_table[ix86_tune].align_loop;
2964 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2966 if (align_jumps == 0)
2968 align_jumps = processor_target_table[ix86_tune].align_jump;
2969 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2971 if (align_functions == 0)
2973 align_functions = processor_target_table[ix86_tune].align_func;
2976 /* Validate -mbranch-cost= value, or provide default. */
2977 ix86_branch_cost = ix86_cost->branch_cost;
2978 if (ix86_branch_cost_string)
2980 i = atoi (ix86_branch_cost_string);
2981 if (i < 0 || i > 5)
2982 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
2983 else
2984 ix86_branch_cost = i;
2986 if (ix86_section_threshold_string)
2988 i = atoi (ix86_section_threshold_string);
2989 if (i < 0)
2990 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
2991 else
2992 ix86_section_threshold = i;
2995 if (ix86_tls_dialect_string)
2997 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2998 ix86_tls_dialect = TLS_DIALECT_GNU;
2999 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3000 ix86_tls_dialect = TLS_DIALECT_GNU2;
3001 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
3002 ix86_tls_dialect = TLS_DIALECT_SUN;
3003 else
3004 error ("bad value (%s) for %stls-dialect=%s %s",
3005 ix86_tls_dialect_string, prefix, suffix, sw);
3008 if (ix87_precision_string)
3010 i = atoi (ix87_precision_string);
3011 if (i != 32 && i != 64 && i != 80)
3012 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3015 if (TARGET_64BIT)
3017 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3019 /* Enable by default the SSE and MMX builtins. Do allow the user to
3020 explicitly disable any of these. In particular, disabling SSE and
3021 MMX for kernel code is extremely useful. */
3022 if (!ix86_arch_specified)
3023 ix86_isa_flags
3024 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3025 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3027 if (TARGET_RTD)
3028 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3030 else
3032 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3034 if (!ix86_arch_specified)
3035 ix86_isa_flags
3036 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3038 /* The i386 ABI does not specify a red zone. It still makes sense to use
3039 it when the programmer takes care to keep the stack from being destroyed. */
3040 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3041 target_flags |= MASK_NO_RED_ZONE;
3044 /* Keep nonleaf frame pointers. */
3045 if (flag_omit_frame_pointer)
3046 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3047 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3048 flag_omit_frame_pointer = 1;
3050 /* If we're doing fast math, we don't care about comparison order
3051 wrt NaNs. This lets us use a shorter comparison sequence. */
3052 if (flag_finite_math_only)
3053 target_flags &= ~MASK_IEEE_FP;
3055 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3056 since the insns won't need emulation. */
3057 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3058 target_flags &= ~MASK_NO_FANCY_MATH_387;
3060 /* Likewise, if the target doesn't have a 387, or we've specified
3061 software floating point, don't use 387 inline intrinsics. */
3062 if (!TARGET_80387)
3063 target_flags |= MASK_NO_FANCY_MATH_387;
3065 /* Turn on MMX builtins for -msse. */
3066 if (TARGET_SSE)
3068 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3069 x86_prefetch_sse = true;
3072 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3073 if (TARGET_SSE4_2 || TARGET_ABM)
3074 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3076 /* Validate -mpreferred-stack-boundary= value or default it to
3077 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3078 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3079 if (ix86_preferred_stack_boundary_string)
3081 i = atoi (ix86_preferred_stack_boundary_string);
3082 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3083 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3084 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3085 else
3086 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
3087 }
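/* Editorial note: here too the value is an exponent in bytes, scaled to
   bits; -mpreferred-stack-boundary=4 gives (1 << 4) * 8 = 128 bits, the
   usual 16-byte ABI stack alignment.  */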
3089 /* Set the default value for -mstackrealign. */
3090 if (ix86_force_align_arg_pointer == -1)
3091 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3093 /* Validate -mincoming-stack-boundary= value or default it to
3094 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3095 if (ix86_force_align_arg_pointer)
3096 ix86_default_incoming_stack_boundary = MIN_STACK_BOUNDARY;
3097 else
3098 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3099 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3100 if (ix86_incoming_stack_boundary_string)
3102 i = atoi (ix86_incoming_stack_boundary_string);
3103 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3104 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3105 i, TARGET_64BIT ? 4 : 2);
3106 else
3108 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3109 ix86_incoming_stack_boundary
3110 = ix86_user_incoming_stack_boundary;
3114 /* Accept -msseregparm only if at least SSE support is enabled. */
3115 if (TARGET_SSEREGPARM
3116 && ! TARGET_SSE)
3117 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3119 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3120 if (ix86_fpmath_string != 0)
3122 if (! strcmp (ix86_fpmath_string, "387"))
3123 ix86_fpmath = FPMATH_387;
3124 else if (! strcmp (ix86_fpmath_string, "sse"))
3126 if (!TARGET_SSE)
3128 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3129 ix86_fpmath = FPMATH_387;
3131 else
3132 ix86_fpmath = FPMATH_SSE;
3134 else if (! strcmp (ix86_fpmath_string, "387,sse")
3135 || ! strcmp (ix86_fpmath_string, "387+sse")
3136 || ! strcmp (ix86_fpmath_string, "sse,387")
3137 || ! strcmp (ix86_fpmath_string, "sse+387")
3138 || ! strcmp (ix86_fpmath_string, "both"))
3140 if (!TARGET_SSE)
3142 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3143 ix86_fpmath = FPMATH_387;
3145 else if (!TARGET_80387)
3147 warning (0, "387 instruction set disabled, using SSE arithmetics");
3148 ix86_fpmath = FPMATH_SSE;
3150 else
3151 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3153 else
3154 error ("bad value (%s) for %sfpmath=%s %s",
3155 ix86_fpmath_string, prefix, suffix, sw);
3158 /* If the i387 is disabled, then do not return values in it. */
3159 if (!TARGET_80387)
3160 target_flags &= ~MASK_FLOAT_RETURNS;
3162 /* Use external vectorized library in vectorizing intrinsics. */
3163 if (ix86_veclibabi_string)
3165 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3166 ix86_veclib_handler = ix86_veclibabi_svml;
3167 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3168 ix86_veclib_handler = ix86_veclibabi_acml;
3169 else
3170 error ("unknown vectorization library ABI type (%s) for "
3171 "%sveclibabi=%s %s", ix86_veclibabi_string,
3172 prefix, suffix, sw);
3175 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3176 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3177 && !optimize_size)
3178 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3180 /* ??? Unwind info is not correct around the CFG unless either a frame
3181 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3182 unwind info generation to be aware of the CFG and propagating states
3183 around edges. */
3184 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3185 || flag_exceptions || flag_non_call_exceptions)
3186 && flag_omit_frame_pointer
3187 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3189 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3190 warning (0, "unwind tables currently require either a frame pointer "
3191 "or %saccumulate-outgoing-args%s for correctness",
3192 prefix, suffix);
3193 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3196 /* If stack probes are required, the space used for large function
3197 arguments on the stack must also be probed, so enable
3198 -maccumulate-outgoing-args so this happens in the prologue. */
3199 if (TARGET_STACK_PROBE
3200 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3202 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3203 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3204 "for correctness", prefix, suffix);
3205 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3208 /* For sane SSE instruction set generation we need the fcomi instruction.
3209 It is safe to enable all CMOVE instructions. */
3210 if (TARGET_SSE)
3211 TARGET_CMOVE = 1;
3213 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3215 char *p;
3216 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3217 p = strchr (internal_label_prefix, 'X');
3218 internal_label_prefix_len = p - internal_label_prefix;
3219 *p = '\0';
3222 /* When a scheduling description is not available, disable the scheduler
3223 pass so it won't slow down the compilation or make x87 code slower. */
3224 if (!TARGET_SCHEDULE)
3225 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3227 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3228 set_param_value ("simultaneous-prefetches",
3229 ix86_cost->simultaneous_prefetches);
3230 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3231 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3232 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3233 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3234 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3235 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3237 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3238 can be optimized to ap = __builtin_next_arg (0). */
3239 if (!TARGET_64BIT)
3240 targetm.expand_builtin_va_start = NULL;
3242 if (TARGET_64BIT)
3244 ix86_gen_leave = gen_leave_rex64;
3245 ix86_gen_pop1 = gen_popdi1;
3246 ix86_gen_add3 = gen_adddi3;
3247 ix86_gen_sub3 = gen_subdi3;
3248 ix86_gen_sub3_carry = gen_subdi3_carry_rex64;
3249 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3250 ix86_gen_monitor = gen_sse3_monitor64;
3251 ix86_gen_andsp = gen_anddi3;
3253 else
3255 ix86_gen_leave = gen_leave;
3256 ix86_gen_pop1 = gen_popsi1;
3257 ix86_gen_add3 = gen_addsi3;
3258 ix86_gen_sub3 = gen_subsi3;
3259 ix86_gen_sub3_carry = gen_subsi3_carry;
3260 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3261 ix86_gen_monitor = gen_sse3_monitor;
3262 ix86_gen_andsp = gen_andsi3;
3265 #ifdef USE_IX86_CLD
3266 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3267 if (!TARGET_64BIT)
3268 target_flags |= MASK_CLD & ~target_flags_explicit;
3269 #endif
3271 /* Save the initial options in case the user uses function-specific options. */
3272 if (main_args_p)
3273 target_option_default_node = target_option_current_node
3274 = build_target_option_node ();
3277 /* Save the current options */
3279 static void
3280 ix86_function_specific_save (struct cl_target_option *ptr)
3282 gcc_assert (IN_RANGE (ix86_arch, 0, 255));
3283 gcc_assert (IN_RANGE (ix86_schedule, 0, 255));
3284 gcc_assert (IN_RANGE (ix86_tune, 0, 255));
3285 gcc_assert (IN_RANGE (ix86_fpmath, 0, 255));
3286 gcc_assert (IN_RANGE (ix86_branch_cost, 0, 255));
3288 ptr->arch = ix86_arch;
3289 ptr->schedule = ix86_schedule;
3290 ptr->tune = ix86_tune;
3291 ptr->fpmath = ix86_fpmath;
3292 ptr->branch_cost = ix86_branch_cost;
3293 ptr->tune_defaulted = ix86_tune_defaulted;
3294 ptr->arch_specified = ix86_arch_specified;
3295 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3296 ptr->target_flags_explicit = target_flags_explicit;
3299 /* Restore the current options */
3301 static void
3302 ix86_function_specific_restore (struct cl_target_option *ptr)
3304 enum processor_type old_tune = ix86_tune;
3305 enum processor_type old_arch = ix86_arch;
3306 unsigned int ix86_arch_mask, ix86_tune_mask;
3307 int i;
3309 ix86_arch = ptr->arch;
3310 ix86_schedule = ptr->schedule;
3311 ix86_tune = ptr->tune;
3312 ix86_fpmath = ptr->fpmath;
3313 ix86_branch_cost = ptr->branch_cost;
3314 ix86_tune_defaulted = ptr->tune_defaulted;
3315 ix86_arch_specified = ptr->arch_specified;
3316 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3317 target_flags_explicit = ptr->target_flags_explicit;
3319 /* Recreate the arch feature tests if the arch changed */
3320 if (old_arch != ix86_arch)
3322 ix86_arch_mask = 1u << ix86_arch;
3323 for (i = 0; i < X86_ARCH_LAST; ++i)
3324 ix86_arch_features[i]
3325 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3328 /* Recreate the tune optimization tests */
3329 if (old_tune != ix86_tune)
3331 ix86_tune_mask = 1u << ix86_tune;
3332 for (i = 0; i < X86_TUNE_LAST; ++i)
3333 ix86_tune_features[i]
3334 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3338 /* Print the current options */
3340 static void
3341 ix86_function_specific_print (FILE *file, int indent,
3342 struct cl_target_option *ptr)
3344 char *target_string
3345 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3346 NULL, NULL, NULL, false);
3348 fprintf (file, "%*sarch = %d (%s)\n",
3349 indent, "",
3350 ptr->arch,
3351 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3352 ? cpu_names[ptr->arch]
3353 : "<unknown>"));
3355 fprintf (file, "%*stune = %d (%s)\n",
3356 indent, "",
3357 ptr->tune,
3358 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3359 ? cpu_names[ptr->tune]
3360 : "<unknown>"));
3362 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3363 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3364 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3365 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3367 if (target_string)
3369 fprintf (file, "%*s%s\n", indent, "", target_string);
3370 free (target_string);
3375 /* Inner function to process the attribute((target(...))); take an argument and
3376 set the current options from the argument. If we have a list, recursively go
3377 over the list. */
3379 static bool
3380 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3382 char *next_optstr;
3383 bool ret = true;
3385 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3386 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3387 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3388 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3390 enum ix86_opt_type
3392 ix86_opt_unknown,
3393 ix86_opt_yes,
3394 ix86_opt_no,
3395 ix86_opt_str,
3396 ix86_opt_isa
3399 static const struct
3401 const char *string;
3402 size_t len;
3403 enum ix86_opt_type type;
3404 int opt;
3405 int mask;
3406 } attrs[] = {
3407 /* isa options */
3408 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3409 IX86_ATTR_ISA ("abm", OPT_mabm),
3410 IX86_ATTR_ISA ("aes", OPT_maes),
3411 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3412 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3413 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3414 IX86_ATTR_ISA ("sse", OPT_msse),
3415 IX86_ATTR_ISA ("sse2", OPT_msse2),
3416 IX86_ATTR_ISA ("sse3", OPT_msse3),
3417 IX86_ATTR_ISA ("sse4", OPT_msse4),
3418 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3419 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3420 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3421 IX86_ATTR_ISA ("sse5", OPT_msse5),
3422 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3424 /* string options */
3425 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3426 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3427 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3429 /* flag options */
3430 IX86_ATTR_YES ("cld",
3431 OPT_mcld,
3432 MASK_CLD),
3434 IX86_ATTR_NO ("fancy-math-387",
3435 OPT_mfancy_math_387,
3436 MASK_NO_FANCY_MATH_387),
3438 IX86_ATTR_NO ("fused-madd",
3439 OPT_mfused_madd,
3440 MASK_NO_FUSED_MADD),
3442 IX86_ATTR_YES ("ieee-fp",
3443 OPT_mieee_fp,
3444 MASK_IEEE_FP),
3446 IX86_ATTR_YES ("inline-all-stringops",
3447 OPT_minline_all_stringops,
3448 MASK_INLINE_ALL_STRINGOPS),
3450 IX86_ATTR_YES ("inline-stringops-dynamically",
3451 OPT_minline_stringops_dynamically,
3452 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3454 IX86_ATTR_NO ("align-stringops",
3455 OPT_mno_align_stringops,
3456 MASK_NO_ALIGN_STRINGOPS),
3458 IX86_ATTR_YES ("recip",
3459 OPT_mrecip,
3460 MASK_RECIP),
3464 /* If this is a list, recurse to get the options. */
3465 if (TREE_CODE (args) == TREE_LIST)
3467 bool ret = true;
3469 for (; args; args = TREE_CHAIN (args))
3470 if (TREE_VALUE (args)
3471 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3472 ret = false;
3474 return ret;
3477 else if (TREE_CODE (args) != STRING_CST)
3478 gcc_unreachable ();
3480 /* Handle multiple arguments separated by commas. */
3481 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3483 while (next_optstr && *next_optstr != '\0')
3485 char *p = next_optstr;
3486 char *orig_p = p;
3487 char *comma = strchr (next_optstr, ',');
3488 const char *opt_string;
3489 size_t len, opt_len;
3490 int opt;
3491 bool opt_set_p;
3492 char ch;
3493 unsigned i;
3494 enum ix86_opt_type type = ix86_opt_unknown;
3495 int mask = 0;
3497 if (comma)
3499 *comma = '\0';
3500 len = comma - next_optstr;
3501 next_optstr = comma + 1;
3503 else
3505 len = strlen (p);
3506 next_optstr = NULL;
3509 /* Recognize no-xxx. */
3510 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3512 opt_set_p = false;
3513 p += 3;
3514 len -= 3;
3516 else
3517 opt_set_p = true;
3519 /* Find the option. */
3520 ch = *p;
3521 opt = N_OPTS;
3522 for (i = 0; i < sizeof (attrs) / sizeof (attrs[0]); i++)
3524 type = attrs[i].type;
3525 opt_len = attrs[i].len;
3526 if (ch == attrs[i].string[0]
3527 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3528 && memcmp (p, attrs[i].string, opt_len) == 0)
3530 opt = attrs[i].opt;
3531 mask = attrs[i].mask;
3532 opt_string = attrs[i].string;
3533 break;
3537 /* Process the option. */
3538 if (opt == N_OPTS)
3540 error ("attribute(target(\"%s\")) is unknown", orig_p);
3541 ret = false;
3544 else if (type == ix86_opt_isa)
3545 ix86_handle_option (opt, p, opt_set_p);
3547 else if (type == ix86_opt_yes || type == ix86_opt_no)
3549 if (type == ix86_opt_no)
3550 opt_set_p = !opt_set_p;
3552 if (opt_set_p)
3553 target_flags |= mask;
3554 else
3555 target_flags &= ~mask;
3558 else if (type == ix86_opt_str)
3560 if (p_strings[opt])
3562 error ("option(\"%s\") was already specified", opt_string);
3563 ret = false;
3565 else
3566 p_strings[opt] = xstrdup (p + opt_len);
3569 else
3570 gcc_unreachable ();
3573 return ret;
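/* A hypothetical use of the attribute parsed above:

       int sum (int *a, int n) __attribute__ ((target ("sse4.2,no-cld")));

   The string is split on commas; "sse4.2" matches an IX86_ATTR_ISA entry
   and is fed to ix86_handle_option, while "no-cld" strips the "no-"
   prefix and clears MASK_CLD through the ix86_opt_yes path.  */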
3576 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3578 tree
3579 ix86_valid_target_attribute_tree (tree args)
3581 const char *orig_arch_string = ix86_arch_string;
3582 const char *orig_tune_string = ix86_tune_string;
3583 const char *orig_fpmath_string = ix86_fpmath_string;
3584 int orig_tune_defaulted = ix86_tune_defaulted;
3585 int orig_arch_specified = ix86_arch_specified;
3586 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3587 tree t = NULL_TREE;
3588 int i;
3589 struct cl_target_option *def
3590 = TREE_TARGET_OPTION (target_option_default_node);
3592 /* Process each of the options on the chain. */
3593 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3594 return NULL_TREE;
3596 /* If the changed options are different from the default, rerun override_options,
3597 and then save the options away. The string options are attribute options,
3598 and will be undone when we copy the save structure. */
3599 if (ix86_isa_flags != def->ix86_isa_flags
3600 || target_flags != def->target_flags
3601 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3602 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3603 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3605 /* If we are using the default tune= or arch=, undo the string assigned,
3606 and use the default. */
3607 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3608 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3609 else if (!orig_arch_specified)
3610 ix86_arch_string = NULL;
3612 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3613 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3614 else if (orig_tune_defaulted)
3615 ix86_tune_string = NULL;
3617 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3618 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3619 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3620 else if (!TARGET_64BIT && TARGET_SSE)
3621 ix86_fpmath_string = "sse,387";
3623 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3624 override_options (false);
3626 /* Add any builtin functions for the new ISA, if any. */
3627 ix86_add_new_builtins (ix86_isa_flags);
3629 /* Save the current options unless we are validating options for
3630 #pragma. */
3631 t = build_target_option_node ();
3633 ix86_arch_string = orig_arch_string;
3634 ix86_tune_string = orig_tune_string;
3635 ix86_fpmath_string = orig_fpmath_string;
3637 /* Free up memory allocated to hold the strings */
3638 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3639 if (option_strings[i])
3640 free (option_strings[i]);
3643 return t;
3646 /* Hook to validate attribute((target("string"))). */
3648 static bool
3649 ix86_valid_target_attribute_p (tree fndecl,
3650 tree ARG_UNUSED (name),
3651 tree args,
3652 int ARG_UNUSED (flags))
3654 struct cl_target_option cur_target;
3655 bool ret = true;
3656 tree old_optimize = build_optimization_node ();
3657 tree new_target, new_optimize;
3658 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3660 /* If the function changed the optimization levels as well as setting target
3661 options, start with the optimizations specified. */
3662 if (func_optimize && func_optimize != old_optimize)
3663 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3665 /* The target attributes may also change some optimization flags, so update
3666 the optimization options if necessary. */
3667 cl_target_option_save (&cur_target);
3668 new_target = ix86_valid_target_attribute_tree (args);
3669 new_optimize = build_optimization_node ();
3671 if (!new_target)
3672 ret = false;
3674 else if (fndecl)
3676 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3678 if (old_optimize != new_optimize)
3679 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3682 cl_target_option_restore (&cur_target);
3684 if (old_optimize != new_optimize)
3685 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3687 return ret;
3691 /* Hook to determine if one function can safely inline another. */
3693 static bool
3694 ix86_can_inline_p (tree caller, tree callee)
3696 bool ret = false;
3697 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3698 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3700 /* If callee has no option attributes, then it is ok to inline. */
3701 if (!callee_tree)
3702 ret = true;
3704 /* If the caller has no option attributes but the callee does, then it is
3705 not ok to inline. */
3706 else if (!caller_tree)
3707 ret = false;
3709 else
3711 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3712 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3714 /* The callee's ISA options should be a subset of the caller's, i.e. an SSE5
3715 function can inline an SSE2 function, but an SSE2 function can't inline
3716 an SSE5 function. */
3717 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3718 != callee_opts->ix86_isa_flags)
3719 ret = false;
3721 /* See if we have the same non-isa options. */
3722 else if (caller_opts->target_flags != callee_opts->target_flags)
3723 ret = false;
3725 /* See if arch, tune, etc. are the same. */
3726 else if (caller_opts->arch != callee_opts->arch)
3727 ret = false;
3729 else if (caller_opts->tune != callee_opts->tune)
3730 ret = false;
3732 else if (caller_opts->fpmath != callee_opts->fpmath)
3733 ret = false;
3735 else if (caller_opts->branch_cost != callee_opts->branch_cost)
3736 ret = false;
3738 else
3739 ret = true;
3742 return ret;
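/* A sketch of the subset rule above (hypothetical declarations):

       void callee (void) __attribute__ ((target ("sse2")));
       void caller (void) __attribute__ ((target ("sse5")));

   caller may inline callee, since callee's ISA flags are a subset of
   caller's; the reverse direction is rejected.  */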
3746 /* Remember the last target of ix86_set_current_function. */
3747 static GTY(()) tree ix86_previous_fndecl;
3749 /* Establish appropriate back-end context for processing the function
3750 FNDECL. The argument might be NULL to indicate processing at top
3751 level, outside of any function scope. */
3752 static void
3753 ix86_set_current_function (tree fndecl)
3755 /* Only change the context if the function changes. This hook is called
3756 several times in the course of compiling a function, and we don't want to
3757 slow things down too much or call target_reinit when it isn't safe. */
3758 if (fndecl && fndecl != ix86_previous_fndecl)
3760 tree old_tree = (ix86_previous_fndecl
3761 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
3762 : NULL_TREE);
3764 tree new_tree = (fndecl
3765 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
3766 : NULL_TREE);
3768 ix86_previous_fndecl = fndecl;
3769 if (old_tree == new_tree)
3772 else if (new_tree)
3774 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
3775 target_reinit ();
3778 else if (old_tree)
3780 struct cl_target_option *def
3781 = TREE_TARGET_OPTION (target_option_current_node);
3783 cl_target_option_restore (def);
3784 target_reinit ();
3790 /* Return true if this goes in large data/bss. */
3792 static bool
3793 ix86_in_large_data_p (tree exp)
3795 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
3796 return false;
3798 /* Functions are never large data. */
3799 if (TREE_CODE (exp) == FUNCTION_DECL)
3800 return false;
3802 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
3804 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
3805 if (strcmp (section, ".ldata") == 0
3806 || strcmp (section, ".lbss") == 0)
3807 return true;
3808 return false;
3810 else
3812 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
3814 /* If this is an incomplete type with size 0, then we can't put it
3815 in data because it might be too big when completed. */
3816 if (!size || size > ix86_section_threshold)
3817 return true;
3820 return false;
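/* Illustration: under a hypothetical "gcc -mcmodel=medium
   -mlarge-data-threshold=4096" compilation, a 64 KiB array ends up in
   .ldata/.lbss, while a plain "int" variable stays in the ordinary
   small-model sections.  */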
3823 /* Switch to the appropriate section for output of DECL.
3824 DECL is either a `VAR_DECL' node or a constant of some sort.
3825 RELOC indicates whether forming the initial value of DECL requires
3826 link-time relocations. */
3828 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
3829 ATTRIBUTE_UNUSED;
3831 static section *
3832 x86_64_elf_select_section (tree decl, int reloc,
3833 unsigned HOST_WIDE_INT align)
3835 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3836 && ix86_in_large_data_p (decl))
3838 const char *sname = NULL;
3839 unsigned int flags = SECTION_WRITE;
3840 switch (categorize_decl_for_section (decl, reloc))
3842 case SECCAT_DATA:
3843 sname = ".ldata";
3844 break;
3845 case SECCAT_DATA_REL:
3846 sname = ".ldata.rel";
3847 break;
3848 case SECCAT_DATA_REL_LOCAL:
3849 sname = ".ldata.rel.local";
3850 break;
3851 case SECCAT_DATA_REL_RO:
3852 sname = ".ldata.rel.ro";
3853 break;
3854 case SECCAT_DATA_REL_RO_LOCAL:
3855 sname = ".ldata.rel.ro.local";
3856 break;
3857 case SECCAT_BSS:
3858 sname = ".lbss";
3859 flags |= SECTION_BSS;
3860 break;
3861 case SECCAT_RODATA:
3862 case SECCAT_RODATA_MERGE_STR:
3863 case SECCAT_RODATA_MERGE_STR_INIT:
3864 case SECCAT_RODATA_MERGE_CONST:
3865 sname = ".lrodata";
3866 flags = 0;
3867 break;
3868 case SECCAT_SRODATA:
3869 case SECCAT_SDATA:
3870 case SECCAT_SBSS:
3871 gcc_unreachable ();
3872 case SECCAT_TEXT:
3873 case SECCAT_TDATA:
3874 case SECCAT_TBSS:
3875 /* We don't split these for the medium model. Place them into
3876 default sections and hope for the best. */
3877 break;
3878 case SECCAT_EMUTLS_VAR:
3879 case SECCAT_EMUTLS_TMPL:
3880 gcc_unreachable ();
3882 if (sname)
3884 /* We might get called with string constants, but get_named_section
3885 doesn't like them as they are not DECLs. Also, we need to set
3886 flags in that case. */
3887 if (!DECL_P (decl))
3888 return get_section (sname, flags, NULL);
3889 return get_named_section (decl, sname, reloc);
3892 return default_elf_select_section (decl, reloc, align);
3895 /* Build up a unique section name, expressed as a
3896 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
3897 RELOC indicates whether the initial value of EXP requires
3898 link-time relocations. */
3900 static void ATTRIBUTE_UNUSED
3901 x86_64_elf_unique_section (tree decl, int reloc)
3903 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3904 && ix86_in_large_data_p (decl))
3906 const char *prefix = NULL;
3907 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
3908 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
3910 switch (categorize_decl_for_section (decl, reloc))
3912 case SECCAT_DATA:
3913 case SECCAT_DATA_REL:
3914 case SECCAT_DATA_REL_LOCAL:
3915 case SECCAT_DATA_REL_RO:
3916 case SECCAT_DATA_REL_RO_LOCAL:
3917 prefix = one_only ? ".ld" : ".ldata";
3918 break;
3919 case SECCAT_BSS:
3920 prefix = one_only ? ".lb" : ".lbss";
3921 break;
3922 case SECCAT_RODATA:
3923 case SECCAT_RODATA_MERGE_STR:
3924 case SECCAT_RODATA_MERGE_STR_INIT:
3925 case SECCAT_RODATA_MERGE_CONST:
3926 prefix = one_only ? ".lr" : ".lrodata";
3927 break;
3928 case SECCAT_SRODATA:
3929 case SECCAT_SDATA:
3930 case SECCAT_SBSS:
3931 gcc_unreachable ();
3932 case SECCAT_TEXT:
3933 case SECCAT_TDATA:
3934 case SECCAT_TBSS:
3935 /* We don't split these for the medium model. Place them into
3936 default sections and hope for the best. */
3937 break;
3938 case SECCAT_EMUTLS_VAR:
3939 prefix = targetm.emutls.var_section;
3940 break;
3941 case SECCAT_EMUTLS_TMPL:
3942 prefix = targetm.emutls.tmpl_section;
3943 break;
3945 if (prefix)
3947 const char *name, *linkonce;
3948 char *string;
3950 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3951 name = targetm.strip_name_encoding (name);
3953 /* If we're using one_only, then there needs to be a .gnu.linkonce
3954 prefix to the section name. */
3955 linkonce = one_only ? ".gnu.linkonce" : "";
3957 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
3959 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
3960 return;
3963 default_unique_section (decl, reloc);
3966 #ifdef COMMON_ASM_OP
3967 /* This says how to output assembler code to declare an
3968 uninitialized external linkage data object.
3970 For medium-model x86-64 we need to use the .largecomm directive for
3971 large objects. */
3972 void
3973 x86_elf_aligned_common (FILE *file,
3974 const char *name, unsigned HOST_WIDE_INT size,
3975 int align)
3977 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3978 && size > (unsigned int)ix86_section_threshold)
3979 fprintf (file, ".largecomm\t");
3980 else
3981 fprintf (file, "%s", COMMON_ASM_OP);
3982 assemble_name (file, name);
3983 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
3984 size, align / BITS_PER_UNIT);
3986 #endif
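/* For a hypothetical 1 MB common object "buf" in the medium model,
   assuming 32-byte alignment, this emits roughly:
       .largecomm buf,1048576,32
   whereas a small object gets the usual COMMON_ASM_OP (".comm") line.  */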
3988 /* Utility function for targets to use in implementing
3989 ASM_OUTPUT_ALIGNED_BSS. */
3991 void
3992 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
3993 const char *name, unsigned HOST_WIDE_INT size,
3994 int align)
3996 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3997 && size > (unsigned int)ix86_section_threshold)
3998 switch_to_section (get_named_section (decl, ".lbss", 0));
3999 else
4000 switch_to_section (bss_section);
4001 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4002 #ifdef ASM_DECLARE_OBJECT_NAME
4003 last_assemble_variable_decl = decl;
4004 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4005 #else
4007 /* The standard thing is just to output a label for the object. */
4007 ASM_OUTPUT_LABEL (file, name);
4008 #endif /* ASM_DECLARE_OBJECT_NAME */
4009 ASM_OUTPUT_SKIP (file, size ? size : 1);
4012 void
4013 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4015 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4016 make the problem of having too few registers even worse. */
4017 #ifdef INSN_SCHEDULING
4018 if (level > 1)
4019 flag_schedule_insns = 0;
4020 #endif
4022 if (TARGET_MACHO)
4023 /* The Darwin libraries never set errno, so we might as well
4024 avoid calling them when that's the only reason we would. */
4025 flag_errno_math = 0;
4027 /* The default values of these switches depend on TARGET_64BIT,
4028 which is not known at this moment. Mark these values with 2 and
4029 let the user override them. In case there is no command-line option
4030 specifying them, we will set the defaults in override_options. */
4031 if (optimize >= 1)
4032 flag_omit_frame_pointer = 2;
4033 flag_pcc_struct_return = 2;
4034 flag_asynchronous_unwind_tables = 2;
4035 flag_vect_cost_model = 1;
4036 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4037 SUBTARGET_OPTIMIZATION_OPTIONS;
4038 #endif
4041 /* Decide whether we can make a sibling call to a function. DECL is the
4042 declaration of the function being targeted by the call and EXP is the
4043 CALL_EXPR representing the call. */
4045 static bool
4046 ix86_function_ok_for_sibcall (tree decl, tree exp)
4048 tree func;
4049 rtx a, b;
4051 /* If we are generating position-independent code, we cannot sibcall
4052 optimize any indirect call, or a direct call to a global function,
4053 as the PLT requires %ebx be live. */
4054 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4055 return false;
4057 if (decl)
4058 func = decl;
4059 else
4061 func = TREE_TYPE (CALL_EXPR_FN (exp));
4062 if (POINTER_TYPE_P (func))
4063 func = TREE_TYPE (func);
4066 /* Check that the return value locations are the same. For example,
4067 if we are returning floats on the 80387 register stack, we cannot
4068 make a sibcall from a function that doesn't return a float to a
4069 function that does or, conversely, from a function that does return
4070 a float to a function that doesn't; the necessary stack adjustment
4071 would not be executed. This is also the place we notice
4072 differences in the return value ABI. Note that it is ok for one
4073 of the functions to have void return type as long as the return
4074 value of the other is passed in a register. */
4075 a = ix86_function_value (TREE_TYPE (exp), func, false);
4076 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4077 cfun->decl, false);
4078 if (STACK_REG_P (a) || STACK_REG_P (b))
4080 if (!rtx_equal_p (a, b))
4081 return false;
4083 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4085 else if (!rtx_equal_p (a, b))
4086 return false;
4088 /* If this call is indirect, we'll need to be able to use a call-clobbered
4089 register for the address of the target function. Make sure that all
4090 such registers are not used for passing parameters. */
4091 if (!decl && !TARGET_64BIT)
4093 tree type;
4095 /* We're looking at the CALL_EXPR, we need the type of the function. */
4096 type = CALL_EXPR_FN (exp); /* pointer expression */
4097 type = TREE_TYPE (type); /* pointer type */
4098 type = TREE_TYPE (type); /* function type */
4100 if (ix86_function_regparm (type, NULL) >= 3)
4102 /* ??? Need to count the actual number of registers to be used,
4103 not the possible number of registers. Fix later. */
4104 return false;
4108 /* Dllimport'd functions are also called indirectly. */
4109 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
4110 && !TARGET_64BIT
4111 && decl && DECL_DLLIMPORT_P (decl)
4112 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
4113 return false;
4115 /* Otherwise okay. That also includes certain types of indirect calls. */
4116 return true;
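/* Example of the PIC restriction above (hypothetical code, -m32 -fPIC):

       extern int tail (int);
       int f (int x) { return tail (x); }

   here the call cannot be a sibcall, because calling the global "tail"
   goes through the PLT and the PLT needs %ebx live; a static or
   otherwise locally bound callee is fine.  */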
4119 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
4120 calling convention attributes;
4121 arguments as in struct attribute_spec.handler. */
4123 static tree
4124 ix86_handle_cconv_attribute (tree *node, tree name,
4125 tree args,
4126 int flags ATTRIBUTE_UNUSED,
4127 bool *no_add_attrs)
4129 if (TREE_CODE (*node) != FUNCTION_TYPE
4130 && TREE_CODE (*node) != METHOD_TYPE
4131 && TREE_CODE (*node) != FIELD_DECL
4132 && TREE_CODE (*node) != TYPE_DECL)
4134 warning (OPT_Wattributes, "%qs attribute only applies to functions",
4135 IDENTIFIER_POINTER (name));
4136 *no_add_attrs = true;
4137 return NULL_TREE;
4140 /* Can combine regparm with all attributes but fastcall. */
4141 if (is_attribute_p ("regparm", name))
4143 tree cst;
4145 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4147 error ("fastcall and regparm attributes are not compatible");
4150 cst = TREE_VALUE (args);
4151 if (TREE_CODE (cst) != INTEGER_CST)
4153 warning (OPT_Wattributes,
4154 "%qs attribute requires an integer constant argument",
4155 IDENTIFIER_POINTER (name));
4156 *no_add_attrs = true;
4158 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4160 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
4161 IDENTIFIER_POINTER (name), REGPARM_MAX);
4162 *no_add_attrs = true;
4165 return NULL_TREE;
4168 if (TARGET_64BIT)
4170 /* Do not warn when emulating the MS ABI. */
4171 if (TREE_CODE (*node) != FUNCTION_TYPE || ix86_function_type_abi (*node)!=MS_ABI)
4172 warning (OPT_Wattributes, "%qs attribute ignored",
4173 IDENTIFIER_POINTER (name));
4174 *no_add_attrs = true;
4175 return NULL_TREE;
4178 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4179 if (is_attribute_p ("fastcall", name))
4181 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4183 error ("fastcall and cdecl attributes are not compatible");
4185 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4187 error ("fastcall and stdcall attributes are not compatible");
4189 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4191 error ("fastcall and regparm attributes are not compatible");
4195 /* Can combine stdcall with fastcall (redundant), regparm and
4196 sseregparm. */
4197 else if (is_attribute_p ("stdcall", name))
4199 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4201 error ("stdcall and cdecl attributes are not compatible");
4203 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4205 error ("stdcall and fastcall attributes are not compatible");
4209 /* Can combine cdecl with regparm and sseregparm. */
4210 else if (is_attribute_p ("cdecl", name))
4212 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4214 error ("stdcall and cdecl attributes are not compatible");
4216 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4218 error ("fastcall and cdecl attributes are not compatible");
4222 /* Can combine sseregparm with all attributes. */
4224 return NULL_TREE;
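/* Hypothetical declarations exercising the checks above:

       int f (int, int) __attribute__ ((regparm (2)));        // accepted
       int g (int) __attribute__ ((fastcall, regparm (2)));   // error
       int h (int) __attribute__ ((stdcall, fastcall));       // error

   On 64-bit targets (outside the emulated MS ABI) all of these are
   ignored with a warning instead.  */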
4227 /* Return 0 if the attributes for two types are incompatible, 1 if they
4228 are compatible, and 2 if they are nearly compatible (which causes a
4229 warning to be generated). */
4231 static int
4232 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4234 /* Check for mismatch of non-default calling convention. */
4235 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4237 if (TREE_CODE (type1) != FUNCTION_TYPE
4238 && TREE_CODE (type1) != METHOD_TYPE)
4239 return 1;
4241 /* Check for mismatched fastcall/regparm types. */
4242 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4243 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4244 || (ix86_function_regparm (type1, NULL)
4245 != ix86_function_regparm (type2, NULL)))
4246 return 0;
4248 /* Check for mismatched sseregparm types. */
4249 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4250 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4251 return 0;
4253 /* Check for mismatched return types (cdecl vs stdcall). */
4254 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4255 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4256 return 0;
4258 return 1;
4261 /* Return the regparm value for a function with the indicated TYPE and DECL.
4262 DECL may be NULL when calling function indirectly
4263 or considering a libcall. */
4265 static int
4266 ix86_function_regparm (const_tree type, const_tree decl)
4268 tree attr;
4269 int regparm = ix86_regparm;
4271 static bool error_issued;
4273 if (TARGET_64BIT)
4275 if (ix86_function_type_abi (type) == DEFAULT_ABI)
4276 return regparm;
4277 return DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX : X64_REGPARM_MAX;
4280 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4281 if (attr)
4283 regparm
4284 = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4286 if (decl && TREE_CODE (decl) == FUNCTION_DECL)
4288 /* We can't use regparm(3) for nested functions because
4289 these pass the static chain pointer in the %ecx register. */
4290 if (!error_issued && regparm == 3
4291 && decl_function_context (decl)
4292 && !DECL_NO_STATIC_CHAIN (decl))
4294 error ("nested functions are limited to 2 register parameters");
4295 error_issued = true;
4296 return 0;
4300 return regparm;
4303 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4304 return 2;
4306 /* Use register calling convention for local functions when possible. */
4307 if (decl && TREE_CODE (decl) == FUNCTION_DECL
4308 && !profile_flag)
4310 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4311 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4312 if (i && i->local)
4314 int local_regparm, globals = 0, regno;
4315 struct function *f;
4317 /* Make sure no regparm register is taken by a
4318 fixed register variable. */
4319 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4320 if (fixed_regs[local_regparm])
4321 break;
4323 /* We can't use regparm(3) for nested functions as these pass the
4324 static chain pointer as the third argument. */
4325 if (local_regparm == 3
4326 && decl_function_context (decl)
4327 && !DECL_NO_STATIC_CHAIN (decl))
4328 local_regparm = 2;
4330 /* If the function realigns its stack pointer, the prologue will
4331 clobber %ecx. If we've already generated code for the callee,
4332 the callee DECL_STRUCT_FUNCTION is gone, so we fall back to
4333 scanning the attributes for the self-realigning property. */
4334 f = DECL_STRUCT_FUNCTION (decl);
4335 /* Since the current internal arg pointer won't conflict with
4336 parameter-passing regs, there is no need to change stack
4337 realignment or adjust the regparm number.
4339 Each fixed register use increases register pressure,
4340 so fewer registers should be used for argument passing.
4341 This behavior can be overridden by an explicit
4342 regparm value. */
4343 for (regno = 0; regno <= DI_REG; regno++)
4344 if (fixed_regs[regno])
4345 globals++;
4347 local_regparm
4348 = globals < local_regparm ? local_regparm - globals : 0;
4350 if (local_regparm > regparm)
4351 regparm = local_regparm;
4355 return regparm;
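/* For instance, a hypothetical "static int helper (int a, int b)" whose
   address never escapes is seen as i->local, so with no fixed registers
   in the way it receives up to REGPARM_MAX arguments in registers even
   without an explicit regparm attribute.  */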
4358 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4359 DFmode (2) arguments in SSE registers for a function with the
4360 indicated TYPE and DECL. DECL may be NULL when calling function
4361 indirectly or considering a libcall. Otherwise return 0. */
4363 static int
4364 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4366 gcc_assert (!TARGET_64BIT);
4368 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4369 by the sseregparm attribute. */
4370 if (TARGET_SSEREGPARM
4371 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4373 if (!TARGET_SSE)
4375 if (warn)
4377 if (decl)
4378 error ("Calling %qD with attribute sseregparm without "
4379 "SSE/SSE2 enabled", decl);
4380 else
4381 error ("Calling %qT with attribute sseregparm without "
4382 "SSE/SSE2 enabled", type);
4384 return 0;
4387 return 2;
4390 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4391 (and DFmode for SSE2) arguments in SSE registers. */
4392 if (decl && TARGET_SSE_MATH && !profile_flag)
4394 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4395 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4396 if (i && i->local)
4397 return TARGET_SSE2 ? 2 : 1;
4400 return 0;
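/* E.g. a hypothetical "float dot (float a, float b)
   __attribute__ ((sseregparm))" built with -m32 -msse2 passes its
   SFmode arguments in SSE registers (the return value 2 above); without
   SSE enabled the error path fires instead.  */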
4403 /* Return true if EAX is live at the start of the function. Used by
4404 ix86_expand_prologue to determine if we need special help before
4405 calling allocate_stack_worker. */
4407 static bool
4408 ix86_eax_live_at_start_p (void)
4410 /* Cheat. Don't bother working forward from ix86_function_regparm
4411 to the function type to whether an actual argument is located in
4412 eax. Instead just look at cfg info, which is still close enough
4413 to correct at this point. This gives false positives for broken
4414 functions that might use uninitialized data that happens to be
4415 allocated in eax, but who cares? */
4416 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4419 /* Value is the number of bytes of arguments automatically
4420 popped when returning from a subroutine call.
4421 FUNDECL is the declaration node of the function (as a tree),
4422 FUNTYPE is the data type of the function (as a tree),
4423 or for a library call it is an identifier node for the subroutine name.
4424 SIZE is the number of bytes of arguments passed on the stack.
4426 On the 80386, the RTD insn may be used to pop them if the number
4427 of args is fixed, but if the number is variable then the caller
4428 must pop them all. RTD can't be used for library calls now
4429 because the library is compiled with the Unix compiler.
4430 Use of RTD is a selectable option, since it is incompatible with
4431 standard Unix calling sequences. If the option is not selected,
4432 the caller must always pop the args.
4434 The attribute stdcall is equivalent to RTD on a per module basis. */
4437 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4439 int rtd;
4441 /* None of the 64-bit ABIs pop arguments. */
4442 if (TARGET_64BIT)
4443 return 0;
4445 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4447 /* Cdecl functions override -mrtd, and never pop the stack. */
4448 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4450 /* Stdcall and fastcall functions will pop the stack if they do
4451 not take variable args. */
4452 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4453 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
4454 rtd = 1;
4456 if (rtd && ! stdarg_p (funtype))
4457 return size;
4460 /* Lose any fake structure return argument if it is passed on the stack. */
4461 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4462 && !KEEP_AGGREGATE_RETURN_POINTER)
4464 int nregs = ix86_function_regparm (funtype, fundecl);
4465 if (nregs == 0)
4466 return GET_MODE_SIZE (Pmode);
4469 return 0;
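/* Concretely (hypothetical, -m32): for
       int __attribute__ ((stdcall)) f (int a, int b);
   this returns 8, so the callee pops its own arguments with "ret $8",
   while a cdecl or stdarg function returns 0 and the caller adjusts
   %esp after the call instead.  */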
4472 /* Argument support functions. */
4474 /* Return true when REGNO may be used to pass function parameters. */
4475 bool
4476 ix86_function_arg_regno_p (int regno)
4478 int i;
4479 const int *parm_regs;
4481 if (!TARGET_64BIT)
4483 if (TARGET_MACHO)
4484 return (regno < REGPARM_MAX
4485 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4486 else
4487 return (regno < REGPARM_MAX
4488 || (TARGET_MMX && MMX_REGNO_P (regno)
4489 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4490 || (TARGET_SSE && SSE_REGNO_P (regno)
4491 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4494 if (TARGET_MACHO)
4496 if (SSE_REGNO_P (regno) && TARGET_SSE)
4497 return true;
4499 else
4501 if (TARGET_SSE && SSE_REGNO_P (regno)
4502 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4503 return true;
4506 /* TODO: The function should depend on the current function's ABI, but
4507 builtins.c would need updating then. Therefore we use the
4508 default ABI. */
4510 /* RAX is used as hidden argument to va_arg functions. */
4511 if (DEFAULT_ABI == SYSV_ABI && regno == AX_REG)
4512 return true;
4514 if (DEFAULT_ABI == MS_ABI)
4515 parm_regs = x86_64_ms_abi_int_parameter_registers;
4516 else
4517 parm_regs = x86_64_int_parameter_registers;
4518 for (i = 0; i < (DEFAULT_ABI == MS_ABI ? X64_REGPARM_MAX
4519 : X86_64_REGPARM_MAX); i++)
4520 if (regno == parm_regs[i])
4521 return true;
4522 return false;
4525 /* Return true if we do not know how to pass TYPE solely in registers. */
4527 static bool
4528 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4530 if (must_pass_in_stack_var_size_or_pad (mode, type))
4531 return true;
4533 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4534 The layout_type routine is crafty and tries to trick us into passing
4535 currently unsupported vector types on the stack by using TImode. */
4536 return (!TARGET_64BIT && mode == TImode
4537 && type && TREE_CODE (type) != VECTOR_TYPE);
4540 /* Return the size, in bytes, of the area reserved for arguments passed
4541 in registers for the function represented by FNDECL, depending on the
4542 ABI format used. */
4544 ix86_reg_parm_stack_space (const_tree fndecl)
4546 int call_abi = 0;
4547 /* For libcalls it is possible that there is no fndecl at hand.
4548 Therefore assume the default ABI of the target in this case. */
4549 if (!fndecl)
4550 call_abi = DEFAULT_ABI;
4551 else
4552 call_abi = ix86_function_abi (fndecl);
4553 if (call_abi == 1)
4554 return 32;
4555 return 0;
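/* The 32 bytes above are the MS x64 "home area": callers following that
   ABI always reserve 8 bytes of stack for each of the four register
   parameters, e.g. a hypothetical caller executes "subq $40, %rsp"
   (32-byte home area plus alignment) before the call.  */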
4558 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
4559 call ABI used. */
4561 ix86_function_type_abi (const_tree fntype)
4563 if (TARGET_64BIT && fntype != NULL)
4565 int abi;
4566 if (DEFAULT_ABI == SYSV_ABI)
4567 abi = lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)) ? MS_ABI : SYSV_ABI;
4568 else
4569 abi = lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)) ? SYSV_ABI : MS_ABI;
4571 return abi;
4573 return DEFAULT_ABI;
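/* For example, on a hypothetical Linux/x86-64 build (DEFAULT_ABI ==
   SYSV_ABI), "void w (void) __attribute__ ((ms_abi));" makes this
   function return MS_ABI for w's type, switching its parameter
   registers to the Windows x64 set.  */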
4577 ix86_function_abi (const_tree fndecl)
4579 if (! fndecl)
4580 return DEFAULT_ABI;
4581 return ix86_function_type_abi (TREE_TYPE (fndecl));
4584 /* Return SYSV_ABI or MS_ABI, depending on CFUN, specifying the
4585 call ABI used. */
4587 ix86_cfun_abi (void)
4589 if (! cfun || ! TARGET_64BIT)
4590 return DEFAULT_ABI;
4591 return cfun->machine->call_abi;
4594 /* regclass.c */
4595 extern void init_regs (void);
4597 /* Implementation of the call ABI switching target hook. The call
4598 register sets specific to FNDECL are set up. See also CONDITIONAL_REGISTER_USAGE
4599 for more details.
4600 To prevent redundant calls to the costly function init_regs (), register
4601 usage is not reset for the default ABI. */
4602 void
4603 ix86_call_abi_override (const_tree fndecl)
4605 if (fndecl == NULL_TREE)
4606 cfun->machine->call_abi = DEFAULT_ABI;
4607 else
4608 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4609 if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
4611 if (call_used_regs[4 /*RSI*/] != 0 || call_used_regs[5 /*RDI*/] != 0)
4613 call_used_regs[4 /*RSI*/] = 0;
4614 call_used_regs[5 /*RDI*/] = 0;
4615 init_regs ();
4618 else if (TARGET_64BIT)
4620 if (call_used_regs[4 /*RSI*/] != 1 || call_used_regs[5 /*RDI*/] != 1)
4622 call_used_regs[4 /*RSI*/] = 1;
4623 call_used_regs[5 /*RDI*/] = 1;
4624 init_regs ();
4629 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4630 for a call to a function whose data type is FNTYPE.
4631 For a library call, FNTYPE is 0. */
4633 void
4634 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4635 tree fntype, /* tree ptr for function decl */
4636 rtx libname, /* SYMBOL_REF of library name or 0 */
4637 tree fndecl)
4639 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4640 memset (cum, 0, sizeof (*cum));
4642 cum->call_abi = ix86_function_type_abi (fntype);
4643 /* Set up the number of registers to use for passing arguments. */
4644 cum->nregs = ix86_regparm;
4645 if (TARGET_64BIT)
4647 if (cum->call_abi != DEFAULT_ABI)
4648 cum->nregs = DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX
4649 : X64_REGPARM_MAX;
4651 if (TARGET_SSE)
4653 cum->sse_nregs = SSE_REGPARM_MAX;
4654 if (TARGET_64BIT)
4656 if (cum->call_abi != DEFAULT_ABI)
4657 cum->sse_nregs = DEFAULT_ABI != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
4658 : X64_SSE_REGPARM_MAX;
4661 if (TARGET_MMX)
4662 cum->mmx_nregs = MMX_REGPARM_MAX;
4663 cum->warn_avx = true;
4664 cum->warn_sse = true;
4665 cum->warn_mmx = true;
4667 /* Because the type might mismatch between caller and callee, we need to
4668 use the actual type of the function for local calls.
4669 FIXME: cgraph_analyze can be told to actually record whether a function
4670 uses va_start, so for local functions maybe_vaarg can be made more
4671 aggressive, helping K&R code.
4672 FIXME: once the type system is fixed, we won't need this code anymore. */
4673 if (i && i->local)
4674 fntype = TREE_TYPE (fndecl);
4675 cum->maybe_vaarg = (fntype
4676 ? (!prototype_p (fntype) || stdarg_p (fntype))
4677 : !libname);
4679 if (!TARGET_64BIT)
4681 /* If there are variable arguments, then we won't pass anything
4682 in registers in 32-bit mode. */
4683 if (stdarg_p (fntype))
4685 cum->nregs = 0;
4686 cum->sse_nregs = 0;
4687 cum->mmx_nregs = 0;
4688 cum->warn_avx = 0;
4689 cum->warn_sse = 0;
4690 cum->warn_mmx = 0;
4691 return;
4694 /* Use the ecx and edx registers if the function has the fastcall
4695 attribute; otherwise look for regparm information. */
4696 if (fntype)
4698 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
4700 cum->nregs = 2;
4701 cum->fastcall = 1;
4703 else
4704 cum->nregs = ix86_function_regparm (fntype, fndecl);
4707 /* Set up the number of SSE registers used for passing SFmode
4708 and DFmode arguments. Warn for mismatching ABI. */
4709 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
4713 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
4714 But in the case of vector types, it is some vector mode.
4716 When we have only some of our vector isa extensions enabled, then there
4717 are some modes for which vector_mode_supported_p is false. For these
4718 modes, the generic vector support in gcc will choose some non-vector mode
4719 in order to implement the type. By computing the natural mode, we'll
4720 select the proper ABI location for the operand and not depend on whatever
4721 the middle-end decides to do with these vector types. */
4723 static enum machine_mode
4724 type_natural_mode (const_tree type)
4726 enum machine_mode mode = TYPE_MODE (type);
4728 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
4730 HOST_WIDE_INT size = int_size_in_bytes (type);
4731 if ((size == 8 || size == 16)
4732 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
4733 && TYPE_VECTOR_SUBPARTS (type) > 1)
4735 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
4737 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4738 mode = MIN_MODE_VECTOR_FLOAT;
4739 else
4740 mode = MIN_MODE_VECTOR_INT;
4742 /* Get the mode which has this inner mode and number of units. */
4743 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
4744 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
4745 && GET_MODE_INNER (mode) == innermode)
4746 return mode;
4748 gcc_unreachable ();
4752 return mode;
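/* E.g. for a hypothetical "typedef int v4si __attribute__
   ((vector_size (16)));" compiled without SSE enabled, TYPE_MODE is a
   non-vector fallback mode, but the search above still recovers
   V4SImode, so the ABI slot is chosen as if the vector ISA were
   available.  */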
4755 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
4756 this may not agree with the mode that the type system has chosen for the
4757 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
4758 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
4760 static rtx
4761 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
4762 unsigned int regno)
4764 rtx tmp;
4766 if (orig_mode != BLKmode)
4767 tmp = gen_rtx_REG (orig_mode, regno);
4768 else
4770 tmp = gen_rtx_REG (mode, regno);
4771 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
4772 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
4775 return tmp;
4778 /* x86-64 register passing implementation. See the x86-64 ABI for details.
4779 The goal of this code is to classify each eightbyte of an incoming argument
4780 by register class and assign registers accordingly. */
4782 /* Return the union class of CLASS1 and CLASS2.
4783 See the x86-64 PS ABI for details. */
4785 static enum x86_64_reg_class
4786 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
4788 /* Rule #1: If both classes are equal, this is the resulting class. */
4789 if (class1 == class2)
4790 return class1;
4792 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
4793 the other class. */
4794 if (class1 == X86_64_NO_CLASS)
4795 return class2;
4796 if (class2 == X86_64_NO_CLASS)
4797 return class1;
4799 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
4800 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
4801 return X86_64_MEMORY_CLASS;
4803 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
4804 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
4805 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
4806 return X86_64_INTEGERSI_CLASS;
4807 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
4808 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
4809 return X86_64_INTEGER_CLASS;
4811 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
4812 MEMORY is used. */
4813 if (class1 == X86_64_X87_CLASS
4814 || class1 == X86_64_X87UP_CLASS
4815 || class1 == X86_64_COMPLEX_X87_CLASS
4816 || class2 == X86_64_X87_CLASS
4817 || class2 == X86_64_X87UP_CLASS
4818 || class2 == X86_64_COMPLEX_X87_CLASS)
4819 return X86_64_MEMORY_CLASS;
4821 /* Rule #6: Otherwise class SSE is used. */
4822 return X86_64_SSE_CLASS;
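/* Worked example (hypothetical type): in "struct { int i; float f; }"
   both fields share one eightbyte; merging the int's INTEGERSI class
   with the float's SSE class hits rule #4 and yields INTEGER, so the
   whole struct is passed in a general-purpose register.  */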
4825 /* Classify the argument of type TYPE and mode MODE.
4826 CLASSES will be filled by the register class used to pass each word
4827 of the operand. The number of words is returned. In case the parameter
4828 should be passed in memory, 0 is returned. As a special case for
4829 zero-sized containers, classes[0] will be NO_CLASS and 1 is returned.
4831 BIT_OFFSET is used internally for handling records and specifies the
4832 offset in bits modulo 256 to avoid overflow cases.
4834 See the x86-64 PS ABI for details.
4837 static int
4838 classify_argument (enum machine_mode mode, const_tree type,
4839 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
4841 HOST_WIDE_INT bytes =
4842 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
4843 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4845 /* Variable sized entities are always passed/returned in memory. */
4846 if (bytes < 0)
4847 return 0;
4849 if (mode != VOIDmode
4850 && targetm.calls.must_pass_in_stack (mode, type))
4851 return 0;
4853 if (type && AGGREGATE_TYPE_P (type))
4855 int i;
4856 tree field;
4857 enum x86_64_reg_class subclasses[MAX_CLASSES];
4859 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
4860 if (bytes > 16)
4861 return 0;
4863 for (i = 0; i < words; i++)
4864 classes[i] = X86_64_NO_CLASS;
4866 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
4867 signal the memory class, so handle this as a special case. */
4868 if (!words)
4870 classes[0] = X86_64_NO_CLASS;
4871 return 1;
4874 /* Classify each field of record and merge classes. */
4875 switch (TREE_CODE (type))
4877 case RECORD_TYPE:
4878 /* And now merge the fields of structure. */
4879 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4881 if (TREE_CODE (field) == FIELD_DECL)
4883 int num;
4885 if (TREE_TYPE (field) == error_mark_node)
4886 continue;
4888 /* Bitfields are always classified as integer. Handle them
4889 early, since later code would consider them to be
4890 misaligned integers. */
4891 if (DECL_BIT_FIELD (field))
4893 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
4894 i < ((int_bit_position (field) + (bit_offset % 64))
4895 + tree_low_cst (DECL_SIZE (field), 0)
4896 + 63) / 8 / 8; i++)
4897 classes[i] =
4898 merge_classes (X86_64_INTEGER_CLASS,
4899 classes[i]);
4901 else
4903 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
4904 TREE_TYPE (field), subclasses,
4905 (int_bit_position (field)
4906 + bit_offset) % 256);
4907 if (!num)
4908 return 0;
4909 for (i = 0; i < num; i++)
4911 int pos =
4912 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
4913 classes[i + pos] =
4914 merge_classes (subclasses[i], classes[i + pos]);
4919 break;
4921 case ARRAY_TYPE:
4922 /* Arrays are handled as small records. */
4924 int num;
4925 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
4926 TREE_TYPE (type), subclasses, bit_offset);
4927 if (!num)
4928 return 0;
4930 /* The partial classes are now full classes. */
4931 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
4932 subclasses[0] = X86_64_SSE_CLASS;
4933 if (subclasses[0] == X86_64_INTEGERSI_CLASS
4934 && !((bit_offset % 64) == 0 && bytes == 4))
4935 subclasses[0] = X86_64_INTEGER_CLASS;
4937 for (i = 0; i < words; i++)
4938 classes[i] = subclasses[i % num];
4940 break;
4942 case UNION_TYPE:
4943 case QUAL_UNION_TYPE:
4944 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
4946 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4948 if (TREE_CODE (field) == FIELD_DECL)
4950 int num;
4952 if (TREE_TYPE (field) == error_mark_node)
4953 continue;
4955 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
4956 TREE_TYPE (field), subclasses,
4957 bit_offset);
4958 if (!num)
4959 return 0;
4960 for (i = 0; i < num; i++)
4961 classes[i] = merge_classes (subclasses[i], classes[i]);
4964 break;
4966 default:
4967 gcc_unreachable ();
4970 /* Final merger cleanup. */
4971 for (i = 0; i < words; i++)
4973 /* If one class is MEMORY, everything should be passed in
4974 memory. */
4975 if (classes[i] == X86_64_MEMORY_CLASS)
4976 return 0;
4978 /* The X86_64_SSEUP_CLASS should always be preceded by
4979 X86_64_SSE_CLASS. */
4980 if (classes[i] == X86_64_SSEUP_CLASS
4981 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
4982 classes[i] = X86_64_SSE_CLASS;
4984 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
4985 if (classes[i] == X86_64_X87UP_CLASS
4986 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
4987 classes[i] = X86_64_SSE_CLASS;
4989 return words;
4992 /* Compute the alignment needed. We align all types to natural boundaries,
4993 with the exception of XFmode, which is aligned to 64 bits. */
4994 if (mode != VOIDmode && mode != BLKmode)
4996 int mode_alignment = GET_MODE_BITSIZE (mode);
4998 if (mode == XFmode)
4999 mode_alignment = 128;
5000 else if (mode == XCmode)
5001 mode_alignment = 256;
5002 if (COMPLEX_MODE_P (mode))
5003 mode_alignment /= 2;
5004 /* Misaligned fields are always returned in memory. */
5005 if (bit_offset % mode_alignment)
5006 return 0;
5009 /* For V1xx modes, just use the base mode. */
5010 if (VECTOR_MODE_P (mode) && mode != V1DImode
5011 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5012 mode = GET_MODE_INNER (mode);
5014 /* Classification of atomic types. */
5015 switch (mode)
5017 case SDmode:
5018 case DDmode:
5019 classes[0] = X86_64_SSE_CLASS;
5020 return 1;
5021 case TDmode:
5022 classes[0] = X86_64_SSE_CLASS;
5023 classes[1] = X86_64_SSEUP_CLASS;
5024 return 2;
5025 case DImode:
5026 case SImode:
5027 case HImode:
5028 case QImode:
5029 case CSImode:
5030 case CHImode:
5031 case CQImode:
5032 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5033 classes[0] = X86_64_INTEGERSI_CLASS;
5034 else
5035 classes[0] = X86_64_INTEGER_CLASS;
5036 return 1;
5037 case CDImode:
5038 case TImode:
5039 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5040 return 2;
5041 case CTImode:
5042 case COImode:
5043 case OImode:
5044 return 0;
5045 case SFmode:
5046 if (!(bit_offset % 64))
5047 classes[0] = X86_64_SSESF_CLASS;
5048 else
5049 classes[0] = X86_64_SSE_CLASS;
5050 return 1;
5051 case DFmode:
5052 classes[0] = X86_64_SSEDF_CLASS;
5053 return 1;
5054 case XFmode:
5055 classes[0] = X86_64_X87_CLASS;
5056 classes[1] = X86_64_X87UP_CLASS;
5057 return 2;
5058 case TFmode:
5059 classes[0] = X86_64_SSE_CLASS;
5060 classes[1] = X86_64_SSEUP_CLASS;
5061 return 2;
5062 case SCmode:
5063 classes[0] = X86_64_SSE_CLASS;
5064 return 1;
5065 case DCmode:
5066 classes[0] = X86_64_SSEDF_CLASS;
5067 classes[1] = X86_64_SSEDF_CLASS;
5068 return 2;
5069 case XCmode:
5070 classes[0] = X86_64_COMPLEX_X87_CLASS;
5071 return 1;
5072 case TCmode:
5073 /* This mode is larger than 16 bytes. */
5074 return 0;
5075 case V8SFmode:
5076 case V8SImode:
5077 case V32QImode:
5078 case V16HImode:
5079 case V4DFmode:
5080 case V4DImode:
5081 classes[0] = X86_64_AVX_CLASS;
5082 return 1;
5083 case V4SFmode:
5084 case V4SImode:
5085 case V16QImode:
5086 case V8HImode:
5087 case V2DFmode:
5088 case V2DImode:
5089 classes[0] = X86_64_SSE_CLASS;
5090 classes[1] = X86_64_SSEUP_CLASS;
5091 return 2;
5092 case V1DImode:
5093 case V2SFmode:
5094 case V2SImode:
5095 case V4HImode:
5096 case V8QImode:
5097 classes[0] = X86_64_SSE_CLASS;
5098 return 1;
5099 case BLKmode:
5100 case VOIDmode:
5101 return 0;
5102 default:
5103 gcc_assert (VECTOR_MODE_P (mode));
5105 if (bytes > 16)
5106 return 0;
5108 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5110 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5111 classes[0] = X86_64_INTEGERSI_CLASS;
5112 else
5113 classes[0] = X86_64_INTEGER_CLASS;
5114 classes[1] = X86_64_INTEGER_CLASS;
5115 return 1 + (bytes > 8);
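/* Illustrative sketch (not part of the compiler): assuming the psABI
   classification implemented above, two hypothetical example types
   would classify as

       struct s1 { double d; long l; };
	 -> classes = { X86_64_SSEDF_CLASS, X86_64_INTEGER_CLASS }

       struct s2 { char c[20]; };
	 -> memory (larger than 16 bytes, so classify_argument returns 0)

   The struct names are invented for illustration only.  */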
5119 /* Examine the argument and set the number of registers required in each
5120 class. Return 0 iff the parameter should be passed in memory. */
5121 static int
5122 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5123 int *int_nregs, int *sse_nregs)
5125 enum x86_64_reg_class regclass[MAX_CLASSES];
5126 int n = classify_argument (mode, type, regclass, 0);
5128 *int_nregs = 0;
5129 *sse_nregs = 0;
5130 if (!n)
5131 return 0;
5132 for (n--; n >= 0; n--)
5133 switch (regclass[n])
5135 case X86_64_INTEGER_CLASS:
5136 case X86_64_INTEGERSI_CLASS:
5137 (*int_nregs)++;
5138 break;
5139 case X86_64_AVX_CLASS:
5140 case X86_64_SSE_CLASS:
5141 case X86_64_SSESF_CLASS:
5142 case X86_64_SSEDF_CLASS:
5143 (*sse_nregs)++;
5144 break;
5145 case X86_64_NO_CLASS:
5146 case X86_64_SSEUP_CLASS:
5147 break;
5148 case X86_64_X87_CLASS:
5149 case X86_64_X87UP_CLASS:
5150 if (!in_return)
5151 return 0;
5152 break;
5153 case X86_64_COMPLEX_X87_CLASS:
5154 return in_return ? 2 : 0;
5155 case X86_64_MEMORY_CLASS:
5156 gcc_unreachable ();
5158 return 1;
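/* For illustration (assumed behavior, following the loop above): a type
   classified as { X86_64_SSEDF_CLASS, X86_64_INTEGER_CLASS } leaves
   *sse_nregs == 1 and *int_nregs == 1 and returns 1, while any X87
   class seen in an argument position (in_return == 0) forces a return
   of 0, i.e. the value must be passed in memory.  */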
5161 /* Construct container for the argument used by GCC interface. See
5162 FUNCTION_ARG for the detailed description. */
5164 static rtx
5165 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5166 const_tree type, int in_return, int nintregs, int nsseregs,
5167 const int *intreg, int sse_regno)
5169 /* The following variables hold the static issued_error state. */
5170 static bool issued_sse_arg_error;
5171 static bool issued_sse_ret_error;
5172 static bool issued_x87_ret_error;
5174 enum machine_mode tmpmode;
5175 int bytes =
5176 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5177 enum x86_64_reg_class regclass[MAX_CLASSES];
5178 int n;
5179 int i;
5180 int nexps = 0;
5181 int needed_sseregs, needed_intregs;
5182 rtx exp[MAX_CLASSES];
5183 rtx ret;
5185 n = classify_argument (mode, type, regclass, 0);
5186 if (!n)
5187 return NULL;
5188 if (!examine_argument (mode, type, in_return, &needed_intregs,
5189 &needed_sseregs))
5190 return NULL;
5191 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5192 return NULL;
5194 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5195 some less clueful developer tries to use floating-point anyway. */
5196 if (needed_sseregs && !TARGET_SSE)
5198 if (in_return)
5200 if (!issued_sse_ret_error)
5202 error ("SSE register return with SSE disabled");
5203 issued_sse_ret_error = true;
5206 else if (!issued_sse_arg_error)
5208 error ("SSE register argument with SSE disabled");
5209 issued_sse_arg_error = true;
5211 return NULL;
5214 /* Likewise, error if the ABI requires us to return values in the
5215 x87 registers and the user specified -mno-80387. */
5216 if (!TARGET_80387 && in_return)
5217 for (i = 0; i < n; i++)
5218 if (regclass[i] == X86_64_X87_CLASS
5219 || regclass[i] == X86_64_X87UP_CLASS
5220 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5222 if (!issued_x87_ret_error)
5224 error ("x87 register return with x87 disabled");
5225 issued_x87_ret_error = true;
5227 return NULL;
5230 /* First construct the simple cases. Avoid SCmode, since we want to use
5231 a single register to pass this type. */
5232 if (n == 1 && mode != SCmode)
5233 switch (regclass[0])
5235 case X86_64_INTEGER_CLASS:
5236 case X86_64_INTEGERSI_CLASS:
5237 return gen_rtx_REG (mode, intreg[0]);
5238 case X86_64_AVX_CLASS:
5239 case X86_64_SSE_CLASS:
5240 case X86_64_SSESF_CLASS:
5241 case X86_64_SSEDF_CLASS:
5242 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
5243 case X86_64_X87_CLASS:
5244 case X86_64_COMPLEX_X87_CLASS:
5245 return gen_rtx_REG (mode, FIRST_STACK_REG);
5246 case X86_64_NO_CLASS:
5247 /* Zero sized array, struct or class. */
5248 return NULL;
5249 default:
5250 gcc_unreachable ();
5252 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5253 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5254 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5256 if (n == 2
5257 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5258 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5259 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5260 && regclass[1] == X86_64_INTEGER_CLASS
5261 && (mode == CDImode || mode == TImode || mode == TFmode)
5262 && intreg[0] + 1 == intreg[1])
5263 return gen_rtx_REG (mode, intreg[0]);
5265 /* Otherwise figure out the entries of the PARALLEL. */
5266 for (i = 0; i < n; i++)
5268 switch (regclass[i])
5270 case X86_64_NO_CLASS:
5271 break;
5272 case X86_64_INTEGER_CLASS:
5273 case X86_64_INTEGERSI_CLASS:
5274 /* Merge TImodes on aligned occasions here too. */
5275 if (i * 8 + 8 > bytes)
5276 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5277 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5278 tmpmode = SImode;
5279 else
5280 tmpmode = DImode;
5281 /* We've requested 24 bytes for which there is no mode. Use DImode. */
5282 if (tmpmode == BLKmode)
5283 tmpmode = DImode;
5284 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5285 gen_rtx_REG (tmpmode, *intreg),
5286 GEN_INT (i*8));
5287 intreg++;
5288 break;
5289 case X86_64_SSESF_CLASS:
5290 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5291 gen_rtx_REG (SFmode,
5292 SSE_REGNO (sse_regno)),
5293 GEN_INT (i*8));
5294 sse_regno++;
5295 break;
5296 case X86_64_SSEDF_CLASS:
5297 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5298 gen_rtx_REG (DFmode,
5299 SSE_REGNO (sse_regno)),
5300 GEN_INT (i*8));
5301 sse_regno++;
5302 break;
5303 case X86_64_SSE_CLASS:
5304 if (i < n - 1 && regclass[i + 1] == X86_64_SSEUP_CLASS)
5305 tmpmode = TImode;
5306 else
5307 tmpmode = DImode;
5308 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5309 gen_rtx_REG (tmpmode,
5310 SSE_REGNO (sse_regno)),
5311 GEN_INT (i*8));
5312 if (tmpmode == TImode)
5313 i++;
5314 sse_regno++;
5315 break;
5316 default:
5317 gcc_unreachable ();
5321 /* Empty aligned struct, union or class. */
5322 if (nexps == 0)
5323 return NULL;
5325 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5326 for (i = 0; i < nexps; i++)
5327 XVECEXP (ret, 0, i) = exp [i];
5328 return ret;
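/* Sketch of a possible result (an assumption, not verified compiler
   output): for the hypothetical struct { double d; long l; } with
   %xmm0 and %rdi available, the PARALLEL built above would print
   roughly as

       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
		  (expr_list (reg:DI di) (const_int 8))])

   i.e. one EXPR_LIST per eightbyte, each tagged with its byte offset
   into the argument.  */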
5331 /* Update the data in CUM to advance over an argument of mode MODE
5332 and data type TYPE. (TYPE is null for libcalls where that information
5333 may not be available.) */
5335 static void
5336 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5337 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5339 switch (mode)
5341 default:
5342 break;
5344 case BLKmode:
5345 if (bytes < 0)
5346 break;
5347 /* FALLTHRU */
5349 case DImode:
5350 case SImode:
5351 case HImode:
5352 case QImode:
5353 cum->words += words;
5354 cum->nregs -= words;
5355 cum->regno += words;
5357 if (cum->nregs <= 0)
5359 cum->nregs = 0;
5360 cum->regno = 0;
5362 break;
5364 case DFmode:
5365 if (cum->float_in_sse < 2)
5366 break;
5367 case SFmode:
5368 if (cum->float_in_sse < 1)
5369 break;
5370 /* FALLTHRU */
5372 case OImode:
5373 case V8SFmode:
5374 case V8SImode:
5375 case V32QImode:
5376 case V16HImode:
5377 case V4DFmode:
5378 case V4DImode:
5379 case TImode:
5380 case V16QImode:
5381 case V8HImode:
5382 case V4SImode:
5383 case V2DImode:
5384 case V4SFmode:
5385 case V2DFmode:
5386 if (!type || !AGGREGATE_TYPE_P (type))
5388 cum->sse_words += words;
5389 cum->sse_nregs -= 1;
5390 cum->sse_regno += 1;
5391 if (cum->sse_nregs <= 0)
5393 cum->sse_nregs = 0;
5394 cum->sse_regno = 0;
5397 break;
5399 case V8QImode:
5400 case V4HImode:
5401 case V2SImode:
5402 case V2SFmode:
5403 case V1DImode:
5404 if (!type || !AGGREGATE_TYPE_P (type))
5406 cum->mmx_words += words;
5407 cum->mmx_nregs -= 1;
5408 cum->mmx_regno += 1;
5409 if (cum->mmx_nregs <= 0)
5411 cum->mmx_nregs = 0;
5412 cum->mmx_regno = 0;
5415 break;
5419 static void
5420 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5421 tree type, HOST_WIDE_INT words, int named)
5423 int int_nregs, sse_nregs;
5425 /* Unnamed 256bit vector mode parameters are passed on stack. */
5426 if (!named && VALID_AVX256_REG_MODE (mode))
5427 return;
5429 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5430 cum->words += words;
5431 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5433 cum->nregs -= int_nregs;
5434 cum->sse_nregs -= sse_nregs;
5435 cum->regno += int_nregs;
5436 cum->sse_regno += sse_nregs;
5438 else
5439 cum->words += words;
5442 static void
5443 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5444 HOST_WIDE_INT words)
5446 /* Otherwise, this should be passed indirectly. */
5447 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5449 cum->words += words;
5450 if (cum->nregs > 0)
5452 cum->nregs -= 1;
5453 cum->regno += 1;
5457 void
5458 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5459 tree type, int named)
5461 HOST_WIDE_INT bytes, words;
5463 if (mode == BLKmode)
5464 bytes = int_size_in_bytes (type);
5465 else
5466 bytes = GET_MODE_SIZE (mode);
5467 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5469 if (type)
5470 mode = type_natural_mode (type);
5472 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5473 function_arg_advance_ms_64 (cum, bytes, words);
5474 else if (TARGET_64BIT)
5475 function_arg_advance_64 (cum, mode, type, words, named);
5476 else
5477 function_arg_advance_32 (cum, mode, type, bytes, words);
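/* Worked example (illustrative): a 12-byte BLKmode argument on a
   64-bit target yields words = (12 + 8 - 1) / 8 = 2, so the SysV path
   above either consumes the registers reported by examine_argument or,
   failing that, advances cum->words by two stack words.  */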
5480 /* Define where to put the arguments to a function.
5481 Value is zero to push the argument on the stack,
5482 or a hard register in which to store the argument.
5484 MODE is the argument's machine mode.
5485 TYPE is the data type of the argument (as a tree).
5486 This is null for libcalls where that information may
5487 not be available.
5488 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5489 the preceding args and about the function being called.
5490 NAMED is nonzero if this argument is a named parameter
5491 (otherwise it is an extra parameter matching an ellipsis). */
5493 static rtx
5494 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5495 enum machine_mode orig_mode, tree type,
5496 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5498 static bool warnedavx, warnedsse, warnedmmx;
5500 /* Avoid the AL settings for the Unix64 ABI. */
5501 if (mode == VOIDmode)
5502 return constm1_rtx;
5504 switch (mode)
5506 default:
5507 break;
5509 case BLKmode:
5510 if (bytes < 0)
5511 break;
5512 /* FALLTHRU */
5513 case DImode:
5514 case SImode:
5515 case HImode:
5516 case QImode:
5517 if (words <= cum->nregs)
5519 int regno = cum->regno;
5521 /* Fastcall allocates the first two DWORD (SImode) or
5522 smaller arguments to ECX and EDX if they aren't
5523 aggregate types. */
5524 if (cum->fastcall)
5526 if (mode == BLKmode
5527 || mode == DImode
5528 || (type && AGGREGATE_TYPE_P (type)))
5529 break;
5531 /* ECX, not EAX, is the first allocated register. */
5532 if (regno == AX_REG)
5533 regno = CX_REG;
5535 return gen_rtx_REG (mode, regno);
5537 break;
5539 case DFmode:
5540 if (cum->float_in_sse < 2)
5541 break;
5542 case SFmode:
5543 if (cum->float_in_sse < 1)
5544 break;
5545 /* FALLTHRU */
5546 case TImode:
5547 /* In 32bit, we pass TImode in xmm registers. */
5548 case V16QImode:
5549 case V8HImode:
5550 case V4SImode:
5551 case V2DImode:
5552 case V4SFmode:
5553 case V2DFmode:
5554 if (!type || !AGGREGATE_TYPE_P (type))
5556 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
5558 warnedsse = true;
5559 warning (0, "SSE vector argument without SSE enabled "
5560 "changes the ABI");
5562 if (cum->sse_nregs)
5563 return gen_reg_or_parallel (mode, orig_mode,
5564 cum->sse_regno + FIRST_SSE_REG);
5566 break;
5568 case OImode:
5569 /* In 32bit, we pass OImode in ymm registers. */
5570 case V8SFmode:
5571 case V8SImode:
5572 case V32QImode:
5573 case V16HImode:
5574 case V4DFmode:
5575 case V4DImode:
5576 if (!type || !AGGREGATE_TYPE_P (type))
5578 if (!TARGET_AVX && !warnedavx && cum->warn_avx)
5580 warnedavx = true;
5581 warning (0, "AVX vector argument without AVX enabled "
5582 "changes the ABI");
5584 if (cum->sse_nregs)
5585 return gen_reg_or_parallel (mode, orig_mode,
5586 cum->sse_regno + FIRST_SSE_REG);
5588 break;
5590 case V8QImode:
5591 case V4HImode:
5592 case V2SImode:
5593 case V2SFmode:
5594 case V1DImode:
5595 if (!type || !AGGREGATE_TYPE_P (type))
5597 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
5599 warnedmmx = true;
5600 warning (0, "MMX vector argument without MMX enabled "
5601 "changes the ABI");
5603 if (cum->mmx_nregs)
5604 return gen_reg_or_parallel (mode, orig_mode,
5605 cum->mmx_regno + FIRST_MMX_REG);
5607 break;
5610 return NULL_RTX;
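/* Illustrative example (assumed, per the fastcall branch above): for

       __attribute__((fastcall)) int f (int a, int b, int c);

   the SImode arguments a and b land in ECX and EDX respectively, and c
   goes on the stack, since only the first two DWORD-or-smaller
   non-aggregate arguments are allocated to registers.  */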
5613 static rtx
5614 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5615 enum machine_mode orig_mode, tree type, int named)
5617 static bool warnedavx;
5619 /* Handle a hidden AL argument containing number of registers
5620 for varargs x86-64 functions. */
5621 if (mode == VOIDmode)
5622 return GEN_INT (cum->maybe_vaarg
5623 ? (cum->sse_nregs < 0
5624 ? (cum->call_abi == DEFAULT_ABI
5625 ? SSE_REGPARM_MAX
5626 : (DEFAULT_ABI != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
5627 : X64_SSE_REGPARM_MAX))
5628 : cum->sse_regno)
5629 : -1);
5631 switch (mode)
5633 default:
5634 break;
5636 case V8SFmode:
5637 case V8SImode:
5638 case V32QImode:
5639 case V16HImode:
5640 case V4DFmode:
5641 case V4DImode:
5642 /* In 64-bit mode, we pass TImode in integer registers and OImode on the
5643 stack. */
5644 if (!type || !AGGREGATE_TYPE_P (type))
5646 if (!TARGET_AVX && !warnedavx && cum->warn_avx)
5648 warnedavx = true;
5649 warning (0, "AVX vector argument without AVX enabled "
5650 "changes the ABI");
5654 /* Unnamed 256bit vector mode parameters are passed on stack. */
5655 if (!named)
5656 return NULL;
5657 break;
5660 return construct_container (mode, orig_mode, type, 0, cum->nregs,
5661 cum->sse_nregs,
5662 &x86_64_int_parameter_registers [cum->regno],
5663 cum->sse_regno);
5666 static rtx
5667 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5668 enum machine_mode orig_mode, int named,
5669 HOST_WIDE_INT bytes)
5671 unsigned int regno;
5673 /* Avoid the AL settings for the Unix64 ABI. */
5674 if (mode == VOIDmode)
5675 return constm1_rtx;
5677 /* If we've run out of registers, it goes on the stack. */
5678 if (cum->nregs == 0)
5679 return NULL_RTX;
5681 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
5683 /* Only floating point modes are passed in anything but integer regs. */
5684 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
5686 if (named)
5687 regno = cum->regno + FIRST_SSE_REG;
5688 else
5690 rtx t1, t2;
5692 /* Unnamed floating parameters are passed in both the
5693 SSE and integer registers. */
5694 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
5695 t2 = gen_rtx_REG (mode, regno);
5696 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
5697 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
5698 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
5701 /* Handle aggregated types passed in register. */
5702 if (orig_mode == BLKmode)
5704 if (bytes > 0 && bytes <= 8)
5705 mode = (bytes > 4 ? DImode : SImode);
5706 if (mode == BLKmode)
5707 mode = DImode;
5710 return gen_reg_or_parallel (mode, orig_mode, regno);
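/* Sketch of the BLKmode narrowing above (illustration only): a 4-byte
   aggregate is retargeted to SImode and an 8-byte one to DImode, so it
   travels in a general register; sizes other than 1, 2, 4 or 8 never
   reach this point because ix86_pass_by_reference already forced them
   to be passed as pointers.  */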
5714 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
5715 tree type, int named)
5717 enum machine_mode mode = omode;
5718 HOST_WIDE_INT bytes, words;
5720 if (mode == BLKmode)
5721 bytes = int_size_in_bytes (type);
5722 else
5723 bytes = GET_MODE_SIZE (mode);
5724 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5726 /* To simplify the code below, represent vector types with a vector mode
5727 even if MMX/SSE are not active. */
5728 if (type && TREE_CODE (type) == VECTOR_TYPE)
5729 mode = type_natural_mode (type);
5731 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5732 return function_arg_ms_64 (cum, mode, omode, named, bytes);
5733 else if (TARGET_64BIT)
5734 return function_arg_64 (cum, mode, omode, type, named);
5735 else
5736 return function_arg_32 (cum, mode, omode, type, bytes, words);
5739 /* A C expression that indicates when an argument must be passed by
5740 reference. If nonzero for an argument, a copy of that argument is
5741 made in memory and a pointer to the argument is passed instead of
5742 the argument itself. The pointer is passed in whatever way is
5743 appropriate for passing a pointer to that type. */
5745 static bool
5746 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5747 enum machine_mode mode ATTRIBUTE_UNUSED,
5748 const_tree type, bool named ATTRIBUTE_UNUSED)
5750 /* See Windows x64 Software Convention. */
5751 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5753 int msize = (int) GET_MODE_SIZE (mode);
5754 if (type)
5756 /* Arrays are passed by reference. */
5757 if (TREE_CODE (type) == ARRAY_TYPE)
5758 return true;
5760 if (AGGREGATE_TYPE_P (type))
5762 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
5763 are passed by reference. */
5764 msize = int_size_in_bytes (type);
5768 /* __m128 is passed by reference. */
5769 switch (msize) {
5770 case 1: case 2: case 4: case 8:
5771 break;
5772 default:
5773 return true;
5776 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
5777 return 1;
5779 return 0;
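/* Examples of the Windows x64 rule above (illustrative): a 3-byte
   struct or a 16-byte __m128 is passed by reference because its size
   is not 1, 2, 4 or 8; any array type is passed by reference
   unconditionally; an 8-byte struct is passed by value in a register.  */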
5782 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
5783 ABI. */
5784 static bool
5785 contains_aligned_value_p (tree type)
5787 enum machine_mode mode = TYPE_MODE (type);
5788 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
5789 || mode == TDmode
5790 || mode == TFmode
5791 || mode == TCmode)
5792 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
5793 return true;
5794 if (TYPE_ALIGN (type) < 128)
5795 return false;
5797 if (AGGREGATE_TYPE_P (type))
5799 /* Walk the aggregates recursively. */
5800 switch (TREE_CODE (type))
5802 case RECORD_TYPE:
5803 case UNION_TYPE:
5804 case QUAL_UNION_TYPE:
5806 tree field;
5808 /* Walk all the structure fields. */
5809 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5811 if (TREE_CODE (field) == FIELD_DECL
5812 && contains_aligned_value_p (TREE_TYPE (field)))
5813 return true;
5815 break;
5818 case ARRAY_TYPE:
5819 /* Just for use if some languages pass arrays by value. */
5820 if (contains_aligned_value_p (TREE_TYPE (type)))
5821 return true;
5822 break;
5824 default:
5825 gcc_unreachable ();
5828 return false;
5831 /* Gives the alignment boundary, in bits, of an argument with the
5832 specified mode and type. */
5835 ix86_function_arg_boundary (enum machine_mode mode, tree type)
5837 int align;
5838 if (type)
5840 /* Since canonical type is used for call, we convert it to
5841 canonical type if needed. */
5842 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
5843 type = TYPE_CANONICAL (type);
5844 align = TYPE_ALIGN (type);
5846 else
5847 align = GET_MODE_ALIGNMENT (mode);
5848 if (align < PARM_BOUNDARY)
5849 align = PARM_BOUNDARY;
5850 /* In 32bit, only _Decimal128 and __float128 are aligned to their
5851 natural boundaries. */
5852 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
5854 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
5855 make an exception for SSE modes since these require 128bit
5856 alignment.
5858 The handling here differs from field_alignment. ICC aligns MMX
5859 arguments to 4 byte boundaries, while structure fields are aligned
5860 to 8 byte boundaries. */
5861 if (!type)
5863 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
5864 align = PARM_BOUNDARY;
5866 else
5868 if (!contains_aligned_value_p (type))
5869 align = PARM_BOUNDARY;
5872 if (align > BIGGEST_ALIGNMENT)
5873 align = BIGGEST_ALIGNMENT;
5874 return align;
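/* Worked example (illustrative): on a 32-bit target a double argument
   has GET_MODE_ALIGNMENT == 64 but is demoted to PARM_BOUNDARY (32)
   by the branch above, while an SSE mode such as V4SF keeps its
   128-bit boundary because SSE_REG_MODE_P holds for it.  */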
5877 /* Return true if N is a possible register number of function value. */
5879 bool
5880 ix86_function_value_regno_p (int regno)
5882 switch (regno)
5884 case 0:
5885 return true;
5887 case FIRST_FLOAT_REG:
5888 /* TODO: The function should depend on current function ABI but
5889 builtins.c would need updating then. Therefore we use the
5890 default ABI. */
5891 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
5892 return false;
5893 return TARGET_FLOAT_RETURNS_IN_80387;
5895 case FIRST_SSE_REG:
5896 return TARGET_SSE;
5898 case FIRST_MMX_REG:
5899 if (TARGET_MACHO || TARGET_64BIT)
5900 return false;
5901 return TARGET_MMX;
5904 return false;
5907 /* Define how to find the value returned by a function.
5908 VALTYPE is the data type of the value (as a tree).
5909 If the precise function being called is known, FUNC is its FUNCTION_DECL;
5910 otherwise, FUNC is 0. */
5912 static rtx
5913 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
5914 const_tree fntype, const_tree fn)
5916 unsigned int regno;
5918 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
5919 we normally prevent this case when mmx is not available. However
5920 some ABIs may require the result to be returned like DImode. */
5921 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
5922 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
5924 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
5925 we prevent this case when sse is not available. However some ABIs
5926 may require the result to be returned like integer TImode. */
5927 else if (mode == TImode
5928 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
5929 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
5931 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
5932 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
5933 regno = FIRST_FLOAT_REG;
5934 else
5935 /* Most things go in %eax. */
5936 regno = AX_REG;
5938 /* Override FP return register with %xmm0 for local functions when
5939 SSE math is enabled or for functions with sseregparm attribute. */
5940 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
5942 int sse_level = ix86_function_sseregparm (fntype, fn, false);
5943 if ((sse_level >= 1 && mode == SFmode)
5944 || (sse_level == 2 && mode == DFmode))
5945 regno = FIRST_SSE_REG;
5948 return gen_rtx_REG (orig_mode, regno);
5951 static rtx
5952 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
5953 const_tree valtype)
5955 rtx ret;
5957 /* Handle libcalls, which don't provide a type node. */
5958 if (valtype == NULL)
5960 switch (mode)
5962 case SFmode:
5963 case SCmode:
5964 case DFmode:
5965 case DCmode:
5966 case TFmode:
5967 case SDmode:
5968 case DDmode:
5969 case TDmode:
5970 return gen_rtx_REG (mode, FIRST_SSE_REG);
5971 case XFmode:
5972 case XCmode:
5973 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
5974 case TCmode:
5975 return NULL;
5976 default:
5977 return gen_rtx_REG (mode, AX_REG);
5981 ret = construct_container (mode, orig_mode, valtype, 1,
5982 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
5983 x86_64_int_return_registers, 0);
5985 /* For zero-sized structures, construct_container returns NULL, but we
5986 need to keep the rest of the compiler happy by returning a meaningful value. */
5987 if (!ret)
5988 ret = gen_rtx_REG (orig_mode, AX_REG);
5990 return ret;
5993 static rtx
5994 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
5996 unsigned int regno = AX_REG;
5998 if (TARGET_SSE)
6000 switch (GET_MODE_SIZE (mode))
6002 case 16:
6003 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6004 && !COMPLEX_MODE_P (mode))
6005 regno = FIRST_SSE_REG;
6006 break;
6007 case 8:
6008 case 4:
6009 if (mode == SFmode || mode == DFmode)
6010 regno = FIRST_SSE_REG;
6011 break;
6012 default:
6013 break;
6016 return gen_rtx_REG (orig_mode, regno);
6019 static rtx
6020 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6021 enum machine_mode orig_mode, enum machine_mode mode)
6023 const_tree fn, fntype;
6025 fn = NULL_TREE;
6026 if (fntype_or_decl && DECL_P (fntype_or_decl))
6027 fn = fntype_or_decl;
6028 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6030 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6031 return function_value_ms_64 (orig_mode, mode);
6032 else if (TARGET_64BIT)
6033 return function_value_64 (orig_mode, mode, valtype);
6034 else
6035 return function_value_32 (orig_mode, mode, fntype, fn);
6038 static rtx
6039 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6040 bool outgoing ATTRIBUTE_UNUSED)
6042 enum machine_mode mode, orig_mode;
6044 orig_mode = TYPE_MODE (valtype);
6045 mode = type_natural_mode (valtype);
6046 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6050 ix86_libcall_value (enum machine_mode mode)
6052 return ix86_function_value_1 (NULL, NULL, mode, mode);
6055 /* Return true iff type is returned in memory. */
6057 static int ATTRIBUTE_UNUSED
6058 return_in_memory_32 (const_tree type, enum machine_mode mode)
6060 HOST_WIDE_INT size;
6062 if (mode == BLKmode)
6063 return 1;
6065 size = int_size_in_bytes (type);
6067 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6068 return 0;
6070 if (VECTOR_MODE_P (mode) || mode == TImode)
6072 /* User-created vectors small enough to fit in EAX. */
6073 if (size < 8)
6074 return 0;
6076 /* MMX/3dNow values are returned in MM0,
6077 except when it doesn't exist. */
6078 if (size == 8)
6079 return (TARGET_MMX ? 0 : 1);
6081 /* SSE values are returned in XMM0, except when it doesn't exist. */
6082 if (size == 16)
6083 return (TARGET_SSE ? 0 : 1);
6086 if (mode == XFmode)
6087 return 0;
6089 if (size > 12)
6090 return 1;
6091 return 0;
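/* Examples of the 32-bit rules above (illustrative): a 16-byte vector
   is returned in XMM0 only when TARGET_SSE, an 8-byte vector in MM0
   only when TARGET_MMX, XFmode always comes back in a register, and
   any other type larger than 12 bytes goes to memory.  */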
6094 static int ATTRIBUTE_UNUSED
6095 return_in_memory_64 (const_tree type, enum machine_mode mode)
6097 int needed_intregs, needed_sseregs;
6098 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6101 static int ATTRIBUTE_UNUSED
6102 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6104 HOST_WIDE_INT size = int_size_in_bytes (type);
6106 /* __m128 is returned in xmm0. */
6107 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6108 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6109 return 0;
6111 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6112 return (size != 1 && size != 2 && size != 4 && size != 8);
6115 static bool
6116 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6118 #ifdef SUBTARGET_RETURN_IN_MEMORY
6119 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6120 #else
6121 const enum machine_mode mode = type_natural_mode (type);
6123 if (TARGET_64BIT_MS_ABI)
6124 return return_in_memory_ms_64 (type, mode);
6125 else if (TARGET_64BIT)
6126 return return_in_memory_64 (type, mode);
6127 else
6128 return return_in_memory_32 (type, mode);
6129 #endif
6132 /* Return false iff TYPE is returned in memory. This version is used
6133 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6134 but differs notably in that when MMX is available, 8-byte vectors
6135 are returned in memory, rather than in MMX registers. */
6137 bool
6138 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6140 int size;
6141 enum machine_mode mode = type_natural_mode (type);
6143 if (TARGET_64BIT)
6144 return return_in_memory_64 (type, mode);
6146 if (mode == BLKmode)
6147 return 1;
6149 size = int_size_in_bytes (type);
6151 if (VECTOR_MODE_P (mode))
6153 /* Return in memory only if MMX registers *are* available. This
6154 seems backwards, but it is consistent with the existing
6155 Solaris x86 ABI. */
6156 if (size == 8)
6157 return TARGET_MMX;
6158 if (size == 16)
6159 return !TARGET_SSE;
6161 else if (mode == TImode)
6162 return !TARGET_SSE;
6163 else if (mode == XFmode)
6164 return 0;
6166 return size > 12;
6169 /* When returning SSE vector types, we have a choice of either
6170 (1) being abi incompatible with a -march switch, or
6171 (2) generating an error.
6172 Given no good solution, I think the safest thing is one warning.
6173 The user won't be able to use -Werror, but....
6175 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6176 called in response to actually generating a caller or callee that
6177 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6178 via aggregate_value_p for general type probing from tree-ssa. */
6180 static rtx
6181 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6183 static bool warnedsse, warnedmmx;
6185 if (!TARGET_64BIT && type)
6187 /* Look at the return type of the function, not the function type. */
6188 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6190 if (!TARGET_SSE && !warnedsse)
6192 if (mode == TImode
6193 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6195 warnedsse = true;
6196 warning (0, "SSE vector return without SSE enabled "
6197 "changes the ABI");
6201 if (!TARGET_MMX && !warnedmmx)
6203 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6205 warnedmmx = true;
6206 warning (0, "MMX vector return without MMX enabled "
6207 "changes the ABI");
6212 return NULL;
6216 /* Create the va_list data type. */
6218 /* Returns the calling-convention-specific va_list data type.
6219 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6221 static tree
6222 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6224 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6226 /* For i386 we use plain pointer to argument area. */
6227 if (!TARGET_64BIT || abi == MS_ABI)
6228 return build_pointer_type (char_type_node);
6230 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6231 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
6233 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
6234 unsigned_type_node);
6235 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
6236 unsigned_type_node);
6237 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
6238 ptr_type_node);
6239 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
6240 ptr_type_node);
6242 va_list_gpr_counter_field = f_gpr;
6243 va_list_fpr_counter_field = f_fpr;
6245 DECL_FIELD_CONTEXT (f_gpr) = record;
6246 DECL_FIELD_CONTEXT (f_fpr) = record;
6247 DECL_FIELD_CONTEXT (f_ovf) = record;
6248 DECL_FIELD_CONTEXT (f_sav) = record;
6250 TREE_CHAIN (record) = type_decl;
6251 TYPE_NAME (record) = type_decl;
6252 TYPE_FIELDS (record) = f_gpr;
6253 TREE_CHAIN (f_gpr) = f_fpr;
6254 TREE_CHAIN (f_fpr) = f_ovf;
6255 TREE_CHAIN (f_ovf) = f_sav;
6257 layout_type (record);
6259 /* The correct type is an array type of one element. */
6260 return build_array_type (record, build_index_type (size_zero_node));
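/* The record built above corresponds to the familiar SysV x86-64
   va_list declaration (shown for orientation; the field order matters
   to va_arg):

       typedef struct __va_list_tag {
	 unsigned int gp_offset;
	 unsigned int fp_offset;
	 void *overflow_arg_area;
	 void *reg_save_area;
       } va_list[1];
*/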
6263 /* Setup the builtin va_list data type and for 64-bit the additional
6264 calling convention specific va_list data types. */
6266 static tree
6267 ix86_build_builtin_va_list (void)
6269 tree ret = ix86_build_builtin_va_list_abi (DEFAULT_ABI);
6271 /* Initialize abi specific va_list builtin types. */
6272 if (TARGET_64BIT)
6274 tree t;
6275 if (DEFAULT_ABI == MS_ABI)
6277 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6278 if (TREE_CODE (t) != RECORD_TYPE)
6279 t = build_variant_type_copy (t);
6280 sysv_va_list_type_node = t;
6282 else
6284 t = ret;
6285 if (TREE_CODE (t) != RECORD_TYPE)
6286 t = build_variant_type_copy (t);
6287 sysv_va_list_type_node = t;
6289 if (DEFAULT_ABI != MS_ABI)
6291 t = ix86_build_builtin_va_list_abi (MS_ABI);
6292 if (TREE_CODE (t) != RECORD_TYPE)
6293 t = build_variant_type_copy (t);
6294 ms_va_list_type_node = t;
6296 else
6298 t = ret;
6299 if (TREE_CODE (t) != RECORD_TYPE)
6300 t = build_variant_type_copy (t);
6301 ms_va_list_type_node = t;
6305 return ret;
6308 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6310 static void
6311 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6313 rtx save_area, mem;
6314 rtx label;
6315 rtx label_ref;
6316 rtx tmp_reg;
6317 rtx nsse_reg;
6318 alias_set_type set;
6319 int i;
6320 int regparm = ix86_regparm;
6322 if (cum->call_abi != DEFAULT_ABI)
6323 regparm = DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX : X64_REGPARM_MAX;
6325 /* GPR size of varargs save area. */
6326 if (cfun->va_list_gpr_size)
6327 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6328 else
6329 ix86_varargs_gpr_size = 0;
6331 /* FPR size of varargs save area. We don't need it if we don't pass
6332 anything in SSE registers. */
6333 if (cum->sse_nregs && cfun->va_list_fpr_size)
6334 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6335 else
6336 ix86_varargs_fpr_size = 0;
6338 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6339 return;
6341 save_area = frame_pointer_rtx;
6342 set = get_varargs_alias_set ();
6344 for (i = cum->regno;
6345 i < regparm
6346 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6347 i++)
6349 mem = gen_rtx_MEM (Pmode,
6350 plus_constant (save_area, i * UNITS_PER_WORD));
6351 MEM_NOTRAP_P (mem) = 1;
6352 set_mem_alias_set (mem, set);
6353 emit_move_insn (mem, gen_rtx_REG (Pmode,
6354 x86_64_int_parameter_registers[i]));
6357 if (ix86_varargs_fpr_size)
6359 /* Now emit code to save the SSE registers. The AX parameter contains the
6360 number of SSE parameter registers used to call this function. We use the
6361 sse_prologue_save insn template, which produces a computed jump across
6362 the SSE saves. We need some preparation work to get this working. */
6364 label = gen_label_rtx ();
6365 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6367 /* Compute the address to jump to:
6368 label - eax*4 + nnamed_sse_arguments*4, or
6369 label - eax*5 + nnamed_sse_arguments*5 for AVX. */
6370 tmp_reg = gen_reg_rtx (Pmode);
6371 nsse_reg = gen_reg_rtx (Pmode);
6372 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6373 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6374 gen_rtx_MULT (Pmode, nsse_reg,
6375 GEN_INT (4))));
6377 /* vmovaps is one byte longer than movaps. */
6378 if (TARGET_AVX)
6379 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6380 gen_rtx_PLUS (Pmode, tmp_reg,
6381 nsse_reg)));
6383 if (cum->sse_regno)
6384 emit_move_insn
6385 (nsse_reg,
6386 gen_rtx_CONST (DImode,
6387 gen_rtx_PLUS (DImode,
6388 label_ref,
6389 GEN_INT (cum->sse_regno
6390 * (TARGET_AVX ? 5 : 4)))));
6391 else
6392 emit_move_insn (nsse_reg, label_ref);
6393 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6395 /* Compute the address of the memory block we save into. We always use a
6396 pointer pointing 127 bytes after the first byte to store - this is
6397 needed to keep the instruction size limited to 4 bytes (5 bytes for
6398 AVX) with a one-byte displacement. */
6399 tmp_reg = gen_reg_rtx (Pmode);
6400 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6401 plus_constant (save_area,
6402 ix86_varargs_gpr_size + 127)));
6403 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6404 MEM_NOTRAP_P (mem) = 1;
6405 set_mem_alias_set (mem, set);
6406 set_mem_align (mem, BITS_PER_WORD);
6408 /* And finally do the dirty job! */
6409 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6410 GEN_INT (cum->sse_regno), label));
6414 static void
6415 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6417 alias_set_type set = get_varargs_alias_set ();
6418 int i;
6420 for (i = cum->regno; i < X64_REGPARM_MAX; i++)
6422 rtx reg, mem;
6424 mem = gen_rtx_MEM (Pmode,
6425 plus_constant (virtual_incoming_args_rtx,
6426 i * UNITS_PER_WORD));
6427 MEM_NOTRAP_P (mem) = 1;
6428 set_mem_alias_set (mem, set);
6430 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6431 emit_move_insn (mem, reg);
6435 static void
6436 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6437 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6438 int no_rtl)
6440 CUMULATIVE_ARGS next_cum;
6441 tree fntype;
6443 /* This argument doesn't appear to be used anymore. Which is good,
6444 because the old code here didn't suppress rtl generation. */
6445 gcc_assert (!no_rtl);
6447 if (!TARGET_64BIT)
6448 return;
6450 fntype = TREE_TYPE (current_function_decl);
6452 /* For varargs, we do not want to skip the dummy va_dcl argument.
6453 For stdargs, we do want to skip the last named argument. */
6454 next_cum = *cum;
6455 if (stdarg_p (fntype))
6456 function_arg_advance (&next_cum, mode, type, 1);
6458 if (cum->call_abi == MS_ABI)
6459 setup_incoming_varargs_ms_64 (&next_cum);
6460 else
6461 setup_incoming_varargs_64 (&next_cum);
6464 /* Checks if TYPE is of kind va_list char *. */
6466 static bool
6467 is_va_list_char_pointer (tree type)
6469 tree canonic;
6471 /* For 32-bit it is always true. */
6472 if (!TARGET_64BIT)
6473 return true;
6474 canonic = ix86_canonical_va_list_type (type);
6475 return (canonic == ms_va_list_type_node
6476 || (DEFAULT_ABI == MS_ABI && canonic == va_list_type_node));
6479 /* Implement va_start. */
6481 static void
6482 ix86_va_start (tree valist, rtx nextarg)
6484 HOST_WIDE_INT words, n_gpr, n_fpr;
6485 tree f_gpr, f_fpr, f_ovf, f_sav;
6486 tree gpr, fpr, ovf, sav, t;
6487 tree type;
6489 /* Only 64bit target needs something special. */
6490 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6492 std_expand_builtin_va_start (valist, nextarg);
6493 return;
6496 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6497 f_fpr = TREE_CHAIN (f_gpr);
6498 f_ovf = TREE_CHAIN (f_fpr);
6499 f_sav = TREE_CHAIN (f_ovf);
6501 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6502 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6503 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6504 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6505 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6507 /* Count number of gp and fp argument registers used. */
6508 words = crtl->args.info.words;
6509 n_gpr = crtl->args.info.regno;
6510 n_fpr = crtl->args.info.sse_regno;
6512 if (cfun->va_list_gpr_size)
6514 type = TREE_TYPE (gpr);
6515 t = build2 (MODIFY_EXPR, type,
6516 gpr, build_int_cst (type, n_gpr * 8));
6517 TREE_SIDE_EFFECTS (t) = 1;
6518 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6521 if (TARGET_SSE && cfun->va_list_fpr_size)
6523 type = TREE_TYPE (fpr);
6524 t = build2 (MODIFY_EXPR, type, fpr,
6525 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6526 TREE_SIDE_EFFECTS (t) = 1;
6527 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6530 /* Find the overflow area. */
6531 type = TREE_TYPE (ovf);
6532 t = make_tree (type, crtl->args.internal_arg_pointer);
6533 if (words != 0)
6534 t = build2 (POINTER_PLUS_EXPR, type, t,
6535 size_int (words * UNITS_PER_WORD));
6536 t = build2 (MODIFY_EXPR, type, ovf, t);
6537 TREE_SIDE_EFFECTS (t) = 1;
6538 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6540 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6542 /* Find the register save area.
6543 The function prologue saves it right above the stack frame. */
6544 type = TREE_TYPE (sav);
6545 t = make_tree (type, frame_pointer_rtx);
6546 if (!ix86_varargs_gpr_size)
6547 t = build2 (POINTER_PLUS_EXPR, type, t,
6548 size_int (-8 * X86_64_REGPARM_MAX));
6549 t = build2 (MODIFY_EXPR, type, sav, t);
6550 TREE_SIDE_EFFECTS (t) = 1;
6551 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
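/* Illustrative values (assuming the SysV layout above): for a variadic
   function whose named arguments consumed 2 integer and 1 SSE
   registers, va_start sets gp_offset = 2 * 8 = 16 and fp_offset =
   1 * 16 + 8 * X86_64_REGPARM_MAX = 64, with overflow_arg_area
   pointing just past any named stack words.  */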
6555 /* Implement va_arg. */
6557 static tree
6558 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6559 gimple_seq *post_p)
6561 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
6562 tree f_gpr, f_fpr, f_ovf, f_sav;
6563 tree gpr, fpr, ovf, sav, t;
6564 int size, rsize;
6565 tree lab_false, lab_over = NULL_TREE;
6566 tree addr, t2;
6567 rtx container;
6568 int indirect_p = 0;
6569 tree ptrtype;
6570 enum machine_mode nat_mode;
6571 int arg_boundary;
6573 /* Only 64bit target needs something special. */
6574 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6575 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6577 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6578 f_fpr = TREE_CHAIN (f_gpr);
6579 f_ovf = TREE_CHAIN (f_fpr);
6580 f_sav = TREE_CHAIN (f_ovf);
6582 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
6583 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
6584 valist = build_va_arg_indirect_ref (valist);
6585 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6586 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6587 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6589 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6590 if (indirect_p)
6591 type = build_pointer_type (type);
6592 size = int_size_in_bytes (type);
6593 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6595 nat_mode = type_natural_mode (type);
6596 switch (nat_mode)
6598 case V8SFmode:
6599 case V8SImode:
6600 case V32QImode:
6601 case V16HImode:
6602 case V4DFmode:
6603 case V4DImode:
6604 /* Unnamed 256bit vector mode parameters are passed on stack. */
6605 if (ix86_cfun_abi () == SYSV_ABI)
6607 container = NULL;
6608 break;
6611 default:
6612 container = construct_container (nat_mode, TYPE_MODE (type),
6613 type, 0, X86_64_REGPARM_MAX,
6614 X86_64_SSE_REGPARM_MAX, intreg,
6616 break;
6619 /* Pull the value out of the saved registers. */
6621 addr = create_tmp_var (ptr_type_node, "addr");
6622 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
6624 if (container)
6626 int needed_intregs, needed_sseregs;
6627 bool need_temp;
6628 tree int_addr, sse_addr;
6630 lab_false = create_artificial_label ();
6631 lab_over = create_artificial_label ();
6633 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
6635 need_temp = (!REG_P (container)
6636 && ((needed_intregs && TYPE_ALIGN (type) > 64)
6637 || TYPE_ALIGN (type) > 128));
6639 /* If we are passing a structure, verify that it is a consecutive block
6640 in the register save area. If not, we need to do moves. */
6641 if (!need_temp && !REG_P (container))
6643 /* Verify that all registers are strictly consecutive. */
6644 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
6646 int i;
6648 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
6650 rtx slot = XVECEXP (container, 0, i);
6651 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
6652 || INTVAL (XEXP (slot, 1)) != i * 16)
6653 need_temp = 1;
6656 else
6658 int i;
6660 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
6662 rtx slot = XVECEXP (container, 0, i);
6663 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
6664 || INTVAL (XEXP (slot, 1)) != i * 8)
6665 need_temp = 1;
6669 if (!need_temp)
6671 int_addr = addr;
6672 sse_addr = addr;
6674 else
6676 int_addr = create_tmp_var (ptr_type_node, "int_addr");
6677 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
6678 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
6679 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
6682 /* First ensure that we fit completely in registers. */
6683 if (needed_intregs)
6685 t = build_int_cst (TREE_TYPE (gpr),
6686 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
6687 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
6688 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
6689 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
6690 gimplify_and_add (t, pre_p);
6692 if (needed_sseregs)
6694 t = build_int_cst (TREE_TYPE (fpr),
6695 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
6696 + X86_64_REGPARM_MAX * 8);
6697 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
6698 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
6699 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
6700 gimplify_and_add (t, pre_p);
6703 /* Compute index to start of area used for integer regs. */
6704 if (needed_intregs)
6706 /* int_addr = gpr + sav; */
6707 t = fold_convert (sizetype, gpr);
6708 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
6709 gimplify_assign (int_addr, t, pre_p);
6711 if (needed_sseregs)
6713 /* sse_addr = fpr + sav; */
6714 t = fold_convert (sizetype, fpr);
6715 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
6716 gimplify_assign (sse_addr, t, pre_p);
6718 if (need_temp)
6720 int i;
6721 tree temp = create_tmp_var (type, "va_arg_tmp");
6723 /* addr = &temp; */
6724 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
6725 gimplify_assign (addr, t, pre_p);
6727 for (i = 0; i < XVECLEN (container, 0); i++)
6729 rtx slot = XVECEXP (container, 0, i);
6730 rtx reg = XEXP (slot, 0);
6731 enum machine_mode mode = GET_MODE (reg);
6732 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
6733 tree addr_type = build_pointer_type (piece_type);
6734 tree src_addr, src;
6735 int src_offset;
6736 tree dest_addr, dest;
6738 if (SSE_REGNO_P (REGNO (reg)))
6740 src_addr = sse_addr;
6741 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
6743 else
6745 src_addr = int_addr;
6746 src_offset = REGNO (reg) * 8;
6748 src_addr = fold_convert (addr_type, src_addr);
6749 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
6750 size_int (src_offset));
6751 src = build_va_arg_indirect_ref (src_addr);
6753 dest_addr = fold_convert (addr_type, addr);
6754 dest_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, dest_addr,
6755 size_int (INTVAL (XEXP (slot, 1))));
6756 dest = build_va_arg_indirect_ref (dest_addr);
6758 gimplify_assign (dest, src, pre_p);
6762 if (needed_intregs)
6764 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
6765 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
6766 gimplify_assign (gpr, t, pre_p);
6769 if (needed_sseregs)
6771 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
6772 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
6773 gimplify_assign (fpr, t, pre_p);
6776 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
6778 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
6781 /* ... otherwise out of the overflow area. */
6783 /* When we align a parameter on the stack for the caller, if its
6784 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
6785 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We match the callee
6786 here with the caller. */
6787 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
6788 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
6789 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
6791 /* Care for on-stack alignment if needed. */
6792 if (arg_boundary <= 64
6793 || integer_zerop (TYPE_SIZE (type)))
6794 t = ovf;
6795 else
6797 HOST_WIDE_INT align = arg_boundary / 8;
6798 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
6799 size_int (align - 1));
6800 t = fold_convert (sizetype, t);
6801 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
6802 size_int (-align));
6803 t = fold_convert (TREE_TYPE (ovf), t);
6805 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
6806 gimplify_assign (addr, t, pre_p);
6808 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
6809 size_int (rsize * UNITS_PER_WORD));
6810 gimplify_assign (unshare_expr (ovf), t, pre_p);
6812 if (container)
6813 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
6815 ptrtype = build_pointer_type (type);
6816 addr = fold_convert (ptrtype, addr);
6818 if (indirect_p)
6819 addr = build_va_arg_indirect_ref (addr);
6820 return build_va_arg_indirect_ref (addr);
6823 /* Return nonzero if OPNUM's MEM should be matched
6824 in movabs* patterns. */
6827 ix86_check_movabs (rtx insn, int opnum)
6829 rtx set, mem;
6831 set = PATTERN (insn);
6832 if (GET_CODE (set) == PARALLEL)
6833 set = XVECEXP (set, 0, 0);
6834 gcc_assert (GET_CODE (set) == SET);
6835 mem = XEXP (set, opnum);
6836 while (GET_CODE (mem) == SUBREG)
6837 mem = SUBREG_REG (mem);
6838 gcc_assert (MEM_P (mem));
6839 return (volatile_ok || !MEM_VOLATILE_P (mem));
6842 /* Initialize the table of extra 80387 mathematical constants. */
6844 static void
6845 init_ext_80387_constants (void)
6847 static const char * cst[5] =
6849 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
6850 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
6851 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
6852 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
6853 "3.1415926535897932385128089594061862044", /* 4: fldpi */
6855 int i;
6857 for (i = 0; i < 5; i++)
6859 real_from_string (&ext_80387_constants_table[i], cst[i]);
6860 /* Ensure each constant is rounded to XFmode precision. */
6861 real_convert (&ext_80387_constants_table[i],
6862 XFmode, &ext_80387_constants_table[i]);
6865 ext_80387_constants_init = 1;
6868 /* Return true if the constant is something that can be loaded with
6869 a special instruction. */
6872 standard_80387_constant_p (rtx x)
6874 enum machine_mode mode = GET_MODE (x);
6876 REAL_VALUE_TYPE r;
6878 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
6879 return -1;
6881 if (x == CONST0_RTX (mode))
6882 return 1;
6883 if (x == CONST1_RTX (mode))
6884 return 2;
6886 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6888 /* For XFmode constants, try to find a special 80387 instruction when
6889 optimizing for size or on those CPUs that benefit from them. */
6890 if (mode == XFmode
6891 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
6893 int i;
6895 if (! ext_80387_constants_init)
6896 init_ext_80387_constants ();
6898 for (i = 0; i < 5; i++)
6899 if (real_identical (&r, &ext_80387_constants_table[i]))
6900 return i + 3;
6903 /* Load of the constant -0.0 or -1.0 will be split as
6904 fldz;fchs or fld1;fchs sequence. */
6905 if (real_isnegzero (&r))
6906 return 8;
6907 if (real_identical (&r, &dconstm1))
6908 return 9;
6910 return 0;
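/* Summary of the return values above, matching the opcode table that
   follows: 1 -> fldz, 2 -> fld1, 3..7 -> fldlg2, fldln2, fldl2e,
   fldl2t, fldpi, 8 -> -0.0 (split as fldz; fchs), 9 -> -1.0 (fld1;
   fchs), 0 -> no special instruction.  */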
6913 /* Return the opcode of the special instruction to be used to load
6914 the constant X. */
6916 const char *
6917 standard_80387_constant_opcode (rtx x)
6919 switch (standard_80387_constant_p (x))
6921 case 1:
6922 return "fldz";
6923 case 2:
6924 return "fld1";
6925 case 3:
6926 return "fldlg2";
6927 case 4:
6928 return "fldln2";
6929 case 5:
6930 return "fldl2e";
6931 case 6:
6932 return "fldl2t";
6933 case 7:
6934 return "fldpi";
6935 case 8:
6936 case 9:
6937 return "#";
6938 default:
6939 gcc_unreachable ();
6943 /* Return the CONST_DOUBLE representing the 80387 constant that is
6944 loaded by the specified special instruction. The argument IDX
6945 matches the return value from standard_80387_constant_p. */
6948 standard_80387_constant_rtx (int idx)
6950 int i;
6952 if (! ext_80387_constants_init)
6953 init_ext_80387_constants ();
6955 switch (idx)
6957 case 3:
6958 case 4:
6959 case 5:
6960 case 6:
6961 case 7:
6962 i = idx - 3;
6963 break;
6965 default:
6966 gcc_unreachable ();
6969 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
6970 XFmode);
6973 /* Return 1 if mode is a valid mode for sse. */
6974 static int
6975 standard_sse_mode_p (enum machine_mode mode)
6977 switch (mode)
6979 case V16QImode:
6980 case V8HImode:
6981 case V4SImode:
6982 case V2DImode:
6983 case V4SFmode:
6984 case V2DFmode:
6985 return 1;
6987 default:
6988 return 0;
6992 /* Return 1 if X is all 0s. For all 1s, return 2 if X is in 128bit
6993 SSE modes and SSE2 is enabled, return 3 if X is in 256bit AVX
6994 modes and AVX is enabled. */
6997 standard_sse_constant_p (rtx x)
6999 enum machine_mode mode = GET_MODE (x);
7001 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7002 return 1;
7003 if (vector_all_ones_operand (x, mode))
7005 if (standard_sse_mode_p (mode))
7006 return TARGET_SSE2 ? 2 : -2;
7007 else if (VALID_AVX256_REG_MODE (mode))
7008 return TARGET_AVX ? 3 : -3;
7011 return 0;
7014 /* Return the opcode of the special instruction to be used to load
7015 the constant X. */
7017 const char *
7018 standard_sse_constant_opcode (rtx insn, rtx x)
7020 switch (standard_sse_constant_p (x))
7022 case 1:
7023 switch (get_attr_mode (insn))
7025 case MODE_V4SF:
7026 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7027 case MODE_V2DF:
7028 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7029 case MODE_TI:
7030 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7031 case MODE_V8SF:
7032 return "vxorps\t%x0, %x0, %x0";
7033 case MODE_V4DF:
7034 return "vxorpd\t%x0, %x0, %x0";
7035 case MODE_OI:
7036 return "vpxor\t%x0, %x0, %x0";
7037 default:
7038 gcc_unreachable ();
7040 case 2:
7041 if (TARGET_AVX)
7042 switch (get_attr_mode (insn))
7044 case MODE_V4SF:
7045 case MODE_V2DF:
7046 case MODE_TI:
7047 return "vpcmpeqd\t%0, %0, %0";
7048 break;
7049 default:
7050 gcc_unreachable ();
7052 else
7053 return "pcmpeqd\t%0, %0";
7055 gcc_unreachable ();
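/* Example (illustrative): an all-zero V4SF constant is materialized as
   "xorps %xmm0, %xmm0" (the three-operand "vxorps" under AVX), and an
   all-ones vector constant as "pcmpeqd"; both avoid a constant-pool
   load entirely.  */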
7058 /* Returns 1 if OP contains a symbol reference. */
7061 symbolic_reference_mentioned_p (rtx op)
7063 const char *fmt;
7064 int i;
7066 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7067 return 1;
7069 fmt = GET_RTX_FORMAT (GET_CODE (op));
7070 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7072 if (fmt[i] == 'E')
7074 int j;
7076 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7077 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7078 return 1;
7081 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7082 return 1;
7085 return 0;
7088 /* Return 1 if it is appropriate to emit `ret' instructions in the
7089 body of a function. Do this only if the epilogue is simple, needing a
7090 couple of insns. Prior to reloading, we can't tell how many registers
7091 must be saved, so return 0 then. Return 0 if there is no frame
7092 marker to de-allocate. */
7095 ix86_can_use_return_insn_p (void)
7097 struct ix86_frame frame;
7099 if (! reload_completed || frame_pointer_needed)
7100 return 0;
7102 /* Don't allow more than 32768 bytes of popped arguments, since
7103 that's all we can do with one instruction. */
7104 if (crtl->args.pops_args
7105 && crtl->args.size >= 32768)
7106 return 0;
7108 ix86_compute_frame_layout (&frame);
7109 return frame.to_allocate == 0 && frame.nregs == 0;
7112 /* Value should be nonzero if functions must have frame pointers.
7113 Zero means the frame pointer need not be set up (and parms may
7114 be accessed via the stack pointer) in functions that seem suitable. */
7117 ix86_frame_pointer_required (void)
7119 /* If we accessed previous frames, then the generated code expects
7120 to be able to access the saved ebp value in our frame. */
7121 if (cfun->machine->accesses_prev_frame)
7122 return 1;
7124 /* Several x86 os'es need a frame pointer for other reasons,
7125 usually pertaining to setjmp. */
7126 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7127 return 1;
7129 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7130 the frame pointer by default. Turn it back on now if we've not
7131 got a leaf function. */
7132 if (TARGET_OMIT_LEAF_FRAME_POINTER
7133 && (!current_function_is_leaf
7134 || ix86_current_function_calls_tls_descriptor))
7135 return 1;
7137 if (crtl->profile)
7138 return 1;
7140 return 0;
7143 /* Record that the current function accesses previous call frames. */
7145 void
7146 ix86_setup_frame_addresses (void)
7148 cfun->machine->accesses_prev_frame = 1;
7151 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7152 # define USE_HIDDEN_LINKONCE 1
7153 #else
7154 # define USE_HIDDEN_LINKONCE 0
7155 #endif
7157 static int pic_labels_used;
7159 /* Fills in the label name that should be used for a pc thunk for
7160 the given register. */
7162 static void
7163 get_pc_thunk_name (char name[32], unsigned int regno)
7165 gcc_assert (!TARGET_64BIT);
7167 if (USE_HIDDEN_LINKONCE)
7168 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7169 else
7170 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7174 /* This function generates code for -fpic that loads %ebx with
7175 the return address of the caller and then returns. */
7177 void
7178 ix86_file_end (void)
7180 rtx xops[2];
7181 int regno;
7183 for (regno = 0; regno < 8; ++regno)
7185 char name[32];
7187 if (! ((pic_labels_used >> regno) & 1))
7188 continue;
7190 get_pc_thunk_name (name, regno);
7192 #if TARGET_MACHO
7193 if (TARGET_MACHO)
7195 switch_to_section (darwin_sections[text_coal_section]);
7196 fputs ("\t.weak_definition\t", asm_out_file);
7197 assemble_name (asm_out_file, name);
7198 fputs ("\n\t.private_extern\t", asm_out_file);
7199 assemble_name (asm_out_file, name);
7200 fputs ("\n", asm_out_file);
7201 ASM_OUTPUT_LABEL (asm_out_file, name);
7203 else
7204 #endif
7205 if (USE_HIDDEN_LINKONCE)
7207 tree decl;
7209 decl = build_decl (FUNCTION_DECL, get_identifier (name),
7210 error_mark_node);
7211 TREE_PUBLIC (decl) = 1;
7212 TREE_STATIC (decl) = 1;
7213 DECL_ONE_ONLY (decl) = 1;
7215 (*targetm.asm_out.unique_section) (decl, 0);
7216 switch_to_section (get_named_section (decl, NULL, 0));
7218 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7219 fputs ("\t.hidden\t", asm_out_file);
7220 assemble_name (asm_out_file, name);
7221 fputc ('\n', asm_out_file);
7222 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7224 else
7226 switch_to_section (text_section);
7227 ASM_OUTPUT_LABEL (asm_out_file, name);
7230 xops[0] = gen_rtx_REG (Pmode, regno);
7231 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7232 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7233 output_asm_insn ("ret", xops);
7236 if (NEED_INDICATE_EXEC_STACK)
7237 file_end_indicate_exec_stack ();
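/* For orientation, a sketch of the assembly each thunk reduces to
   (AT&T syntax, 32-bit); the thunk copies its own return address,
   i.e. the caller's pc, into the requested register:

	__i686.get_pc_thunk.bx:
		movl	(%esp), %ebx
		ret
*/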
7240 /* Emit code for the SET_GOT patterns. */
7242 const char *
7243 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7245 rtx xops[3];
7247 xops[0] = dest;
7249 if (TARGET_VXWORKS_RTP && flag_pic)
7251 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7252 xops[2] = gen_rtx_MEM (Pmode,
7253 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7254 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7256 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7257 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7258 an unadorned address. */
7259 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7260 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7261 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7262 return "";
7265 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7267 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7269 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7271 if (!flag_pic)
7272 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7273 else
7274 output_asm_insn ("call\t%a2", xops);
7276 #if TARGET_MACHO
7277 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7278 is what will be referenced by the Mach-O PIC subsystem. */
7279 if (!label)
7280 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7281 #endif
7283 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7284 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7286 if (flag_pic)
7287 output_asm_insn ("pop%z0\t%0", xops);
7289 else
7291 char name[32];
7292 get_pc_thunk_name (name, REGNO (dest));
7293 pic_labels_used |= 1 << REGNO (dest);
7295 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7296 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7297 output_asm_insn ("call\t%X2", xops);
7298 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7299 is what will be referenced by the Mach-O PIC subsystem. */
7300 #if TARGET_MACHO
7301 if (!label)
7302 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7303 else
7304 targetm.asm_out.internal_label (asm_out_file, "L",
7305 CODE_LABEL_NUMBER (label));
7306 #endif
7309 if (TARGET_MACHO)
7310 return "";
7312 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7313 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7314 else
7315 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7317 return "";
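/* A hedged sketch of the two common 32-bit PIC sequences emitted above.
   Without deep branch prediction, the return address is popped inline:

		call	.L3
	.L3:	popl	%ebx
		addl	$_GLOBAL_OFFSET_TABLE_+[.-.L3], %ebx

   With deep branch prediction, a pc thunk keeps calls and returns
   paired for the return-stack predictor:

		call	__i686.get_pc_thunk.bx
		addl	$_GLOBAL_OFFSET_TABLE_, %ebx
*/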
7320 /* Generate a "push" pattern for input ARG. */
7322 static rtx
7323 gen_push (rtx arg)
7325 return gen_rtx_SET (VOIDmode,
7326 gen_rtx_MEM (Pmode,
7327 gen_rtx_PRE_DEC (Pmode,
7328 stack_pointer_rtx)),
7329 arg);
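/* For reference, a sketch of the RTL built above when Pmode is SImode:

	(set (mem:SI (pre_dec:SI (reg:SI sp)))
	     (reg:SI arg))

   which the backend matches as a single push instruction.  */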
7332 /* Return >= 0 if there is an unused call-clobbered register available
7333 for the entire function. */
7335 static unsigned int
7336 ix86_select_alt_pic_regnum (void)
7338 if (current_function_is_leaf && !crtl->profile
7339 && !ix86_current_function_calls_tls_descriptor)
7341 int i, drap;
7342 /* Can't use the same register for both PIC and DRAP. */
7343 if (crtl->drap_reg)
7344 drap = REGNO (crtl->drap_reg);
7345 else
7346 drap = -1;
7347 for (i = 2; i >= 0; --i)
7348 if (i != drap && !df_regs_ever_live_p (i))
7349 return i;
7352 return INVALID_REGNUM;
7355 /* Return 1 if we need to save REGNO. */
7356 static int
7357 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7359 if (pic_offset_table_rtx
7360 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7361 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7362 || crtl->profile
7363 || crtl->calls_eh_return
7364 || crtl->uses_const_pool))
7366 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7367 return 0;
7368 return 1;
7371 if (crtl->calls_eh_return && maybe_eh_return)
7373 unsigned i;
7374 for (i = 0; ; i++)
7376 unsigned test = EH_RETURN_DATA_REGNO (i);
7377 if (test == INVALID_REGNUM)
7378 break;
7379 if (test == regno)
7380 return 1;
7384 if (crtl->drap_reg
7385 && regno == REGNO (crtl->drap_reg))
7386 return 1;
7388 return (df_regs_ever_live_p (regno)
7389 && !call_used_regs[regno]
7390 && !fixed_regs[regno]
7391 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7394 /* Return number of registers to be saved on the stack. */
7396 static int
7397 ix86_nsaved_regs (void)
7399 int nregs = 0;
7400 int regno;
7402 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
7403 if (ix86_save_reg (regno, true))
7404 nregs++;
7405 return nregs;
7408 /* Given FROM and TO register numbers, say whether this elimination is
7409 allowed. If stack alignment is needed, we can only replace argument
7410 pointer with hard frame pointer, or replace frame pointer with stack
7411 pointer. Otherwise, frame pointer elimination is automatically
7412 handled and all other eliminations are valid. */
7415 ix86_can_eliminate (int from, int to)
7417 if (stack_realign_fp)
7418 return ((from == ARG_POINTER_REGNUM
7419 && to == HARD_FRAME_POINTER_REGNUM)
7420 || (from == FRAME_POINTER_REGNUM
7421 && to == STACK_POINTER_REGNUM));
7422 else
7423 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : 1;
7426 /* Return the offset between two registers, one to be eliminated, and the other
7427 its replacement, at the start of a routine. */
7429 HOST_WIDE_INT
7430 ix86_initial_elimination_offset (int from, int to)
7432 struct ix86_frame frame;
7433 ix86_compute_frame_layout (&frame);
7435 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7436 return frame.hard_frame_pointer_offset;
7437 else if (from == FRAME_POINTER_REGNUM
7438 && to == HARD_FRAME_POINTER_REGNUM)
7439 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7440 else
7442 gcc_assert (to == STACK_POINTER_REGNUM);
7444 if (from == ARG_POINTER_REGNUM)
7445 return frame.stack_pointer_offset;
7447 gcc_assert (from == FRAME_POINTER_REGNUM);
7448 return frame.stack_pointer_offset - frame.frame_pointer_offset;
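/* A hedged worked example tying this to the layout code below: with
   UNITS_PER_WORD == 4, a frame pointer, two saved registers and 16
   bytes of locals (no va-arg or outgoing-args area, no extra padding),
   hard_frame_pointer_offset == 8, frame_pointer_offset == 16 and
   stack_pointer_offset == 32; so argp->hard fp eliminates to 8,
   fp->hard fp to -8, argp->sp to 32 and fp->sp to 16.  */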
7452 /* Fill structure ix86_frame about frame of currently computed function. */
7454 static void
7455 ix86_compute_frame_layout (struct ix86_frame *frame)
7457 HOST_WIDE_INT total_size;
7458 unsigned int stack_alignment_needed;
7459 HOST_WIDE_INT offset;
7460 unsigned int preferred_alignment;
7461 HOST_WIDE_INT size = get_frame_size ();
7463 frame->nregs = ix86_nsaved_regs ();
7464 total_size = size;
7466 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7467 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7469 gcc_assert (!size || stack_alignment_needed);
7470 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
7471 gcc_assert (preferred_alignment <= stack_alignment_needed);
7473 /* During reload the number of registers saved can change. Recompute
7474 the value as needed. Do not recompute when the number of registers
7475 didn't change, as reload makes multiple calls to this function and
7476 does not expect the decision to change within a single iteration. */
7477 if (!optimize_function_for_size_p (cfun)
7478 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
7480 int count = frame->nregs;
7482 cfun->machine->use_fast_prologue_epilogue_nregs = count;
7483 /* The fast prologue uses move instead of push to save registers. This
7484 is significantly longer, but also executes faster as modern hardware
7485 can execute the moves in parallel, but can't do that for push/pop.
7487 Be careful about choosing which prologue to emit: when the function
7488 takes many instructions to execute we may use the slow version, as we
7489 may when the function is known to be outside a hot spot (this is known
7490 with feedback only). Weight the size of the function by the number of
7491 registers to save, as it is cheap to use one or two push instructions
7492 but very slow to use many of them. */
7493 if (count)
7494 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
7495 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
7496 || (flag_branch_probabilities
7497 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
7498 cfun->machine->use_fast_prologue_epilogue = false;
7499 else
7500 cfun->machine->use_fast_prologue_epilogue
7501 = !expensive_function_p (count);
7503 if (TARGET_PROLOGUE_USING_MOVE
7504 && cfun->machine->use_fast_prologue_epilogue)
7505 frame->save_regs_using_mov = true;
7506 else
7507 frame->save_regs_using_mov = false;
7510 /* Skip return address and saved base pointer. */
7511 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
7513 frame->hard_frame_pointer_offset = offset;
7515 /* Set offset to the aligned value because the realigned frame
7516 starts here. */
7517 if (stack_realign_fp)
7518 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
7520 /* Register save area */
7521 offset += frame->nregs * UNITS_PER_WORD;
7523 /* Va-arg area */
7524 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
7525 offset += frame->va_arg_size;
7527 /* Align start of frame for local function. */
7528 frame->padding1 = ((offset + stack_alignment_needed - 1)
7529 & -stack_alignment_needed) - offset;
7531 offset += frame->padding1;
7533 /* Frame pointer points here. */
7534 frame->frame_pointer_offset = offset;
7536 offset += size;
7538 /* Add outgoing arguments area. Can be skipped if we eliminated
7539 all the function calls as dead code.
7540 Skipping is however impossible when the function calls alloca. The
7541 alloca expander assumes that the last crtl->outgoing_args_size
7542 bytes of the stack frame are unused. */
7543 if (ACCUMULATE_OUTGOING_ARGS
7544 && (!current_function_is_leaf || cfun->calls_alloca
7545 || ix86_current_function_calls_tls_descriptor))
7547 offset += crtl->outgoing_args_size;
7548 frame->outgoing_arguments_size = crtl->outgoing_args_size;
7550 else
7551 frame->outgoing_arguments_size = 0;
7553 /* Align stack boundary. Only needed if we're calling another function
7554 or using alloca. */
7555 if (!current_function_is_leaf || cfun->calls_alloca
7556 || ix86_current_function_calls_tls_descriptor)
7557 frame->padding2 = ((offset + preferred_alignment - 1)
7558 & -preferred_alignment) - offset;
7559 else
7560 frame->padding2 = 0;
7562 offset += frame->padding2;
7564 /* We've reached end of stack frame. */
7565 frame->stack_pointer_offset = offset;
7567 /* Size prologue needs to allocate. */
7568 frame->to_allocate =
7569 (size + frame->padding1 + frame->padding2
7570 + frame->outgoing_arguments_size + frame->va_arg_size);
7572 if ((!frame->to_allocate && frame->nregs <= 1)
7573 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
7574 frame->save_regs_using_mov = false;
7576 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && current_function_sp_is_unchanging
7577 && current_function_is_leaf
7578 && !ix86_current_function_calls_tls_descriptor)
7580 frame->red_zone_size = frame->to_allocate;
7581 if (frame->save_regs_using_mov)
7582 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
7583 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
7584 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
7586 else
7587 frame->red_zone_size = 0;
7588 frame->to_allocate -= frame->red_zone_size;
7589 frame->stack_pointer_offset -= frame->red_zone_size;
7590 #if 0
7591 fprintf (stderr, "\n");
7592 fprintf (stderr, "nregs: %ld\n", (long)frame->nregs);
7593 fprintf (stderr, "size: %ld\n", (long)size);
7594 fprintf (stderr, "alignment1: %ld\n", (long)stack_alignment_needed);
7595 fprintf (stderr, "padding1: %ld\n", (long)frame->padding1);
7596 fprintf (stderr, "va_arg: %ld\n", (long)frame->va_arg_size);
7597 fprintf (stderr, "padding2: %ld\n", (long)frame->padding2);
7598 fprintf (stderr, "to_allocate: %ld\n", (long)frame->to_allocate);
7599 fprintf (stderr, "red_zone_size: %ld\n", (long)frame->red_zone_size);
7600 fprintf (stderr, "frame_pointer_offset: %ld\n", (long)frame->frame_pointer_offset);
7601 fprintf (stderr, "hard_frame_pointer_offset: %ld\n",
7602 (long)frame->hard_frame_pointer_offset);
7603 fprintf (stderr, "stack_pointer_offset: %ld\n", (long)frame->stack_pointer_offset);
7604 fprintf (stderr, "current_function_is_leaf: %ld\n", (long)current_function_is_leaf);
7605 fprintf (stderr, "cfun->calls_alloca: %ld\n", (long)cfun->calls_alloca);
7606 fprintf (stderr, "x86_current_function_calls_tls_descriptor: %ld\n", (long)ix86_current_function_calls_tls_descriptor);
7607 #endif
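/* A worked example of the alignment idiom used above (a sketch, not
   authoritative): rounding OFFSET up to a power-of-two ALIGN uses
   (offset + align - 1) & -align.  For offset == 20 and align == 16,
   (20 + 15) & -16 == 32, so the recorded padding is 32 - 20 == 12.  */
#if 0
static HOST_WIDE_INT
round_up_example (HOST_WIDE_INT offset, HOST_WIDE_INT align)
{
  /* Only valid when ALIGN is a power of two.  */
  return (offset + align - 1) & -align;
}
#endif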
7610 /* Emit code to save registers in the prologue. */
7612 static void
7613 ix86_emit_save_regs (void)
7615 unsigned int regno;
7616 rtx insn;
7618 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
7619 if (ix86_save_reg (regno, true))
7621 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
7622 RTX_FRAME_RELATED_P (insn) = 1;
7626 /* Emit code to save registers using MOV insns. First register
7627 is restored from POINTER + OFFSET. */
7628 static void
7629 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
7631 unsigned int regno;
7632 rtx insn;
7634 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7635 if (ix86_save_reg (regno, true))
7637 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
7638 Pmode, offset),
7639 gen_rtx_REG (Pmode, regno));
7640 RTX_FRAME_RELATED_P (insn) = 1;
7641 offset += UNITS_PER_WORD;
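/* Illustrative only: for two saved registers on 32-bit code with
   POINTER == %esp and OFFSET == 0, the loop above expands to moves
   such as

	movl	%ebx, 0(%esp)
	movl	%esi, 4(%esp)

   in place of the push sequence emitted by ix86_emit_save_regs.  */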
7645 /* Expand prologue or epilogue stack adjustment.
7646 The pattern exists to put a dependency on all ebp-based memory accesses.
7647 STYLE should be negative if instructions should be marked as frame related,
7648 zero if %r11 register is live and cannot be freely used and positive
7649 otherwise. */
7651 static void
7652 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
7654 rtx insn;
7656 if (! TARGET_64BIT)
7657 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
7658 else if (x86_64_immediate_operand (offset, DImode))
7659 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
7660 else
7662 rtx r11;
7663 /* r11 is used by indirect sibcall return as well, set before the
7664 epilogue and used after the epilogue. ATM indirect sibcall
7665 shouldn't be used together with huge frame sizes in one
7666 function because of the frame_size check in sibcall.c. */
7667 gcc_assert (style);
7668 r11 = gen_rtx_REG (DImode, R11_REG);
7669 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
7670 if (style < 0)
7671 RTX_FRAME_RELATED_P (insn) = 1;
7672 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
7673 offset));
7675 if (style < 0)
7676 RTX_FRAME_RELATED_P (insn) = 1;
7679 /* Find an available register to be used as the dynamic realign
7680 argument pointer register. Such a register will be written in the
7681 prologue and used at the beginning of the body, so it must not be
7682 1. a parameter passing register.
7683 2. the GOT pointer.
7684 We reuse the static-chain register if it is available. Otherwise,
7685 we use DI for i386 and R13 for x86-64. We chose R13 since it has a
7686 shorter encoding.
7688 Return: the regno of the chosen register. */
7690 static unsigned int
7691 find_drap_reg (void)
7693 tree decl = cfun->decl;
7695 if (TARGET_64BIT)
7697 /* Use R13 for a nested function or a function that needs a static
7698 chain. Since a function with a tail call may use any caller-saved
7699 registers in the epilogue, DRAP must not use a caller-saved
7700 register in that case. */
7701 if ((decl_function_context (decl)
7702 && !DECL_NO_STATIC_CHAIN (decl))
7703 || crtl->tail_call_emit)
7704 return R13_REG;
7706 return R10_REG;
7708 else
7710 /* Use DI for a nested function or a function that needs a static
7711 chain. Since a function with a tail call may use any caller-saved
7712 registers in the epilogue, DRAP must not use a caller-saved
7713 register in that case. */
7714 if ((decl_function_context (decl)
7715 && !DECL_NO_STATIC_CHAIN (decl))
7716 || crtl->tail_call_emit)
7717 return DI_REG;
7719 /* Reuse static chain register if it isn't used for parameter
7720 passing. */
7721 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
7722 && !lookup_attribute ("fastcall",
7723 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
7724 return CX_REG;
7725 else
7726 return DI_REG;
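/* A hedged summary of the selection above: a plain x86-64 function
   gets R10 (the static-chain register), while a nested function or
   one with emitted tail calls gets R13; on i386 the analogous cases
   yield CX (regparm <= 2, not fastcall) and DI respectively.  */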
7730 /* Update incoming stack boundary and estimated stack alignment. */
7732 static void
7733 ix86_update_stack_boundary (void)
7735 /* Prefer the one specified at command line. */
7736 ix86_incoming_stack_boundary
7737 = (ix86_user_incoming_stack_boundary
7738 ? ix86_user_incoming_stack_boundary
7739 : ix86_default_incoming_stack_boundary);
7741 /* Incoming stack alignment can be changed on individual functions
7742 via force_align_arg_pointer attribute. We use the smallest
7743 incoming stack boundary. */
7744 if (ix86_incoming_stack_boundary > MIN_STACK_BOUNDARY
7745 && lookup_attribute (ix86_force_align_arg_pointer_string,
7746 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
7747 ix86_incoming_stack_boundary = MIN_STACK_BOUNDARY;
7749 /* Stack at entrance of main is aligned by runtime. We use the
7750 smallest incoming stack boundary. */
7751 if (ix86_incoming_stack_boundary > MAIN_STACK_BOUNDARY
7752 && DECL_NAME (current_function_decl)
7753 && MAIN_NAME_P (DECL_NAME (current_function_decl))
7754 && DECL_FILE_SCOPE_P (current_function_decl))
7755 ix86_incoming_stack_boundary = MAIN_STACK_BOUNDARY;
7757 /* The x86_64 varargs register save area needs 16-byte stack
7758 alignment. */
7759 if (TARGET_64BIT
7760 && cfun->stdarg
7761 && crtl->stack_alignment_estimated < 128)
7762 crtl->stack_alignment_estimated = 128;
7765 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
7766 needed or an rtx for DRAP otherwise. */
7768 static rtx
7769 ix86_get_drap_rtx (void)
7771 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
7772 crtl->need_drap = true;
7774 if (stack_realign_drap)
7776 /* Assign DRAP to vDRAP and return vDRAP. */
7777 unsigned int regno = find_drap_reg ();
7778 rtx drap_vreg;
7779 rtx arg_ptr;
7780 rtx seq, insn;
7782 arg_ptr = gen_rtx_REG (Pmode, regno);
7783 crtl->drap_reg = arg_ptr;
7785 start_sequence ();
7786 drap_vreg = copy_to_reg (arg_ptr);
7787 seq = get_insns ();
7788 end_sequence ();
7790 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
7791 RTX_FRAME_RELATED_P (insn) = 1;
7792 return drap_vreg;
7794 else
7795 return NULL;
7798 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
7800 static rtx
7801 ix86_internal_arg_pointer (void)
7803 return virtual_incoming_args_rtx;
7806 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
7807 This is called from dwarf2out.c to emit call frame instructions
7808 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
7809 static void
7810 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
7812 rtx unspec = SET_SRC (pattern);
7813 gcc_assert (GET_CODE (unspec) == UNSPEC);
7815 switch (index)
7817 case UNSPEC_REG_SAVE:
7818 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
7819 SET_DEST (pattern));
7820 break;
7821 case UNSPEC_DEF_CFA:
7822 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
7823 INTVAL (XVECEXP (unspec, 0, 0)));
7824 break;
7825 default:
7826 gcc_unreachable ();
7830 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
7831 to be generated in correct form. */
7832 static void
7833 ix86_finalize_stack_realign_flags (void)
7835 /* Check if stack realignment is really needed after reload, and
7836 store the result in cfun. */
7837 unsigned int incoming_stack_boundary
7838 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
7839 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
7840 unsigned int stack_realign = (incoming_stack_boundary
7841 < (current_function_is_leaf
7842 ? crtl->max_used_stack_slot_alignment
7843 : crtl->stack_alignment_needed));
7845 if (crtl->stack_realign_finalized)
7847 /* After stack_realign_needed is finalized, we can no longer
7848 change it. */
7849 gcc_assert (crtl->stack_realign_needed == stack_realign);
7851 else
7853 crtl->stack_realign_needed = stack_realign;
7854 crtl->stack_realign_finalized = true;
7858 /* Expand the prologue into a bunch of separate insns. */
7860 void
7861 ix86_expand_prologue (void)
7863 rtx insn;
7864 bool pic_reg_used;
7865 struct ix86_frame frame;
7866 HOST_WIDE_INT allocate;
7868 ix86_finalize_stack_realign_flags ();
7870 /* DRAP should not coexist with stack_realign_fp */
7871 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
7873 ix86_compute_frame_layout (&frame);
7875 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
7876 DRAP is needed and stack realignment is really needed after reload. */
7877 if (crtl->drap_reg && crtl->stack_realign_needed)
7879 rtx x, y;
7880 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
7881 int param_ptr_offset = (call_used_regs[REGNO (crtl->drap_reg)]
7882 ? 0 : UNITS_PER_WORD);
7884 gcc_assert (stack_realign_drap);
7886 /* Grab the argument pointer. */
7887 x = plus_constant (stack_pointer_rtx,
7888 (UNITS_PER_WORD + param_ptr_offset));
7889 y = crtl->drap_reg;
7891 /* Only need to push the parameter pointer reg if it is a
7892 caller-saved reg. */
7893 if (!call_used_regs[REGNO (crtl->drap_reg)])
7895 /* Push arg pointer reg */
7896 insn = emit_insn (gen_push (y));
7897 RTX_FRAME_RELATED_P (insn) = 1;
7900 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
7901 RTX_FRAME_RELATED_P (insn) = 1;
7903 /* Align the stack. */
7904 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
7905 stack_pointer_rtx,
7906 GEN_INT (-align_bytes)));
7907 RTX_FRAME_RELATED_P (insn) = 1;
7909 /* Replicate the return address on the stack so that the return
7910 address can be reached via the (argp - 1) slot. This is needed
7911 to implement the macro RETURN_ADDR_RTX and the intrinsic function
7912 expand_builtin_return_addr, etc. */
7913 x = crtl->drap_reg;
7914 x = gen_frame_mem (Pmode,
7915 plus_constant (x, -UNITS_PER_WORD));
7916 insn = emit_insn (gen_push (x));
7917 RTX_FRAME_RELATED_P (insn) = 1;
7920 /* Note: AT&T enter does NOT have reversed args. Enter is probably
7921 slower on all targets. Also sdb doesn't like it. */
7923 if (frame_pointer_needed)
7925 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
7926 RTX_FRAME_RELATED_P (insn) = 1;
7928 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7929 RTX_FRAME_RELATED_P (insn) = 1;
7932 if (stack_realign_fp)
7934 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
7935 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
7937 /* Align the stack. */
7938 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
7939 stack_pointer_rtx,
7940 GEN_INT (-align_bytes)));
7941 RTX_FRAME_RELATED_P (insn) = 1;
7944 allocate = frame.to_allocate;
7946 if (!frame.save_regs_using_mov)
7947 ix86_emit_save_regs ();
7948 else
7949 allocate += frame.nregs * UNITS_PER_WORD;
7951 /* When using the red zone we may start register saving before
7952 allocating the stack frame, saving one cycle of the prologue. However,
7953 we avoid doing this if we are going to have to probe the stack, since
7954 at least on x86_64 the stack probe can turn into a call that clobbers
7955 a red zone location. */
7956 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
7957 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
7958 ix86_emit_save_regs_using_mov ((frame_pointer_needed
7959 && !crtl->stack_realign_needed)
7960 ? hard_frame_pointer_rtx
7961 : stack_pointer_rtx,
7962 -frame.nregs * UNITS_PER_WORD);
7964 if (allocate == 0)
7966 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
7967 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
7968 GEN_INT (-allocate), -1);
7969 else
7971 /* Only valid for Win32. */
7972 rtx eax = gen_rtx_REG (Pmode, AX_REG);
7973 bool eax_live;
7974 rtx t;
7976 gcc_assert (!TARGET_64BIT || cfun->machine->call_abi == MS_ABI);
7978 if (cfun->machine->call_abi == MS_ABI)
7979 eax_live = false;
7980 else
7981 eax_live = ix86_eax_live_at_start_p ();
7983 if (eax_live)
7985 emit_insn (gen_push (eax));
7986 allocate -= UNITS_PER_WORD;
7989 emit_move_insn (eax, GEN_INT (allocate));
7991 if (TARGET_64BIT)
7992 insn = gen_allocate_stack_worker_64 (eax);
7993 else
7994 insn = gen_allocate_stack_worker_32 (eax);
7995 insn = emit_insn (insn);
7996 RTX_FRAME_RELATED_P (insn) = 1;
7997 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
7998 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
7999 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8000 t, REG_NOTES (insn));
8002 if (eax_live)
8004 if (frame_pointer_needed)
8005 t = plus_constant (hard_frame_pointer_rtx,
8006 allocate
8007 - frame.to_allocate
8008 - frame.nregs * UNITS_PER_WORD);
8009 else
8010 t = plus_constant (stack_pointer_rtx, allocate);
8011 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8015 if (frame.save_regs_using_mov
8016 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8017 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8019 if (!frame_pointer_needed
8020 || !frame.to_allocate
8021 || crtl->stack_realign_needed)
8022 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8023 frame.to_allocate);
8024 else
8025 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8026 -frame.nregs * UNITS_PER_WORD);
8029 pic_reg_used = false;
8030 if (pic_offset_table_rtx
8031 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8032 || crtl->profile))
8034 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8036 if (alt_pic_reg_used != INVALID_REGNUM)
8037 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8039 pic_reg_used = true;
8042 if (pic_reg_used)
8044 if (TARGET_64BIT)
8046 if (ix86_cmodel == CM_LARGE_PIC)
8048 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8049 rtx label = gen_label_rtx ();
8050 emit_label (label);
8051 LABEL_PRESERVE_P (label) = 1;
8052 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8053 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8054 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8055 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8056 pic_offset_table_rtx, tmp_reg));
8058 else
8059 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8061 else
8062 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8065 /* Prevent function calls from being scheduled before the call to mcount.
8066 In the pic_reg_used case, make sure that the got load isn't deleted. */
8067 if (crtl->profile)
8069 if (pic_reg_used)
8070 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8071 emit_insn (gen_blockage ());
8074 if (crtl->drap_reg && !crtl->stack_realign_needed)
8076 /* vDRAP is set up, but after reload it turns out stack realignment
8077 isn't necessary; here we emit prologue code to set up DRAP
8078 without the stack realignment adjustment. */
8079 int drap_bp_offset = UNITS_PER_WORD * 2;
8080 rtx x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8081 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8084 /* Emit cld instruction if stringops are used in the function. */
8085 if (TARGET_CLD && ix86_current_function_needs_cld)
8086 emit_insn (gen_cld ());
8089 /* Emit code to restore saved registers using MOV insns. First register
8090 is restored from POINTER + OFFSET. */
8091 static void
8092 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8093 int maybe_eh_return)
8095 int regno;
8096 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8098 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8099 if (ix86_save_reg (regno, maybe_eh_return))
8101 /* Ensure that adjust_address won't be forced to produce pointer
8102 out of range allowed by x86-64 instruction set. */
8103 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8105 rtx r11;
8107 r11 = gen_rtx_REG (DImode, R11_REG);
8108 emit_move_insn (r11, GEN_INT (offset));
8109 emit_insn (gen_adddi3 (r11, r11, pointer));
8110 base_address = gen_rtx_MEM (Pmode, r11);
8111 offset = 0;
8113 emit_move_insn (gen_rtx_REG (Pmode, regno),
8114 adjust_address (base_address, Pmode, offset));
8115 offset += UNITS_PER_WORD;
8119 /* Restore function stack, frame, and registers. */
8121 void
8122 ix86_expand_epilogue (int style)
8124 int regno;
8125 int sp_valid;
8126 struct ix86_frame frame;
8127 HOST_WIDE_INT offset;
8129 ix86_finalize_stack_realign_flags ();
8131 /* When stack is realigned, SP must be valid. */
8132 sp_valid = (!frame_pointer_needed
8133 || current_function_sp_is_unchanging
8134 || stack_realign_fp);
8136 ix86_compute_frame_layout (&frame);
8138 /* Calculate start of saved registers relative to ebp. Special care
8139 must be taken for the normal return case of a function using
8140 eh_return: the eax and edx registers are marked as saved, but not
8141 restored along this path. */
8142 offset = frame.nregs;
8143 if (crtl->calls_eh_return && style != 2)
8144 offset -= 2;
8145 offset *= -UNITS_PER_WORD;
8147 /* If we're only restoring one register and sp is not valid, then
8148 use a move instruction to restore the register, since it's
8149 less work than reloading sp and popping the register.
8151 The default code results in a stack adjustment using an add/lea
8152 instruction, while this code results in a LEAVE instruction (or
8153 discrete equivalent), so it is profitable in some other cases as
8154 well, especially when there are no registers to restore. We also use
8155 this code when TARGET_USE_LEAVE is set and there is exactly one
8156 register to pop. This heuristic may need some tuning in the future. */
8157 if ((!sp_valid && frame.nregs <= 1)
8158 || (TARGET_EPILOGUE_USING_MOVE
8159 && cfun->machine->use_fast_prologue_epilogue
8160 && (frame.nregs > 1 || frame.to_allocate))
8161 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
8162 || (frame_pointer_needed && TARGET_USE_LEAVE
8163 && cfun->machine->use_fast_prologue_epilogue
8164 && frame.nregs == 1)
8165 || crtl->calls_eh_return)
8167 /* Restore registers. We can use ebp or esp to address the memory
8168 locations. If both are available, default to ebp, since offsets
8169 are known to be small. Only exception is esp pointing directly
8170 to the end of block of saved registers, where we may simplify
8171 addressing mode.
8173 If we are realigning the stack with bp and sp, the register restore
8174 can't be addressed by bp; sp must be used instead. */
8176 if (!frame_pointer_needed
8177 || (sp_valid && !frame.to_allocate)
8178 || stack_realign_fp)
8179 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
8180 frame.to_allocate, style == 2);
8181 else
8182 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
8183 offset, style == 2);
8185 /* eh_return epilogues need %ecx added to the stack pointer. */
8186 if (style == 2)
8188 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
8190 /* Stack align doesn't work with eh_return. */
8191 gcc_assert (!crtl->stack_realign_needed);
8193 if (frame_pointer_needed)
8195 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
8196 tmp = plus_constant (tmp, UNITS_PER_WORD);
8197 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
8199 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
8200 emit_move_insn (hard_frame_pointer_rtx, tmp);
8202 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
8203 const0_rtx, style);
8205 else
8207 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
8208 tmp = plus_constant (tmp, (frame.to_allocate
8209 + frame.nregs * UNITS_PER_WORD));
8210 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
8213 else if (!frame_pointer_needed)
8214 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8215 GEN_INT (frame.to_allocate
8216 + frame.nregs * UNITS_PER_WORD),
8217 style);
8218 /* If not an i386, mov & pop is faster than "leave". */
8219 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
8220 || !cfun->machine->use_fast_prologue_epilogue)
8221 emit_insn ((*ix86_gen_leave) ());
8222 else
8224 pro_epilogue_adjust_stack (stack_pointer_rtx,
8225 hard_frame_pointer_rtx,
8226 const0_rtx, style);
8228 emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8231 else
8233 /* First step is to deallocate the stack frame so that we can
8234 pop the registers.
8236 If we realign the stack with the frame pointer, then the stack
8237 pointer can't be recovered via lea $offset(%bp), %sp, because
8238 there is a padding area between bp and sp for the realignment.
8239 "add $to_allocate, %sp" must be used instead. */
8240 if (!sp_valid)
8242 gcc_assert (frame_pointer_needed);
8243 gcc_assert (!stack_realign_fp);
8244 pro_epilogue_adjust_stack (stack_pointer_rtx,
8245 hard_frame_pointer_rtx,
8246 GEN_INT (offset), style);
8248 else if (frame.to_allocate)
8249 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8250 GEN_INT (frame.to_allocate), style);
8252 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8253 if (ix86_save_reg (regno, false))
8254 emit_insn ((*ix86_gen_pop1) (gen_rtx_REG (Pmode, regno)));
8255 if (frame_pointer_needed)
8257 /* Leave results in shorter dependency chains on CPUs that are
8258 able to grok it fast. */
8259 if (TARGET_USE_LEAVE)
8260 emit_insn ((*ix86_gen_leave) ());
8261 else
8263 /* If stack realignment really happened, recovering the stack
8264 pointer from the hard frame pointer is a must if not
8265 using leave. */
8266 if (stack_realign_fp)
8267 pro_epilogue_adjust_stack (stack_pointer_rtx,
8268 hard_frame_pointer_rtx,
8269 const0_rtx, style);
8270 emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8275 if (crtl->drap_reg && crtl->stack_realign_needed)
8277 int param_ptr_offset = (call_used_regs[REGNO (crtl->drap_reg)]
8278 ? 0 : UNITS_PER_WORD);
8279 gcc_assert (stack_realign_drap);
8280 emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
8281 crtl->drap_reg,
8282 GEN_INT (-(UNITS_PER_WORD
8283 + param_ptr_offset))));
8284 if (!call_used_regs[REGNO (crtl->drap_reg)])
8285 emit_insn ((*ix86_gen_pop1) (crtl->drap_reg));
8289 /* Sibcall epilogues don't want a return instruction. */
8290 if (style == 0)
8291 return;
8293 if (crtl->args.pops_args && crtl->args.size)
8295 rtx popc = GEN_INT (crtl->args.pops_args);
8297 /* i386 can only pop 64K bytes. If asked to pop more, pop the
8298 return address, do an explicit add, and jump indirectly to the
8299 caller. */
8301 if (crtl->args.pops_args >= 65536)
8303 rtx ecx = gen_rtx_REG (SImode, CX_REG);
8305 /* There is no "pascal" calling convention in any 64bit ABI. */
8306 gcc_assert (!TARGET_64BIT);
8308 emit_insn (gen_popsi1 (ecx));
8309 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
8310 emit_jump_insn (gen_return_indirect_internal (ecx));
8312 else
8313 emit_jump_insn (gen_return_pop_internal (popc));
8315 else
8316 emit_jump_insn (gen_return_internal ());
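/* For orientation (a sketch, not authoritative), the epilogue shapes
   this function can produce for 32-bit code are roughly:

	leave				; frame pointer + TARGET_USE_LEAVE
	ret

	addl	$N, %esp		; pop-based restore
	popl	%ebx
	ret

	ret	$N			; callee-popped args, N < 65536
*/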
8319 /* Reset from the function's potential modifications. */
8321 static void
8322 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8323 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8325 if (pic_offset_table_rtx)
8326 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
8327 #if TARGET_MACHO
8328 /* Mach-O doesn't support labels at the end of objects, so if
8329 it looks like we might want one, insert a NOP. */
8331 rtx insn = get_last_insn ();
8332 while (insn
8333 && NOTE_P (insn)
8334 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
8335 insn = PREV_INSN (insn);
8336 if (insn
8337 && (LABEL_P (insn)
8338 || (NOTE_P (insn)
8339 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
8340 fputs ("\tnop\n", file);
8342 #endif
8346 /* Extract the parts of an RTL expression that is a valid memory address
8347 for an instruction. Return 0 if the structure of the address is
8348 grossly off. Return -1 if the address contains an ASHIFT, so it is not
8349 strictly valid but is still used for computing the length of the lea instruction. */
8352 ix86_decompose_address (rtx addr, struct ix86_address *out)
8354 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
8355 rtx base_reg, index_reg;
8356 HOST_WIDE_INT scale = 1;
8357 rtx scale_rtx = NULL_RTX;
8358 int retval = 1;
8359 enum ix86_address_seg seg = SEG_DEFAULT;
8361 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
8362 base = addr;
8363 else if (GET_CODE (addr) == PLUS)
8365 rtx addends[4], op;
8366 int n = 0, i;
8368 op = addr;
8371 if (n >= 4)
8372 return 0;
8373 addends[n++] = XEXP (op, 1);
8374 op = XEXP (op, 0);
8376 while (GET_CODE (op) == PLUS);
8377 if (n >= 4)
8378 return 0;
8379 addends[n] = op;
8381 for (i = n; i >= 0; --i)
8383 op = addends[i];
8384 switch (GET_CODE (op))
8386 case MULT:
8387 if (index)
8388 return 0;
8389 index = XEXP (op, 0);
8390 scale_rtx = XEXP (op, 1);
8391 break;
8393 case UNSPEC:
8394 if (XINT (op, 1) == UNSPEC_TP
8395 && TARGET_TLS_DIRECT_SEG_REFS
8396 && seg == SEG_DEFAULT)
8397 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
8398 else
8399 return 0;
8400 break;
8402 case REG:
8403 case SUBREG:
8404 if (!base)
8405 base = op;
8406 else if (!index)
8407 index = op;
8408 else
8409 return 0;
8410 break;
8412 case CONST:
8413 case CONST_INT:
8414 case SYMBOL_REF:
8415 case LABEL_REF:
8416 if (disp)
8417 return 0;
8418 disp = op;
8419 break;
8421 default:
8422 return 0;
8426 else if (GET_CODE (addr) == MULT)
8428 index = XEXP (addr, 0); /* index*scale */
8429 scale_rtx = XEXP (addr, 1);
8431 else if (GET_CODE (addr) == ASHIFT)
8433 rtx tmp;
8435 /* We're called for lea too, which implements ashift on occasion. */
8436 index = XEXP (addr, 0);
8437 tmp = XEXP (addr, 1);
8438 if (!CONST_INT_P (tmp))
8439 return 0;
8440 scale = INTVAL (tmp);
8441 if ((unsigned HOST_WIDE_INT) scale > 3)
8442 return 0;
8443 scale = 1 << scale;
8444 retval = -1;
8446 else
8447 disp = addr; /* displacement */
8449 /* Extract the integral value of scale. */
8450 if (scale_rtx)
8452 if (!CONST_INT_P (scale_rtx))
8453 return 0;
8454 scale = INTVAL (scale_rtx);
8457 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
8458 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
8460 /* Allow arg pointer and stack pointer as index if there is no scaling. */
8461 if (base_reg && index_reg && scale == 1
8462 && (index_reg == arg_pointer_rtx
8463 || index_reg == frame_pointer_rtx
8464 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
8466 rtx tmp;
8467 tmp = base, base = index, index = tmp;
8468 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
8471 /* Special case: %ebp cannot be encoded as a base without a displacement. */
8472 if ((base_reg == hard_frame_pointer_rtx
8473 || base_reg == frame_pointer_rtx
8474 || base_reg == arg_pointer_rtx) && !disp)
8475 disp = const0_rtx;
8477 /* Special case: on K6, [%esi] makes the instruction vector decoded.
8478 Avoid this by transforming to [%esi+0].
8479 Reload calls address legitimization without cfun defined, so we need
8480 to test cfun for being non-NULL. */
8481 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
8482 && base_reg && !index_reg && !disp
8483 && REG_P (base_reg)
8484 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
8485 disp = const0_rtx;
8487 /* Special case: encode reg+reg instead of reg*2. */
8488 if (!base && index && scale && scale == 2)
8489 base = index, base_reg = index_reg, scale = 1;
8491 /* Special case: scaling cannot be encoded without base or displacement. */
8492 if (!base && !disp && index && scale != 1)
8493 disp = const0_rtx;
8495 out->base = base;
8496 out->index = index;
8497 out->disp = disp;
8498 out->scale = scale;
8499 out->seg = seg;
8501 return retval;
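/* An assumed example (not from a real dump): indexing "int a[10]" as
   a[i] typically produces an address like

	(plus:SI (mult:SI (reg:SI i) (const_int 4))
		 (symbol_ref:SI "a"))

   which decomposes into index == (reg i), scale == 4,
   disp == (symbol_ref "a") and base == NULL, i.e. the x86 operand
   a(,%reg,4).  */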
8504 /* Return cost of the memory address x.
8505 For i386, it is better to use a complex address than let gcc copy
8506 the address into a reg and make a new pseudo. But not if the address
8507 requires two regs - that would mean more pseudos with longer
8508 lifetimes. */
8509 static int
8510 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
8512 struct ix86_address parts;
8513 int cost = 1;
8514 int ok = ix86_decompose_address (x, &parts);
8516 gcc_assert (ok);
8518 if (parts.base && GET_CODE (parts.base) == SUBREG)
8519 parts.base = SUBREG_REG (parts.base);
8520 if (parts.index && GET_CODE (parts.index) == SUBREG)
8521 parts.index = SUBREG_REG (parts.index);
8523 /* Attempt to minimize number of registers in the address. */
8524 if ((parts.base
8525 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
8526 || (parts.index
8527 && (!REG_P (parts.index)
8528 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
8529 cost++;
8531 if (parts.base
8532 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
8533 && parts.index
8534 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
8535 && parts.base != parts.index)
8536 cost++;
8538 /* The AMD-K6 doesn't like addresses with the ModR/M byte set to
8539 00_xxx_100b, since its predecode logic can't detect the length of such
8540 instructions and decoding degenerates to vector decode. Increase the
8541 cost of such addresses here. The penalty is minimally 2 cycles. It may
8542 be worthwhile to split such addresses or even to refuse them entirely.
8544 Following addressing modes are affected:
8545 [base+scale*index]
8546 [scale*index+disp]
8547 [base+index]
8549 The first and last cases may be avoidable by explicitly coding the zero
8550 into the memory address, but I don't have an AMD-K6 machine handy to
8551 check this theory. */
8553 if (TARGET_K6
8554 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
8555 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
8556 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
8557 cost += 10;
8559 return cost;
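/* Worked examples under the scheme above (a sketch): a plain
   hard-register (reg) address costs 1; an address combining two
   distinct pseudo registers costs 3 (one increment for using a
   pseudo, one for using two of them); and on K6 an address such as
   (%ebx,%ecx,2) with no displacement pays the additional 10.  */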
8562 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
8563 this is used to form addresses to local data when -fPIC is in
8564 use. */
8566 static bool
8567 darwin_local_data_pic (rtx disp)
8569 return (GET_CODE (disp) == UNSPEC
8570 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
8573 /* Determine if a given RTX is a valid constant. We already know this
8574 satisfies CONSTANT_P. */
8576 bool
8577 legitimate_constant_p (rtx x)
8579 switch (GET_CODE (x))
8581 case CONST:
8582 x = XEXP (x, 0);
8584 if (GET_CODE (x) == PLUS)
8586 if (!CONST_INT_P (XEXP (x, 1)))
8587 return false;
8588 x = XEXP (x, 0);
8591 if (TARGET_MACHO && darwin_local_data_pic (x))
8592 return true;
8594 /* Only some unspecs are valid as "constants". */
8595 if (GET_CODE (x) == UNSPEC)
8596 switch (XINT (x, 1))
8598 case UNSPEC_GOT:
8599 case UNSPEC_GOTOFF:
8600 case UNSPEC_PLTOFF:
8601 return TARGET_64BIT;
8602 case UNSPEC_TPOFF:
8603 case UNSPEC_NTPOFF:
8604 x = XVECEXP (x, 0, 0);
8605 return (GET_CODE (x) == SYMBOL_REF
8606 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
8607 case UNSPEC_DTPOFF:
8608 x = XVECEXP (x, 0, 0);
8609 return (GET_CODE (x) == SYMBOL_REF
8610 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
8611 default:
8612 return false;
8615 /* We must have drilled down to a symbol. */
8616 if (GET_CODE (x) == LABEL_REF)
8617 return true;
8618 if (GET_CODE (x) != SYMBOL_REF)
8619 return false;
8620 /* FALLTHRU */
8622 case SYMBOL_REF:
8623 /* TLS symbols are never valid. */
8624 if (SYMBOL_REF_TLS_MODEL (x))
8625 return false;
8627 /* DLLIMPORT symbols are never valid. */
8628 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
8629 && SYMBOL_REF_DLLIMPORT_P (x))
8630 return false;
8631 break;
8633 case CONST_DOUBLE:
8634 if (GET_MODE (x) == TImode
8635 && x != CONST0_RTX (TImode)
8636 && !TARGET_64BIT)
8637 return false;
8638 break;
8640 case CONST_VECTOR:
8641 if (x == CONST0_RTX (GET_MODE (x)))
8642 return true;
8643 return false;
8645 default:
8646 break;
8649 /* Otherwise we handle everything else in the move patterns. */
8650 return true;
8653 /* Determine if it's legal to put X into the constant pool. This
8654 is not possible for the address of thread-local symbols, which
8655 is checked above. */
8657 static bool
8658 ix86_cannot_force_const_mem (rtx x)
8660 /* We can always put integral constants and vectors in memory. */
8661 switch (GET_CODE (x))
8663 case CONST_INT:
8664 case CONST_DOUBLE:
8665 case CONST_VECTOR:
8666 return false;
8668 default:
8669 break;
8671 return !legitimate_constant_p (x);
8674 /* Determine if a given RTX is a valid constant address. */
8676 bool
8677 constant_address_p (rtx x)
8679 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
8682 /* Nonzero if the constant value X is a legitimate general operand
8683 when generating PIC code. It is given that flag_pic is on and
8684 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
8686 bool
8687 legitimate_pic_operand_p (rtx x)
8689 rtx inner;
8691 switch (GET_CODE (x))
8693 case CONST:
8694 inner = XEXP (x, 0);
8695 if (GET_CODE (inner) == PLUS
8696 && CONST_INT_P (XEXP (inner, 1)))
8697 inner = XEXP (inner, 0);
8699 /* Only some unspecs are valid as "constants". */
8700 if (GET_CODE (inner) == UNSPEC)
8701 switch (XINT (inner, 1))
8703 case UNSPEC_GOT:
8704 case UNSPEC_GOTOFF:
8705 case UNSPEC_PLTOFF:
8706 return TARGET_64BIT;
8707 case UNSPEC_TPOFF:
8708 x = XVECEXP (inner, 0, 0);
8709 return (GET_CODE (x) == SYMBOL_REF
8710 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
8711 case UNSPEC_MACHOPIC_OFFSET:
8712 return legitimate_pic_address_disp_p (x);
8713 default:
8714 return false;
8716 /* FALLTHRU */
8718 case SYMBOL_REF:
8719 case LABEL_REF:
8720 return legitimate_pic_address_disp_p (x);
8722 default:
8723 return true;
8727 /* Determine if a given CONST RTX is a valid memory displacement
8728 in PIC mode. */
8731 legitimate_pic_address_disp_p (rtx disp)
8733 bool saw_plus;
8735 /* In 64bit mode we can allow direct addresses of symbols and labels
8736 when they are not dynamic symbols. */
8737 if (TARGET_64BIT)
8739 rtx op0 = disp, op1;
8741 switch (GET_CODE (disp))
8743 case LABEL_REF:
8744 return true;
8746 case CONST:
8747 if (GET_CODE (XEXP (disp, 0)) != PLUS)
8748 break;
8749 op0 = XEXP (XEXP (disp, 0), 0);
8750 op1 = XEXP (XEXP (disp, 0), 1);
8751 if (!CONST_INT_P (op1)
8752 || INTVAL (op1) >= 16*1024*1024
8753 || INTVAL (op1) < -16*1024*1024)
8754 break;
8755 if (GET_CODE (op0) == LABEL_REF)
8756 return true;
8757 if (GET_CODE (op0) != SYMBOL_REF)
8758 break;
8759 /* FALLTHRU */
8761 case SYMBOL_REF:
8762 /* TLS references should always be enclosed in UNSPEC. */
8763 if (SYMBOL_REF_TLS_MODEL (op0))
8764 return false;
8765 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
8766 && ix86_cmodel != CM_LARGE_PIC)
8767 return true;
8768 break;
8770 default:
8771 break;
8774 if (GET_CODE (disp) != CONST)
8775 return 0;
8776 disp = XEXP (disp, 0);
8778 if (TARGET_64BIT)
8780 /* It is unsafe to allow PLUS expressions; this limits the allowed
8781 distance of GOT references. We should not need these anyway. */
8782 if (GET_CODE (disp) != UNSPEC
8783 || (XINT (disp, 1) != UNSPEC_GOTPCREL
8784 && XINT (disp, 1) != UNSPEC_GOTOFF
8785 && XINT (disp, 1) != UNSPEC_PLTOFF))
8786 return 0;
8788 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
8789 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
8790 return 0;
8791 return 1;
8794 saw_plus = false;
8795 if (GET_CODE (disp) == PLUS)
8797 if (!CONST_INT_P (XEXP (disp, 1)))
8798 return 0;
8799 disp = XEXP (disp, 0);
8800 saw_plus = true;
8803 if (TARGET_MACHO && darwin_local_data_pic (disp))
8804 return 1;
8806 if (GET_CODE (disp) != UNSPEC)
8807 return 0;
8809 switch (XINT (disp, 1))
8811 case UNSPEC_GOT:
8812 if (saw_plus)
8813 return false;
8814 /* We need to check for both symbols and labels because VxWorks loads
8815 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
8816 details. */
8817 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
8818 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
8819 case UNSPEC_GOTOFF:
8820 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
8821 While the ABI also specifies a 32bit relocation, we don't produce
8822 it in the small PIC model at all. */
8823 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
8824 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
8825 && !TARGET_64BIT)
8826 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
8827 return false;
8828 case UNSPEC_GOTTPOFF:
8829 case UNSPEC_GOTNTPOFF:
8830 case UNSPEC_INDNTPOFF:
8831 if (saw_plus)
8832 return false;
8833 disp = XVECEXP (disp, 0, 0);
8834 return (GET_CODE (disp) == SYMBOL_REF
8835 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
8836 case UNSPEC_NTPOFF:
8837 disp = XVECEXP (disp, 0, 0);
8838 return (GET_CODE (disp) == SYMBOL_REF
8839 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
8840 case UNSPEC_DTPOFF:
8841 disp = XVECEXP (disp, 0, 0);
8842 return (GET_CODE (disp) == SYMBOL_REF
8843 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
8846 return 0;
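/* An illustrative valid 32-bit PIC displacement (assumed RTL shape):

	(const (unspec [(symbol_ref "x")] UNSPEC_GOTOFF))

   optionally wrapped as (const (plus (unspec ...) (const_int N))),
   which addresses x@GOTOFF(+N) relative to the PIC register.  */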
8849 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
8850 memory address for an instruction. The MODE argument is the machine mode
8851 for the MEM expression that wants to use this address.
8853 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
8854 convert common non-canonical forms to canonical form so that they will
8855 be recognized. */
8858 legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
8859 rtx addr, int strict)
8861 struct ix86_address parts;
8862 rtx base, index, disp;
8863 HOST_WIDE_INT scale;
8864 const char *reason = NULL;
8865 rtx reason_rtx = NULL_RTX;
8867 if (ix86_decompose_address (addr, &parts) <= 0)
8869 reason = "decomposition failed";
8870 goto report_error;
8873 base = parts.base;
8874 index = parts.index;
8875 disp = parts.disp;
8876 scale = parts.scale;
8878 /* Validate base register.
8880 Don't allow SUBREGs that span more than a word here. It can lead to spill
8881 failures when the base is one word out of a two-word structure, which is
8882 represented internally as a DImode int. */
8884 if (base)
8886 rtx reg;
8887 reason_rtx = base;
8889 if (REG_P (base))
8890 reg = base;
8891 else if (GET_CODE (base) == SUBREG
8892 && REG_P (SUBREG_REG (base))
8893 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
8894 <= UNITS_PER_WORD)
8895 reg = SUBREG_REG (base);
8896 else
8898 reason = "base is not a register";
8899 goto report_error;
8902 if (GET_MODE (base) != Pmode)
8904 reason = "base is not in Pmode";
8905 goto report_error;
8908 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
8909 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
8911 reason = "base is not valid";
8912 goto report_error;
8916 /* Validate index register.
8918 Don't allow SUBREGs that span more than a word here -- same as above. */
8920 if (index)
8922 rtx reg;
8923 reason_rtx = index;
8925 if (REG_P (index))
8926 reg = index;
8927 else if (GET_CODE (index) == SUBREG
8928 && REG_P (SUBREG_REG (index))
8929 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
8930 <= UNITS_PER_WORD)
8931 reg = SUBREG_REG (index);
8932 else
8934 reason = "index is not a register";
8935 goto report_error;
8938 if (GET_MODE (index) != Pmode)
8940 reason = "index is not in Pmode";
8941 goto report_error;
8944 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
8945 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
8947 reason = "index is not valid";
8948 goto report_error;
8952 /* Validate scale factor. */
8953 if (scale != 1)
8955 reason_rtx = GEN_INT (scale);
8956 if (!index)
8958 reason = "scale without index";
8959 goto report_error;
8962 if (scale != 2 && scale != 4 && scale != 8)
8964 reason = "scale is not a valid multiplier";
8965 goto report_error;
8969 /* Validate displacement. */
8970 if (disp)
8972 reason_rtx = disp;
8974 if (GET_CODE (disp) == CONST
8975 && GET_CODE (XEXP (disp, 0)) == UNSPEC
8976 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
8977 switch (XINT (XEXP (disp, 0), 1))
8979 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
8980 used. While the ABI also specifies 32bit relocations, we don't
8981 produce them at all and use IP-relative addressing instead. */
8982 case UNSPEC_GOT:
8983 case UNSPEC_GOTOFF:
8984 gcc_assert (flag_pic);
8985 if (!TARGET_64BIT)
8986 goto is_legitimate_pic;
8987 reason = "64bit address unspec";
8988 goto report_error;
8990 case UNSPEC_GOTPCREL:
8991 gcc_assert (flag_pic);
8992 goto is_legitimate_pic;
8994 case UNSPEC_GOTTPOFF:
8995 case UNSPEC_GOTNTPOFF:
8996 case UNSPEC_INDNTPOFF:
8997 case UNSPEC_NTPOFF:
8998 case UNSPEC_DTPOFF:
8999 break;
9001 default:
9002 reason = "invalid address unspec";
9003 goto report_error;
9006 else if (SYMBOLIC_CONST (disp)
9007 && (flag_pic
9008 || (TARGET_MACHO
9009 #if TARGET_MACHO
9010 && MACHOPIC_INDIRECT
9011 && !machopic_operand_p (disp)
9012 #endif
9016 is_legitimate_pic:
9017 if (TARGET_64BIT && (index || base))
9019 /* foo@dtpoff(%rX) is ok. */
9020 if (GET_CODE (disp) != CONST
9021 || GET_CODE (XEXP (disp, 0)) != PLUS
9022 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
9023 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
9024 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
9025 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
9027 reason = "non-constant pic memory reference";
9028 goto report_error;
9031 else if (! legitimate_pic_address_disp_p (disp))
9033 reason = "displacement is an invalid pic construct";
9034 goto report_error;
9037 /* This code used to verify that a symbolic pic displacement
9038 includes the pic_offset_table_rtx register.
9040 While this is a good idea, unfortunately these constructs may
9041 be created by the "adds using lea" optimization for incorrect
9042 code like:
9044 int a;
9045 int foo(int i)
9047 return *(&a+i);
9050 This code is nonsensical, but results in addressing the
9051 GOT table with a pic_offset_table_rtx base. We can't
9052 just refuse it easily, since it gets matched by the
9053 "addsi3" pattern, which later gets split to lea in
9054 case the output register differs from the input. While this
9055 could be handled by a separate addsi pattern for this case
9056 that never results in lea, disabling this test seems to be the
9057 easier and correct fix for the crash. */
9059 else if (GET_CODE (disp) != LABEL_REF
9060 && !CONST_INT_P (disp)
9061 && (GET_CODE (disp) != CONST
9062 || !legitimate_constant_p (disp))
9063 && (GET_CODE (disp) != SYMBOL_REF
9064 || !legitimate_constant_p (disp)))
9066 reason = "displacement is not constant";
9067 goto report_error;
9069 else if (TARGET_64BIT
9070 && !x86_64_immediate_operand (disp, VOIDmode))
9072 reason = "displacement is out of range";
9073 goto report_error;
9077 /* Everything looks valid. */
9078 return TRUE;
9080 report_error:
9081 return FALSE;
9084 /* Return a unique alias set for the GOT. */
9086 static alias_set_type
9087 ix86_GOT_alias_set (void)
9089 static alias_set_type set = -1;
9090 if (set == -1)
9091 set = new_alias_set ();
9092 return set;
9095 /* Return a legitimate reference for ORIG (an address) using the
9096 register REG. If REG is 0, a new pseudo is generated.
9098 There are two types of references that must be handled:
9100 1. Global data references must load the address from the GOT, via
9101 the PIC reg. An insn is emitted to do this load, and the reg is
9102 returned.
9104 2. Static data references, constant pool addresses, and code labels
9105 compute the address as an offset from the GOT, whose base is in
9106 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
9107 differentiate them from global data objects. The returned
9108 address is the PIC reg + an unspec constant.
9110 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
9111 reg also appears in the address. */
9113 static rtx
9114 legitimize_pic_address (rtx orig, rtx reg)
9116 rtx addr = orig;
9117 rtx new_rtx = orig;
9118 rtx base;
9120 #if TARGET_MACHO
9121 if (TARGET_MACHO && !TARGET_64BIT)
9123 if (reg == 0)
9124 reg = gen_reg_rtx (Pmode);
9125 /* Use the generic Mach-O PIC machinery. */
9126 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
9128 #endif
9130 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
9131 new_rtx = addr;
9132 else if (TARGET_64BIT
9133 && ix86_cmodel != CM_SMALL_PIC
9134 && gotoff_operand (addr, Pmode))
9136 rtx tmpreg;
9137 /* This symbol may be referenced via a displacement from the PIC
9138 base address (@GOTOFF). */
9140 if (reload_in_progress)
9141 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9142 if (GET_CODE (addr) == CONST)
9143 addr = XEXP (addr, 0);
9144 if (GET_CODE (addr) == PLUS)
9146 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
9147 UNSPEC_GOTOFF);
9148 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
9150 else
9151 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
9152 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9153 if (!reg)
9154 tmpreg = gen_reg_rtx (Pmode);
9155 else
9156 tmpreg = reg;
9157 emit_move_insn (tmpreg, new_rtx);
9159 if (reg != 0)
9161 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
9162 tmpreg, 1, OPTAB_DIRECT);
9163 new_rtx = reg;
9165 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
9167 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
9169 /* This symbol may be referenced via a displacement from the PIC
9170 base address (@GOTOFF). */
9172 if (reload_in_progress)
9173 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9174 if (GET_CODE (addr) == CONST)
9175 addr = XEXP (addr, 0);
9176 if (GET_CODE (addr) == PLUS)
9178 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
9179 UNSPEC_GOTOFF);
9180 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
9182 else
9183 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
9184 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9185 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9187 if (reg != 0)
9189 emit_move_insn (reg, new_rtx);
9190 new_rtx = reg;
9193 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
9194 /* We can't use @GOTOFF for text labels on VxWorks;
9195 see gotoff_operand. */
9196 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
9198 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
9200 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
9201 return legitimize_dllimport_symbol (addr, true);
9202 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
9203 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
9204 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
9206 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
9207 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
9211 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
9213 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
9214 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9215 new_rtx = gen_const_mem (Pmode, new_rtx);
9216 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
9218 if (reg == 0)
9219 reg = gen_reg_rtx (Pmode);
9220 /* Use gen_movsi directly; otherwise the address is loaded
9221 into a register for CSE. We don't want to CSE these addresses;
9222 instead we CSE addresses from the GOT table, so skip this. */
9223 emit_insn (gen_movsi (reg, new_rtx));
9224 new_rtx = reg;
9226 else
9228 /* This symbol must be referenced via a load from the
9229 Global Offset Table (@GOT). */
9231 if (reload_in_progress)
9232 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9233 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
9234 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9235 if (TARGET_64BIT)
9236 new_rtx = force_reg (Pmode, new_rtx);
9237 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9238 new_rtx = gen_const_mem (Pmode, new_rtx);
9239 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
9241 if (reg == 0)
9242 reg = gen_reg_rtx (Pmode);
9243 emit_move_insn (reg, new_rtx);
9244 new_rtx = reg;
9247 else
9249 if (CONST_INT_P (addr)
9250 && !x86_64_immediate_operand (addr, VOIDmode))
9252 if (reg)
9254 emit_move_insn (reg, addr);
9255 new_rtx = reg;
9257 else
9258 new_rtx = force_reg (Pmode, addr);
9260 else if (GET_CODE (addr) == CONST)
9262 addr = XEXP (addr, 0);
9264 /* We must match stuff we generate before. Assume the only
9265 unspecs that can get here are ours. Not that we could do
9266 anything with them anyway.... */
9267 if (GET_CODE (addr) == UNSPEC
9268 || (GET_CODE (addr) == PLUS
9269 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
9270 return orig;
9271 gcc_assert (GET_CODE (addr) == PLUS);
9273 if (GET_CODE (addr) == PLUS)
9275 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
9277 /* Check first to see if this is a constant offset from a @GOTOFF
9278 symbol reference. */
9279 if (gotoff_operand (op0, Pmode)
9280 && CONST_INT_P (op1))
9282 if (!TARGET_64BIT)
9284 if (reload_in_progress)
9285 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9286 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
9287 UNSPEC_GOTOFF);
9288 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
9289 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9290 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9292 if (reg != 0)
9294 emit_move_insn (reg, new_rtx);
9295 new_rtx = reg;
9298 else
9300 if (INTVAL (op1) < -16*1024*1024
9301 || INTVAL (op1) >= 16*1024*1024)
9303 if (!x86_64_immediate_operand (op1, Pmode))
9304 op1 = force_reg (Pmode, op1);
9305 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
9309 else
9311 base = legitimize_pic_address (XEXP (addr, 0), reg);
9312 new_rtx = legitimize_pic_address (XEXP (addr, 1),
9313 base == reg ? NULL_RTX : reg);
9315 if (CONST_INT_P (new_rtx))
9316 new_rtx = plus_constant (base, INTVAL (new_rtx));
9317 else
9319 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
9321 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
9322 new_rtx = XEXP (new_rtx, 1);
9324 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
9329 return new_rtx;
9332 /* Load the thread pointer. If TO_REG is true, force it into a register. */
9334 static rtx
9335 get_thread_pointer (int to_reg)
9337 rtx tp, reg, insn;
9339 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
9340 if (!to_reg)
9341 return tp;
9343 reg = gen_reg_rtx (Pmode);
9344 insn = gen_rtx_SET (VOIDmode, reg, tp);
9345 insn = emit_insn (insn);
9347 return reg;
9350 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
9351 false if we expect this to be used for a memory address and true if
9352 we expect to load the address into a register. */
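/* As a rough illustration only (actual code generation varies with
   target and options), the four TLS models handled below correspond
   on x86-64 GNU/Linux to sequences along the lines of:
     global dynamic: leaq x@tlsgd(%rip), %rdi; call __tls_get_addr
     local dynamic:  leaq x@tlsld(%rip), %rdi; call __tls_get_addr,
                     then x@dtpoff displacements from the result
     initial exec:   movq x@gottpoff(%rip), %reg, then %fs:(%reg)
     local exec:     direct %fs:x@tpoff accesses
   Each model is cheaper than the previous one, but requires more to
   be known at link or load time.  */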
9354 static rtx
9355 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
9357 rtx dest, base, off, pic, tp;
9358 int type;
9360 switch (model)
9362 case TLS_MODEL_GLOBAL_DYNAMIC:
9363 dest = gen_reg_rtx (Pmode);
9364 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
9366 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
9368 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
9370 start_sequence ();
9371 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
9372 insns = get_insns ();
9373 end_sequence ();
9375 RTL_CONST_CALL_P (insns) = 1;
9376 emit_libcall_block (insns, dest, rax, x);
9378 else if (TARGET_64BIT && TARGET_GNU2_TLS)
9379 emit_insn (gen_tls_global_dynamic_64 (dest, x));
9380 else
9381 emit_insn (gen_tls_global_dynamic_32 (dest, x));
9383 if (TARGET_GNU2_TLS)
9385 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
9387 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
9389 break;
9391 case TLS_MODEL_LOCAL_DYNAMIC:
9392 base = gen_reg_rtx (Pmode);
9393 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
9395 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
9397 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
9399 start_sequence ();
9400 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
9401 insns = get_insns ();
9402 end_sequence ();
9404 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
9405 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
9406 RTL_CONST_CALL_P (insns) = 1;
9407 emit_libcall_block (insns, base, rax, note);
9409 else if (TARGET_64BIT && TARGET_GNU2_TLS)
9410 emit_insn (gen_tls_local_dynamic_base_64 (base));
9411 else
9412 emit_insn (gen_tls_local_dynamic_base_32 (base));
9414 if (TARGET_GNU2_TLS)
9416 rtx x = ix86_tls_module_base ();
9418 set_unique_reg_note (get_last_insn (), REG_EQUIV,
9419 gen_rtx_MINUS (Pmode, x, tp));
9422 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
9423 off = gen_rtx_CONST (Pmode, off);
9425 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
9427 if (TARGET_GNU2_TLS)
9429 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
9431 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
9434 break;
9436 case TLS_MODEL_INITIAL_EXEC:
9437 if (TARGET_64BIT)
9439 pic = NULL;
9440 type = UNSPEC_GOTNTPOFF;
9442 else if (flag_pic)
9444 if (reload_in_progress)
9445 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9446 pic = pic_offset_table_rtx;
9447 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
9449 else if (!TARGET_ANY_GNU_TLS)
9451 pic = gen_reg_rtx (Pmode);
9452 emit_insn (gen_set_got (pic));
9453 type = UNSPEC_GOTTPOFF;
9455 else
9457 pic = NULL;
9458 type = UNSPEC_INDNTPOFF;
9461 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
9462 off = gen_rtx_CONST (Pmode, off);
9463 if (pic)
9464 off = gen_rtx_PLUS (Pmode, pic, off);
9465 off = gen_const_mem (Pmode, off);
9466 set_mem_alias_set (off, ix86_GOT_alias_set ());
9468 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9470 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
9471 off = force_reg (Pmode, off);
9472 return gen_rtx_PLUS (Pmode, base, off);
9474 else
9476 base = get_thread_pointer (true);
9477 dest = gen_reg_rtx (Pmode);
9478 emit_insn (gen_subsi3 (dest, base, off));
9480 break;
9482 case TLS_MODEL_LOCAL_EXEC:
9483 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
9484 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9485 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
9486 off = gen_rtx_CONST (Pmode, off);
9488 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9490 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
9491 return gen_rtx_PLUS (Pmode, base, off);
9493 else
9495 base = get_thread_pointer (true);
9496 dest = gen_reg_rtx (Pmode);
9497 emit_insn (gen_subsi3 (dest, base, off));
9499 break;
9501 default:
9502 gcc_unreachable ();
9505 return dest;
9508 /* Create or return the unique __imp_DECL dllimport symbol corresponding
9509 to symbol DECL. */
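/* For example (illustrative): a dllimport'd function "foo" is reached
   through a pointer slot that the import library names "__imp_foo"
   (or "__imp__foo" when user labels carry a leading underscore, as on
   32-bit Windows).  The artificial VAR_DECL built below stands for
   that slot, so references load it like any other read-only
   pointer.  */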
9511 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
9512 htab_t dllimport_map;
9514 static tree
9515 get_dllimport_decl (tree decl)
9517 struct tree_map *h, in;
9518 void **loc;
9519 const char *name;
9520 const char *prefix;
9521 size_t namelen, prefixlen;
9522 char *imp_name;
9523 tree to;
9524 rtx rtl;
9526 if (!dllimport_map)
9527 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
9529 in.hash = htab_hash_pointer (decl);
9530 in.base.from = decl;
9531 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
9532 h = (struct tree_map *) *loc;
9533 if (h)
9534 return h->to;
9536 *loc = h = GGC_NEW (struct tree_map);
9537 h->hash = in.hash;
9538 h->base.from = decl;
9539 h->to = to = build_decl (VAR_DECL, NULL, ptr_type_node);
9540 DECL_ARTIFICIAL (to) = 1;
9541 DECL_IGNORED_P (to) = 1;
9542 DECL_EXTERNAL (to) = 1;
9543 TREE_READONLY (to) = 1;
9545 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9546 name = targetm.strip_name_encoding (name);
9547 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
9548 ? "*__imp_" : "*__imp__";
9549 namelen = strlen (name);
9550 prefixlen = strlen (prefix);
9551 imp_name = (char *) alloca (namelen + prefixlen + 1);
9552 memcpy (imp_name, prefix, prefixlen);
9553 memcpy (imp_name + prefixlen, name, namelen + 1);
9555 name = ggc_alloc_string (imp_name, namelen + prefixlen);
9556 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
9557 SET_SYMBOL_REF_DECL (rtl, to);
9558 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
9560 rtl = gen_const_mem (Pmode, rtl);
9561 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
9563 SET_DECL_RTL (to, rtl);
9564 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
9566 return to;
9569 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
9570 true if we require the result be a register. */
9572 static rtx
9573 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
9575 tree imp_decl;
9576 rtx x;
9578 gcc_assert (SYMBOL_REF_DECL (symbol));
9579 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
9581 x = DECL_RTL (imp_decl);
9582 if (want_reg)
9583 x = force_reg (Pmode, x);
9584 return x;
9587 /* Try machine-dependent ways of modifying an illegitimate address
9588 to be legitimate. If we find one, return the new, valid address.
9589 This macro is used in only one place: `memory_address' in explow.c.
9591 OLDX is the address as it was before break_out_memory_refs was called.
9592 In some cases it is useful to look at this to decide what needs to be done.
9594 MODE and WIN are passed so that this macro can use
9595 GO_IF_LEGITIMATE_ADDRESS.
9597 It is always safe for this macro to do nothing. It exists to recognize
9598 opportunities to optimize the output.
9600 For the 80386, we handle X+REG by loading X into a register R and
9601 using R+REG. R will go in a general reg and indexing will be used.
9602 However, if REG is a broken-out memory address or multiplication,
9603 nothing needs to be done because REG can certainly go in a general reg.
9605 When -fpic is used, special handling is needed for symbolic references.
9606 See comments by legitimize_pic_address in i386.c for details. */
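/* For instance (illustrative), an address such as
   (plus (reg) (ashift (reg) (const_int 2)))
   is rewritten below into
   (plus (mult (reg) (const_int 4)) (reg)),
   which matches the machine's base + index*scale addressing mode.  */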
9609 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
9611 int changed = 0;
9612 unsigned log;
9614 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
9615 if (log)
9616 return legitimize_tls_address (x, (enum tls_model) log, false);
9617 if (GET_CODE (x) == CONST
9618 && GET_CODE (XEXP (x, 0)) == PLUS
9619 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9620 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
9622 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
9623 (enum tls_model) log, false);
9624 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
9627 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
9629 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
9630 return legitimize_dllimport_symbol (x, true);
9631 if (GET_CODE (x) == CONST
9632 && GET_CODE (XEXP (x, 0)) == PLUS
9633 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9634 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
9636 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
9637 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
9641 if (flag_pic && SYMBOLIC_CONST (x))
9642 return legitimize_pic_address (x, 0);
9644 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
9645 if (GET_CODE (x) == ASHIFT
9646 && CONST_INT_P (XEXP (x, 1))
9647 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
9649 changed = 1;
9650 log = INTVAL (XEXP (x, 1));
9651 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
9652 GEN_INT (1 << log));
9655 if (GET_CODE (x) == PLUS)
9657 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
9659 if (GET_CODE (XEXP (x, 0)) == ASHIFT
9660 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9661 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
9663 changed = 1;
9664 log = INTVAL (XEXP (XEXP (x, 0), 1));
9665 XEXP (x, 0) = gen_rtx_MULT (Pmode,
9666 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
9667 GEN_INT (1 << log));
9670 if (GET_CODE (XEXP (x, 1)) == ASHIFT
9671 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
9672 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
9674 changed = 1;
9675 log = INTVAL (XEXP (XEXP (x, 1), 1));
9676 XEXP (x, 1) = gen_rtx_MULT (Pmode,
9677 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
9678 GEN_INT (1 << log));
9681 /* Put multiply first if it isn't already. */
9682 if (GET_CODE (XEXP (x, 1)) == MULT)
9684 rtx tmp = XEXP (x, 0);
9685 XEXP (x, 0) = XEXP (x, 1);
9686 XEXP (x, 1) = tmp;
9687 changed = 1;
9690 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
9691 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
9692 created by virtual register instantiation, register elimination, and
9693 similar optimizations. */
9694 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
9696 changed = 1;
9697 x = gen_rtx_PLUS (Pmode,
9698 gen_rtx_PLUS (Pmode, XEXP (x, 0),
9699 XEXP (XEXP (x, 1), 0)),
9700 XEXP (XEXP (x, 1), 1));
9703 /* Canonicalize
9704 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
9705 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
9706 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
9707 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
9708 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
9709 && CONSTANT_P (XEXP (x, 1)))
9711 rtx constant;
9712 rtx other = NULL_RTX;
9714 if (CONST_INT_P (XEXP (x, 1)))
9716 constant = XEXP (x, 1);
9717 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
9719 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
9721 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
9722 other = XEXP (x, 1);
9724 else
9725 constant = 0;
9727 if (constant)
9729 changed = 1;
9730 x = gen_rtx_PLUS (Pmode,
9731 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
9732 XEXP (XEXP (XEXP (x, 0), 1), 0)),
9733 plus_constant (other, INTVAL (constant)));
9737 if (changed && legitimate_address_p (mode, x, FALSE))
9738 return x;
9740 if (GET_CODE (XEXP (x, 0)) == MULT)
9742 changed = 1;
9743 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
9746 if (GET_CODE (XEXP (x, 1)) == MULT)
9748 changed = 1;
9749 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
9752 if (changed
9753 && REG_P (XEXP (x, 1))
9754 && REG_P (XEXP (x, 0)))
9755 return x;
9757 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
9759 changed = 1;
9760 x = legitimize_pic_address (x, 0);
9763 if (changed && legitimate_address_p (mode, x, FALSE))
9764 return x;
9766 if (REG_P (XEXP (x, 0)))
9768 rtx temp = gen_reg_rtx (Pmode);
9769 rtx val = force_operand (XEXP (x, 1), temp);
9770 if (val != temp)
9771 emit_move_insn (temp, val);
9773 XEXP (x, 1) = temp;
9774 return x;
9777 else if (REG_P (XEXP (x, 1)))
9779 rtx temp = gen_reg_rtx (Pmode);
9780 rtx val = force_operand (XEXP (x, 0), temp);
9781 if (val != temp)
9782 emit_move_insn (temp, val);
9784 XEXP (x, 0) = temp;
9785 return x;
9789 return x;
9792 /* Print an integer constant expression in assembler syntax. Addition
9793 and subtraction are the only arithmetic that may appear in these
9794 expressions. FILE is the stdio stream to write to, X is the rtx, and
9795 CODE is the operand print code from the output string. */
9797 static void
9798 output_pic_addr_const (FILE *file, rtx x, int code)
9800 char buf[256];
9802 switch (GET_CODE (x))
9804 case PC:
9805 gcc_assert (flag_pic);
9806 putc ('.', file);
9807 break;
9809 case SYMBOL_REF:
9810 if (! TARGET_MACHO || TARGET_64BIT)
9811 output_addr_const (file, x);
9812 else
9814 const char *name = XSTR (x, 0);
9816 /* Mark the decl as referenced so that cgraph will
9817 output the function. */
9818 if (SYMBOL_REF_DECL (x))
9819 mark_decl_referenced (SYMBOL_REF_DECL (x));
9821 #if TARGET_MACHO
9822 if (MACHOPIC_INDIRECT
9823 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
9824 name = machopic_indirection_name (x, /*stub_p=*/true);
9825 #endif
9826 assemble_name (file, name);
9828 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
9829 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
9830 fputs ("@PLT", file);
9831 break;
9833 case LABEL_REF:
9834 x = XEXP (x, 0);
9835 /* FALLTHRU */
9836 case CODE_LABEL:
9837 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
9838 assemble_name (asm_out_file, buf);
9839 break;
9841 case CONST_INT:
9842 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
9843 break;
9845 case CONST:
9846 /* This used to output parentheses around the expression,
9847 but that does not work on the 386 (either ATT or BSD assembler). */
9848 output_pic_addr_const (file, XEXP (x, 0), code);
9849 break;
9851 case CONST_DOUBLE:
9852 if (GET_MODE (x) == VOIDmode)
9854 /* We can use %d if the number is <32 bits and positive. */
9855 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
9856 fprintf (file, "0x%lx%08lx",
9857 (unsigned long) CONST_DOUBLE_HIGH (x),
9858 (unsigned long) CONST_DOUBLE_LOW (x));
9859 else
9860 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
9862 else
9863 /* We can't handle floating point constants;
9864 PRINT_OPERAND must handle them. */
9865 output_operand_lossage ("floating constant misused");
9866 break;
9868 case PLUS:
9869 /* Some assemblers need integer constants to appear first. */
9870 if (CONST_INT_P (XEXP (x, 0)))
9872 output_pic_addr_const (file, XEXP (x, 0), code);
9873 putc ('+', file);
9874 output_pic_addr_const (file, XEXP (x, 1), code);
9876 else
9878 gcc_assert (CONST_INT_P (XEXP (x, 1)));
9879 output_pic_addr_const (file, XEXP (x, 1), code);
9880 putc ('+', file);
9881 output_pic_addr_const (file, XEXP (x, 0), code);
9883 break;
9885 case MINUS:
9886 if (!TARGET_MACHO)
9887 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
9888 output_pic_addr_const (file, XEXP (x, 0), code);
9889 putc ('-', file);
9890 output_pic_addr_const (file, XEXP (x, 1), code);
9891 if (!TARGET_MACHO)
9892 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
9893 break;
9895 case UNSPEC:
9896 gcc_assert (XVECLEN (x, 0) == 1);
9897 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
9898 switch (XINT (x, 1))
9900 case UNSPEC_GOT:
9901 fputs ("@GOT", file);
9902 break;
9903 case UNSPEC_GOTOFF:
9904 fputs ("@GOTOFF", file);
9905 break;
9906 case UNSPEC_PLTOFF:
9907 fputs ("@PLTOFF", file);
9908 break;
9909 case UNSPEC_GOTPCREL:
9910 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
9911 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
9912 break;
9913 case UNSPEC_GOTTPOFF:
9914 /* FIXME: This might be @TPOFF in Sun ld too. */
9915 fputs ("@GOTTPOFF", file);
9916 break;
9917 case UNSPEC_TPOFF:
9918 fputs ("@TPOFF", file);
9919 break;
9920 case UNSPEC_NTPOFF:
9921 if (TARGET_64BIT)
9922 fputs ("@TPOFF", file);
9923 else
9924 fputs ("@NTPOFF", file);
9925 break;
9926 case UNSPEC_DTPOFF:
9927 fputs ("@DTPOFF", file);
9928 break;
9929 case UNSPEC_GOTNTPOFF:
9930 if (TARGET_64BIT)
9931 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
9932 "@GOTTPOFF(%rip)": "@GOTTPOFF[rip]", file);
9933 else
9934 fputs ("@GOTNTPOFF", file);
9935 break;
9936 case UNSPEC_INDNTPOFF:
9937 fputs ("@INDNTPOFF", file);
9938 break;
9939 #if TARGET_MACHO
9940 case UNSPEC_MACHOPIC_OFFSET:
9941 putc ('-', file);
9942 machopic_output_function_base_name (file);
9943 break;
9944 #endif
9945 default:
9946 output_operand_lossage ("invalid UNSPEC as operand");
9947 break;
9949 break;
9951 default:
9952 output_operand_lossage ("invalid expression as operand");
9956 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
9957 We need to emit DTP-relative relocations. */
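/* E.g., for SIZE 8 the output is of the form ".long x@DTPOFF, 0";
   the @DTPOFF relocation itself is 32-bit, so the upper half is
   padded with a zero word.  */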
9959 static void ATTRIBUTE_UNUSED
9960 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
9962 fputs (ASM_LONG, file);
9963 output_addr_const (file, x);
9964 fputs ("@DTPOFF", file);
9965 switch (size)
9967 case 4:
9968 break;
9969 case 8:
9970 fputs (", 0", file);
9971 break;
9972 default:
9973 gcc_unreachable ();
9977 /* Return true if X is a representation of the PIC register. This copes
9978 with calls from ix86_find_base_term, where the register might have
9979 been replaced by a cselib value. */
9981 static bool
9982 ix86_pic_register_p (rtx x)
9984 if (GET_CODE (x) == VALUE)
9985 return (pic_offset_table_rtx
9986 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
9987 else
9988 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
9991 /* In the name of slightly smaller debug output, and to cater to
9992 general assembler lossage, recognize PIC+GOTOFF and turn it back
9993 into a direct symbol reference.
9995 On Darwin, this is necessary to avoid a crash, because Darwin
9996 has a different PIC label for each routine but the DWARF debugging
9997 information is not associated with any particular routine, so it's
9998 necessary to remove references to the PIC label from RTL stored by
9999 the DWARF output code. */
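/* For example (illustrative), on ia32 an address of the form
   (plus (reg:SI ebx) (const (unspec [(symbol_ref "x")] UNSPEC_GOTOFF)))
   is turned back into the bare (symbol_ref "x"), with any constant or
   register addend re-applied around it.  */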
10001 static rtx
10002 ix86_delegitimize_address (rtx orig_x)
10004 rtx x = orig_x;
10005 /* reg_addend is NULL or a multiple of some register. */
10006 rtx reg_addend = NULL_RTX;
10007 /* const_addend is NULL or a const_int. */
10008 rtx const_addend = NULL_RTX;
10009 /* This is the result, or NULL. */
10010 rtx result = NULL_RTX;
10012 if (MEM_P (x))
10013 x = XEXP (x, 0);
10015 if (TARGET_64BIT)
10017 if (GET_CODE (x) != CONST
10018 || GET_CODE (XEXP (x, 0)) != UNSPEC
10019 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
10020 || !MEM_P (orig_x))
10021 return orig_x;
10022 return XVECEXP (XEXP (x, 0), 0, 0);
10025 if (GET_CODE (x) != PLUS
10026 || GET_CODE (XEXP (x, 1)) != CONST)
10027 return orig_x;
10029 if (ix86_pic_register_p (XEXP (x, 0)))
10030 /* %ebx + GOT/GOTOFF */
10032 else if (GET_CODE (XEXP (x, 0)) == PLUS)
10034 /* %ebx + %reg * scale + GOT/GOTOFF */
10035 reg_addend = XEXP (x, 0);
10036 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
10037 reg_addend = XEXP (reg_addend, 1);
10038 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
10039 reg_addend = XEXP (reg_addend, 0);
10040 else
10041 return orig_x;
10042 if (!REG_P (reg_addend)
10043 && GET_CODE (reg_addend) != MULT
10044 && GET_CODE (reg_addend) != ASHIFT)
10045 return orig_x;
10047 else
10048 return orig_x;
10050 x = XEXP (XEXP (x, 1), 0);
10051 if (GET_CODE (x) == PLUS
10052 && CONST_INT_P (XEXP (x, 1)))
10054 const_addend = XEXP (x, 1);
10055 x = XEXP (x, 0);
10058 if (GET_CODE (x) == UNSPEC
10059 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
10060 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
10061 result = XVECEXP (x, 0, 0);
10063 if (TARGET_MACHO && darwin_local_data_pic (x)
10064 && !MEM_P (orig_x))
10065 result = XVECEXP (x, 0, 0);
10067 if (! result)
10068 return orig_x;
10070 if (const_addend)
10071 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
10072 if (reg_addend)
10073 result = gen_rtx_PLUS (Pmode, reg_addend, result);
10074 return result;
10077 /* If X is a machine specific address (i.e. a symbol or label being
10078 referenced as a displacement from the GOT implemented using an
10079 UNSPEC), then return the base term. Otherwise return X. */
10082 ix86_find_base_term (rtx x)
10084 rtx term;
10086 if (TARGET_64BIT)
10088 if (GET_CODE (x) != CONST)
10089 return x;
10090 term = XEXP (x, 0);
10091 if (GET_CODE (term) == PLUS
10092 && (CONST_INT_P (XEXP (term, 1))
10093 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
10094 term = XEXP (term, 0);
10095 if (GET_CODE (term) != UNSPEC
10096 || XINT (term, 1) != UNSPEC_GOTPCREL)
10097 return x;
10099 return XVECEXP (term, 0, 0);
10102 return ix86_delegitimize_address (x);
10105 static void
10106 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
10107 int fp, FILE *file)
10109 const char *suffix;
10111 if (mode == CCFPmode || mode == CCFPUmode)
10113 enum rtx_code second_code, bypass_code;
10114 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
10115 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
10116 code = ix86_fp_compare_code_to_integer (code);
10117 mode = CCmode;
10119 if (reverse)
10120 code = reverse_condition (code);
10122 switch (code)
10124 case EQ:
10125 switch (mode)
10127 case CCAmode:
10128 suffix = "a";
10129 break;
10131 case CCCmode:
10132 suffix = "c";
10133 break;
10135 case CCOmode:
10136 suffix = "o";
10137 break;
10139 case CCSmode:
10140 suffix = "s";
10141 break;
10143 default:
10144 suffix = "e";
10146 break;
10147 case NE:
10148 switch (mode)
10150 case CCAmode:
10151 suffix = "na";
10152 break;
10154 case CCCmode:
10155 suffix = "nc";
10156 break;
10158 case CCOmode:
10159 suffix = "no";
10160 break;
10162 case CCSmode:
10163 suffix = "ns";
10164 break;
10166 default:
10167 suffix = "ne";
10169 break;
10170 case GT:
10171 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
10172 suffix = "g";
10173 break;
10174 case GTU:
10175 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
10176 Those same assemblers have the same but opposite lossage on cmov. */
10177 if (mode == CCmode)
10178 suffix = fp ? "nbe" : "a";
10179 else if (mode == CCCmode)
10180 suffix = "b";
10181 else
10182 gcc_unreachable ();
10183 break;
10184 case LT:
10185 switch (mode)
10187 case CCNOmode:
10188 case CCGOCmode:
10189 suffix = "s";
10190 break;
10192 case CCmode:
10193 case CCGCmode:
10194 suffix = "l";
10195 break;
10197 default:
10198 gcc_unreachable ();
10200 break;
10201 case LTU:
10202 gcc_assert (mode == CCmode || mode == CCCmode);
10203 suffix = "b";
10204 break;
10205 case GE:
10206 switch (mode)
10208 case CCNOmode:
10209 case CCGOCmode:
10210 suffix = "ns";
10211 break;
10213 case CCmode:
10214 case CCGCmode:
10215 suffix = "ge";
10216 break;
10218 default:
10219 gcc_unreachable ();
10221 break;
10222 case GEU:
10223 /* ??? As above. */
10224 gcc_assert (mode == CCmode || mode == CCCmode);
10225 suffix = fp ? "nb" : "ae";
10226 break;
10227 case LE:
10228 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
10229 suffix = "le";
10230 break;
10231 case LEU:
10232 /* ??? As above. */
10233 if (mode == CCmode)
10234 suffix = "be";
10235 else if (mode == CCCmode)
10236 suffix = fp ? "nb" : "ae";
10237 else
10238 gcc_unreachable ();
10239 break;
10240 case UNORDERED:
10241 suffix = fp ? "u" : "p";
10242 break;
10243 case ORDERED:
10244 suffix = fp ? "nu" : "np";
10245 break;
10246 default:
10247 gcc_unreachable ();
10249 fputs (suffix, file);
10252 /* Print the name of register X to FILE based on its machine mode and number.
10253 If CODE is 'w', pretend the mode is HImode.
10254 If CODE is 'b', pretend the mode is QImode.
10255 If CODE is 'k', pretend the mode is SImode.
10256 If CODE is 'q', pretend the mode is DImode.
10257 If CODE is 'x', pretend the mode is V4SFmode.
10258 If CODE is 't', pretend the mode is V8SFmode.
10259 If CODE is 'h', pretend the reg is the 'high' byte register.
10260 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op.
10261 If CODE is 'd', duplicate the operand for AVX instruction.
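/* E.g., for hard register ax, code 'b' prints "%al" and code 'w'
   prints "%ax" under ATT syntax (without the '%' prefix under Intel
   syntax).  */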
10264 void
10265 print_reg (rtx x, int code, FILE *file)
10267 const char *reg;
10268 bool duplicated = code == 'd' && TARGET_AVX;
10270 gcc_assert (x == pc_rtx
10271 || (REGNO (x) != ARG_POINTER_REGNUM
10272 && REGNO (x) != FRAME_POINTER_REGNUM
10273 && REGNO (x) != FLAGS_REG
10274 && REGNO (x) != FPSR_REG
10275 && REGNO (x) != FPCR_REG));
10277 if (ASSEMBLER_DIALECT == ASM_ATT)
10278 putc ('%', file);
10280 if (x == pc_rtx)
10282 gcc_assert (TARGET_64BIT);
10283 fputs ("rip", file);
10284 return;
10287 if (code == 'w' || MMX_REG_P (x))
10288 code = 2;
10289 else if (code == 'b')
10290 code = 1;
10291 else if (code == 'k')
10292 code = 4;
10293 else if (code == 'q')
10294 code = 8;
10295 else if (code == 'y')
10296 code = 3;
10297 else if (code == 'h')
10298 code = 0;
10299 else if (code == 'x')
10300 code = 16;
10301 else if (code == 't')
10302 code = 32;
10303 else
10304 code = GET_MODE_SIZE (GET_MODE (x));
10306 /* Irritatingly, AMD extended registers use a different naming convention
10307 from the normal registers. */
10308 if (REX_INT_REG_P (x))
10310 gcc_assert (TARGET_64BIT);
10311 switch (code)
10313 case 0:
10314 error ("extended registers have no high halves");
10315 break;
10316 case 1:
10317 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
10318 break;
10319 case 2:
10320 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
10321 break;
10322 case 4:
10323 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
10324 break;
10325 case 8:
10326 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
10327 break;
10328 default:
10329 error ("unsupported operand size for extended register");
10330 break;
10332 return;
10335 reg = NULL;
10336 switch (code)
10338 case 3:
10339 if (STACK_TOP_P (x))
10341 reg = "st(0)";
10342 break;
10344 /* FALLTHRU */
10345 case 8:
10346 case 4:
10347 case 12:
10348 if (! ANY_FP_REG_P (x))
10349 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
10350 /* FALLTHRU */
10351 case 16:
10352 case 2:
10353 normal:
10354 reg = hi_reg_name[REGNO (x)];
10355 break;
10356 case 1:
10357 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
10358 goto normal;
10359 reg = qi_reg_name[REGNO (x)];
10360 break;
10361 case 0:
10362 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
10363 goto normal;
10364 reg = qi_high_reg_name[REGNO (x)];
10365 break;
10366 case 32:
10367 if (SSE_REG_P (x))
10369 gcc_assert (!duplicated);
10370 putc ('y', file);
10371 fputs (hi_reg_name[REGNO (x)] + 1, file);
10372 return;
10374 break;
10375 default:
10376 gcc_unreachable ();
10379 fputs (reg, file);
10380 if (duplicated)
10382 if (ASSEMBLER_DIALECT == ASM_ATT)
10383 fprintf (file, ", %%%s", reg);
10384 else
10385 fprintf (file, ", %s", reg);
10389 /* Locate some local-dynamic symbol still in use by this function
10390 so that we can print its name in some tls_local_dynamic_base
10391 pattern. */
10393 static int
10394 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
10396 rtx x = *px;
10398 if (GET_CODE (x) == SYMBOL_REF
10399 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
10401 cfun->machine->some_ld_name = XSTR (x, 0);
10402 return 1;
10405 return 0;
10408 static const char *
10409 get_some_local_dynamic_name (void)
10411 rtx insn;
10413 if (cfun->machine->some_ld_name)
10414 return cfun->machine->some_ld_name;
10416 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
10417 if (INSN_P (insn)
10418 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
10419 return cfun->machine->some_ld_name;
10421 gcc_unreachable ();
10424 /* Meaning of CODE:
10425 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
10426 C -- print opcode suffix for set/cmov insn.
10427 c -- like C, but print reversed condition
10428 E,e -- likewise, but for compare-and-branch fused insn.
10429 F,f -- likewise, but for floating-point.
10430 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
10431 otherwise nothing
10432 R -- print the prefix for register names.
10433 z -- print the opcode suffix for the size of the current operand.
10434 * -- print a star (in certain assembler syntax)
10435 A -- print an absolute memory reference.
10436 w -- print the operand as if it's a "word" (HImode) even if it isn't.
10437 s -- print a shift double count, followed by the assembler's argument
10438 delimiter.
10439 b -- print the QImode name of the register for the indicated operand.
10440 %b0 would print %al if operands[0] is reg 0.
10441 w -- likewise, print the HImode name of the register.
10442 k -- likewise, print the SImode name of the register.
10443 q -- likewise, print the DImode name of the register.
10444 x -- likewise, print the V4SFmode name of the register.
10445 t -- likewise, print the V8SFmode name of the register.
10446 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
10447 y -- print "st(0)" instead of "st" as a register.
10448 d -- print duplicated register operand for AVX instruction.
10449 D -- print condition for SSE cmp instruction.
10450 P -- if PIC, print an @PLT suffix.
10451 X -- don't print any sort of PIC '@' suffix for a symbol.
10452 & -- print some in-use local-dynamic symbol name.
10453 H -- print a memory address offset by 8; used for sse high-parts
10454 Y -- print condition for SSE5 com* instruction.
10455 + -- print a branch hint as 'cs' or 'ds' prefix
10456 ; -- print a semicolon (after prefixes due to a bug in older gas).
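/* For instance, the 387 templates elsewhere in this file build
   strings like "fadd%z2\t%2"; for a DFmode memory operand, %z2
   expands to the 'l' suffix, so the assembler sees "faddl".  */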
10459 void
10460 print_operand (FILE *file, rtx x, int code)
10462 if (code)
10464 switch (code)
10466 case '*':
10467 if (ASSEMBLER_DIALECT == ASM_ATT)
10468 putc ('*', file);
10469 return;
10471 case '&':
10472 assemble_name (file, get_some_local_dynamic_name ());
10473 return;
10475 case 'A':
10476 switch (ASSEMBLER_DIALECT)
10478 case ASM_ATT:
10479 putc ('*', file);
10480 break;
10482 case ASM_INTEL:
10483 /* Intel syntax. For absolute addresses, registers should not
10484 be surrounded by brackets. */
10485 if (!REG_P (x))
10487 putc ('[', file);
10488 PRINT_OPERAND (file, x, 0);
10489 putc (']', file);
10490 return;
10492 break;
10494 default:
10495 gcc_unreachable ();
10498 PRINT_OPERAND (file, x, 0);
10499 return;
10502 case 'L':
10503 if (ASSEMBLER_DIALECT == ASM_ATT)
10504 putc ('l', file);
10505 return;
10507 case 'W':
10508 if (ASSEMBLER_DIALECT == ASM_ATT)
10509 putc ('w', file);
10510 return;
10512 case 'B':
10513 if (ASSEMBLER_DIALECT == ASM_ATT)
10514 putc ('b', file);
10515 return;
10517 case 'Q':
10518 if (ASSEMBLER_DIALECT == ASM_ATT)
10519 putc ('l', file);
10520 return;
10522 case 'S':
10523 if (ASSEMBLER_DIALECT == ASM_ATT)
10524 putc ('s', file);
10525 return;
10527 case 'T':
10528 if (ASSEMBLER_DIALECT == ASM_ATT)
10529 putc ('t', file);
10530 return;
10532 case 'z':
10533 /* 387 opcodes don't get size suffixes if the operands are
10534 registers. */
10535 if (STACK_REG_P (x))
10536 return;
10538 /* Likewise if using Intel opcodes. */
10539 if (ASSEMBLER_DIALECT == ASM_INTEL)
10540 return;
10542 /* Derive the opcode suffix from the size of the operand. */
10543 switch (GET_MODE_SIZE (GET_MODE (x)))
10545 case 1:
10546 putc ('b', file);
10547 return;
10549 case 2:
10550 if (MEM_P (x))
10552 #ifdef HAVE_GAS_FILDS_FISTS
10553 putc ('s', file);
10554 #endif
10555 return;
10557 else
10558 putc ('w', file);
10559 return;
10561 case 4:
10562 if (GET_MODE (x) == SFmode)
10564 putc ('s', file);
10565 return;
10567 else
10568 putc ('l', file);
10569 return;
10571 case 12:
10572 case 16:
10573 putc ('t', file);
10574 return;
10576 case 8:
10577 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
10579 if (MEM_P (x))
10581 #ifdef GAS_MNEMONICS
10582 putc ('q', file);
10583 #else
10584 putc ('l', file);
10585 putc ('l', file);
10586 #endif
10588 else
10589 putc ('q', file);
10591 else
10592 putc ('l', file);
10593 return;
10595 default:
10596 gcc_unreachable ();
10599 case 'd':
10600 case 'b':
10601 case 'w':
10602 case 'k':
10603 case 'q':
10604 case 'h':
10605 case 't':
10606 case 'y':
10607 case 'x':
10608 case 'X':
10609 case 'P':
10610 break;
10612 case 's':
10613 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
10615 PRINT_OPERAND (file, x, 0);
10616 fputs (", ", file);
10618 return;
10620 case 'D':
10621 /* A little bit of brain damage here. The SSE compare instructions
10622 use completely different names for the comparisons than the
10623 fp conditional moves do. */
10624 if (TARGET_AVX)
10626 switch (GET_CODE (x))
10628 case EQ:
10629 fputs ("eq", file);
10630 break;
10631 case UNEQ:
10632 fputs ("eq_us", file);
10633 break;
10634 case LT:
10635 fputs ("lt", file);
10636 break;
10637 case UNLT:
10638 fputs ("nge", file);
10639 break;
10640 case LE:
10641 fputs ("le", file);
10642 break;
10643 case UNLE:
10644 fputs ("ngt", file);
10645 break;
10646 case UNORDERED:
10647 fputs ("unord", file);
10648 break;
10649 case NE:
10650 fputs ("neq", file);
10651 break;
10652 case LTGT:
10653 fputs ("neq_oq", file);
10654 break;
10655 case GE:
10656 fputs ("ge", file);
10657 break;
10658 case UNGE:
10659 fputs ("nlt", file);
10660 break;
10661 case GT:
10662 fputs ("gt", file);
10663 break;
10664 case UNGT:
10665 fputs ("nle", file);
10666 break;
10667 case ORDERED:
10668 fputs ("ord", file);
10669 break;
10670 default:
10671 gcc_unreachable ();
10674 else
10676 switch (GET_CODE (x))
10678 case EQ:
10679 case UNEQ:
10680 fputs ("eq", file);
10681 break;
10682 case LT:
10683 case UNLT:
10684 fputs ("lt", file);
10685 break;
10686 case LE:
10687 case UNLE:
10688 fputs ("le", file);
10689 break;
10690 case UNORDERED:
10691 fputs ("unord", file);
10692 break;
10693 case NE:
10694 case LTGT:
10695 fputs ("neq", file);
10696 break;
10697 case UNGE:
10698 case GE:
10699 fputs ("nlt", file);
10700 break;
10701 case UNGT:
10702 case GT:
10703 fputs ("nle", file);
10704 break;
10705 case ORDERED:
10706 fputs ("ord", file);
10707 break;
10708 default:
10709 gcc_unreachable ();
10712 return;
10713 case 'O':
10714 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
10715 if (ASSEMBLER_DIALECT == ASM_ATT)
10717 switch (GET_MODE (x))
10719 case HImode: putc ('w', file); break;
10720 case SImode:
10721 case SFmode: putc ('l', file); break;
10722 case DImode:
10723 case DFmode: putc ('q', file); break;
10724 default: gcc_unreachable ();
10726 putc ('.', file);
10728 #endif
10729 return;
10730 case 'C':
10731 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
10732 return;
10733 case 'F':
10734 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
10735 if (ASSEMBLER_DIALECT == ASM_ATT)
10736 putc ('.', file);
10737 #endif
10738 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
10739 return;
10741 /* Like above, but reverse condition */
10742 case 'c':
10743 /* Check to see if argument to %c is really a constant
10744 and not a condition code which needs to be reversed. */
10745 if (!COMPARISON_P (x))
10747 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
10748 return;
10750 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
10751 return;
10752 case 'f':
10753 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
10754 if (ASSEMBLER_DIALECT == ASM_ATT)
10755 putc ('.', file);
10756 #endif
10757 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
10758 return;
10760 case 'E':
10761 put_condition_code (GET_CODE (x), CCmode, 0, 0, file);
10762 return;
10764 case 'e':
10765 put_condition_code (GET_CODE (x), CCmode, 1, 0, file);
10766 return;
10768 case 'H':
10769 /* It doesn't actually matter what mode we use here, as we're
10770 only going to use this for printing. */
10771 x = adjust_address_nv (x, DImode, 8);
10772 break;
10774 case '+':
10776 rtx x;
10778 if (!optimize
10779 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
10780 return;
10782 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
10783 if (x)
10785 int pred_val = INTVAL (XEXP (x, 0));
10787 if (pred_val < REG_BR_PROB_BASE * 45 / 100
10788 || pred_val > REG_BR_PROB_BASE * 55 / 100)
10790 int taken = pred_val > REG_BR_PROB_BASE / 2;
10791 int cputaken = final_forward_branch_p (current_output_insn) == 0;
10793 /* Emit hints only in the cases where the default branch prediction
10794 heuristics would fail. */
10795 if (taken != cputaken)
10797 /* We use 3e (DS) prefix for taken branches and
10798 2e (CS) prefix for not taken branches. */
10799 if (taken)
10800 fputs ("ds ; ", file);
10801 else
10802 fputs ("cs ; ", file);
10806 return;
10809 case 'Y':
10810 switch (GET_CODE (x))
10812 case NE:
10813 fputs ("neq", file);
10814 break;
10815 case EQ:
10816 fputs ("eq", file);
10817 break;
10818 case GE:
10819 case GEU:
10820 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
10821 break;
10822 case GT:
10823 case GTU:
10824 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
10825 break;
10826 case LE:
10827 case LEU:
10828 fputs ("le", file);
10829 break;
10830 case LT:
10831 case LTU:
10832 fputs ("lt", file);
10833 break;
10834 case UNORDERED:
10835 fputs ("unord", file);
10836 break;
10837 case ORDERED:
10838 fputs ("ord", file);
10839 break;
10840 case UNEQ:
10841 fputs ("ueq", file);
10842 break;
10843 case UNGE:
10844 fputs ("nlt", file);
10845 break;
10846 case UNGT:
10847 fputs ("nle", file);
10848 break;
10849 case UNLE:
10850 fputs ("ule", file);
10851 break;
10852 case UNLT:
10853 fputs ("ult", file);
10854 break;
10855 case LTGT:
10856 fputs ("une", file);
10857 break;
10858 default:
10859 gcc_unreachable ();
10861 return;
10863 case ';':
10864 #if TARGET_MACHO
10865 fputs (" ; ", file);
10866 #else
10867 fputc (' ', file);
10868 #endif
10869 return;
10871 default:
10872 output_operand_lossage ("invalid operand code '%c'", code);
10876 if (REG_P (x))
10877 print_reg (x, code, file);
10879 else if (MEM_P (x))
10881 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
10882 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
10883 && GET_MODE (x) != BLKmode)
10885 const char * size;
10886 switch (GET_MODE_SIZE (GET_MODE (x)))
10888 case 1: size = "BYTE"; break;
10889 case 2: size = "WORD"; break;
10890 case 4: size = "DWORD"; break;
10891 case 8: size = "QWORD"; break;
10892 case 12: size = "XWORD"; break;
10893 case 16:
10894 if (GET_MODE (x) == XFmode)
10895 size = "XWORD";
10896 else
10897 size = "XMMWORD";
10898 break;
10899 default:
10900 gcc_unreachable ();
10903 /* Check for explicit size override (codes 'b', 'w' and 'k') */
10904 if (code == 'b')
10905 size = "BYTE";
10906 else if (code == 'w')
10907 size = "WORD";
10908 else if (code == 'k')
10909 size = "DWORD";
10911 fputs (size, file);
10912 fputs (" PTR ", file);
10915 x = XEXP (x, 0);
10916 /* Avoid (%rip) for call operands. */
10917 if (CONSTANT_ADDRESS_P (x) && code == 'P'
10918 && !CONST_INT_P (x))
10919 output_addr_const (file, x);
10920 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
10921 output_operand_lossage ("invalid constraints for operand");
10922 else
10923 output_address (x);
10926 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
10928 REAL_VALUE_TYPE r;
10929 long l;
10931 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10932 REAL_VALUE_TO_TARGET_SINGLE (r, l);
10934 if (ASSEMBLER_DIALECT == ASM_ATT)
10935 putc ('$', file);
10936 fprintf (file, "0x%08lx", (long unsigned int) l);
10939 /* These float cases don't actually occur as immediate operands. */
10940 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
10942 char dstr[30];
10944 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
10945 fprintf (file, "%s", dstr);
10948 else if (GET_CODE (x) == CONST_DOUBLE
10949 && GET_MODE (x) == XFmode)
10951 char dstr[30];
10953 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
10954 fprintf (file, "%s", dstr);
10957 else
10959 /* We have patterns that allow zero sets of memory, for instance.
10960 In 64-bit mode, we should probably support all 8-byte vectors,
10961 since we can in fact encode that into an immediate. */
10962 if (GET_CODE (x) == CONST_VECTOR)
10964 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
10965 x = const0_rtx;
10968 if (code != 'P')
10970 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
10972 if (ASSEMBLER_DIALECT == ASM_ATT)
10973 putc ('$', file);
10975 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
10976 || GET_CODE (x) == LABEL_REF)
10978 if (ASSEMBLER_DIALECT == ASM_ATT)
10979 putc ('$', file);
10980 else
10981 fputs ("OFFSET FLAT:", file);
10984 if (CONST_INT_P (x))
10985 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10986 else if (flag_pic)
10987 output_pic_addr_const (file, x, code);
10988 else
10989 output_addr_const (file, x);
10993 /* Print a memory operand whose address is ADDR. */
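/* E.g., the address
   (plus (mult (reg:SI ebx) (const_int 4)) (plus (reg:SI esi) (const_int 8)))
   decomposes below into base %esi, index %ebx, scale 4 and
   displacement 8, and is printed as "8(%esi,%ebx,4)" under ATT syntax
   or as "[esi+8+ebx*4]" under Intel syntax.  */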
10995 void
10996 print_operand_address (FILE *file, rtx addr)
10998 struct ix86_address parts;
10999 rtx base, index, disp;
11000 int scale;
11001 int ok = ix86_decompose_address (addr, &parts);
11003 gcc_assert (ok);
11005 base = parts.base;
11006 index = parts.index;
11007 disp = parts.disp;
11008 scale = parts.scale;
11010 switch (parts.seg)
11012 case SEG_DEFAULT:
11013 break;
11014 case SEG_FS:
11015 case SEG_GS:
11016 if (ASSEMBLER_DIALECT == ASM_ATT)
11017 putc ('%', file);
11018 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
11019 break;
11020 default:
11021 gcc_unreachable ();
11024 /* Use one-byte-shorter RIP-relative addressing in 64-bit mode. */
11025 if (TARGET_64BIT && !base && !index)
11027 rtx symbol = disp;
11029 if (GET_CODE (disp) == CONST
11030 && GET_CODE (XEXP (disp, 0)) == PLUS
11031 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11032 symbol = XEXP (XEXP (disp, 0), 0);
11034 if (GET_CODE (symbol) == LABEL_REF
11035 || (GET_CODE (symbol) == SYMBOL_REF
11036 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
11037 base = pc_rtx;
11039 if (!base && !index)
11041 /* A displacement-only address requires special attention. */
11043 if (CONST_INT_P (disp))
11045 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
11046 fputs ("ds:", file);
11047 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
11049 else if (flag_pic)
11050 output_pic_addr_const (file, disp, 0);
11051 else
11052 output_addr_const (file, disp);
11054 else
11056 if (ASSEMBLER_DIALECT == ASM_ATT)
11058 if (disp)
11060 if (flag_pic)
11061 output_pic_addr_const (file, disp, 0);
11062 else if (GET_CODE (disp) == LABEL_REF)
11063 output_asm_label (disp);
11064 else
11065 output_addr_const (file, disp);
11068 putc ('(', file);
11069 if (base)
11070 print_reg (base, 0, file);
11071 if (index)
11073 putc (',', file);
11074 print_reg (index, 0, file);
11075 if (scale != 1)
11076 fprintf (file, ",%d", scale);
11078 putc (')', file);
11080 else
11082 rtx offset = NULL_RTX;
11084 if (disp)
11086 /* Pull out the offset of a symbol; print any symbol itself. */
11087 if (GET_CODE (disp) == CONST
11088 && GET_CODE (XEXP (disp, 0)) == PLUS
11089 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11091 offset = XEXP (XEXP (disp, 0), 1);
11092 disp = gen_rtx_CONST (VOIDmode,
11093 XEXP (XEXP (disp, 0), 0));
11096 if (flag_pic)
11097 output_pic_addr_const (file, disp, 0);
11098 else if (GET_CODE (disp) == LABEL_REF)
11099 output_asm_label (disp);
11100 else if (CONST_INT_P (disp))
11101 offset = disp;
11102 else
11103 output_addr_const (file, disp);
11106 putc ('[', file);
11107 if (base)
11109 print_reg (base, 0, file);
11110 if (offset)
11112 if (INTVAL (offset) >= 0)
11113 putc ('+', file);
11114 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
11117 else if (offset)
11118 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
11119 else
11120 putc ('0', file);
11122 if (index)
11124 putc ('+', file);
11125 print_reg (index, 0, file);
11126 if (scale != 1)
11127 fprintf (file, "*%d", scale);
11129 putc (']', file);
11134 bool
11135 output_addr_const_extra (FILE *file, rtx x)
11137 rtx op;
11139 if (GET_CODE (x) != UNSPEC)
11140 return false;
11142 op = XVECEXP (x, 0, 0);
11143 switch (XINT (x, 1))
11145 case UNSPEC_GOTTPOFF:
11146 output_addr_const (file, op);
11147 /* FIXME: This might be @TPOFF in Sun ld. */
11148 fputs ("@GOTTPOFF", file);
11149 break;
11150 case UNSPEC_TPOFF:
11151 output_addr_const (file, op);
11152 fputs ("@TPOFF", file);
11153 break;
11154 case UNSPEC_NTPOFF:
11155 output_addr_const (file, op);
11156 if (TARGET_64BIT)
11157 fputs ("@TPOFF", file);
11158 else
11159 fputs ("@NTPOFF", file);
11160 break;
11161 case UNSPEC_DTPOFF:
11162 output_addr_const (file, op);
11163 fputs ("@DTPOFF", file);
11164 break;
11165 case UNSPEC_GOTNTPOFF:
11166 output_addr_const (file, op);
11167 if (TARGET_64BIT)
11168 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11169 "@GOTTPOFF(%rip)" : "@GOTTPOFF[rip]", file);
11170 else
11171 fputs ("@GOTNTPOFF", file);
11172 break;
11173 case UNSPEC_INDNTPOFF:
11174 output_addr_const (file, op);
11175 fputs ("@INDNTPOFF", file);
11176 break;
11177 #if TARGET_MACHO
11178 case UNSPEC_MACHOPIC_OFFSET:
11179 output_addr_const (file, op);
11180 putc ('-', file);
11181 machopic_output_function_base_name (file);
11182 break;
11183 #endif
11185 default:
11186 return false;
11189 return true;
11192 /* Split one or more DImode RTL references into pairs of SImode
11193 references. The RTL can be REG, offsettable MEM, integer constant, or
11194 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
11195 split and "num" is its length. lo_half and hi_half are output arrays
11196 that parallel "operands". */
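/* E.g., a DImode pseudo (reg:DI N) is split into the pair
   (subreg:SI (reg:DI N) 0) and (subreg:SI (reg:DI N) 4), the low and
   high halves on this little-endian target.  */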
11198 void
11199 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
11201 while (num--)
11203 rtx op = operands[num];
11205 /* simplify_subreg refuses to split volatile memory addresses,
11206 but we still have to handle them. */
11207 if (MEM_P (op))
11209 lo_half[num] = adjust_address (op, SImode, 0);
11210 hi_half[num] = adjust_address (op, SImode, 4);
11212 else
11214 lo_half[num] = simplify_gen_subreg (SImode, op,
11215 GET_MODE (op) == VOIDmode
11216 ? DImode : GET_MODE (op), 0);
11217 hi_half[num] = simplify_gen_subreg (SImode, op,
11218 GET_MODE (op) == VOIDmode
11219 ? DImode : GET_MODE (op), 4);
11223 /* Split one or more TImode RTL references into pairs of DImode
11224 references. The RTL can be REG, offsettable MEM, integer constant, or
11225 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
11226 split and "num" is its length. lo_half and hi_half are output arrays
11227 that parallel "operands". */
11229 void
11230 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
11232 while (num--)
11234 rtx op = operands[num];
11236 /* simplify_subreg refuses to split volatile memory addresses, but we
11237 still have to handle them. */
11238 if (MEM_P (op))
11240 lo_half[num] = adjust_address (op, DImode, 0);
11241 hi_half[num] = adjust_address (op, DImode, 8);
11243 else
11245 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
11246 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
11251 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
11252 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
11253 is the expression of the binary operation. The output may either be
11254 emitted here, or returned to the caller, like all output_* functions.
11256 There is no guarantee that the operands are the same mode, as they
11257 might be within FLOAT or FLOAT_EXTEND expressions. */
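/* E.g., for (set (reg:DF st0) (plus:DF (reg:DF st0) (mem:DF ...)))
   this returns "fadd%z2\t%2" ("faddl" once %z2 is expanded), while a
   register-register add whose source register dies becomes a popping
   "faddp".  */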
11259 #ifndef SYSV386_COMPAT
11260 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
11261 wants to fix the assemblers because that causes incompatibility
11262 with gcc. No-one wants to fix gcc because that causes
11263 incompatibility with assemblers... You can use the option of
11264 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
11265 #define SYSV386_COMPAT 1
11266 #endif
11268 const char *
11269 output_387_binary_op (rtx insn, rtx *operands)
11271 static char buf[40];
11272 const char *p;
11273 const char *ssep;
11274 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
11276 #ifdef ENABLE_CHECKING
11277 /* Even if we do not want to check the inputs, this documents the input
11278 constraints, which helps in understanding the following code. */
11279 if (STACK_REG_P (operands[0])
11280 && ((REG_P (operands[1])
11281 && REGNO (operands[0]) == REGNO (operands[1])
11282 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
11283 || (REG_P (operands[2])
11284 && REGNO (operands[0]) == REGNO (operands[2])
11285 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
11286 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
11287 ; /* ok */
11288 else
11289 gcc_assert (is_sse);
11290 #endif
11292 switch (GET_CODE (operands[3]))
11294 case PLUS:
11295 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11296 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11297 p = "fiadd";
11298 else
11299 p = "fadd";
11300 ssep = "vadd";
11301 break;
11303 case MINUS:
11304 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11305 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11306 p = "fisub";
11307 else
11308 p = "fsub";
11309 ssep = "vsub";
11310 break;
11312 case MULT:
11313 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11314 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11315 p = "fimul";
11316 else
11317 p = "fmul";
11318 ssep = "vmul";
11319 break;
11321 case DIV:
11322 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11323 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11324 p = "fidiv";
11325 else
11326 p = "fdiv";
11327 ssep = "vdiv";
11328 break;
11330 default:
11331 gcc_unreachable ();
11334 if (is_sse)
11336 if (TARGET_AVX)
11338 strcpy (buf, ssep);
11339 if (GET_MODE (operands[0]) == SFmode)
11340 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
11341 else
11342 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
11344 else
11346 strcpy (buf, ssep + 1);
11347 if (GET_MODE (operands[0]) == SFmode)
11348 strcat (buf, "ss\t{%2, %0|%0, %2}");
11349 else
11350 strcat (buf, "sd\t{%2, %0|%0, %2}");
11352 return buf;
11354 strcpy (buf, p);
11356 switch (GET_CODE (operands[3]))
11358 case MULT:
11359 case PLUS:
11360 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
11362 rtx temp = operands[2];
11363 operands[2] = operands[1];
11364 operands[1] = temp;
11367 /* We now know that operands[0] == operands[1]. */
11369 if (MEM_P (operands[2]))
11371 p = "%z2\t%2";
11372 break;
11375 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
11377 if (STACK_TOP_P (operands[0]))
11378 /* How is it that we are storing to a dead operand[2]?
11379 Well, presumably operands[1] is dead too. We can't
11380 store the result to st(0) as st(0) gets popped on this
11381 instruction. Instead store to operands[2] (which I
11382 think has to be st(1)). st(1) will be popped later.
11383 gcc <= 2.8.1 didn't have this check and generated
11384 assembly code that the Unixware assembler rejected. */
11385 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
11386 else
11387 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
11388 break;
11391 if (STACK_TOP_P (operands[0]))
11392 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
11393 else
11394 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
11395 break;
11397 case MINUS:
11398 case DIV:
11399 if (MEM_P (operands[1]))
11401 p = "r%z1\t%1";
11402 break;
11405 if (MEM_P (operands[2]))
11407 p = "%z2\t%2";
11408 break;
11411 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
11413 #if SYSV386_COMPAT
11414 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
11415 derived assemblers, confusingly reverse the direction of
11416 the operation for fsub{r} and fdiv{r} when the
11417 destination register is not st(0). The Intel assembler
11418 doesn't have this brain damage. Read !SYSV386_COMPAT to
11419 figure out what the hardware really does. */
11420 if (STACK_TOP_P (operands[0]))
11421 p = "{p\t%0, %2|rp\t%2, %0}";
11422 else
11423 p = "{rp\t%2, %0|p\t%0, %2}";
11424 #else
11425 if (STACK_TOP_P (operands[0]))
11426 /* As above for fmul/fadd, we can't store to st(0). */
11427 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
11428 else
11429 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
11430 #endif
11431 break;
11434 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
11436 #if SYSV386_COMPAT
11437 if (STACK_TOP_P (operands[0]))
11438 p = "{rp\t%0, %1|p\t%1, %0}";
11439 else
11440 p = "{p\t%1, %0|rp\t%0, %1}";
11441 #else
11442 if (STACK_TOP_P (operands[0]))
11443 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
11444 else
11445 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
11446 #endif
11447 break;
11450 if (STACK_TOP_P (operands[0]))
11452 if (STACK_TOP_P (operands[1]))
11453 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
11454 else
11455 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
11456 break;
11458 else if (STACK_TOP_P (operands[1]))
11460 #if SYSV386_COMPAT
11461 p = "{\t%1, %0|r\t%0, %1}";
11462 #else
11463 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
11464 #endif
11466 else
11468 #if SYSV386_COMPAT
11469 p = "{r\t%2, %0|\t%0, %2}";
11470 #else
11471 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
11472 #endif
11474 break;
11476 default:
11477 gcc_unreachable ();
11480 strcat (buf, p);
11481 return buf;
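/* Worked example (illustrative, not part of the original source):
   for (set (reg:DF st) (plus:DF (reg:DF st) (mem:DF ...))) the x87
   path above yields the template "fadd%z2\t%2", while the AVX path
   builds "vaddsd\t{%2, %1, %0|%0, %1, %2}" -- the {att|intel} braces
   let final pick the operand order for the assembler dialect.  */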
11484 /* Return needed mode for entity in optimize_mode_switching pass. */
11486 int
11487 ix86_mode_needed (int entity, rtx insn)
11489 enum attr_i387_cw mode;
11491 /* The mode UNINITIALIZED is used to store the control word after a
11492 function call or an ASM pattern. The mode ANY specifies that the
11493 function has no requirements on the control word and makes no changes
11494 to the bits we are interested in. */
11496 if (CALL_P (insn)
11497 || (NONJUMP_INSN_P (insn)
11498 && (asm_noperands (PATTERN (insn)) >= 0
11499 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
11500 return I387_CW_UNINITIALIZED;
11502 if (recog_memoized (insn) < 0)
11503 return I387_CW_ANY;
11505 mode = get_attr_i387_cw (insn);
11507 switch (entity)
11509 case I387_TRUNC:
11510 if (mode == I387_CW_TRUNC)
11511 return mode;
11512 break;
11514 case I387_FLOOR:
11515 if (mode == I387_CW_FLOOR)
11516 return mode;
11517 break;
11519 case I387_CEIL:
11520 if (mode == I387_CW_CEIL)
11521 return mode;
11522 break;
11524 case I387_MASK_PM:
11525 if (mode == I387_CW_MASK_PM)
11526 return mode;
11527 break;
11529 default:
11530 gcc_unreachable ();
11533 return I387_CW_ANY;
11536 /* Output code to initialize the control word copies used by the
11537 trunc?f?i and rounding patterns. The current control word is saved
11538 in one stack slot, and a copy adjusted for MODE in another. */
11540 void
11541 emit_i387_cw_initialization (int mode)
11543 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
11544 rtx new_mode;
11546 enum ix86_stack_slot slot;
11548 rtx reg = gen_reg_rtx (HImode);
11550 emit_insn (gen_x86_fnstcw_1 (stored_mode));
11551 emit_move_insn (reg, copy_rtx (stored_mode));
11553 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
11554 || optimize_function_for_size_p (cfun))
11556 switch (mode)
11558 case I387_CW_TRUNC:
11559 /* round toward zero (truncate) */
11560 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
11561 slot = SLOT_CW_TRUNC;
11562 break;
11564 case I387_CW_FLOOR:
11565 /* round down toward -oo */
11566 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
11567 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
11568 slot = SLOT_CW_FLOOR;
11569 break;
11571 case I387_CW_CEIL:
11572 /* round up toward +oo */
11573 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
11574 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
11575 slot = SLOT_CW_CEIL;
11576 break;
11578 case I387_CW_MASK_PM:
11579 /* mask precision exception for nearbyint() */
11580 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
11581 slot = SLOT_CW_MASK_PM;
11582 break;
11584 default:
11585 gcc_unreachable ();
11588 else
11590 switch (mode)
11592 case I387_CW_TRUNC:
11593 /* round toward zero (truncate) */
11594 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
11595 slot = SLOT_CW_TRUNC;
11596 break;
11598 case I387_CW_FLOOR:
11599 /* round down toward -oo */
11600 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
11601 slot = SLOT_CW_FLOOR;
11602 break;
11604 case I387_CW_CEIL:
11605 /* round up toward +oo */
11606 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
11607 slot = SLOT_CW_CEIL;
11608 break;
11610 case I387_CW_MASK_PM:
11611 /* mask precision exception for nearbyint() */
11612 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
11613 slot = SLOT_CW_MASK_PM;
11614 break;
11616 default:
11617 gcc_unreachable ();
11621 gcc_assert (slot < MAX_386_STACK_LOCALS);
11623 new_mode = assign_386_stack_local (HImode, slot);
11624 emit_move_insn (new_mode, reg);
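/* For reference (a sketch of the i387 control word layout assumed by
   the constants above; not part of the original source):

       bit  5       PM   precision exception mask
       bits 10-11   RC   rounding control:
                         00 = nearest, 01 = down (floor),
                         10 = up (ceil), 11 = toward zero (trunc)

   so or-ing in 0x0400, 0x0800 or 0x0c00 selects floor, ceil or
   truncation, and or-ing in 0x0020 masks the precision exception.  */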
11627 /* Output code for INSN to convert a float to a signed int. OPERANDS
11628 are the insn operands. The output may be [HSD]Imode and the input
11629 operand may be [SDX]Fmode. */
11631 const char *
11632 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
11634 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
11635 int dimode_p = GET_MODE (operands[0]) == DImode;
11636 int round_mode = get_attr_i387_cw (insn);
11638 /* Jump through a hoop or two for DImode, since the hardware has no
11639 non-popping instruction. We used to do this a different way, but
11640 that was somewhat fragile and broke with post-reload splitters. */
11641 if ((dimode_p || fisttp) && !stack_top_dies)
11642 output_asm_insn ("fld\t%y1", operands);
11644 gcc_assert (STACK_TOP_P (operands[1]));
11645 gcc_assert (MEM_P (operands[0]));
11646 gcc_assert (GET_MODE (operands[1]) != TFmode);
11648 if (fisttp)
11649 output_asm_insn ("fisttp%z0\t%0", operands);
11650 else
11652 if (round_mode != I387_CW_ANY)
11653 output_asm_insn ("fldcw\t%3", operands);
11654 if (stack_top_dies || dimode_p)
11655 output_asm_insn ("fistp%z0\t%0", operands);
11656 else
11657 output_asm_insn ("fist%z0\t%0", operands);
11658 if (round_mode != I387_CW_ANY)
11659 output_asm_insn ("fldcw\t%2", operands);
11662 return "";
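/* Illustrative output (assumed operands, not taken from this file):
   a DImode truncation without fisttp, where the stack top stays
   live, expands along the lines of

       fld     %st(0)          ; copy: fistpll below pops the stack
       fldcw   %3              ; switch to round-toward-zero
       fistpll %0              ; store the 64-bit integer and pop
       fldcw   %2              ; restore the original control word
*/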
11665 /* Output code for x87 ffreep insn. The OPNO argument, which may only
11666 have the values zero or one, indicates the ffreep insn's operand
11667 from the OPERANDS array. */
11669 static const char *
11670 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
11672 if (TARGET_USE_FFREEP)
11673 #if HAVE_AS_IX86_FFREEP
11674 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
11675 #else
11677 static char retval[] = ".word\t0xc_df";
11678 int regno = REGNO (operands[opno]);
11680 gcc_assert (FP_REGNO_P (regno));
11682 retval[9] = '0' + (regno - FIRST_STACK_REG);
11683 return retval;
11685 #endif
11687 return opno ? "fstp\t%y1" : "fstp\t%y0";
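/* Note (added commentary): "ffreep %st(i)" encodes as the two bytes
   0xdf 0xc0+i, so once retval[9] -- the '_' placeholder -- is patched
   with the stack register number, the little-endian ".word" above
   emits exactly that byte pair for assemblers lacking the mnemonic.  */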
11691 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
11692 should be used. UNORDERED_P is true when fucom should be used. */
11694 const char *
11695 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
11697 int stack_top_dies;
11698 rtx cmp_op0, cmp_op1;
11699 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
11701 if (eflags_p)
11703 cmp_op0 = operands[0];
11704 cmp_op1 = operands[1];
11706 else
11708 cmp_op0 = operands[1];
11709 cmp_op1 = operands[2];
11712 if (is_sse)
11714 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
11715 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
11716 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
11717 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
11719 if (GET_MODE (operands[0]) == SFmode)
11720 if (unordered_p)
11721 return &ucomiss[TARGET_AVX ? 0 : 1];
11722 else
11723 return &comiss[TARGET_AVX ? 0 : 1];
11724 else
11725 if (unordered_p)
11726 return &ucomisd[TARGET_AVX ? 0 : 1];
11727 else
11728 return &comisd[TARGET_AVX ? 0 : 1];
11731 gcc_assert (STACK_TOP_P (cmp_op0));
11733 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
11735 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
11737 if (stack_top_dies)
11739 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
11740 return output_387_ffreep (operands, 1);
11742 else
11743 return "ftst\n\tfnstsw\t%0";
11746 if (STACK_REG_P (cmp_op1)
11747 && stack_top_dies
11748 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
11749 && REGNO (cmp_op1) != FIRST_STACK_REG)
11751 /* If both the top of the 387 stack dies, and the other operand
11752 is also a stack register that dies, then this must be a
11753 `fcompp' float compare */
11755 if (eflags_p)
11757 /* There is no double popping fcomi variant. Fortunately,
11758 eflags is immune from the fstp's cc clobbering. */
11759 if (unordered_p)
11760 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
11761 else
11762 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
11763 return output_387_ffreep (operands, 0);
11765 else
11767 if (unordered_p)
11768 return "fucompp\n\tfnstsw\t%0";
11769 else
11770 return "fcompp\n\tfnstsw\t%0";
11773 else
11775 /* Encoded as (eflags_p << 3) | (intmode << 2) | (unordered_p << 1) | stack_top_dies. */
11777 static const char * const alt[16] =
11779 "fcom%z2\t%y2\n\tfnstsw\t%0",
11780 "fcomp%z2\t%y2\n\tfnstsw\t%0",
11781 "fucom%z2\t%y2\n\tfnstsw\t%0",
11782 "fucomp%z2\t%y2\n\tfnstsw\t%0",
11784 "ficom%z2\t%y2\n\tfnstsw\t%0",
11785 "ficomp%z2\t%y2\n\tfnstsw\t%0",
11786 NULL,
11787 NULL,
11789 "fcomi\t{%y1, %0|%0, %y1}",
11790 "fcomip\t{%y1, %0|%0, %y1}",
11791 "fucomi\t{%y1, %0|%0, %y1}",
11792 "fucomip\t{%y1, %0|%0, %y1}",
11794 NULL,
11795 NULL,
11796 NULL,
11797 NULL
11800 int mask;
11801 const char *ret;
11803 mask = eflags_p << 3;
11804 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
11805 mask |= unordered_p << 1;
11806 mask |= stack_top_dies;
11808 gcc_assert (mask < 16);
11809 ret = alt[mask];
11810 gcc_assert (ret);
11812 return ret;
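/* Worked example (illustrative): comparing st(0) with an st(i) that
   stays live, using fcomi (eflags_p = 1) and an unordered test
   (unordered_p = 1), gives mask = 8 | 0 | 2 | 0 = 10 and selects
   "fucomi\t{%y1, %0|%0, %y1}" from the table above.  */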
11816 void
11817 ix86_output_addr_vec_elt (FILE *file, int value)
11819 const char *directive = ASM_LONG;
11821 #ifdef ASM_QUAD
11822 if (TARGET_64BIT)
11823 directive = ASM_QUAD;
11824 #else
11825 gcc_assert (!TARGET_64BIT);
11826 #endif
11828 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
11831 void
11832 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
11834 const char *directive = ASM_LONG;
11836 #ifdef ASM_QUAD
11837 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
11838 directive = ASM_QUAD;
11839 #else
11840 gcc_assert (!TARGET_64BIT);
11841 #endif
11842 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
11843 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
11844 fprintf (file, "%s%s%d-%s%d\n",
11845 directive, LPREFIX, value, LPREFIX, rel);
11846 else if (HAVE_AS_GOTOFF_IN_DATA)
11847 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
11848 #if TARGET_MACHO
11849 else if (TARGET_MACHO)
11851 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
11852 machopic_output_function_base_name (file);
11853 fprintf(file, "\n");
11855 #endif
11856 else
11857 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
11858 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
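/* Example output (hypothetical labels, assuming the usual ".L" local
   prefix): on x86-64 this prints e.g. ".long .L5-.L2"; with an
   assembler accepting @GOTOFF in data it prints ".long .L5@GOTOFF"
   instead of the GOT-relative fallback below it.  */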
11861 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
11862 for the target. */
11864 void
11865 ix86_expand_clear (rtx dest)
11867 rtx tmp;
11869 /* We play register width games, which are only valid after reload. */
11870 gcc_assert (reload_completed);
11872 /* Avoid HImode and its attendant prefix byte. */
11873 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
11874 dest = gen_rtx_REG (SImode, REGNO (dest));
11875 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
11877 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
11878 if (reload_completed && (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ()))
11880 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
11881 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
11884 emit_insn (tmp);
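/* Illustration (assumed destination %eax, not from this file): the
   two forms chosen between above are

       xorl    %eax, %eax      ; 2 bytes, clobbers the flags
       movl    $0, %eax        ; 5 bytes, leaves the flags intact

   which is why the xor form carries an explicit FLAGS_REG clobber.  */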
11887 /* X is an unchanging MEM. If it is a constant pool reference, return
11888 the constant pool rtx, else NULL. */
11890 rtx
11891 maybe_get_pool_constant (rtx x)
11893 x = ix86_delegitimize_address (XEXP (x, 0));
11895 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
11896 return get_pool_constant (x);
11898 return NULL_RTX;
11901 void
11902 ix86_expand_move (enum machine_mode mode, rtx operands[])
11904 rtx op0, op1;
11905 enum tls_model model;
11907 op0 = operands[0];
11908 op1 = operands[1];
11910 if (GET_CODE (op1) == SYMBOL_REF)
11912 model = SYMBOL_REF_TLS_MODEL (op1);
11913 if (model)
11915 op1 = legitimize_tls_address (op1, model, true);
11916 op1 = force_operand (op1, op0);
11917 if (op1 == op0)
11918 return;
11920 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11921 && SYMBOL_REF_DLLIMPORT_P (op1))
11922 op1 = legitimize_dllimport_symbol (op1, false);
11924 else if (GET_CODE (op1) == CONST
11925 && GET_CODE (XEXP (op1, 0)) == PLUS
11926 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
11928 rtx addend = XEXP (XEXP (op1, 0), 1);
11929 rtx symbol = XEXP (XEXP (op1, 0), 0);
11930 rtx tmp = NULL;
11932 model = SYMBOL_REF_TLS_MODEL (symbol);
11933 if (model)
11934 tmp = legitimize_tls_address (symbol, model, true);
11935 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11936 && SYMBOL_REF_DLLIMPORT_P (symbol))
11937 tmp = legitimize_dllimport_symbol (symbol, true);
11939 if (tmp)
11941 tmp = force_operand (tmp, NULL);
11942 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
11943 op0, 1, OPTAB_DIRECT);
11944 if (tmp == op0)
11945 return;
11949 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
11951 if (TARGET_MACHO && !TARGET_64BIT)
11953 #if TARGET_MACHO
11954 if (MACHOPIC_PURE)
11956 rtx temp = ((reload_in_progress
11957 || ((op0 && REG_P (op0))
11958 && mode == Pmode))
11959 ? op0 : gen_reg_rtx (Pmode));
11960 op1 = machopic_indirect_data_reference (op1, temp);
11961 op1 = machopic_legitimize_pic_address (op1, mode,
11962 temp == op1 ? 0 : temp);
11964 else if (MACHOPIC_INDIRECT)
11965 op1 = machopic_indirect_data_reference (op1, 0);
11966 if (op0 == op1)
11967 return;
11968 #endif
11970 else
11972 if (MEM_P (op0))
11973 op1 = force_reg (Pmode, op1);
11974 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
11976 rtx reg = !can_create_pseudo_p () ? op0 : NULL_RTX;
11977 op1 = legitimize_pic_address (op1, reg);
11978 if (op0 == op1)
11979 return;
11983 else
11985 if (MEM_P (op0)
11986 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
11987 || !push_operand (op0, mode))
11988 && MEM_P (op1))
11989 op1 = force_reg (mode, op1);
11991 if (push_operand (op0, mode)
11992 && ! general_no_elim_operand (op1, mode))
11993 op1 = copy_to_mode_reg (mode, op1);
11995 /* Force large constants into a register in 64-bit compilation
11996 so that they can be CSEd. */
11997 if (can_create_pseudo_p ()
11998 && (mode == DImode) && TARGET_64BIT
11999 && immediate_operand (op1, mode)
12000 && !x86_64_zext_immediate_operand (op1, VOIDmode)
12001 && !register_operand (op0, mode)
12002 && optimize)
12003 op1 = copy_to_mode_reg (mode, op1);
12005 if (can_create_pseudo_p ()
12006 && FLOAT_MODE_P (mode)
12007 && GET_CODE (op1) == CONST_DOUBLE)
12009 /* If we are loading a floating point constant to a register,
12010 force the value to memory now, since we'll get better code
12011 out the back end. */
12013 op1 = validize_mem (force_const_mem (mode, op1));
12014 if (!register_operand (op0, mode))
12016 rtx temp = gen_reg_rtx (mode);
12017 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
12018 emit_move_insn (op0, temp);
12019 return;
12024 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12027 void
12028 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
12030 rtx op0 = operands[0], op1 = operands[1];
12031 unsigned int align = GET_MODE_ALIGNMENT (mode);
12033 /* Force constants other than zero into memory. We do not know how
12034 the instructions used to build constants modify the upper 64 bits
12035 of the register; once we have that information, we may be able
12036 to handle some of them more efficiently. */
12037 if (can_create_pseudo_p ()
12038 && register_operand (op0, mode)
12039 && (CONSTANT_P (op1)
12040 || (GET_CODE (op1) == SUBREG
12041 && CONSTANT_P (SUBREG_REG (op1))))
12042 && standard_sse_constant_p (op1) <= 0)
12043 op1 = validize_mem (force_const_mem (mode, op1));
12045 /* We need to check memory alignment for SSE modes, since an
12046 attribute can make operands unaligned. */
12047 if (can_create_pseudo_p ()
12048 && SSE_REG_MODE_P (mode)
12049 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
12050 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
12052 rtx tmp[2];
12054 /* ix86_expand_vector_move_misalign() does not like constants ... */
12055 if (CONSTANT_P (op1)
12056 || (GET_CODE (op1) == SUBREG
12057 && CONSTANT_P (SUBREG_REG (op1))))
12058 op1 = validize_mem (force_const_mem (mode, op1));
12060 /* ... nor both arguments in memory. */
12061 if (!register_operand (op0, mode)
12062 && !register_operand (op1, mode))
12063 op1 = force_reg (mode, op1);
12065 tmp[0] = op0; tmp[1] = op1;
12066 ix86_expand_vector_move_misalign (mode, tmp);
12067 return;
12070 /* Make operand1 a register if it isn't already. */
12071 if (can_create_pseudo_p ()
12072 && !register_operand (op0, mode)
12073 && !register_operand (op1, mode))
12075 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
12076 return;
12079 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12082 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
12083 straight to ix86_expand_vector_move. */
12084 /* Code generation for scalar reg-reg moves of single and double precision data:
12085 if (x86_sse_partial_reg_dependency == true || x86_sse_split_regs == true)
12086 movaps reg, reg
12087 else
12088 movss reg, reg
12089 if (x86_sse_partial_reg_dependency == true)
12090 movapd reg, reg
12091 else
12092 movsd reg, reg
12094 Code generation for scalar loads of double precision data:
12095 if (x86_sse_split_regs == true)
12096 movlpd mem, reg (gas syntax)
12097 else
12098 movsd mem, reg
12100 Code generation for unaligned packed loads of single precision data
12101 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
12102 if (x86_sse_unaligned_move_optimal)
12103 movups mem, reg
12105 if (x86_sse_partial_reg_dependency == true)
12107 xorps reg, reg
12108 movlps mem, reg
12109 movhps mem+8, reg
12111 else
12113 movlps mem, reg
12114 movhps mem+8, reg
12117 Code generation for unaligned packed loads of double precision data
12118 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
12119 if (x86_sse_unaligned_move_optimal)
12120 movupd mem, reg
12122 if (x86_sse_split_regs == true)
12124 movlpd mem, reg
12125 movhpd mem+8, reg
12127 else
12129 movsd mem, reg
12130 movhpd mem+8, reg
12134 void
12135 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
12137 rtx op0, op1, m;
12139 op0 = operands[0];
12140 op1 = operands[1];
12142 if (TARGET_AVX)
12144 switch (GET_MODE_CLASS (mode))
12146 case MODE_VECTOR_INT:
12147 case MODE_INT:
12148 switch (GET_MODE_SIZE (mode))
12150 case 16:
12151 op0 = gen_lowpart (V16QImode, op0);
12152 op1 = gen_lowpart (V16QImode, op1);
12153 emit_insn (gen_avx_movdqu (op0, op1));
12154 break;
12155 case 32:
12156 op0 = gen_lowpart (V32QImode, op0);
12157 op1 = gen_lowpart (V32QImode, op1);
12158 emit_insn (gen_avx_movdqu256 (op0, op1));
12159 break;
12160 default:
12161 gcc_unreachable ();
12163 break;
12164 case MODE_VECTOR_FLOAT:
12165 op0 = gen_lowpart (mode, op0);
12166 op1 = gen_lowpart (mode, op1);
12168 switch (mode)
12170 case V4SFmode:
12171 emit_insn (gen_avx_movups (op0, op1));
12172 break;
12173 case V8SFmode:
12174 emit_insn (gen_avx_movups256 (op0, op1));
12175 break;
12176 case V2DFmode:
12177 emit_insn (gen_avx_movupd (op0, op1));
12178 break;
12179 case V4DFmode:
12180 emit_insn (gen_avx_movupd256 (op0, op1));
12181 break;
12182 default:
12183 gcc_unreachable ();
12185 break;
12187 default:
12188 gcc_unreachable ();
12191 return;
12194 if (MEM_P (op1))
12196 /* If we're optimizing for size, movups is the smallest. */
12197 if (optimize_insn_for_size_p ())
12199 op0 = gen_lowpart (V4SFmode, op0);
12200 op1 = gen_lowpart (V4SFmode, op1);
12201 emit_insn (gen_sse_movups (op0, op1));
12202 return;
12205 /* ??? If we have typed data, then it would appear that using
12206 movdqu is the only way to get unaligned data loaded with
12207 integer type. */
12208 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12210 op0 = gen_lowpart (V16QImode, op0);
12211 op1 = gen_lowpart (V16QImode, op1);
12212 emit_insn (gen_sse2_movdqu (op0, op1));
12213 return;
12216 if (TARGET_SSE2 && mode == V2DFmode)
12218 rtx zero;
12220 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
12222 op0 = gen_lowpart (V2DFmode, op0);
12223 op1 = gen_lowpart (V2DFmode, op1);
12224 emit_insn (gen_sse2_movupd (op0, op1));
12225 return;
12228 /* When SSE registers are split into halves, we can avoid
12229 writing to the top half twice. */
12230 if (TARGET_SSE_SPLIT_REGS)
12232 emit_clobber (op0);
12233 zero = op0;
12235 else
12237 /* ??? Not sure about the best option for the Intel chips.
12238 The following would seem to satisfy; the register is
12239 entirely cleared, breaking the dependency chain. We
12240 then store to the upper half, with a dependency depth
12241 of one. A rumor has it that Intel recommends two movsd
12242 followed by an unpacklpd, but this is unconfirmed. And
12243 given that the dependency depth of the unpacklpd would
12244 still be one, I'm not sure why this would be better. */
12245 zero = CONST0_RTX (V2DFmode);
12248 m = adjust_address (op1, DFmode, 0);
12249 emit_insn (gen_sse2_loadlpd (op0, zero, m));
12250 m = adjust_address (op1, DFmode, 8);
12251 emit_insn (gen_sse2_loadhpd (op0, op0, m));
12253 else
12255 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
12257 op0 = gen_lowpart (V4SFmode, op0);
12258 op1 = gen_lowpart (V4SFmode, op1);
12259 emit_insn (gen_sse_movups (op0, op1));
12260 return;
12263 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
12264 emit_move_insn (op0, CONST0_RTX (mode));
12265 else
12266 emit_clobber (op0);
12268 if (mode != V4SFmode)
12269 op0 = gen_lowpart (V4SFmode, op0);
12270 m = adjust_address (op1, V2SFmode, 0);
12271 emit_insn (gen_sse_loadlps (op0, op0, m));
12272 m = adjust_address (op1, V2SFmode, 8);
12273 emit_insn (gen_sse_loadhps (op0, op0, m));
12276 else if (MEM_P (op0))
12278 /* If we're optimizing for size, movups is the smallest. */
12279 if (optimize_insn_for_size_p ())
12281 op0 = gen_lowpart (V4SFmode, op0);
12282 op1 = gen_lowpart (V4SFmode, op1);
12283 emit_insn (gen_sse_movups (op0, op1));
12284 return;
12287 /* ??? Similar to the above, only less clear because of
12288 "typeless stores". */
12289 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
12290 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12292 op0 = gen_lowpart (V16QImode, op0);
12293 op1 = gen_lowpart (V16QImode, op1);
12294 emit_insn (gen_sse2_movdqu (op0, op1));
12295 return;
12298 if (TARGET_SSE2 && mode == V2DFmode)
12300 m = adjust_address (op0, DFmode, 0);
12301 emit_insn (gen_sse2_storelpd (m, op1));
12302 m = adjust_address (op0, DFmode, 8);
12303 emit_insn (gen_sse2_storehpd (m, op1));
12305 else
12307 if (mode != V4SFmode)
12308 op1 = gen_lowpart (V4SFmode, op1);
12309 m = adjust_address (op0, V2SFmode, 0);
12310 emit_insn (gen_sse_storelps (m, op1));
12311 m = adjust_address (op0, V2SFmode, 8);
12312 emit_insn (gen_sse_storehps (m, op1));
12315 else
12316 gcc_unreachable ();
12319 /* Expand a push in MODE. This is some mode for which we do not support
12320 proper push instructions, at least from the registers that we expect
12321 the value to live in. */
12323 void
12324 ix86_expand_push (enum machine_mode mode, rtx x)
12326 rtx tmp;
12328 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
12329 GEN_INT (-GET_MODE_SIZE (mode)),
12330 stack_pointer_rtx, 1, OPTAB_DIRECT);
12331 if (tmp != stack_pointer_rtx)
12332 emit_move_insn (stack_pointer_rtx, tmp);
12334 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
12335 emit_move_insn (tmp, x);
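/* Sketch of the expansion (illustrative, assuming DImode on 32-bit):

       subl    $8, %esp        ; explicit stack pointer adjustment
       movl    %eax, (%esp)    ; ordinary move into the new slot
       movl    %edx, 4(%esp)

   i.e. a manual pointer bump plus a plain move, for modes that have
   no usable push instruction from the expected registers.  */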
12338 /* Helper function of ix86_fixup_binary_operands to canonicalize
12339 operand order. Returns true if the operands should be swapped. */
12341 static bool
12342 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
12343 rtx operands[])
12345 rtx dst = operands[0];
12346 rtx src1 = operands[1];
12347 rtx src2 = operands[2];
12349 /* If the operation is not commutative, we can't do anything. */
12350 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
12351 return false;
12353 /* Highest priority is that src1 should match dst. */
12354 if (rtx_equal_p (dst, src1))
12355 return false;
12356 if (rtx_equal_p (dst, src2))
12357 return true;
12359 /* Next highest priority is that immediate constants come second. */
12360 if (immediate_operand (src2, mode))
12361 return false;
12362 if (immediate_operand (src1, mode))
12363 return true;
12365 /* Lowest priority is that memory references should come second. */
12366 if (MEM_P (src2))
12367 return false;
12368 if (MEM_P (src1))
12369 return true;
12371 return false;
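/* Example (illustrative): for "a = b + a", dst matches src2, so the
   operands are swapped into the two-address form "a = a + b" that
   the matching insn patterns require; a non-commutative code such as
   MINUS is left alone by the RTX_COMM_ARITH test above.  */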
12375 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
12376 destination to use for the operation. If different from the true
12377 destination in operands[0], a copy operation will be required. */
12379 rtx
12380 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
12381 rtx operands[])
12383 rtx dst = operands[0];
12384 rtx src1 = operands[1];
12385 rtx src2 = operands[2];
12387 /* Canonicalize operand order. */
12388 if (ix86_swap_binary_operands_p (code, mode, operands))
12390 rtx temp;
12392 /* It is invalid to swap operands of different modes. */
12393 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
12395 temp = src1;
12396 src1 = src2;
12397 src2 = temp;
12400 /* Both source operands cannot be in memory. */
12401 if (MEM_P (src1) && MEM_P (src2))
12403 /* Optimization: Only read from memory once. */
12404 if (rtx_equal_p (src1, src2))
12406 src2 = force_reg (mode, src2);
12407 src1 = src2;
12409 else
12410 src2 = force_reg (mode, src2);
12413 /* If the destination is memory, and we do not have matching source
12414 operands, do things in registers. */
12415 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
12416 dst = gen_reg_rtx (mode);
12418 /* Source 1 cannot be a constant. */
12419 if (CONSTANT_P (src1))
12420 src1 = force_reg (mode, src1);
12422 /* Source 1 cannot be a non-matching memory. */
12423 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
12424 src1 = force_reg (mode, src1);
12426 operands[1] = src1;
12427 operands[2] = src2;
12428 return dst;
12431 /* Similarly, but assume that the destination has already been
12432 set up properly. */
12434 void
12435 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
12436 enum machine_mode mode, rtx operands[])
12438 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
12439 gcc_assert (dst == operands[0]);
12442 /* Attempt to expand a binary operator. Make the expansion closer to the
12443 actual machine, than just general_operand, which will allow 3 separate
12444 memory references (one output, two input) in a single insn. */
12446 void
12447 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
12448 rtx operands[])
12450 rtx src1, src2, dst, op, clob;
12452 dst = ix86_fixup_binary_operands (code, mode, operands);
12453 src1 = operands[1];
12454 src2 = operands[2];
12456 /* Emit the instruction. */
12458 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
12459 if (reload_in_progress)
12461 /* Reload doesn't know about the flags register, and doesn't know that
12462 it doesn't want to clobber it. We can only do this with PLUS. */
12463 gcc_assert (code == PLUS);
12464 emit_insn (op);
12466 else
12468 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12469 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
12472 /* Fix up the destination if needed. */
12473 if (dst != operands[0])
12474 emit_move_insn (operands[0], dst);
12477 /* Return TRUE or FALSE depending on whether the binary operator meets the
12478 appropriate constraints. */
12480 int
12481 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
12482 rtx operands[3])
12484 rtx dst = operands[0];
12485 rtx src1 = operands[1];
12486 rtx src2 = operands[2];
12488 /* Both source operands cannot be in memory. */
12489 if (MEM_P (src1) && MEM_P (src2))
12490 return 0;
12492 /* Canonicalize operand order for commutative operators. */
12493 if (ix86_swap_binary_operands_p (code, mode, operands))
12495 rtx temp = src1;
12496 src1 = src2;
12497 src2 = temp;
12500 /* If the destination is memory, we must have a matching source operand. */
12501 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
12502 return 0;
12504 /* Source 1 cannot be a constant. */
12505 if (CONSTANT_P (src1))
12506 return 0;
12508 /* Source 1 cannot be a non-matching memory. */
12509 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
12510 return 0;
12512 return 1;
12515 /* Attempt to expand a unary operator. Make the expansion closer to the
12516 actual machine, than just general_operand, which will allow 2 separate
12517 memory references (one output, one input) in a single insn. */
12519 void
12520 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
12521 rtx operands[])
12523 int matching_memory;
12524 rtx src, dst, op, clob;
12526 dst = operands[0];
12527 src = operands[1];
12529 /* If the destination is memory, and we do not have matching source
12530 operands, do things in registers. */
12531 matching_memory = 0;
12532 if (MEM_P (dst))
12534 if (rtx_equal_p (dst, src))
12535 matching_memory = 1;
12536 else
12537 dst = gen_reg_rtx (mode);
12540 /* When source operand is memory, destination must match. */
12541 if (MEM_P (src) && !matching_memory)
12542 src = force_reg (mode, src);
12544 /* Emit the instruction. */
12546 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
12547 if (reload_in_progress || code == NOT)
12549 /* Reload doesn't know about the flags register, and doesn't know that
12550 it doesn't want to clobber it. */
12551 gcc_assert (code == NOT);
12552 emit_insn (op);
12554 else
12556 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12557 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
12560 /* Fix up the destination if needed. */
12561 if (dst != operands[0])
12562 emit_move_insn (operands[0], dst);
12565 /* Return TRUE or FALSE depending on whether the unary operator meets the
12566 appropriate constraints. */
12568 int
12569 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
12570 enum machine_mode mode ATTRIBUTE_UNUSED,
12571 rtx operands[2] ATTRIBUTE_UNUSED)
12573 /* If one of operands is memory, source and destination must match. */
12574 if ((MEM_P (operands[0])
12575 || MEM_P (operands[1]))
12576 && ! rtx_equal_p (operands[0], operands[1]))
12577 return FALSE;
12578 return TRUE;
12581 /* Post-reload splitter for converting an SF or DFmode value in an
12582 SSE register into an unsigned SImode. */
12584 void
12585 ix86_split_convert_uns_si_sse (rtx operands[])
12587 enum machine_mode vecmode;
12588 rtx value, large, zero_or_two31, input, two31, x;
12590 large = operands[1];
12591 zero_or_two31 = operands[2];
12592 input = operands[3];
12593 two31 = operands[4];
12594 vecmode = GET_MODE (large);
12595 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
12597 /* Load up the value into the low element. We must ensure that the other
12598 elements are valid floats -- zero is the easiest such value. */
12599 if (MEM_P (input))
12601 if (vecmode == V4SFmode)
12602 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
12603 else
12604 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
12606 else
12608 input = gen_rtx_REG (vecmode, REGNO (input));
12609 emit_move_insn (value, CONST0_RTX (vecmode));
12610 if (vecmode == V4SFmode)
12611 emit_insn (gen_sse_movss (value, value, input));
12612 else
12613 emit_insn (gen_sse2_movsd (value, value, input));
12616 emit_move_insn (large, two31);
12617 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
12619 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
12620 emit_insn (gen_rtx_SET (VOIDmode, large, x));
12622 x = gen_rtx_AND (vecmode, zero_or_two31, large);
12623 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
12625 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
12626 emit_insn (gen_rtx_SET (VOIDmode, value, x));
12628 large = gen_rtx_REG (V4SImode, REGNO (large));
12629 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
12631 x = gen_rtx_REG (V4SImode, REGNO (value));
12632 if (vecmode == V4SFmode)
12633 emit_insn (gen_sse2_cvttps2dq (x, value));
12634 else
12635 emit_insn (gen_sse2_cvttpd2dq (x, value));
12636 value = x;
12638 emit_insn (gen_xorv4si3 (value, value, large));
12641 /* Convert an unsigned DImode value into a DFmode, using only SSE.
12642 Expects the 64-bit DImode to be supplied in a pair of integral
12643 registers. Requires SSE2; will use SSE3 if available. For x86_32,
12644 -mfpmath=sse, !optimize_size only. */
12646 void
12647 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
12649 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
12650 rtx int_xmm, fp_xmm;
12651 rtx biases, exponents;
12652 rtx x;
12654 int_xmm = gen_reg_rtx (V4SImode);
12655 if (TARGET_INTER_UNIT_MOVES)
12656 emit_insn (gen_movdi_to_sse (int_xmm, input));
12657 else if (TARGET_SSE_SPLIT_REGS)
12659 emit_clobber (int_xmm);
12660 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
12662 else
12664 x = gen_reg_rtx (V2DImode);
12665 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
12666 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
12669 x = gen_rtx_CONST_VECTOR (V4SImode,
12670 gen_rtvec (4, GEN_INT (0x43300000UL),
12671 GEN_INT (0x45300000UL),
12672 const0_rtx, const0_rtx));
12673 exponents = validize_mem (force_const_mem (V4SImode, x));
12675 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
12676 emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, exponents));
12678 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
12679 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
12680 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
12681 (0x1.0p84 + double(fp_value_hi_xmm)).
12682 Note these exponents differ by 32. */
12684 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
12686 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
12687 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
12688 real_ldexp (&bias_lo_rvt, &dconst1, 52);
12689 real_ldexp (&bias_hi_rvt, &dconst1, 84);
12690 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
12691 x = const_double_from_real_value (bias_hi_rvt, DFmode);
12692 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
12693 biases = validize_mem (force_const_mem (V2DFmode, biases));
12694 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
12696 /* Add the upper and lower DFmode values together. */
12697 if (TARGET_SSE3)
12698 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
12699 else
12701 x = copy_to_mode_reg (V2DFmode, fp_xmm);
12702 emit_insn (gen_sse2_unpckhpd (fp_xmm, fp_xmm, fp_xmm));
12703 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
12706 ix86_expand_vector_extract (false, target, fp_xmm, 0);
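/* Worked example (values invented for illustration): for the input
   0x0000000500000003 (hi = 5, lo = 3), the punpckldq gives, viewed
   as V2DF, { 2^52 + 3, 2^84 + 5*2^32 }; subtracting the biases
   leaves { 3.0, 5*2^32 }, and the final add produces
   21474836483.0 == 5 * 2^32 + 3, the exact unsigned value.  */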
12709 /* Not used, but eases macroization of patterns. */
12710 void
12711 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
12712 rtx input ATTRIBUTE_UNUSED)
12714 gcc_unreachable ();
12717 /* Convert an unsigned SImode value into a DFmode. Only currently used
12718 for SSE, but applicable anywhere. */
12720 void
12721 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
12723 REAL_VALUE_TYPE TWO31r;
12724 rtx x, fp;
12726 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
12727 NULL, 1, OPTAB_DIRECT);
12729 fp = gen_reg_rtx (DFmode);
12730 emit_insn (gen_floatsidf2 (fp, x));
12732 real_ldexp (&TWO31r, &dconst1, 31);
12733 x = const_double_from_real_value (TWO31r, DFmode);
12735 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
12736 if (x != target)
12737 emit_move_insn (target, x);
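/* Worked example (illustrative): input = 3000000000U. Adding
   -2147483648 wraps to the signed value 852516352, which floatsidf2
   converts exactly; adding 0x1.0p31 back yields 3000000000.0. Every
   32-bit value round-trips exactly since DFmode has a 53-bit
   significand.  */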
12740 /* Convert a signed DImode value into a DFmode. Only used for SSE in
12741 32-bit mode; otherwise we have a direct convert instruction. */
12743 void
12744 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
12746 REAL_VALUE_TYPE TWO32r;
12747 rtx fp_lo, fp_hi, x;
12749 fp_lo = gen_reg_rtx (DFmode);
12750 fp_hi = gen_reg_rtx (DFmode);
12752 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
12754 real_ldexp (&TWO32r, &dconst1, 32);
12755 x = const_double_from_real_value (TWO32r, DFmode);
12756 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
12758 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
12760 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
12761 0, OPTAB_DIRECT);
12762 if (x != target)
12763 emit_move_insn (target, x);
12766 /* Convert an unsigned SImode value into a SFmode, using only SSE.
12767 For x86_32, -mfpmath=sse, !optimize_size only. */
12768 void
12769 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
12771 REAL_VALUE_TYPE ONE16r;
12772 rtx fp_hi, fp_lo, int_hi, int_lo, x;
12774 real_ldexp (&ONE16r, &dconst1, 16);
12775 x = const_double_from_real_value (ONE16r, SFmode);
12776 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
12777 NULL, 0, OPTAB_DIRECT);
12778 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
12779 NULL, 0, OPTAB_DIRECT);
12780 fp_hi = gen_reg_rtx (SFmode);
12781 fp_lo = gen_reg_rtx (SFmode);
12782 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
12783 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
12784 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
12785 0, OPTAB_DIRECT);
12786 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
12787 0, OPTAB_DIRECT);
12788 if (!rtx_equal_p (target, fp_hi))
12789 emit_move_insn (target, fp_hi);
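/* Worked example (illustrative): input = 0x12345678. int_lo = 0x5678
   and int_hi = 0x1234 both convert to SFmode exactly; the multiply by
   0x1.0p16 is exact, so the final add performs the single rounding
   step. The split avoids the signed floatsisf2 misconverting values
   at or above 2^31.  */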
12792 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
12793 then replicate the value for all elements of the vector
12794 register. */
12796 static rtx
12797 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
12799 rtvec v;
12800 switch (mode)
12802 case SImode:
12803 gcc_assert (vect);
12804 v = gen_rtvec (4, value, value, value, value);
12805 return gen_rtx_CONST_VECTOR (V4SImode, v);
12807 case DImode:
12808 gcc_assert (vect);
12809 v = gen_rtvec (2, value, value);
12810 return gen_rtx_CONST_VECTOR (V2DImode, v);
12812 case SFmode:
12813 if (vect)
12814 v = gen_rtvec (4, value, value, value, value);
12815 else
12816 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
12817 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
12818 return gen_rtx_CONST_VECTOR (V4SFmode, v);
12820 case DFmode:
12821 if (vect)
12822 v = gen_rtvec (2, value, value);
12823 else
12824 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
12825 return gen_rtx_CONST_VECTOR (V2DFmode, v);
12827 default:
12828 gcc_unreachable ();
12832 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
12833 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
12834 for an SSE register. If VECT is true, then replicate the mask for
12835 all elements of the vector register. If INVERT is true, then create
12836 a mask excluding the sign bit. */
12838 rtx
12839 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
12841 enum machine_mode vec_mode, imode;
12842 HOST_WIDE_INT hi, lo;
12843 int shift = 63;
12844 rtx v;
12845 rtx mask;
12847 /* Find the sign bit, sign extended to 2*HWI. */
12848 switch (mode)
12850 case SImode:
12851 case SFmode:
12852 imode = SImode;
12853 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
12854 lo = 0x80000000, hi = lo < 0;
12855 break;
12857 case DImode:
12858 case DFmode:
12859 imode = DImode;
12860 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
12861 if (HOST_BITS_PER_WIDE_INT >= 64)
12862 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
12863 else
12864 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
12865 break;
12867 case TImode:
12868 case TFmode:
12869 vec_mode = VOIDmode;
12870 if (HOST_BITS_PER_WIDE_INT >= 64)
12872 imode = TImode;
12873 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
12875 else
12877 rtvec vec;
12879 imode = DImode;
12880 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
12882 if (invert)
12884 lo = ~lo, hi = ~hi;
12885 v = constm1_rtx;
12887 else
12888 v = const0_rtx;
12890 mask = immed_double_const (lo, hi, imode);
12892 vec = gen_rtvec (2, v, mask);
12893 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
12894 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
12896 return v;
12898 break;
12900 default:
12901 gcc_unreachable ();
12904 if (invert)
12905 lo = ~lo, hi = ~hi;
12907 /* Force this value into the low part of a fp vector constant. */
12908 mask = immed_double_const (lo, hi, imode);
12909 mask = gen_lowpart (mode, mask);
12911 if (vec_mode == VOIDmode)
12912 return force_reg (mode, mask);
12914 v = ix86_build_const_vector (mode, vect, mask);
12915 return force_reg (vec_mode, v);
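/* For reference (not part of the original source): for SFmode this
   builds the familiar constants 0x80000000 (sign bit) or, when
   INVERT, 0x7fffffff (everything but the sign bit), replicated
   across the vector when VECT is set. These masks feed the
   andps/xorps sequences used for fabs, fneg and copysign below.  */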
12918 /* Generate code for floating point ABS or NEG. */
12920 void
12921 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
12922 rtx operands[])
12924 rtx mask, set, use, clob, dst, src;
12925 bool use_sse = false;
12926 bool vector_mode = VECTOR_MODE_P (mode);
12927 enum machine_mode elt_mode = mode;
12929 if (vector_mode)
12931 elt_mode = GET_MODE_INNER (mode);
12932 use_sse = true;
12934 else if (mode == TFmode)
12935 use_sse = true;
12936 else if (TARGET_SSE_MATH)
12937 use_sse = SSE_FLOAT_MODE_P (mode);
12939 /* NEG and ABS performed with SSE use bitwise mask operations.
12940 Create the appropriate mask now. */
12941 if (use_sse)
12942 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
12943 else
12944 mask = NULL_RTX;
12946 dst = operands[0];
12947 src = operands[1];
12949 if (vector_mode)
12951 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
12952 set = gen_rtx_SET (VOIDmode, dst, set);
12953 emit_insn (set);
12955 else
12957 set = gen_rtx_fmt_e (code, mode, src);
12958 set = gen_rtx_SET (VOIDmode, dst, set);
12959 if (mask)
12961 use = gen_rtx_USE (VOIDmode, mask);
12962 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12963 emit_insn (gen_rtx_PARALLEL (VOIDmode,
12964 gen_rtvec (3, set, use, clob)));
12966 else
12967 emit_insn (set);
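/* Illustrative asm (assumed operand %xmm0 and constant-pool labels):
   with SSE math the two cases come out as

       xorps   .LC0(%rip), %xmm0       ; NEG: flip the sign bit
       andps   .LC1(%rip), %xmm0       ; ABS: clear the sign bit

   with .LC0/.LC1 holding masks from ix86_build_signbit_mask.  */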
12971 /* Expand a copysign operation. Special case operand 0 being a constant. */
12973 void
12974 ix86_expand_copysign (rtx operands[])
12976 enum machine_mode mode;
12977 rtx dest, op0, op1, mask, nmask;
12979 dest = operands[0];
12980 op0 = operands[1];
12981 op1 = operands[2];
12983 mode = GET_MODE (dest);
12985 if (GET_CODE (op0) == CONST_DOUBLE)
12987 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
12989 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
12990 op0 = simplify_unary_operation (ABS, mode, op0, mode);
12992 if (mode == SFmode || mode == DFmode)
12994 enum machine_mode vmode;
12996 vmode = mode == SFmode ? V4SFmode : V2DFmode;
12998 if (op0 == CONST0_RTX (mode))
12999 op0 = CONST0_RTX (vmode);
13000 else
13002 rtvec v;
13004 if (mode == SFmode)
13005 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
13006 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
13007 else
13008 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
13010 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
13013 else if (op0 != CONST0_RTX (mode))
13014 op0 = force_reg (mode, op0);
13016 mask = ix86_build_signbit_mask (mode, 0, 0);
13018 if (mode == SFmode)
13019 copysign_insn = gen_copysignsf3_const;
13020 else if (mode == DFmode)
13021 copysign_insn = gen_copysigndf3_const;
13022 else
13023 copysign_insn = gen_copysigntf3_const;
13025 emit_insn (copysign_insn (dest, op0, op1, mask));
13027 else
13029 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
13031 nmask = ix86_build_signbit_mask (mode, 0, 1);
13032 mask = ix86_build_signbit_mask (mode, 0, 0);
13034 if (mode == SFmode)
13035 copysign_insn = gen_copysignsf3_var;
13036 else if (mode == DFmode)
13037 copysign_insn = gen_copysigndf3_var;
13038 else
13039 copysign_insn = gen_copysigntf3_var;
13041 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
13045 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
13046 be a constant, and so has already been expanded into a vector constant. */
13048 void
13049 ix86_split_copysign_const (rtx operands[])
13051 enum machine_mode mode, vmode;
13052 rtx dest, op0, op1, mask, x;
13054 dest = operands[0];
13055 op0 = operands[1];
13056 op1 = operands[2];
13057 mask = operands[3];
13059 mode = GET_MODE (dest);
13060 vmode = GET_MODE (mask);
13062 dest = simplify_gen_subreg (vmode, dest, mode, 0);
13063 x = gen_rtx_AND (vmode, dest, mask);
13064 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13066 if (op0 != CONST0_RTX (vmode))
13068 x = gen_rtx_IOR (vmode, dest, op0);
13069 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13073 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
13074 so we have to do two masks. */
13076 void
13077 ix86_split_copysign_var (rtx operands[])
13079 enum machine_mode mode, vmode;
13080 rtx dest, scratch, op0, op1, mask, nmask, x;
13082 dest = operands[0];
13083 scratch = operands[1];
13084 op0 = operands[2];
13085 op1 = operands[3];
13086 nmask = operands[4];
13087 mask = operands[5];
13089 mode = GET_MODE (dest);
13090 vmode = GET_MODE (mask);
13092 if (rtx_equal_p (op0, op1))
13094 /* Shouldn't happen often (it's useless, obviously), but when it does
13095 we'd generate incorrect code if we continue below. */
13096 emit_move_insn (dest, op0);
13097 return;
13100 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
13102 gcc_assert (REGNO (op1) == REGNO (scratch));
13104 x = gen_rtx_AND (vmode, scratch, mask);
13105 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
13107 dest = mask;
13108 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
13109 x = gen_rtx_NOT (vmode, dest);
13110 x = gen_rtx_AND (vmode, x, op0);
13111 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13113 else
13115 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
13117 x = gen_rtx_AND (vmode, scratch, mask);
13119 else /* alternative 2,4 */
13121 gcc_assert (REGNO (mask) == REGNO (scratch));
13122 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
13123 x = gen_rtx_AND (vmode, scratch, op1);
13125 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
13127 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
13129 dest = simplify_gen_subreg (vmode, op0, mode, 0);
13130 x = gen_rtx_AND (vmode, dest, nmask);
13132 else /* alternative 3,4 */
13134 gcc_assert (REGNO (nmask) == REGNO (dest));
13135 dest = nmask;
13136 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
13137 x = gen_rtx_AND (vmode, dest, op0);
13139 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13142 x = gen_rtx_IOR (vmode, dest, scratch);
13143 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
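/* The identity behind both splitters, written as C (a sketch, not
   from this file):

       copysign (x, y) == (x & ~SIGN_MASK) | (y & SIGN_MASK)

   ix86_split_copysign_const has the "x & ~SIGN_MASK" half folded
   into a precomputed constant, so it needs only an AND and an IOR;
   the _var form above must apply both masks at run time.  */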
13146 /* Return TRUE or FALSE depending on whether the first SET in INSN
13147 has source and destination with matching CC modes, and whether the
13148 CC mode is at least as constrained as REQ_MODE. */
13150 int
13151 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
13153 rtx set;
13154 enum machine_mode set_mode;
13156 set = PATTERN (insn);
13157 if (GET_CODE (set) == PARALLEL)
13158 set = XVECEXP (set, 0, 0);
13159 gcc_assert (GET_CODE (set) == SET);
13160 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
13162 set_mode = GET_MODE (SET_DEST (set));
13163 switch (set_mode)
13165 case CCNOmode:
13166 if (req_mode != CCNOmode
13167 && (req_mode != CCmode
13168 || XEXP (SET_SRC (set), 1) != const0_rtx))
13169 return 0;
13170 break;
13171 case CCmode:
13172 if (req_mode == CCGCmode)
13173 return 0;
13174 /* FALLTHRU */
13175 case CCGCmode:
13176 if (req_mode == CCGOCmode || req_mode == CCNOmode)
13177 return 0;
13178 /* FALLTHRU */
13179 case CCGOCmode:
13180 if (req_mode == CCZmode)
13181 return 0;
13182 /* FALLTHRU */
13183 case CCAmode:
13184 case CCCmode:
13185 case CCOmode:
13186 case CCSmode:
13187 case CCZmode:
13188 break;
13190 default:
13191 gcc_unreachable ();
13194 return (GET_MODE (SET_SRC (set)) == set_mode);
13197 /* Generate insn patterns to do an integer compare of OPERANDS. */
13199 static rtx
13200 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
13202 enum machine_mode cmpmode;
13203 rtx tmp, flags;
13205 cmpmode = SELECT_CC_MODE (code, op0, op1);
13206 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
13208 /* This is very simple, but making the interface the same as in the
13209 FP case makes the rest of the code easier. */
13210 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
13211 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
13213 /* Return the test that should be put into the flags user, i.e.
13214 the bcc, scc, or cmov instruction. */
13215 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
13218 /* Figure out whether to use ordered or unordered fp comparisons.
13219 Return the appropriate mode to use. */
13221 enum machine_mode
13222 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
13224 /* ??? In order to make all comparisons reversible, we do all comparisons
13225 non-trapping when compiling for IEEE. Once gcc is able to distinguish
13226 between trapping and nontrapping forms of all comparisons, we can make inequality
13227 comparisons trapping again, since it results in better code when using
13228 FCOM based compares. */
13229 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
13232 enum machine_mode
13233 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
13235 enum machine_mode mode = GET_MODE (op0);
13237 if (SCALAR_FLOAT_MODE_P (mode))
13239 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
13240 return ix86_fp_compare_mode (code);
13243 switch (code)
13245 /* Only zero flag is needed. */
13246 case EQ: /* ZF=0 */
13247 case NE: /* ZF!=0 */
13248 return CCZmode;
13249 /* Codes needing carry flag. */
13250 case GEU: /* CF=0 */
13251 case LTU: /* CF=1 */
13252 /* Detect overflow checks. They need just the carry flag. */
13253 if (GET_CODE (op0) == PLUS
13254 && rtx_equal_p (op1, XEXP (op0, 0)))
13255 return CCCmode;
13256 else
13257 return CCmode;
13258 case GTU: /* CF=0 & ZF=0 */
13259 case LEU: /* CF=1 | ZF=1 */
13260 /* Detect overflow checks. They need just the carry flag. */
13261 if (GET_CODE (op0) == MINUS
13262 && rtx_equal_p (op1, XEXP (op0, 0)))
13263 return CCCmode;
13264 else
13265 return CCmode;
13266 /* Codes possibly doable only with the sign flag when
13267 comparing against zero. */
13268 case GE: /* SF=OF or SF=0 */
13269 case LT: /* SF<>OF or SF=1 */
13270 if (op1 == const0_rtx)
13271 return CCGOCmode;
13272 else
13273 /* For other cases Carry flag is not required. */
13274 return CCGCmode;
13275 /* Codes doable only with the sign flag when comparing
13276 against zero, but we lack a jump instruction for it,
13277 so we need to use relational tests against overflow,
13278 which thus needs to be zero. */
13279 case GT: /* ZF=0 & SF=OF */
13280 case LE: /* ZF=1 | SF<>OF */
13281 if (op1 == const0_rtx)
13282 return CCNOmode;
13283 else
13284 return CCGCmode;
13285 /* The strcmp pattern does a (use flags), and combine may ask us
13286 for the proper mode. */
13287 case USE:
13288 return CCmode;
13289 default:
13290 gcc_unreachable ();
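/* Example (illustrative): for the overflow idiom

       unsigned sum = a + b;
       if (sum < a)
         ...

   the comparison is (ltu (plus (a) (b)) (a)), matched by the
   GEU/LTU case above, so CCCmode is chosen and only the carry flag
   is considered live.  */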
13294 /* Return the fixed registers used for condition codes. */
13296 static bool
13297 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13299 *p1 = FLAGS_REG;
13300 *p2 = FPSR_REG;
13301 return true;
13304 /* If two condition code modes are compatible, return a condition code
13305 mode which is compatible with both. Otherwise, return
13306 VOIDmode. */
13308 static enum machine_mode
13309 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
13311 if (m1 == m2)
13312 return m1;
13314 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
13315 return VOIDmode;
13317 if ((m1 == CCGCmode && m2 == CCGOCmode)
13318 || (m1 == CCGOCmode && m2 == CCGCmode))
13319 return CCGCmode;
13321 switch (m1)
13323 default:
13324 gcc_unreachable ();
13326 case CCmode:
13327 case CCGCmode:
13328 case CCGOCmode:
13329 case CCNOmode:
13330 case CCAmode:
13331 case CCCmode:
13332 case CCOmode:
13333 case CCSmode:
13334 case CCZmode:
13335 switch (m2)
13337 default:
13338 return VOIDmode;
13340 case CCmode:
13341 case CCGCmode:
13342 case CCGOCmode:
13343 case CCNOmode:
13344 case CCAmode:
13345 case CCCmode:
13346 case CCOmode:
13347 case CCSmode:
13348 case CCZmode:
13349 return CCmode;
13352 case CCFPmode:
13353 case CCFPUmode:
13354 /* These are only compatible with themselves, which we already
13355 checked above. */
13356 return VOIDmode;
13360 /* Split comparison code CODE into comparisons we can do using branch
13361 instructions. BYPASS_CODE is the comparison code for the branch that
13362 will branch around FIRST_CODE and SECOND_CODE. If one of the branches
13363 is not required, its code is set to UNKNOWN.
13364 We never require more than two branches. */
13366 void
13367 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
13368 enum rtx_code *first_code,
13369 enum rtx_code *second_code)
13371 *first_code = code;
13372 *bypass_code = UNKNOWN;
13373 *second_code = UNKNOWN;
13375 /* The fcomi comparison sets flags as follows:
13377 cmp ZF PF CF
13378 > 0 0 0
13379 < 0 0 1
13380 = 1 0 0
13381 un 1 1 1 */
13383 switch (code)
13385 case GT: /* GTU - CF=0 & ZF=0 */
13386 case GE: /* GEU - CF=0 */
13387 case ORDERED: /* PF=0 */
13388 case UNORDERED: /* PF=1 */
13389 case UNEQ: /* EQ - ZF=1 */
13390 case UNLT: /* LTU - CF=1 */
13391 case UNLE: /* LEU - CF=1 | ZF=1 */
13392 case LTGT: /* EQ - ZF=0 */
13393 break;
13394 case LT: /* LTU - CF=1 - fails on unordered */
13395 *first_code = UNLT;
13396 *bypass_code = UNORDERED;
13397 break;
13398 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
13399 *first_code = UNLE;
13400 *bypass_code = UNORDERED;
13401 break;
13402 case EQ: /* EQ - ZF=1 - fails on unordered */
13403 *first_code = UNEQ;
13404 *bypass_code = UNORDERED;
13405 break;
13406 case NE: /* NE - ZF=0 - fails on unordered */
13407 *first_code = LTGT;
13408 *second_code = UNORDERED;
13409 break;
13410 case UNGE: /* GEU - CF=0 - fails on unordered */
13411 *first_code = GE;
13412 *second_code = UNORDERED;
13413 break;
13414 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
13415 *first_code = GT;
13416 *second_code = UNORDERED;
13417 break;
13418 default:
13419 gcc_unreachable ();
13421 if (!TARGET_IEEE_FP)
13423 *second_code = UNKNOWN;
13424 *bypass_code = UNKNOWN;
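/* Worked example (assumed labels, not from this file): for CODE == LT
   under TARGET_IEEE_FP this sets first_code = UNLT and bypass_code =
   UNORDERED, i.e. a branch sequence along the lines of

       fucomip %st(1), %st
       jp      .Lskip          ; bypass: unordered also sets CF
       jb      .Ltarget        ; UNLT: taken when CF = 1
   .Lskip:
*/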
13428 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
13429 All following functions use the number of instructions as a cost metric.
13430 In the future this should be tweaked to compute bytes for optimize_size and
13431 take into account the performance of various instructions on various CPUs. */
13432 static int
13433 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
13435 if (!TARGET_IEEE_FP)
13436 return 4;
13437 /* The cost of code output by ix86_expand_fp_compare. */
13438 switch (code)
13440 case UNLE:
13441 case UNLT:
13442 case LTGT:
13443 case GT:
13444 case GE:
13445 case UNORDERED:
13446 case ORDERED:
13447 case UNEQ:
13448 return 4;
13449 break;
13450 case LT:
13451 case NE:
13452 case EQ:
13453 case UNGE:
13454 return 5;
13455 break;
13456 case LE:
13457 case UNGT:
13458 return 6;
13459 break;
13460 default:
13461 gcc_unreachable ();
13465 /* Return cost of comparison done using fcomi operation.
13466 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13467 static int
13468 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
13470 enum rtx_code bypass_code, first_code, second_code;
13471 /* Return an arbitrarily high cost when the instruction is not supported;
13472 this prevents gcc from using it. */
13473 if (!TARGET_CMOVE)
13474 return 1024;
13475 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13476 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
13479 /* Return cost of comparison done using sahf operation.
13480 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13481 static int
13482 ix86_fp_comparison_sahf_cost (enum rtx_code code)
13484 enum rtx_code bypass_code, first_code, second_code;
13485 /* Return an arbitrarily high cost when the instruction is not preferred;
13486 this prevents gcc from using it. */
13487 if (!(TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ())))
13488 return 1024;
13489 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13490 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
13493 /* Compute cost of the comparison done using any method.
13494 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13495 static int
13496 ix86_fp_comparison_cost (enum rtx_code code)
13498 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
13499 int min;
13501 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
13502 sahf_cost = ix86_fp_comparison_sahf_cost (code);
13504 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
13505 if (min > sahf_cost)
13506 min = sahf_cost;
13507 if (min > fcomi_cost)
13508 min = fcomi_cost;
13509 return min;
13512 /* Return true if we should use an FCOMI instruction for this
13513 fp comparison. */
13515 int
13516 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
13518 enum rtx_code swapped_code = swap_condition (code);
13520 return ((ix86_fp_comparison_cost (code)
13521 == ix86_fp_comparison_fcomi_cost (code))
13522 || (ix86_fp_comparison_cost (swapped_code)
13523 == ix86_fp_comparison_fcomi_cost (swapped_code)));
13526 /* Swap, force into registers, or otherwise massage the two operands
13527 to a fp comparison. The operands are updated in place; the new
13528 comparison code is returned. */
13530 static enum rtx_code
13531 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
13533 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
13534 rtx op0 = *pop0, op1 = *pop1;
13535 enum machine_mode op_mode = GET_MODE (op0);
13536 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
13538 /* All of the unordered compare instructions only work on registers.
13539 The same is true of the fcomi compare instructions. The XFmode
13540 compare instructions require registers except when comparing
13541 against zero or when converting operand 1 from fixed point to
13542 floating point. */
13544 if (!is_sse
13545 && (fpcmp_mode == CCFPUmode
13546 || (op_mode == XFmode
13547 && ! (standard_80387_constant_p (op0) == 1
13548 || standard_80387_constant_p (op1) == 1)
13549 && GET_CODE (op1) != FLOAT)
13550 || ix86_use_fcomi_compare (code)))
13552 op0 = force_reg (op_mode, op0);
13553 op1 = force_reg (op_mode, op1);
13555 else
13557 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
13558 things around if they appear profitable, otherwise force op0
13559 into a register. */
13561 if (standard_80387_constant_p (op0) == 0
13562 || (MEM_P (op0)
13563 && ! (standard_80387_constant_p (op1) == 0
13564 || MEM_P (op1))))
13566 rtx tmp;
13567 tmp = op0, op0 = op1, op1 = tmp;
13568 code = swap_condition (code);
13571 if (!REG_P (op0))
13572 op0 = force_reg (op_mode, op0);
13574 if (CONSTANT_P (op1))
13576 int tmp = standard_80387_constant_p (op1);
13577 if (tmp == 0)
13578 op1 = validize_mem (force_const_mem (op_mode, op1));
13579 else if (tmp == 1)
13581 if (TARGET_CMOVE)
13582 op1 = force_reg (op_mode, op1);
13584 else
13585 op1 = force_reg (op_mode, op1);
13589 /* Try to rearrange the comparison to make it cheaper. */
13590 if (ix86_fp_comparison_cost (code)
13591 > ix86_fp_comparison_cost (swap_condition (code))
13592 && (REG_P (op1) || can_create_pseudo_p ()))
13594 rtx tmp;
13595 tmp = op0, op0 = op1, op1 = tmp;
13596 code = swap_condition (code);
13597 if (!REG_P (op0))
13598 op0 = force_reg (op_mode, op0);
13601 *pop0 = op0;
13602 *pop1 = op1;
13603 return code;
13606 /* Convert the comparison codes we use to represent an FP comparison into the
13607 integer code that will result in a proper branch. Return UNKNOWN if no such
13608 code is available. */
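/* For example, after fcomi the FP flags mimic an unsigned integer
   comparison (see the table above ix86_fp_comparison_codes), so FP GT
   is tested with the unsigned GTU condition (ja: CF=0 and ZF=0) and
   UNLT with LTU (jb: CF=1).  */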
13610 enum rtx_code
13611 ix86_fp_compare_code_to_integer (enum rtx_code code)
13613 switch (code)
13615 case GT:
13616 return GTU;
13617 case GE:
13618 return GEU;
13619 case ORDERED:
13620 case UNORDERED:
13621 return code;
13622 break;
13623 case UNEQ:
13624 return EQ;
13625 break;
13626 case UNLT:
13627 return LTU;
13628 break;
13629 case UNLE:
13630 return LEU;
13631 break;
13632 case LTGT:
13633 return NE;
13634 break;
13635 default:
13636 return UNKNOWN;
13640 /* Generate insn patterns to do a floating point compare of OPERANDS. */
13642 static rtx
13643 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
13644 rtx *second_test, rtx *bypass_test)
13646 enum machine_mode fpcmp_mode, intcmp_mode;
13647 rtx tmp, tmp2;
13648 int cost = ix86_fp_comparison_cost (code);
13649 enum rtx_code bypass_code, first_code, second_code;
13651 fpcmp_mode = ix86_fp_compare_mode (code);
13652 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
13654 if (second_test)
13655 *second_test = NULL_RTX;
13656 if (bypass_test)
13657 *bypass_test = NULL_RTX;
13659 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13661 /* Do fcomi/sahf based test when profitable. */
13662 if (ix86_fp_comparison_arithmetics_cost (code) > cost
13663 && (bypass_code == UNKNOWN || bypass_test)
13664 && (second_code == UNKNOWN || second_test))
13666 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
13667 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
13668 tmp);
13669 if (TARGET_CMOVE)
13670 emit_insn (tmp);
13671 else
13673 gcc_assert (TARGET_SAHF);
13675 if (!scratch)
13676 scratch = gen_reg_rtx (HImode);
13677 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
13679 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
13682 /* The FP codes work out to act like unsigned. */
13683 intcmp_mode = fpcmp_mode;
13684 code = first_code;
13685 if (bypass_code != UNKNOWN)
13686 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
13687 gen_rtx_REG (intcmp_mode, FLAGS_REG),
13688 const0_rtx);
13689 if (second_code != UNKNOWN)
13690 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
13691 gen_rtx_REG (intcmp_mode, FLAGS_REG),
13692 const0_rtx);
13694 else
13696 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
13697 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
13698 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
13699 if (!scratch)
13700 scratch = gen_reg_rtx (HImode);
13701 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
13703 /* In the unordered case, we have to check C2 for NaN's, which
13704 doesn't happen to work out to anything nice combination-wise.
13705 So do some bit twiddling on the value we've got in AH to come
13706 up with an appropriate set of condition codes. */
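/* An illustrative sketch of the common shape, assuming the scratch
   register ends up in %ax (0x45 masks the C0/C2/C3 condition bits in
   the high byte of the status word), e.g. for GT without IEEE math:

       fnstsw  %ax
       testb   $0x45, %ah      ; C0, C2 and C3 all clear <=> greater
       je      target
*/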
13708 intcmp_mode = CCNOmode;
13709 switch (code)
13711 case GT:
13712 case UNGT:
13713 if (code == GT || !TARGET_IEEE_FP)
13715 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
13716 code = EQ;
13718 else
13720 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13721 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
13722 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
13723 intcmp_mode = CCmode;
13724 code = GEU;
13726 break;
13727 case LT:
13728 case UNLT:
13729 if (code == LT && TARGET_IEEE_FP)
13731 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13732 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
13733 intcmp_mode = CCmode;
13734 code = EQ;
13736 else
13738 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
13739 code = NE;
13741 break;
13742 case GE:
13743 case UNGE:
13744 if (code == GE || !TARGET_IEEE_FP)
13746 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
13747 code = EQ;
13749 else
13751 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13752 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
13753 GEN_INT (0x01)));
13754 code = NE;
13756 break;
13757 case LE:
13758 case UNLE:
13759 if (code == LE && TARGET_IEEE_FP)
13761 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13762 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
13763 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
13764 intcmp_mode = CCmode;
13765 code = LTU;
13767 else
13769 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
13770 code = NE;
13772 break;
13773 case EQ:
13774 case UNEQ:
13775 if (code == EQ && TARGET_IEEE_FP)
13777 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13778 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
13779 intcmp_mode = CCmode;
13780 code = EQ;
13782 else
13784 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
13785 code = NE;
13786 break;
13788 break;
13789 case NE:
13790 case LTGT:
13791 if (code == NE && TARGET_IEEE_FP)
13793 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13794 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
13795 GEN_INT (0x40)));
13796 code = NE;
13798 else
13800 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
13801 code = EQ;
13803 break;
13805 case UNORDERED:
13806 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
13807 code = NE;
13808 break;
13809 case ORDERED:
13810 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
13811 code = EQ;
13812 break;
13814 default:
13815 gcc_unreachable ();
13819 /* Return the test that should be put into the flags user, i.e.
13820 the bcc, scc, or cmov instruction. */
13821 return gen_rtx_fmt_ee (code, VOIDmode,
13822 gen_rtx_REG (intcmp_mode, FLAGS_REG),
13823 const0_rtx);
13826 rtx
13827 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
13829 rtx op0, op1, ret;
13830 op0 = ix86_compare_op0;
13831 op1 = ix86_compare_op1;
13833 if (second_test)
13834 *second_test = NULL_RTX;
13835 if (bypass_test)
13836 *bypass_test = NULL_RTX;
13838 if (ix86_compare_emitted)
13840 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
13841 ix86_compare_emitted = NULL_RTX;
13843 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
13845 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
13846 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
13847 second_test, bypass_test);
13849 else
13850 ret = ix86_expand_int_compare (code, op0, op1);
13852 return ret;
13855 /* Return true if the CODE will result in nontrivial jump sequence. */
13856 bool
13857 ix86_fp_jump_nontrivial_p (enum rtx_code code)
13859 enum rtx_code bypass_code, first_code, second_code;
13860 if (!TARGET_CMOVE)
13861 return true;
13862 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13863 return bypass_code != UNKNOWN || second_code != UNKNOWN;
13866 void
13867 ix86_expand_branch (enum rtx_code code, rtx label)
13869 rtx tmp;
13871 /* If we have emitted a compare insn, go straight to simple.
13872 ix86_expand_compare won't emit anything if ix86_compare_emitted
13873 is non-NULL. */
13874 if (ix86_compare_emitted)
13875 goto simple;
13877 switch (GET_MODE (ix86_compare_op0))
13879 case QImode:
13880 case HImode:
13881 case SImode:
13882 simple:
13883 tmp = ix86_expand_compare (code, NULL, NULL);
13884 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
13885 gen_rtx_LABEL_REF (VOIDmode, label),
13886 pc_rtx);
13887 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
13888 return;
13890 case SFmode:
13891 case DFmode:
13892 case XFmode:
13894 rtvec vec;
13895 int use_fcomi;
13896 enum rtx_code bypass_code, first_code, second_code;
13898 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
13899 &ix86_compare_op1);
13901 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13903 /* Check whether we will use the natural sequence with one jump. If
13904 so, we can expand the jump early. Otherwise delay expansion by
13905 creating a compound insn so as not to confuse the optimizers. */
13906 if (bypass_code == UNKNOWN && second_code == UNKNOWN)
13908 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
13909 gen_rtx_LABEL_REF (VOIDmode, label),
13910 pc_rtx, NULL_RTX, NULL_RTX);
13912 else
13914 tmp = gen_rtx_fmt_ee (code, VOIDmode,
13915 ix86_compare_op0, ix86_compare_op1);
13916 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
13917 gen_rtx_LABEL_REF (VOIDmode, label),
13918 pc_rtx);
13919 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
13921 use_fcomi = ix86_use_fcomi_compare (code);
13922 vec = rtvec_alloc (3 + !use_fcomi);
13923 RTVEC_ELT (vec, 0) = tmp;
13924 RTVEC_ELT (vec, 1)
13925 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FPSR_REG));
13926 RTVEC_ELT (vec, 2)
13927 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FLAGS_REG));
13928 if (! use_fcomi)
13929 RTVEC_ELT (vec, 3)
13930 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
13932 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
13934 return;
13937 case DImode:
13938 if (TARGET_64BIT)
13939 goto simple;
13940 case TImode:
13941 /* Expand DImode branch into multiple compare+branch. */
13943 rtx lo[2], hi[2], label2;
13944 enum rtx_code code1, code2, code3;
13945 enum machine_mode submode;
13947 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
13949 tmp = ix86_compare_op0;
13950 ix86_compare_op0 = ix86_compare_op1;
13951 ix86_compare_op1 = tmp;
13952 code = swap_condition (code);
13954 if (GET_MODE (ix86_compare_op0) == DImode)
13956 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
13957 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
13958 submode = SImode;
13960 else
13962 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
13963 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
13964 submode = DImode;
13967 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
13968 avoid two branches. This costs one extra insn, so disable when
13969 optimizing for size. */
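/* An illustrative sketch of the idea for a DImode equality test on a
   32-bit target:

       movl    hi0, %eax
       xorl    hi1, %eax       ; zero iff the high words match
       movl    lo0, %edx
       xorl    lo1, %edx       ; zero iff the low words match
       orl     %edx, %eax      ; zero iff all 64 bits match
       jz/jnz  label

   so the double-word equality collapses into one compare against 0.  */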
13971 if ((code == EQ || code == NE)
13972 && (!optimize_insn_for_size_p ()
13973 || hi[1] == const0_rtx || lo[1] == const0_rtx))
13975 rtx xor0, xor1;
13977 xor1 = hi[0];
13978 if (hi[1] != const0_rtx)
13979 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
13980 NULL_RTX, 0, OPTAB_WIDEN);
13982 xor0 = lo[0];
13983 if (lo[1] != const0_rtx)
13984 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
13985 NULL_RTX, 0, OPTAB_WIDEN);
13987 tmp = expand_binop (submode, ior_optab, xor1, xor0,
13988 NULL_RTX, 0, OPTAB_WIDEN);
13990 ix86_compare_op0 = tmp;
13991 ix86_compare_op1 = const0_rtx;
13992 ix86_expand_branch (code, label);
13993 return;
13996 /* Otherwise, if we are doing a less-than or greater-or-equal comparison,
13997 op1 is a constant, and the low word is zero, then we can just
13998 examine the high word. Similarly for a low word of -1 and
13999 less-or-equal or greater-than. */
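/* E.g. for unsigned 64-bit x on a 32-bit target, "x < 0x500000000" has
   a zero low word, so it is equivalent to the word-sized comparison
   hi(x) < 5; the recursive call below handles it as SImode.  */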
14001 if (CONST_INT_P (hi[1]))
14002 switch (code)
14004 case LT: case LTU: case GE: case GEU:
14005 if (lo[1] == const0_rtx)
14007 ix86_compare_op0 = hi[0];
14008 ix86_compare_op1 = hi[1];
14009 ix86_expand_branch (code, label);
14010 return;
14012 break;
14013 case LE: case LEU: case GT: case GTU:
14014 if (lo[1] == constm1_rtx)
14016 ix86_compare_op0 = hi[0];
14017 ix86_compare_op1 = hi[1];
14018 ix86_expand_branch (code, label);
14019 return;
14021 break;
14022 default:
14023 break;
14026 /* Otherwise, we need two or three jumps. */
14028 label2 = gen_label_rtx ();
14030 code1 = code;
14031 code2 = swap_condition (code);
14032 code3 = unsigned_condition (code);
14034 switch (code)
14036 case LT: case GT: case LTU: case GTU:
14037 break;
14039 case LE: code1 = LT; code2 = GT; break;
14040 case GE: code1 = GT; code2 = LT; break;
14041 case LEU: code1 = LTU; code2 = GTU; break;
14042 case GEU: code1 = GTU; code2 = LTU; break;
14044 case EQ: code1 = UNKNOWN; code2 = NE; break;
14045 case NE: code2 = UNKNOWN; break;
14047 default:
14048 gcc_unreachable ();
14052 * a < b =>
14053 * if (hi(a) < hi(b)) goto true;
14054 * if (hi(a) > hi(b)) goto false;
14055 * if (lo(a) < lo(b)) goto true;
14056 * false:
14059 ix86_compare_op0 = hi[0];
14060 ix86_compare_op1 = hi[1];
14062 if (code1 != UNKNOWN)
14063 ix86_expand_branch (code1, label);
14064 if (code2 != UNKNOWN)
14065 ix86_expand_branch (code2, label2);
14067 ix86_compare_op0 = lo[0];
14068 ix86_compare_op1 = lo[1];
14069 ix86_expand_branch (code3, label);
14071 if (code2 != UNKNOWN)
14072 emit_label (label2);
14073 return;
14076 default:
14077 gcc_unreachable ();
14081 /* Split branch based on floating point condition. */
14082 void
14083 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
14084 rtx target1, rtx target2, rtx tmp, rtx pushed)
14086 rtx second, bypass;
14087 rtx label = NULL_RTX;
14088 rtx condition;
14089 int bypass_probability = -1, second_probability = -1, probability = -1;
14090 rtx i;
14092 if (target2 != pc_rtx)
14094 rtx tmp = target2;
14095 code = reverse_condition_maybe_unordered (code);
14096 target2 = target1;
14097 target1 = tmp;
14100 condition = ix86_expand_fp_compare (code, op1, op2,
14101 tmp, &second, &bypass);
14103 /* Remove pushed operand from stack. */
14104 if (pushed)
14105 ix86_free_from_memory (GET_MODE (pushed));
14107 if (split_branch_probability >= 0)
14109 /* Distribute the probabilities across the jumps.
14110 Assume that BYPASS and SECOND always test
14111 for UNORDERED. */
14112 probability = split_branch_probability;
14114 /* A value of 1 is low enough that there is no need for the probability
14115 to be updated. Later we may run some experiments and see
14116 if unordered values are more frequent in practice. */
14117 if (bypass)
14118 bypass_probability = 1;
14119 if (second)
14120 second_probability = 1;
14122 if (bypass != NULL_RTX)
14124 label = gen_label_rtx ();
14125 i = emit_jump_insn (gen_rtx_SET
14126 (VOIDmode, pc_rtx,
14127 gen_rtx_IF_THEN_ELSE (VOIDmode,
14128 bypass,
14129 gen_rtx_LABEL_REF (VOIDmode,
14130 label),
14131 pc_rtx)));
14132 if (bypass_probability >= 0)
14133 REG_NOTES (i)
14134 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14135 GEN_INT (bypass_probability),
14136 REG_NOTES (i));
14138 i = emit_jump_insn (gen_rtx_SET
14139 (VOIDmode, pc_rtx,
14140 gen_rtx_IF_THEN_ELSE (VOIDmode,
14141 condition, target1, target2)));
14142 if (probability >= 0)
14143 REG_NOTES (i)
14144 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14145 GEN_INT (probability),
14146 REG_NOTES (i));
14147 if (second != NULL_RTX)
14149 i = emit_jump_insn (gen_rtx_SET
14150 (VOIDmode, pc_rtx,
14151 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
14152 target2)));
14153 if (second_probability >= 0)
14154 REG_NOTES (i)
14155 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14156 GEN_INT (second_probability),
14157 REG_NOTES (i));
14159 if (label != NULL_RTX)
14160 emit_label (label);
14163 int
14164 ix86_expand_setcc (enum rtx_code code, rtx dest)
14166 rtx ret, tmp, tmpreg, equiv;
14167 rtx second_test, bypass_test;
14169 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
14170 return 0; /* FAIL */
14172 gcc_assert (GET_MODE (dest) == QImode);
14174 ret = ix86_expand_compare (code, &second_test, &bypass_test);
14175 PUT_MODE (ret, QImode);
14177 tmp = dest;
14178 tmpreg = dest;
14180 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
14181 if (bypass_test || second_test)
14183 rtx test = second_test;
14184 int bypass = 0;
14185 rtx tmp2 = gen_reg_rtx (QImode);
14186 if (bypass_test)
14188 gcc_assert (!second_test);
14189 test = bypass_test;
14190 bypass = 1;
14191 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
14193 PUT_MODE (test, QImode);
14194 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
14196 if (bypass)
14197 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
14198 else
14199 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
14202 /* Attach a REG_EQUAL note describing the comparison result. */
14203 if (ix86_compare_op0 && ix86_compare_op1)
14205 equiv = simplify_gen_relational (code, QImode,
14206 GET_MODE (ix86_compare_op0),
14207 ix86_compare_op0, ix86_compare_op1);
14208 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
14211 return 1; /* DONE */
14214 /* Expand comparison setting or clearing carry flag. Return true when
14215 successful and set pop for the operation. */
14216 static bool
14217 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
14219 enum machine_mode mode =
14220 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
14222 /* Do not handle DImode compares that go through special path. */
14223 if (mode == (TARGET_64BIT ? TImode : DImode))
14224 return false;
14226 if (SCALAR_FLOAT_MODE_P (mode))
14228 rtx second_test = NULL, bypass_test = NULL;
14229 rtx compare_op, compare_seq;
14231 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14233 /* Shortcut: the following common codes never translate
14234 into carry flag compares. */
14235 if (code == EQ || code == NE || code == UNEQ || code == LTGT
14236 || code == ORDERED || code == UNORDERED)
14237 return false;
14239 /* These comparisons require the zero flag; swap operands so they won't need it. */
14240 if ((code == GT || code == UNLE || code == LE || code == UNGT)
14241 && !TARGET_IEEE_FP)
14243 rtx tmp = op0;
14244 op0 = op1;
14245 op1 = tmp;
14246 code = swap_condition (code);
14249 /* Try to expand the comparison and verify that we end up with
14250 a carry flag based comparison. This fails only when we decide
14251 to expand the comparison using arithmetic, which is not
14252 a common scenario. */
14253 start_sequence ();
14254 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
14255 &second_test, &bypass_test);
14256 compare_seq = get_insns ();
14257 end_sequence ();
14259 if (second_test || bypass_test)
14260 return false;
14262 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
14263 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
14264 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
14265 else
14266 code = GET_CODE (compare_op);
14268 if (code != LTU && code != GEU)
14269 return false;
14271 emit_insn (compare_seq);
14272 *pop = compare_op;
14273 return true;
14276 if (!INTEGRAL_MODE_P (mode))
14277 return false;
14279 switch (code)
14281 case LTU:
14282 case GEU:
14283 break;
14285 /* Convert a==0 into (unsigned)a<1. */
14286 case EQ:
14287 case NE:
14288 if (op1 != const0_rtx)
14289 return false;
14290 op1 = const1_rtx;
14291 code = (code == EQ ? LTU : GEU);
14292 break;
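/* For instance, "a == 0" becomes the unsigned test "a < 1", and an
   unsigned below test is exactly the carry flag, so the caller can
   feed it straight into sbb (an illustrative sketch):

       cmpl    $1, a           ; CF=1 iff a == 0
       sbbl    %eax, %eax      ; %eax = (a == 0) ? -1 : 0
*/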
14294 /* Convert a>b into b<a or a>=b+1. */
14295 case GTU:
14296 case LEU:
14297 if (CONST_INT_P (op1))
14299 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
14300 /* Bail out on overflow. We could still swap operands, but that
14301 would force loading the constant into a register. */
14302 if (op1 == const0_rtx
14303 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
14304 return false;
14305 code = (code == GTU ? GEU : LTU);
14307 else
14309 rtx tmp = op1;
14310 op1 = op0;
14311 op0 = tmp;
14312 code = (code == GTU ? LTU : GEU);
14314 break;
14316 /* Convert a>=0 into (unsigned)a<0x80000000. */
14317 case LT:
14318 case GE:
14319 if (mode == DImode || op1 != const0_rtx)
14320 return false;
14321 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
14322 code = (code == LT ? GEU : LTU);
14323 break;
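/* For instance, "a >= 0" holds exactly when (unsigned)a < 0x80000000,
   so the sign test becomes a compare whose carry flag is the answer
   (illustrative): cmpl $0x80000000, a sets CF=1 iff a >= 0.  */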
14324 case LE:
14325 case GT:
14326 if (mode == DImode || op1 != constm1_rtx)
14327 return false;
14328 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
14329 code = (code == LE ? GEU : LTU);
14330 break;
14332 default:
14333 return false;
14335 /* Swapping operands may cause a constant to appear as the first operand. */
14336 if (!nonimmediate_operand (op0, VOIDmode))
14338 if (!can_create_pseudo_p ())
14339 return false;
14340 op0 = force_reg (mode, op0);
14342 ix86_compare_op0 = op0;
14343 ix86_compare_op1 = op1;
14344 *pop = ix86_expand_compare (code, NULL, NULL);
14345 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
14346 return true;
14349 int
14350 ix86_expand_int_movcc (rtx operands[])
14352 enum rtx_code code = GET_CODE (operands[1]), compare_code;
14353 rtx compare_seq, compare_op;
14354 rtx second_test, bypass_test;
14355 enum machine_mode mode = GET_MODE (operands[0]);
14356 bool sign_bit_compare_p = false;
14358 start_sequence ();
14359 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
14360 compare_seq = get_insns ();
14361 end_sequence ();
14363 compare_code = GET_CODE (compare_op);
14365 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
14366 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
14367 sign_bit_compare_p = true;
14369 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
14370 HImode insns, we'd be swallowed in word prefix ops. */
14372 if ((mode != HImode || TARGET_FAST_PREFIX)
14373 && (mode != (TARGET_64BIT ? TImode : DImode))
14374 && CONST_INT_P (operands[2])
14375 && CONST_INT_P (operands[3]))
14377 rtx out = operands[0];
14378 HOST_WIDE_INT ct = INTVAL (operands[2]);
14379 HOST_WIDE_INT cf = INTVAL (operands[3]);
14380 HOST_WIDE_INT diff;
14382 diff = ct - cf;
14383 /* Sign bit compares are better done using shifts than
14384 using sbb. */
14385 if (sign_bit_compare_p
14386 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
14387 ix86_compare_op1, &compare_op))
14389 /* Detect overlap between destination and compare sources. */
14390 rtx tmp = out;
14392 if (!sign_bit_compare_p)
14394 bool fpcmp = false;
14396 compare_code = GET_CODE (compare_op);
14398 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
14399 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
14401 fpcmp = true;
14402 compare_code = ix86_fp_compare_code_to_integer (compare_code);
14405 /* To simplify the rest of the code, restrict to the GEU case. */
14406 if (compare_code == LTU)
14408 HOST_WIDE_INT tmp = ct;
14409 ct = cf;
14410 cf = tmp;
14411 compare_code = reverse_condition (compare_code);
14412 code = reverse_condition (code);
14414 else
14416 if (fpcmp)
14417 PUT_CODE (compare_op,
14418 reverse_condition_maybe_unordered
14419 (GET_CODE (compare_op)));
14420 else
14421 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
14423 diff = ct - cf;
14425 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
14426 || reg_overlap_mentioned_p (out, ix86_compare_op1))
14427 tmp = gen_reg_rtx (mode);
14429 if (mode == DImode)
14430 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
14431 else
14432 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
14434 else
14436 if (code == GT || code == GE)
14437 code = reverse_condition (code);
14438 else
14440 HOST_WIDE_INT tmp = ct;
14441 ct = cf;
14442 cf = tmp;
14443 diff = ct - cf;
14445 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
14446 ix86_compare_op1, VOIDmode, 0, -1);
14449 if (diff == 1)
14452 * cmpl op0,op1
14453 * sbbl dest,dest
14454 * [addl dest, ct]
14456 * Size 5 - 8.
14458 if (ct)
14459 tmp = expand_simple_binop (mode, PLUS,
14460 tmp, GEN_INT (ct),
14461 copy_rtx (tmp), 1, OPTAB_DIRECT);
14463 else if (cf == -1)
14466 * cmpl op0,op1
14467 * sbbl dest,dest
14468 * orl $ct, dest
14470 * Size 8.
14472 tmp = expand_simple_binop (mode, IOR,
14473 tmp, GEN_INT (ct),
14474 copy_rtx (tmp), 1, OPTAB_DIRECT);
14476 else if (diff == -1 && ct)
14479 * cmpl op0,op1
14480 * sbbl dest,dest
14481 * notl dest
14482 * [addl dest, cf]
14484 * Size 8 - 11.
14486 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
14487 if (cf)
14488 tmp = expand_simple_binop (mode, PLUS,
14489 copy_rtx (tmp), GEN_INT (cf),
14490 copy_rtx (tmp), 1, OPTAB_DIRECT);
14492 else
14495 * cmpl op0,op1
14496 * sbbl dest,dest
14497 * [notl dest]
14498 * andl cf - ct, dest
14499 * [addl dest, ct]
14501 * Size 8 - 11.
14504 if (cf == 0)
14506 cf = ct;
14507 ct = 0;
14508 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
14511 tmp = expand_simple_binop (mode, AND,
14512 copy_rtx (tmp),
14513 gen_int_mode (cf - ct, mode),
14514 copy_rtx (tmp), 1, OPTAB_DIRECT);
14515 if (ct)
14516 tmp = expand_simple_binop (mode, PLUS,
14517 copy_rtx (tmp), GEN_INT (ct),
14518 copy_rtx (tmp), 1, OPTAB_DIRECT);
14521 if (!rtx_equal_p (tmp, out))
14522 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
14524 return 1; /* DONE */
14527 if (diff < 0)
14529 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
14531 HOST_WIDE_INT tmp;
14532 tmp = ct, ct = cf, cf = tmp;
14533 diff = -diff;
14535 if (SCALAR_FLOAT_MODE_P (cmp_mode))
14537 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
14539 /* We may be reversing an unordered compare to a normal compare, which
14540 is not valid in general (we may convert a non-trapping condition
14541 to a trapping one); however, on i386 we currently
14542 emit all comparisons unordered. */
14543 compare_code = reverse_condition_maybe_unordered (compare_code);
14544 code = reverse_condition_maybe_unordered (code);
14546 else
14548 compare_code = reverse_condition (compare_code);
14549 code = reverse_condition (code);
14553 compare_code = UNKNOWN;
14554 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
14555 && CONST_INT_P (ix86_compare_op1))
14557 if (ix86_compare_op1 == const0_rtx
14558 && (code == LT || code == GE))
14559 compare_code = code;
14560 else if (ix86_compare_op1 == constm1_rtx)
14562 if (code == LE)
14563 compare_code = LT;
14564 else if (code == GT)
14565 compare_code = GE;
14569 /* Optimize dest = (op0 < 0) ? -1 : cf. */
14570 if (compare_code != UNKNOWN
14571 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
14572 && (cf == -1 || ct == -1))
14574 /* If the lea code below could be used, only optimize
14575 if it results in a 2-insn sequence. */
14577 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
14578 || diff == 3 || diff == 5 || diff == 9)
14579 || (compare_code == LT && ct == -1)
14580 || (compare_code == GE && cf == -1))
14583 * notl op1 (if necessary)
14584 * sarl $31, op1
14585 * orl cf, op1
14587 if (ct != -1)
14589 cf = ct;
14590 ct = -1;
14591 code = reverse_condition (code);
14594 out = emit_store_flag (out, code, ix86_compare_op0,
14595 ix86_compare_op1, VOIDmode, 0, -1);
14597 out = expand_simple_binop (mode, IOR,
14598 out, GEN_INT (cf),
14599 out, 1, OPTAB_DIRECT);
14600 if (out != operands[0])
14601 emit_move_insn (operands[0], out);
14603 return 1; /* DONE */
14608 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
14609 || diff == 3 || diff == 5 || diff == 9)
14610 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
14611 && (mode != DImode
14612 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
14615 * xorl dest,dest
14616 * cmpl op1,op2
14617 * setcc dest
14618 * lea cf(dest*(ct-cf)),dest
14620 * Size 14.
14622 * This also catches the degenerate setcc-only case.
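*
* Worked example (illustrative): for dest = cond ? 7 : 2, diff == 5,
* so once setcc has produced 0/1 in dest a single lea finishes up:
*
*     leal 2(%eax,%eax,4), %eax   ; dest = 5*dest + 2, i.e. 7 or 2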
14625 rtx tmp;
14626 int nops;
14628 out = emit_store_flag (out, code, ix86_compare_op0,
14629 ix86_compare_op1, VOIDmode, 0, 1);
14631 nops = 0;
14632 /* On x86_64 the lea instruction operates on Pmode, so we need
14633 to get the arithmetic done in the proper mode to match. */
14634 if (diff == 1)
14635 tmp = copy_rtx (out);
14636 else
14638 rtx out1;
14639 out1 = copy_rtx (out);
14640 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
14641 nops++;
14642 if (diff & 1)
14644 tmp = gen_rtx_PLUS (mode, tmp, out1);
14645 nops++;
14648 if (cf != 0)
14650 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
14651 nops++;
14653 if (!rtx_equal_p (tmp, out))
14655 if (nops == 1)
14656 out = force_operand (tmp, copy_rtx (out));
14657 else
14658 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
14660 if (!rtx_equal_p (out, operands[0]))
14661 emit_move_insn (operands[0], copy_rtx (out));
14663 return 1; /* DONE */
14667 * General case: Jumpful:
14668 * xorl dest,dest cmpl op1, op2
14669 * cmpl op1, op2 movl ct, dest
14670 * setcc dest jcc 1f
14671 * decl dest movl cf, dest
14672 * andl (cf-ct),dest 1:
14673 * addl ct,dest
14675 * Size 20. Size 14.
14677 * This is reasonably steep, but branch mispredict costs are
14678 * high on modern cpus, so consider failing only if optimizing
14679 * for space.
14682 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
14683 && BRANCH_COST (optimize_insn_for_speed_p (),
14684 false) >= 2)
14686 if (cf == 0)
14688 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
14690 cf = ct;
14691 ct = 0;
14693 if (SCALAR_FLOAT_MODE_P (cmp_mode))
14695 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
14697 /* We may be reversing an unordered compare to a normal compare,
14698 which is not valid in general (we may convert a non-trapping
14699 condition to a trapping one); however, on i386 we currently
14700 emit all comparisons unordered. */
14701 code = reverse_condition_maybe_unordered (code);
14703 else
14705 code = reverse_condition (code);
14706 if (compare_code != UNKNOWN)
14707 compare_code = reverse_condition (compare_code);
14711 if (compare_code != UNKNOWN)
14713 /* notl op1 (if needed)
14714 sarl $31, op1
14715 andl (cf-ct), op1
14716 addl ct, op1
14718 For x < 0 (resp. x <= -1) there will be no notl,
14719 so if possible swap the constants to get rid of the
14720 complement.
14721 True/false will be -1/0 while code below (store flag
14722 followed by decrement) is 0/-1, so the constants need
14723 to be exchanged once more. */
14725 if (compare_code == GE || !cf)
14727 code = reverse_condition (code);
14728 compare_code = LT;
14730 else
14732 HOST_WIDE_INT tmp = cf;
14733 cf = ct;
14734 ct = tmp;
14737 out = emit_store_flag (out, code, ix86_compare_op0,
14738 ix86_compare_op1, VOIDmode, 0, -1);
14740 else
14742 out = emit_store_flag (out, code, ix86_compare_op0,
14743 ix86_compare_op1, VOIDmode, 0, 1);
14745 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
14746 copy_rtx (out), 1, OPTAB_DIRECT);
14749 out = expand_simple_binop (mode, AND, copy_rtx (out),
14750 gen_int_mode (cf - ct, mode),
14751 copy_rtx (out), 1, OPTAB_DIRECT);
14752 if (ct)
14753 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
14754 copy_rtx (out), 1, OPTAB_DIRECT);
14755 if (!rtx_equal_p (out, operands[0]))
14756 emit_move_insn (operands[0], copy_rtx (out));
14758 return 1; /* DONE */
14762 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
14764 /* Try a few things more with specific constants and a variable. */
14766 optab op;
14767 rtx var, orig_out, out, tmp;
14769 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
14770 return 0; /* FAIL */
14772 /* If one of the two operands is an interesting constant, load a
14773 constant with the above and mask it in with a logical operation. */
14775 if (CONST_INT_P (operands[2]))
14777 var = operands[3];
14778 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
14779 operands[3] = constm1_rtx, op = and_optab;
14780 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
14781 operands[3] = const0_rtx, op = ior_optab;
14782 else
14783 return 0; /* FAIL */
14785 else if (CONST_INT_P (operands[3]))
14787 var = operands[2];
14788 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
14789 operands[2] = constm1_rtx, op = and_optab;
14790 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
14791 operands[2] = const0_rtx, op = ior_optab;
14792 else
14793 return 0; /* FAIL */
14795 else
14796 return 0; /* FAIL */
14798 orig_out = operands[0];
14799 tmp = gen_reg_rtx (mode);
14800 operands[0] = tmp;
14802 /* Recurse to get the constant loaded. */
14803 if (ix86_expand_int_movcc (operands) == 0)
14804 return 0; /* FAIL */
14806 /* Mask in the interesting variable. */
14807 out = expand_binop (mode, op, var, tmp, orig_out, 0,
14808 OPTAB_WIDEN);
14809 if (!rtx_equal_p (out, orig_out))
14810 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
14812 return 1; /* DONE */
14816 * For comparison with above,
14818 * movl cf,dest
14819 * movl ct,tmp
14820 * cmpl op1,op2
14821 * cmovcc tmp,dest
14823 * Size 15.
14826 if (! nonimmediate_operand (operands[2], mode))
14827 operands[2] = force_reg (mode, operands[2]);
14828 if (! nonimmediate_operand (operands[3], mode))
14829 operands[3] = force_reg (mode, operands[3]);
14831 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
14833 rtx tmp = gen_reg_rtx (mode);
14834 emit_move_insn (tmp, operands[3]);
14835 operands[3] = tmp;
14837 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
14839 rtx tmp = gen_reg_rtx (mode);
14840 emit_move_insn (tmp, operands[2]);
14841 operands[2] = tmp;
14844 if (! register_operand (operands[2], VOIDmode)
14845 && (mode == QImode
14846 || ! register_operand (operands[3], VOIDmode)))
14847 operands[2] = force_reg (mode, operands[2]);
14849 if (mode == QImode
14850 && ! register_operand (operands[3], VOIDmode))
14851 operands[3] = force_reg (mode, operands[3]);
14853 emit_insn (compare_seq);
14854 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
14855 gen_rtx_IF_THEN_ELSE (mode,
14856 compare_op, operands[2],
14857 operands[3])));
14858 if (bypass_test)
14859 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
14860 gen_rtx_IF_THEN_ELSE (mode,
14861 bypass_test,
14862 copy_rtx (operands[3]),
14863 copy_rtx (operands[0]))));
14864 if (second_test)
14865 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
14866 gen_rtx_IF_THEN_ELSE (mode,
14867 second_test,
14868 copy_rtx (operands[2]),
14869 copy_rtx (operands[0]))));
14871 return 1; /* DONE */
14874 /* Swap, force into registers, or otherwise massage the two operands
14875 to an sse comparison with a mask result. Thus we differ a bit from
14876 ix86_prepare_fp_compare_args which expects to produce a flags result.
14878 The DEST operand exists to help determine whether to commute commutative
14879 operators. The POP0/POP1 operands are updated in place. The new
14880 comparison code is returned, or UNKNOWN if not implementable. */
14882 static enum rtx_code
14883 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
14884 rtx *pop0, rtx *pop1)
14886 rtx tmp;
14888 switch (code)
14890 case LTGT:
14891 case UNEQ:
14892 /* We have no LTGT as an operator. We could implement it with
14893 NE & ORDERED, but this requires an extra temporary. It's
14894 not clear that it's worth it. */
14895 return UNKNOWN;
14897 case LT:
14898 case LE:
14899 case UNGT:
14900 case UNGE:
14901 /* These are supported directly. */
14902 break;
14904 case EQ:
14905 case NE:
14906 case UNORDERED:
14907 case ORDERED:
14908 /* For commutative operators, try to canonicalize the destination
14909 operand to be first in the comparison - this helps reload to
14910 avoid extra moves. */
14911 if (!dest || !rtx_equal_p (dest, *pop1))
14912 break;
14913 /* FALLTHRU */
14915 case GE:
14916 case GT:
14917 case UNLE:
14918 case UNLT:
14919 /* These are not supported directly. Swap the comparison operands
14920 to transform into something that is supported. */
14921 tmp = *pop0;
14922 *pop0 = *pop1;
14923 *pop1 = tmp;
14924 code = swap_condition (code);
14925 break;
14927 default:
14928 gcc_unreachable ();
14931 return code;
14934 /* Detect conditional moves that exactly match min/max operational
14935 semantics. Note that this is IEEE safe, as long as we don't
14936 interchange the operands.
14938 Returns FALSE if this conditional move doesn't match a MIN/MAX,
14939 and TRUE if the operation is successful and instructions are emitted. */
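/* E.g. "x < y ? x : y" maps directly onto minss/minsd: the SSE min/max
   instructions return their second source operand when either input is
   a NaN, which matches the C expression exactly; this is why the
   operands must not be interchanged and why only the LT shape (and
   UNGE with the arms swapped) is accepted below.  */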
14941 static bool
14942 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
14943 rtx cmp_op1, rtx if_true, rtx if_false)
14945 enum machine_mode mode;
14946 bool is_min;
14947 rtx tmp;
14949 if (code == LT)
14951 else if (code == UNGE)
14953 tmp = if_true;
14954 if_true = if_false;
14955 if_false = tmp;
14957 else
14958 return false;
14960 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
14961 is_min = true;
14962 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
14963 is_min = false;
14964 else
14965 return false;
14967 mode = GET_MODE (dest);
14969 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
14970 but MODE may be a vector mode and thus not appropriate. */
14971 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
14973 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
14974 rtvec v;
14976 if_true = force_reg (mode, if_true);
14977 v = gen_rtvec (2, if_true, if_false);
14978 tmp = gen_rtx_UNSPEC (mode, v, u);
14980 else
14982 code = is_min ? SMIN : SMAX;
14983 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
14986 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
14987 return true;
14990 /* Expand an sse vector comparison. Return the register with the result. */
14992 static rtx
14993 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
14994 rtx op_true, rtx op_false)
14996 enum machine_mode mode = GET_MODE (dest);
14997 rtx x;
14999 cmp_op0 = force_reg (mode, cmp_op0);
15000 if (!nonimmediate_operand (cmp_op1, mode))
15001 cmp_op1 = force_reg (mode, cmp_op1);
15003 if (optimize
15004 || reg_overlap_mentioned_p (dest, op_true)
15005 || reg_overlap_mentioned_p (dest, op_false))
15006 dest = gen_reg_rtx (mode);
15008 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
15009 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15011 return dest;
15014 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
15015 operations. This is used for both scalar and vector conditional moves. */
15017 static void
15018 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
15020 enum machine_mode mode = GET_MODE (dest);
15021 rtx t2, t3, x;
15023 if (op_false == CONST0_RTX (mode))
15025 op_true = force_reg (mode, op_true);
15026 x = gen_rtx_AND (mode, cmp, op_true);
15027 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15029 else if (op_true == CONST0_RTX (mode))
15031 op_false = force_reg (mode, op_false);
15032 x = gen_rtx_NOT (mode, cmp);
15033 x = gen_rtx_AND (mode, x, op_false);
15034 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15036 else if (TARGET_SSE5)
15038 rtx pcmov = gen_rtx_SET (mode, dest,
15039 gen_rtx_IF_THEN_ELSE (mode, cmp,
15040 op_true,
15041 op_false));
15042 emit_insn (pcmov);
15044 else
15046 op_true = force_reg (mode, op_true);
15047 op_false = force_reg (mode, op_false);
15049 t2 = gen_reg_rtx (mode);
15050 if (optimize)
15051 t3 = gen_reg_rtx (mode);
15052 else
15053 t3 = dest;
15055 x = gen_rtx_AND (mode, op_true, cmp);
15056 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
15058 x = gen_rtx_NOT (mode, cmp);
15059 x = gen_rtx_AND (mode, x, op_false);
15060 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
15062 x = gen_rtx_IOR (mode, t3, t2);
15063 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15067 /* Expand a floating-point conditional move. Return true if successful. */
15069 int
15070 ix86_expand_fp_movcc (rtx operands[])
15072 enum machine_mode mode = GET_MODE (operands[0]);
15073 enum rtx_code code = GET_CODE (operands[1]);
15074 rtx tmp, compare_op, second_test, bypass_test;
15076 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
15078 enum machine_mode cmode;
15080 /* Since we have no cmove for sse registers, don't force bad register
15081 allocation just to gain access to it. Deny movcc when the
15082 comparison mode doesn't match the move mode. */
15083 cmode = GET_MODE (ix86_compare_op0);
15084 if (cmode == VOIDmode)
15085 cmode = GET_MODE (ix86_compare_op1);
15086 if (cmode != mode)
15087 return 0;
15089 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
15090 &ix86_compare_op0,
15091 &ix86_compare_op1);
15092 if (code == UNKNOWN)
15093 return 0;
15095 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
15096 ix86_compare_op1, operands[2],
15097 operands[3]))
15098 return 1;
15100 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
15101 ix86_compare_op1, operands[2], operands[3]);
15102 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
15103 return 1;
15106 /* The floating point conditional move instructions don't directly
15107 support conditions resulting from a signed integer comparison. */
15109 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
15114 if (!fcmov_comparison_operator (compare_op, VOIDmode))
15116 gcc_assert (!second_test && !bypass_test);
15117 tmp = gen_reg_rtx (QImode);
15118 ix86_expand_setcc (code, tmp);
15119 code = NE;
15120 ix86_compare_op0 = tmp;
15121 ix86_compare_op1 = const0_rtx;
15122 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
15124 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
15126 tmp = gen_reg_rtx (mode);
15127 emit_move_insn (tmp, operands[3]);
15128 operands[3] = tmp;
15130 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
15132 tmp = gen_reg_rtx (mode);
15133 emit_move_insn (tmp, operands[2]);
15134 operands[2] = tmp;
15137 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15138 gen_rtx_IF_THEN_ELSE (mode, compare_op,
15139 operands[2], operands[3])));
15140 if (bypass_test)
15141 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15142 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
15143 operands[3], operands[0])));
15144 if (second_test)
15145 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15146 gen_rtx_IF_THEN_ELSE (mode, second_test,
15147 operands[2], operands[0])));
15149 return 1;
15152 /* Expand a floating-point vector conditional move; a vcond operation
15153 rather than a movcc operation. */
15155 bool
15156 ix86_expand_fp_vcond (rtx operands[])
15158 enum rtx_code code = GET_CODE (operands[3]);
15159 rtx cmp;
15161 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
15162 &operands[4], &operands[5]);
15163 if (code == UNKNOWN)
15164 return false;
15166 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
15167 operands[5], operands[1], operands[2]))
15168 return true;
15170 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
15171 operands[1], operands[2]);
15172 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
15173 return true;
15176 /* Expand a signed/unsigned integral vector conditional move. */
15178 bool
15179 ix86_expand_int_vcond (rtx operands[])
15181 enum machine_mode mode = GET_MODE (operands[0]);
15182 enum rtx_code code = GET_CODE (operands[3]);
15183 bool negate = false;
15184 rtx x, cop0, cop1;
15186 cop0 = operands[4];
15187 cop1 = operands[5];
15189 /* SSE5 supports all of the comparisons on all vector int types. */
15190 if (!TARGET_SSE5)
15192 /* Canonicalize the comparison to EQ, GT, GTU. */
15193 switch (code)
15195 case EQ:
15196 case GT:
15197 case GTU:
15198 break;
15200 case NE:
15201 case LE:
15202 case LEU:
15203 code = reverse_condition (code);
15204 negate = true;
15205 break;
15207 case GE:
15208 case GEU:
15209 code = reverse_condition (code);
15210 negate = true;
15211 /* FALLTHRU */
15213 case LT:
15214 case LTU:
15215 code = swap_condition (code);
15216 x = cop0, cop0 = cop1, cop1 = x;
15217 break;
15219 default:
15220 gcc_unreachable ();
15223 /* Only SSE4.1/SSE4.2 supports V2DImode. */
15224 if (mode == V2DImode)
15226 switch (code)
15228 case EQ:
15229 /* SSE4.1 supports EQ. */
15230 if (!TARGET_SSE4_1)
15231 return false;
15232 break;
15234 case GT:
15235 case GTU:
15236 /* SSE4.2 supports GT/GTU. */
15237 if (!TARGET_SSE4_2)
15238 return false;
15239 break;
15241 default:
15242 gcc_unreachable ();
15246 /* Unsigned parallel compare is not supported by the hardware. Play some
15247 tricks to turn this into a signed comparison against 0. */
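/* In sketch form, the byte/word trick used below: the unsigned
   saturating subtraction subus(x, y) is zero iff x <= y unsigned, so
   "x GTU y" is tested as "subus(x, y) EQ 0" with the selected arms
   negated.  */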
15248 if (code == GTU)
15250 cop0 = force_reg (mode, cop0);
15252 switch (mode)
15254 case V4SImode:
15255 case V2DImode:
15257 rtx t1, t2, mask;
15259 /* Perform a parallel modulo subtraction. */
15260 t1 = gen_reg_rtx (mode);
15261 emit_insn ((mode == V4SImode
15262 ? gen_subv4si3
15263 : gen_subv2di3) (t1, cop0, cop1));
15265 /* Extract the original sign bit of op0. */
15266 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
15267 true, false);
15268 t2 = gen_reg_rtx (mode);
15269 emit_insn ((mode == V4SImode
15270 ? gen_andv4si3
15271 : gen_andv2di3) (t2, cop0, mask));
15273 /* XOR it back into the result of the subtraction. This results
15274 in the sign bit set iff we saw unsigned underflow. */
15275 x = gen_reg_rtx (mode);
15276 emit_insn ((mode == V4SImode
15277 ? gen_xorv4si3
15278 : gen_xorv2di3) (x, t1, t2));
15280 code = GT;
15282 break;
15284 case V16QImode:
15285 case V8HImode:
15286 /* Perform a parallel unsigned saturating subtraction. */
15287 x = gen_reg_rtx (mode);
15288 emit_insn (gen_rtx_SET (VOIDmode, x,
15289 gen_rtx_US_MINUS (mode, cop0, cop1)));
15291 code = EQ;
15292 negate = !negate;
15293 break;
15295 default:
15296 gcc_unreachable ();
15299 cop0 = x;
15300 cop1 = CONST0_RTX (mode);
15304 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
15305 operands[1+negate], operands[2-negate]);
15307 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
15308 operands[2-negate]);
15309 return true;
15312 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
15313 true if we should do zero extension, else sign extension. HIGH_P is
15314 true if we want the N/2 high elements, else the low elements. */
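/* E.g. zero extension of the low half of a V8HI value is just an
   interleave with a zero vector (an illustrative sketch):

       pxor      %xmm7, %xmm7
       punpcklwd %xmm7, %xmm0  ; low four HImode elements -> V4SI

   For sign extension, SE below is instead a vector of per-element
   sign masks computed by a signed compare of 0 > operand.  */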
15316 void
15317 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15319 enum machine_mode imode = GET_MODE (operands[1]);
15320 rtx (*unpack)(rtx, rtx, rtx);
15321 rtx se, dest;
15323 switch (imode)
15325 case V16QImode:
15326 if (high_p)
15327 unpack = gen_vec_interleave_highv16qi;
15328 else
15329 unpack = gen_vec_interleave_lowv16qi;
15330 break;
15331 case V8HImode:
15332 if (high_p)
15333 unpack = gen_vec_interleave_highv8hi;
15334 else
15335 unpack = gen_vec_interleave_lowv8hi;
15336 break;
15337 case V4SImode:
15338 if (high_p)
15339 unpack = gen_vec_interleave_highv4si;
15340 else
15341 unpack = gen_vec_interleave_lowv4si;
15342 break;
15343 default:
15344 gcc_unreachable ();
15347 dest = gen_lowpart (imode, operands[0]);
15349 if (unsigned_p)
15350 se = force_reg (imode, CONST0_RTX (imode));
15351 else
15352 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
15353 operands[1], pc_rtx, pc_rtx);
15355 emit_insn (unpack (dest, operands[1], se));
15358 /* This function performs the same task as ix86_expand_sse_unpack,
15359 but with SSE4.1 instructions. */
15361 void
15362 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15364 enum machine_mode imode = GET_MODE (operands[1]);
15365 rtx (*unpack)(rtx, rtx);
15366 rtx src, dest;
15368 switch (imode)
15370 case V16QImode:
15371 if (unsigned_p)
15372 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
15373 else
15374 unpack = gen_sse4_1_extendv8qiv8hi2;
15375 break;
15376 case V8HImode:
15377 if (unsigned_p)
15378 unpack = gen_sse4_1_zero_extendv4hiv4si2;
15379 else
15380 unpack = gen_sse4_1_extendv4hiv4si2;
15381 break;
15382 case V4SImode:
15383 if (unsigned_p)
15384 unpack = gen_sse4_1_zero_extendv2siv2di2;
15385 else
15386 unpack = gen_sse4_1_extendv2siv2di2;
15387 break;
15388 default:
15389 gcc_unreachable ();
15392 dest = operands[0];
15393 if (high_p)
15395 /* Shift higher 8 bytes to lower 8 bytes. */
15396 src = gen_reg_rtx (imode);
15397 emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, src),
15398 gen_lowpart (TImode, operands[1]),
15399 GEN_INT (64)));
15401 else
15402 src = operands[1];
15404 emit_insn (unpack (dest, src));
15407 /* This function performs the same task as ix86_expand_sse_unpack,
15408 but with sse5 instructions. */
15410 void
15411 ix86_expand_sse5_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15413 enum machine_mode imode = GET_MODE (operands[1]);
15414 int pperm_bytes[16];
15415 int i;
15416 int h = (high_p) ? 8 : 0;
15417 int h2;
15418 int sign_extend;
15419 rtvec v = rtvec_alloc (16);
15420 rtvec vs;
15421 rtx x, p;
15422 rtx op0 = operands[0], op1 = operands[1];
15424 switch (imode)
15426 case V16QImode:
15427 vs = rtvec_alloc (8);
15428 h2 = (high_p) ? 8 : 0;
15429 for (i = 0; i < 8; i++)
15431 pperm_bytes[2*i+0] = PPERM_SRC | PPERM_SRC2 | i | h;
15432 pperm_bytes[2*i+1] = ((unsigned_p)
15433 ? PPERM_ZERO
15434 : PPERM_SIGN | PPERM_SRC2 | i | h);
15437 for (i = 0; i < 16; i++)
15438 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15440 for (i = 0; i < 8; i++)
15441 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15443 p = gen_rtx_PARALLEL (VOIDmode, vs);
15444 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15445 if (unsigned_p)
15446 emit_insn (gen_sse5_pperm_zero_v16qi_v8hi (op0, op1, p, x));
15447 else
15448 emit_insn (gen_sse5_pperm_sign_v16qi_v8hi (op0, op1, p, x));
15449 break;
15451 case V8HImode:
15452 vs = rtvec_alloc (4);
15453 h2 = (high_p) ? 4 : 0;
15454 for (i = 0; i < 4; i++)
15456 sign_extend = ((unsigned_p)
15457 ? PPERM_ZERO
15458 : PPERM_SIGN | PPERM_SRC2 | ((2*i) + 1 + h));
15459 pperm_bytes[4*i+0] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 0 + h);
15460 pperm_bytes[4*i+1] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 1 + h);
15461 pperm_bytes[4*i+2] = sign_extend;
15462 pperm_bytes[4*i+3] = sign_extend;
15465 for (i = 0; i < 16; i++)
15466 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15468 for (i = 0; i < 4; i++)
15469 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15471 p = gen_rtx_PARALLEL (VOIDmode, vs);
15472 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15473 if (unsigned_p)
15474 emit_insn (gen_sse5_pperm_zero_v8hi_v4si (op0, op1, p, x));
15475 else
15476 emit_insn (gen_sse5_pperm_sign_v8hi_v4si (op0, op1, p, x));
15477 break;
15479 case V4SImode:
15480 vs = rtvec_alloc (2);
15481 h2 = (high_p) ? 2 : 0;
15482 for (i = 0; i < 2; i++)
15484 sign_extend = ((unsigned_p)
15485 ? PPERM_ZERO
15486 : PPERM_SIGN | PPERM_SRC2 | ((4*i) + 3 + h));
15487 pperm_bytes[8*i+0] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 0 + h);
15488 pperm_bytes[8*i+1] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 1 + h);
15489 pperm_bytes[8*i+2] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 2 + h);
15490 pperm_bytes[8*i+3] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 3 + h);
15491 pperm_bytes[8*i+4] = sign_extend;
15492 pperm_bytes[8*i+5] = sign_extend;
15493 pperm_bytes[8*i+6] = sign_extend;
15494 pperm_bytes[8*i+7] = sign_extend;
15497 for (i = 0; i < 16; i++)
15498 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15500 for (i = 0; i < 2; i++)
15501 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15503 p = gen_rtx_PARALLEL (VOIDmode, vs);
15504 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15505 if (unsigned_p)
15506 emit_insn (gen_sse5_pperm_zero_v4si_v2di (op0, op1, p, x));
15507 else
15508 emit_insn (gen_sse5_pperm_sign_v4si_v2di (op0, op1, p, x));
15509 break;
15511 default:
15512 gcc_unreachable ();
15515 return;
15518 /* Pack the high bits from OPERANDS[1] and low bits from OPERANDS[2] into the
15519 next narrower integer vector type. */
15520 void
15521 ix86_expand_sse5_pack (rtx operands[3])
15523 enum machine_mode imode = GET_MODE (operands[0]);
15524 int pperm_bytes[16];
15525 int i;
15526 rtvec v = rtvec_alloc (16);
15527 rtx x;
15528 rtx op0 = operands[0];
15529 rtx op1 = operands[1];
15530 rtx op2 = operands[2];
15532 switch (imode)
15534 case V16QImode:
15535 for (i = 0; i < 8; i++)
15537 pperm_bytes[i+0] = PPERM_SRC | PPERM_SRC1 | (i*2);
15538 pperm_bytes[i+8] = PPERM_SRC | PPERM_SRC2 | (i*2);
15541 for (i = 0; i < 16; i++)
15542 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15544 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15545 emit_insn (gen_sse5_pperm_pack_v8hi_v16qi (op0, op1, op2, x));
15546 break;
15548 case V8HImode:
15549 for (i = 0; i < 4; i++)
15551 pperm_bytes[(2*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 0);
15552 pperm_bytes[(2*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 1);
15553 pperm_bytes[(2*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 0);
15554 pperm_bytes[(2*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 1);
15557 for (i = 0; i < 16; i++)
15558 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15560 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15561 emit_insn (gen_sse5_pperm_pack_v4si_v8hi (op0, op1, op2, x));
15562 break;
15564 case V4SImode:
15565 for (i = 0; i < 2; i++)
15567 pperm_bytes[(4*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 0);
15568 pperm_bytes[(4*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 1);
15569 pperm_bytes[(4*i)+2] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 2);
15570 pperm_bytes[(4*i)+3] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 3);
15571 pperm_bytes[(4*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 0);
15572 pperm_bytes[(4*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 1);
15573 pperm_bytes[(4*i)+10] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 2);
15574 pperm_bytes[(4*i)+11] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 3);
15577 for (i = 0; i < 16; i++)
15578 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15580 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15581 emit_insn (gen_sse5_pperm_pack_v2di_v4si (op0, op1, op2, x));
15582 break;
15584 default:
15585 gcc_unreachable ();
15588 return;
15591 /* Expand conditional increment or decrement using adc/sbb instructions.
15592 The default case using setcc followed by the conditional move can be
15593 done by generic code. */
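/* An illustrative sketch of the payoff: "x = (a < b) ? x + 1 : x" with
   unsigned operands can become

       cmpl    b, a            ; CF = (a < b) unsigned
       adcl    $0, x           ; x += CF

   collapsing the compare, setcc and conditional move into one adc.  */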
15594 int
15595 ix86_expand_int_addcc (rtx operands[])
15597 enum rtx_code code = GET_CODE (operands[1]);
15598 rtx compare_op;
15599 rtx val = const0_rtx;
15600 bool fpcmp = false;
15601 enum machine_mode mode = GET_MODE (operands[0]);
15603 if (operands[3] != const1_rtx
15604 && operands[3] != constm1_rtx)
15605 return 0;
15606 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15607 ix86_compare_op1, &compare_op))
15608 return 0;
15609 code = GET_CODE (compare_op);
15611 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15612 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15614 fpcmp = true;
15615 code = ix86_fp_compare_code_to_integer (code);
15618 if (code != LTU)
15620 val = constm1_rtx;
15621 if (fpcmp)
15622 PUT_CODE (compare_op,
15623 reverse_condition_maybe_unordered
15624 (GET_CODE (compare_op)));
15625 else
15626 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
15628 PUT_MODE (compare_op, mode);
15630 /* Construct either adc or sbb insn. */
15631 if ((code == LTU) == (operands[3] == constm1_rtx))
15633 switch (GET_MODE (operands[0]))
15635 case QImode:
15636 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
15637 break;
15638 case HImode:
15639 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
15640 break;
15641 case SImode:
15642 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
15643 break;
15644 case DImode:
15645 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
15646 break;
15647 default:
15648 gcc_unreachable ();
15651 else
15653 switch (GET_MODE (operands[0]))
15655 case QImode:
15656 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
15657 break;
15658 case HImode:
15659 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
15660 break;
15661 case SImode:
15662 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
15663 break;
15664 case DImode:
15665 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
15666 break;
15667 default:
15668 gcc_unreachable ();
15671 return 1; /* DONE */
15675 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
15676 works for floating point parameters and non-offsettable memories.
15677 For pushes, it returns just stack offsets; the values will be saved
15678 in the right order. At most four parts are generated.  */
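/* Worked sizes (illustrative): on a 32-bit target DImode and DFmode
   split into 2 SImode parts, XFmode into 3 and TFmode into 4; on a
   64-bit target XFmode and TFmode split into 2 parts, the upper part
   of XFmode being SImode. This matches the size computation and the
   gcc_assert (size >= 2 && size <= 4) below.  */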
15680 static int
15681 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
15683 int size;
15685 if (!TARGET_64BIT)
15686 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
15687 else
15688 size = (GET_MODE_SIZE (mode) + 4) / 8;
15690 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
15691 gcc_assert (size >= 2 && size <= 4);
15693 /* Optimize constant pool references to immediates. This is used by fp
15694 moves, which force all constants to memory to allow combining. */
15695 if (MEM_P (operand) && MEM_READONLY_P (operand))
15697 rtx tmp = maybe_get_pool_constant (operand);
15698 if (tmp)
15699 operand = tmp;
15702 if (MEM_P (operand) && !offsettable_memref_p (operand))
15704 /* The only non-offsettable memories we handle are pushes. */
15705 int ok = push_operand (operand, VOIDmode);
15707 gcc_assert (ok);
15709 operand = copy_rtx (operand);
15710 PUT_MODE (operand, Pmode);
15711 parts[0] = parts[1] = parts[2] = parts[3] = operand;
15712 return size;
15715 if (GET_CODE (operand) == CONST_VECTOR)
15717 enum machine_mode imode = int_mode_for_mode (mode);
15718 /* Caution: if we looked through a constant pool memory above,
15719 the operand may actually have a different mode now. That's
15720 ok, since we want to pun this all the way back to an integer. */
15721 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
15722 gcc_assert (operand != NULL);
15723 mode = imode;
15726 if (!TARGET_64BIT)
15728 if (mode == DImode)
15729 split_di (&operand, 1, &parts[0], &parts[1]);
15730 else
15732 int i;
15734 if (REG_P (operand))
15736 gcc_assert (reload_completed);
15737 for (i = 0; i < size; i++)
15738 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
15740 else if (offsettable_memref_p (operand))
15742 operand = adjust_address (operand, SImode, 0);
15743 parts[0] = operand;
15744 for (i = 1; i < size; i++)
15745 parts[i] = adjust_address (operand, SImode, 4 * i);
15747 else if (GET_CODE (operand) == CONST_DOUBLE)
15749 REAL_VALUE_TYPE r;
15750 long l[4];
15752 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
15753 switch (mode)
15755 case TFmode:
15756 real_to_target (l, &r, mode);
15757 parts[3] = gen_int_mode (l[3], SImode);
15758 parts[2] = gen_int_mode (l[2], SImode);
15759 break;
15760 case XFmode:
15761 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
15762 parts[2] = gen_int_mode (l[2], SImode);
15763 break;
15764 case DFmode:
15765 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
15766 break;
15767 default:
15768 gcc_unreachable ();
15770 parts[1] = gen_int_mode (l[1], SImode);
15771 parts[0] = gen_int_mode (l[0], SImode);
15773 else
15774 gcc_unreachable ();
15777 else
15779 if (mode == TImode)
15780 split_ti (&operand, 1, &parts[0], &parts[1]);
15781 if (mode == XFmode || mode == TFmode)
15783 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
15784 if (REG_P (operand))
15786 gcc_assert (reload_completed);
15787 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
15788 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
15790 else if (offsettable_memref_p (operand))
15792 operand = adjust_address (operand, DImode, 0);
15793 parts[0] = operand;
15794 parts[1] = adjust_address (operand, upper_mode, 8);
15796 else if (GET_CODE (operand) == CONST_DOUBLE)
15798 REAL_VALUE_TYPE r;
15799 long l[4];
15801 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
15802 real_to_target (l, &r, mode);
15804 /* Do not use shift by 32 to avoid warning on 32bit systems. */
15805 if (HOST_BITS_PER_WIDE_INT >= 64)
15806 parts[0]
15807 = gen_int_mode
15808 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
15809 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
15810 DImode);
15811 else
15812 parts[0] = immed_double_const (l[0], l[1], DImode);
15814 if (upper_mode == SImode)
15815 parts[1] = gen_int_mode (l[2], SImode);
15816 else if (HOST_BITS_PER_WIDE_INT >= 64)
15817 parts[1]
15818 = gen_int_mode
15819 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
15820 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
15821 DImode);
15822 else
15823 parts[1] = immed_double_const (l[2], l[3], DImode);
15825 else
15826 gcc_unreachable ();
15830 return size;
15833 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
15834 The value is split into word-sized parts: operands 2-5 receive the
15835 destination parts and operands 6-9 the source parts, in the correct
15836 order, before the moves are emitted at the end of this function. */
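/* Illustrative flow (editor's sketch): on ia32 a DImode register load
   such as "reg:DI = mem:DI" is split into two SImode moves, roughly

     movl 0(addr), %eax
     movl 4(addr), %edx

   emitted in reverse order when the destination registers overlap the
   source address, per the collision handling below.  */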
15838 void
15839 ix86_split_long_move (rtx operands[])
15841 rtx part[2][4];
15842 int nparts, i, j;
15843 int push = 0;
15844 int collisions = 0;
15845 enum machine_mode mode = GET_MODE (operands[0]);
15846 bool collisionparts[4];
15848 /* The DFmode expanders may ask us to move a double.
15849 For a 64-bit target this is a single move. By hiding the fact
15850 here we simplify the i386.md splitters. */
15851 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
15853 /* Optimize constant pool references to immediates. This is used by
15854 fp moves, which force all constants to memory to allow combining. */
15856 if (MEM_P (operands[1])
15857 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
15858 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
15859 operands[1] = get_pool_constant (XEXP (operands[1], 0));
15860 if (push_operand (operands[0], VOIDmode))
15862 operands[0] = copy_rtx (operands[0]);
15863 PUT_MODE (operands[0], Pmode);
15865 else
15866 operands[0] = gen_lowpart (DImode, operands[0]);
15867 operands[1] = gen_lowpart (DImode, operands[1]);
15868 emit_move_insn (operands[0], operands[1]);
15869 return;
15872 /* The only non-offsettable memory we handle is a push. */
15873 if (push_operand (operands[0], VOIDmode))
15874 push = 1;
15875 else
15876 gcc_assert (!MEM_P (operands[0])
15877 || offsettable_memref_p (operands[0]));
15879 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
15880 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
15882 /* When emitting a push, take care with source operands on the stack. */
15883 if (push && MEM_P (operands[1])
15884 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
15885 for (i = 0; i < nparts - 1; i++)
15886 part[1][i] = change_address (part[1][i],
15887 GET_MODE (part[1][i]),
15888 XEXP (part[1][i + 1], 0));
15890 /* We need to do the copy in the right order in case an address register
15891 of the source overlaps the destination. */
15892 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
15894 rtx tmp;
15896 for (i = 0; i < nparts; i++)
15898 collisionparts[i]
15899 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
15900 if (collisionparts[i])
15901 collisions++;
15904 /* Collision in the middle part can be handled by reordering. */
15905 if (collisions == 1 && nparts == 3 && collisionparts [1])
15907 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
15908 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
15910 else if (collisions == 1
15911 && nparts == 4
15912 && (collisionparts [1] || collisionparts [2]))
15914 if (collisionparts [1])
15916 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
15917 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
15919 else
15921 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
15922 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
15926 /* If there are more collisions, we can't handle them by reordering.
15927 Do an lea into the last part and use only one colliding move. */
15928 else if (collisions > 1)
15930 rtx base;
15932 collisions = 1;
15934 base = part[0][nparts - 1];
15936 /* Handle the case when the last part isn't valid for lea.
15937 Happens in 64-bit mode storing the 12-byte XFmode. */
15938 if (GET_MODE (base) != Pmode)
15939 base = gen_rtx_REG (Pmode, REGNO (base));
15941 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
15942 part[1][0] = replace_equiv_address (part[1][0], base);
15943 for (i = 1; i < nparts; i++)
15945 tmp = plus_constant (base, UNITS_PER_WORD * i);
15946 part[1][i] = replace_equiv_address (part[1][i], tmp);
15951 if (push)
15953 if (!TARGET_64BIT)
15955 if (nparts == 3)
15957 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
15958 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
15959 emit_move_insn (part[0][2], part[1][2]);
15961 else if (nparts == 4)
15963 emit_move_insn (part[0][3], part[1][3]);
15964 emit_move_insn (part[0][2], part[1][2]);
15967 else
15969 /* In 64-bit mode we don't have a 32-bit push available. In case this is a
15970 register, it is OK - we will just use the larger counterpart. We also
15971 retype memory - this comes from an attempt to avoid the REX prefix on
15972 moving the second half of a TFmode value. */
15973 if (GET_MODE (part[1][1]) == SImode)
15975 switch (GET_CODE (part[1][1]))
15977 case MEM:
15978 part[1][1] = adjust_address (part[1][1], DImode, 0);
15979 break;
15981 case REG:
15982 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
15983 break;
15985 default:
15986 gcc_unreachable ();
15989 if (GET_MODE (part[1][0]) == SImode)
15990 part[1][0] = part[1][1];
15993 emit_move_insn (part[0][1], part[1][1]);
15994 emit_move_insn (part[0][0], part[1][0]);
15995 return;
15998 /* Choose the correct order so as not to overwrite the source before it is copied. */
15999 if ((REG_P (part[0][0])
16000 && REG_P (part[1][1])
16001 && (REGNO (part[0][0]) == REGNO (part[1][1])
16002 || (nparts == 3
16003 && REGNO (part[0][0]) == REGNO (part[1][2]))
16004 || (nparts == 4
16005 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16006 || (collisions > 0
16007 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16009 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16011 operands[2 + i] = part[0][j];
16012 operands[6 + i] = part[1][j];
16015 else
16017 for (i = 0; i < nparts; i++)
16019 operands[2 + i] = part[0][i];
16020 operands[6 + i] = part[1][i];
16024 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16025 if (optimize_insn_for_size_p ())
16027 for (j = 0; j < nparts - 1; j++)
16028 if (CONST_INT_P (operands[6 + j])
16029 && operands[6 + j] != const0_rtx
16030 && REG_P (operands[2 + j]))
16031 for (i = j; i < nparts - 1; i++)
16032 if (CONST_INT_P (operands[7 + i])
16033 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16034 operands[7 + i] = operands[2 + j];
16037 for (i = 0; i < nparts; i++)
16038 emit_move_insn (operands[2 + i], operands[6 + i]);
16040 return;
16043 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16044 left shift by a constant, either using a single shift or
16045 a sequence of add instructions. */
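/* Cost sketch (illustrative): a shift by 1 is always emitted as
   "add reg, reg"; a larger constant count expands into COUNT adds only
   while count * ix86_cost->add <= ix86_cost->shift_const and we are
   optimizing for speed; otherwise a single shl with an immediate count
   is used.  */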
16047 static void
16048 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16050 if (count == 1)
16052 emit_insn ((mode == DImode
16053 ? gen_addsi3
16054 : gen_adddi3) (operand, operand, operand));
16056 else if (!optimize_insn_for_size_p ()
16057 && count * ix86_cost->add <= ix86_cost->shift_const)
16059 int i;
16060 for (i = 0; i < count; i++)
16062 emit_insn ((mode == DImode
16063 ? gen_addsi3
16064 : gen_adddi3) (operand, operand, operand));
16067 else
16068 emit_insn ((mode == DImode
16069 ? gen_ashlsi3
16070 : gen_ashldi3) (operand, operand, GEN_INT (count)));
16073 void
16074 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
16076 rtx low[2], high[2];
16077 int count;
16078 const int single_width = mode == DImode ? 32 : 64;
16080 if (CONST_INT_P (operands[2]))
16082 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16083 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16085 if (count >= single_width)
16087 emit_move_insn (high[0], low[1]);
16088 emit_move_insn (low[0], const0_rtx);
16090 if (count > single_width)
16091 ix86_expand_ashl_const (high[0], count - single_width, mode);
16093 else
16095 if (!rtx_equal_p (operands[0], operands[1]))
16096 emit_move_insn (operands[0], operands[1]);
16097 emit_insn ((mode == DImode
16098 ? gen_x86_shld
16099 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
16100 ix86_expand_ashl_const (low[0], count, mode);
16102 return;
16105 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16107 if (operands[1] == const1_rtx)
16109 /* Assuming we've chosen QImode-capable registers, 1 << N
16110 can be done with two 32/64-bit shifts, no branches, no cmoves. */
16111 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
16113 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
16115 ix86_expand_clear (low[0]);
16116 ix86_expand_clear (high[0]);
16117 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
16119 d = gen_lowpart (QImode, low[0]);
16120 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16121 s = gen_rtx_EQ (QImode, flags, const0_rtx);
16122 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16124 d = gen_lowpart (QImode, high[0]);
16125 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16126 s = gen_rtx_NE (QImode, flags, const0_rtx);
16127 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16130 /* Otherwise, we can get the same results by manually performing
16131 a bit extract operation on bit 5/6, and then performing the two
16132 shifts. The two methods of getting 0/1 into low/high are exactly
16133 the same size. Avoiding the shift in the bit extract case helps
16134 pentium4 a bit; no one else seems to care much either way. */
16135 else
16137 rtx x;
16139 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
16140 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
16141 else
16142 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
16143 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
16145 emit_insn ((mode == DImode
16146 ? gen_lshrsi3
16147 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
16148 emit_insn ((mode == DImode
16149 ? gen_andsi3
16150 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
16151 emit_move_insn (low[0], high[0]);
16152 emit_insn ((mode == DImode
16153 ? gen_xorsi3
16154 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
16157 emit_insn ((mode == DImode
16158 ? gen_ashlsi3
16159 : gen_ashldi3) (low[0], low[0], operands[2]));
16160 emit_insn ((mode == DImode
16161 ? gen_ashlsi3
16162 : gen_ashldi3) (high[0], high[0], operands[2]));
16163 return;
16166 if (operands[1] == constm1_rtx)
16168 /* For -1 << N, we can avoid the shld instruction, because we
16169 know that we're shifting 0...31/63 ones into a -1. */
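/* (Illustrative note: for a shift count below 32/64 the high half of
   -1 << N is still all ones, so presetting both halves to -1 and
   shifting only the low half is enough; the shift-adjust sequence
   emitted below handles counts of 32/64 and above.)  */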
16170 emit_move_insn (low[0], constm1_rtx);
16171 if (optimize_insn_for_size_p ())
16172 emit_move_insn (high[0], low[0]);
16173 else
16174 emit_move_insn (high[0], constm1_rtx);
16176 else
16178 if (!rtx_equal_p (operands[0], operands[1]))
16179 emit_move_insn (operands[0], operands[1]);
16181 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16182 emit_insn ((mode == DImode
16183 ? gen_x86_shld
16184 : gen_x86_64_shld) (high[0], low[0], operands[2]));
16187 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
16189 if (TARGET_CMOVE && scratch)
16191 ix86_expand_clear (scratch);
16192 emit_insn ((mode == DImode
16193 ? gen_x86_shift_adj_1
16194 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
16195 scratch));
16197 else
16198 emit_insn ((mode == DImode
16199 ? gen_x86_shift_adj_2
16200 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
16203 void
16204 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
16206 rtx low[2], high[2];
16207 int count;
16208 const int single_width = mode == DImode ? 32 : 64;
16210 if (CONST_INT_P (operands[2]))
16212 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16213 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16215 if (count == single_width * 2 - 1)
16217 emit_move_insn (high[0], high[1]);
16218 emit_insn ((mode == DImode
16219 ? gen_ashrsi3
16220 : gen_ashrdi3) (high[0], high[0],
16221 GEN_INT (single_width - 1)));
16222 emit_move_insn (low[0], high[0]);
16225 else if (count >= single_width)
16227 emit_move_insn (low[0], high[1]);
16228 emit_move_insn (high[0], low[0]);
16229 emit_insn ((mode == DImode
16230 ? gen_ashrsi3
16231 : gen_ashrdi3) (high[0], high[0],
16232 GEN_INT (single_width - 1)));
16233 if (count > single_width)
16234 emit_insn ((mode == DImode
16235 ? gen_ashrsi3
16236 : gen_ashrdi3) (low[0], low[0],
16237 GEN_INT (count - single_width)));
16239 else
16241 if (!rtx_equal_p (operands[0], operands[1]))
16242 emit_move_insn (operands[0], operands[1]);
16243 emit_insn ((mode == DImode
16244 ? gen_x86_shrd
16245 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
16246 emit_insn ((mode == DImode
16247 ? gen_ashrsi3
16248 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
16251 else
16253 if (!rtx_equal_p (operands[0], operands[1]))
16254 emit_move_insn (operands[0], operands[1]);
16256 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16258 emit_insn ((mode == DImode
16259 ? gen_x86_shrd
16260 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
16261 emit_insn ((mode == DImode
16262 ? gen_ashrsi3
16263 : gen_ashrdi3) (high[0], high[0], operands[2]));
16265 if (TARGET_CMOVE && scratch)
16267 emit_move_insn (scratch, high[0]);
16268 emit_insn ((mode == DImode
16269 ? gen_ashrsi3
16270 : gen_ashrdi3) (scratch, scratch,
16271 GEN_INT (single_width - 1)));
16272 emit_insn ((mode == DImode
16273 ? gen_x86_shift_adj_1
16274 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
16275 scratch));
16277 else
16278 emit_insn ((mode == DImode
16279 ? gen_x86_shift_adj_3
16280 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
16284 void
16285 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
16287 rtx low[2], high[2];
16288 int count;
16289 const int single_width = mode == DImode ? 32 : 64;
16291 if (CONST_INT_P (operands[2]))
16293 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16294 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16296 if (count >= single_width)
16298 emit_move_insn (low[0], high[1]);
16299 ix86_expand_clear (high[0]);
16301 if (count > single_width)
16302 emit_insn ((mode == DImode
16303 ? gen_lshrsi3
16304 : gen_lshrdi3) (low[0], low[0],
16305 GEN_INT (count - single_width)));
16307 else
16309 if (!rtx_equal_p (operands[0], operands[1]))
16310 emit_move_insn (operands[0], operands[1]);
16311 emit_insn ((mode == DImode
16312 ? gen_x86_shrd
16313 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
16314 emit_insn ((mode == DImode
16315 ? gen_lshrsi3
16316 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
16319 else
16321 if (!rtx_equal_p (operands[0], operands[1]))
16322 emit_move_insn (operands[0], operands[1]);
16324 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16326 emit_insn ((mode == DImode
16327 ? gen_x86_shrd
16328 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
16329 emit_insn ((mode == DImode
16330 ? gen_lshrsi3
16331 : gen_lshrdi3) (high[0], high[0], operands[2]));
16333 /* Heh. By reversing the arguments, we can reuse this pattern. */
16334 if (TARGET_CMOVE && scratch)
16336 ix86_expand_clear (scratch);
16337 emit_insn ((mode == DImode
16338 ? gen_x86_shift_adj_1
16339 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
16340 scratch));
16342 else
16343 emit_insn ((mode == DImode
16344 ? gen_x86_shift_adj_2
16345 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
16349 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
16350 static void
16351 predict_jump (int prob)
16353 rtx insn = get_last_insn ();
16354 gcc_assert (JUMP_P (insn));
16355 REG_NOTES (insn)
16356 = gen_rtx_EXPR_LIST (REG_BR_PROB,
16357 GEN_INT (prob),
16358 REG_NOTES (insn));
16361 /* Helper function for the string operations below. Test whether the
16362 VALUE bits of VARIABLE are clear, and jump to the returned label if so. */
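/* Illustrative expansion (editor's sketch): for VALUE == 4 this emits
   roughly

     testl $4, %reg         # VARIABLE in %reg
     je    .Llabel          # skip the 4-byte chunk when the bit is clear

   with the branch predicted 50% taken in epilogues and 90% taken in
   prologues via predict_jump above.  */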
16363 static rtx
16364 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
16366 rtx label = gen_label_rtx ();
16367 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
16368 if (GET_MODE (variable) == DImode)
16369 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
16370 else
16371 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
16372 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
16373 1, label);
16374 if (epilogue)
16375 predict_jump (REG_BR_PROB_BASE * 50 / 100);
16376 else
16377 predict_jump (REG_BR_PROB_BASE * 90 / 100);
16378 return label;
16381 /* Decrement COUNTREG by VALUE. */
16382 static void
16383 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
16385 if (GET_MODE (countreg) == DImode)
16386 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
16387 else
16388 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
16391 /* Zero-extend EXP, which may be in SImode, into a Pmode register. */
16392 rtx
16393 ix86_zero_extend_to_Pmode (rtx exp)
16395 rtx r;
16396 if (GET_MODE (exp) == VOIDmode)
16397 return force_reg (Pmode, exp);
16398 if (GET_MODE (exp) == Pmode)
16399 return copy_to_mode_reg (Pmode, exp);
16400 r = gen_reg_rtx (Pmode);
16401 emit_insn (gen_zero_extendsidi2 (r, exp));
16402 return r;
16405 /* Divide COUNTREG by SCALE. */
16406 static rtx
16407 scale_counter (rtx countreg, int scale)
16409 rtx sc;
16410 rtx piece_size_mask;
16412 if (scale == 1)
16413 return countreg;
16414 if (CONST_INT_P (countreg))
16415 return GEN_INT (INTVAL (countreg) / scale);
16416 gcc_assert (REG_P (countreg));
16418 piece_size_mask = GEN_INT (scale - 1);
16419 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
16420 GEN_INT (exact_log2 (scale)),
16421 NULL, 1, OPTAB_DIRECT);
16422 return sc;
16425 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
16426 DImode for constant loop counts. */
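/* E.g. (illustrative): a constant count of 0x100000000 on a 64-bit
   target does not fit in SImode and yields DImode, while a constant
   1000 yields SImode even though Pmode is DImode.  */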
16428 static enum machine_mode
16429 counter_mode (rtx count_exp)
16431 if (GET_MODE (count_exp) != VOIDmode)
16432 return GET_MODE (count_exp);
16433 if (GET_CODE (count_exp) != CONST_INT)
16434 return Pmode;
16435 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
16436 return DImode;
16437 return SImode;
16440 /* When SRCPTR is non-NULL, output a simple loop to move memory pointed
16441 to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
16442 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
16443 the equivalent loop to set memory to VALUE (supposed to be in MODE).
16445 The size is rounded down to a whole number of chunks moved at once.
16446 SRCMEM and DESTMEM provide the MEM rtx to feed proper aliasing info. */
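/* Shape of the emitted code (editor's sketch in pseudo-C):

     size = count & ~(piece - 1);          piece = MODE size * UNROLL
     if (piece == 1 && size == 0) goto out;
     iter = 0;
   top:
     copy (or set) UNROLL chunks of MODE at dest + iter (and src + iter);
     iter += piece;
     if (iter < size) goto top;
     destptr += iter;  srcptr += iter;     pointers advanced at the end
   out:

   with the back edge's branch probability derived from EXPECTED_SIZE.  */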
16449 static void
16450 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
16451 rtx destptr, rtx srcptr, rtx value,
16452 rtx count, enum machine_mode mode, int unroll,
16453 int expected_size)
16455 rtx out_label, top_label, iter, tmp;
16456 enum machine_mode iter_mode = counter_mode (count);
16457 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
16458 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
16459 rtx size;
16460 rtx x_addr;
16461 rtx y_addr;
16462 int i;
16464 top_label = gen_label_rtx ();
16465 out_label = gen_label_rtx ();
16466 iter = gen_reg_rtx (iter_mode);
16468 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
16469 NULL, 1, OPTAB_DIRECT);
16470 /* Those two should combine. */
16471 if (piece_size == const1_rtx)
16473 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
16474 true, out_label);
16475 predict_jump (REG_BR_PROB_BASE * 10 / 100);
16477 emit_move_insn (iter, const0_rtx);
16479 emit_label (top_label);
16481 tmp = convert_modes (Pmode, iter_mode, iter, true);
16482 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
16483 destmem = change_address (destmem, mode, x_addr);
16485 if (srcmem)
16487 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
16488 srcmem = change_address (srcmem, mode, y_addr);
16490 /* When unrolling for chips that reorder memory reads and writes,
16491 we can save registers by using a single temporary.
16492 Also, using 4 temporaries is overkill in 32-bit mode. */
16493 if (!TARGET_64BIT && 0)
16495 for (i = 0; i < unroll; i++)
16497 if (i)
16499 destmem =
16500 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16501 srcmem =
16502 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
16504 emit_move_insn (destmem, srcmem);
16507 else
16509 rtx tmpreg[4];
16510 gcc_assert (unroll <= 4);
16511 for (i = 0; i < unroll; i++)
16513 tmpreg[i] = gen_reg_rtx (mode);
16514 if (i)
16516 srcmem =
16517 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
16519 emit_move_insn (tmpreg[i], srcmem);
16521 for (i = 0; i < unroll; i++)
16523 if (i)
16525 destmem =
16526 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16528 emit_move_insn (destmem, tmpreg[i]);
16532 else
16533 for (i = 0; i < unroll; i++)
16535 if (i)
16536 destmem =
16537 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16538 emit_move_insn (destmem, value);
16541 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
16542 true, OPTAB_LIB_WIDEN);
16543 if (tmp != iter)
16544 emit_move_insn (iter, tmp);
16546 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
16547 true, top_label);
16548 if (expected_size != -1)
16550 expected_size /= GET_MODE_SIZE (mode) * unroll;
16551 if (expected_size == 0)
16552 predict_jump (0);
16553 else if (expected_size > REG_BR_PROB_BASE)
16554 predict_jump (REG_BR_PROB_BASE - 1);
16555 else
16556 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
16558 else
16559 predict_jump (REG_BR_PROB_BASE * 80 / 100);
16560 iter = ix86_zero_extend_to_Pmode (iter);
16561 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
16562 true, OPTAB_LIB_WIDEN);
16563 if (tmp != destptr)
16564 emit_move_insn (destptr, tmp);
16565 if (srcptr)
16567 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
16568 true, OPTAB_LIB_WIDEN);
16569 if (tmp != srcptr)
16570 emit_move_insn (srcptr, tmp);
16572 emit_label (out_label);
16575 /* Output "rep; mov" instruction.
16576 Arguments have the same meaning as for the previous function. */
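/* Illustrative expansion (editor's sketch): for a SImode copy this emits
   roughly

     shrl $2, %ecx          # countreg = byte count / 4
     rep movsl              # copy %ecx longwords from (%esi) to (%edi)

   with DESTEXP/SRCEXP describing the final pointer values, e.g.
   destptr + (countreg << 2), for the gen_rep_mov pattern.  */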
16577 static void
16578 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
16579 rtx destptr, rtx srcptr,
16580 rtx count,
16581 enum machine_mode mode)
16583 rtx destexp;
16584 rtx srcexp;
16585 rtx countreg;
16587 /* If the size is known and divisible by 4, it is shorter to use rep movsl. */
16588 if (mode == QImode && CONST_INT_P (count)
16589 && !(INTVAL (count) & 3))
16590 mode = SImode;
16592 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
16593 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
16594 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
16595 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
16596 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
16597 if (mode != QImode)
16599 destexp = gen_rtx_ASHIFT (Pmode, countreg,
16600 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
16601 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
16602 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
16603 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
16604 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
16606 else
16608 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
16609 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
16611 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
16612 destexp, srcexp));
16615 /* Output "rep; stos" instruction.
16616 Arguments have the same meaning as for the previous function. */
16617 static void
16618 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
16619 rtx count,
16620 enum machine_mode mode)
16622 rtx destexp;
16623 rtx countreg;
16625 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
16626 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
16627 value = force_reg (mode, gen_lowpart (mode, value));
16628 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
16629 if (mode != QImode)
16631 destexp = gen_rtx_ASHIFT (Pmode, countreg,
16632 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
16633 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
16635 else
16636 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
16637 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
16640 static void
16641 emit_strmov (rtx destmem, rtx srcmem,
16642 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
16644 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
16645 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
16646 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16649 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
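/* Worked example (illustrative): for a known count of 23 with
   max_size > 16 on a 64-bit target, the constant branch below emits
   two DImode moves (16 bytes), then one SImode (4 bytes), one HImode
   (2 bytes) and one QImode (1 byte) move, following the set bits of
   23 = 0x17.  */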
16650 static void
16651 expand_movmem_epilogue (rtx destmem, rtx srcmem,
16652 rtx destptr, rtx srcptr, rtx count, int max_size)
16654 rtx src, dest;
16655 if (CONST_INT_P (count))
16657 HOST_WIDE_INT countval = INTVAL (count);
16658 int offset = 0;
16660 if ((countval & 0x10) && max_size > 16)
16662 if (TARGET_64BIT)
16664 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
16665 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
16667 else
16668 gcc_unreachable ();
16669 offset += 16;
16671 if ((countval & 0x08) && max_size > 8)
16673 if (TARGET_64BIT)
16674 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
16675 else
16677 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
16678 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
16680 offset += 8;
16682 if ((countval & 0x04) && max_size > 4)
16684 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
16685 offset += 4;
16687 if ((countval & 0x02) && max_size > 2)
16689 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
16690 offset += 2;
16692 if ((countval & 0x01) && max_size > 1)
16694 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
16695 offset += 1;
16697 return;
16699 if (max_size > 8)
16701 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
16702 count, 1, OPTAB_DIRECT);
16703 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
16704 count, QImode, 1, 4);
16705 return;
16708 /* When single string operations are available, we can cheaply advance the
16709 dest and src pointers. Otherwise we save code size by maintaining an offset
16710 (zero is readily available from the preceding rep operation) and using x86 addressing modes.  */
16712 if (TARGET_SINGLE_STRINGOP)
16714 if (max_size > 4)
16716 rtx label = ix86_expand_aligntest (count, 4, true);
16717 src = change_address (srcmem, SImode, srcptr);
16718 dest = change_address (destmem, SImode, destptr);
16719 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16720 emit_label (label);
16721 LABEL_NUSES (label) = 1;
16723 if (max_size > 2)
16725 rtx label = ix86_expand_aligntest (count, 2, true);
16726 src = change_address (srcmem, HImode, srcptr);
16727 dest = change_address (destmem, HImode, destptr);
16728 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16729 emit_label (label);
16730 LABEL_NUSES (label) = 1;
16732 if (max_size > 1)
16734 rtx label = ix86_expand_aligntest (count, 1, true);
16735 src = change_address (srcmem, QImode, srcptr);
16736 dest = change_address (destmem, QImode, destptr);
16737 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16738 emit_label (label);
16739 LABEL_NUSES (label) = 1;
16742 else
16744 rtx offset = force_reg (Pmode, const0_rtx);
16745 rtx tmp;
16747 if (max_size > 4)
16749 rtx label = ix86_expand_aligntest (count, 4, true);
16750 src = change_address (srcmem, SImode, srcptr);
16751 dest = change_address (destmem, SImode, destptr);
16752 emit_move_insn (dest, src);
16753 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
16754 true, OPTAB_LIB_WIDEN);
16755 if (tmp != offset)
16756 emit_move_insn (offset, tmp);
16757 emit_label (label);
16758 LABEL_NUSES (label) = 1;
16760 if (max_size > 2)
16762 rtx label = ix86_expand_aligntest (count, 2, true);
16763 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
16764 src = change_address (srcmem, HImode, tmp);
16765 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
16766 dest = change_address (destmem, HImode, tmp);
16767 emit_move_insn (dest, src);
16768 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
16769 true, OPTAB_LIB_WIDEN);
16770 if (tmp != offset)
16771 emit_move_insn (offset, tmp);
16772 emit_label (label);
16773 LABEL_NUSES (label) = 1;
16775 if (max_size > 1)
16777 rtx label = ix86_expand_aligntest (count, 1, true);
16778 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
16779 src = change_address (srcmem, QImode, tmp);
16780 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
16781 dest = change_address (destmem, QImode, tmp);
16782 emit_move_insn (dest, src);
16783 emit_label (label);
16784 LABEL_NUSES (label) = 1;
16789 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
16790 static void
16791 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
16792 rtx count, int max_size)
16794 count =
16795 expand_simple_binop (counter_mode (count), AND, count,
16796 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
16797 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
16798 gen_lowpart (QImode, value), count, QImode,
16799 1, max_size / 2);
16802 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
16803 static void
16804 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
16806 rtx dest;
16808 if (CONST_INT_P (count))
16810 HOST_WIDE_INT countval = INTVAL (count);
16811 int offset = 0;
16813 if ((countval & 0x10) && max_size > 16)
16815 if (TARGET_64BIT)
16817 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
16818 emit_insn (gen_strset (destptr, dest, value));
16819 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
16820 emit_insn (gen_strset (destptr, dest, value));
16822 else
16823 gcc_unreachable ();
16824 offset += 16;
16826 if ((countval & 0x08) && max_size > 8)
16828 if (TARGET_64BIT)
16830 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
16831 emit_insn (gen_strset (destptr, dest, value));
16833 else
16835 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
16836 emit_insn (gen_strset (destptr, dest, value));
16837 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
16838 emit_insn (gen_strset (destptr, dest, value));
16840 offset += 8;
16842 if ((countval & 0x04) && max_size > 4)
16844 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
16845 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
16846 offset += 4;
16848 if ((countval & 0x02) && max_size > 2)
16850 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
16851 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
16852 offset += 2;
16854 if ((countval & 0x01) && max_size > 1)
16856 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
16857 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
16858 offset += 1;
16860 return;
16862 if (max_size > 32)
16864 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
16865 return;
16867 if (max_size > 16)
16869 rtx label = ix86_expand_aligntest (count, 16, true);
16870 if (TARGET_64BIT)
16872 dest = change_address (destmem, DImode, destptr);
16873 emit_insn (gen_strset (destptr, dest, value));
16874 emit_insn (gen_strset (destptr, dest, value));
16876 else
16878 dest = change_address (destmem, SImode, destptr);
16879 emit_insn (gen_strset (destptr, dest, value));
16880 emit_insn (gen_strset (destptr, dest, value));
16881 emit_insn (gen_strset (destptr, dest, value));
16882 emit_insn (gen_strset (destptr, dest, value));
16884 emit_label (label);
16885 LABEL_NUSES (label) = 1;
16887 if (max_size > 8)
16889 rtx label = ix86_expand_aligntest (count, 8, true);
16890 if (TARGET_64BIT)
16892 dest = change_address (destmem, DImode, destptr);
16893 emit_insn (gen_strset (destptr, dest, value));
16895 else
16897 dest = change_address (destmem, SImode, destptr);
16898 emit_insn (gen_strset (destptr, dest, value));
16899 emit_insn (gen_strset (destptr, dest, value));
16901 emit_label (label);
16902 LABEL_NUSES (label) = 1;
16904 if (max_size > 4)
16906 rtx label = ix86_expand_aligntest (count, 4, true);
16907 dest = change_address (destmem, SImode, destptr);
16908 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
16909 emit_label (label);
16910 LABEL_NUSES (label) = 1;
16912 if (max_size > 2)
16914 rtx label = ix86_expand_aligntest (count, 2, true);
16915 dest = change_address (destmem, HImode, destptr);
16916 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
16917 emit_label (label);
16918 LABEL_NUSES (label) = 1;
16920 if (max_size > 1)
16922 rtx label = ix86_expand_aligntest (count, 1, true);
16923 dest = change_address (destmem, QImode, destptr);
16924 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
16925 emit_label (label);
16926 LABEL_NUSES (label) = 1;
16930 /* Copy enough from SRC to DEST to align DEST, which is known to be aligned
16931 to ALIGN, up to DESIRED_ALIGNMENT. */
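/* Illustrative flow (editor's sketch): with ALIGN == 1 and
   DESIRED_ALIGNMENT == 4 this emits, roughly,

     test $1, destptr; jz 1f;  copy one byte;  count -= 1;  1:
     test $2, destptr; jz 2f;  copy two bytes; count -= 2;  2:

   leaving DESTPTR 4-byte aligned before the main loop runs.  */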
16932 static void
16933 expand_movmem_prologue (rtx destmem, rtx srcmem,
16934 rtx destptr, rtx srcptr, rtx count,
16935 int align, int desired_alignment)
16937 if (align <= 1 && desired_alignment > 1)
16939 rtx label = ix86_expand_aligntest (destptr, 1, false);
16940 srcmem = change_address (srcmem, QImode, srcptr);
16941 destmem = change_address (destmem, QImode, destptr);
16942 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
16943 ix86_adjust_counter (count, 1);
16944 emit_label (label);
16945 LABEL_NUSES (label) = 1;
16947 if (align <= 2 && desired_alignment > 2)
16949 rtx label = ix86_expand_aligntest (destptr, 2, false);
16950 srcmem = change_address (srcmem, HImode, srcptr);
16951 destmem = change_address (destmem, HImode, destptr);
16952 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
16953 ix86_adjust_counter (count, 2);
16954 emit_label (label);
16955 LABEL_NUSES (label) = 1;
16957 if (align <= 4 && desired_alignment > 4)
16959 rtx label = ix86_expand_aligntest (destptr, 4, false);
16960 srcmem = change_address (srcmem, SImode, srcptr);
16961 destmem = change_address (destmem, SImode, destptr);
16962 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
16963 ix86_adjust_counter (count, 4);
16964 emit_label (label);
16965 LABEL_NUSES (label) = 1;
16967 gcc_assert (desired_alignment <= 8);
16970 /* Store enough of VALUE into DEST to align DEST, which is known to be
16971 aligned to ALIGN, up to DESIRED_ALIGNMENT. */
16972 static void
16973 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
16974 int align, int desired_alignment)
16976 if (align <= 1 && desired_alignment > 1)
16978 rtx label = ix86_expand_aligntest (destptr, 1, false);
16979 destmem = change_address (destmem, QImode, destptr);
16980 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
16981 ix86_adjust_counter (count, 1);
16982 emit_label (label);
16983 LABEL_NUSES (label) = 1;
16985 if (align <= 2 && desired_alignment > 2)
16987 rtx label = ix86_expand_aligntest (destptr, 2, false);
16988 destmem = change_address (destmem, HImode, destptr);
16989 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
16990 ix86_adjust_counter (count, 2);
16991 emit_label (label);
16992 LABEL_NUSES (label) = 1;
16994 if (align <= 4 && desired_alignment > 4)
16996 rtx label = ix86_expand_aligntest (destptr, 4, false);
16997 destmem = change_address (destmem, SImode, destptr);
16998 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
16999 ix86_adjust_counter (count, 4);
17000 emit_label (label);
17001 LABEL_NUSES (label) = 1;
17003 gcc_assert (desired_alignment <= 8);
17006 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
17007 static enum stringop_alg
17008 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
17009 int *dynamic_check)
17011 const struct stringop_algs * algs;
17012 bool optimize_for_speed;
17013 /* Algorithms using the rep prefix want at least edi and ecx;
17014 additionally, memset wants eax and memcpy wants esi. Don't
17015 consider such algorithms if the user has appropriated those
17016 registers for their own purposes. */
17017 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
17018 || (memset
17019 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
17021 #define ALG_USABLE_P(alg) (rep_prefix_usable \
17022 || (alg != rep_prefix_1_byte \
17023 && alg != rep_prefix_4_byte \
17024 && alg != rep_prefix_8_byte))
17025 const struct processor_costs *cost;
17027 /* Even if the string operation call is cold, we still might spend a lot
17028 of time processing large blocks. */
17029 if (optimize_function_for_size_p (cfun)
17030 || (optimize_insn_for_size_p ()
17031 && expected_size != -1 && expected_size < 256))
17032 optimize_for_speed = false;
17033 else
17034 optimize_for_speed = true;
17036 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
17038 *dynamic_check = -1;
17039 if (memset)
17040 algs = &cost->memset[TARGET_64BIT != 0];
17041 else
17042 algs = &cost->memcpy[TARGET_64BIT != 0];
17043 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
17044 return stringop_alg;
17045 /* rep; movq or rep; movl is the smallest variant. */
17046 else if (!optimize_for_speed)
17048 if (!count || (count & 3))
17049 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
17050 else
17051 return rep_prefix_usable ? rep_prefix_4_byte : loop;
17053 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.  */
17055 else if (expected_size != -1 && expected_size < 4)
17056 return loop_1_byte;
17057 else if (expected_size != -1)
17059 unsigned int i;
17060 enum stringop_alg alg = libcall;
17061 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
17063 /* We get here if the algorithms that were not libcall-based
17064 were rep-prefix based and we are unable to use rep prefixes
17065 based on global register usage. Break out of the loop and
17066 use the heuristic below. */
17067 if (algs->size[i].max == 0)
17068 break;
17069 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
17071 enum stringop_alg candidate = algs->size[i].alg;
17073 if (candidate != libcall && ALG_USABLE_P (candidate))
17074 alg = candidate;
17075 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
17076 last non-libcall inline algorithm. */
17077 if (TARGET_INLINE_ALL_STRINGOPS)
17079 /* When the current size is best copied by a libcall, but we
17080 are still forced to inline, run the heuristic below
17081 that will pick code for medium-sized blocks. */
17082 if (alg != libcall)
17083 return alg;
17084 break;
17086 else if (ALG_USABLE_P (candidate))
17087 return candidate;
17090 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
17092 /* When asked to inline the call anyway, try to pick a meaningful choice.
17093 We look for the maximal size of a block that is faster to copy by hand,
17094 and take blocks of at most that size, guessing that the average size will
17095 be roughly half of the block.
17097 If this turns out to be bad, we might simply specify the preferred
17098 choice in ix86_costs. */
17099 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17100 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
17102 int max = -1;
17103 enum stringop_alg alg;
17104 int i;
17105 bool any_alg_usable_p = true;
17107 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
17109 enum stringop_alg candidate = algs->size[i].alg;
17110 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
17112 if (candidate != libcall && candidate
17113 && ALG_USABLE_P (candidate))
17114 max = algs->size[i].max;
17116 /* If there aren't any usable algorithms, then recursing on
17117 smaller sizes isn't going to find anything. Just return the
17118 simple byte-at-a-time copy loop. */
17119 if (!any_alg_usable_p)
17121 /* Pick something reasonable. */
17122 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17123 *dynamic_check = 128;
17124 return loop_1_byte;
17126 if (max == -1)
17127 max = 4096;
17128 alg = decide_alg (count, max / 2, memset, dynamic_check);
17129 gcc_assert (*dynamic_check == -1);
17130 gcc_assert (alg != libcall);
17131 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17132 *dynamic_check = max;
17133 return alg;
17135 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
17136 #undef ALG_USABLE_P
17139 /* Decide on alignment. We know that the operand is already aligned to ALIGN
17140 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
17141 static int
17142 decide_alignment (int align,
17143 enum stringop_alg alg,
17144 int expected_size)
17146 int desired_align = 0;
17147 switch (alg)
17149 case no_stringop:
17150 gcc_unreachable ();
17151 case loop:
17152 case unrolled_loop:
17153 desired_align = GET_MODE_SIZE (Pmode);
17154 break;
17155 case rep_prefix_8_byte:
17156 desired_align = 8;
17157 break;
17158 case rep_prefix_4_byte:
17159 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
17160 copying a whole cache line at once. */
17161 if (TARGET_PENTIUMPRO)
17162 desired_align = 8;
17163 else
17164 desired_align = 4;
17165 break;
17166 case rep_prefix_1_byte:
17167 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
17168 copying a whole cache line at once. */
17169 if (TARGET_PENTIUMPRO)
17170 desired_align = 8;
17171 else
17172 desired_align = 1;
17173 break;
17174 case loop_1_byte:
17175 desired_align = 1;
17176 break;
17177 case libcall:
17178 return 0;
17181 if (optimize_size)
17182 desired_align = 1;
17183 if (desired_align < align)
17184 desired_align = align;
17185 if (expected_size != -1 && expected_size < 4)
17186 desired_align = align;
17187 return desired_align;
17190 /* Return the smallest power of 2 greater than VAL. */
17191 static int
17192 smallest_pow2_greater_than (int val)
17194 int ret = 1;
17195 while (ret <= val)
17196 ret <<= 1;
17197 return ret;
17200 /* Expand string move (memcpy) operation. Use i386 string operations when
17201 profitable. expand_setmem contains similar code. The code depends upon
17202 architecture, block size and alignment, but always has the same
17203 overall structure:
17205 1) Prologue guard: Conditional that jumps up to epilogues for small
17206 blocks that can be handled by epilogue alone. This is faster but
17207 also needed for correctness, since the prologue assumes the block is larger
17208 than the desired alignment.
17210 Optional dynamic check for size and libcall for large
17211 blocks is emitted here too, with -minline-stringops-dynamically.
17213 2) Prologue: copy first few bytes in order to get destination aligned
17214 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
17215 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
17216 We emit either a jump tree on power of two sized blocks, or a byte loop.
17218 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
17219 with specified algorithm.
17221 4) Epilogue: code copying tail of the block that is too small to be
17222 handled by main body (or up to size guarded by prologue guard). */
17224 int
17225 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
17226 rtx expected_align_exp, rtx expected_size_exp)
17228 rtx destreg;
17229 rtx srcreg;
17230 rtx label = NULL;
17231 rtx tmp;
17232 rtx jump_around_label = NULL;
17233 HOST_WIDE_INT align = 1;
17234 unsigned HOST_WIDE_INT count = 0;
17235 HOST_WIDE_INT expected_size = -1;
17236 int size_needed = 0, epilogue_size_needed;
17237 int desired_align = 0;
17238 enum stringop_alg alg;
17239 int dynamic_check;
17240 bool need_zero_guard = false;
17242 if (CONST_INT_P (align_exp))
17243 align = INTVAL (align_exp);
17244 /* i386 can do misaligned access at a reasonably increased cost. */
17245 if (CONST_INT_P (expected_align_exp)
17246 && INTVAL (expected_align_exp) > align)
17247 align = INTVAL (expected_align_exp);
17248 if (CONST_INT_P (count_exp))
17249 count = expected_size = INTVAL (count_exp);
17250 if (CONST_INT_P (expected_size_exp) && count == 0)
17251 expected_size = INTVAL (expected_size_exp);
17253 /* Make sure we don't need to care about overflow later on. */
17254 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
17255 return 0;
17257 /* Step 0: Decide on preferred algorithm, desired alignment and
17258 size of chunks to be copied by main loop. */
17260 alg = decide_alg (count, expected_size, false, &dynamic_check);
17261 desired_align = decide_alignment (align, alg, expected_size);
17263 if (!TARGET_ALIGN_STRINGOPS)
17264 align = desired_align;
17266 if (alg == libcall)
17267 return 0;
17268 gcc_assert (alg != no_stringop);
17269 if (!count)
17270 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
17271 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
17272 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
17273 switch (alg)
17275 case libcall:
17276 case no_stringop:
17277 gcc_unreachable ();
17278 case loop:
17279 need_zero_guard = true;
17280 size_needed = GET_MODE_SIZE (Pmode);
17281 break;
17282 case unrolled_loop:
17283 need_zero_guard = true;
17284 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
17285 break;
17286 case rep_prefix_8_byte:
17287 size_needed = 8;
17288 break;
17289 case rep_prefix_4_byte:
17290 size_needed = 4;
17291 break;
17292 case rep_prefix_1_byte:
17293 size_needed = 1;
17294 break;
17295 case loop_1_byte:
17296 need_zero_guard = true;
17297 size_needed = 1;
17298 break;
17301 epilogue_size_needed = size_needed;
17303 /* Step 1: Prologue guard. */
17305 /* Alignment code needs count to be in register. */
17306 if (CONST_INT_P (count_exp) && desired_align > align)
17307 count_exp = force_reg (counter_mode (count_exp), count_exp);
17308 gcc_assert (desired_align >= 1 && align >= 1);
17310 /* Ensure that alignment prologue won't copy past end of block. */
17311 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
17313 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
17314 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
17315 Make sure it is a power of 2. */
17316 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
17318 if (CONST_INT_P (count_exp))
17320 if (UINTVAL (count_exp) < (unsigned HOST_WIDE_INT)epilogue_size_needed)
17321 goto epilogue;
17323 else
17325 label = gen_label_rtx ();
17326 emit_cmp_and_jump_insns (count_exp,
17327 GEN_INT (epilogue_size_needed),
17328 LTU, 0, counter_mode (count_exp), 1, label);
17329 if (expected_size == -1 || expected_size < epilogue_size_needed)
17330 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17331 else
17332 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17336 /* Emit code to decide at runtime whether a library call or inline code
17337 should be used. */
17338 if (dynamic_check != -1)
17340 if (CONST_INT_P (count_exp))
17342 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
17344 emit_block_move_via_libcall (dst, src, count_exp, false);
17345 count_exp = const0_rtx;
17346 goto epilogue;
17349 else
17351 rtx hot_label = gen_label_rtx ();
17352 jump_around_label = gen_label_rtx ();
17353 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
17354 LEU, 0, GET_MODE (count_exp), 1, hot_label);
17355 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17356 emit_block_move_via_libcall (dst, src, count_exp, false);
17357 emit_jump (jump_around_label);
17358 emit_label (hot_label);
17362 /* Step 2: Alignment prologue. */
17364 if (desired_align > align)
17366 /* Except for the first move in the epilogue, we no longer know
17367 the constant offset in the aliasing info. It doesn't seem worth
17368 the pain to maintain it for the first move, so throw away
17369 the info early. */
17370 src = change_address (src, BLKmode, srcreg);
17371 dst = change_address (dst, BLKmode, destreg);
17372 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
17373 desired_align);
17374 if (need_zero_guard && !count)
17376 /* It is possible that we copied enough so the main loop will not
17377 execute. */
17378 emit_cmp_and_jump_insns (count_exp,
17379 GEN_INT (size_needed),
17380 LTU, 0, counter_mode (count_exp), 1, label);
17381 if (expected_size == -1
17382 || expected_size < (desired_align - align) / 2 + size_needed)
17383 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17384 else
17385 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17388 if (label && size_needed == 1)
17390 emit_label (label);
17391 LABEL_NUSES (label) = 1;
17392 label = NULL;
17395 /* Step 3: Main loop. */
17397 switch (alg)
17399 case libcall:
17400 case no_stringop:
17401 gcc_unreachable ();
17402 case loop_1_byte:
17403 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
17404 count_exp, QImode, 1, expected_size);
17405 break;
17406 case loop:
17407 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
17408 count_exp, Pmode, 1, expected_size);
17409 break;
17410 case unrolled_loop:
17411 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
17412 registers for 4 temporaries anyway. */
17413 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
17414 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
17415 expected_size);
17416 break;
17417 case rep_prefix_8_byte:
17418 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
17419 DImode);
17420 break;
17421 case rep_prefix_4_byte:
17422 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
17423 SImode);
17424 break;
17425 case rep_prefix_1_byte:
17426 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
17427 QImode);
17428 break;
17430 /* Properly adjust the offsets of src and dest memory for aliasing. */
17431 if (CONST_INT_P (count_exp))
17433 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
17434 (count / size_needed) * size_needed);
17435 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
17436 (count / size_needed) * size_needed);
17438 else
17440 src = change_address (src, BLKmode, srcreg);
17441 dst = change_address (dst, BLKmode, destreg);
17444 /* Step 4: Epilogue to copy the remaining bytes. */
17445 epilogue:
17446 if (label)
17448 /* When the main loop is done, COUNT_EXP might hold the original count,
17449 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
17450 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
17451 bytes. Compensate if needed. */
17453 if (size_needed < epilogue_size_needed)
17455 tmp =
17456 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
17457 GEN_INT (size_needed - 1), count_exp, 1,
17458 OPTAB_DIRECT);
17459 if (tmp != count_exp)
17460 emit_move_insn (count_exp, tmp);
17462 emit_label (label);
17463 LABEL_NUSES (label) = 1;
17466 if (count_exp != const0_rtx && epilogue_size_needed > 1)
17467 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
17468 epilogue_size_needed);
17469 if (jump_around_label)
17470 emit_label (jump_around_label);
17471 return 1;
/* Helper function for memset.  For the QImode value 0xXY produce
   0xXYXYXYXY of the width specified by MODE.  This is essentially
   a * 0x01010101, but we can do slightly better than
   synth_mult by unwinding the sequence by hand on CPUs with
   a slow multiply.  */
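/* Worked example (an annotation, not in the original sources): with
   VAL == 0xAB and MODE == SImode the constant path below computes
	v = 0xAB;   v |= v << 8;    v is now 0xABAB
		    v |= v << 16;   v is now 0xABABABAB
   while the register path builds the same pattern with insv or
   shift-and-IOR steps, or with a single multiply by the promoted
   constant 0x01010101 when the cost check says the multiply is
   cheaper.  */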
17479 static rtx
17480 promote_duplicated_reg (enum machine_mode mode, rtx val)
17482 enum machine_mode valmode = GET_MODE (val);
17483 rtx tmp;
17484 int nops = mode == DImode ? 3 : 2;
17486 gcc_assert (mode == SImode || mode == DImode);
17487 if (val == const0_rtx)
17488 return copy_to_mode_reg (mode, const0_rtx);
17489 if (CONST_INT_P (val))
17491 HOST_WIDE_INT v = INTVAL (val) & 255;
17493 v |= v << 8;
17494 v |= v << 16;
17495 if (mode == DImode)
17496 v |= (v << 16) << 16;
17497 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
17500 if (valmode == VOIDmode)
17501 valmode = QImode;
17502 if (valmode != QImode)
17503 val = gen_lowpart (QImode, val);
17504 if (mode == QImode)
17505 return val;
17506 if (!TARGET_PARTIAL_REG_STALL)
17507 nops--;
17508 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
17509 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
17510 <= (ix86_cost->shift_const + ix86_cost->add) * nops
17511 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
17513 rtx reg = convert_modes (mode, QImode, val, true);
17514 tmp = promote_duplicated_reg (mode, const1_rtx);
17515 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
17516 OPTAB_DIRECT);
17518 else
17520 rtx reg = convert_modes (mode, QImode, val, true);
17522 if (!TARGET_PARTIAL_REG_STALL)
17523 if (mode == SImode)
17524 emit_insn (gen_movsi_insv_1 (reg, reg));
17525 else
17526 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
17527 else
17529 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
17530 NULL, 1, OPTAB_DIRECT);
17531 reg =
17532 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
17534 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
17535 NULL, 1, OPTAB_DIRECT);
17536 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
17537 if (mode == SImode)
17538 return reg;
17539 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
17540 NULL, 1, OPTAB_DIRECT);
17541 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
17542 return reg;
/* Duplicate the value VAL using promote_duplicated_reg into the maximal
   size that will be needed by the main loop copying SIZE_NEEDED chunks
   and by the prologue raising the alignment from ALIGN to DESIRED_ALIGN.  */
17549 static rtx
17550 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
17552 rtx promoted_val;
17554 if (TARGET_64BIT
17555 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
17556 promoted_val = promote_duplicated_reg (DImode, val);
17557 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
17558 promoted_val = promote_duplicated_reg (SImode, val);
17559 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
17560 promoted_val = promote_duplicated_reg (HImode, val);
17561 else
17562 promoted_val = val;
17564 return promoted_val;
/* Expand a string clear operation (memset/bzero).  Use i386 string
   operations when profitable.  See the expand_movmem comment for an
   explanation of the individual steps performed.  */
int
ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
17572 rtx expected_align_exp, rtx expected_size_exp)
17574 rtx destreg;
17575 rtx label = NULL;
17576 rtx tmp;
17577 rtx jump_around_label = NULL;
17578 HOST_WIDE_INT align = 1;
17579 unsigned HOST_WIDE_INT count = 0;
17580 HOST_WIDE_INT expected_size = -1;
17581 int size_needed = 0, epilogue_size_needed;
17582 int desired_align = 0;
17583 enum stringop_alg alg;
17584 rtx promoted_val = NULL;
17585 bool force_loopy_epilogue = false;
17586 int dynamic_check;
17587 bool need_zero_guard = false;
17589 if (CONST_INT_P (align_exp))
17590 align = INTVAL (align_exp);
/* i386 can do misaligned access at a reasonably increased cost.  */
17592 if (CONST_INT_P (expected_align_exp)
17593 && INTVAL (expected_align_exp) > align)
17594 align = INTVAL (expected_align_exp);
17595 if (CONST_INT_P (count_exp))
17596 count = expected_size = INTVAL (count_exp);
17597 if (CONST_INT_P (expected_size_exp) && count == 0)
17598 expected_size = INTVAL (expected_size_exp);
17600 /* Make sure we don't need to care about overflow later on. */
17601 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
17602 return 0;
17604 /* Step 0: Decide on preferred algorithm, desired alignment and
17605 size of chunks to be copied by main loop. */
17607 alg = decide_alg (count, expected_size, true, &dynamic_check);
17608 desired_align = decide_alignment (align, alg, expected_size);
17610 if (!TARGET_ALIGN_STRINGOPS)
17611 align = desired_align;
17613 if (alg == libcall)
17614 return 0;
17615 gcc_assert (alg != no_stringop);
17616 if (!count)
17617 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
17618 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
17619 switch (alg)
17621 case libcall:
17622 case no_stringop:
17623 gcc_unreachable ();
17624 case loop:
17625 need_zero_guard = true;
17626 size_needed = GET_MODE_SIZE (Pmode);
17627 break;
17628 case unrolled_loop:
17629 need_zero_guard = true;
17630 size_needed = GET_MODE_SIZE (Pmode) * 4;
17631 break;
17632 case rep_prefix_8_byte:
17633 size_needed = 8;
17634 break;
17635 case rep_prefix_4_byte:
17636 size_needed = 4;
17637 break;
17638 case rep_prefix_1_byte:
17639 size_needed = 1;
17640 break;
17641 case loop_1_byte:
17642 need_zero_guard = true;
17643 size_needed = 1;
17644 break;
17646 epilogue_size_needed = size_needed;
17648 /* Step 1: Prologue guard. */
17650 /* Alignment code needs count to be in register. */
17651 if (CONST_INT_P (count_exp) && desired_align > align)
17653 enum machine_mode mode = SImode;
17654 if (TARGET_64BIT && (count & ~0xffffffff))
17655 mode = DImode;
17656 count_exp = force_reg (mode, count_exp);
/* Do the cheap promotion to allow better CSE across the
   main loop and epilogue (i.e. one load of the big constant in
   front of all the code).  */
17661 if (CONST_INT_P (val_exp))
17662 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
17663 desired_align, align);
17664 /* Ensure that alignment prologue won't copy past end of block. */
17665 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
17667 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
/* The epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
   Make sure it is a power of 2.  */
17670 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
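/* Example (an illustrative annotation, not in the original sources):
   with size_needed == 16, desired_align == 16 and align == 4 this
   computes MAX (15, 12) == 15, rounded up to the power of two 16, so
   the small-block guard below compares COUNT_EXP against 16.  */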
/* To improve performance of small blocks, we jump around the VAL
   promoting code.  This means that if the promoted VAL is not a
   constant, we might not use it in the epilogue and have to use the
   byte loop variant.  */
17676 if (epilogue_size_needed > 2 && !promoted_val)
17677 force_loopy_epilogue = true;
17678 label = gen_label_rtx ();
17679 emit_cmp_and_jump_insns (count_exp,
17680 GEN_INT (epilogue_size_needed),
17681 LTU, 0, counter_mode (count_exp), 1, label);
      if (GET_CODE (count_exp) == CONST_INT)
	;
17684 else if (expected_size == -1 || expected_size <= epilogue_size_needed)
17685 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17686 else
17687 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17689 if (dynamic_check != -1)
17691 rtx hot_label = gen_label_rtx ();
17692 jump_around_label = gen_label_rtx ();
17693 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
17694 LEU, 0, counter_mode (count_exp), 1, hot_label);
17695 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17696 set_storage_via_libcall (dst, count_exp, val_exp, false);
17697 emit_jump (jump_around_label);
17698 emit_label (hot_label);
17701 /* Step 2: Alignment prologue. */
/* Do the expensive promotion once we have branched off the small blocks.  */
17704 if (!promoted_val)
17705 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
17706 desired_align, align);
17707 gcc_assert (desired_align >= 1 && align >= 1);
17709 if (desired_align > align)
/* Except for the first move in the epilogue, we no longer know
   the constant offset in aliasing info.  It does not seem worth
   the pain to maintain it for the first move, so throw away
   the info early.  */
17715 dst = change_address (dst, BLKmode, destreg);
17716 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
17717 desired_align);
17718 if (need_zero_guard && !count)
17720 /* It is possible that we copied enough so the main loop will not
17721 execute. */
17722 emit_cmp_and_jump_insns (count_exp,
17723 GEN_INT (size_needed),
17724 LTU, 0, counter_mode (count_exp), 1, label);
17725 if (expected_size == -1
17726 || expected_size < (desired_align - align) / 2 + size_needed)
17727 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17728 else
17729 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17732 if (label && size_needed == 1)
17734 emit_label (label);
17735 LABEL_NUSES (label) = 1;
17736 label = NULL;
17739 /* Step 3: Main loop. */
17741 switch (alg)
17743 case libcall:
17744 case no_stringop:
17745 gcc_unreachable ();
17746 case loop_1_byte:
17747 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
17748 count_exp, QImode, 1, expected_size);
17749 break;
17750 case loop:
17751 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
17752 count_exp, Pmode, 1, expected_size);
17753 break;
17754 case unrolled_loop:
17755 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
17756 count_exp, Pmode, 4, expected_size);
17757 break;
17758 case rep_prefix_8_byte:
17759 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
17760 DImode);
17761 break;
17762 case rep_prefix_4_byte:
17763 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
17764 SImode);
17765 break;
17766 case rep_prefix_1_byte:
17767 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
17768 QImode);
17769 break;
/* Properly adjust the offset of the dest memory for aliasing.  */
17772 if (CONST_INT_P (count_exp))
17773 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
17774 (count / size_needed) * size_needed);
17775 else
17776 dst = change_address (dst, BLKmode, destreg);
17778 /* Step 4: Epilogue to copy the remaining bytes. */
17780 if (label)
/* When the main loop is done, COUNT_EXP might hold the original count,
   while we want to copy only COUNT_EXP & SIZE_NEEDED bytes.
   Epilogue code will actually copy COUNT_EXP & EPILOGUE_SIZE_NEEDED
   bytes.  Compensate if needed.  */
17787 if (size_needed < desired_align - align)
17789 tmp =
17790 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
17791 GEN_INT (size_needed - 1), count_exp, 1,
17792 OPTAB_DIRECT);
17793 size_needed = desired_align - align + 1;
17794 if (tmp != count_exp)
17795 emit_move_insn (count_exp, tmp);
17797 emit_label (label);
17798 LABEL_NUSES (label) = 1;
17800 if (count_exp != const0_rtx && epilogue_size_needed > 1)
17802 if (force_loopy_epilogue)
17803 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
17804 size_needed);
17805 else
17806 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
17807 size_needed);
17809 if (jump_around_label)
17810 emit_label (jump_around_label);
17811 return 1;
17814 /* Expand the appropriate insns for doing strlen if not just doing
17815 repnz; scasb
17817 out = result, initialized with the start address
17818 align_rtx = alignment of the address.
scratch = scratch register, initialized with the start address when
17820 not aligned, otherwise undefined
17822 This is just the body. It needs the initializations mentioned above and
17823 some address computing at the end. These things are done in i386.md. */
17825 static void
17826 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
17828 int align;
17829 rtx tmp;
17830 rtx align_2_label = NULL_RTX;
17831 rtx align_3_label = NULL_RTX;
17832 rtx align_4_label = gen_label_rtx ();
17833 rtx end_0_label = gen_label_rtx ();
17834 rtx mem;
17835 rtx tmpreg = gen_reg_rtx (SImode);
17836 rtx scratch = gen_reg_rtx (SImode);
17837 rtx cmp;
17839 align = 0;
17840 if (CONST_INT_P (align_rtx))
17841 align = INTVAL (align_rtx);
17843 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
17845 /* Is there a known alignment and is it less than 4? */
17846 if (align < 4)
17848 rtx scratch1 = gen_reg_rtx (Pmode);
17849 emit_move_insn (scratch1, out);
17850 /* Is there a known alignment and is it not 2? */
17851 if (align != 2)
17853 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
17854 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
17856 /* Leave just the 3 lower bits. */
17857 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
17858 NULL_RTX, 0, OPTAB_WIDEN);
17860 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
17861 Pmode, 1, align_4_label);
17862 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
17863 Pmode, 1, align_2_label);
17864 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
17865 Pmode, 1, align_3_label);
17867 else
/* Since the alignment is 2, we have to check 2 or 0 bytes;
   check whether it is aligned to a 4-byte boundary.  */
17872 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
17873 NULL_RTX, 0, OPTAB_WIDEN);
17875 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
17876 Pmode, 1, align_4_label);
17879 mem = change_address (src, QImode, out);
17881 /* Now compare the bytes. */
/* Compare the first n unaligned bytes on a byte-by-byte basis.  */
17884 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
17885 QImode, 1, end_0_label);
17887 /* Increment the address. */
17888 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
17890 /* Not needed with an alignment of 2 */
17891 if (align != 2)
17893 emit_label (align_2_label);
17895 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
17896 end_0_label);
17898 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
17900 emit_label (align_3_label);
17903 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
17904 end_0_label);
17906 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
/* Generate a loop to check 4 bytes at a time.  It is not a good idea to
   align this loop; doing so only enlarges the program without making
   it any faster.  */
17912 emit_label (align_4_label);
17914 mem = change_address (src, SImode, out);
17915 emit_move_insn (scratch, mem);
17916 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
/* This formula yields a nonzero result iff one of the bytes is zero.
   This saves three branches inside the loop and many cycles.  */
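/* Worked example (an annotation, not in the original sources): for
   scratch == 0x41004241, i.e. the bytes 0x41 0x42 0x00 0x41 in memory
   order,
       scratch + (-0x01010101)  == 0x3fff4140
       ... & ~scratch           == 0x3eff0100
       ... & 0x80808080         == 0x00800000
   which is nonzero exactly because byte 2 is zero; the surviving 0x80
   sits in the same byte lane as the zero byte.  */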
17921 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
17922 emit_insn (gen_one_cmplsi2 (scratch, scratch));
17923 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
17924 emit_insn (gen_andsi3 (tmpreg, tmpreg,
17925 gen_int_mode (0x80808080, SImode)));
17926 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
17927 align_4_label);
17929 if (TARGET_CMOVE)
17931 rtx reg = gen_reg_rtx (SImode);
17932 rtx reg2 = gen_reg_rtx (Pmode);
17933 emit_move_insn (reg, tmpreg);
17934 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
17936 /* If zero is not in the first two bytes, move two bytes forward. */
17937 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
17938 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17939 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
17940 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
17941 gen_rtx_IF_THEN_ELSE (SImode, tmp,
17942 reg,
17943 tmpreg)));
17944 /* Emit lea manually to avoid clobbering of flags. */
17945 emit_insn (gen_rtx_SET (SImode, reg2,
17946 gen_rtx_PLUS (Pmode, out, const2_rtx)));
17948 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17949 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
17950 emit_insn (gen_rtx_SET (VOIDmode, out,
17951 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
17952 reg2,
17953 out)));
17956 else
17958 rtx end_2_label = gen_label_rtx ();
17959 /* Is zero in the first two bytes? */
17961 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
17962 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17963 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
17964 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
17965 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
17966 pc_rtx);
17967 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
17968 JUMP_LABEL (tmp) = end_2_label;
17970 /* Not in the first two. Move two bytes forward. */
17971 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
17972 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
17974 emit_label (end_2_label);
17978 /* Avoid branch in fixing the byte. */
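/* How this works (an annotation, not in the original sources): OUT is
   now 4 bytes past the start of the two-byte group that contains the
   zero, and bit 7 of TMPREG's low byte is set iff the zero is the
   first byte of that group.  Doubling the low byte moves that bit into
   the carry flag, so the sbb below computes OUT - 3 - carry: OUT - 4
   for the first byte, OUT - 3 for the second, leaving OUT pointing
   exactly at the zero byte.  */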
17979 tmpreg = gen_lowpart (QImode, tmpreg);
17980 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
17981 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
17982 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), cmp));
17984 emit_label (end_0_label);
17987 /* Expand strlen. */
int
ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
17992 rtx addr, scratch1, scratch2, scratch3, scratch4;
/* The generic case of the strlen expander is long.  Avoid expanding it
   unless TARGET_INLINE_ALL_STRINGOPS.  */
17997 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
17998 && !TARGET_INLINE_ALL_STRINGOPS
17999 && !optimize_insn_for_size_p ()
18000 && (!CONST_INT_P (align) || INTVAL (align) < 4))
18001 return 0;
18003 addr = force_reg (Pmode, XEXP (src, 0));
18004 scratch1 = gen_reg_rtx (Pmode);
18006 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
18007 && !optimize_insn_for_size_p ())
18009 /* Well it seems that some optimizer does not combine a call like
18010 foo(strlen(bar), strlen(bar));
when the move and the subtraction are done here.  It does calculate
18012 the length just once when these instructions are done inside of
18013 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
18014 often used and I use one fewer register for the lifetime of
18015 output_strlen_unroll() this is better. */
18017 emit_move_insn (out, addr);
18019 ix86_expand_strlensi_unroll_1 (out, src, align);
18021 /* strlensi_unroll_1 returns the address of the zero at the end of
18022 the string, like memchr(), so compute the length by subtracting
18023 the start address. */
18024 emit_insn ((*ix86_gen_sub3) (out, out, addr));
18026 else
18028 rtx unspec;
18030 /* Can't use this if the user has appropriated eax, ecx, or edi. */
18031 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
18032 return false;
18034 scratch2 = gen_reg_rtx (Pmode);
18035 scratch3 = gen_reg_rtx (Pmode);
18036 scratch4 = force_reg (Pmode, constm1_rtx);
18038 emit_move_insn (scratch3, addr);
18039 eoschar = force_reg (QImode, eoschar);
18041 src = replace_equiv_address_nv (src, scratch3);
18043 /* If .md starts supporting :P, this can be done in .md. */
18044 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
18045 scratch4), UNSPEC_SCAS);
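/* Illustrative arithmetic (an annotation, not in the original sources):
   SCRATCH4 preloads the count register with -1; repnz scasb then
   examines n + 1 bytes, including the terminator, leaving the counter
   at -(n + 2).  The one's complement below turns that into n + 1, and
   adding -1 yields the string length n.  */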
18046 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
18047 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
18048 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
18050 return 1;
/* For a given symbol (function), construct code to compute the address of
   its PLT entry in the large x86-64 PIC model.  */
static rtx
construct_plt_address (rtx symbol)
18058 rtx tmp = gen_reg_rtx (Pmode);
18059 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
18061 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
18062 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
18064 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
18065 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
18066 return tmp;
18069 void
18070 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
18071 rtx callarg2 ATTRIBUTE_UNUSED,
18072 rtx pop, int sibcall)
18074 rtx use = NULL, call;
18076 if (pop == const0_rtx)
18077 pop = NULL;
18078 gcc_assert (!TARGET_64BIT || !pop);
18080 if (TARGET_MACHO && !TARGET_64BIT)
18082 #if TARGET_MACHO
18083 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
18084 fnaddr = machopic_indirect_call_target (fnaddr);
18085 #endif
18087 else
18089 /* Static functions and indirect calls don't need the pic register. */
18090 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
18091 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
18092 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
18093 use_reg (&use, pic_offset_table_rtx);
18096 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
18098 rtx al = gen_rtx_REG (QImode, AX_REG);
18099 emit_move_insn (al, callarg2);
18100 use_reg (&use, al);
18103 if (ix86_cmodel == CM_LARGE_PIC
18104 && GET_CODE (fnaddr) == MEM
18105 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
18106 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
18107 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
18108 else if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
18110 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
18111 fnaddr = gen_rtx_MEM (QImode, fnaddr);
18113 if (sibcall && TARGET_64BIT
18114 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
18116 rtx addr;
18117 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
18118 fnaddr = gen_rtx_REG (Pmode, R11_REG);
18119 emit_move_insn (fnaddr, addr);
18120 fnaddr = gen_rtx_MEM (QImode, fnaddr);
18123 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
18124 if (retval)
18125 call = gen_rtx_SET (VOIDmode, retval, call);
18126 if (pop)
18128 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
18129 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
18130 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
18133 call = emit_call_insn (call);
18134 if (use)
18135 CALL_INSN_FUNCTION_USAGE (call) = use;
18139 /* Clear stack slot assignments remembered from previous functions.
18140 This is called from INIT_EXPANDERS once before RTL is emitted for each
18141 function. */
18143 static struct machine_function *
18144 ix86_init_machine_status (void)
18146 struct machine_function *f;
18148 f = GGC_CNEW (struct machine_function);
18149 f->use_fast_prologue_epilogue_nregs = -1;
18150 f->tls_descriptor_call_expanded_p = 0;
18151 f->call_abi = DEFAULT_ABI;
18153 return f;
18156 /* Return a MEM corresponding to a stack slot with mode MODE.
18157 Allocate a new slot if necessary.
18159 The RTL for a function can have several slots available: N is
18160 which slot to use. */
rtx
assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
18165 struct stack_local_entry *s;
18167 gcc_assert (n < MAX_386_STACK_LOCALS);
18169 /* Virtual slot is valid only before vregs are instantiated. */
18170 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
18172 for (s = ix86_stack_locals; s; s = s->next)
18173 if (s->mode == mode && s->n == n)
18174 return copy_rtx (s->rtl);
18176 s = (struct stack_local_entry *)
18177 ggc_alloc (sizeof (struct stack_local_entry));
18178 s->n = n;
18179 s->mode = mode;
18180 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
18182 s->next = ix86_stack_locals;
18183 ix86_stack_locals = s;
18184 return s->rtl;
18187 /* Construct the SYMBOL_REF for the tls_get_addr function. */
18189 static GTY(()) rtx ix86_tls_symbol;
static rtx
ix86_tls_get_addr (void)
18194 if (!ix86_tls_symbol)
18196 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
18197 (TARGET_ANY_GNU_TLS
18198 && !TARGET_64BIT)
18199 ? "___tls_get_addr"
18200 : "__tls_get_addr");
18203 return ix86_tls_symbol;
18206 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
18208 static GTY(()) rtx ix86_tls_module_base_symbol;
static rtx
ix86_tls_module_base (void)
18213 if (!ix86_tls_module_base_symbol)
18215 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
18216 "_TLS_MODULE_BASE_");
18217 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
18218 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
18221 return ix86_tls_module_base_symbol;
18224 /* Calculate the length of the memory address in the instruction
18225 encoding. Does not include the one-byte modrm, opcode, or prefix. */
int
memory_address_length (rtx addr)
18230 struct ix86_address parts;
18231 rtx base, index, disp;
18232 int len;
18233 int ok;
18235 if (GET_CODE (addr) == PRE_DEC
18236 || GET_CODE (addr) == POST_INC
18237 || GET_CODE (addr) == PRE_MODIFY
18238 || GET_CODE (addr) == POST_MODIFY)
18239 return 0;
18241 ok = ix86_decompose_address (addr, &parts);
18242 gcc_assert (ok);
18244 if (parts.base && GET_CODE (parts.base) == SUBREG)
18245 parts.base = SUBREG_REG (parts.base);
18246 if (parts.index && GET_CODE (parts.index) == SUBREG)
18247 parts.index = SUBREG_REG (parts.index);
18249 base = parts.base;
18250 index = parts.index;
18251 disp = parts.disp;
18252 len = 0;
18254 /* Rule of thumb:
18255 - esp as the base always wants an index,
18256 - ebp as the base always wants a displacement. */
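/* Examples (an illustrative annotation, not in the original sources):
   "(%eax)" needs no extra bytes (len 0); "(%ebp)" needs a one-byte
   displacement and "(%esp)" a SIB byte (len 1); a direct address needs
   a four-byte displacement (len 4); "8(%eax,%ebx,2)" needs a SIB byte
   plus a one-byte displacement (len 2).  */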
18258 /* Register Indirect. */
18259 if (base && !index && !disp)
18261 /* esp (for its index) and ebp (for its displacement) need
18262 the two-byte modrm form. */
18263 if (addr == stack_pointer_rtx
18264 || addr == arg_pointer_rtx
18265 || addr == frame_pointer_rtx
18266 || addr == hard_frame_pointer_rtx)
18267 len = 1;
18270 /* Direct Addressing. */
18271 else if (disp && !base && !index)
18272 len = 4;
18274 else
18276 /* Find the length of the displacement constant. */
18277 if (disp)
18279 if (base && satisfies_constraint_K (disp))
18280 len = 1;
18281 else
18282 len = 4;
18284 /* ebp always wants a displacement. */
18285 else if (base == hard_frame_pointer_rtx)
18286 len = 1;
18288 /* An index requires the two-byte modrm form.... */
18289 if (index
18290 /* ...like esp, which always wants an index. */
18291 || base == stack_pointer_rtx
18292 || base == arg_pointer_rtx
18293 || base == frame_pointer_rtx)
18294 len += 1;
18297 return len;
/* Compute the default value for the "length_immediate" attribute.  When
   SHORTFORM is set, expect that the insn has an 8-bit immediate
   alternative.  */
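/* Example (an illustrative annotation, not in the original sources):
   with SHORTFORM set, "addl $4, %eax" satisfies constraint K (a signed
   8-bit immediate) and is counted as 1 byte, while "addl $1000, %eax"
   in MODE_SI is counted as 4 bytes.  */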
int
ix86_attr_length_immediate_default (rtx insn, int shortform)
18305 int len = 0;
18306 int i;
18307 extract_insn_cached (insn);
18308 for (i = recog_data.n_operands - 1; i >= 0; --i)
18309 if (CONSTANT_P (recog_data.operand[i]))
18311 gcc_assert (!len);
18312 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
18313 len = 1;
18314 else
18316 switch (get_attr_mode (insn))
	  case MODE_QI:
	    len += 1;
	    break;
	  case MODE_HI:
	    len += 2;
	    break;
	  case MODE_SI:
	    len += 4;
	    break;
	  /* Immediates for DImode instructions are encoded as
	     32-bit sign-extended values.  */
	  case MODE_DI:
	    len += 4;
	    break;
	  default:
	    fatal_insn ("unknown insn mode", insn);
18336 return len;
/* Compute the default value for the "length_address" attribute.  */
int
ix86_attr_length_address_default (rtx insn)
18342 int i;
18344 if (get_attr_type (insn) == TYPE_LEA)
18346 rtx set = PATTERN (insn);
18348 if (GET_CODE (set) == PARALLEL)
18349 set = XVECEXP (set, 0, 0);
18351 gcc_assert (GET_CODE (set) == SET);
18353 return memory_address_length (SET_SRC (set));
18356 extract_insn_cached (insn);
18357 for (i = recog_data.n_operands - 1; i >= 0; --i)
18358 if (MEM_P (recog_data.operand[i]))
18360 return memory_address_length (XEXP (recog_data.operand[i], 0));
18361 break;
18363 return 0;
/* Compute the default value for the "length_vex" attribute.  It includes
   the 2- or 3-byte VEX prefix and 1 opcode byte.  */
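/* Example (an illustrative annotation, not in the original sources):
   a 0f-map insn without VEX.W fits the 2-byte prefix and returns 3;
   VEX.W, a non-0f opcode, a DImode register operand, or an extended
   register mentioned inside a memory operand all force the 3-byte
   prefix and return 4.  */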
int
ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
18371 int has_vex_w)
18373 int i;
/* Only insns with a 0f opcode can use the 2-byte VEX prefix, and the
   VEX.W bit requires the 3-byte VEX prefix.  */
18377 if (!has_0f_opcode || has_vex_w)
18378 return 3 + 1;
/* We can always use the 2-byte VEX prefix in 32-bit mode.  */
18381 if (!TARGET_64BIT)
18382 return 2 + 1;
18384 extract_insn_cached (insn);
18386 for (i = recog_data.n_operands - 1; i >= 0; --i)
18387 if (REG_P (recog_data.operand[i]))
/* The REX.W bit uses the 3-byte VEX prefix.  */
18390 if (GET_MODE (recog_data.operand[i]) == DImode)
18391 return 3 + 1;
18393 else
/* The REX.X or REX.B bits use the 3-byte VEX prefix.  */
18396 if (MEM_P (recog_data.operand[i])
18397 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
18398 return 3 + 1;
18401 return 2 + 1;
18404 /* Return the maximum number of instructions a cpu can issue. */
18406 static int
18407 ix86_issue_rate (void)
18409 switch (ix86_tune)
18411 case PROCESSOR_PENTIUM:
18412 case PROCESSOR_K6:
18413 return 2;
18415 case PROCESSOR_PENTIUMPRO:
18416 case PROCESSOR_PENTIUM4:
18417 case PROCESSOR_ATHLON:
18418 case PROCESSOR_K8:
18419 case PROCESSOR_AMDFAM10:
18420 case PROCESSOR_NOCONA:
18421 case PROCESSOR_GENERIC32:
18422 case PROCESSOR_GENERIC64:
18423 return 3;
18425 case PROCESSOR_CORE2:
18426 return 4;
18428 default:
18429 return 1;
/* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
   by DEP_INSN and nothing else set by DEP_INSN.  */
18436 static int
18437 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
18439 rtx set, set2;
18441 /* Simplify the test for uninteresting insns. */
18442 if (insn_type != TYPE_SETCC
18443 && insn_type != TYPE_ICMOV
18444 && insn_type != TYPE_FCMOV
18445 && insn_type != TYPE_IBR)
18446 return 0;
18448 if ((set = single_set (dep_insn)) != 0)
18450 set = SET_DEST (set);
18451 set2 = NULL_RTX;
18453 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
18454 && XVECLEN (PATTERN (dep_insn), 0) == 2
18455 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
18456 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
18458 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
      set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
18461 else
18462 return 0;
18464 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
18465 return 0;
18467 /* This test is true if the dependent insn reads the flags but
18468 not any other potentially set register. */
18469 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
18470 return 0;
18472 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
18473 return 0;
18475 return 1;
18478 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
18479 address with operands set by DEP_INSN. */
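/* A classic AGI example (an illustrative annotation, not in the
   original sources): on the original Pentium,
       addl $4, %eax
       movl (%eax), %ebx
   loses a cycle because the load's address generation needs the result
   of the add in the immediately preceding cycle; ix86_adjust_cost uses
   this predicate to charge that extra cycle.  */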
18481 static int
18482 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
18484 rtx addr;
18486 if (insn_type == TYPE_LEA
18487 && TARGET_PENTIUM)
18489 addr = PATTERN (insn);
18491 if (GET_CODE (addr) == PARALLEL)
18492 addr = XVECEXP (addr, 0, 0);
18494 gcc_assert (GET_CODE (addr) == SET);
18496 addr = SET_SRC (addr);
18498 else
18500 int i;
18501 extract_insn_cached (insn);
18502 for (i = recog_data.n_operands - 1; i >= 0; --i)
18503 if (MEM_P (recog_data.operand[i]))
18505 addr = XEXP (recog_data.operand[i], 0);
18506 goto found;
18508 return 0;
18509 found:;
18512 return modified_in_p (addr, dep_insn);
18515 static int
18516 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
18518 enum attr_type insn_type, dep_insn_type;
18519 enum attr_memory memory;
18520 rtx set, set2;
18521 int dep_insn_code_number;
18523 /* Anti and output dependencies have zero cost on all CPUs. */
18524 if (REG_NOTE_KIND (link) != 0)
18525 return 0;
18527 dep_insn_code_number = recog_memoized (dep_insn);
18529 /* If we can't recognize the insns, we can't really do anything. */
18530 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
18531 return cost;
18533 insn_type = get_attr_type (insn);
18534 dep_insn_type = get_attr_type (dep_insn);
18536 switch (ix86_tune)
18538 case PROCESSOR_PENTIUM:
18539 /* Address Generation Interlock adds a cycle of latency. */
18540 if (ix86_agi_dependent (insn, dep_insn, insn_type))
18541 cost += 1;
18543 /* ??? Compares pair with jump/setcc. */
18544 if (ix86_flags_dependent (insn, dep_insn, insn_type))
18545 cost = 0;
18547 /* Floating point stores require value to be ready one cycle earlier. */
18548 if (insn_type == TYPE_FMOV
18549 && get_attr_memory (insn) == MEMORY_STORE
18550 && !ix86_agi_dependent (insn, dep_insn, insn_type))
18551 cost += 1;
18552 break;
18554 case PROCESSOR_PENTIUMPRO:
18555 memory = get_attr_memory (insn);
18557 /* INT->FP conversion is expensive. */
18558 if (get_attr_fp_int_src (dep_insn))
18559 cost += 5;
18561 /* There is one cycle extra latency between an FP op and a store. */
18562 if (insn_type == TYPE_FMOV
18563 && (set = single_set (dep_insn)) != NULL_RTX
18564 && (set2 = single_set (insn)) != NULL_RTX
18565 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
18566 && MEM_P (SET_DEST (set2)))
18567 cost += 1;
/* Show the ability of the reorder buffer to hide the latency of a load by
   executing it in parallel with the previous instruction when the
   previous instruction is not needed to compute the address.  */
18572 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
18573 && !ix86_agi_dependent (insn, dep_insn, insn_type))
/* Claim moves to take one cycle, as the core can issue one load
   at a time and the next load can start a cycle later.  */
18577 if (dep_insn_type == TYPE_IMOV
18578 || dep_insn_type == TYPE_FMOV)
18579 cost = 1;
18580 else if (cost > 1)
18581 cost--;
18583 break;
18585 case PROCESSOR_K6:
18586 memory = get_attr_memory (insn);
18588 /* The esp dependency is resolved before the instruction is really
18589 finished. */
18590 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
18591 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
18592 return 1;
18594 /* INT->FP conversion is expensive. */
18595 if (get_attr_fp_int_src (dep_insn))
18596 cost += 5;
/* Show the ability of the reorder buffer to hide the latency of a load by
   executing it in parallel with the previous instruction when the
   previous instruction is not needed to compute the address.  */
18601 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
18602 && !ix86_agi_dependent (insn, dep_insn, insn_type))
/* Claim moves to take one cycle, as the core can issue one load
   at a time and the next load can start a cycle later.  */
18606 if (dep_insn_type == TYPE_IMOV
18607 || dep_insn_type == TYPE_FMOV)
18608 cost = 1;
18609 else if (cost > 2)
18610 cost -= 2;
18611 else
18612 cost = 1;
18614 break;
18616 case PROCESSOR_ATHLON:
18617 case PROCESSOR_K8:
18618 case PROCESSOR_AMDFAM10:
18619 case PROCESSOR_GENERIC32:
18620 case PROCESSOR_GENERIC64:
18621 memory = get_attr_memory (insn);
/* Show the ability of the reorder buffer to hide the latency of a load by
   executing it in parallel with the previous instruction when the
   previous instruction is not needed to compute the address.  */
18626 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
18627 && !ix86_agi_dependent (insn, dep_insn, insn_type))
18629 enum attr_unit unit = get_attr_unit (insn);
18630 int loadcost = 3;
/* Because of the difference between the lengths of the integer and
   floating unit pipeline preparation stages, the memory operands
   for floating point are cheaper.

   ??? For Athlon the difference is most probably 2.  */
18637 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
18638 loadcost = 3;
18639 else
18640 loadcost = TARGET_ATHLON ? 2 : 0;
18642 if (cost >= loadcost)
18643 cost -= loadcost;
18644 else
18645 cost = 0;
18648 default:
18649 break;
18652 return cost;
/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.  */
18659 static int
18660 ia32_multipass_dfa_lookahead (void)
18662 switch (ix86_tune)
18664 case PROCESSOR_PENTIUM:
18665 return 2;
18667 case PROCESSOR_PENTIUMPRO:
18668 case PROCESSOR_K6:
18669 return 1;
18671 default:
18672 return 0;
18677 /* Compute the alignment given to a constant that is being placed in memory.
18678 EXP is the constant and ALIGN is the alignment that the object would
18679 ordinarily have.
18680 The value of this function is used instead of that alignment to align
18681 the object. */
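/* Example (an illustrative annotation, not in the original sources):
   on ia32 a DFmode constant ordinarily gets only 32-bit alignment;
   returning 64 here lets the constant pool avoid misaligned 8-byte
   loads.  */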
int
ix86_constant_alignment (tree exp, int align)
18686 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
18687 || TREE_CODE (exp) == INTEGER_CST)
18689 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
18690 return 64;
18691 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
18692 return 128;
18694 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
18695 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
18696 return BITS_PER_WORD;
18698 return align;
18701 /* Compute the alignment for a static variable.
18702 TYPE is the data type, and ALIGN is the alignment that
18703 the object would ordinarily have. The value of this function is used
18704 instead of that alignment to align the object. */
int
ix86_data_alignment (tree type, int align)
18709 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
18711 if (AGGREGATE_TYPE_P (type)
18712 && TYPE_SIZE (type)
18713 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
18714 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
18715 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
18716 && align < max_align)
18717 align = max_align;
/* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
   to a 16-byte boundary.  */
18721 if (TARGET_64BIT)
18723 if (AGGREGATE_TYPE_P (type)
18724 && TYPE_SIZE (type)
18725 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
18726 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
18727 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
18728 return 128;
18731 if (TREE_CODE (type) == ARRAY_TYPE)
18733 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
18734 return 64;
18735 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
18736 return 128;
18738 else if (TREE_CODE (type) == COMPLEX_TYPE)
18741 if (TYPE_MODE (type) == DCmode && align < 64)
18742 return 64;
18743 if ((TYPE_MODE (type) == XCmode
18744 || TYPE_MODE (type) == TCmode) && align < 128)
18745 return 128;
18747 else if ((TREE_CODE (type) == RECORD_TYPE
18748 || TREE_CODE (type) == UNION_TYPE
18749 || TREE_CODE (type) == QUAL_UNION_TYPE)
18750 && TYPE_FIELDS (type))
18752 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
18753 return 64;
18754 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
18755 return 128;
18757 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
18758 || TREE_CODE (type) == INTEGER_TYPE)
18760 if (TYPE_MODE (type) == DFmode && align < 64)
18761 return 64;
18762 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
18763 return 128;
18766 return align;
18769 /* Compute the alignment for a local variable or a stack slot. TYPE is
18770 the data type, MODE is the widest mode available and ALIGN is the
18771 alignment that the object would ordinarily have. The value of this
18772 macro is used instead of that alignment to align the object. */
18774 unsigned int
18775 ix86_local_alignment (tree type, enum machine_mode mode,
18776 unsigned int align)
18778 /* If TYPE is NULL, we are allocating a stack slot for caller-save
18779 register in MODE. We will return the largest alignment of XF
18780 and DF. */
18781 if (!type)
18783 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
18784 align = GET_MODE_ALIGNMENT (DFmode);
18785 return align;
/* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
   to a 16-byte boundary.  */
18790 if (TARGET_64BIT)
18792 if (AGGREGATE_TYPE_P (type)
18793 && TYPE_SIZE (type)
18794 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
18795 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
18796 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
18797 return 128;
18799 if (TREE_CODE (type) == ARRAY_TYPE)
18801 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
18802 return 64;
18803 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
18804 return 128;
18806 else if (TREE_CODE (type) == COMPLEX_TYPE)
18808 if (TYPE_MODE (type) == DCmode && align < 64)
18809 return 64;
18810 if ((TYPE_MODE (type) == XCmode
18811 || TYPE_MODE (type) == TCmode) && align < 128)
18812 return 128;
18814 else if ((TREE_CODE (type) == RECORD_TYPE
18815 || TREE_CODE (type) == UNION_TYPE
18816 || TREE_CODE (type) == QUAL_UNION_TYPE)
18817 && TYPE_FIELDS (type))
18819 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
18820 return 64;
18821 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
18822 return 128;
18824 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
18825 || TREE_CODE (type) == INTEGER_TYPE)
18828 if (TYPE_MODE (type) == DFmode && align < 64)
18829 return 64;
18830 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
18831 return 128;
18833 return align;
18836 /* Emit RTL insns to initialize the variable parts of a trampoline.
18837 FNADDR is an RTX for the address of the function's pure code.
18838 CXT is an RTX for the static chain value for the function. */
18839 void
18840 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
18842 if (!TARGET_64BIT)
18844 /* Compute offset from the end of the jmp to the target function. */
18845 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
18846 plus_constant (tramp, 10),
18847 NULL_RTX, 1, OPTAB_DIRECT);
18848 emit_move_insn (gen_rtx_MEM (QImode, tramp),
18849 gen_int_mode (0xb9, QImode));
18850 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
18851 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
18852 gen_int_mode (0xe9, QImode));
18853 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
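/* Decoded (an illustrative annotation, not in the original sources),
   the 10-byte 32-bit trampoline emitted above is
       b9 <cxt32>    movl  $CXT, %ecx
       e9 <disp32>   jmp   FNADDR
   where the jmp displacement is relative to the end of the 10-byte
   sequence.  */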
18855 else
18857 int offset = 0;
/* Try to load the address using the shorter movl instead of movabs.
   We may want to support movq for kernel mode, but the kernel does not
   use trampolines at the moment.  */
18861 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
18863 fnaddr = copy_to_mode_reg (DImode, fnaddr);
18864 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18865 gen_int_mode (0xbb41, HImode));
18866 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
18867 gen_lowpart (SImode, fnaddr));
18868 offset += 6;
18870 else
18872 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18873 gen_int_mode (0xbb49, HImode));
18874 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
18875 fnaddr);
18876 offset += 10;
18878 /* Load static chain using movabs to r10. */
18879 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18880 gen_int_mode (0xba49, HImode));
18881 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
18882 cxt);
18883 offset += 10;
/* Jump to r11.  */
18885 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18886 gen_int_mode (0xff49, HImode));
18887 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
18888 gen_int_mode (0xe3, QImode));
18889 offset += 3;
18890 gcc_assert (offset <= TRAMPOLINE_SIZE);
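/* Decoded (an illustrative annotation, not in the original sources),
   the 64-bit trampoline emitted above is
       41 bb <imm32>   movl   $FNADDR, %r11d   (short form)
   or  49 bb <imm64>   movabs $FNADDR, %r11
       49 ba <imm64>   movabs $CXT, %r10
       49 ff e3        jmp    *%r11
   with the two-byte opcode pairs stored little endian by the HImode
   moves.  */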
18893 #ifdef ENABLE_EXECUTE_STACK
18894 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
18895 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
18896 #endif
18899 /* Codes for all the SSE/MMX builtins. */
18900 enum ix86_builtins
18902 IX86_BUILTIN_ADDPS,
18903 IX86_BUILTIN_ADDSS,
18904 IX86_BUILTIN_DIVPS,
18905 IX86_BUILTIN_DIVSS,
18906 IX86_BUILTIN_MULPS,
18907 IX86_BUILTIN_MULSS,
18908 IX86_BUILTIN_SUBPS,
18909 IX86_BUILTIN_SUBSS,
18911 IX86_BUILTIN_CMPEQPS,
18912 IX86_BUILTIN_CMPLTPS,
18913 IX86_BUILTIN_CMPLEPS,
18914 IX86_BUILTIN_CMPGTPS,
18915 IX86_BUILTIN_CMPGEPS,
18916 IX86_BUILTIN_CMPNEQPS,
18917 IX86_BUILTIN_CMPNLTPS,
18918 IX86_BUILTIN_CMPNLEPS,
18919 IX86_BUILTIN_CMPNGTPS,
18920 IX86_BUILTIN_CMPNGEPS,
18921 IX86_BUILTIN_CMPORDPS,
18922 IX86_BUILTIN_CMPUNORDPS,
18923 IX86_BUILTIN_CMPEQSS,
18924 IX86_BUILTIN_CMPLTSS,
18925 IX86_BUILTIN_CMPLESS,
18926 IX86_BUILTIN_CMPNEQSS,
18927 IX86_BUILTIN_CMPNLTSS,
18928 IX86_BUILTIN_CMPNLESS,
18929 IX86_BUILTIN_CMPNGTSS,
18930 IX86_BUILTIN_CMPNGESS,
18931 IX86_BUILTIN_CMPORDSS,
18932 IX86_BUILTIN_CMPUNORDSS,
18934 IX86_BUILTIN_COMIEQSS,
18935 IX86_BUILTIN_COMILTSS,
18936 IX86_BUILTIN_COMILESS,
18937 IX86_BUILTIN_COMIGTSS,
18938 IX86_BUILTIN_COMIGESS,
18939 IX86_BUILTIN_COMINEQSS,
18940 IX86_BUILTIN_UCOMIEQSS,
18941 IX86_BUILTIN_UCOMILTSS,
18942 IX86_BUILTIN_UCOMILESS,
18943 IX86_BUILTIN_UCOMIGTSS,
18944 IX86_BUILTIN_UCOMIGESS,
18945 IX86_BUILTIN_UCOMINEQSS,
18947 IX86_BUILTIN_CVTPI2PS,
18948 IX86_BUILTIN_CVTPS2PI,
18949 IX86_BUILTIN_CVTSI2SS,
18950 IX86_BUILTIN_CVTSI642SS,
18951 IX86_BUILTIN_CVTSS2SI,
18952 IX86_BUILTIN_CVTSS2SI64,
18953 IX86_BUILTIN_CVTTPS2PI,
18954 IX86_BUILTIN_CVTTSS2SI,
18955 IX86_BUILTIN_CVTTSS2SI64,
18957 IX86_BUILTIN_MAXPS,
18958 IX86_BUILTIN_MAXSS,
18959 IX86_BUILTIN_MINPS,
18960 IX86_BUILTIN_MINSS,
18962 IX86_BUILTIN_LOADUPS,
18963 IX86_BUILTIN_STOREUPS,
18964 IX86_BUILTIN_MOVSS,
18966 IX86_BUILTIN_MOVHLPS,
18967 IX86_BUILTIN_MOVLHPS,
18968 IX86_BUILTIN_LOADHPS,
18969 IX86_BUILTIN_LOADLPS,
18970 IX86_BUILTIN_STOREHPS,
18971 IX86_BUILTIN_STORELPS,
18973 IX86_BUILTIN_MASKMOVQ,
18974 IX86_BUILTIN_MOVMSKPS,
18975 IX86_BUILTIN_PMOVMSKB,
18977 IX86_BUILTIN_MOVNTPS,
18978 IX86_BUILTIN_MOVNTQ,
18980 IX86_BUILTIN_LOADDQU,
18981 IX86_BUILTIN_STOREDQU,
18983 IX86_BUILTIN_PACKSSWB,
18984 IX86_BUILTIN_PACKSSDW,
18985 IX86_BUILTIN_PACKUSWB,
18987 IX86_BUILTIN_PADDB,
18988 IX86_BUILTIN_PADDW,
18989 IX86_BUILTIN_PADDD,
18990 IX86_BUILTIN_PADDQ,
18991 IX86_BUILTIN_PADDSB,
18992 IX86_BUILTIN_PADDSW,
18993 IX86_BUILTIN_PADDUSB,
18994 IX86_BUILTIN_PADDUSW,
18995 IX86_BUILTIN_PSUBB,
18996 IX86_BUILTIN_PSUBW,
18997 IX86_BUILTIN_PSUBD,
18998 IX86_BUILTIN_PSUBQ,
18999 IX86_BUILTIN_PSUBSB,
19000 IX86_BUILTIN_PSUBSW,
19001 IX86_BUILTIN_PSUBUSB,
19002 IX86_BUILTIN_PSUBUSW,
19004 IX86_BUILTIN_PAND,
19005 IX86_BUILTIN_PANDN,
19006 IX86_BUILTIN_POR,
19007 IX86_BUILTIN_PXOR,
19009 IX86_BUILTIN_PAVGB,
19010 IX86_BUILTIN_PAVGW,
19012 IX86_BUILTIN_PCMPEQB,
19013 IX86_BUILTIN_PCMPEQW,
19014 IX86_BUILTIN_PCMPEQD,
19015 IX86_BUILTIN_PCMPGTB,
19016 IX86_BUILTIN_PCMPGTW,
19017 IX86_BUILTIN_PCMPGTD,
19019 IX86_BUILTIN_PMADDWD,
19021 IX86_BUILTIN_PMAXSW,
19022 IX86_BUILTIN_PMAXUB,
19023 IX86_BUILTIN_PMINSW,
19024 IX86_BUILTIN_PMINUB,
19026 IX86_BUILTIN_PMULHUW,
19027 IX86_BUILTIN_PMULHW,
19028 IX86_BUILTIN_PMULLW,
19030 IX86_BUILTIN_PSADBW,
19031 IX86_BUILTIN_PSHUFW,
19033 IX86_BUILTIN_PSLLW,
19034 IX86_BUILTIN_PSLLD,
19035 IX86_BUILTIN_PSLLQ,
19036 IX86_BUILTIN_PSRAW,
19037 IX86_BUILTIN_PSRAD,
19038 IX86_BUILTIN_PSRLW,
19039 IX86_BUILTIN_PSRLD,
19040 IX86_BUILTIN_PSRLQ,
19041 IX86_BUILTIN_PSLLWI,
19042 IX86_BUILTIN_PSLLDI,
19043 IX86_BUILTIN_PSLLQI,
19044 IX86_BUILTIN_PSRAWI,
19045 IX86_BUILTIN_PSRADI,
19046 IX86_BUILTIN_PSRLWI,
19047 IX86_BUILTIN_PSRLDI,
19048 IX86_BUILTIN_PSRLQI,
19050 IX86_BUILTIN_PUNPCKHBW,
19051 IX86_BUILTIN_PUNPCKHWD,
19052 IX86_BUILTIN_PUNPCKHDQ,
19053 IX86_BUILTIN_PUNPCKLBW,
19054 IX86_BUILTIN_PUNPCKLWD,
19055 IX86_BUILTIN_PUNPCKLDQ,
19057 IX86_BUILTIN_SHUFPS,
19059 IX86_BUILTIN_RCPPS,
19060 IX86_BUILTIN_RCPSS,
19061 IX86_BUILTIN_RSQRTPS,
19062 IX86_BUILTIN_RSQRTPS_NR,
19063 IX86_BUILTIN_RSQRTSS,
19064 IX86_BUILTIN_RSQRTF,
19065 IX86_BUILTIN_SQRTPS,
19066 IX86_BUILTIN_SQRTPS_NR,
19067 IX86_BUILTIN_SQRTSS,
19069 IX86_BUILTIN_UNPCKHPS,
19070 IX86_BUILTIN_UNPCKLPS,
19072 IX86_BUILTIN_ANDPS,
19073 IX86_BUILTIN_ANDNPS,
19074 IX86_BUILTIN_ORPS,
19075 IX86_BUILTIN_XORPS,
19077 IX86_BUILTIN_EMMS,
19078 IX86_BUILTIN_LDMXCSR,
19079 IX86_BUILTIN_STMXCSR,
19080 IX86_BUILTIN_SFENCE,
19082 /* 3DNow! Original */
19083 IX86_BUILTIN_FEMMS,
19084 IX86_BUILTIN_PAVGUSB,
19085 IX86_BUILTIN_PF2ID,
19086 IX86_BUILTIN_PFACC,
19087 IX86_BUILTIN_PFADD,
19088 IX86_BUILTIN_PFCMPEQ,
19089 IX86_BUILTIN_PFCMPGE,
19090 IX86_BUILTIN_PFCMPGT,
19091 IX86_BUILTIN_PFMAX,
19092 IX86_BUILTIN_PFMIN,
19093 IX86_BUILTIN_PFMUL,
19094 IX86_BUILTIN_PFRCP,
19095 IX86_BUILTIN_PFRCPIT1,
19096 IX86_BUILTIN_PFRCPIT2,
19097 IX86_BUILTIN_PFRSQIT1,
19098 IX86_BUILTIN_PFRSQRT,
19099 IX86_BUILTIN_PFSUB,
19100 IX86_BUILTIN_PFSUBR,
19101 IX86_BUILTIN_PI2FD,
19102 IX86_BUILTIN_PMULHRW,
19104 /* 3DNow! Athlon Extensions */
19105 IX86_BUILTIN_PF2IW,
19106 IX86_BUILTIN_PFNACC,
19107 IX86_BUILTIN_PFPNACC,
19108 IX86_BUILTIN_PI2FW,
19109 IX86_BUILTIN_PSWAPDSI,
19110 IX86_BUILTIN_PSWAPDSF,
19112 /* SSE2 */
19113 IX86_BUILTIN_ADDPD,
19114 IX86_BUILTIN_ADDSD,
19115 IX86_BUILTIN_DIVPD,
19116 IX86_BUILTIN_DIVSD,
19117 IX86_BUILTIN_MULPD,
19118 IX86_BUILTIN_MULSD,
19119 IX86_BUILTIN_SUBPD,
19120 IX86_BUILTIN_SUBSD,
19122 IX86_BUILTIN_CMPEQPD,
19123 IX86_BUILTIN_CMPLTPD,
19124 IX86_BUILTIN_CMPLEPD,
19125 IX86_BUILTIN_CMPGTPD,
19126 IX86_BUILTIN_CMPGEPD,
19127 IX86_BUILTIN_CMPNEQPD,
19128 IX86_BUILTIN_CMPNLTPD,
19129 IX86_BUILTIN_CMPNLEPD,
19130 IX86_BUILTIN_CMPNGTPD,
19131 IX86_BUILTIN_CMPNGEPD,
19132 IX86_BUILTIN_CMPORDPD,
19133 IX86_BUILTIN_CMPUNORDPD,
19134 IX86_BUILTIN_CMPEQSD,
19135 IX86_BUILTIN_CMPLTSD,
19136 IX86_BUILTIN_CMPLESD,
19137 IX86_BUILTIN_CMPNEQSD,
19138 IX86_BUILTIN_CMPNLTSD,
19139 IX86_BUILTIN_CMPNLESD,
19140 IX86_BUILTIN_CMPORDSD,
19141 IX86_BUILTIN_CMPUNORDSD,
19143 IX86_BUILTIN_COMIEQSD,
19144 IX86_BUILTIN_COMILTSD,
19145 IX86_BUILTIN_COMILESD,
19146 IX86_BUILTIN_COMIGTSD,
19147 IX86_BUILTIN_COMIGESD,
19148 IX86_BUILTIN_COMINEQSD,
19149 IX86_BUILTIN_UCOMIEQSD,
19150 IX86_BUILTIN_UCOMILTSD,
19151 IX86_BUILTIN_UCOMILESD,
19152 IX86_BUILTIN_UCOMIGTSD,
19153 IX86_BUILTIN_UCOMIGESD,
19154 IX86_BUILTIN_UCOMINEQSD,
19156 IX86_BUILTIN_MAXPD,
19157 IX86_BUILTIN_MAXSD,
19158 IX86_BUILTIN_MINPD,
19159 IX86_BUILTIN_MINSD,
19161 IX86_BUILTIN_ANDPD,
19162 IX86_BUILTIN_ANDNPD,
19163 IX86_BUILTIN_ORPD,
19164 IX86_BUILTIN_XORPD,
19166 IX86_BUILTIN_SQRTPD,
19167 IX86_BUILTIN_SQRTSD,
19169 IX86_BUILTIN_UNPCKHPD,
19170 IX86_BUILTIN_UNPCKLPD,
19172 IX86_BUILTIN_SHUFPD,
19174 IX86_BUILTIN_LOADUPD,
19175 IX86_BUILTIN_STOREUPD,
19176 IX86_BUILTIN_MOVSD,
19178 IX86_BUILTIN_LOADHPD,
19179 IX86_BUILTIN_LOADLPD,
19181 IX86_BUILTIN_CVTDQ2PD,
19182 IX86_BUILTIN_CVTDQ2PS,
19184 IX86_BUILTIN_CVTPD2DQ,
19185 IX86_BUILTIN_CVTPD2PI,
19186 IX86_BUILTIN_CVTPD2PS,
19187 IX86_BUILTIN_CVTTPD2DQ,
19188 IX86_BUILTIN_CVTTPD2PI,
19190 IX86_BUILTIN_CVTPI2PD,
19191 IX86_BUILTIN_CVTSI2SD,
19192 IX86_BUILTIN_CVTSI642SD,
19194 IX86_BUILTIN_CVTSD2SI,
19195 IX86_BUILTIN_CVTSD2SI64,
19196 IX86_BUILTIN_CVTSD2SS,
19197 IX86_BUILTIN_CVTSS2SD,
19198 IX86_BUILTIN_CVTTSD2SI,
19199 IX86_BUILTIN_CVTTSD2SI64,
19201 IX86_BUILTIN_CVTPS2DQ,
19202 IX86_BUILTIN_CVTPS2PD,
19203 IX86_BUILTIN_CVTTPS2DQ,
19205 IX86_BUILTIN_MOVNTI,
19206 IX86_BUILTIN_MOVNTPD,
19207 IX86_BUILTIN_MOVNTDQ,
19209 IX86_BUILTIN_MOVQ128,
19211 /* SSE2 MMX */
19212 IX86_BUILTIN_MASKMOVDQU,
19213 IX86_BUILTIN_MOVMSKPD,
19214 IX86_BUILTIN_PMOVMSKB128,
19216 IX86_BUILTIN_PACKSSWB128,
19217 IX86_BUILTIN_PACKSSDW128,
19218 IX86_BUILTIN_PACKUSWB128,
19220 IX86_BUILTIN_PADDB128,
19221 IX86_BUILTIN_PADDW128,
19222 IX86_BUILTIN_PADDD128,
19223 IX86_BUILTIN_PADDQ128,
19224 IX86_BUILTIN_PADDSB128,
19225 IX86_BUILTIN_PADDSW128,
19226 IX86_BUILTIN_PADDUSB128,
19227 IX86_BUILTIN_PADDUSW128,
19228 IX86_BUILTIN_PSUBB128,
19229 IX86_BUILTIN_PSUBW128,
19230 IX86_BUILTIN_PSUBD128,
19231 IX86_BUILTIN_PSUBQ128,
19232 IX86_BUILTIN_PSUBSB128,
19233 IX86_BUILTIN_PSUBSW128,
19234 IX86_BUILTIN_PSUBUSB128,
19235 IX86_BUILTIN_PSUBUSW128,
19237 IX86_BUILTIN_PAND128,
19238 IX86_BUILTIN_PANDN128,
19239 IX86_BUILTIN_POR128,
19240 IX86_BUILTIN_PXOR128,
19242 IX86_BUILTIN_PAVGB128,
19243 IX86_BUILTIN_PAVGW128,
19245 IX86_BUILTIN_PCMPEQB128,
19246 IX86_BUILTIN_PCMPEQW128,
19247 IX86_BUILTIN_PCMPEQD128,
19248 IX86_BUILTIN_PCMPGTB128,
19249 IX86_BUILTIN_PCMPGTW128,
19250 IX86_BUILTIN_PCMPGTD128,
19252 IX86_BUILTIN_PMADDWD128,
19254 IX86_BUILTIN_PMAXSW128,
19255 IX86_BUILTIN_PMAXUB128,
19256 IX86_BUILTIN_PMINSW128,
19257 IX86_BUILTIN_PMINUB128,
19259 IX86_BUILTIN_PMULUDQ,
19260 IX86_BUILTIN_PMULUDQ128,
19261 IX86_BUILTIN_PMULHUW128,
19262 IX86_BUILTIN_PMULHW128,
19263 IX86_BUILTIN_PMULLW128,
19265 IX86_BUILTIN_PSADBW128,
19266 IX86_BUILTIN_PSHUFHW,
19267 IX86_BUILTIN_PSHUFLW,
19268 IX86_BUILTIN_PSHUFD,
19270 IX86_BUILTIN_PSLLDQI128,
19271 IX86_BUILTIN_PSLLWI128,
19272 IX86_BUILTIN_PSLLDI128,
19273 IX86_BUILTIN_PSLLQI128,
19274 IX86_BUILTIN_PSRAWI128,
19275 IX86_BUILTIN_PSRADI128,
19276 IX86_BUILTIN_PSRLDQI128,
19277 IX86_BUILTIN_PSRLWI128,
19278 IX86_BUILTIN_PSRLDI128,
19279 IX86_BUILTIN_PSRLQI128,
19281 IX86_BUILTIN_PSLLDQ128,
19282 IX86_BUILTIN_PSLLW128,
19283 IX86_BUILTIN_PSLLD128,
19284 IX86_BUILTIN_PSLLQ128,
19285 IX86_BUILTIN_PSRAW128,
19286 IX86_BUILTIN_PSRAD128,
19287 IX86_BUILTIN_PSRLW128,
19288 IX86_BUILTIN_PSRLD128,
19289 IX86_BUILTIN_PSRLQ128,
19291 IX86_BUILTIN_PUNPCKHBW128,
19292 IX86_BUILTIN_PUNPCKHWD128,
19293 IX86_BUILTIN_PUNPCKHDQ128,
19294 IX86_BUILTIN_PUNPCKHQDQ128,
19295 IX86_BUILTIN_PUNPCKLBW128,
19296 IX86_BUILTIN_PUNPCKLWD128,
19297 IX86_BUILTIN_PUNPCKLDQ128,
19298 IX86_BUILTIN_PUNPCKLQDQ128,
19300 IX86_BUILTIN_CLFLUSH,
19301 IX86_BUILTIN_MFENCE,
19302 IX86_BUILTIN_LFENCE,
19304 /* SSE3. */
19305 IX86_BUILTIN_ADDSUBPS,
19306 IX86_BUILTIN_HADDPS,
19307 IX86_BUILTIN_HSUBPS,
19308 IX86_BUILTIN_MOVSHDUP,
19309 IX86_BUILTIN_MOVSLDUP,
19310 IX86_BUILTIN_ADDSUBPD,
19311 IX86_BUILTIN_HADDPD,
19312 IX86_BUILTIN_HSUBPD,
19313 IX86_BUILTIN_LDDQU,
19315 IX86_BUILTIN_MONITOR,
19316 IX86_BUILTIN_MWAIT,
19318 /* SSSE3. */
19319 IX86_BUILTIN_PHADDW,
19320 IX86_BUILTIN_PHADDD,
19321 IX86_BUILTIN_PHADDSW,
19322 IX86_BUILTIN_PHSUBW,
19323 IX86_BUILTIN_PHSUBD,
19324 IX86_BUILTIN_PHSUBSW,
19325 IX86_BUILTIN_PMADDUBSW,
19326 IX86_BUILTIN_PMULHRSW,
19327 IX86_BUILTIN_PSHUFB,
19328 IX86_BUILTIN_PSIGNB,
19329 IX86_BUILTIN_PSIGNW,
19330 IX86_BUILTIN_PSIGND,
19331 IX86_BUILTIN_PALIGNR,
19332 IX86_BUILTIN_PABSB,
19333 IX86_BUILTIN_PABSW,
19334 IX86_BUILTIN_PABSD,
19336 IX86_BUILTIN_PHADDW128,
19337 IX86_BUILTIN_PHADDD128,
19338 IX86_BUILTIN_PHADDSW128,
19339 IX86_BUILTIN_PHSUBW128,
19340 IX86_BUILTIN_PHSUBD128,
19341 IX86_BUILTIN_PHSUBSW128,
19342 IX86_BUILTIN_PMADDUBSW128,
19343 IX86_BUILTIN_PMULHRSW128,
19344 IX86_BUILTIN_PSHUFB128,
19345 IX86_BUILTIN_PSIGNB128,
19346 IX86_BUILTIN_PSIGNW128,
19347 IX86_BUILTIN_PSIGND128,
19348 IX86_BUILTIN_PALIGNR128,
19349 IX86_BUILTIN_PABSB128,
19350 IX86_BUILTIN_PABSW128,
19351 IX86_BUILTIN_PABSD128,
19353 /* AMDFAM10 - SSE4A New Instructions. */
19354 IX86_BUILTIN_MOVNTSD,
19355 IX86_BUILTIN_MOVNTSS,
19356 IX86_BUILTIN_EXTRQI,
19357 IX86_BUILTIN_EXTRQ,
19358 IX86_BUILTIN_INSERTQI,
19359 IX86_BUILTIN_INSERTQ,
19361 /* SSE4.1. */
19362 IX86_BUILTIN_BLENDPD,
19363 IX86_BUILTIN_BLENDPS,
19364 IX86_BUILTIN_BLENDVPD,
19365 IX86_BUILTIN_BLENDVPS,
19366 IX86_BUILTIN_PBLENDVB128,
19367 IX86_BUILTIN_PBLENDW128,
19369 IX86_BUILTIN_DPPD,
19370 IX86_BUILTIN_DPPS,
19372 IX86_BUILTIN_INSERTPS128,
19374 IX86_BUILTIN_MOVNTDQA,
19375 IX86_BUILTIN_MPSADBW128,
19376 IX86_BUILTIN_PACKUSDW128,
19377 IX86_BUILTIN_PCMPEQQ,
19378 IX86_BUILTIN_PHMINPOSUW128,
19380 IX86_BUILTIN_PMAXSB128,
19381 IX86_BUILTIN_PMAXSD128,
19382 IX86_BUILTIN_PMAXUD128,
19383 IX86_BUILTIN_PMAXUW128,
19385 IX86_BUILTIN_PMINSB128,
19386 IX86_BUILTIN_PMINSD128,
19387 IX86_BUILTIN_PMINUD128,
19388 IX86_BUILTIN_PMINUW128,
19390 IX86_BUILTIN_PMOVSXBW128,
19391 IX86_BUILTIN_PMOVSXBD128,
19392 IX86_BUILTIN_PMOVSXBQ128,
19393 IX86_BUILTIN_PMOVSXWD128,
19394 IX86_BUILTIN_PMOVSXWQ128,
19395 IX86_BUILTIN_PMOVSXDQ128,
19397 IX86_BUILTIN_PMOVZXBW128,
19398 IX86_BUILTIN_PMOVZXBD128,
19399 IX86_BUILTIN_PMOVZXBQ128,
19400 IX86_BUILTIN_PMOVZXWD128,
19401 IX86_BUILTIN_PMOVZXWQ128,
19402 IX86_BUILTIN_PMOVZXDQ128,
19404 IX86_BUILTIN_PMULDQ128,
19405 IX86_BUILTIN_PMULLD128,
19407 IX86_BUILTIN_ROUNDPD,
19408 IX86_BUILTIN_ROUNDPS,
19409 IX86_BUILTIN_ROUNDSD,
19410 IX86_BUILTIN_ROUNDSS,
19412 IX86_BUILTIN_PTESTZ,
19413 IX86_BUILTIN_PTESTC,
19414 IX86_BUILTIN_PTESTNZC,
19416 IX86_BUILTIN_VEC_INIT_V2SI,
19417 IX86_BUILTIN_VEC_INIT_V4HI,
19418 IX86_BUILTIN_VEC_INIT_V8QI,
19419 IX86_BUILTIN_VEC_EXT_V2DF,
19420 IX86_BUILTIN_VEC_EXT_V2DI,
19421 IX86_BUILTIN_VEC_EXT_V4SF,
19422 IX86_BUILTIN_VEC_EXT_V4SI,
19423 IX86_BUILTIN_VEC_EXT_V8HI,
19424 IX86_BUILTIN_VEC_EXT_V2SI,
19425 IX86_BUILTIN_VEC_EXT_V4HI,
19426 IX86_BUILTIN_VEC_EXT_V16QI,
19427 IX86_BUILTIN_VEC_SET_V2DI,
19428 IX86_BUILTIN_VEC_SET_V4SF,
19429 IX86_BUILTIN_VEC_SET_V4SI,
19430 IX86_BUILTIN_VEC_SET_V8HI,
19431 IX86_BUILTIN_VEC_SET_V4HI,
19432 IX86_BUILTIN_VEC_SET_V16QI,
19434 IX86_BUILTIN_VEC_PACK_SFIX,
19436 /* SSE4.2. */
19437 IX86_BUILTIN_CRC32QI,
19438 IX86_BUILTIN_CRC32HI,
19439 IX86_BUILTIN_CRC32SI,
19440 IX86_BUILTIN_CRC32DI,
19442 IX86_BUILTIN_PCMPESTRI128,
19443 IX86_BUILTIN_PCMPESTRM128,
19444 IX86_BUILTIN_PCMPESTRA128,
19445 IX86_BUILTIN_PCMPESTRC128,
19446 IX86_BUILTIN_PCMPESTRO128,
19447 IX86_BUILTIN_PCMPESTRS128,
19448 IX86_BUILTIN_PCMPESTRZ128,
19449 IX86_BUILTIN_PCMPISTRI128,
19450 IX86_BUILTIN_PCMPISTRM128,
19451 IX86_BUILTIN_PCMPISTRA128,
19452 IX86_BUILTIN_PCMPISTRC128,
19453 IX86_BUILTIN_PCMPISTRO128,
19454 IX86_BUILTIN_PCMPISTRS128,
19455 IX86_BUILTIN_PCMPISTRZ128,
19457 IX86_BUILTIN_PCMPGTQ,
19459 /* AES instructions */
19460 IX86_BUILTIN_AESENC128,
19461 IX86_BUILTIN_AESENCLAST128,
19462 IX86_BUILTIN_AESDEC128,
19463 IX86_BUILTIN_AESDECLAST128,
19464 IX86_BUILTIN_AESIMC128,
19465 IX86_BUILTIN_AESKEYGENASSIST128,
19467 /* PCLMUL instruction */
19468 IX86_BUILTIN_PCLMULQDQ128,
19470 /* AVX */
19471 IX86_BUILTIN_ADDPD256,
19472 IX86_BUILTIN_ADDPS256,
19473 IX86_BUILTIN_ADDSUBPD256,
19474 IX86_BUILTIN_ADDSUBPS256,
19475 IX86_BUILTIN_ANDPD256,
19476 IX86_BUILTIN_ANDPS256,
19477 IX86_BUILTIN_ANDNPD256,
19478 IX86_BUILTIN_ANDNPS256,
19479 IX86_BUILTIN_BLENDPD256,
19480 IX86_BUILTIN_BLENDPS256,
19481 IX86_BUILTIN_BLENDVPD256,
19482 IX86_BUILTIN_BLENDVPS256,
19483 IX86_BUILTIN_DIVPD256,
19484 IX86_BUILTIN_DIVPS256,
19485 IX86_BUILTIN_DPPS256,
19486 IX86_BUILTIN_HADDPD256,
19487 IX86_BUILTIN_HADDPS256,
19488 IX86_BUILTIN_HSUBPD256,
19489 IX86_BUILTIN_HSUBPS256,
19490 IX86_BUILTIN_MAXPD256,
19491 IX86_BUILTIN_MAXPS256,
19492 IX86_BUILTIN_MINPD256,
19493 IX86_BUILTIN_MINPS256,
19494 IX86_BUILTIN_MULPD256,
19495 IX86_BUILTIN_MULPS256,
19496 IX86_BUILTIN_ORPD256,
19497 IX86_BUILTIN_ORPS256,
19498 IX86_BUILTIN_SHUFPD256,
19499 IX86_BUILTIN_SHUFPS256,
19500 IX86_BUILTIN_SUBPD256,
19501 IX86_BUILTIN_SUBPS256,
19502 IX86_BUILTIN_XORPD256,
19503 IX86_BUILTIN_XORPS256,
19504 IX86_BUILTIN_CMPSD,
19505 IX86_BUILTIN_CMPSS,
19506 IX86_BUILTIN_CMPPD,
19507 IX86_BUILTIN_CMPPS,
19508 IX86_BUILTIN_CMPPD256,
19509 IX86_BUILTIN_CMPPS256,
19510 IX86_BUILTIN_CVTDQ2PD256,
19511 IX86_BUILTIN_CVTDQ2PS256,
19512 IX86_BUILTIN_CVTPD2PS256,
19513 IX86_BUILTIN_CVTPS2DQ256,
19514 IX86_BUILTIN_CVTPS2PD256,
19515 IX86_BUILTIN_CVTTPD2DQ256,
19516 IX86_BUILTIN_CVTPD2DQ256,
19517 IX86_BUILTIN_CVTTPS2DQ256,
19518 IX86_BUILTIN_EXTRACTF128PD256,
19519 IX86_BUILTIN_EXTRACTF128PS256,
19520 IX86_BUILTIN_EXTRACTF128SI256,
19521 IX86_BUILTIN_VZEROALL,
19522 IX86_BUILTIN_VZEROUPPER,
19523 IX86_BUILTIN_VZEROUPPER_REX64,
19524 IX86_BUILTIN_VPERMILVARPD,
19525 IX86_BUILTIN_VPERMILVARPS,
19526 IX86_BUILTIN_VPERMILVARPD256,
19527 IX86_BUILTIN_VPERMILVARPS256,
19528 IX86_BUILTIN_VPERMILPD,
19529 IX86_BUILTIN_VPERMILPS,
19530 IX86_BUILTIN_VPERMILPD256,
19531 IX86_BUILTIN_VPERMILPS256,
19532 IX86_BUILTIN_VPERMIL2PD,
19533 IX86_BUILTIN_VPERMIL2PS,
19534 IX86_BUILTIN_VPERMIL2PD256,
19535 IX86_BUILTIN_VPERMIL2PS256,
19536 IX86_BUILTIN_VPERM2F128PD256,
19537 IX86_BUILTIN_VPERM2F128PS256,
19538 IX86_BUILTIN_VPERM2F128SI256,
19539 IX86_BUILTIN_VBROADCASTSS,
19540 IX86_BUILTIN_VBROADCASTSD256,
19541 IX86_BUILTIN_VBROADCASTSS256,
19542 IX86_BUILTIN_VBROADCASTPD256,
19543 IX86_BUILTIN_VBROADCASTPS256,
19544 IX86_BUILTIN_VINSERTF128PD256,
19545 IX86_BUILTIN_VINSERTF128PS256,
19546 IX86_BUILTIN_VINSERTF128SI256,
19547 IX86_BUILTIN_LOADUPD256,
19548 IX86_BUILTIN_LOADUPS256,
19549 IX86_BUILTIN_STOREUPD256,
19550 IX86_BUILTIN_STOREUPS256,
19551 IX86_BUILTIN_LDDQU256,
19552 IX86_BUILTIN_LOADDQU256,
19553 IX86_BUILTIN_STOREDQU256,
19554 IX86_BUILTIN_MASKLOADPD,
19555 IX86_BUILTIN_MASKLOADPS,
19556 IX86_BUILTIN_MASKSTOREPD,
19557 IX86_BUILTIN_MASKSTOREPS,
19558 IX86_BUILTIN_MASKLOADPD256,
19559 IX86_BUILTIN_MASKLOADPS256,
19560 IX86_BUILTIN_MASKSTOREPD256,
19561 IX86_BUILTIN_MASKSTOREPS256,
19562 IX86_BUILTIN_MOVSHDUP256,
19563 IX86_BUILTIN_MOVSLDUP256,
19564 IX86_BUILTIN_MOVDDUP256,
19566 IX86_BUILTIN_SQRTPD256,
19567 IX86_BUILTIN_SQRTPS256,
19568 IX86_BUILTIN_SQRTPS_NR256,
19569 IX86_BUILTIN_RSQRTPS256,
19570 IX86_BUILTIN_RSQRTPS_NR256,
19572 IX86_BUILTIN_RCPPS256,
19574 IX86_BUILTIN_ROUNDPD256,
19575 IX86_BUILTIN_ROUNDPS256,
19577 IX86_BUILTIN_UNPCKHPD256,
19578 IX86_BUILTIN_UNPCKLPD256,
19579 IX86_BUILTIN_UNPCKHPS256,
19580 IX86_BUILTIN_UNPCKLPS256,
19582 IX86_BUILTIN_SI256_SI,
19583 IX86_BUILTIN_PS256_PS,
19584 IX86_BUILTIN_PD256_PD,
19585 IX86_BUILTIN_SI_SI256,
19586 IX86_BUILTIN_PS_PS256,
19587 IX86_BUILTIN_PD_PD256,
19589 IX86_BUILTIN_VTESTZPD,
19590 IX86_BUILTIN_VTESTCPD,
19591 IX86_BUILTIN_VTESTNZCPD,
19592 IX86_BUILTIN_VTESTZPS,
19593 IX86_BUILTIN_VTESTCPS,
19594 IX86_BUILTIN_VTESTNZCPS,
19595 IX86_BUILTIN_VTESTZPD256,
19596 IX86_BUILTIN_VTESTCPD256,
19597 IX86_BUILTIN_VTESTNZCPD256,
19598 IX86_BUILTIN_VTESTZPS256,
19599 IX86_BUILTIN_VTESTCPS256,
19600 IX86_BUILTIN_VTESTNZCPS256,
19601 IX86_BUILTIN_PTESTZ256,
19602 IX86_BUILTIN_PTESTC256,
19603 IX86_BUILTIN_PTESTNZC256,
19605 IX86_BUILTIN_MOVMSKPD256,
19606 IX86_BUILTIN_MOVMSKPS256,
19608 /* TFmode support builtins. */
19609 IX86_BUILTIN_INFQ,
19610 IX86_BUILTIN_FABSQ,
19611 IX86_BUILTIN_COPYSIGNQ,
19613 /* SSE5 instructions */
19614 IX86_BUILTIN_FMADDSS,
19615 IX86_BUILTIN_FMADDSD,
19616 IX86_BUILTIN_FMADDPS,
19617 IX86_BUILTIN_FMADDPD,
19618 IX86_BUILTIN_FMSUBSS,
19619 IX86_BUILTIN_FMSUBSD,
19620 IX86_BUILTIN_FMSUBPS,
19621 IX86_BUILTIN_FMSUBPD,
19622 IX86_BUILTIN_FNMADDSS,
19623 IX86_BUILTIN_FNMADDSD,
19624 IX86_BUILTIN_FNMADDPS,
19625 IX86_BUILTIN_FNMADDPD,
19626 IX86_BUILTIN_FNMSUBSS,
19627 IX86_BUILTIN_FNMSUBSD,
19628 IX86_BUILTIN_FNMSUBPS,
19629 IX86_BUILTIN_FNMSUBPD,
19630 IX86_BUILTIN_PCMOV,
19631 IX86_BUILTIN_PCMOV_V2DI,
19632 IX86_BUILTIN_PCMOV_V4SI,
19633 IX86_BUILTIN_PCMOV_V8HI,
19634 IX86_BUILTIN_PCMOV_V16QI,
19635 IX86_BUILTIN_PCMOV_V4SF,
19636 IX86_BUILTIN_PCMOV_V2DF,
19637 IX86_BUILTIN_PPERM,
19638 IX86_BUILTIN_PERMPS,
19639 IX86_BUILTIN_PERMPD,
19640 IX86_BUILTIN_PMACSSWW,
19641 IX86_BUILTIN_PMACSWW,
19642 IX86_BUILTIN_PMACSSWD,
19643 IX86_BUILTIN_PMACSWD,
19644 IX86_BUILTIN_PMACSSDD,
19645 IX86_BUILTIN_PMACSDD,
19646 IX86_BUILTIN_PMACSSDQL,
19647 IX86_BUILTIN_PMACSSDQH,
19648 IX86_BUILTIN_PMACSDQL,
19649 IX86_BUILTIN_PMACSDQH,
19650 IX86_BUILTIN_PMADCSSWD,
19651 IX86_BUILTIN_PMADCSWD,
19652 IX86_BUILTIN_PHADDBW,
19653 IX86_BUILTIN_PHADDBD,
19654 IX86_BUILTIN_PHADDBQ,
19655 IX86_BUILTIN_PHADDWD,
19656 IX86_BUILTIN_PHADDWQ,
19657 IX86_BUILTIN_PHADDDQ,
19658 IX86_BUILTIN_PHADDUBW,
19659 IX86_BUILTIN_PHADDUBD,
19660 IX86_BUILTIN_PHADDUBQ,
19661 IX86_BUILTIN_PHADDUWD,
19662 IX86_BUILTIN_PHADDUWQ,
19663 IX86_BUILTIN_PHADDUDQ,
19664 IX86_BUILTIN_PHSUBBW,
19665 IX86_BUILTIN_PHSUBWD,
19666 IX86_BUILTIN_PHSUBDQ,
19667 IX86_BUILTIN_PROTB,
19668 IX86_BUILTIN_PROTW,
19669 IX86_BUILTIN_PROTD,
19670 IX86_BUILTIN_PROTQ,
19671 IX86_BUILTIN_PROTB_IMM,
19672 IX86_BUILTIN_PROTW_IMM,
19673 IX86_BUILTIN_PROTD_IMM,
19674 IX86_BUILTIN_PROTQ_IMM,
19675 IX86_BUILTIN_PSHLB,
19676 IX86_BUILTIN_PSHLW,
19677 IX86_BUILTIN_PSHLD,
19678 IX86_BUILTIN_PSHLQ,
19679 IX86_BUILTIN_PSHAB,
19680 IX86_BUILTIN_PSHAW,
19681 IX86_BUILTIN_PSHAD,
19682 IX86_BUILTIN_PSHAQ,
19683 IX86_BUILTIN_FRCZSS,
19684 IX86_BUILTIN_FRCZSD,
19685 IX86_BUILTIN_FRCZPS,
19686 IX86_BUILTIN_FRCZPD,
19687 IX86_BUILTIN_CVTPH2PS,
19688 IX86_BUILTIN_CVTPS2PH,
19690 IX86_BUILTIN_COMEQSS,
19691 IX86_BUILTIN_COMNESS,
19692 IX86_BUILTIN_COMLTSS,
19693 IX86_BUILTIN_COMLESS,
19694 IX86_BUILTIN_COMGTSS,
19695 IX86_BUILTIN_COMGESS,
19696 IX86_BUILTIN_COMUEQSS,
19697 IX86_BUILTIN_COMUNESS,
19698 IX86_BUILTIN_COMULTSS,
19699 IX86_BUILTIN_COMULESS,
19700 IX86_BUILTIN_COMUGTSS,
19701 IX86_BUILTIN_COMUGESS,
19702 IX86_BUILTIN_COMORDSS,
19703 IX86_BUILTIN_COMUNORDSS,
19704 IX86_BUILTIN_COMFALSESS,
19705 IX86_BUILTIN_COMTRUESS,
19707 IX86_BUILTIN_COMEQSD,
19708 IX86_BUILTIN_COMNESD,
19709 IX86_BUILTIN_COMLTSD,
19710 IX86_BUILTIN_COMLESD,
19711 IX86_BUILTIN_COMGTSD,
19712 IX86_BUILTIN_COMGESD,
19713 IX86_BUILTIN_COMUEQSD,
19714 IX86_BUILTIN_COMUNESD,
19715 IX86_BUILTIN_COMULTSD,
19716 IX86_BUILTIN_COMULESD,
19717 IX86_BUILTIN_COMUGTSD,
19718 IX86_BUILTIN_COMUGESD,
19719 IX86_BUILTIN_COMORDSD,
19720 IX86_BUILTIN_COMUNORDSD,
19721 IX86_BUILTIN_COMFALSESD,
19722 IX86_BUILTIN_COMTRUESD,
19724 IX86_BUILTIN_COMEQPS,
19725 IX86_BUILTIN_COMNEPS,
19726 IX86_BUILTIN_COMLTPS,
19727 IX86_BUILTIN_COMLEPS,
19728 IX86_BUILTIN_COMGTPS,
19729 IX86_BUILTIN_COMGEPS,
19730 IX86_BUILTIN_COMUEQPS,
19731 IX86_BUILTIN_COMUNEPS,
19732 IX86_BUILTIN_COMULTPS,
19733 IX86_BUILTIN_COMULEPS,
19734 IX86_BUILTIN_COMUGTPS,
19735 IX86_BUILTIN_COMUGEPS,
19736 IX86_BUILTIN_COMORDPS,
19737 IX86_BUILTIN_COMUNORDPS,
19738 IX86_BUILTIN_COMFALSEPS,
19739 IX86_BUILTIN_COMTRUEPS,
19741 IX86_BUILTIN_COMEQPD,
19742 IX86_BUILTIN_COMNEPD,
19743 IX86_BUILTIN_COMLTPD,
19744 IX86_BUILTIN_COMLEPD,
19745 IX86_BUILTIN_COMGTPD,
19746 IX86_BUILTIN_COMGEPD,
19747 IX86_BUILTIN_COMUEQPD,
19748 IX86_BUILTIN_COMUNEPD,
19749 IX86_BUILTIN_COMULTPD,
19750 IX86_BUILTIN_COMULEPD,
19751 IX86_BUILTIN_COMUGTPD,
19752 IX86_BUILTIN_COMUGEPD,
19753 IX86_BUILTIN_COMORDPD,
19754 IX86_BUILTIN_COMUNORDPD,
19755 IX86_BUILTIN_COMFALSEPD,
19756 IX86_BUILTIN_COMTRUEPD,
19758 IX86_BUILTIN_PCOMEQUB,
19759 IX86_BUILTIN_PCOMNEUB,
19760 IX86_BUILTIN_PCOMLTUB,
19761 IX86_BUILTIN_PCOMLEUB,
19762 IX86_BUILTIN_PCOMGTUB,
19763 IX86_BUILTIN_PCOMGEUB,
19764 IX86_BUILTIN_PCOMFALSEUB,
19765 IX86_BUILTIN_PCOMTRUEUB,
19766 IX86_BUILTIN_PCOMEQUW,
19767 IX86_BUILTIN_PCOMNEUW,
19768 IX86_BUILTIN_PCOMLTUW,
19769 IX86_BUILTIN_PCOMLEUW,
19770 IX86_BUILTIN_PCOMGTUW,
19771 IX86_BUILTIN_PCOMGEUW,
19772 IX86_BUILTIN_PCOMFALSEUW,
19773 IX86_BUILTIN_PCOMTRUEUW,
19774 IX86_BUILTIN_PCOMEQUD,
19775 IX86_BUILTIN_PCOMNEUD,
19776 IX86_BUILTIN_PCOMLTUD,
19777 IX86_BUILTIN_PCOMLEUD,
19778 IX86_BUILTIN_PCOMGTUD,
19779 IX86_BUILTIN_PCOMGEUD,
19780 IX86_BUILTIN_PCOMFALSEUD,
19781 IX86_BUILTIN_PCOMTRUEUD,
19782 IX86_BUILTIN_PCOMEQUQ,
19783 IX86_BUILTIN_PCOMNEUQ,
19784 IX86_BUILTIN_PCOMLTUQ,
19785 IX86_BUILTIN_PCOMLEUQ,
19786 IX86_BUILTIN_PCOMGTUQ,
19787 IX86_BUILTIN_PCOMGEUQ,
19788 IX86_BUILTIN_PCOMFALSEUQ,
19789 IX86_BUILTIN_PCOMTRUEUQ,
19791 IX86_BUILTIN_PCOMEQB,
19792 IX86_BUILTIN_PCOMNEB,
19793 IX86_BUILTIN_PCOMLTB,
19794 IX86_BUILTIN_PCOMLEB,
19795 IX86_BUILTIN_PCOMGTB,
19796 IX86_BUILTIN_PCOMGEB,
19797 IX86_BUILTIN_PCOMFALSEB,
19798 IX86_BUILTIN_PCOMTRUEB,
19799 IX86_BUILTIN_PCOMEQW,
19800 IX86_BUILTIN_PCOMNEW,
19801 IX86_BUILTIN_PCOMLTW,
19802 IX86_BUILTIN_PCOMLEW,
19803 IX86_BUILTIN_PCOMGTW,
19804 IX86_BUILTIN_PCOMGEW,
19805 IX86_BUILTIN_PCOMFALSEW,
19806 IX86_BUILTIN_PCOMTRUEW,
19807 IX86_BUILTIN_PCOMEQD,
19808 IX86_BUILTIN_PCOMNED,
19809 IX86_BUILTIN_PCOMLTD,
19810 IX86_BUILTIN_PCOMLED,
19811 IX86_BUILTIN_PCOMGTD,
19812 IX86_BUILTIN_PCOMGED,
19813 IX86_BUILTIN_PCOMFALSED,
19814 IX86_BUILTIN_PCOMTRUED,
19815 IX86_BUILTIN_PCOMEQQ,
19816 IX86_BUILTIN_PCOMNEQ,
19817 IX86_BUILTIN_PCOMLTQ,
19818 IX86_BUILTIN_PCOMLEQ,
19819 IX86_BUILTIN_PCOMGTQ,
19820 IX86_BUILTIN_PCOMGEQ,
19821 IX86_BUILTIN_PCOMFALSEQ,
19822 IX86_BUILTIN_PCOMTRUEQ,
19824 IX86_BUILTIN_MAX
19825 };
19827 /* Table for the ix86 builtin decls. */
19828 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
19830 /* Table of all of the builtin functions that are possible with different ISAs
19831 but are waiting to be built until a function is declared to use that
19832 ISA. */
19833 struct builtin_isa GTY(())
19834 {
19835 tree type; /* builtin type to use in the declaration */
19836 const char *name; /* function name */
19837 int isa; /* isa_flags this builtin is defined for */
19838 bool const_p; /* true if the declaration is constant */
19839 };
19841 static GTY(()) struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
19844 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
19845 * of which isa_flags to use in the ix86_builtins_isa array. Stores the
19846 * function decl in the ix86_builtins array. Returns the function decl or
19847 * NULL_TREE if the builtin was not added.
19848 *
19849 * If the front end has a special hook for builtin functions, delay adding
19850 * builtin functions that aren't in the current ISA until the ISA is changed
19851 * with function specific optimization. Doing so can save about 300K for the
19852 * default compiler. When the builtin is expanded, check at that time whether
19853 * it is valid.
19854 *
19855 * If the front end doesn't have a special hook, record all builtins, even if
19856 * they aren't in the current ISA, in case the user uses function specific
19857 * options for a different ISA later; recording them up front avoids scope
19858 * errors when a builtin is added in the middle of a function scope. */
19860 static inline tree
19861 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
19862 {
19863 tree decl = NULL_TREE;
19865 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
19866 {
19867 ix86_builtins_isa[(int) code].isa = mask;
19869 if ((mask & ix86_isa_flags) != 0
19870 || (lang_hooks.builtin_function
19871 == lang_hooks.builtin_function_ext_scope))
19873 {
19874 decl = add_builtin_function (name, type, code, BUILT_IN_MD, NULL,
19875 NULL_TREE);
19876 ix86_builtins[(int) code] = decl;
19877 ix86_builtins_isa[(int) code].type = NULL_TREE;
19878 }
19879 else
19880 {
19881 ix86_builtins[(int) code] = NULL_TREE;
19882 ix86_builtins_isa[(int) code].const_p = false;
19883 ix86_builtins_isa[(int) code].type = type;
19884 ix86_builtins_isa[(int) code].name = name;
19885 }
19886 }
19888 return decl;
19889 }
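/* Editor's sketch, not part of the original source: a typical call to
   def_builtin.  The mask, name and builtin code below are taken from the
   tables in this file; the local type variable is a hypothetical name for
   a function-type node that would really be built earlier.  */
#if 0
{
  tree float_ftype_float
    = build_function_type_list (float_type_node, float_type_node,
                                NULL_TREE);
  /* With -msse the decl is created immediately; otherwise the name/type
     pair is parked in ix86_builtins_isa until ix86_add_new_builtins is
     called with OPTION_MASK_ISA_SSE.  */
  def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_rsqrtf",
               float_ftype_float, IX86_BUILTIN_RSQRTF);
}
#endif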
19891 /* Like def_builtin, but also marks the function decl "const". */
19893 static inline tree
19894 def_builtin_const (int mask, const char *name, tree type,
19895 enum ix86_builtins code)
19896 {
19897 tree decl = def_builtin (mask, name, type, code);
19898 if (decl)
19899 TREE_READONLY (decl) = 1;
19900 else
19901 ix86_builtins_isa[(int) code].const_p = true;
19903 return decl;
19904 }
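/* Editor's note: TREE_READONLY on a FUNCTION_DECL is the tree-level form
   of __attribute__((const)), so calls to these builtins may be CSEd or
   hoisted; const_p records the same fact for decls whose creation is
   deferred.  A builtin registered this way behaves roughly like the
   hypothetical prototype below.  */
#if 0
extern float __builtin_ia32_rsqrtf (float) __attribute__ ((const));
#endif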
19906 /* Add any new builtin functions for a given ISA that may not have been
19907 declared. This saves a bit of space compared to adding every
19908 declaration to the tree up front whether or not it is ever used. */
19910 static void
19911 ix86_add_new_builtins (int isa)
19912 {
19913 int i;
19914 tree decl;
19916 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
19917 {
19918 if ((ix86_builtins_isa[i].isa & isa) != 0
19919 && ix86_builtins_isa[i].type != NULL_TREE)
19920 {
19921 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
19922 ix86_builtins_isa[i].type,
19923 i, BUILT_IN_MD, NULL,
19924 NULL_TREE);
19926 ix86_builtins[i] = decl;
19927 ix86_builtins_isa[i].type = NULL_TREE;
19928 if (ix86_builtins_isa[i].const_p)
19929 TREE_READONLY (decl) = 1;
19930 }
19931 }
19932 }
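/* Editor's sketch (assumed caller, not original code): the function
   specific option machinery would invoke this when, say,
   __attribute__((target("sse4.2"))) enables new ISA bits mid-compilation.
   old_isa_flags is a hypothetical name for the previously active flag
   set; this fragment belongs inside such an option-switching function.  */
#if 0
  int isa_delta = ix86_isa_flags & ~old_isa_flags; /* bits just enabled */
  if (isa_delta)
    ix86_add_new_builtins (isa_delta);
#endif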
19934 /* Bits for builtin_description.flag. */
19936 /* Set when we don't support the comparison natively, and should
19937 swap the comparison operands in order to support it. */
19938 #define BUILTIN_DESC_SWAP_OPERANDS 1
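/* Editor's illustration, not original code: roughly what an expander does
   with this flag.  A GT-style comparison that lacks a native pattern is
   emitted with the reversed condition after exchanging the two inputs;
   d, op0 and op1 follow the naming used by the expanders in this file.  */
#if 0
  if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
    {
      rtx tmp = op0;
      op0 = op1;
      op1 = tmp;
    }
#endif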
19940 struct builtin_description
19941 {
19942 const unsigned int mask;
19943 const enum insn_code icode;
19944 const char *const name;
19945 const enum ix86_builtins code;
19946 const enum rtx_code comparison;
19947 const int flag;
19948 };
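/* Editor's note on reading the tables that follow: each row gives the ISA
   mask that must be enabled, the named insn pattern used for expansion,
   the C-visible name (0 where no name is registered from the table), the
   IX86_BUILTIN_* key, an rtx comparison code (UNKNOWN when unused) and a
   flag word; depending on the table the flag holds BUILTIN_DESC_* bits, a
   CC mode, or a builtin type enum cast to int.  For example, the first
   bdesc_comi row says __builtin_ia32_comieq needs -msse, expands through
   CODE_FOR_sse_comi and compares with the UNEQ code.  */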
19950 static const struct builtin_description bdesc_comi[] =
19951 {
19952 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
19953 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
19954 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
19955 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
19956 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
19957 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
19958 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
19959 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
19960 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
19961 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
19962 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
19963 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
19964 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
19965 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
19966 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
19967 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
19968 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
19969 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
19970 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
19971 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
19972 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
19973 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
19974 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
19975 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
19976 };
19978 static const struct builtin_description bdesc_pcmpestr[] =
19979 {
19980 /* SSE4.2 */
19981 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
19982 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
19983 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
19984 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
19985 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
19986 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
19987 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
19988 };
19990 static const struct builtin_description bdesc_pcmpistr[] =
19991 {
19992 /* SSE4.2 */
19993 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
19994 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
19995 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
19996 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
19997 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
19998 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
19999 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
20000 };
20002 /* Special builtin types */
20003 enum ix86_special_builtin_type
20004 {
20005 SPECIAL_FTYPE_UNKNOWN,
20006 VOID_FTYPE_VOID,
20007 V32QI_FTYPE_PCCHAR,
20008 V16QI_FTYPE_PCCHAR,
20009 V8SF_FTYPE_PCV4SF,
20010 V8SF_FTYPE_PCFLOAT,
20011 V4DF_FTYPE_PCV2DF,
20012 V4DF_FTYPE_PCDOUBLE,
20013 V4SF_FTYPE_PCFLOAT,
20014 V2DF_FTYPE_PCDOUBLE,
20015 V8SF_FTYPE_PCV8SF_V8SF,
20016 V4DF_FTYPE_PCV4DF_V4DF,
20017 V4SF_FTYPE_V4SF_PCV2SF,
20018 V4SF_FTYPE_PCV4SF_V4SF,
20019 V2DF_FTYPE_V2DF_PCDOUBLE,
20020 V2DF_FTYPE_PCV2DF_V2DF,
20021 V2DI_FTYPE_PV2DI,
20022 VOID_FTYPE_PV2SF_V4SF,
20023 VOID_FTYPE_PV2DI_V2DI,
20024 VOID_FTYPE_PCHAR_V32QI,
20025 VOID_FTYPE_PCHAR_V16QI,
20026 VOID_FTYPE_PFLOAT_V8SF,
20027 VOID_FTYPE_PFLOAT_V4SF,
20028 VOID_FTYPE_PDOUBLE_V4DF,
20029 VOID_FTYPE_PDOUBLE_V2DF,
20030 VOID_FTYPE_PDI_DI,
20031 VOID_FTYPE_PINT_INT,
20032 VOID_FTYPE_PV8SF_V8SF_V8SF,
20033 VOID_FTYPE_PV4DF_V4DF_V4DF,
20034 VOID_FTYPE_PV4SF_V4SF_V4SF,
20035 VOID_FTYPE_PV2DF_V2DF_V2DF
20036 };
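/* Editor's note (naming convention, inferred from the uses below): these
   enumerators encode C signatures as <return>_FTYPE_<args...>, with PC
   meaning pointer-to-const and P a plain pointer.  Two worked examples,
   written as hypothetical prototypes:  */
#if 0
  /* V4SF_FTYPE_PCFLOAT, as used by __builtin_ia32_loadups:  */
  v4sf example_load (const float *p);
  /* VOID_FTYPE_PV2DI_V2DI, as used by __builtin_ia32_movntdq:  */
  void example_store (v2di *p, v2di x);
#endif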
20038 /* Builtin types */
20039 enum ix86_builtin_type
20040 {
20041 FTYPE_UNKNOWN,
20042 FLOAT128_FTYPE_FLOAT128,
20043 FLOAT_FTYPE_FLOAT,
20044 FLOAT128_FTYPE_FLOAT128_FLOAT128,
20045 INT_FTYPE_V8SF_V8SF_PTEST,
20046 INT_FTYPE_V4DI_V4DI_PTEST,
20047 INT_FTYPE_V4DF_V4DF_PTEST,
20048 INT_FTYPE_V4SF_V4SF_PTEST,
20049 INT_FTYPE_V2DI_V2DI_PTEST,
20050 INT_FTYPE_V2DF_V2DF_PTEST,
20051 INT64_FTYPE_V4SF,
20052 INT64_FTYPE_V2DF,
20053 INT_FTYPE_V16QI,
20054 INT_FTYPE_V8QI,
20055 INT_FTYPE_V8SF,
20056 INT_FTYPE_V4DF,
20057 INT_FTYPE_V4SF,
20058 INT_FTYPE_V2DF,
20059 V16QI_FTYPE_V16QI,
20060 V8SI_FTYPE_V8SF,
20061 V8SI_FTYPE_V4SI,
20062 V8HI_FTYPE_V8HI,
20063 V8HI_FTYPE_V16QI,
20064 V8QI_FTYPE_V8QI,
20065 V8SF_FTYPE_V8SF,
20066 V8SF_FTYPE_V8SI,
20067 V8SF_FTYPE_V4SF,
20068 V4SI_FTYPE_V4SI,
20069 V4SI_FTYPE_V16QI,
20070 V4SI_FTYPE_V8SI,
20071 V4SI_FTYPE_V8HI,
20072 V4SI_FTYPE_V4DF,
20073 V4SI_FTYPE_V4SF,
20074 V4SI_FTYPE_V2DF,
20075 V4HI_FTYPE_V4HI,
20076 V4DF_FTYPE_V4DF,
20077 V4DF_FTYPE_V4SI,
20078 V4DF_FTYPE_V4SF,
20079 V4DF_FTYPE_V2DF,
20080 V4SF_FTYPE_V4DF,
20081 V4SF_FTYPE_V4SF,
20082 V4SF_FTYPE_V4SF_VEC_MERGE,
20083 V4SF_FTYPE_V8SF,
20084 V4SF_FTYPE_V4SI,
20085 V4SF_FTYPE_V2DF,
20086 V2DI_FTYPE_V2DI,
20087 V2DI_FTYPE_V16QI,
20088 V2DI_FTYPE_V8HI,
20089 V2DI_FTYPE_V4SI,
20090 V2DF_FTYPE_V2DF,
20091 V2DF_FTYPE_V2DF_VEC_MERGE,
20092 V2DF_FTYPE_V4SI,
20093 V2DF_FTYPE_V4DF,
20094 V2DF_FTYPE_V4SF,
20095 V2DF_FTYPE_V2SI,
20096 V2SI_FTYPE_V2SI,
20097 V2SI_FTYPE_V4SF,
20098 V2SI_FTYPE_V2SF,
20099 V2SI_FTYPE_V2DF,
20100 V2SF_FTYPE_V2SF,
20101 V2SF_FTYPE_V2SI,
20102 V16QI_FTYPE_V16QI_V16QI,
20103 V16QI_FTYPE_V8HI_V8HI,
20104 V8QI_FTYPE_V8QI_V8QI,
20105 V8QI_FTYPE_V4HI_V4HI,
20106 V8HI_FTYPE_V8HI_V8HI,
20107 V8HI_FTYPE_V8HI_V8HI_COUNT,
20108 V8HI_FTYPE_V16QI_V16QI,
20109 V8HI_FTYPE_V4SI_V4SI,
20110 V8HI_FTYPE_V8HI_SI_COUNT,
20111 V8SF_FTYPE_V8SF_V8SF,
20112 V8SF_FTYPE_V8SF_V8SI,
20113 V4SI_FTYPE_V4SI_V4SI,
20114 V4SI_FTYPE_V4SI_V4SI_COUNT,
20115 V4SI_FTYPE_V8HI_V8HI,
20116 V4SI_FTYPE_V4SF_V4SF,
20117 V4SI_FTYPE_V2DF_V2DF,
20118 V4SI_FTYPE_V4SI_SI_COUNT,
20119 V4HI_FTYPE_V4HI_V4HI,
20120 V4HI_FTYPE_V4HI_V4HI_COUNT,
20121 V4HI_FTYPE_V8QI_V8QI,
20122 V4HI_FTYPE_V2SI_V2SI,
20123 V4HI_FTYPE_V4HI_SI_COUNT,
20124 V4DF_FTYPE_V4DF_V4DF,
20125 V4DF_FTYPE_V4DF_V4DI,
20126 V4SF_FTYPE_V4SF_V4SF,
20127 V4SF_FTYPE_V4SF_V4SF_SWAP,
20128 V4SF_FTYPE_V4SF_V4SI,
20129 V4SF_FTYPE_V4SF_V2SI,
20130 V4SF_FTYPE_V4SF_V2DF,
20131 V4SF_FTYPE_V4SF_DI,
20132 V4SF_FTYPE_V4SF_SI,
20133 V2DI_FTYPE_V2DI_V2DI,
20134 V2DI_FTYPE_V2DI_V2DI_COUNT,
20135 V2DI_FTYPE_V16QI_V16QI,
20136 V2DI_FTYPE_V4SI_V4SI,
20137 V2DI_FTYPE_V2DI_V16QI,
20138 V2DI_FTYPE_V2DF_V2DF,
20139 V2DI_FTYPE_V2DI_SI_COUNT,
20140 V2SI_FTYPE_V2SI_V2SI,
20141 V2SI_FTYPE_V2SI_V2SI_COUNT,
20142 V2SI_FTYPE_V4HI_V4HI,
20143 V2SI_FTYPE_V2SF_V2SF,
20144 V2SI_FTYPE_V2SI_SI_COUNT,
20145 V2DF_FTYPE_V2DF_V2DF,
20146 V2DF_FTYPE_V2DF_V2DF_SWAP,
20147 V2DF_FTYPE_V2DF_V4SF,
20148 V2DF_FTYPE_V2DF_V2DI,
20149 V2DF_FTYPE_V2DF_DI,
20150 V2DF_FTYPE_V2DF_SI,
20151 V2SF_FTYPE_V2SF_V2SF,
20152 V1DI_FTYPE_V1DI_V1DI,
20153 V1DI_FTYPE_V1DI_V1DI_COUNT,
20154 V1DI_FTYPE_V8QI_V8QI,
20155 V1DI_FTYPE_V2SI_V2SI,
20156 V1DI_FTYPE_V1DI_SI_COUNT,
20157 UINT64_FTYPE_UINT64_UINT64,
20158 UINT_FTYPE_UINT_UINT,
20159 UINT_FTYPE_UINT_USHORT,
20160 UINT_FTYPE_UINT_UCHAR,
20161 V8HI_FTYPE_V8HI_INT,
20162 V4SI_FTYPE_V4SI_INT,
20163 V4HI_FTYPE_V4HI_INT,
20164 V8SF_FTYPE_V8SF_INT,
20165 V4SI_FTYPE_V8SI_INT,
20166 V4SF_FTYPE_V8SF_INT,
20167 V2DF_FTYPE_V4DF_INT,
20168 V4DF_FTYPE_V4DF_INT,
20169 V4SF_FTYPE_V4SF_INT,
20170 V2DI_FTYPE_V2DI_INT,
20171 V2DI2TI_FTYPE_V2DI_INT,
20172 V2DF_FTYPE_V2DF_INT,
20173 V16QI_FTYPE_V16QI_V16QI_V16QI,
20174 V8SF_FTYPE_V8SF_V8SF_V8SF,
20175 V4DF_FTYPE_V4DF_V4DF_V4DF,
20176 V4SF_FTYPE_V4SF_V4SF_V4SF,
20177 V2DF_FTYPE_V2DF_V2DF_V2DF,
20178 V16QI_FTYPE_V16QI_V16QI_INT,
20179 V8SI_FTYPE_V8SI_V8SI_INT,
20180 V8SI_FTYPE_V8SI_V4SI_INT,
20181 V8HI_FTYPE_V8HI_V8HI_INT,
20182 V8SF_FTYPE_V8SF_V8SF_INT,
20183 V8SF_FTYPE_V8SF_V4SF_INT,
20184 V4SI_FTYPE_V4SI_V4SI_INT,
20185 V4DF_FTYPE_V4DF_V4DF_INT,
20186 V4DF_FTYPE_V4DF_V2DF_INT,
20187 V4SF_FTYPE_V4SF_V4SF_INT,
20188 V2DI_FTYPE_V2DI_V2DI_INT,
20189 V2DI2TI_FTYPE_V2DI_V2DI_INT,
20190 V1DI2DI_FTYPE_V1DI_V1DI_INT,
20191 V2DF_FTYPE_V2DF_V2DF_INT,
20192 V8SF_FTYPE_V8SF_V8SF_V8SI_INT,
20193 V4DF_FTYPE_V4DF_V4DF_V4DI_INT,
20194 V4SF_FTYPE_V4SF_V4SF_V4SI_INT,
20195 V2DF_FTYPE_V2DF_V2DF_V2DI_INT,
20196 V2DI_FTYPE_V2DI_UINT_UINT,
20197 V2DI_FTYPE_V2DI_V2DI_UINT_UINT
20198 };
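/* Editor's note (inferred from the tables below, so hedged): the trailing
   tags on some of these names are expander hints rather than part of the
   C signature.  _SWAP exchanges the two inputs (cmpgtps is expanded as LT
   with swapped operands), _COUNT marks the last operand as a shift count,
   and _VEC_MERGE marks scalar operations such as sqrtss whose result is
   merged into the upper elements of the first operand.  */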
20200 /* Special builtins with variable number of arguments. */
20201 static const struct builtin_description bdesc_special_args[] =
20202 {
20203 /* MMX */
20204 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
20206 /* 3DNow! */
20207 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
20209 /* SSE */
20210 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20211 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20212 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
20214 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
20215 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
20216 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
20217 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
20219 /* SSE or 3DNow!A */
20220 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20221 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PDI_DI },
20223 /* SSE2 */
20224 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20225 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20226 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20227 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
20228 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20229 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
20230 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
20231 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
20232 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
20234 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
20235 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
20237 /* SSE3 */
20238 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
20240 /* SSE4.1 */
20241 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
20243 /* SSE4A */
20244 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20245 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20247 /* AVX */
20248 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
20249 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, 0, IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
20250 { OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_64BIT, CODE_FOR_avx_vzeroupper_rex64, 0, IX86_BUILTIN_VZEROUPPER_REX64, UNKNOWN, (int) VOID_FTYPE_VOID },
20252 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastss, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
20253 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastsd256, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
20254 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastss256, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
20255 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_pd256, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
20256 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_ps256, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
20258 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
20259 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
20260 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
20261 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
20262 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
20263 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
20264 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
20266 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
20267 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
20268 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
20269 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
20270 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
20271 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
20272 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
20273 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
20274 };
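/* Editor's note: the "special" table above collects builtins with memory
   or other side effects (loads, stores, fences), which should not be
   marked const; the bdesc_args table below holds pure value operations,
   which typically can be registered through def_builtin_const.  */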
20276 /* Builtins with variable number of arguments. */
20277 static const struct builtin_description bdesc_args[] =
20278 {
20279 /* MMX */
20280 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20281 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20282 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20283 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20284 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20285 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20287 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20288 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20289 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20290 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20291 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20292 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20293 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20294 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20296 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20297 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20299 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20300 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20301 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20302 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20304 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20305 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20306 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20307 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20308 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20309 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20311 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20312 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20313 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20314 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20315 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20316 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20318 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
20319 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
20320 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
20322 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
20324 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
20325 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
20326 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
20327 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
20328 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
20329 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
20331 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
20332 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
20333 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
20334 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
20335 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
20336 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
20338 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
20339 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
20340 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
20341 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
20343 /* 3DNow! */
20344 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
20345 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
20346 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
20347 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
20349 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20350 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20351 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20352 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
20353 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
20354 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
20355 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20356 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20357 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20358 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20359 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20360 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20361 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20362 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20363 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20365 /* 3DNow!A */
20366 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
20367 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
20368 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
20369 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
20370 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20371 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20373 /* SSE */
20374 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
20375 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
20376 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
20377 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
20378 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
20379 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
20380 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
20381 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
20382 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
20383 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
20384 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
20385 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
20387 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
20389 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20390 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20391 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20392 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20393 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20394 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20395 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20396 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20398 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
20399 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
20400 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
20401 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20402 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20403 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
20404 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
20405 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
20406 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
20407 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20408 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20409 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
20410 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
20411 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
20412 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
20413 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
20414 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
20415 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
20416 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
20417 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20418 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20419 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
20421 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20422 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20423 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20424 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20426 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20427 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20428 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20429 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20431 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20432 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20433 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20434 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20435 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20437 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
20438 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
20439 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
20441 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
20443 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
20444 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
20445 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
20447 /* MMX forms available with SSE or 3DNow!A */
20448 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20449 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20450 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20452 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20453 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20454 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20455 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20457 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
20458 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
20460 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
  /* SSE2 */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
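
  /* Note how the compare builtins above reuse one mask-compare
     pattern: the RTX code in each entry selects the condition, GT/GE
     are derived from LT/LE by swapping the operands (the _SWAP
     ftypes), and the "not" forms use unordered codes (e.g. cmpnltpd
     is UNGE) because those instructions are also true when the
     operands compare unordered.  */
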
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpckhpd_exp, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpcklpd_exp, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
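
  /* In the shift entries above, the _SI_COUNT ftypes take the count as
     a scalar integer (the *i builtins), while the vector _COUNT ftypes
     read it from the low quadword of a vector operand.  The whole
     register shifts pslldqi128/psrldqi128 go through TImode, hence the
     V2DI2TI ftype; their count is in bits and must be a multiple of
     eight.  */
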
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
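
  /* A zero name, as for FABSQ and COPYSIGNQ above, means the builtin
     gets its user-visible name elsewhere in the initialization code
     (for these two, the __float128 functions __builtin_fabsq and
     __builtin_copysignq).  */
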
  /* SSE2 MMX */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },

  /* SSE3 */
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  /* SSSE3 */
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  /* SSSE3.  */
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_V2DI_INT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI2DI_FTYPE_V1DI_V1DI_INT },
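
  /* palignr concatenates its two operands and extracts a byte-aligned
     window from the pair, so it is expanded through a TImode (DImode
     for the MMX form) shift pattern; the V2DI2TI/V1DI2DI ftypes encode
     that mode change.  */
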
  /* SSE4.1 */
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  /* SSE4.1 and SSE5 */
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },

  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
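
  /* PTEST only sets the flags, so all three builtins share one pattern
     and differ only in which flag the comparison code reads: EQ tests
     ZF (ptestz), LTU tests CF (ptestc), and GTU tests that both are
     clear (ptestnzc).  */
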
  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
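
  /* The crc32 builtins fold one quantum of data into a running CRC-32C
     value.  Illustrative use, not from this file:

       unsigned int crc = 0xffffffff;
       while (len--)
	 crc = __builtin_ia32_crc32qi (crc, *buf++);  */
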
  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* AES */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* PCLMUL */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_nandv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_nandv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
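
  /* The si256_si/ps256_ps/pd256_pd builtins widen a 128-bit value to
     256 bits leaving the upper half undefined, and the reverse forms
     return the low 128 bits; they back the 128<->256 cast intrinsics
     and normally generate no code.  */
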
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
};

/* SSE5 */
enum multi_arg_type {
  MULTI_ARG_UNKNOWN,
  MULTI_ARG_3_SF,
  MULTI_ARG_3_DF,
  MULTI_ARG_3_DI,
  MULTI_ARG_3_SI,
  MULTI_ARG_3_SI_DI,
  MULTI_ARG_3_HI,
  MULTI_ARG_3_HI_SI,
  MULTI_ARG_3_QI,
  MULTI_ARG_3_PERMPS,
  MULTI_ARG_3_PERMPD,
  MULTI_ARG_2_SF,
  MULTI_ARG_2_DF,
  MULTI_ARG_2_DI,
  MULTI_ARG_2_SI,
  MULTI_ARG_2_HI,
  MULTI_ARG_2_QI,
  MULTI_ARG_2_DI_IMM,
  MULTI_ARG_2_SI_IMM,
  MULTI_ARG_2_HI_IMM,
  MULTI_ARG_2_QI_IMM,
  MULTI_ARG_2_SF_CMP,
  MULTI_ARG_2_DF_CMP,
  MULTI_ARG_2_DI_CMP,
  MULTI_ARG_2_SI_CMP,
  MULTI_ARG_2_HI_CMP,
  MULTI_ARG_2_QI_CMP,
  MULTI_ARG_2_DI_TF,
  MULTI_ARG_2_SI_TF,
  MULTI_ARG_2_HI_TF,
  MULTI_ARG_2_QI_TF,
  MULTI_ARG_2_SF_TF,
  MULTI_ARG_2_DF_TF,
  MULTI_ARG_1_SF,
  MULTI_ARG_1_DF,
  MULTI_ARG_1_DI,
  MULTI_ARG_1_SI,
  MULTI_ARG_1_HI,
  MULTI_ARG_1_QI,
  MULTI_ARG_1_SI_DI,
  MULTI_ARG_1_HI_DI,
  MULTI_ARG_1_HI_SI,
  MULTI_ARG_1_QI_DI,
  MULTI_ARG_1_QI_SI,
  MULTI_ARG_1_QI_HI,
  MULTI_ARG_1_PH2PS,
  MULTI_ARG_1_PS2PH
};
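
/* Each multi_arg_type value tells the expander how many operands a
   builtin takes and in which modes: e.g. MULTI_ARG_3_SF is three
   V4SFmode operands, the _IMM forms take an immediate, the _CMP and
   _TF forms carry a comparison code, and two-mode names such as
   MULTI_ARG_1_QI_HI give the source and destination modes of a
   widening operation.  */
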
static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv4sf4, "__builtin_ia32_fmaddss", IX86_BUILTIN_FMADDSS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv2df4, "__builtin_ia32_fmaddsd", IX86_BUILTIN_FMADDSD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv4sf4, "__builtin_ia32_fmaddps", IX86_BUILTIN_FMADDPS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv2df4, "__builtin_ia32_fmaddpd", IX86_BUILTIN_FMADDPD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv4sf4, "__builtin_ia32_fmsubss", IX86_BUILTIN_FMSUBSS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv2df4, "__builtin_ia32_fmsubsd", IX86_BUILTIN_FMSUBSD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv4sf4, "__builtin_ia32_fmsubps", IX86_BUILTIN_FMSUBPS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv2df4, "__builtin_ia32_fmsubpd", IX86_BUILTIN_FMSUBPD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv4sf4, "__builtin_ia32_fnmaddss", IX86_BUILTIN_FNMADDSS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv2df4, "__builtin_ia32_fnmaddsd", IX86_BUILTIN_FNMADDSD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv4sf4, "__builtin_ia32_fnmaddps", IX86_BUILTIN_FNMADDPS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv2df4, "__builtin_ia32_fnmaddpd", IX86_BUILTIN_FNMADDPD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv4sf4, "__builtin_ia32_fnmsubss", IX86_BUILTIN_FNMSUBSS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv2df4, "__builtin_ia32_fnmsubsd", IX86_BUILTIN_FNMSUBSD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv4sf4, "__builtin_ia32_fnmsubps", IX86_BUILTIN_FNMSUBPS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv2df4, "__builtin_ia32_fnmsubpd", IX86_BUILTIN_FNMSUBPD, 0, (int)MULTI_ARG_3_DF },
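  /* Illustrative semantics, not spelled out in this file: fmaddps
     computes a * b + c element-wise, the fmsub forms compute a * b - c,
     the fnm* forms negate the product, and the scalar (ss/sd) variants
     operate on the low element only.  */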
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov", IX86_BUILTIN_PCMOV, 0, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov_v2di", IX86_BUILTIN_PCMOV_V2DI, 0, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4si, "__builtin_ia32_pcmov_v4si", IX86_BUILTIN_PCMOV_V4SI, 0, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v8hi, "__builtin_ia32_pcmov_v8hi", IX86_BUILTIN_PCMOV_V8HI, 0, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v16qi, "__builtin_ia32_pcmov_v16qi", IX86_BUILTIN_PCMOV_V16QI, 0, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2df, "__builtin_ia32_pcmov_v2df", IX86_BUILTIN_PCMOV_V2DF, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4sf, "__builtin_ia32_pcmov_v4sf", IX86_BUILTIN_PCMOV_V4SF, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pperm, "__builtin_ia32_pperm", IX86_BUILTIN_PPERM, 0, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv4sf, "__builtin_ia32_permps", IX86_BUILTIN_PERMPS, 0, (int)MULTI_ARG_3_PERMPS },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv2df, "__builtin_ia32_permpd", IX86_BUILTIN_PERMPD, 0, (int)MULTI_ARG_3_PERMPD },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssww, "__builtin_ia32_pmacssww", IX86_BUILTIN_PMACSSWW, 0, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsww, "__builtin_ia32_pmacsww", IX86_BUILTIN_PMACSWW, 0, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsswd, "__builtin_ia32_pmacsswd", IX86_BUILTIN_PMACSSWD, 0, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacswd, "__builtin_ia32_pmacswd", IX86_BUILTIN_PMACSWD, 0, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdd, "__builtin_ia32_pmacssdd", IX86_BUILTIN_PMACSSDD, 0, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdd, "__builtin_ia32_pmacsdd", IX86_BUILTIN_PMACSDD, 0, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdql, "__builtin_ia32_pmacssdql", IX86_BUILTIN_PMACSSDQL, 0, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdqh, "__builtin_ia32_pmacssdqh", IX86_BUILTIN_PMACSSDQH, 0, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdql, "__builtin_ia32_pmacsdql", IX86_BUILTIN_PMACSDQL, 0, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdqh, "__builtin_ia32_pmacsdqh", IX86_BUILTIN_PMACSDQH, 0, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcsswd, "__builtin_ia32_pmadcsswd", IX86_BUILTIN_PMADCSSWD, 0, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcswd, "__builtin_ia32_pmadcswd", IX86_BUILTIN_PMADCSWD, 0, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv2di3, "__builtin_ia32_protq", IX86_BUILTIN_PROTQ, 0, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv4si3, "__builtin_ia32_protd", IX86_BUILTIN_PROTD, 0, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv8hi3, "__builtin_ia32_protw", IX86_BUILTIN_PROTW, 0, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv16qi3, "__builtin_ia32_protb", IX86_BUILTIN_PROTB, 0, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv2di3, "__builtin_ia32_protqi", IX86_BUILTIN_PROTQ_IMM, 0, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv4si3, "__builtin_ia32_protdi", IX86_BUILTIN_PROTD_IMM, 0, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv8hi3, "__builtin_ia32_protwi", IX86_BUILTIN_PROTW_IMM, 0, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv16qi3, "__builtin_ia32_protbi", IX86_BUILTIN_PROTB_IMM, 0, (int)MULTI_ARG_2_QI_IMM },
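  /* The prot* builtins rotate each element left by a per-element count
     taken from the second vector operand, with negative counts rotating
     right; the *i forms take an immediate count instead.  */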
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv2di3, "__builtin_ia32_pshaq", IX86_BUILTIN_PSHAQ, 0, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv4si3, "__builtin_ia32_pshad", IX86_BUILTIN_PSHAD, 0, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv8hi3, "__builtin_ia32_pshaw", IX86_BUILTIN_PSHAW, 0, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv16qi3, "__builtin_ia32_pshab", IX86_BUILTIN_PSHAB, 0, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv2di3, "__builtin_ia32_pshlq", IX86_BUILTIN_PSHLQ, 0, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv4si3, "__builtin_ia32_pshld", IX86_BUILTIN_PSHLD, 0, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv8hi3, "__builtin_ia32_pshlw", IX86_BUILTIN_PSHLW, 0, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv16qi3, "__builtin_ia32_pshlb", IX86_BUILTIN_PSHLB, 0, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv4sf2, "__builtin_ia32_frczss", IX86_BUILTIN_FRCZSS, 0, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv2df2, "__builtin_ia32_frczsd", IX86_BUILTIN_FRCZSD, 0, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv4sf2, "__builtin_ia32_frczps", IX86_BUILTIN_FRCZPS, 0, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv2df2, "__builtin_ia32_frczpd", IX86_BUILTIN_FRCZPD, 0, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtph2ps, "__builtin_ia32_cvtph2ps", IX86_BUILTIN_CVTPH2PS, 0, (int)MULTI_ARG_1_PH2PS },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtps2ph, "__builtin_ia32_cvtps2ph", IX86_BUILTIN_CVTPS2PH, 0, (int)MULTI_ARG_1_PS2PH },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbw, "__builtin_ia32_phaddbw", IX86_BUILTIN_PHADDBW, 0, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbd, "__builtin_ia32_phaddbd", IX86_BUILTIN_PHADDBD, 0, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbq, "__builtin_ia32_phaddbq", IX86_BUILTIN_PHADDBQ, 0, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwd, "__builtin_ia32_phaddwd", IX86_BUILTIN_PHADDWD, 0, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwq, "__builtin_ia32_phaddwq", IX86_BUILTIN_PHADDWQ, 0, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadddq, "__builtin_ia32_phadddq", IX86_BUILTIN_PHADDDQ, 0, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubw, "__builtin_ia32_phaddubw", IX86_BUILTIN_PHADDUBW, 0, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubd, "__builtin_ia32_phaddubd", IX86_BUILTIN_PHADDUBD, 0, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubq, "__builtin_ia32_phaddubq", IX86_BUILTIN_PHADDUBQ, 0, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwd, "__builtin_ia32_phadduwd", IX86_BUILTIN_PHADDUWD, 0, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwq, "__builtin_ia32_phadduwq", IX86_BUILTIN_PHADDUWQ, 0, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddudq, "__builtin_ia32_phaddudq", IX86_BUILTIN_PHADDUDQ, 0, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubbw, "__builtin_ia32_phsubbw", IX86_BUILTIN_PHSUBBW, 0, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubwd, "__builtin_ia32_phsubwd", IX86_BUILTIN_PHSUBWD, 0, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubdq, "__builtin_ia32_phsubdq", IX86_BUILTIN_PHSUBDQ, 0, (int)MULTI_ARG_1_SI_DI },
21007 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comeqss", IX86_BUILTIN_COMEQSS, EQ, (int)MULTI_ARG_2_SF_CMP },
21008 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comness", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
21009 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comneqss", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
21010 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comltss", IX86_BUILTIN_COMLTSS, LT, (int)MULTI_ARG_2_SF_CMP },
21011 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comless", IX86_BUILTIN_COMLESS, LE, (int)MULTI_ARG_2_SF_CMP },
21012 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgtss", IX86_BUILTIN_COMGTSS, GT, (int)MULTI_ARG_2_SF_CMP },
21013 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgess", IX86_BUILTIN_COMGESS, GE, (int)MULTI_ARG_2_SF_CMP },
21014 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comueqss", IX86_BUILTIN_COMUEQSS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
21015 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuness", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21016 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuneqss", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21017 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunltss", IX86_BUILTIN_COMULTSS, UNLT, (int)MULTI_ARG_2_SF_CMP },
21018 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunless", IX86_BUILTIN_COMULESS, UNLE, (int)MULTI_ARG_2_SF_CMP },
21019 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungtss", IX86_BUILTIN_COMUGTSS, UNGT, (int)MULTI_ARG_2_SF_CMP },
21020 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungess", IX86_BUILTIN_COMUGESS, UNGE, (int)MULTI_ARG_2_SF_CMP },
21021 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comordss", IX86_BUILTIN_COMORDSS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
21022 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunordss", IX86_BUILTIN_COMUNORDSS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
21024 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comeqsd", IX86_BUILTIN_COMEQSD, EQ, (int)MULTI_ARG_2_DF_CMP },
21025 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comnesd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
21026 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comneqsd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
21027 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comltsd", IX86_BUILTIN_COMLTSD, LT, (int)MULTI_ARG_2_DF_CMP },
21028 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comlesd", IX86_BUILTIN_COMLESD, LE, (int)MULTI_ARG_2_DF_CMP },
21029 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgtsd", IX86_BUILTIN_COMGTSD, GT, (int)MULTI_ARG_2_DF_CMP },
21030 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgesd", IX86_BUILTIN_COMGESD, GE, (int)MULTI_ARG_2_DF_CMP },
21031 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comueqsd", IX86_BUILTIN_COMUEQSD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
21032 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunesd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21033 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comuneqsd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21034 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunltsd", IX86_BUILTIN_COMULTSD, UNLT, (int)MULTI_ARG_2_DF_CMP },
21035 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunlesd", IX86_BUILTIN_COMULESD, UNLE, (int)MULTI_ARG_2_DF_CMP },
21036 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungtsd", IX86_BUILTIN_COMUGTSD, UNGT, (int)MULTI_ARG_2_DF_CMP },
21037 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungesd", IX86_BUILTIN_COMUGESD, UNGE, (int)MULTI_ARG_2_DF_CMP },
21038 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comordsd", IX86_BUILTIN_COMORDSD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
21039 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunordsd", IX86_BUILTIN_COMUNORDSD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
21041 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comeqps", IX86_BUILTIN_COMEQPS, EQ, (int)MULTI_ARG_2_SF_CMP },
21042 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
21043 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneqps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
21044 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comltps", IX86_BUILTIN_COMLTPS, LT, (int)MULTI_ARG_2_SF_CMP },
21045 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comleps", IX86_BUILTIN_COMLEPS, LE, (int)MULTI_ARG_2_SF_CMP },
21046 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgtps", IX86_BUILTIN_COMGTPS, GT, (int)MULTI_ARG_2_SF_CMP },
21047 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgeps", IX86_BUILTIN_COMGEPS, GE, (int)MULTI_ARG_2_SF_CMP },
21048 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comueqps", IX86_BUILTIN_COMUEQPS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
21049 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21050 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneqps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21051 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunltps", IX86_BUILTIN_COMULTPS, UNLT, (int)MULTI_ARG_2_SF_CMP },
21052 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunleps", IX86_BUILTIN_COMULEPS, UNLE, (int)MULTI_ARG_2_SF_CMP },
21053 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungtps", IX86_BUILTIN_COMUGTPS, UNGT, (int)MULTI_ARG_2_SF_CMP },
21054 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungeps", IX86_BUILTIN_COMUGEPS, UNGE, (int)MULTI_ARG_2_SF_CMP },
21055 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comordps", IX86_BUILTIN_COMORDPS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
21056 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunordps", IX86_BUILTIN_COMUNORDPS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
21058 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comeqpd", IX86_BUILTIN_COMEQPD, EQ, (int)MULTI_ARG_2_DF_CMP },
21059 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comnepd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
21060 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comneqpd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
21061 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comltpd", IX86_BUILTIN_COMLTPD, LT, (int)MULTI_ARG_2_DF_CMP },
21062 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comlepd", IX86_BUILTIN_COMLEPD, LE, (int)MULTI_ARG_2_DF_CMP },
21063 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgtpd", IX86_BUILTIN_COMGTPD, GT, (int)MULTI_ARG_2_DF_CMP },
21064 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgepd", IX86_BUILTIN_COMGEPD, GE, (int)MULTI_ARG_2_DF_CMP },
21065 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comueqpd", IX86_BUILTIN_COMUEQPD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
21066 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunepd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21067 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comuneqpd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21068 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunltpd", IX86_BUILTIN_COMULTPD, UNLT, (int)MULTI_ARG_2_DF_CMP },
21069 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunlepd", IX86_BUILTIN_COMULEPD, UNLE, (int)MULTI_ARG_2_DF_CMP },
21070 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungtpd", IX86_BUILTIN_COMUGTPD, UNGT, (int)MULTI_ARG_2_DF_CMP },
21071 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungepd", IX86_BUILTIN_COMUGEPD, UNGE, (int)MULTI_ARG_2_DF_CMP },
21072 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comordpd", IX86_BUILTIN_COMORDPD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
21073 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunordpd", IX86_BUILTIN_COMUNORDPD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
21075 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomeqb", IX86_BUILTIN_PCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
21076 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
21077 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneqb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
21078 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomltb", IX86_BUILTIN_PCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
21079 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomleb", IX86_BUILTIN_PCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
21080 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgtb", IX86_BUILTIN_PCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
21081 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgeb", IX86_BUILTIN_PCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
21083 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomeqw", IX86_BUILTIN_PCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
21084 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomnew", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
21085 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomneqw", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
21086 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomltw", IX86_BUILTIN_PCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
21087 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomlew", IX86_BUILTIN_PCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
21088 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgtw", IX86_BUILTIN_PCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
21089 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgew", IX86_BUILTIN_PCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
21091 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomeqd", IX86_BUILTIN_PCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
21092 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomned", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
21093 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomneqd", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
21094 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomltd", IX86_BUILTIN_PCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
21095 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomled", IX86_BUILTIN_PCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
21096 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomgtd", IX86_BUILTIN_PCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
21097 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomged", IX86_BUILTIN_PCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
21099 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomeqq", IX86_BUILTIN_PCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
21100 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
21101 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneqq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
21102 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomltq", IX86_BUILTIN_PCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
21103 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomleq", IX86_BUILTIN_PCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
21104 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgtq", IX86_BUILTIN_PCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
21105 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgeq", IX86_BUILTIN_PCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
21107 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomequb", IX86_BUILTIN_PCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
21108 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomneub", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
21109 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomnequb", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
21110 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomltub", IX86_BUILTIN_PCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
21111 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomleub", IX86_BUILTIN_PCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
21112 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgtub", IX86_BUILTIN_PCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
21113 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgeub", IX86_BUILTIN_PCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
21115 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomequw", IX86_BUILTIN_PCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
21116 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomneuw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
21117 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomnequw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
21118 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomltuw", IX86_BUILTIN_PCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
21119 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomleuw", IX86_BUILTIN_PCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
21120 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgtuw", IX86_BUILTIN_PCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
21121 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgeuw", IX86_BUILTIN_PCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
21123 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomequd", IX86_BUILTIN_PCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
21124 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomneud", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
21125 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomnequd", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
21126 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomltud", IX86_BUILTIN_PCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
21127 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomleud", IX86_BUILTIN_PCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
21128 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgtud", IX86_BUILTIN_PCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
21129 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgeud", IX86_BUILTIN_PCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
21131 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomequq", IX86_BUILTIN_PCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
21132 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomneuq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
21133 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomnequq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
21134 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomltuq", IX86_BUILTIN_PCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
21135 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomleuq", IX86_BUILTIN_PCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
21136 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgtuq", IX86_BUILTIN_PCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
21137 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgeuq", IX86_BUILTIN_PCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
21139 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalsess", IX86_BUILTIN_COMFALSESS, COM_FALSE_S, (int)MULTI_ARG_2_SF_TF },
21140 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtruess", IX86_BUILTIN_COMTRUESS, COM_TRUE_S, (int)MULTI_ARG_2_SF_TF },
21141 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalseps", IX86_BUILTIN_COMFALSEPS, COM_FALSE_P, (int)MULTI_ARG_2_SF_TF },
21142 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtrueps", IX86_BUILTIN_COMTRUEPS, COM_TRUE_P, (int)MULTI_ARG_2_SF_TF },
21143 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsesd", IX86_BUILTIN_COMFALSESD, COM_FALSE_S, (int)MULTI_ARG_2_DF_TF },
21144 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruesd", IX86_BUILTIN_COMTRUESD, COM_TRUE_S, (int)MULTI_ARG_2_DF_TF },
21145 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsepd", IX86_BUILTIN_COMFALSEPD, COM_FALSE_P, (int)MULTI_ARG_2_DF_TF },
21146 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruepd", IX86_BUILTIN_COMTRUEPD, COM_TRUE_P, (int)MULTI_ARG_2_DF_TF },
21148 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseb", IX86_BUILTIN_PCOMFALSEB, PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
21149 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalsew", IX86_BUILTIN_PCOMFALSEW, PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
21150 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalsed", IX86_BUILTIN_PCOMFALSED, PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
21151 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseq", IX86_BUILTIN_PCOMFALSEQ, PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
21152 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseub",IX86_BUILTIN_PCOMFALSEUB,PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
21153 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalseuw",IX86_BUILTIN_PCOMFALSEUW,PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
21154 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalseud",IX86_BUILTIN_PCOMFALSEUD,PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
21155 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseuq",IX86_BUILTIN_PCOMFALSEUQ,PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
21157 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueb", IX86_BUILTIN_PCOMTRUEB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
21158 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtruew", IX86_BUILTIN_PCOMTRUEW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
21159 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrued", IX86_BUILTIN_PCOMTRUED, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
21160 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueq", IX86_BUILTIN_PCOMTRUEQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
21161 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueub", IX86_BUILTIN_PCOMTRUEUB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
21162 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtrueuw", IX86_BUILTIN_PCOMTRUEUW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
21163 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrueud", IX86_BUILTIN_PCOMTRUEUD, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
21164 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueuq", IX86_BUILTIN_PCOMTRUEUQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
21166 };
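/* Reader's note (added, not from the GCC sources): each row in the
   bdesc_multi_arg table above bundles everything needed to register one
   builtin: the ISA mask that must be enabled, the insn pattern that
   implements it (CODE_FOR_*), the user-visible name, its IX86_BUILTIN_*
   code, an optional sub-code (an RTX comparison such as EQ or UNLT, or a
   COM_*/PCOM_* constant), and a MULTI_ARG_* value describing the argument
   signature used when the builtin is expanded.  */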
21167 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
21168    not in the current target ISA, so that the user can compile particular
21169    modules with target-specific options that differ from the command-line
21170    options. */
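/* Illustrative sketch (added note, not from the GCC sources): with all
   builtins registered up front, one translation unit of a program can be
   built with extra ISA flags while the rest use the defaults, e.g.

       gcc -c -O2         generic.c
       gcc -c -O2 -msse5  kernel.c     (kernel.c may call, say,
                                        __builtin_ia32_frczps)

   and the SSE5 builtin is usable in kernel.c even though -msse5 was never
   given globally; the ISA mask recorded with each builtin is what allows
   the compiler to diagnose calls the selected ISA cannot support.  */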
21171 static void
21172 ix86_init_mmx_sse_builtins (void)
21173 {
21174 const struct builtin_description * d;
21175 size_t i;
21177 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
21178 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
21179 tree V1DI_type_node
21180 = build_vector_type_for_mode (long_long_integer_type_node, V1DImode);
21181 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
21182 tree V2DI_type_node
21183 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
21184 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
21185 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
21186 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
21187 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
21188 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
21189 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
21191 tree pchar_type_node = build_pointer_type (char_type_node);
21192 tree pcchar_type_node
21193 = build_pointer_type (build_type_variant (char_type_node, 1, 0));
21194 tree pfloat_type_node = build_pointer_type (float_type_node);
21195 tree pcfloat_type_node
21196 = build_pointer_type (build_type_variant (float_type_node, 1, 0));
21197 tree pv2sf_type_node = build_pointer_type (V2SF_type_node);
21198 tree pcv2sf_type_node
21199 = build_pointer_type (build_type_variant (V2SF_type_node, 1, 0));
21200 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
21201 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
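/* Naming note (added): the local trees that follow use the pattern
   <ret>_ftype_<arg1>_<arg2>...; e.g. v4si_ftype_v4sf_v4sf below is the
   FUNCTION_TYPE of a builtin returning a V4SI vector and taking two V4SF
   vectors, built with build_function_type_list and terminated by
   NULL_TREE.  */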
21203 /* Comparisons. */
21204 tree int_ftype_v4sf_v4sf
21205 = build_function_type_list (integer_type_node,
21206 V4SF_type_node, V4SF_type_node, NULL_TREE);
21207 tree v4si_ftype_v4sf_v4sf
21208 = build_function_type_list (V4SI_type_node,
21209 V4SF_type_node, V4SF_type_node, NULL_TREE);
21210 /* MMX/SSE/integer conversions. */
21211 tree int_ftype_v4sf
21212 = build_function_type_list (integer_type_node,
21213 V4SF_type_node, NULL_TREE);
21214 tree int64_ftype_v4sf
21215 = build_function_type_list (long_long_integer_type_node,
21216 V4SF_type_node, NULL_TREE);
21217 tree int_ftype_v8qi
21218 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
21219 tree v4sf_ftype_v4sf_int
21220 = build_function_type_list (V4SF_type_node,
21221 V4SF_type_node, integer_type_node, NULL_TREE);
21222 tree v4sf_ftype_v4sf_int64
21223 = build_function_type_list (V4SF_type_node,
21224 V4SF_type_node, long_long_integer_type_node,
21225 NULL_TREE);
21226 tree v4sf_ftype_v4sf_v2si
21227 = build_function_type_list (V4SF_type_node,
21228 V4SF_type_node, V2SI_type_node, NULL_TREE);
21230 /* Miscellaneous. */
21231 tree v8qi_ftype_v4hi_v4hi
21232 = build_function_type_list (V8QI_type_node,
21233 V4HI_type_node, V4HI_type_node, NULL_TREE);
21234 tree v4hi_ftype_v2si_v2si
21235 = build_function_type_list (V4HI_type_node,
21236 V2SI_type_node, V2SI_type_node, NULL_TREE);
21237 tree v4sf_ftype_v4sf_v4sf_int
21238 = build_function_type_list (V4SF_type_node,
21239 V4SF_type_node, V4SF_type_node,
21240 integer_type_node, NULL_TREE);
21241 tree v2si_ftype_v4hi_v4hi
21242 = build_function_type_list (V2SI_type_node,
21243 V4HI_type_node, V4HI_type_node, NULL_TREE);
21244 tree v4hi_ftype_v4hi_int
21245 = build_function_type_list (V4HI_type_node,
21246 V4HI_type_node, integer_type_node, NULL_TREE);
21247 tree v2si_ftype_v2si_int
21248 = build_function_type_list (V2SI_type_node,
21249 V2SI_type_node, integer_type_node, NULL_TREE);
21250 tree v1di_ftype_v1di_int
21251 = build_function_type_list (V1DI_type_node,
21252 V1DI_type_node, integer_type_node, NULL_TREE);
21254 tree void_ftype_void
21255 = build_function_type (void_type_node, void_list_node);
21256 tree void_ftype_unsigned
21257 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
21258 tree void_ftype_unsigned_unsigned
21259 = build_function_type_list (void_type_node, unsigned_type_node,
21260 unsigned_type_node, NULL_TREE);
21261 tree void_ftype_pcvoid_unsigned_unsigned
21262 = build_function_type_list (void_type_node, const_ptr_type_node,
21263 unsigned_type_node, unsigned_type_node,
21264 NULL_TREE);
21265 tree unsigned_ftype_void
21266 = build_function_type (unsigned_type_node, void_list_node);
21267 tree v2si_ftype_v4sf
21268 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
21269 /* Loads/stores. */
21270 tree void_ftype_v8qi_v8qi_pchar
21271 = build_function_type_list (void_type_node,
21272 V8QI_type_node, V8QI_type_node,
21273 pchar_type_node, NULL_TREE);
21274 tree v4sf_ftype_pcfloat
21275 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
21276 tree v4sf_ftype_v4sf_pcv2sf
21277 = build_function_type_list (V4SF_type_node,
21278 V4SF_type_node, pcv2sf_type_node, NULL_TREE);
21279 tree void_ftype_pv2sf_v4sf
21280 = build_function_type_list (void_type_node,
21281 pv2sf_type_node, V4SF_type_node, NULL_TREE);
21282 tree void_ftype_pfloat_v4sf
21283 = build_function_type_list (void_type_node,
21284 pfloat_type_node, V4SF_type_node, NULL_TREE);
21285 tree void_ftype_pdi_di
21286 = build_function_type_list (void_type_node,
21287 pdi_type_node, long_long_unsigned_type_node,
21288 NULL_TREE);
21289 tree void_ftype_pv2di_v2di
21290 = build_function_type_list (void_type_node,
21291 pv2di_type_node, V2DI_type_node, NULL_TREE);
21292 /* Normal vector unops. */
21293 tree v4sf_ftype_v4sf
21294 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
21295 tree v16qi_ftype_v16qi
21296 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
21297 tree v8hi_ftype_v8hi
21298 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
21299 tree v4si_ftype_v4si
21300 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
21301 tree v8qi_ftype_v8qi
21302 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
21303 tree v4hi_ftype_v4hi
21304 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
21306 /* Normal vector binops. */
21307 tree v4sf_ftype_v4sf_v4sf
21308 = build_function_type_list (V4SF_type_node,
21309 V4SF_type_node, V4SF_type_node, NULL_TREE);
21310 tree v8qi_ftype_v8qi_v8qi
21311 = build_function_type_list (V8QI_type_node,
21312 V8QI_type_node, V8QI_type_node, NULL_TREE);
21313 tree v4hi_ftype_v4hi_v4hi
21314 = build_function_type_list (V4HI_type_node,
21315 V4HI_type_node, V4HI_type_node, NULL_TREE);
21316 tree v2si_ftype_v2si_v2si
21317 = build_function_type_list (V2SI_type_node,
21318 V2SI_type_node, V2SI_type_node, NULL_TREE);
21319 tree v1di_ftype_v1di_v1di
21320 = build_function_type_list (V1DI_type_node,
21321 V1DI_type_node, V1DI_type_node, NULL_TREE);
21322 tree v1di_ftype_v1di_v1di_int
21323 = build_function_type_list (V1DI_type_node,
21324 V1DI_type_node, V1DI_type_node,
21325 integer_type_node, NULL_TREE);
21326 tree v2si_ftype_v2sf
21327 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
21328 tree v2sf_ftype_v2si
21329 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
21330 tree v2si_ftype_v2si
21331 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
21332 tree v2sf_ftype_v2sf
21333 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
21334 tree v2sf_ftype_v2sf_v2sf
21335 = build_function_type_list (V2SF_type_node,
21336 V2SF_type_node, V2SF_type_node, NULL_TREE);
21337 tree v2si_ftype_v2sf_v2sf
21338 = build_function_type_list (V2SI_type_node,
21339 V2SF_type_node, V2SF_type_node, NULL_TREE);
21340 tree pint_type_node = build_pointer_type (integer_type_node);
21341 tree pdouble_type_node = build_pointer_type (double_type_node);
21342 tree pcdouble_type_node = build_pointer_type (
21343 build_type_variant (double_type_node, 1, 0));
21344 tree int_ftype_v2df_v2df
21345 = build_function_type_list (integer_type_node,
21346 V2DF_type_node, V2DF_type_node, NULL_TREE);
21348 tree void_ftype_pcvoid
21349 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
21350 tree v4sf_ftype_v4si
21351 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
21352 tree v4si_ftype_v4sf
21353 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
21354 tree v2df_ftype_v4si
21355 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
21356 tree v4si_ftype_v2df
21357 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
21358 tree v4si_ftype_v2df_v2df
21359 = build_function_type_list (V4SI_type_node,
21360 V2DF_type_node, V2DF_type_node, NULL_TREE);
21361 tree v2si_ftype_v2df
21362 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
21363 tree v4sf_ftype_v2df
21364 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
21365 tree v2df_ftype_v2si
21366 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
21367 tree v2df_ftype_v4sf
21368 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
21369 tree int_ftype_v2df
21370 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
21371 tree int64_ftype_v2df
21372 = build_function_type_list (long_long_integer_type_node,
21373 V2DF_type_node, NULL_TREE);
21374 tree v2df_ftype_v2df_int
21375 = build_function_type_list (V2DF_type_node,
21376 V2DF_type_node, integer_type_node, NULL_TREE);
21377 tree v2df_ftype_v2df_int64
21378 = build_function_type_list (V2DF_type_node,
21379 V2DF_type_node, long_long_integer_type_node,
21380 NULL_TREE);
21381 tree v4sf_ftype_v4sf_v2df
21382 = build_function_type_list (V4SF_type_node,
21383 V4SF_type_node, V2DF_type_node, NULL_TREE);
21384 tree v2df_ftype_v2df_v4sf
21385 = build_function_type_list (V2DF_type_node,
21386 V2DF_type_node, V4SF_type_node, NULL_TREE);
21387 tree v2df_ftype_v2df_v2df_int
21388 = build_function_type_list (V2DF_type_node,
21389 V2DF_type_node, V2DF_type_node,
21390 integer_type_node,
21391 NULL_TREE);
21392 tree v2df_ftype_v2df_pcdouble
21393 = build_function_type_list (V2DF_type_node,
21394 V2DF_type_node, pcdouble_type_node, NULL_TREE);
21395 tree void_ftype_pdouble_v2df
21396 = build_function_type_list (void_type_node,
21397 pdouble_type_node, V2DF_type_node, NULL_TREE);
21398 tree void_ftype_pint_int
21399 = build_function_type_list (void_type_node,
21400 pint_type_node, integer_type_node, NULL_TREE);
21401 tree void_ftype_v16qi_v16qi_pchar
21402 = build_function_type_list (void_type_node,
21403 V16QI_type_node, V16QI_type_node,
21404 pchar_type_node, NULL_TREE);
21405 tree v2df_ftype_pcdouble
21406 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
21407 tree v2df_ftype_v2df_v2df
21408 = build_function_type_list (V2DF_type_node,
21409 V2DF_type_node, V2DF_type_node, NULL_TREE);
21410 tree v16qi_ftype_v16qi_v16qi
21411 = build_function_type_list (V16QI_type_node,
21412 V16QI_type_node, V16QI_type_node, NULL_TREE);
21413 tree v8hi_ftype_v8hi_v8hi
21414 = build_function_type_list (V8HI_type_node,
21415 V8HI_type_node, V8HI_type_node, NULL_TREE);
21416 tree v4si_ftype_v4si_v4si
21417 = build_function_type_list (V4SI_type_node,
21418 V4SI_type_node, V4SI_type_node, NULL_TREE);
21419 tree v2di_ftype_v2di_v2di
21420 = build_function_type_list (V2DI_type_node,
21421 V2DI_type_node, V2DI_type_node, NULL_TREE);
21422 tree v2di_ftype_v2df_v2df
21423 = build_function_type_list (V2DI_type_node,
21424 V2DF_type_node, V2DF_type_node, NULL_TREE);
21425 tree v2df_ftype_v2df
21426 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
21427 tree v2di_ftype_v2di_int
21428 = build_function_type_list (V2DI_type_node,
21429 V2DI_type_node, integer_type_node, NULL_TREE);
21430 tree v2di_ftype_v2di_v2di_int
21431 = build_function_type_list (V2DI_type_node, V2DI_type_node,
21432 V2DI_type_node, integer_type_node, NULL_TREE);
21433 tree v4si_ftype_v4si_int
21434 = build_function_type_list (V4SI_type_node,
21435 V4SI_type_node, integer_type_node, NULL_TREE);
21436 tree v8hi_ftype_v8hi_int
21437 = build_function_type_list (V8HI_type_node,
21438 V8HI_type_node, integer_type_node, NULL_TREE);
21439 tree v4si_ftype_v8hi_v8hi
21440 = build_function_type_list (V4SI_type_node,
21441 V8HI_type_node, V8HI_type_node, NULL_TREE);
21442 tree v1di_ftype_v8qi_v8qi
21443 = build_function_type_list (V1DI_type_node,
21444 V8QI_type_node, V8QI_type_node, NULL_TREE);
21445 tree v1di_ftype_v2si_v2si
21446 = build_function_type_list (V1DI_type_node,
21447 V2SI_type_node, V2SI_type_node, NULL_TREE);
21448 tree v2di_ftype_v16qi_v16qi
21449 = build_function_type_list (V2DI_type_node,
21450 V16QI_type_node, V16QI_type_node, NULL_TREE);
21451 tree v2di_ftype_v4si_v4si
21452 = build_function_type_list (V2DI_type_node,
21453 V4SI_type_node, V4SI_type_node, NULL_TREE);
21454 tree int_ftype_v16qi
21455 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
21456 tree v16qi_ftype_pcchar
21457 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
21458 tree void_ftype_pchar_v16qi
21459 = build_function_type_list (void_type_node,
21460 pchar_type_node, V16QI_type_node, NULL_TREE);
21462 tree v2di_ftype_v2di_unsigned_unsigned
21463 = build_function_type_list (V2DI_type_node, V2DI_type_node,
21464 unsigned_type_node, unsigned_type_node,
21465 NULL_TREE);
21466 tree v2di_ftype_v2di_v2di_unsigned_unsigned
21467 = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
21468 unsigned_type_node, unsigned_type_node,
21469 NULL_TREE);
21470 tree v2di_ftype_v2di_v16qi
21471 = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
21472 NULL_TREE);
21473 tree v2df_ftype_v2df_v2df_v2df
21474 = build_function_type_list (V2DF_type_node,
21475 V2DF_type_node, V2DF_type_node,
21476 V2DF_type_node, NULL_TREE);
21477 tree v4sf_ftype_v4sf_v4sf_v4sf
21478 = build_function_type_list (V4SF_type_node,
21479 V4SF_type_node, V4SF_type_node,
21480 V4SF_type_node, NULL_TREE);
21481 tree v8hi_ftype_v16qi
21482 = build_function_type_list (V8HI_type_node, V16QI_type_node,
21483 NULL_TREE);
21484 tree v4si_ftype_v16qi
21485 = build_function_type_list (V4SI_type_node, V16QI_type_node,
21486 NULL_TREE);
21487 tree v2di_ftype_v16qi
21488 = build_function_type_list (V2DI_type_node, V16QI_type_node,
21489 NULL_TREE);
21490 tree v4si_ftype_v8hi
21491 = build_function_type_list (V4SI_type_node, V8HI_type_node,
21492 NULL_TREE);
21493 tree v2di_ftype_v8hi
21494 = build_function_type_list (V2DI_type_node, V8HI_type_node,
21495 NULL_TREE);
21496 tree v2di_ftype_v4si
21497 = build_function_type_list (V2DI_type_node, V4SI_type_node,
21498 NULL_TREE);
21499 tree v2di_ftype_pv2di
21500 = build_function_type_list (V2DI_type_node, pv2di_type_node,
21501 NULL_TREE);
21502 tree v16qi_ftype_v16qi_v16qi_int
21503 = build_function_type_list (V16QI_type_node, V16QI_type_node,
21504 V16QI_type_node, integer_type_node,
21505 NULL_TREE);
21506 tree v16qi_ftype_v16qi_v16qi_v16qi
21507 = build_function_type_list (V16QI_type_node, V16QI_type_node,
21508 V16QI_type_node, V16QI_type_node,
21509 NULL_TREE);
21510 tree v8hi_ftype_v8hi_v8hi_int
21511 = build_function_type_list (V8HI_type_node, V8HI_type_node,
21512 V8HI_type_node, integer_type_node,
21513 NULL_TREE);
21514 tree v4si_ftype_v4si_v4si_int
21515 = build_function_type_list (V4SI_type_node, V4SI_type_node,
21516 V4SI_type_node, integer_type_node,
21517 NULL_TREE);
21518 tree int_ftype_v2di_v2di
21519 = build_function_type_list (integer_type_node,
21520 V2DI_type_node, V2DI_type_node,
21521 NULL_TREE);
21522 tree int_ftype_v16qi_int_v16qi_int_int
21523 = build_function_type_list (integer_type_node,
21524 V16QI_type_node,
21525 integer_type_node,
21526 V16QI_type_node,
21527 integer_type_node,
21528 integer_type_node,
21529 NULL_TREE);
21530 tree v16qi_ftype_v16qi_int_v16qi_int_int
21531 = build_function_type_list (V16QI_type_node,
21532 V16QI_type_node,
21533 integer_type_node,
21534 V16QI_type_node,
21535 integer_type_node,
21536 integer_type_node,
21537 NULL_TREE);
21538 tree int_ftype_v16qi_v16qi_int
21539 = build_function_type_list (integer_type_node,
21540 V16QI_type_node,
21541 V16QI_type_node,
21542 integer_type_node,
21543 NULL_TREE);
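/* Added note: the signatures just above, pairing each V16QI operand with
   an explicit int length and ending in an immediate, presumably serve the
   SSE4.2 string-compare builtins (__builtin_ia32_pcmpestri128 and
   friends), with the shorter int_ftype_v16qi_v16qi_int form covering the
   implicit-length __builtin_ia32_pcmpistri128 variants.  */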
21545 /* SSE5 instructions */
21546 tree v2di_ftype_v2di_v2di_v2di
21547 = build_function_type_list (V2DI_type_node,
21548 V2DI_type_node,
21549 V2DI_type_node,
21550 V2DI_type_node,
21551 NULL_TREE);
21553 tree v4si_ftype_v4si_v4si_v4si
21554 = build_function_type_list (V4SI_type_node,
21555 V4SI_type_node,
21556 V4SI_type_node,
21557 V4SI_type_node,
21558 NULL_TREE);
21560 tree v4si_ftype_v4si_v4si_v2di
21561 = build_function_type_list (V4SI_type_node,
21562 V4SI_type_node,
21563 V4SI_type_node,
21564 V2DI_type_node,
21565 NULL_TREE);
21567 tree v8hi_ftype_v8hi_v8hi_v8hi
21568 = build_function_type_list (V8HI_type_node,
21569 V8HI_type_node,
21570 V8HI_type_node,
21571 V8HI_type_node,
21572 NULL_TREE);
21574 tree v8hi_ftype_v8hi_v8hi_v4si
21575 = build_function_type_list (V8HI_type_node,
21576 V8HI_type_node,
21577 V8HI_type_node,
21578 V4SI_type_node,
21579 NULL_TREE);
21581 tree v2df_ftype_v2df_v2df_v16qi
21582 = build_function_type_list (V2DF_type_node,
21583 V2DF_type_node,
21584 V2DF_type_node,
21585 V16QI_type_node,
21586 NULL_TREE);
21588 tree v4sf_ftype_v4sf_v4sf_v16qi
21589 = build_function_type_list (V4SF_type_node,
21590 V4SF_type_node,
21591 V4SF_type_node,
21592 V16QI_type_node,
21593 NULL_TREE);
21595 tree v2di_ftype_v2di_si
21596 = build_function_type_list (V2DI_type_node,
21597 V2DI_type_node,
21598 integer_type_node,
21599 NULL_TREE);
21601 tree v4si_ftype_v4si_si
21602 = build_function_type_list (V4SI_type_node,
21603 V4SI_type_node,
21604 integer_type_node,
21605 NULL_TREE);
21607 tree v8hi_ftype_v8hi_si
21608 = build_function_type_list (V8HI_type_node,
21609 V8HI_type_node,
21610 integer_type_node,
21611 NULL_TREE);
21613 tree v16qi_ftype_v16qi_si
21614 = build_function_type_list (V16QI_type_node,
21615 V16QI_type_node,
21616 integer_type_node,
21617 NULL_TREE);
21618 tree v4sf_ftype_v4hi
21619 = build_function_type_list (V4SF_type_node,
21620 V4HI_type_node,
21621 NULL_TREE);
21623 tree v4hi_ftype_v4sf
21624 = build_function_type_list (V4HI_type_node,
21625 V4SF_type_node,
21626 NULL_TREE);
21628 tree v2di_ftype_v2di
21629 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
21631 tree v16qi_ftype_v8hi_v8hi
21632 = build_function_type_list (V16QI_type_node,
21633 V8HI_type_node, V8HI_type_node,
21634 NULL_TREE);
21635 tree v8hi_ftype_v4si_v4si
21636 = build_function_type_list (V8HI_type_node,
21637 V4SI_type_node, V4SI_type_node,
21638 NULL_TREE);
21639 tree v8hi_ftype_v16qi_v16qi
21640 = build_function_type_list (V8HI_type_node,
21641 V16QI_type_node, V16QI_type_node,
21642 NULL_TREE);
21643 tree v4hi_ftype_v8qi_v8qi
21644 = build_function_type_list (V4HI_type_node,
21645 V8QI_type_node, V8QI_type_node,
21646 NULL_TREE);
21647 tree unsigned_ftype_unsigned_uchar
21648 = build_function_type_list (unsigned_type_node,
21649 unsigned_type_node,
21650 unsigned_char_type_node,
21651 NULL_TREE);
21652 tree unsigned_ftype_unsigned_ushort
21653 = build_function_type_list (unsigned_type_node,
21654 unsigned_type_node,
21655 short_unsigned_type_node,
21656 NULL_TREE);
21657 tree unsigned_ftype_unsigned_unsigned
21658 = build_function_type_list (unsigned_type_node,
21659 unsigned_type_node,
21660 unsigned_type_node,
21661 NULL_TREE);
21662 tree uint64_ftype_uint64_uint64
21663 = build_function_type_list (long_long_unsigned_type_node,
21664 long_long_unsigned_type_node,
21665 long_long_unsigned_type_node,
21666 NULL_TREE);
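/* Added note: the unsigned "accumulate" signatures above (unsigned result
   and first operand, with an 8-, 16-, 32- or 64-bit second operand) match
   the SSE4.2 CRC32 builtins, e.g.
   unsigned __builtin_ia32_crc32qi (unsigned, unsigned char).  */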
21667 tree float_ftype_float
21668 = build_function_type_list (float_type_node,
21669 float_type_node,
21670 NULL_TREE);
21672 /* AVX builtins */
21673 tree V32QI_type_node = build_vector_type_for_mode (char_type_node,
21674 V32QImode);
21675 tree V8SI_type_node = build_vector_type_for_mode (intSI_type_node,
21676 V8SImode);
21677 tree V8SF_type_node = build_vector_type_for_mode (float_type_node,
21678 V8SFmode);
21679 tree V4DI_type_node = build_vector_type_for_mode (long_long_integer_type_node,
21680 V4DImode);
21681 tree V4DF_type_node = build_vector_type_for_mode (double_type_node,
21682 V4DFmode);
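/* Added note: these are the 256-bit AVX vector types, each doubling the
   corresponding 128-bit SSE mode (V32QI vs. V16QI, V8SF vs. V4SF, V4DF
   vs. V2DF, and so on), so the AVX signatures below reuse the same
   <ret>_ftype_<args> scheme at twice the width.  */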
21683 tree v8sf_ftype_v8sf
21684 = build_function_type_list (V8SF_type_node,
21685 V8SF_type_node,
21686 NULL_TREE);
21687 tree v8si_ftype_v8sf
21688 = build_function_type_list (V8SI_type_node,
21689 V8SF_type_node,
21690 NULL_TREE);
21691 tree v8sf_ftype_v8si
21692 = build_function_type_list (V8SF_type_node,
21693 V8SI_type_node,
21694 NULL_TREE);
21695 tree v4si_ftype_v4df
21696 = build_function_type_list (V4SI_type_node,
21697 V4DF_type_node,
21698 NULL_TREE);
21699 tree v4df_ftype_v4df
21700 = build_function_type_list (V4DF_type_node,
21701 V4DF_type_node,
21702 NULL_TREE);
21703 tree v4df_ftype_v4si
21704 = build_function_type_list (V4DF_type_node,
21705 V4SI_type_node,
21706 NULL_TREE);
21707 tree v4df_ftype_v4sf
21708 = build_function_type_list (V4DF_type_node,
21709 V4SF_type_node,
21710 NULL_TREE);
21711 tree v4sf_ftype_v4df
21712 = build_function_type_list (V4SF_type_node,
21713 V4DF_type_node,
21714 NULL_TREE);
21715 tree v8sf_ftype_v8sf_v8sf
21716 = build_function_type_list (V8SF_type_node,
21717 V8SF_type_node, V8SF_type_node,
21718 NULL_TREE);
21719 tree v4df_ftype_v4df_v4df
21720 = build_function_type_list (V4DF_type_node,
21721 V4DF_type_node, V4DF_type_node,
21722 NULL_TREE);
21723 tree v8sf_ftype_v8sf_int
21724 = build_function_type_list (V8SF_type_node,
21725 V8SF_type_node, integer_type_node,
21726 NULL_TREE);
21727 tree v4si_ftype_v8si_int
21728 = build_function_type_list (V4SI_type_node,
21729 V8SI_type_node, integer_type_node,
21730 NULL_TREE);
21731 tree v4df_ftype_v4df_int
21732 = build_function_type_list (V4DF_type_node,
21733 V4DF_type_node, integer_type_node,
21734 NULL_TREE);
21735 tree v4sf_ftype_v8sf_int
21736 = build_function_type_list (V4SF_type_node,
21737 V8SF_type_node, integer_type_node,
21738 NULL_TREE);
21739 tree v2df_ftype_v4df_int
21740 = build_function_type_list (V2DF_type_node,
21741 V4DF_type_node, integer_type_node,
21742 NULL_TREE);
21743 tree v8sf_ftype_v8sf_v8sf_int
21744 = build_function_type_list (V8SF_type_node,
21745 V8SF_type_node, V8SF_type_node,
21746 integer_type_node,
21747 NULL_TREE);
21748 tree v8sf_ftype_v8sf_v8sf_v8sf
21749 = build_function_type_list (V8SF_type_node,
21750 V8SF_type_node, V8SF_type_node,
21751 V8SF_type_node,
21752 NULL_TREE);
21753 tree v4df_ftype_v4df_v4df_v4df
21754 = build_function_type_list (V4DF_type_node,
21755 V4DF_type_node, V4DF_type_node,
21756 V4DF_type_node,
21757 NULL_TREE);
21758 tree v8si_ftype_v8si_v8si_int
21759 = build_function_type_list (V8SI_type_node,
21760 V8SI_type_node, V8SI_type_node,
21761 integer_type_node,
21762 NULL_TREE);
21763 tree v4df_ftype_v4df_v4df_int
21764 = build_function_type_list (V4DF_type_node,
21765 V4DF_type_node, V4DF_type_node,
21766 integer_type_node,
21767 NULL_TREE);
21768 tree v8sf_ftype_v8sf_v8sf_v8si_int
21769 = build_function_type_list (V8SF_type_node,
21770 V8SF_type_node, V8SF_type_node,
21771 V8SI_type_node, integer_type_node,
21772 NULL_TREE);
21773 tree v4df_ftype_v4df_v4df_v4di_int
21774 = build_function_type_list (V4DF_type_node,
21775 V4DF_type_node, V4DF_type_node,
21776 V4DI_type_node, integer_type_node,
21777 NULL_TREE);
21778 tree v4sf_ftype_v4sf_v4sf_v4si_int
21779 = build_function_type_list (V4SF_type_node,
21780 V4SF_type_node, V4SF_type_node,
21781 V4SI_type_node, integer_type_node,
21782 NULL_TREE);
21783 tree v2df_ftype_v2df_v2df_v2di_int
21784 = build_function_type_list (V2DF_type_node,
21785 V2DF_type_node, V2DF_type_node,
21786 V2DI_type_node, integer_type_node,
21787 NULL_TREE);
21788 tree v8sf_ftype_pcfloat
21789 = build_function_type_list (V8SF_type_node,
21790 pcfloat_type_node,
21791 NULL_TREE);
21792 tree v4df_ftype_pcdouble
21793 = build_function_type_list (V4DF_type_node,
21794 pcdouble_type_node,
21795 NULL_TREE);
21796 tree pcv4sf_type_node
21797 = build_pointer_type (build_type_variant (V4SF_type_node, 1, 0));
21798 tree pcv2df_type_node
21799 = build_pointer_type (build_type_variant (V2DF_type_node, 1, 0));
21800 tree v8sf_ftype_pcv4sf
21801 = build_function_type_list (V8SF_type_node,
21802 pcv4sf_type_node,
21803 NULL_TREE);
21804 tree v4df_ftype_pcv2df
21805 = build_function_type_list (V4DF_type_node,
21806 pcv2df_type_node,
21807 NULL_TREE);
21808 tree v32qi_ftype_pcchar
21809 = build_function_type_list (V32QI_type_node,
21810 pcchar_type_node,
21811 NULL_TREE);
21812 tree void_ftype_pchar_v32qi
21813 = build_function_type_list (void_type_node,
21814 pchar_type_node, V32QI_type_node,
21815 NULL_TREE);
21816 tree v8si_ftype_v8si_v4si_int
21817 = build_function_type_list (V8SI_type_node,
21818 V8SI_type_node, V4SI_type_node,
21819 integer_type_node,
21820 NULL_TREE);
21821 tree v8sf_ftype_v8sf_v4sf_int
21822 = build_function_type_list (V8SF_type_node,
21823 V8SF_type_node, V4SF_type_node,
21824 integer_type_node,
21825 NULL_TREE);
21826 tree v4df_ftype_v4df_v2df_int
21827 = build_function_type_list (V4DF_type_node,
21828 V4DF_type_node, V2DF_type_node,
21829 integer_type_node,
21830 NULL_TREE);
21831 tree void_ftype_pfloat_v8sf
21832 = build_function_type_list (void_type_node,
21833 pfloat_type_node, V8SF_type_node,
21834 NULL_TREE);
21835 tree void_ftype_pdouble_v4df
21836 = build_function_type_list (void_type_node,
21837 pdouble_type_node, V4DF_type_node,
21838 NULL_TREE);
21839 tree pv8sf_type_node = build_pointer_type (V8SF_type_node);
21840 tree pv4sf_type_node = build_pointer_type (V4SF_type_node);
21841 tree pv4df_type_node = build_pointer_type (V4DF_type_node);
21842 tree pv2df_type_node = build_pointer_type (V2DF_type_node);
21843 tree pcv8sf_type_node
21844 = build_pointer_type (build_type_variant (V8SF_type_node, 1, 0));
21845 tree pcv4df_type_node
21846 = build_pointer_type (build_type_variant (V4DF_type_node, 1, 0));
21847 tree v8sf_ftype_pcv8sf_v8sf
21848 = build_function_type_list (V8SF_type_node,
21849 pcv8sf_type_node, V8SF_type_node,
21850 NULL_TREE);
21851 tree v4df_ftype_pcv4df_v4df
21852 = build_function_type_list (V4DF_type_node,
21853 pcv4df_type_node, V4DF_type_node,
21854 NULL_TREE);
21855 tree v4sf_ftype_pcv4sf_v4sf
21856 = build_function_type_list (V4SF_type_node,
21857 pcv4sf_type_node, V4SF_type_node,
21858 NULL_TREE);
21859 tree v2df_ftype_pcv2df_v2df
21860 = build_function_type_list (V2DF_type_node,
21861 pcv2df_type_node, V2DF_type_node,
21862 NULL_TREE);
21863 tree void_ftype_pv8sf_v8sf_v8sf
21864 = build_function_type_list (void_type_node,
21865 pv8sf_type_node, V8SF_type_node,
21866 V8SF_type_node,
21867 NULL_TREE);
21868 tree void_ftype_pv4df_v4df_v4df
21869 = build_function_type_list (void_type_node,
21870 pv4df_type_node, V4DF_type_node,
21871 V4DF_type_node,
21872 NULL_TREE);
21873 tree void_ftype_pv4sf_v4sf_v4sf
21874 = build_function_type_list (void_type_node,
21875 pv4sf_type_node, V4SF_type_node,
21876 V4SF_type_node,
21877 NULL_TREE);
21878 tree void_ftype_pv2df_v2df_v2df
21879 = build_function_type_list (void_type_node,
21880 pv2df_type_node, V2DF_type_node,
21881 V2DF_type_node,
21882 NULL_TREE);
21883 tree v4df_ftype_v2df
21884 = build_function_type_list (V4DF_type_node,
21885 V2DF_type_node,
21886 NULL_TREE);
21887 tree v8sf_ftype_v4sf
21888 = build_function_type_list (V8SF_type_node,
21889 V4SF_type_node,
21890 NULL_TREE);
21891 tree v8si_ftype_v4si
21892 = build_function_type_list (V8SI_type_node,
21893 V4SI_type_node,
21894 NULL_TREE);
21895 tree v2df_ftype_v4df
21896 = build_function_type_list (V2DF_type_node,
21897 V4DF_type_node,
21898 NULL_TREE);
21899 tree v4sf_ftype_v8sf
21900 = build_function_type_list (V4SF_type_node,
21901 V8SF_type_node,
21902 NULL_TREE);
21903 tree v4si_ftype_v8si
21904 = build_function_type_list (V4SI_type_node,
21905 V8SI_type_node,
21906 NULL_TREE);
21907 tree int_ftype_v4df
21908 = build_function_type_list (integer_type_node,
21909 V4DF_type_node,
21910 NULL_TREE);
21911 tree int_ftype_v8sf
21912 = build_function_type_list (integer_type_node,
21913 V8SF_type_node,
21914 NULL_TREE);
21915 tree int_ftype_v8sf_v8sf
21916 = build_function_type_list (integer_type_node,
21917 V8SF_type_node, V8SF_type_node,
21918 NULL_TREE);
21919 tree int_ftype_v4di_v4di
21920 = build_function_type_list (integer_type_node,
21921 V4DI_type_node, V4DI_type_node,
21922 NULL_TREE);
21923 tree int_ftype_v4df_v4df
21924 = build_function_type_list (integer_type_node,
21925 V4DF_type_node, V4DF_type_node,
21926 NULL_TREE);
21927 tree v8sf_ftype_v8sf_v8si
21928 = build_function_type_list (V8SF_type_node,
21929 V8SF_type_node, V8SI_type_node,
21930 NULL_TREE);
21931 tree v4df_ftype_v4df_v4di
21932 = build_function_type_list (V4DF_type_node,
21933 V4DF_type_node, V4DI_type_node,
21934 NULL_TREE);
21935 tree v4sf_ftype_v4sf_v4si
21936 = build_function_type_list (V4SF_type_node,
21937 V4SF_type_node, V4SI_type_node, NULL_TREE);
21938 tree v2df_ftype_v2df_v2di
21939 = build_function_type_list (V2DF_type_node,
21940 V2DF_type_node, V2DI_type_node, NULL_TREE);
21942 tree ftype;
21944 /* Add all special builtins with variable number of operands. */
21945 for (i = 0, d = bdesc_special_args;
21946 i < ARRAY_SIZE (bdesc_special_args);
21947 i++, d++)
21948     {
21949 tree type;
21951 if (d->name == 0)
21952 continue;
21954 switch ((enum ix86_special_builtin_type) d->flag)
21955 	{
21956 case VOID_FTYPE_VOID:
21957 type = void_ftype_void;
21958 break;
21959 case V32QI_FTYPE_PCCHAR:
21960 type = v32qi_ftype_pcchar;
21961 break;
21962 case V16QI_FTYPE_PCCHAR:
21963 type = v16qi_ftype_pcchar;
21964 break;
21965 case V8SF_FTYPE_PCV4SF:
21966 type = v8sf_ftype_pcv4sf;
21967 break;
21968 case V8SF_FTYPE_PCFLOAT:
21969 type = v8sf_ftype_pcfloat;
21970 break;
21971 case V4DF_FTYPE_PCV2DF:
21972 type = v4df_ftype_pcv2df;
21973 break;
21974 case V4DF_FTYPE_PCDOUBLE:
21975 type = v4df_ftype_pcdouble;
21976 break;
21977 case V4SF_FTYPE_PCFLOAT:
21978 type = v4sf_ftype_pcfloat;
21979 break;
21980 case V2DI_FTYPE_PV2DI:
21981 type = v2di_ftype_pv2di;
21982 break;
21983 case V2DF_FTYPE_PCDOUBLE:
21984 type = v2df_ftype_pcdouble;
21985 break;
21986 case V8SF_FTYPE_PCV8SF_V8SF:
21987 type = v8sf_ftype_pcv8sf_v8sf;
21988 break;
21989 case V4DF_FTYPE_PCV4DF_V4DF:
21990 type = v4df_ftype_pcv4df_v4df;
21991 break;
21992 case V4SF_FTYPE_V4SF_PCV2SF:
21993 type = v4sf_ftype_v4sf_pcv2sf;
21994 break;
21995 case V4SF_FTYPE_PCV4SF_V4SF:
21996 type = v4sf_ftype_pcv4sf_v4sf;
21997 break;
21998 case V2DF_FTYPE_V2DF_PCDOUBLE:
21999 type = v2df_ftype_v2df_pcdouble;
22000 break;
22001 case V2DF_FTYPE_PCV2DF_V2DF:
22002 type = v2df_ftype_pcv2df_v2df;
22003 break;
22004 case VOID_FTYPE_PV2SF_V4SF:
22005 type = void_ftype_pv2sf_v4sf;
22006 break;
22007 case VOID_FTYPE_PV2DI_V2DI:
22008 type = void_ftype_pv2di_v2di;
22009 break;
22010 case VOID_FTYPE_PCHAR_V32QI:
22011 type = void_ftype_pchar_v32qi;
22012 break;
22013 case VOID_FTYPE_PCHAR_V16QI:
22014 type = void_ftype_pchar_v16qi;
22015 break;
22016 case VOID_FTYPE_PFLOAT_V8SF:
22017 type = void_ftype_pfloat_v8sf;
22018 break;
22019 case VOID_FTYPE_PFLOAT_V4SF:
22020 type = void_ftype_pfloat_v4sf;
22021 break;
22022 case VOID_FTYPE_PDOUBLE_V4DF:
22023 type = void_ftype_pdouble_v4df;
22024 break;
22025 case VOID_FTYPE_PDOUBLE_V2DF:
22026 type = void_ftype_pdouble_v2df;
22027 break;
22028 case VOID_FTYPE_PDI_DI:
22029 type = void_ftype_pdi_di;
22030 break;
22031 case VOID_FTYPE_PINT_INT:
22032 type = void_ftype_pint_int;
22033 break;
22034 case VOID_FTYPE_PV8SF_V8SF_V8SF:
22035 type = void_ftype_pv8sf_v8sf_v8sf;
22036 break;
22037 case VOID_FTYPE_PV4DF_V4DF_V4DF:
22038 type = void_ftype_pv4df_v4df_v4df;
22039 break;
22040 case VOID_FTYPE_PV4SF_V4SF_V4SF:
22041 type = void_ftype_pv4sf_v4sf_v4sf;
22042 break;
22043 case VOID_FTYPE_PV2DF_V2DF_V2DF:
22044 type = void_ftype_pv2df_v2df_v2df;
22045 break;
22046 default:
22047 gcc_unreachable ();
22048 	}
22050 def_builtin (d->mask, d->name, type, d->code);
22051     }
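/* Worked example (added): for a table entry whose flag is
   V2DF_FTYPE_PCDOUBLE (an unaligned-load style builtin), the switch above
   picks v2df_ftype_pcdouble, so the call amounts to, in effect,

       def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_loadupd",
                    v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);

   registering a builtin that returns a V2DF loaded from a const double *
   (the concrete mask/name/code triple here is illustrative).  */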
22053 /* Add all builtins with variable number of operands. */
22054 for (i = 0, d = bdesc_args;
22055 i < ARRAY_SIZE (bdesc_args);
22056 i++, d++)
22057     {
22058 tree type;
22060 if (d->name == 0)
22061 continue;
22063 switch ((enum ix86_builtin_type) d->flag)
22064 	{
22065 case FLOAT_FTYPE_FLOAT:
22066 type = float_ftype_float;
22067 break;
22068 case INT_FTYPE_V8SF_V8SF_PTEST:
22069 type = int_ftype_v8sf_v8sf;
22070 break;
22071 case INT_FTYPE_V4DI_V4DI_PTEST:
22072 type = int_ftype_v4di_v4di;
22073 break;
22074 case INT_FTYPE_V4DF_V4DF_PTEST:
22075 type = int_ftype_v4df_v4df;
22076 break;
22077 case INT_FTYPE_V4SF_V4SF_PTEST:
22078 type = int_ftype_v4sf_v4sf;
22079 break;
22080 case INT_FTYPE_V2DI_V2DI_PTEST:
22081 type = int_ftype_v2di_v2di;
22082 break;
22083 case INT_FTYPE_V2DF_V2DF_PTEST:
22084 type = int_ftype_v2df_v2df;
22085 break;
22086 case INT64_FTYPE_V4SF:
22087 type = int64_ftype_v4sf;
22088 break;
22089 case INT64_FTYPE_V2DF:
22090 type = int64_ftype_v2df;
22091 break;
22092 case INT_FTYPE_V16QI:
22093 type = int_ftype_v16qi;
22094 break;
22095 case INT_FTYPE_V8QI:
22096 type = int_ftype_v8qi;
22097 break;
22098 case INT_FTYPE_V8SF:
22099 type = int_ftype_v8sf;
22100 break;
22101 case INT_FTYPE_V4DF:
22102 type = int_ftype_v4df;
22103 break;
22104 case INT_FTYPE_V4SF:
22105 type = int_ftype_v4sf;
22106 break;
22107 case INT_FTYPE_V2DF:
22108 type = int_ftype_v2df;
22109 break;
22110 case V16QI_FTYPE_V16QI:
22111 type = v16qi_ftype_v16qi;
22112 break;
22113 case V8SI_FTYPE_V8SF:
22114 type = v8si_ftype_v8sf;
22115 break;
22116 case V8SI_FTYPE_V4SI:
22117 type = v8si_ftype_v4si;
22118 break;
22119 case V8HI_FTYPE_V8HI:
22120 type = v8hi_ftype_v8hi;
22121 break;
22122 case V8HI_FTYPE_V16QI:
22123 type = v8hi_ftype_v16qi;
22124 break;
22125 case V8QI_FTYPE_V8QI:
22126 type = v8qi_ftype_v8qi;
22127 break;
22128 case V8SF_FTYPE_V8SF:
22129 type = v8sf_ftype_v8sf;
22130 break;
22131 case V8SF_FTYPE_V8SI:
22132 type = v8sf_ftype_v8si;
22133 break;
22134 case V8SF_FTYPE_V4SF:
22135 type = v8sf_ftype_v4sf;
22136 break;
22137 case V4SI_FTYPE_V4DF:
22138 type = v4si_ftype_v4df;
22139 break;
22140 case V4SI_FTYPE_V4SI:
22141 type = v4si_ftype_v4si;
22142 break;
22143 case V4SI_FTYPE_V16QI:
22144 type = v4si_ftype_v16qi;
22145 break;
22146 case V4SI_FTYPE_V8SI:
22147 type = v4si_ftype_v8si;
22148 break;
22149 case V4SI_FTYPE_V8HI:
22150 type = v4si_ftype_v8hi;
22151 break;
22152 case V4SI_FTYPE_V4SF:
22153 type = v4si_ftype_v4sf;
22154 break;
22155 case V4SI_FTYPE_V2DF:
22156 type = v4si_ftype_v2df;
22157 break;
22158 case V4HI_FTYPE_V4HI:
22159 type = v4hi_ftype_v4hi;
22160 break;
22161 case V4DF_FTYPE_V4DF:
22162 type = v4df_ftype_v4df;
22163 break;
22164 case V4DF_FTYPE_V4SI:
22165 type = v4df_ftype_v4si;
22166 break;
22167 case V4DF_FTYPE_V4SF:
22168 type = v4df_ftype_v4sf;
22169 break;
22170 case V4DF_FTYPE_V2DF:
22171 type = v4df_ftype_v2df;
22172 break;
22173 case V4SF_FTYPE_V4SF:
22174 case V4SF_FTYPE_V4SF_VEC_MERGE:
22175 type = v4sf_ftype_v4sf;
22176 break;
22177 case V4SF_FTYPE_V8SF:
22178 type = v4sf_ftype_v8sf;
22179 break;
22180 case V4SF_FTYPE_V4SI:
22181 type = v4sf_ftype_v4si;
22182 break;
22183 case V4SF_FTYPE_V4DF:
22184 type = v4sf_ftype_v4df;
22185 break;
22186 case V4SF_FTYPE_V2DF:
22187 type = v4sf_ftype_v2df;
22188 break;
22189 case V2DI_FTYPE_V2DI:
22190 type = v2di_ftype_v2di;
22191 break;
22192 case V2DI_FTYPE_V16QI:
22193 type = v2di_ftype_v16qi;
22194 break;
22195 case V2DI_FTYPE_V8HI:
22196 type = v2di_ftype_v8hi;
22197 break;
22198 case V2DI_FTYPE_V4SI:
22199 type = v2di_ftype_v4si;
22200 break;
22201 case V2SI_FTYPE_V2SI:
22202 type = v2si_ftype_v2si;
22203 break;
22204 case V2SI_FTYPE_V4SF:
22205 type = v2si_ftype_v4sf;
22206 break;
22207 case V2SI_FTYPE_V2DF:
22208 type = v2si_ftype_v2df;
22209 break;
22210 case V2SI_FTYPE_V2SF:
22211 type = v2si_ftype_v2sf;
22212 break;
22213 case V2DF_FTYPE_V4DF:
22214 type = v2df_ftype_v4df;
22215 break;
22216 case V2DF_FTYPE_V4SF:
22217 type = v2df_ftype_v4sf;
22218 break;
22219 case V2DF_FTYPE_V2DF:
22220 case V2DF_FTYPE_V2DF_VEC_MERGE:
22221 type = v2df_ftype_v2df;
22222 break;
22223 case V2DF_FTYPE_V2SI:
22224 type = v2df_ftype_v2si;
22225 break;
22226 case V2DF_FTYPE_V4SI:
22227 type = v2df_ftype_v4si;
22228 break;
22229 case V2SF_FTYPE_V2SF:
22230 type = v2sf_ftype_v2sf;
22231 break;
22232 case V2SF_FTYPE_V2SI:
22233 type = v2sf_ftype_v2si;
22234 break;
22235 case V16QI_FTYPE_V16QI_V16QI:
22236 type = v16qi_ftype_v16qi_v16qi;
22237 break;
22238 case V16QI_FTYPE_V8HI_V8HI:
22239 type = v16qi_ftype_v8hi_v8hi;
22240 break;
22241 case V8QI_FTYPE_V8QI_V8QI:
22242 type = v8qi_ftype_v8qi_v8qi;
22243 break;
22244 case V8QI_FTYPE_V4HI_V4HI:
22245 type = v8qi_ftype_v4hi_v4hi;
22246 break;
22247 case V8HI_FTYPE_V8HI_V8HI:
22248 case V8HI_FTYPE_V8HI_V8HI_COUNT:
22249 type = v8hi_ftype_v8hi_v8hi;
22250 break;
22251 case V8HI_FTYPE_V16QI_V16QI:
22252 type = v8hi_ftype_v16qi_v16qi;
22253 break;
22254 case V8HI_FTYPE_V4SI_V4SI:
22255 type = v8hi_ftype_v4si_v4si;
22256 break;
22257 case V8HI_FTYPE_V8HI_SI_COUNT:
22258 type = v8hi_ftype_v8hi_int;
22259 break;
22260 case V8SF_FTYPE_V8SF_V8SF:
22261 type = v8sf_ftype_v8sf_v8sf;
22262 break;
22263 case V8SF_FTYPE_V8SF_V8SI:
22264 type = v8sf_ftype_v8sf_v8si;
22265 break;
22266 case V4SI_FTYPE_V4SI_V4SI:
22267 case V4SI_FTYPE_V4SI_V4SI_COUNT:
22268 type = v4si_ftype_v4si_v4si;
22269 break;
22270 case V4SI_FTYPE_V8HI_V8HI:
22271 type = v4si_ftype_v8hi_v8hi;
22272 break;
22273 case V4SI_FTYPE_V4SF_V4SF:
22274 type = v4si_ftype_v4sf_v4sf;
22275 break;
22276 case V4SI_FTYPE_V2DF_V2DF:
22277 type = v4si_ftype_v2df_v2df;
22278 break;
22279 case V4SI_FTYPE_V4SI_SI_COUNT:
22280 type = v4si_ftype_v4si_int;
22281 break;
22282 case V4HI_FTYPE_V4HI_V4HI:
22283 case V4HI_FTYPE_V4HI_V4HI_COUNT:
22284 type = v4hi_ftype_v4hi_v4hi;
22285 break;
22286 case V4HI_FTYPE_V8QI_V8QI:
22287 type = v4hi_ftype_v8qi_v8qi;
22288 break;
22289 case V4HI_FTYPE_V2SI_V2SI:
22290 type = v4hi_ftype_v2si_v2si;
22291 break;
22292 case V4HI_FTYPE_V4HI_SI_COUNT:
22293 type = v4hi_ftype_v4hi_int;
22294 break;
22295 case V4DF_FTYPE_V4DF_V4DF:
22296 type = v4df_ftype_v4df_v4df;
22297 break;
22298 case V4DF_FTYPE_V4DF_V4DI:
22299 type = v4df_ftype_v4df_v4di;
22300 break;
22301 case V4SF_FTYPE_V4SF_V4SF:
22302 case V4SF_FTYPE_V4SF_V4SF_SWAP:
22303 type = v4sf_ftype_v4sf_v4sf;
22304 break;
22305 case V4SF_FTYPE_V4SF_V4SI:
22306 type = v4sf_ftype_v4sf_v4si;
22307 break;
22308 case V4SF_FTYPE_V4SF_V2SI:
22309 type = v4sf_ftype_v4sf_v2si;
22310 break;
22311 case V4SF_FTYPE_V4SF_V2DF:
22312 type = v4sf_ftype_v4sf_v2df;
22313 break;
22314 case V4SF_FTYPE_V4SF_DI:
22315 type = v4sf_ftype_v4sf_int64;
22316 break;
22317 case V4SF_FTYPE_V4SF_SI:
22318 type = v4sf_ftype_v4sf_int;
22319 break;
22320 case V2DI_FTYPE_V2DI_V2DI:
22321 case V2DI_FTYPE_V2DI_V2DI_COUNT:
22322 type = v2di_ftype_v2di_v2di;
22323 break;
22324 case V2DI_FTYPE_V16QI_V16QI:
22325 type = v2di_ftype_v16qi_v16qi;
22326 break;
22327 case V2DI_FTYPE_V4SI_V4SI:
22328 type = v2di_ftype_v4si_v4si;
22329 break;
22330 case V2DI_FTYPE_V2DI_V16QI:
22331 type = v2di_ftype_v2di_v16qi;
22332 break;
22333 case V2DI_FTYPE_V2DF_V2DF:
22334 type = v2di_ftype_v2df_v2df;
22335 break;
22336 case V2DI_FTYPE_V2DI_SI_COUNT:
22337 type = v2di_ftype_v2di_int;
22338 break;
22339 case V2SI_FTYPE_V2SI_V2SI:
22340 case V2SI_FTYPE_V2SI_V2SI_COUNT:
22341 type = v2si_ftype_v2si_v2si;
22342 break;
22343 case V2SI_FTYPE_V4HI_V4HI:
22344 type = v2si_ftype_v4hi_v4hi;
22345 break;
22346 case V2SI_FTYPE_V2SF_V2SF:
22347 type = v2si_ftype_v2sf_v2sf;
22348 break;
22349 case V2SI_FTYPE_V2SI_SI_COUNT:
22350 type = v2si_ftype_v2si_int;
22351 break;
22352 case V2DF_FTYPE_V2DF_V2DF:
22353 case V2DF_FTYPE_V2DF_V2DF_SWAP:
22354 type = v2df_ftype_v2df_v2df;
22355 break;
22356 case V2DF_FTYPE_V2DF_V4SF:
22357 type = v2df_ftype_v2df_v4sf;
22358 break;
22359 case V2DF_FTYPE_V2DF_V2DI:
22360 type = v2df_ftype_v2df_v2di;
22361 break;
22362 case V2DF_FTYPE_V2DF_DI:
22363 type = v2df_ftype_v2df_int64;
22364 break;
22365 case V2DF_FTYPE_V2DF_SI:
22366 type = v2df_ftype_v2df_int;
22367 break;
22368 case V2SF_FTYPE_V2SF_V2SF:
22369 type = v2sf_ftype_v2sf_v2sf;
22370 break;
22371 case V1DI_FTYPE_V1DI_V1DI:
22372 case V1DI_FTYPE_V1DI_V1DI_COUNT:
22373 type = v1di_ftype_v1di_v1di;
22374 break;
22375 case V1DI_FTYPE_V8QI_V8QI:
22376 type = v1di_ftype_v8qi_v8qi;
22377 break;
22378 case V1DI_FTYPE_V2SI_V2SI:
22379 type = v1di_ftype_v2si_v2si;
22380 break;
22381 case V1DI_FTYPE_V1DI_SI_COUNT:
22382 type = v1di_ftype_v1di_int;
22383 break;
22384 case UINT64_FTYPE_UINT64_UINT64:
22385 type = uint64_ftype_uint64_uint64;
22386 break;
22387 case UINT_FTYPE_UINT_UINT:
22388 type = unsigned_ftype_unsigned_unsigned;
22389 break;
22390 case UINT_FTYPE_UINT_USHORT:
22391 type = unsigned_ftype_unsigned_ushort;
22392 break;
22393 case UINT_FTYPE_UINT_UCHAR:
22394 type = unsigned_ftype_unsigned_uchar;
22395 break;
22396 case V8HI_FTYPE_V8HI_INT:
22397 type = v8hi_ftype_v8hi_int;
22398 break;
22399 case V8SF_FTYPE_V8SF_INT:
22400 type = v8sf_ftype_v8sf_int;
22401 break;
22402 case V4SI_FTYPE_V4SI_INT:
22403 type = v4si_ftype_v4si_int;
22404 break;
22405 case V4SI_FTYPE_V8SI_INT:
22406 type = v4si_ftype_v8si_int;
22407 break;
22408 case V4HI_FTYPE_V4HI_INT:
22409 type = v4hi_ftype_v4hi_int;
22410 break;
22411 case V4DF_FTYPE_V4DF_INT:
22412 type = v4df_ftype_v4df_int;
22413 break;
22414 case V4SF_FTYPE_V4SF_INT:
22415 type = v4sf_ftype_v4sf_int;
22416 break;
22417 case V4SF_FTYPE_V8SF_INT:
22418 type = v4sf_ftype_v8sf_int;
22419 break;
22420 case V2DI_FTYPE_V2DI_INT:
22421 case V2DI2TI_FTYPE_V2DI_INT:
22422 type = v2di_ftype_v2di_int;
22423 break;
22424 case V2DF_FTYPE_V2DF_INT:
22425 type = v2df_ftype_v2df_int;
22426 break;
22427 case V2DF_FTYPE_V4DF_INT:
22428 type = v2df_ftype_v4df_int;
22429 break;
22430 case V16QI_FTYPE_V16QI_V16QI_V16QI:
22431 type = v16qi_ftype_v16qi_v16qi_v16qi;
22432 break;
22433 case V8SF_FTYPE_V8SF_V8SF_V8SF:
22434 type = v8sf_ftype_v8sf_v8sf_v8sf;
22435 break;
22436 case V4DF_FTYPE_V4DF_V4DF_V4DF:
22437 type = v4df_ftype_v4df_v4df_v4df;
22438 break;
22439 case V4SF_FTYPE_V4SF_V4SF_V4SF:
22440 type = v4sf_ftype_v4sf_v4sf_v4sf;
22441 break;
22442 case V2DF_FTYPE_V2DF_V2DF_V2DF:
22443 type = v2df_ftype_v2df_v2df_v2df;
22444 break;
22445 case V16QI_FTYPE_V16QI_V16QI_INT:
22446 type = v16qi_ftype_v16qi_v16qi_int;
22447 break;
22448 case V8SI_FTYPE_V8SI_V8SI_INT:
22449 type = v8si_ftype_v8si_v8si_int;
22450 break;
22451 case V8SI_FTYPE_V8SI_V4SI_INT:
22452 type = v8si_ftype_v8si_v4si_int;
22453 break;
22454 case V8HI_FTYPE_V8HI_V8HI_INT:
22455 type = v8hi_ftype_v8hi_v8hi_int;
22456 break;
22457 case V8SF_FTYPE_V8SF_V8SF_INT:
22458 type = v8sf_ftype_v8sf_v8sf_int;
22459 break;
22460 case V8SF_FTYPE_V8SF_V4SF_INT:
22461 type = v8sf_ftype_v8sf_v4sf_int;
22462 break;
22463 case V4SI_FTYPE_V4SI_V4SI_INT:
22464 type = v4si_ftype_v4si_v4si_int;
22465 break;
22466 case V4DF_FTYPE_V4DF_V4DF_INT:
22467 type = v4df_ftype_v4df_v4df_int;
22468 break;
22469 case V4DF_FTYPE_V4DF_V2DF_INT:
22470 type = v4df_ftype_v4df_v2df_int;
22471 break;
22472 case V4SF_FTYPE_V4SF_V4SF_INT:
22473 type = v4sf_ftype_v4sf_v4sf_int;
22474 break;
22475 case V2DI_FTYPE_V2DI_V2DI_INT:
22476 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
22477 type = v2di_ftype_v2di_v2di_int;
22478 break;
22479 case V2DF_FTYPE_V2DF_V2DF_INT:
22480 type = v2df_ftype_v2df_v2df_int;
22481 break;
22482 case V2DI_FTYPE_V2DI_UINT_UINT:
22483 type = v2di_ftype_v2di_unsigned_unsigned;
22484 break;
22485 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
22486 type = v2di_ftype_v2di_v2di_unsigned_unsigned;
22487 break;
22488 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
22489 type = v1di_ftype_v1di_v1di_int;
22490 break;
22491 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
22492 type = v8sf_ftype_v8sf_v8sf_v8si_int;
22493 break;
22494 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
22495 type = v4df_ftype_v4df_v4df_v4di_int;
22496 break;
22497 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
22498 type = v4sf_ftype_v4sf_v4sf_v4si_int;
22499 break;
22500 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
22501 type = v2df_ftype_v2df_v2df_v2di_int;
22502 break;
22503 default:
22504 gcc_unreachable ();
22507 def_builtin_const (d->mask, d->name, type, d->code);
22510 /* pcmpestr[im] insns. */
22511 for (i = 0, d = bdesc_pcmpestr;
22512 i < ARRAY_SIZE (bdesc_pcmpestr);
22513 i++, d++)
22515 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22516 ftype = v16qi_ftype_v16qi_int_v16qi_int_int;
22517 else
22518 ftype = int_ftype_v16qi_int_v16qi_int_int;
22519 def_builtin_const (d->mask, d->name, ftype, d->code);
22522 /* pcmpistr[im] insns. */
22523 for (i = 0, d = bdesc_pcmpistr;
22524 i < ARRAY_SIZE (bdesc_pcmpistr);
22525 i++, d++)
22527 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22528 ftype = v16qi_ftype_v16qi_v16qi_int;
22529 else
22530 ftype = int_ftype_v16qi_v16qi_int;
22531 def_builtin_const (d->mask, d->name, ftype, d->code);
22534 /* comi/ucomi insns. */
22535 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22536 if (d->mask == OPTION_MASK_ISA_SSE2)
22537 def_builtin_const (d->mask, d->name, int_ftype_v2df_v2df, d->code);
22538 else
22539 def_builtin_const (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
22541 /* SSE */
22542 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
22543 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
22545 /* SSE or 3DNow!A */
22546 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
22548 /* SSE2 */
22549 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
22551 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
22552 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
22554 /* SSE3. */
22555 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor", void_ftype_pcvoid_unsigned_unsigned, IX86_BUILTIN_MONITOR);
22556 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait", void_ftype_unsigned_unsigned, IX86_BUILTIN_MWAIT);
22558 /* AES */
22559 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENC128);
22560 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENCLAST128);
22561 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDEC128);
22562 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDECLAST128);
22563 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128", v2di_ftype_v2di, IX86_BUILTIN_AESIMC128);
22564 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128", v2di_ftype_v2di_int, IX86_BUILTIN_AESKEYGENASSIST128);
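/* A sketch of the user-level view of the AES builtins above
   (illustrative, not part of this file): wmmintrin.h wraps each one,
   so a single encryption round compiled with -maes looks like

     #include <wmmintrin.h>
     __m128i round (__m128i state, __m128i key)
     {
       return _mm_aesenc_si128 (state, key);
     }

   which reaches __builtin_ia32_aesenc128 defined just above.  */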
22566 /* PCLMUL */
22567 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PCLMULQDQ128);
22569 /* AVX */
22570 def_builtin (OPTION_MASK_ISA_AVX, "__builtin_ia32_vzeroupper", void_ftype_void,
22571 TARGET_64BIT ? IX86_BUILTIN_VZEROUPPER_REX64 : IX86_BUILTIN_VZEROUPPER);
22573 /* Access to the vec_init patterns. */
22574 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
22575 integer_type_node, NULL_TREE);
22576 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si", ftype, IX86_BUILTIN_VEC_INIT_V2SI);
22578 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
22579 short_integer_type_node,
22580 short_integer_type_node,
22581 short_integer_type_node, NULL_TREE);
22582 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi", ftype, IX86_BUILTIN_VEC_INIT_V4HI);
22584 ftype = build_function_type_list (V8QI_type_node, char_type_node,
22585 char_type_node, char_type_node,
22586 char_type_node, char_type_node,
22587 char_type_node, char_type_node,
22588 char_type_node, NULL_TREE);
22589 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi", ftype, IX86_BUILTIN_VEC_INIT_V8QI);
22591 /* Access to the vec_extract patterns. */
22592 ftype = build_function_type_list (double_type_node, V2DF_type_node,
22593 integer_type_node, NULL_TREE);
22594 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df", ftype, IX86_BUILTIN_VEC_EXT_V2DF);
22596 ftype = build_function_type_list (long_long_integer_type_node,
22597 V2DI_type_node, integer_type_node,
22598 NULL_TREE);
22599 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di", ftype, IX86_BUILTIN_VEC_EXT_V2DI);
22601 ftype = build_function_type_list (float_type_node, V4SF_type_node,
22602 integer_type_node, NULL_TREE);
22603 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf", ftype, IX86_BUILTIN_VEC_EXT_V4SF);
22605 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
22606 integer_type_node, NULL_TREE);
22607 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si", ftype, IX86_BUILTIN_VEC_EXT_V4SI);
22609 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
22610 integer_type_node, NULL_TREE);
22611 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi", ftype, IX86_BUILTIN_VEC_EXT_V8HI);
22613 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
22614 integer_type_node, NULL_TREE);
22615 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_ext_v4hi", ftype, IX86_BUILTIN_VEC_EXT_V4HI);
22617 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
22618 integer_type_node, NULL_TREE);
22619 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si", ftype, IX86_BUILTIN_VEC_EXT_V2SI);
22621 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
22622 integer_type_node, NULL_TREE);
22623 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi", ftype, IX86_BUILTIN_VEC_EXT_V16QI);
22625 /* Access to the vec_set patterns. */
22626 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
22627 intDI_type_node,
22628 integer_type_node, NULL_TREE);
22629 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_vec_set_v2di", ftype, IX86_BUILTIN_VEC_SET_V2DI);
22631 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
22632 float_type_node,
22633 integer_type_node, NULL_TREE);
22634 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf", ftype, IX86_BUILTIN_VEC_SET_V4SF);
22636 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
22637 intSI_type_node,
22638 integer_type_node, NULL_TREE);
22639 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si", ftype, IX86_BUILTIN_VEC_SET_V4SI);
22641 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
22642 intHI_type_node,
22643 integer_type_node, NULL_TREE);
22644 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi", ftype, IX86_BUILTIN_VEC_SET_V8HI);
22646 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
22647 intHI_type_node,
22648 integer_type_node, NULL_TREE);
22649 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_set_v4hi", ftype, IX86_BUILTIN_VEC_SET_V4HI);
22651 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
22652 intQI_type_node,
22653 integer_type_node, NULL_TREE);
22654 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi", ftype, IX86_BUILTIN_VEC_SET_V16QI);
22656 /* Add SSE5 multi-argument instructions.  */
22657 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22659 tree mtype = NULL_TREE;
22661 if (d->name == 0)
22662 continue;
22664 switch ((enum multi_arg_type)d->flag)
22666 case MULTI_ARG_3_SF: mtype = v4sf_ftype_v4sf_v4sf_v4sf; break;
22667 case MULTI_ARG_3_DF: mtype = v2df_ftype_v2df_v2df_v2df; break;
22668 case MULTI_ARG_3_DI: mtype = v2di_ftype_v2di_v2di_v2di; break;
22669 case MULTI_ARG_3_SI: mtype = v4si_ftype_v4si_v4si_v4si; break;
22670 case MULTI_ARG_3_SI_DI: mtype = v4si_ftype_v4si_v4si_v2di; break;
22671 case MULTI_ARG_3_HI: mtype = v8hi_ftype_v8hi_v8hi_v8hi; break;
22672 case MULTI_ARG_3_HI_SI: mtype = v8hi_ftype_v8hi_v8hi_v4si; break;
22673 case MULTI_ARG_3_QI: mtype = v16qi_ftype_v16qi_v16qi_v16qi; break;
22674 case MULTI_ARG_3_PERMPS: mtype = v4sf_ftype_v4sf_v4sf_v16qi; break;
22675 case MULTI_ARG_3_PERMPD: mtype = v2df_ftype_v2df_v2df_v16qi; break;
22676 case MULTI_ARG_2_SF: mtype = v4sf_ftype_v4sf_v4sf; break;
22677 case MULTI_ARG_2_DF: mtype = v2df_ftype_v2df_v2df; break;
22678 case MULTI_ARG_2_DI: mtype = v2di_ftype_v2di_v2di; break;
22679 case MULTI_ARG_2_SI: mtype = v4si_ftype_v4si_v4si; break;
22680 case MULTI_ARG_2_HI: mtype = v8hi_ftype_v8hi_v8hi; break;
22681 case MULTI_ARG_2_QI: mtype = v16qi_ftype_v16qi_v16qi; break;
22682 case MULTI_ARG_2_DI_IMM: mtype = v2di_ftype_v2di_si; break;
22683 case MULTI_ARG_2_SI_IMM: mtype = v4si_ftype_v4si_si; break;
22684 case MULTI_ARG_2_HI_IMM: mtype = v8hi_ftype_v8hi_si; break;
22685 case MULTI_ARG_2_QI_IMM: mtype = v16qi_ftype_v16qi_si; break;
22686 case MULTI_ARG_2_SF_CMP: mtype = v4sf_ftype_v4sf_v4sf; break;
22687 case MULTI_ARG_2_DF_CMP: mtype = v2df_ftype_v2df_v2df; break;
22688 case MULTI_ARG_2_DI_CMP: mtype = v2di_ftype_v2di_v2di; break;
22689 case MULTI_ARG_2_SI_CMP: mtype = v4si_ftype_v4si_v4si; break;
22690 case MULTI_ARG_2_HI_CMP: mtype = v8hi_ftype_v8hi_v8hi; break;
22691 case MULTI_ARG_2_QI_CMP: mtype = v16qi_ftype_v16qi_v16qi; break;
22692 case MULTI_ARG_2_SF_TF: mtype = v4sf_ftype_v4sf_v4sf; break;
22693 case MULTI_ARG_2_DF_TF: mtype = v2df_ftype_v2df_v2df; break;
22694 case MULTI_ARG_2_DI_TF: mtype = v2di_ftype_v2di_v2di; break;
22695 case MULTI_ARG_2_SI_TF: mtype = v4si_ftype_v4si_v4si; break;
22696 case MULTI_ARG_2_HI_TF: mtype = v8hi_ftype_v8hi_v8hi; break;
22697 case MULTI_ARG_2_QI_TF: mtype = v16qi_ftype_v16qi_v16qi; break;
22698 case MULTI_ARG_1_SF: mtype = v4sf_ftype_v4sf; break;
22699 case MULTI_ARG_1_DF: mtype = v2df_ftype_v2df; break;
22700 case MULTI_ARG_1_DI: mtype = v2di_ftype_v2di; break;
22701 case MULTI_ARG_1_SI: mtype = v4si_ftype_v4si; break;
22702 case MULTI_ARG_1_HI: mtype = v8hi_ftype_v8hi; break;
22703 case MULTI_ARG_1_QI: mtype = v16qi_ftype_v16qi; break;
22704 case MULTI_ARG_1_SI_DI: mtype = v2di_ftype_v4si; break;
22705 case MULTI_ARG_1_HI_DI: mtype = v2di_ftype_v8hi; break;
22706 case MULTI_ARG_1_HI_SI: mtype = v4si_ftype_v8hi; break;
22707 case MULTI_ARG_1_QI_DI: mtype = v2di_ftype_v16qi; break;
22708 case MULTI_ARG_1_QI_SI: mtype = v4si_ftype_v16qi; break;
22709 case MULTI_ARG_1_QI_HI: mtype = v8hi_ftype_v16qi; break;
22710 case MULTI_ARG_1_PH2PS: mtype = v4sf_ftype_v4hi; break;
22711 case MULTI_ARG_1_PS2PH: mtype = v4hi_ftype_v4sf; break;
22712 case MULTI_ARG_UNKNOWN:
22713 default:
22714 gcc_unreachable ();
22717 if (mtype)
22718 def_builtin_const (d->mask, d->name, mtype, d->code);
22722 /* Internal subroutine of ix86_init_builtins.  */
22724 static void
22725 ix86_init_builtins_va_builtins_abi (void)
22727 tree ms_va_ref, sysv_va_ref;
22728 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22729 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22730 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22731 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22733 if (!TARGET_64BIT)
22734 return;
22735 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22736 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22737 ms_va_ref = build_reference_type (ms_va_list_type_node);
22738 sysv_va_ref =
22739 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22741 fnvoid_va_end_ms =
22742 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22743 fnvoid_va_start_ms =
22744 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22745 fnvoid_va_end_sysv =
22746 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22747 fnvoid_va_start_sysv =
22748 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22749 NULL_TREE);
22750 fnvoid_va_copy_ms =
22751 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22752 NULL_TREE);
22753 fnvoid_va_copy_sysv =
22754 build_function_type_list (void_type_node, sysv_va_ref,
22755 sysv_va_ref, NULL_TREE);
22757 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22758 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22759 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22760 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
22761 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
22762 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
22763 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
22764 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22765 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
22766 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22767 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
22768 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
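/* A minimal sketch of what the builtins above enable (illustrative,
   assuming a 64-bit target): an ms_abi function walks its own
   arguments with the MS flavor of the va_* builtins:

     int __attribute__ ((ms_abi))
     sum (int n, ...)
     {
       __builtin_ms_va_list ap;
       int i, s = 0;
       __builtin_ms_va_start (ap, n);
       for (i = 0; i < n; i++)
         s += __builtin_va_arg (ap, int);
       __builtin_ms_va_end (ap);
       return s;
     }
*/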
22771 static void
22772 ix86_init_builtins (void)
22774 tree float128_type_node = make_node (REAL_TYPE);
22775 tree ftype, decl;
22777 /* The __float80 type. */
22778 if (TYPE_MODE (long_double_type_node) == XFmode)
22779 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
22780 "__float80");
22781 else
22783 /* long double is not XFmode here, so __float80 needs a type of its own. */
22784 tree float80_type_node = make_node (REAL_TYPE);
22786 TYPE_PRECISION (float80_type_node) = 80;
22787 layout_type (float80_type_node);
22788 (*lang_hooks.types.register_builtin_type) (float80_type_node,
22789 "__float80");
22792 /* The __float128 type. */
22793 TYPE_PRECISION (float128_type_node) = 128;
22794 layout_type (float128_type_node);
22795 (*lang_hooks.types.register_builtin_type) (float128_type_node,
22796 "__float128");
22798 /* TFmode support builtins. */
22799 ftype = build_function_type (float128_type_node, void_list_node);
22800 decl = add_builtin_function ("__builtin_infq", ftype,
22801 IX86_BUILTIN_INFQ, BUILT_IN_MD,
22802 NULL, NULL_TREE);
22803 ix86_builtins[(int) IX86_BUILTIN_INFQ] = decl;
22805 /* We will expand them to a normal call if SSE2 isn't available, since
22806 they are used by libgcc. */
22807 ftype = build_function_type_list (float128_type_node,
22808 float128_type_node,
22809 NULL_TREE);
22810 decl = add_builtin_function ("__builtin_fabsq", ftype,
22811 IX86_BUILTIN_FABSQ, BUILT_IN_MD,
22812 "__fabstf2", NULL_TREE);
22813 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = decl;
22814 TREE_READONLY (decl) = 1;
22816 ftype = build_function_type_list (float128_type_node,
22817 float128_type_node,
22818 float128_type_node,
22819 NULL_TREE);
22820 decl = add_builtin_function ("__builtin_copysignq", ftype,
22821 IX86_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
22822 "__copysigntf3", NULL_TREE);
22823 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = decl;
22824 TREE_READONLY (decl) = 1;
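/* A sketch of the user-level view of the TFmode builtins just defined
   (illustrative; the Q literal suffix is assumed to be available):

     __float128 inf = __builtin_infq ();
     __float128 mag = __builtin_fabsq (-inf);
     __float128 neg = __builtin_copysignq (mag, -1.0Q);

   Without SSE2 the last two expand to calls to the libgcc routines
   __fabstf2 and __copysigntf3 named above.  */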
22826 ix86_init_mmx_sse_builtins ();
22827 if (TARGET_64BIT)
22828 ix86_init_builtins_va_builtins_abi ();
22831 /* Errors in the source file can cause expand_expr to return const0_rtx
22832 where we expect a vector. To avoid crashing, use one of the vector
22833 clear instructions. */
22834 static rtx
22835 safe_vector_operand (rtx x, enum machine_mode mode)
22837 if (x == const0_rtx)
22838 x = CONST0_RTX (mode);
22839 return x;
22842 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
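/* For example, __builtin_ia32_paddw128 (a, b) arrives here with the
   icode of the SSE2 vector-add pattern; the expansion boils down to
   (a sketch using only the locals below):

     target = gen_reg_rtx (tmode);
     emit_insn (GEN_FCN (icode) (target, op0, op1));

   after each operand has been forced to satisfy its predicate.  The
   SImode/TImode fixup below widens a 32-bit count into a vector
   register for patterns whose second input is TImode.  */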
22844 static rtx
22845 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
22847 rtx pat;
22848 tree arg0 = CALL_EXPR_ARG (exp, 0);
22849 tree arg1 = CALL_EXPR_ARG (exp, 1);
22850 rtx op0 = expand_normal (arg0);
22851 rtx op1 = expand_normal (arg1);
22852 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22853 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
22854 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
22856 if (VECTOR_MODE_P (mode0))
22857 op0 = safe_vector_operand (op0, mode0);
22858 if (VECTOR_MODE_P (mode1))
22859 op1 = safe_vector_operand (op1, mode1);
22861 if (optimize || !target
22862 || GET_MODE (target) != tmode
22863 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22864 target = gen_reg_rtx (tmode);
22866 if (GET_MODE (op1) == SImode && mode1 == TImode)
22868 rtx x = gen_reg_rtx (V4SImode);
22869 emit_insn (gen_sse2_loadd (x, op1));
22870 op1 = gen_lowpart (TImode, x);
22873 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
22874 op0 = copy_to_mode_reg (mode0, op0);
22875 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
22876 op1 = copy_to_mode_reg (mode1, op1);
22878 pat = GEN_FCN (icode) (target, op0, op1);
22879 if (! pat)
22880 return 0;
22882 emit_insn (pat);
22884 return target;
22887 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
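/* This serves the SSE5 bdesc_multi_arg table.  E.g. a fused
   multiply-add such as __builtin_ia32_fmaddps is MULTI_ARG_3_SF and
   ends up as (sketch):

     pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);

   while the _CMP variants build the comparison code as an extra rtx
   operand with gen_rtx_fmt_ee, and the _TF variants pass SUB_CODE as
   an immediate.  */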
22889 static rtx
22890 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
22891 enum multi_arg_type m_type,
22892 enum insn_code sub_code)
22894 rtx pat;
22895 int i;
22896 int nargs;
22897 bool comparison_p = false;
22898 bool tf_p = false;
22899 bool last_arg_constant = false;
22900 int num_memory = 0;
22901 struct {
22902 rtx op;
22903 enum machine_mode mode;
22904 } args[4];
22906 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22908 switch (m_type)
22910 case MULTI_ARG_3_SF:
22911 case MULTI_ARG_3_DF:
22912 case MULTI_ARG_3_DI:
22913 case MULTI_ARG_3_SI:
22914 case MULTI_ARG_3_SI_DI:
22915 case MULTI_ARG_3_HI:
22916 case MULTI_ARG_3_HI_SI:
22917 case MULTI_ARG_3_QI:
22918 case MULTI_ARG_3_PERMPS:
22919 case MULTI_ARG_3_PERMPD:
22920 nargs = 3;
22921 break;
22923 case MULTI_ARG_2_SF:
22924 case MULTI_ARG_2_DF:
22925 case MULTI_ARG_2_DI:
22926 case MULTI_ARG_2_SI:
22927 case MULTI_ARG_2_HI:
22928 case MULTI_ARG_2_QI:
22929 nargs = 2;
22930 break;
22932 case MULTI_ARG_2_DI_IMM:
22933 case MULTI_ARG_2_SI_IMM:
22934 case MULTI_ARG_2_HI_IMM:
22935 case MULTI_ARG_2_QI_IMM:
22936 nargs = 2;
22937 last_arg_constant = true;
22938 break;
22940 case MULTI_ARG_1_SF:
22941 case MULTI_ARG_1_DF:
22942 case MULTI_ARG_1_DI:
22943 case MULTI_ARG_1_SI:
22944 case MULTI_ARG_1_HI:
22945 case MULTI_ARG_1_QI:
22946 case MULTI_ARG_1_SI_DI:
22947 case MULTI_ARG_1_HI_DI:
22948 case MULTI_ARG_1_HI_SI:
22949 case MULTI_ARG_1_QI_DI:
22950 case MULTI_ARG_1_QI_SI:
22951 case MULTI_ARG_1_QI_HI:
22952 case MULTI_ARG_1_PH2PS:
22953 case MULTI_ARG_1_PS2PH:
22954 nargs = 1;
22955 break;
22957 case MULTI_ARG_2_SF_CMP:
22958 case MULTI_ARG_2_DF_CMP:
22959 case MULTI_ARG_2_DI_CMP:
22960 case MULTI_ARG_2_SI_CMP:
22961 case MULTI_ARG_2_HI_CMP:
22962 case MULTI_ARG_2_QI_CMP:
22963 nargs = 2;
22964 comparison_p = true;
22965 break;
22967 case MULTI_ARG_2_SF_TF:
22968 case MULTI_ARG_2_DF_TF:
22969 case MULTI_ARG_2_DI_TF:
22970 case MULTI_ARG_2_SI_TF:
22971 case MULTI_ARG_2_HI_TF:
22972 case MULTI_ARG_2_QI_TF:
22973 nargs = 2;
22974 tf_p = true;
22975 break;
22977 case MULTI_ARG_UNKNOWN:
22978 default:
22979 gcc_unreachable ();
22982 if (optimize || !target
22983 || GET_MODE (target) != tmode
22984 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22985 target = gen_reg_rtx (tmode);
22987 gcc_assert (nargs <= 4);
22989 for (i = 0; i < nargs; i++)
22991 tree arg = CALL_EXPR_ARG (exp, i);
22992 rtx op = expand_normal (arg);
22993 int adjust = (comparison_p) ? 1 : 0;
22994 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
22996 if (last_arg_constant && i == nargs-1)
22998 if (GET_CODE (op) != CONST_INT)
23000 error ("last argument must be an immediate");
23001 return gen_reg_rtx (tmode);
23004 else
23006 if (VECTOR_MODE_P (mode))
23007 op = safe_vector_operand (op, mode);
23009 /* If we aren't optimizing, only allow one memory operand to be
23010 generated. */
23011 if (memory_operand (op, mode))
23012 num_memory++;
23014 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
23016 if (optimize
23017 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
23018 || num_memory > 1)
23019 op = force_reg (mode, op);
23022 args[i].op = op;
23023 args[i].mode = mode;
23026 switch (nargs)
23028 case 1:
23029 pat = GEN_FCN (icode) (target, args[0].op);
23030 break;
23032 case 2:
23033 if (tf_p)
23034 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23035 GEN_INT ((int)sub_code));
23036 else if (! comparison_p)
23037 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23038 else
23040 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23041 args[0].op,
23042 args[1].op);
23044 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23046 break;
23048 case 3:
23049 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23050 break;
23052 default:
23053 gcc_unreachable ();
23056 if (! pat)
23057 return 0;
23059 emit_insn (pat);
23060 return target;
23063 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23064 insns with vec_merge. */
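/* E.g. __builtin_ia32_sqrtss: the insn computes the operation on
   element 0 and vec_merges the remaining elements from a second
   input, so the single user-level argument is wired to both operands
   (op1 = op0 below).  Conceptually (illustrative):

     dst = { sqrt (src[0]), src[1], src[2], src[3] }
*/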
23066 static rtx
23067 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23068 rtx target)
23070 rtx pat;
23071 tree arg0 = CALL_EXPR_ARG (exp, 0);
23072 rtx op1, op0 = expand_normal (arg0);
23073 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23074 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23076 if (optimize || !target
23077 || GET_MODE (target) != tmode
23078 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23079 target = gen_reg_rtx (tmode);
23081 if (VECTOR_MODE_P (mode0))
23082 op0 = safe_vector_operand (op0, mode0);
23084 if ((optimize && !register_operand (op0, mode0))
23085 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23086 op0 = copy_to_mode_reg (mode0, op0);
23088 op1 = op0;
23089 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23090 op1 = copy_to_mode_reg (mode0, op1);
23092 pat = GEN_FCN (icode) (target, op0, op1);
23093 if (! pat)
23094 return 0;
23095 emit_insn (pat);
23096 return target;
23099 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
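/* E.g. __builtin_ia32_cmpltps (a, b) expands to a cmpps insn whose
   extra operand is the LT comparison rtx built below:

     op2 = gen_rtx_fmt_ee (LT, mode0, op0, op1);
     pat = GEN_FCN (d->icode) (target, op0, op1, op2);

   The SWAP parameter handles predicates such as GT that the hardware
   only provides with the operand order reversed.  */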
23101 static rtx
23102 ix86_expand_sse_compare (const struct builtin_description *d,
23103 tree exp, rtx target, bool swap)
23105 rtx pat;
23106 tree arg0 = CALL_EXPR_ARG (exp, 0);
23107 tree arg1 = CALL_EXPR_ARG (exp, 1);
23108 rtx op0 = expand_normal (arg0);
23109 rtx op1 = expand_normal (arg1);
23110 rtx op2;
23111 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23112 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23113 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23114 enum rtx_code comparison = d->comparison;
23116 if (VECTOR_MODE_P (mode0))
23117 op0 = safe_vector_operand (op0, mode0);
23118 if (VECTOR_MODE_P (mode1))
23119 op1 = safe_vector_operand (op1, mode1);
23121 /* Swap operands if we have a comparison that isn't available in
23122 hardware. */
23123 if (swap)
23125 rtx tmp = gen_reg_rtx (mode1);
23126 emit_move_insn (tmp, op1);
23127 op1 = op0;
23128 op0 = tmp;
23131 if (optimize || !target
23132 || GET_MODE (target) != tmode
23133 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23134 target = gen_reg_rtx (tmode);
23136 if ((optimize && !register_operand (op0, mode0))
23137 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23138 op0 = copy_to_mode_reg (mode0, op0);
23139 if ((optimize && !register_operand (op1, mode1))
23140 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23141 op1 = copy_to_mode_reg (mode1, op1);
23143 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23144 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23145 if (! pat)
23146 return 0;
23147 emit_insn (pat);
23148 return target;
23151 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23153 static rtx
23154 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23155 rtx target)
23157 rtx pat;
23158 tree arg0 = CALL_EXPR_ARG (exp, 0);
23159 tree arg1 = CALL_EXPR_ARG (exp, 1);
23160 rtx op0 = expand_normal (arg0);
23161 rtx op1 = expand_normal (arg1);
23162 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23163 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23164 enum rtx_code comparison = d->comparison;
23166 if (VECTOR_MODE_P (mode0))
23167 op0 = safe_vector_operand (op0, mode0);
23168 if (VECTOR_MODE_P (mode1))
23169 op1 = safe_vector_operand (op1, mode1);
23171 /* Swap operands if we have a comparison that isn't available in
23172 hardware. */
23173 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23175 rtx tmp = op1;
23176 op1 = op0;
23177 op0 = tmp;
23180 target = gen_reg_rtx (SImode);
23181 emit_move_insn (target, const0_rtx);
23182 target = gen_rtx_SUBREG (QImode, target, 0);
23184 if ((optimize && !register_operand (op0, mode0))
23185 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23186 op0 = copy_to_mode_reg (mode0, op0);
23187 if ((optimize && !register_operand (op1, mode1))
23188 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23189 op1 = copy_to_mode_reg (mode1, op1);
23191 pat = GEN_FCN (d->icode) (op0, op1);
23192 if (! pat)
23193 return 0;
23194 emit_insn (pat);
23195 emit_insn (gen_rtx_SET (VOIDmode,
23196 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23197 gen_rtx_fmt_ee (comparison, QImode,
23198 SET_DEST (pat),
23199 const0_rtx)));
23201 return SUBREG_REG (target);
23204 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
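/* E.g. the SSE4.1 test builtins: the ptest insn only sets the flags,
   so after emitting it we materialize the predicate into the low byte
   of a zeroed SImode register (the STRICT_LOW_PART set below), making
   the builtin's int result 0 or 1.  Illustrative user-level view:

     int all_zero = __builtin_ia32_ptestz128 (a, b);
*/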
23206 static rtx
23207 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23208 rtx target)
23210 rtx pat;
23211 tree arg0 = CALL_EXPR_ARG (exp, 0);
23212 tree arg1 = CALL_EXPR_ARG (exp, 1);
23213 rtx op0 = expand_normal (arg0);
23214 rtx op1 = expand_normal (arg1);
23215 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23216 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23217 enum rtx_code comparison = d->comparison;
23219 if (VECTOR_MODE_P (mode0))
23220 op0 = safe_vector_operand (op0, mode0);
23221 if (VECTOR_MODE_P (mode1))
23222 op1 = safe_vector_operand (op1, mode1);
23224 target = gen_reg_rtx (SImode);
23225 emit_move_insn (target, const0_rtx);
23226 target = gen_rtx_SUBREG (QImode, target, 0);
23228 if ((optimize && !register_operand (op0, mode0))
23229 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23230 op0 = copy_to_mode_reg (mode0, op0);
23231 if ((optimize && !register_operand (op1, mode1))
23232 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23233 op1 = copy_to_mode_reg (mode1, op1);
23235 pat = GEN_FCN (d->icode) (op0, op1);
23236 if (! pat)
23237 return 0;
23238 emit_insn (pat);
23239 emit_insn (gen_rtx_SET (VOIDmode,
23240 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23241 gen_rtx_fmt_ee (comparison, QImode,
23242 SET_DEST (pat),
23243 const0_rtx)));
23245 return SUBREG_REG (target);
23248 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
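/* These implement _mm_cmpestri, _mm_cmpestrm and the flag-reading
   variants from smmintrin.h; illustrative user-level view (assuming
   SSE4.2):

     int idx = _mm_cmpestri (a, la, b, lb, _SIDD_CMP_EQUAL_EACH);

   The insn computes both an index and a mask; depending on d->code we
   keep one of them in TARGET and discard the other into a scratch, or
   read a single flag bit for the pcmpestr[aczso] forms.  */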
23250 static rtx
23251 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23252 tree exp, rtx target)
23254 rtx pat;
23255 tree arg0 = CALL_EXPR_ARG (exp, 0);
23256 tree arg1 = CALL_EXPR_ARG (exp, 1);
23257 tree arg2 = CALL_EXPR_ARG (exp, 2);
23258 tree arg3 = CALL_EXPR_ARG (exp, 3);
23259 tree arg4 = CALL_EXPR_ARG (exp, 4);
23260 rtx scratch0, scratch1;
23261 rtx op0 = expand_normal (arg0);
23262 rtx op1 = expand_normal (arg1);
23263 rtx op2 = expand_normal (arg2);
23264 rtx op3 = expand_normal (arg3);
23265 rtx op4 = expand_normal (arg4);
23266 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23268 tmode0 = insn_data[d->icode].operand[0].mode;
23269 tmode1 = insn_data[d->icode].operand[1].mode;
23270 modev2 = insn_data[d->icode].operand[2].mode;
23271 modei3 = insn_data[d->icode].operand[3].mode;
23272 modev4 = insn_data[d->icode].operand[4].mode;
23273 modei5 = insn_data[d->icode].operand[5].mode;
23274 modeimm = insn_data[d->icode].operand[6].mode;
23276 if (VECTOR_MODE_P (modev2))
23277 op0 = safe_vector_operand (op0, modev2);
23278 if (VECTOR_MODE_P (modev4))
23279 op2 = safe_vector_operand (op2, modev4);
23281 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23282 op0 = copy_to_mode_reg (modev2, op0);
23283 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23284 op1 = copy_to_mode_reg (modei3, op1);
23285 if ((optimize && !register_operand (op2, modev4))
23286 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23287 op2 = copy_to_mode_reg (modev4, op2);
23288 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23289 op3 = copy_to_mode_reg (modei5, op3);
23291 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23293 error ("the fifth argument must be a 8-bit immediate");
23294 return const0_rtx;
23297 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23299 if (optimize || !target
23300 || GET_MODE (target) != tmode0
23301 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23302 target = gen_reg_rtx (tmode0);
23304 scratch1 = gen_reg_rtx (tmode1);
23306 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23308 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23310 if (optimize || !target
23311 || GET_MODE (target) != tmode1
23312 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23313 target = gen_reg_rtx (tmode1);
23315 scratch0 = gen_reg_rtx (tmode0);
23317 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23319 else
23321 gcc_assert (d->flag);
23323 scratch0 = gen_reg_rtx (tmode0);
23324 scratch1 = gen_reg_rtx (tmode1);
23326 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23329 if (! pat)
23330 return 0;
23332 emit_insn (pat);
23334 if (d->flag)
23336 target = gen_reg_rtx (SImode);
23337 emit_move_insn (target, const0_rtx);
23338 target = gen_rtx_SUBREG (QImode, target, 0);
23340 emit_insn
23341 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23342 gen_rtx_fmt_ee (EQ, QImode,
23343 gen_rtx_REG ((enum machine_mode) d->flag,
23344 FLAGS_REG),
23345 const0_rtx)));
23346 return SUBREG_REG (target);
23348 else
23349 return target;
23353 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23355 static rtx
23356 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23357 tree exp, rtx target)
23359 rtx pat;
23360 tree arg0 = CALL_EXPR_ARG (exp, 0);
23361 tree arg1 = CALL_EXPR_ARG (exp, 1);
23362 tree arg2 = CALL_EXPR_ARG (exp, 2);
23363 rtx scratch0, scratch1;
23364 rtx op0 = expand_normal (arg0);
23365 rtx op1 = expand_normal (arg1);
23366 rtx op2 = expand_normal (arg2);
23367 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23369 tmode0 = insn_data[d->icode].operand[0].mode;
23370 tmode1 = insn_data[d->icode].operand[1].mode;
23371 modev2 = insn_data[d->icode].operand[2].mode;
23372 modev3 = insn_data[d->icode].operand[3].mode;
23373 modeimm = insn_data[d->icode].operand[4].mode;
23375 if (VECTOR_MODE_P (modev2))
23376 op0 = safe_vector_operand (op0, modev2);
23377 if (VECTOR_MODE_P (modev3))
23378 op1 = safe_vector_operand (op1, modev3);
23380 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23381 op0 = copy_to_mode_reg (modev2, op0);
23382 if ((optimize && !register_operand (op1, modev3))
23383 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23384 op1 = copy_to_mode_reg (modev3, op1);
23386 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23388 error ("the third argument must be a 8-bit immediate");
23389 return const0_rtx;
23392 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23394 if (optimize || !target
23395 || GET_MODE (target) != tmode0
23396 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23397 target = gen_reg_rtx (tmode0);
23399 scratch1 = gen_reg_rtx (tmode1);
23401 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23403 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23405 if (optimize || !target
23406 || GET_MODE (target) != tmode1
23407 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23408 target = gen_reg_rtx (tmode1);
23410 scratch0 = gen_reg_rtx (tmode0);
23412 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23414 else
23416 gcc_assert (d->flag);
23418 scratch0 = gen_reg_rtx (tmode0);
23419 scratch1 = gen_reg_rtx (tmode1);
23421 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23424 if (! pat)
23425 return 0;
23427 emit_insn (pat);
23429 if (d->flag)
23431 target = gen_reg_rtx (SImode);
23432 emit_move_insn (target, const0_rtx);
23433 target = gen_rtx_SUBREG (QImode, target, 0);
23435 emit_insn
23436 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23437 gen_rtx_fmt_ee (EQ, QImode,
23438 gen_rtx_REG ((enum machine_mode) d->flag,
23439 FLAGS_REG),
23440 const0_rtx)));
23441 return SUBREG_REG (target);
23443 else
23444 return target;
23447 /* Subroutine of ix86_expand_builtin to take care of insns with
23448 variable number of operands. */
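/* The switch below classifies each builtin by its d->flag enumerator:
   how many operands (nargs), how many trailing arguments must be
   immediates (nargs_constant), whether the last argument is a shift
   count, whether operands must be swapped, and whether the insn's
   result mode differs from the builtin's (rmode, e.g. the V2DI2TI
   cases where the pattern produces TImode).  E.g.
   __builtin_ia32_pshufd is V4SI_FTYPE_V4SI_INT: two arguments, the
   last an 8-bit immediate.  */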
23450 static rtx
23451 ix86_expand_args_builtin (const struct builtin_description *d,
23452 tree exp, rtx target)
23454 rtx pat, real_target;
23455 unsigned int i, nargs;
23456 unsigned int nargs_constant = 0;
23457 int num_memory = 0;
23458 struct
23460 rtx op;
23461 enum machine_mode mode;
23462 } args[4];
23463 bool last_arg_count = false;
23464 enum insn_code icode = d->icode;
23465 const struct insn_data *insn_p = &insn_data[icode];
23466 enum machine_mode tmode = insn_p->operand[0].mode;
23467 enum machine_mode rmode = VOIDmode;
23468 bool swap = false;
23469 enum rtx_code comparison = d->comparison;
23471 switch ((enum ix86_builtin_type) d->flag)
23473 case INT_FTYPE_V8SF_V8SF_PTEST:
23474 case INT_FTYPE_V4DI_V4DI_PTEST:
23475 case INT_FTYPE_V4DF_V4DF_PTEST:
23476 case INT_FTYPE_V4SF_V4SF_PTEST:
23477 case INT_FTYPE_V2DI_V2DI_PTEST:
23478 case INT_FTYPE_V2DF_V2DF_PTEST:
23479 return ix86_expand_sse_ptest (d, exp, target);
23480 case FLOAT128_FTYPE_FLOAT128:
23481 case FLOAT_FTYPE_FLOAT:
23482 case INT64_FTYPE_V4SF:
23483 case INT64_FTYPE_V2DF:
23484 case INT_FTYPE_V16QI:
23485 case INT_FTYPE_V8QI:
23486 case INT_FTYPE_V8SF:
23487 case INT_FTYPE_V4DF:
23488 case INT_FTYPE_V4SF:
23489 case INT_FTYPE_V2DF:
23490 case V16QI_FTYPE_V16QI:
23491 case V8SI_FTYPE_V8SF:
23492 case V8SI_FTYPE_V4SI:
23493 case V8HI_FTYPE_V8HI:
23494 case V8HI_FTYPE_V16QI:
23495 case V8QI_FTYPE_V8QI:
23496 case V8SF_FTYPE_V8SF:
23497 case V8SF_FTYPE_V8SI:
23498 case V8SF_FTYPE_V4SF:
23499 case V4SI_FTYPE_V4SI:
23500 case V4SI_FTYPE_V16QI:
23501 case V4SI_FTYPE_V4SF:
23502 case V4SI_FTYPE_V8SI:
23503 case V4SI_FTYPE_V8HI:
23504 case V4SI_FTYPE_V4DF:
23505 case V4SI_FTYPE_V2DF:
23506 case V4HI_FTYPE_V4HI:
23507 case V4DF_FTYPE_V4DF:
23508 case V4DF_FTYPE_V4SI:
23509 case V4DF_FTYPE_V4SF:
23510 case V4DF_FTYPE_V2DF:
23511 case V4SF_FTYPE_V4SF:
23512 case V4SF_FTYPE_V4SI:
23513 case V4SF_FTYPE_V8SF:
23514 case V4SF_FTYPE_V4DF:
23515 case V4SF_FTYPE_V2DF:
23516 case V2DI_FTYPE_V2DI:
23517 case V2DI_FTYPE_V16QI:
23518 case V2DI_FTYPE_V8HI:
23519 case V2DI_FTYPE_V4SI:
23520 case V2DF_FTYPE_V2DF:
23521 case V2DF_FTYPE_V4SI:
23522 case V2DF_FTYPE_V4DF:
23523 case V2DF_FTYPE_V4SF:
23524 case V2DF_FTYPE_V2SI:
23525 case V2SI_FTYPE_V2SI:
23526 case V2SI_FTYPE_V4SF:
23527 case V2SI_FTYPE_V2SF:
23528 case V2SI_FTYPE_V2DF:
23529 case V2SF_FTYPE_V2SF:
23530 case V2SF_FTYPE_V2SI:
23531 nargs = 1;
23532 break;
23533 case V4SF_FTYPE_V4SF_VEC_MERGE:
23534 case V2DF_FTYPE_V2DF_VEC_MERGE:
23535 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23536 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23537 case V16QI_FTYPE_V16QI_V16QI:
23538 case V16QI_FTYPE_V8HI_V8HI:
23539 case V8QI_FTYPE_V8QI_V8QI:
23540 case V8QI_FTYPE_V4HI_V4HI:
23541 case V8HI_FTYPE_V8HI_V8HI:
23542 case V8HI_FTYPE_V16QI_V16QI:
23543 case V8HI_FTYPE_V4SI_V4SI:
23544 case V8SF_FTYPE_V8SF_V8SF:
23545 case V8SF_FTYPE_V8SF_V8SI:
23546 case V4SI_FTYPE_V4SI_V4SI:
23547 case V4SI_FTYPE_V8HI_V8HI:
23548 case V4SI_FTYPE_V4SF_V4SF:
23549 case V4SI_FTYPE_V2DF_V2DF:
23550 case V4HI_FTYPE_V4HI_V4HI:
23551 case V4HI_FTYPE_V8QI_V8QI:
23552 case V4HI_FTYPE_V2SI_V2SI:
23553 case V4DF_FTYPE_V4DF_V4DF:
23554 case V4DF_FTYPE_V4DF_V4DI:
23555 case V4SF_FTYPE_V4SF_V4SF:
23556 case V4SF_FTYPE_V4SF_V4SI:
23557 case V4SF_FTYPE_V4SF_V2SI:
23558 case V4SF_FTYPE_V4SF_V2DF:
23559 case V4SF_FTYPE_V4SF_DI:
23560 case V4SF_FTYPE_V4SF_SI:
23561 case V2DI_FTYPE_V2DI_V2DI:
23562 case V2DI_FTYPE_V16QI_V16QI:
23563 case V2DI_FTYPE_V4SI_V4SI:
23564 case V2DI_FTYPE_V2DI_V16QI:
23565 case V2DI_FTYPE_V2DF_V2DF:
23566 case V2SI_FTYPE_V2SI_V2SI:
23567 case V2SI_FTYPE_V4HI_V4HI:
23568 case V2SI_FTYPE_V2SF_V2SF:
23569 case V2DF_FTYPE_V2DF_V2DF:
23570 case V2DF_FTYPE_V2DF_V4SF:
23571 case V2DF_FTYPE_V2DF_V2DI:
23572 case V2DF_FTYPE_V2DF_DI:
23573 case V2DF_FTYPE_V2DF_SI:
23574 case V2SF_FTYPE_V2SF_V2SF:
23575 case V1DI_FTYPE_V1DI_V1DI:
23576 case V1DI_FTYPE_V8QI_V8QI:
23577 case V1DI_FTYPE_V2SI_V2SI:
23578 if (comparison == UNKNOWN)
23579 return ix86_expand_binop_builtin (icode, exp, target);
23580 nargs = 2;
23581 break;
23582 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23583 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23584 gcc_assert (comparison != UNKNOWN);
23585 nargs = 2;
23586 swap = true;
23587 break;
23588 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23589 case V8HI_FTYPE_V8HI_SI_COUNT:
23590 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23591 case V4SI_FTYPE_V4SI_SI_COUNT:
23592 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23593 case V4HI_FTYPE_V4HI_SI_COUNT:
23594 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23595 case V2DI_FTYPE_V2DI_SI_COUNT:
23596 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23597 case V2SI_FTYPE_V2SI_SI_COUNT:
23598 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23599 case V1DI_FTYPE_V1DI_SI_COUNT:
23600 nargs = 2;
23601 last_arg_count = true;
23602 break;
23603 case UINT64_FTYPE_UINT64_UINT64:
23604 case UINT_FTYPE_UINT_UINT:
23605 case UINT_FTYPE_UINT_USHORT:
23606 case UINT_FTYPE_UINT_UCHAR:
23607 nargs = 2;
23608 break;
23609 case V2DI2TI_FTYPE_V2DI_INT:
23610 nargs = 2;
23611 rmode = V2DImode;
23612 nargs_constant = 1;
23613 break;
23614 case V8HI_FTYPE_V8HI_INT:
23615 case V8SF_FTYPE_V8SF_INT:
23616 case V4SI_FTYPE_V4SI_INT:
23617 case V4SI_FTYPE_V8SI_INT:
23618 case V4HI_FTYPE_V4HI_INT:
23619 case V4DF_FTYPE_V4DF_INT:
23620 case V4SF_FTYPE_V4SF_INT:
23621 case V4SF_FTYPE_V8SF_INT:
23622 case V2DI_FTYPE_V2DI_INT:
23623 case V2DF_FTYPE_V2DF_INT:
23624 case V2DF_FTYPE_V4DF_INT:
23625 nargs = 2;
23626 nargs_constant = 1;
23627 break;
23628 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23629 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23630 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23631 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23632 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23633 nargs = 3;
23634 break;
23635 case V16QI_FTYPE_V16QI_V16QI_INT:
23636 case V8HI_FTYPE_V8HI_V8HI_INT:
23637 case V8SI_FTYPE_V8SI_V8SI_INT:
23638 case V8SI_FTYPE_V8SI_V4SI_INT:
23639 case V8SF_FTYPE_V8SF_V8SF_INT:
23640 case V8SF_FTYPE_V8SF_V4SF_INT:
23641 case V4SI_FTYPE_V4SI_V4SI_INT:
23642 case V4DF_FTYPE_V4DF_V4DF_INT:
23643 case V4DF_FTYPE_V4DF_V2DF_INT:
23644 case V4SF_FTYPE_V4SF_V4SF_INT:
23645 case V2DI_FTYPE_V2DI_V2DI_INT:
23646 case V2DF_FTYPE_V2DF_V2DF_INT:
23647 nargs = 3;
23648 nargs_constant = 1;
23649 break;
23650 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
23651 nargs = 3;
23652 rmode = V2DImode;
23653 nargs_constant = 1;
23654 break;
23655 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
23656 nargs = 3;
23657 rmode = DImode;
23658 nargs_constant = 1;
23659 break;
23660 case V2DI_FTYPE_V2DI_UINT_UINT:
23661 nargs = 3;
23662 nargs_constant = 2;
23663 break;
23664 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23665 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23666 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23667 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23668 nargs = 4;
23669 nargs_constant = 1;
23670 break;
23671 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23672 nargs = 4;
23673 nargs_constant = 2;
23674 break;
23675 default:
23676 gcc_unreachable ();
23679 gcc_assert (nargs <= ARRAY_SIZE (args));
23681 if (comparison != UNKNOWN)
23683 gcc_assert (nargs == 2);
23684 return ix86_expand_sse_compare (d, exp, target, swap);
23687 if (rmode == VOIDmode || rmode == tmode)
23689 if (optimize
23690 || target == 0
23691 || GET_MODE (target) != tmode
23692 || ! (*insn_p->operand[0].predicate) (target, tmode))
23693 target = gen_reg_rtx (tmode);
23694 real_target = target;
23696 else
23698 target = gen_reg_rtx (rmode);
23699 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23702 for (i = 0; i < nargs; i++)
23704 tree arg = CALL_EXPR_ARG (exp, i);
23705 rtx op = expand_normal (arg);
23706 enum machine_mode mode = insn_p->operand[i + 1].mode;
23707 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23709 if (last_arg_count && (i + 1) == nargs)
23711 /* SIMD shift insns take either an 8-bit immediate or a
23712 register as the count, but the builtin functions take an int as
23713 the count; if it doesn't match the predicate, put it in a register. */
23714 if (!match)
23716 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23717 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23718 op = copy_to_reg (op);
23721 else if ((nargs - i) <= nargs_constant)
23723 if (!match)
23724 switch (icode)
23726 case CODE_FOR_sse4_1_roundpd:
23727 case CODE_FOR_sse4_1_roundps:
23728 case CODE_FOR_sse4_1_roundsd:
23729 case CODE_FOR_sse4_1_roundss:
23730 case CODE_FOR_sse4_1_blendps:
23731 case CODE_FOR_avx_blendpd256:
23732 case CODE_FOR_avx_vpermilv4df:
23733 case CODE_FOR_avx_roundpd256:
23734 case CODE_FOR_avx_roundps256:
23735 error ("the last argument must be a 4-bit immediate");
23736 return const0_rtx;
23738 case CODE_FOR_sse4_1_blendpd:
23739 case CODE_FOR_avx_vpermilv2df:
23740 case CODE_FOR_avx_vpermil2v2df3:
23741 case CODE_FOR_avx_vpermil2v4sf3:
23742 case CODE_FOR_avx_vpermil2v4df3:
23743 case CODE_FOR_avx_vpermil2v8sf3:
23744 error ("the last argument must be a 2-bit immediate");
23745 return const0_rtx;
23747 case CODE_FOR_avx_vextractf128v4df:
23748 case CODE_FOR_avx_vextractf128v8sf:
23749 case CODE_FOR_avx_vextractf128v8si:
23750 case CODE_FOR_avx_vinsertf128v4df:
23751 case CODE_FOR_avx_vinsertf128v8sf:
23752 case CODE_FOR_avx_vinsertf128v8si:
23753 error ("the last argument must be a 1-bit immediate");
23754 return const0_rtx;
23756 case CODE_FOR_avx_cmpsdv2df3:
23757 case CODE_FOR_avx_cmpssv4sf3:
23758 case CODE_FOR_avx_cmppdv2df3:
23759 case CODE_FOR_avx_cmppsv4sf3:
23760 case CODE_FOR_avx_cmppdv4df3:
23761 case CODE_FOR_avx_cmppsv8sf3:
23762 error ("the last argument must be a 5-bit immediate");
23763 return const0_rtx;
23765 default:
23766 switch (nargs_constant)
23768 case 2:
23769 if ((nargs - i) == nargs_constant)
23771 error ("the next to last argument must be an 8-bit immediate");
23772 break;
23774 case 1:
23775 error ("the last argument must be an 8-bit immediate");
23776 break;
23777 default:
23778 gcc_unreachable ();
23780 return const0_rtx;
23783 else
23785 if (VECTOR_MODE_P (mode))
23786 op = safe_vector_operand (op, mode);
23788 /* If we aren't optimizing, only allow one memory operand to
23789 be generated. */
23790 if (memory_operand (op, mode))
23791 num_memory++;
23793 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23795 if (optimize || !match || num_memory > 1)
23796 op = copy_to_mode_reg (mode, op);
23798 else
23800 op = copy_to_reg (op);
23801 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23805 args[i].op = op;
23806 args[i].mode = mode;
23809 switch (nargs)
23811 case 1:
23812 pat = GEN_FCN (icode) (real_target, args[0].op);
23813 break;
23814 case 2:
23815 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23816 break;
23817 case 3:
23818 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23819 args[2].op);
23820 break;
23821 case 4:
23822 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23823 args[2].op, args[3].op);
23824 break;
23825 default:
23826 gcc_unreachable ();
23829 if (! pat)
23830 return 0;
23832 emit_insn (pat);
23833 return target;
23836 /* Subroutine of ix86_expand_builtin to take care of special insns
23837 with variable number of operands. */
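/* "Special" builtins touch memory or return void: for loads
   (klass == load) a pointer argument becomes the source MEM, for
   stores TARGET itself becomes the destination MEM.  E.g.
   __builtin_ia32_movntps (p, a) is VOID_FTYPE_PFLOAT_V4SF above: the
   pointer P turns into the store destination and A is the value.  */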
23839 static rtx
23840 ix86_expand_special_args_builtin (const struct builtin_description *d,
23841 tree exp, rtx target)
23843 tree arg;
23844 rtx pat, op;
23845 unsigned int i, nargs, arg_adjust, memory;
23846 struct
23848 rtx op;
23849 enum machine_mode mode;
23850 } args[2];
23851 enum insn_code icode = d->icode;
23852 bool last_arg_constant = false;
23853 const struct insn_data *insn_p = &insn_data[icode];
23854 enum machine_mode tmode = insn_p->operand[0].mode;
23855 enum { load, store } klass;
23857 switch ((enum ix86_special_builtin_type) d->flag)
23859 case VOID_FTYPE_VOID:
23860 emit_insn (GEN_FCN (icode) (target));
23861 return 0;
23862 case V2DI_FTYPE_PV2DI:
23863 case V32QI_FTYPE_PCCHAR:
23864 case V16QI_FTYPE_PCCHAR:
23865 case V8SF_FTYPE_PCV4SF:
23866 case V8SF_FTYPE_PCFLOAT:
23867 case V4SF_FTYPE_PCFLOAT:
23868 case V4DF_FTYPE_PCV2DF:
23869 case V4DF_FTYPE_PCDOUBLE:
23870 case V2DF_FTYPE_PCDOUBLE:
23871 nargs = 1;
23872 klass = load;
23873 memory = 0;
23874 break;
23875 case VOID_FTYPE_PV2SF_V4SF:
23876 case VOID_FTYPE_PV2DI_V2DI:
23877 case VOID_FTYPE_PCHAR_V32QI:
23878 case VOID_FTYPE_PCHAR_V16QI:
23879 case VOID_FTYPE_PFLOAT_V8SF:
23880 case VOID_FTYPE_PFLOAT_V4SF:
23881 case VOID_FTYPE_PDOUBLE_V4DF:
23882 case VOID_FTYPE_PDOUBLE_V2DF:
23883 case VOID_FTYPE_PDI_DI:
23884 case VOID_FTYPE_PINT_INT:
23885 nargs = 1;
23886 klass = store;
23887 /* Reserve memory operand for target. */
23888 memory = ARRAY_SIZE (args);
23889 break;
23890 case V4SF_FTYPE_V4SF_PCV2SF:
23891 case V2DF_FTYPE_V2DF_PCDOUBLE:
23892 nargs = 2;
23893 klass = load;
23894 memory = 1;
23895 break;
23896 case V8SF_FTYPE_PCV8SF_V8SF:
23897 case V4DF_FTYPE_PCV4DF_V4DF:
23898 case V4SF_FTYPE_PCV4SF_V4SF:
23899 case V2DF_FTYPE_PCV2DF_V2DF:
23900 nargs = 2;
23901 klass = load;
23902 memory = 0;
23903 break;
23904 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23905 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23906 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23907 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23908 nargs = 2;
23909 klass = store;
23910 /* Reserve memory operand for target. */
23911 memory = ARRAY_SIZE (args);
23912 break;
23913 default:
23914 gcc_unreachable ();
23917 gcc_assert (nargs <= ARRAY_SIZE (args));
23919 if (klass == store)
23921 arg = CALL_EXPR_ARG (exp, 0);
23922 op = expand_normal (arg);
23923 gcc_assert (target == 0);
23924 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23925 arg_adjust = 1;
23927 else
23929 arg_adjust = 0;
23930 if (optimize
23931 || target == 0
23932 || GET_MODE (target) != tmode
23933 || ! (*insn_p->operand[0].predicate) (target, tmode))
23934 target = gen_reg_rtx (tmode);
23937 for (i = 0; i < nargs; i++)
23939 enum machine_mode mode = insn_p->operand[i + 1].mode;
23940 bool match;
23942 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
23943 op = expand_normal (arg);
23944 match = (*insn_p->operand[i + 1].predicate) (op, mode);
23946 if (last_arg_constant && (i + 1) == nargs)
23948 if (!match)
23949 switch (icode)
23951 default:
23952 error ("the last argument must be an 8-bit immediate");
23953 return const0_rtx;
23956 else
23958 if (i == memory)
23960 /* This must be the memory operand. */
23961 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
23962 gcc_assert (GET_MODE (op) == mode
23963 || GET_MODE (op) == VOIDmode);
23965 else
23967 /* This must be a register operand. */
23968 if (VECTOR_MODE_P (mode))
23969 op = safe_vector_operand (op, mode);
23971 gcc_assert (GET_MODE (op) == mode
23972 || GET_MODE (op) == VOIDmode);
23973 op = copy_to_mode_reg (mode, op);
23977 args[i].op = op;
23978 args[i].mode = mode;
23981 switch (nargs)
23983 case 1:
23984 pat = GEN_FCN (icode) (target, args[0].op);
23985 break;
23986 case 2:
23987 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23988 break;
23989 default:
23990 gcc_unreachable ();
23993 if (! pat)
23994 return 0;
23995 emit_insn (pat);
23996 return klass == store ? 0 : target;
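/* A sketch of how user code reaches this routine (illustrative; the
   intrinsic-to-builtin mapping below assumes the usual xmmintrin.h
   wrappers):

     __m128 load4 (const float *p)
     {
       return _mm_loadu_ps (p);    // __builtin_ia32_loadups,
     }                             // V4SF_FTYPE_PCFLOAT, klass == load

     void store4 (float *p, __m128 v)
     {
       _mm_storeu_ps (p, v);       // __builtin_ia32_storeups,
     }                             // VOID_FTYPE_PFLOAT_V4SF, klass == store
*/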
23999 /* Return the integer constant in ARG. Constrain it to be in the range
24000 of the subparts of VEC_TYPE; issue an error if not. */
24002 static int
24003 get_element_number (tree vec_type, tree arg)
24005 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24007 if (!host_integerp (arg, 1)
24008 || (elt = tree_low_cst (arg, 1), elt > max))
24010 error ("selector must be an integer constant in the range 0..%wi", max);
24011 return 0;
24014 return elt;
24017 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24018 ix86_expand_vector_init. We DO have language-level syntax for this, in
24019 the form of (type){ init-list }. Except that since we can't place emms
24020 instructions from inside the compiler, we can't allow the use of MMX
24021 registers unless the user explicitly asks for it. So we do *not* define
24022 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24023 we have builtins invoked by mmintrin.h that give us license to emit
24024 these sorts of instructions. */
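/* For example (a sketch, assuming the usual mmintrin.h wrapper): with
   -mmmx,

     __m64 pair (int lo, int hi)
     {
       return _mm_set_pi32 (hi, lo);   // __builtin_ia32_vec_init_v2si
     }

   reaches ix86_expand_vec_init_builtin below with two SImode
   arguments.  */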
24026 static rtx
24027 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24029 enum machine_mode tmode = TYPE_MODE (type);
24030 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24031 int i, n_elt = GET_MODE_NUNITS (tmode);
24032 rtvec v = rtvec_alloc (n_elt);
24034 gcc_assert (VECTOR_MODE_P (tmode));
24035 gcc_assert (call_expr_nargs (exp) == n_elt);
24037 for (i = 0; i < n_elt; ++i)
24039 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24040 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24043 if (!target || !register_operand (target, tmode))
24044 target = gen_reg_rtx (tmode);
24046 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24047 return target;
24050 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24051 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24052 had a language-level syntax for referencing vector elements. */
24054 static rtx
24055 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24057 enum machine_mode tmode, mode0;
24058 tree arg0, arg1;
24059 int elt;
24060 rtx op0;
24062 arg0 = CALL_EXPR_ARG (exp, 0);
24063 arg1 = CALL_EXPR_ARG (exp, 1);
24065 op0 = expand_normal (arg0);
24066 elt = get_element_number (TREE_TYPE (arg0), arg1);
24068 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24069 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24070 gcc_assert (VECTOR_MODE_P (mode0));
24072 op0 = force_reg (mode0, op0);
24074 if (optimize || !target || !register_operand (target, tmode))
24075 target = gen_reg_rtx (tmode);
24077 ix86_expand_vector_extract (true, target, op0, elt);
24079 return target;
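/* E.g. (sketch): emmintrin.h's

     int lane3 (__m128i v)
     {
       return _mm_extract_epi16 (v, 3);   // __builtin_ia32_vec_ext_v8hi
     }

   arrives here with elt == 3 and expands through
   ix86_expand_vector_extract.  */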
24082 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24083 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24084 a language-level syntax for referencing vector elements. */
24086 static rtx
24087 ix86_expand_vec_set_builtin (tree exp)
24089 enum machine_mode tmode, mode1;
24090 tree arg0, arg1, arg2;
24091 int elt;
24092 rtx op0, op1, target;
24094 arg0 = CALL_EXPR_ARG (exp, 0);
24095 arg1 = CALL_EXPR_ARG (exp, 1);
24096 arg2 = CALL_EXPR_ARG (exp, 2);
24098 tmode = TYPE_MODE (TREE_TYPE (arg0));
24099 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24100 gcc_assert (VECTOR_MODE_P (tmode));
24102 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24103 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24104 elt = get_element_number (TREE_TYPE (arg0), arg2);
24106 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24107 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24109 op0 = force_reg (tmode, op0);
24110 op1 = force_reg (mode1, op1);
24112 /* OP0 is the source of these builtin functions and shouldn't be
24113 modified. Create a copy, use it and return it as target. */
24114 target = gen_reg_rtx (tmode);
24115 emit_move_insn (target, op0);
24116 ix86_expand_vector_set (true, target, op1, elt);
24118 return target;
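/* E.g. (sketch):

     __m128i put3 (__m128i v, int x)
     {
       return _mm_insert_epi16 (v, x, 3);   // __builtin_ia32_vec_set_v8hi
     }

   copies V into a fresh register and rewrites element 3, leaving the
   source operand unmodified as required above.  */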
24121 /* Expand an expression EXP that calls a built-in function,
24122 with result going to TARGET if that's convenient
24123 (and in mode MODE if that's convenient).
24124 SUBTARGET may be used as the target for computing one of EXP's operands.
24125 IGNORE is nonzero if the value is to be ignored. */
24127 static rtx
24128 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24129 enum machine_mode mode ATTRIBUTE_UNUSED,
24130 int ignore ATTRIBUTE_UNUSED)
24132 const struct builtin_description *d;
24133 size_t i;
24134 enum insn_code icode;
24135 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24136 tree arg0, arg1, arg2;
24137 rtx op0, op1, op2, pat;
24138 enum machine_mode mode0, mode1, mode2;
24139 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24141 /* Determine whether the builtin function is available under the current ISA.
24142 Originally the builtin was not created if it wasn't applicable to the
24143 current ISA based on the command line switches. With function specific
24144 options, we need to check in the context of the function making the call
24145 whether it is supported. */
24146 if (ix86_builtins_isa[fcode].isa
24147 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24149 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24150 NULL, NULL, false);
24152 if (!opts)
24153 error ("%qE needs unknown isa option", fndecl);
24154 else
24156 gcc_assert (opts != NULL);
24157 error ("%qE needs isa option %s", fndecl, opts);
24158 free (opts);
24160 return const0_rtx;
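/* E.g. (hypothetical): in a translation unit compiled with only -msse2,

     __m128i cmp (__m128i a, __m128i b)
     {
       return _mm_cmpgt_epi64 (a, b);   // an SSE4.2 builtin
     }

   fails the ISA check above and reports something like
   "'__builtin_ia32_pcmpgtq' needs isa option -msse4.2".  */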
24163 switch (fcode)
24165 case IX86_BUILTIN_MASKMOVQ:
24166 case IX86_BUILTIN_MASKMOVDQU:
24167 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24168 ? CODE_FOR_mmx_maskmovq
24169 : CODE_FOR_sse2_maskmovdqu);
24170 /* Note the arg order is different from the operand order. */
24171 arg1 = CALL_EXPR_ARG (exp, 0);
24172 arg2 = CALL_EXPR_ARG (exp, 1);
24173 arg0 = CALL_EXPR_ARG (exp, 2);
24174 op0 = expand_normal (arg0);
24175 op1 = expand_normal (arg1);
24176 op2 = expand_normal (arg2);
24177 mode0 = insn_data[icode].operand[0].mode;
24178 mode1 = insn_data[icode].operand[1].mode;
24179 mode2 = insn_data[icode].operand[2].mode;
24181 op0 = force_reg (Pmode, op0);
24182 op0 = gen_rtx_MEM (mode1, op0);
24184 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24185 op0 = copy_to_mode_reg (mode0, op0);
24186 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24187 op1 = copy_to_mode_reg (mode1, op1);
24188 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24189 op2 = copy_to_mode_reg (mode2, op2);
24190 pat = GEN_FCN (icode) (op0, op1, op2);
24191 if (! pat)
24192 return 0;
24193 emit_insn (pat);
24194 return 0;
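/* E.g. (sketch): emmintrin.h's

     void scatter (__m128i data, __m128i mask, char *p)
     {
       _mm_maskmoveu_si128 (data, mask, p);   // __builtin_ia32_maskmovdqu
     }

   passes (data, mask, address) while the insn wants (mem, data, mask),
   hence the argument shuffling above.  */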
24196 case IX86_BUILTIN_LDMXCSR:
24197 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24198 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24199 emit_move_insn (target, op0);
24200 emit_insn (gen_sse_ldmxcsr (target));
24201 return 0;
24203 case IX86_BUILTIN_STMXCSR:
24204 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24205 emit_insn (gen_sse_stmxcsr (target));
24206 return copy_to_mode_reg (SImode, target);
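/* E.g. (sketch): reading and rewriting the MXCSR control/status word,

     unsigned int csr = __builtin_ia32_stmxcsr ();
     __builtin_ia32_ldmxcsr (csr | 0x8040);   // set FTZ (bit 15), DAZ (bit 6)

   round-trips through the SImode stack slot allocated above.  */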
24208 case IX86_BUILTIN_CLFLUSH:
24209 arg0 = CALL_EXPR_ARG (exp, 0);
24210 op0 = expand_normal (arg0);
24211 icode = CODE_FOR_sse2_clflush;
24212 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24213 op0 = copy_to_mode_reg (Pmode, op0);
24215 emit_insn (gen_sse2_clflush (op0));
24216 return 0;
24218 case IX86_BUILTIN_MONITOR:
24219 arg0 = CALL_EXPR_ARG (exp, 0);
24220 arg1 = CALL_EXPR_ARG (exp, 1);
24221 arg2 = CALL_EXPR_ARG (exp, 2);
24222 op0 = expand_normal (arg0);
24223 op1 = expand_normal (arg1);
24224 op2 = expand_normal (arg2);
24225 if (!REG_P (op0))
24226 op0 = copy_to_mode_reg (Pmode, op0);
24227 if (!REG_P (op1))
24228 op1 = copy_to_mode_reg (SImode, op1);
24229 if (!REG_P (op2))
24230 op2 = copy_to_mode_reg (SImode, op2);
24231 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24232 return 0;
24234 case IX86_BUILTIN_MWAIT:
24235 arg0 = CALL_EXPR_ARG (exp, 0);
24236 arg1 = CALL_EXPR_ARG (exp, 1);
24237 op0 = expand_normal (arg0);
24238 op1 = expand_normal (arg1);
24239 if (!REG_P (op0))
24240 op0 = copy_to_mode_reg (SImode, op0);
24241 if (!REG_P (op1))
24242 op1 = copy_to_mode_reg (SImode, op1);
24243 emit_insn (gen_sse3_mwait (op0, op1));
24244 return 0;
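/* E.g. (sketch): a monitored idle loop with -msse3,

     void idle (volatile int *flag)
     {
       __builtin_ia32_monitor ((void *) flag, 0, 0);
       if (!*flag)
         __builtin_ia32_mwait (0, 0);
     }

   expands through the two cases above.  */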
24246 case IX86_BUILTIN_VEC_INIT_V2SI:
24247 case IX86_BUILTIN_VEC_INIT_V4HI:
24248 case IX86_BUILTIN_VEC_INIT_V8QI:
24249 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24251 case IX86_BUILTIN_VEC_EXT_V2DF:
24252 case IX86_BUILTIN_VEC_EXT_V2DI:
24253 case IX86_BUILTIN_VEC_EXT_V4SF:
24254 case IX86_BUILTIN_VEC_EXT_V4SI:
24255 case IX86_BUILTIN_VEC_EXT_V8HI:
24256 case IX86_BUILTIN_VEC_EXT_V2SI:
24257 case IX86_BUILTIN_VEC_EXT_V4HI:
24258 case IX86_BUILTIN_VEC_EXT_V16QI:
24259 return ix86_expand_vec_ext_builtin (exp, target);
24261 case IX86_BUILTIN_VEC_SET_V2DI:
24262 case IX86_BUILTIN_VEC_SET_V4SF:
24263 case IX86_BUILTIN_VEC_SET_V4SI:
24264 case IX86_BUILTIN_VEC_SET_V8HI:
24265 case IX86_BUILTIN_VEC_SET_V4HI:
24266 case IX86_BUILTIN_VEC_SET_V16QI:
24267 return ix86_expand_vec_set_builtin (exp);
24269 case IX86_BUILTIN_INFQ:
24271 REAL_VALUE_TYPE inf;
24272 rtx tmp;
24274 real_inf (&inf);
24275 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24277 tmp = validize_mem (force_const_mem (mode, tmp));
24279 if (target == 0)
24280 target = gen_reg_rtx (mode);
24282 emit_move_insn (target, tmp);
24283 return target;
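/* E.g.: __float128 huge = __builtin_infq (); materializes the
   infinity from the constant pool as above.  */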
24286 default:
24287 break;
24290 for (i = 0, d = bdesc_special_args;
24291 i < ARRAY_SIZE (bdesc_special_args);
24292 i++, d++)
24293 if (d->code == fcode)
24294 return ix86_expand_special_args_builtin (d, exp, target);
24296 for (i = 0, d = bdesc_args;
24297 i < ARRAY_SIZE (bdesc_args);
24298 i++, d++)
24299 if (d->code == fcode)
24300 switch (fcode)
24302 case IX86_BUILTIN_FABSQ:
24303 case IX86_BUILTIN_COPYSIGNQ:
24304 if (!TARGET_SSE2)
24305 /* Emit a normal call if SSE2 isn't available. */
24306 return expand_call (exp, target, ignore);
24307 default:
24308 return ix86_expand_args_builtin (d, exp, target);
24311 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24312 if (d->code == fcode)
24313 return ix86_expand_sse_comi (d, exp, target);
24315 for (i = 0, d = bdesc_pcmpestr;
24316 i < ARRAY_SIZE (bdesc_pcmpestr);
24317 i++, d++)
24318 if (d->code == fcode)
24319 return ix86_expand_sse_pcmpestr (d, exp, target);
24321 for (i = 0, d = bdesc_pcmpistr;
24322 i < ARRAY_SIZE (bdesc_pcmpistr);
24323 i++, d++)
24324 if (d->code == fcode)
24325 return ix86_expand_sse_pcmpistr (d, exp, target);
24327 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24328 if (d->code == fcode)
24329 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24330 (enum multi_arg_type)d->flag,
24331 d->comparison);
24333 gcc_unreachable ();
24336 /* Returns a function decl for a vectorized version of the builtin function
24337 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24338 if it is not available. */
24340 static tree
24341 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
24342 tree type_in)
24344 enum machine_mode in_mode, out_mode;
24345 int in_n, out_n;
24347 if (TREE_CODE (type_out) != VECTOR_TYPE
24348 || TREE_CODE (type_in) != VECTOR_TYPE)
24349 return NULL_TREE;
24351 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24352 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24353 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24354 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24356 switch (fn)
24358 case BUILT_IN_SQRT:
24359 if (out_mode == DFmode && out_n == 2
24360 && in_mode == DFmode && in_n == 2)
24361 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24362 break;
24364 case BUILT_IN_SQRTF:
24365 if (out_mode == SFmode && out_n == 4
24366 && in_mode == SFmode && in_n == 4)
24367 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24368 break;
24370 case BUILT_IN_LRINT:
24371 if (out_mode == SImode && out_n == 4
24372 && in_mode == DFmode && in_n == 2)
24373 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24374 break;
24376 case BUILT_IN_LRINTF:
24377 if (out_mode == SImode && out_n == 4
24378 && in_mode == SFmode && in_n == 4)
24379 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24380 break;
24382 default:
24383 break;
24386 /* Dispatch to a handler for a vectorization library. */
24387 if (ix86_veclib_handler)
24388 return (*ix86_veclib_handler)(fn, type_out, type_in);
24390 return NULL_TREE;
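/* E.g. (sketch): with -msse2 -O2 -ftree-vectorize,

     void root (double *a, int n)
     {
       for (int i = 0; i < n; i++)
         a[i] = __builtin_sqrt (a[i]);
     }

   queries this hook with V2DF for both type_in and type_out and gets
   IX86_BUILTIN_SQRTPD, so each pair of iterations becomes one
   sqrtpd.  */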
24393 /* Handler for an SVML-style interface to
24394 a library with vectorized intrinsics. */
24396 static tree
24397 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24399 char name[20];
24400 tree fntype, new_fndecl, args;
24401 unsigned arity;
24402 const char *bname;
24403 enum machine_mode el_mode, in_mode;
24404 int n, in_n;
24406 /* The SVML is suitable for unsafe math only. */
24407 if (!flag_unsafe_math_optimizations)
24408 return NULL_TREE;
24410 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24411 n = TYPE_VECTOR_SUBPARTS (type_out);
24412 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24413 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24414 if (el_mode != in_mode
24415 || n != in_n)
24416 return NULL_TREE;
24418 switch (fn)
24420 case BUILT_IN_EXP:
24421 case BUILT_IN_LOG:
24422 case BUILT_IN_LOG10:
24423 case BUILT_IN_POW:
24424 case BUILT_IN_TANH:
24425 case BUILT_IN_TAN:
24426 case BUILT_IN_ATAN:
24427 case BUILT_IN_ATAN2:
24428 case BUILT_IN_ATANH:
24429 case BUILT_IN_CBRT:
24430 case BUILT_IN_SINH:
24431 case BUILT_IN_SIN:
24432 case BUILT_IN_ASINH:
24433 case BUILT_IN_ASIN:
24434 case BUILT_IN_COSH:
24435 case BUILT_IN_COS:
24436 case BUILT_IN_ACOSH:
24437 case BUILT_IN_ACOS:
24438 if (el_mode != DFmode || n != 2)
24439 return NULL_TREE;
24440 break;
24442 case BUILT_IN_EXPF:
24443 case BUILT_IN_LOGF:
24444 case BUILT_IN_LOG10F:
24445 case BUILT_IN_POWF:
24446 case BUILT_IN_TANHF:
24447 case BUILT_IN_TANF:
24448 case BUILT_IN_ATANF:
24449 case BUILT_IN_ATAN2F:
24450 case BUILT_IN_ATANHF:
24451 case BUILT_IN_CBRTF:
24452 case BUILT_IN_SINHF:
24453 case BUILT_IN_SINF:
24454 case BUILT_IN_ASINHF:
24455 case BUILT_IN_ASINF:
24456 case BUILT_IN_COSHF:
24457 case BUILT_IN_COSF:
24458 case BUILT_IN_ACOSHF:
24459 case BUILT_IN_ACOSF:
24460 if (el_mode != SFmode || n != 4)
24461 return NULL_TREE;
24462 break;
24464 default:
24465 return NULL_TREE;
24468 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24470 if (fn == BUILT_IN_LOGF)
24471 strcpy (name, "vmlsLn4");
24472 else if (fn == BUILT_IN_LOG)
24473 strcpy (name, "vmldLn2");
24474 else if (n == 4)
24476 sprintf (name, "vmls%s", bname+10);
24477 name[strlen (name)-1] = '4';
24479 else
24480 sprintf (name, "vmld%s2", bname+10);
24482 /* Convert to uppercase. */
24483 name[4] &= ~0x20;
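/* Tracing the mangling (sketch): for BUILT_IN_SINF, bname is
   "__builtin_sinf", so bname+10 is "sinf"; the sprintf yields
   "vmlssinf", the trailing 'f' is overwritten with '4', and clearing
   bit 0x20 upcases name[4], giving "vmlsSin4".  BUILT_IN_SIN
   similarly becomes "vmldSin2".  */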
24485 arity = 0;
24486 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24487 args = TREE_CHAIN (args))
24488 arity++;
24490 if (arity == 1)
24491 fntype = build_function_type_list (type_out, type_in, NULL);
24492 else
24493 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24495 /* Build a function declaration for the vectorized function. */
24496 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
24497 TREE_PUBLIC (new_fndecl) = 1;
24498 DECL_EXTERNAL (new_fndecl) = 1;
24499 DECL_IS_NOVOPS (new_fndecl) = 1;
24500 TREE_READONLY (new_fndecl) = 1;
24502 return new_fndecl;
24505 /* Handler for an ACML-style interface to
24506 a library with vectorized intrinsics. */
24508 static tree
24509 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24511 char name[20] = "__vr.._";
24512 tree fntype, new_fndecl, args;
24513 unsigned arity;
24514 const char *bname;
24515 enum machine_mode el_mode, in_mode;
24516 int n, in_n;
24518 /* The ACML is 64-bit only and suitable for unsafe math only, as
24519 it does not correctly support parts of IEEE, such as denormals,
24520 with the required precision. */
24521 if (!TARGET_64BIT
24522 || !flag_unsafe_math_optimizations)
24523 return NULL_TREE;
24525 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24526 n = TYPE_VECTOR_SUBPARTS (type_out);
24527 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24528 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24529 if (el_mode != in_mode
24530 || n != in_n)
24531 return NULL_TREE;
24533 switch (fn)
24535 case BUILT_IN_SIN:
24536 case BUILT_IN_COS:
24537 case BUILT_IN_EXP:
24538 case BUILT_IN_LOG:
24539 case BUILT_IN_LOG2:
24540 case BUILT_IN_LOG10:
24541 name[4] = 'd';
24542 name[5] = '2';
24543 if (el_mode != DFmode
24544 || n != 2)
24545 return NULL_TREE;
24546 break;
24548 case BUILT_IN_SINF:
24549 case BUILT_IN_COSF:
24550 case BUILT_IN_EXPF:
24551 case BUILT_IN_POWF:
24552 case BUILT_IN_LOGF:
24553 case BUILT_IN_LOG2F:
24554 case BUILT_IN_LOG10F:
24555 name[4] = 's';
24556 name[5] = '4';
24557 if (el_mode != SFmode
24558 || n != 4)
24559 return NULL_TREE;
24560 break;
24562 default:
24563 return NULL_TREE;
24566 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24567 sprintf (name + 7, "%s", bname+10);
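/* Tracing the mangling (sketch): for BUILT_IN_SIN the "__vr.._"
   template has become "__vrd2_", and appending bname+10 ("sin")
   yields the ACML entry point "__vrd2_sin"; BUILT_IN_SINF yields
   "__vrs4_sinf".  */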
24569 arity = 0;
24570 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24571 args = TREE_CHAIN (args))
24572 arity++;
24574 if (arity == 1)
24575 fntype = build_function_type_list (type_out, type_in, NULL);
24576 else
24577 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24579 /* Build a function declaration for the vectorized function. */
24580 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
24581 TREE_PUBLIC (new_fndecl) = 1;
24582 DECL_EXTERNAL (new_fndecl) = 1;
24583 DECL_IS_NOVOPS (new_fndecl) = 1;
24584 TREE_READONLY (new_fndecl) = 1;
24586 return new_fndecl;
24590 /* Returns a decl of a function that implements conversion of an integer vector
24591 into a floating-point vector, or vice-versa. TYPE is the type of the integer
24592 side of the conversion.
24593 Return NULL_TREE if it is not available. */
24595 static tree
24596 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
24598 if (TREE_CODE (type) != VECTOR_TYPE)
24599 return NULL_TREE;
24601 switch (code)
24603 case FLOAT_EXPR:
24604 switch (TYPE_MODE (type))
24606 case V4SImode:
24607 return ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
24608 default:
24609 return NULL_TREE;
24612 case FIX_TRUNC_EXPR:
24613 switch (TYPE_MODE (type))
24615 case V4SImode:
24616 return ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
24617 default:
24618 return NULL_TREE;
24620 default:
24621 return NULL_TREE;
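/* E.g. (sketch): vectorizing

     void cvt (float *f, const int *n)
     {
       for (int i = 0; i < 1024; i++)
         f[i] = n[i];   // FLOAT_EXPR, integer side V4SI
     }

   asks for FLOAT_EXPR with TYPE_MODE V4SImode and receives
   IX86_BUILTIN_CVTDQ2PS, i.e. one cvtdq2ps per four elements.  */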
24626 /* Returns a code for a target-specific builtin that implements
24627 reciprocal of the function, or NULL_TREE if not available. */
24629 static tree
24630 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24631 bool sqrt ATTRIBUTE_UNUSED)
24633 if (! (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
24634 && flag_finite_math_only && !flag_trapping_math
24635 && flag_unsafe_math_optimizations))
24636 return NULL_TREE;
24638 if (md_fn)
24639 /* Machine dependent builtins. */
24640 switch (fn)
24642 /* Vectorized version of sqrt to rsqrt conversion. */
24643 case IX86_BUILTIN_SQRTPS_NR:
24644 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24646 default:
24647 return NULL_TREE;
24649 else
24650 /* Normal builtins. */
24651 switch (fn)
24653 /* Sqrt to rsqrt conversion. */
24654 case BUILT_IN_SQRTF:
24655 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24657 default:
24658 return NULL_TREE;
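/* E.g. (sketch): with -ffast-math -mrecip,

     float rs (float x)
     {
       return 1.0f / __builtin_sqrtf (x);
     }

   maps BUILT_IN_SQRTF to IX86_BUILTIN_RSQRTF here, so the divide and
   square root collapse into rsqrtss plus a Newton-Raphson step.  */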
24662 /* Store OPERAND to the memory after reload is completed. This means
24663 that we can't easily use assign_stack_local. */
24664 rtx
24665 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24667 rtx result;
24669 gcc_assert (reload_completed);
24670 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24672 result = gen_rtx_MEM (mode,
24673 gen_rtx_PLUS (Pmode,
24674 stack_pointer_rtx,
24675 GEN_INT (-RED_ZONE_SIZE)));
24676 emit_move_insn (result, operand);
24678 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24680 switch (mode)
24682 case HImode:
24683 case SImode:
24684 operand = gen_lowpart (DImode, operand);
24685 /* FALLTHRU */
24686 case DImode:
24687 emit_insn (
24688 gen_rtx_SET (VOIDmode,
24689 gen_rtx_MEM (DImode,
24690 gen_rtx_PRE_DEC (DImode,
24691 stack_pointer_rtx)),
24692 operand));
24693 break;
24694 default:
24695 gcc_unreachable ();
24697 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24699 else
24701 switch (mode)
24703 case DImode:
24705 rtx operands[2];
24706 split_di (&operand, 1, operands, operands + 1);
24707 emit_insn (
24708 gen_rtx_SET (VOIDmode,
24709 gen_rtx_MEM (SImode,
24710 gen_rtx_PRE_DEC (Pmode,
24711 stack_pointer_rtx)),
24712 operands[1]));
24713 emit_insn (
24714 gen_rtx_SET (VOIDmode,
24715 gen_rtx_MEM (SImode,
24716 gen_rtx_PRE_DEC (Pmode,
24717 stack_pointer_rtx)),
24718 operands[0]));
24720 break;
24721 case HImode:
24722 /* Store HImodes as SImodes. */
24723 operand = gen_lowpart (SImode, operand);
24724 /* FALLTHRU */
24725 case SImode:
24726 emit_insn (
24727 gen_rtx_SET (VOIDmode,
24728 gen_rtx_MEM (GET_MODE (operand),
24729 gen_rtx_PRE_DEC (SImode,
24730 stack_pointer_rtx)),
24731 operand));
24732 break;
24733 default:
24734 gcc_unreachable ();
24736 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24738 return result;
24741 /* Free operand from the memory. */
24742 void
24743 ix86_free_from_memory (enum machine_mode mode)
24745 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
24747 int size;
24749 if (mode == DImode || TARGET_64BIT)
24750 size = 8;
24751 else
24752 size = 4;
24753 /* Use LEA to deallocate stack space. In peephole2 it will be converted
24754 to a pop or add instruction if registers are available. */
24755 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
24756 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24757 GEN_INT (size))));
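/* E.g. (sketch): on a 64-bit SysV target the pair

     ix86_force_to_memory (DImode, op);  ...  ix86_free_from_memory (DImode);

   becomes a store to -128(%rsp) inside the red zone with no
   stack-pointer adjustment, whereas ms_abi and 32-bit targets get an
   explicit push and a releasing lea that peephole2 may turn into a
   pop or add.  */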
24761 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
24762 QImode must go into class Q_REGS.
24763 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
24764 movdf to do mem-to-mem moves through integer regs. */
24765 enum reg_class
24766 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
24768 enum machine_mode mode = GET_MODE (x);
24770 /* We're only allowed to return a subclass of CLASS. Many of the
24771 following checks fail for NO_REGS, so eliminate that early. */
24772 if (regclass == NO_REGS)
24773 return NO_REGS;
24775 /* All classes can load zeros. */
24776 if (x == CONST0_RTX (mode))
24777 return regclass;
24779 /* Force constants into memory if we are loading a (nonzero) constant into
24780 an MMX or SSE register. This is because there are no MMX/SSE instructions
24781 to load from a constant. */
24782 if (CONSTANT_P (x)
24783 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
24784 return NO_REGS;
24786 /* Prefer SSE regs only, if we can use them for math. */
24787 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
24788 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
24790 /* Floating-point constants need more complex checks. */
24791 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
24793 /* General regs can load everything. */
24794 if (reg_class_subset_p (regclass, GENERAL_REGS))
24795 return regclass;
24797 /* Floats can load 0 and 1 plus some others. Note that we eliminated
24798 zero above. We only want to wind up preferring 80387 registers if
24799 we plan on doing computation with them. */
24800 if (TARGET_80387
24801 && standard_80387_constant_p (x))
24803 /* Limit class to non-sse. */
24804 if (regclass == FLOAT_SSE_REGS)
24805 return FLOAT_REGS;
24806 if (regclass == FP_TOP_SSE_REGS)
24807 return FP_TOP_REG;
24808 if (regclass == FP_SECOND_SSE_REGS)
24809 return FP_SECOND_REG;
24810 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
24811 return regclass;
24814 return NO_REGS;
24817 /* Generally when we see PLUS here, it's the function invariant
24818 (plus soft-fp const_int). Which can only be computed into general
24819 regs. */
24820 if (GET_CODE (x) == PLUS)
24821 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
24823 /* QImode constants are easy to load, but non-constant QImode data
24824 must go into Q_REGS. */
24825 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
24827 if (reg_class_subset_p (regclass, Q_REGS))
24828 return regclass;
24829 if (reg_class_subset_p (Q_REGS, regclass))
24830 return Q_REGS;
24831 return NO_REGS;
24834 return regclass;
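/* E.g.: a nonzero FP constant destined for an SSE register returns
   NO_REGS above, so reload materializes it from the constant pool
   (e.g. movsd .LC0(%rip), %xmm0) rather than via an immediate.  */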
24837 /* Discourage putting floating-point values in SSE registers unless
24838 SSE math is being used, and likewise for the 387 registers. */
24839 enum reg_class
24840 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
24842 enum machine_mode mode = GET_MODE (x);
24844 /* Restrict the output reload class to the register bank that we are doing
24845 math on. If we would like not to return a subset of CLASS, reject this
24846 alternative: if reload cannot do this, it will still use its choice. */
24848 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
24849 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
24851 if (X87_FLOAT_MODE_P (mode))
24853 if (regclass == FP_TOP_SSE_REGS)
24854 return FP_TOP_REG;
24855 else if (regclass == FP_SECOND_SSE_REGS)
24856 return FP_SECOND_REG;
24857 else
24858 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
24861 return regclass;
24864 static enum reg_class
24865 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
24866 enum machine_mode mode,
24867 secondary_reload_info *sri ATTRIBUTE_UNUSED)
24869 /* QImode spills from non-QI registers require
24870 an intermediate register on 32-bit targets. */
24871 if (!in_p && mode == QImode && !TARGET_64BIT
24872 && (rclass == GENERAL_REGS
24873 || rclass == LEGACY_REGS
24874 || rclass == INDEX_REGS))
24876 int regno;
24878 if (REG_P (x))
24879 regno = REGNO (x);
24880 else
24881 regno = -1;
24883 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
24884 regno = true_regnum (x);
24886 /* Return Q_REGS if the operand is in memory. */
24887 if (regno == -1)
24888 return Q_REGS;
24891 return NO_REGS;
24894 /* If we are copying between general and FP registers, we need a memory
24895 location. The same is true for SSE and MMX registers.
24897 To optimize register_move_cost performance, allow inline variant.
24899 The macro can't work reliably when one of the CLASSES is a class containing
24900 registers from multiple units (SSE, MMX, integer). We avoid this by never
24901 combining those units in single alternative in the machine description.
24902 Ensure that this constraint holds to avoid unexpected surprises.
24904 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
24905 enforce these sanity checks. */
24907 static inline int
24908 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
24909 enum machine_mode mode, int strict)
24911 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
24912 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
24913 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
24914 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
24915 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
24916 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
24918 gcc_assert (!strict);
24919 return true;
24922 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
24923 return true;
24925 /* ??? This is a lie. We do have moves between mmx/general, and for
24926 mmx/sse2. But by saying we need secondary memory we discourage the
24927 register allocator from using the mmx registers unless needed. */
24928 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
24929 return true;
24931 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
24933 /* SSE1 doesn't have any direct moves from other classes. */
24934 if (!TARGET_SSE2)
24935 return true;
24937 /* If the target says that inter-unit moves are more expensive
24938 than moving through memory, then don't generate them. */
24939 if (!TARGET_INTER_UNIT_MOVES)
24940 return true;
24942 /* Between SSE and general, we have moves no larger than word size. */
24943 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
24944 return true;
24947 return false;
24950 int
24951 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
24952 enum machine_mode mode, int strict)
24954 return inline_secondary_memory_needed (class1, class2, mode, strict);
24957 /* Return true if the registers in CLASS cannot represent the change from
24958 modes FROM to TO. */
24960 bool
24961 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
24962 enum reg_class regclass)
24964 if (from == to)
24965 return false;
24967 /* x87 registers can't do subreg at all, as all values are reformatted
24968 to extended precision. */
24969 if (MAYBE_FLOAT_CLASS_P (regclass))
24970 return true;
24972 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
24974 /* Vector registers do not support QI or HImode loads. If we don't
24975 disallow a change to these modes, reload will assume it's ok to
24976 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
24977 the vec_dupv4hi pattern. */
24978 if (GET_MODE_SIZE (from) < 4)
24979 return true;
24981 /* Vector registers do not support subreg with nonzero offsets, which
24982 are otherwise valid for integer registers. Since we can't see
24983 whether we have a nonzero offset from here, prohibit all
24984 nonparadoxical subregs changing size. */
24985 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
24986 return true;
24989 return false;
24992 /* Return the cost of moving data of mode M between a
24993 register and memory. A value of 2 is the default; this cost is
24994 relative to those in `REGISTER_MOVE_COST'.
24996 This function is used extensively by register_move_cost that is used to
24997 build tables at startup. Make it inline in this case.
24998 When IN is 2, return maximum of in and out move cost.
25000 If moving between registers and memory is more expensive than
25001 between two registers, you should define this macro to express the
25002 relative cost.
25004 Also model the increased moving costs of QImode registers in
25005 non-Q_REGS classes.
25006 */
25007 static inline int
25008 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25009 int in)
25011 int cost;
25012 if (FLOAT_CLASS_P (regclass))
25014 int index;
25015 switch (mode)
25017 case SFmode:
25018 index = 0;
25019 break;
25020 case DFmode:
25021 index = 1;
25022 break;
25023 case XFmode:
25024 index = 2;
25025 break;
25026 default:
25027 return 100;
25029 if (in == 2)
25030 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25031 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25033 if (SSE_CLASS_P (regclass))
25035 int index;
25036 switch (GET_MODE_SIZE (mode))
25038 case 4:
25039 index = 0;
25040 break;
25041 case 8:
25042 index = 1;
25043 break;
25044 case 16:
25045 index = 2;
25046 break;
25047 default:
25048 return 100;
25050 if (in == 2)
25051 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25052 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25054 if (MMX_CLASS_P (regclass))
25056 int index;
25057 switch (GET_MODE_SIZE (mode))
25059 case 4:
25060 index = 0;
25061 break;
25062 case 8:
25063 index = 1;
25064 break;
25065 default:
25066 return 100;
25068 if (in == 2)
25069 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25070 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25072 switch (GET_MODE_SIZE (mode))
25074 case 1:
25075 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25077 if (!in)
25078 return ix86_cost->int_store[0];
25079 if (TARGET_PARTIAL_REG_DEPENDENCY
25080 && optimize_function_for_speed_p (cfun))
25081 cost = ix86_cost->movzbl_load;
25082 else
25083 cost = ix86_cost->int_load[0];
25084 if (in == 2)
25085 return MAX (cost, ix86_cost->int_store[0]);
25086 return cost;
25088 else
25090 if (in == 2)
25091 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25092 if (in)
25093 return ix86_cost->movzbl_load;
25094 else
25095 return ix86_cost->int_store[0] + 4;
25097 break;
25098 case 2:
25099 if (in == 2)
25100 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25101 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25102 default:
25103 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
25104 if (mode == TFmode)
25105 mode = XFmode;
25106 if (in == 2)
25107 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25108 else if (in)
25109 cost = ix86_cost->int_load[2];
25110 else
25111 cost = ix86_cost->int_store[2];
25112 return (cost * (((int) GET_MODE_SIZE (mode)
25113 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25117 int
25118 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25120 return inline_memory_move_cost (mode, regclass, in);
25124 /* Return the cost of moving data from a register in class CLASS1 to
25125 one in class CLASS2.
25127 It is not required that the cost always equal 2 when FROM is the same as TO;
25128 on some machines it is expensive to move between registers if they are not
25129 general registers. */
25131 int
25132 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25133 enum reg_class class2)
25135 /* In case we require secondary memory, compute cost of the store followed
25136 by load. In order to avoid bad register allocation choices, we need
25137 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25139 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25141 int cost = 1;
25143 cost += inline_memory_move_cost (mode, class1, 2);
25144 cost += inline_memory_move_cost (mode, class2, 2);
25146 /* In the case of copying from a general-purpose register we may emit
25147 multiple stores followed by a single load, causing a memory-size-
25148 mismatch stall. Count this as an arbitrarily high cost of 20. */
25149 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25150 cost += 20;
25152 /* In the case of FP/MMX moves, the registers actually overlap, and we
25153 have to switch modes in order to treat them differently. */
25154 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25155 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25156 cost += 20;
25158 return cost;
25161 /* Moves between SSE/MMX and integer unit are expensive. */
25162 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25163 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25165 /* ??? By keeping returned value relatively high, we limit the number
25166 of moves between integer and MMX/SSE registers for all targets.
25167 Additionally, high value prevents problem with x86_modes_tieable_p(),
25168 where integer modes in MMX/SSE registers are not tieable
25169 because of missing QImode and HImode moves to, from or between
25170 MMX/SSE registers. */
25171 return MAX (8, ix86_cost->mmxsse_to_integer);
25173 if (MAYBE_FLOAT_CLASS_P (class1))
25174 return ix86_cost->fp_move;
25175 if (MAYBE_SSE_CLASS_P (class1))
25176 return ix86_cost->sse_move;
25177 if (MAYBE_MMX_CLASS_P (class1))
25178 return ix86_cost->mmx_move;
25179 return 2;
25182 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25184 bool
25185 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25187 /* Flags, and only flags, can hold CCmode values. */
25188 if (CC_REGNO_P (regno))
25189 return GET_MODE_CLASS (mode) == MODE_CC;
25190 if (GET_MODE_CLASS (mode) == MODE_CC
25191 || GET_MODE_CLASS (mode) == MODE_RANDOM
25192 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25193 return 0;
25194 if (FP_REGNO_P (regno))
25195 return VALID_FP_MODE_P (mode);
25196 if (SSE_REGNO_P (regno))
25198 /* We implement the move patterns for all vector modes into and
25199 out of SSE registers, even when no operation instructions
25200 are available. OImode move is available only when AVX is
25201 enabled. */
25202 return ((TARGET_AVX && mode == OImode)
25203 || VALID_AVX256_REG_MODE (mode)
25204 || VALID_SSE_REG_MODE (mode)
25205 || VALID_SSE2_REG_MODE (mode)
25206 || VALID_MMX_REG_MODE (mode)
25207 || VALID_MMX_REG_MODE_3DNOW (mode));
25209 if (MMX_REGNO_P (regno))
25211 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25212 so if the register is available at all, then we can move data of
25213 the given mode into or out of it. */
25214 return (VALID_MMX_REG_MODE (mode)
25215 || VALID_MMX_REG_MODE_3DNOW (mode));
25218 if (mode == QImode)
25220 /* Take care with QImode values - they can live in non-QI regs,
25221 but then they cause partial register stalls. */
25222 if (regno < 4 || TARGET_64BIT)
25223 return 1;
25224 if (!TARGET_PARTIAL_REG_STALL)
25225 return 1;
25226 return reload_in_progress || reload_completed;
25228 /* We handle both integer and floats in the general purpose registers. */
25229 else if (VALID_INT_MODE_P (mode))
25230 return 1;
25231 else if (VALID_FP_MODE_P (mode))
25232 return 1;
25233 else if (VALID_DFP_MODE_P (mode))
25234 return 1;
25235 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25236 on to use that value in smaller contexts, this can easily force a
25237 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25238 supporting DImode, allow it. */
25239 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25240 return 1;
25242 return 0;
25245 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25246 tieable integer mode. */
25248 static bool
25249 ix86_tieable_integer_mode_p (enum machine_mode mode)
25251 switch (mode)
25253 case HImode:
25254 case SImode:
25255 return true;
25257 case QImode:
25258 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25260 case DImode:
25261 return TARGET_64BIT;
25263 default:
25264 return false;
25268 /* Return true if MODE1 is accessible in a register that can hold MODE2
25269 without copying. That is, all register classes that can hold MODE2
25270 can also hold MODE1. */
25272 bool
25273 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25275 if (mode1 == mode2)
25276 return true;
25278 if (ix86_tieable_integer_mode_p (mode1)
25279 && ix86_tieable_integer_mode_p (mode2))
25280 return true;
25282 /* MODE2 being XFmode implies fp stack or general regs, which means we
25283 can tie any smaller floating point modes to it. Note that we do not
25284 tie this with TFmode. */
25285 if (mode2 == XFmode)
25286 return mode1 == SFmode || mode1 == DFmode;
25288 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25289 that we can tie it with SFmode. */
25290 if (mode2 == DFmode)
25291 return mode1 == SFmode;
25293 /* If MODE2 is only appropriate for an SSE register, then tie with
25294 any other mode acceptable to SSE registers. */
25295 if (GET_MODE_SIZE (mode2) == 16
25296 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25297 return (GET_MODE_SIZE (mode1) == 16
25298 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25300 /* If MODE2 is appropriate for an MMX register, then tie
25301 with any other mode acceptable to MMX registers. */
25302 if (GET_MODE_SIZE (mode2) == 8
25303 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25304 return (GET_MODE_SIZE (mode1) == 8
25305 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25307 return false;
25310 /* Compute a (partial) cost for rtx X. Return true if the complete
25311 cost has been computed, and false if subexpressions should be
25312 scanned. In either case, *TOTAL contains the cost result. */
25314 static bool
25315 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25317 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25318 enum machine_mode mode = GET_MODE (x);
25319 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25321 switch (code)
25323 case CONST_INT:
25324 case CONST:
25325 case LABEL_REF:
25326 case SYMBOL_REF:
25327 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25328 *total = 3;
25329 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25330 *total = 2;
25331 else if (flag_pic && SYMBOLIC_CONST (x)
25332 && (!TARGET_64BIT
25333 || (GET_CODE (x) != LABEL_REF
25334 && (GET_CODE (x) != SYMBOL_REF
25335 || !SYMBOL_REF_LOCAL_P (x)))))
25336 *total = 1;
25337 else
25338 *total = 0;
25339 return true;
25341 case CONST_DOUBLE:
25342 if (mode == VOIDmode)
25343 *total = 0;
25344 else
25345 switch (standard_80387_constant_p (x))
25347 case 1: /* 0.0 */
25348 *total = 1;
25349 break;
25350 default: /* Other constants */
25351 *total = 2;
25352 break;
25353 case 0:
25354 case -1:
25355 /* Start with (MEM (SYMBOL_REF)), since that's where
25356 it'll probably end up. Add a penalty for size. */
25357 *total = (COSTS_N_INSNS (1)
25358 + (flag_pic != 0 && !TARGET_64BIT)
25359 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25360 break;
25362 return true;
25364 case ZERO_EXTEND:
25365 /* The zero extension is often completely free on x86_64, so make
25366 it as cheap as possible. */
25367 if (TARGET_64BIT && mode == DImode
25368 && GET_MODE (XEXP (x, 0)) == SImode)
25369 *total = 1;
25370 else if (TARGET_ZERO_EXTEND_WITH_AND)
25371 *total = cost->add;
25372 else
25373 *total = cost->movzx;
25374 return false;
25376 case SIGN_EXTEND:
25377 *total = cost->movsx;
25378 return false;
25380 case ASHIFT:
25381 if (CONST_INT_P (XEXP (x, 1))
25382 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25384 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25385 if (value == 1)
25387 *total = cost->add;
25388 return false;
25390 if ((value == 2 || value == 3)
25391 && cost->lea <= cost->shift_const)
25393 *total = cost->lea;
25394 return false;
25397 /* FALLTHRU */
25399 case ROTATE:
25400 case ASHIFTRT:
25401 case LSHIFTRT:
25402 case ROTATERT:
25403 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25405 if (CONST_INT_P (XEXP (x, 1)))
25407 if (INTVAL (XEXP (x, 1)) > 32)
25408 *total = cost->shift_const + COSTS_N_INSNS (2);
25409 else
25410 *total = cost->shift_const * 2;
25412 else
25414 if (GET_CODE (XEXP (x, 1)) == AND)
25415 *total = cost->shift_var * 2;
25416 else
25417 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25420 else
25422 if (CONST_INT_P (XEXP (x, 1)))
25423 *total = cost->shift_const;
25424 else
25425 *total = cost->shift_var;
25427 return false;
25429 case MULT:
25430 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25432 /* ??? SSE scalar cost should be used here. */
25433 *total = cost->fmul;
25434 return false;
25436 else if (X87_FLOAT_MODE_P (mode))
25438 *total = cost->fmul;
25439 return false;
25441 else if (FLOAT_MODE_P (mode))
25443 /* ??? SSE vector cost should be used here. */
25444 *total = cost->fmul;
25445 return false;
25447 else
25449 rtx op0 = XEXP (x, 0);
25450 rtx op1 = XEXP (x, 1);
25451 int nbits;
25452 if (CONST_INT_P (XEXP (x, 1)))
25454 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25455 for (nbits = 0; value != 0; value &= value - 1)
25456 nbits++;
25458 else
25459 /* This is arbitrary. */
25460 nbits = 7;
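/* E.g.: a multiply by 10 (binary 1010) has two set bits, so the loop
   above (clearing the lowest set bit each iteration) gives nbits == 2
   and a cost of mult_init plus 2 * mult_bit.  */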
25462 /* Compute costs correctly for widening multiplication. */
25463 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25464 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25465 == GET_MODE_SIZE (mode))
25467 int is_mulwiden = 0;
25468 enum machine_mode inner_mode = GET_MODE (op0);
25470 if (GET_CODE (op0) == GET_CODE (op1))
25471 is_mulwiden = 1, op1 = XEXP (op1, 0);
25472 else if (CONST_INT_P (op1))
25474 if (GET_CODE (op0) == SIGN_EXTEND)
25475 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25476 == INTVAL (op1);
25477 else
25478 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25481 if (is_mulwiden)
25482 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25485 *total = (cost->mult_init[MODE_INDEX (mode)]
25486 + nbits * cost->mult_bit
25487 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
25489 return true;
25492 case DIV:
25493 case UDIV:
25494 case MOD:
25495 case UMOD:
25496 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25497 /* ??? SSE cost should be used here. */
25498 *total = cost->fdiv;
25499 else if (X87_FLOAT_MODE_P (mode))
25500 *total = cost->fdiv;
25501 else if (FLOAT_MODE_P (mode))
25502 /* ??? SSE vector cost should be used here. */
25503 *total = cost->fdiv;
25504 else
25505 *total = cost->divide[MODE_INDEX (mode)];
25506 return false;
25508 case PLUS:
25509 if (GET_MODE_CLASS (mode) == MODE_INT
25510 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25512 if (GET_CODE (XEXP (x, 0)) == PLUS
25513 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25514 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25515 && CONSTANT_P (XEXP (x, 1)))
25517 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25518 if (val == 2 || val == 4 || val == 8)
25520 *total = cost->lea;
25521 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25522 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25523 outer_code, speed);
25524 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25525 return true;
25528 else if (GET_CODE (XEXP (x, 0)) == MULT
25529 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25531 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25532 if (val == 2 || val == 4 || val == 8)
25534 *total = cost->lea;
25535 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25536 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25537 return true;
25540 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25542 *total = cost->lea;
25543 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25544 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25545 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25546 return true;
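/* E.g.: (plus (plus (mult reg 4) reg) (const_int 16)) matches the
   scaled-index addressing mode, so the first branch above prices it
   as a single lea such as leal 16(%ebx,%eax,4), %ecx.  */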
25549 /* FALLTHRU */
25551 case MINUS:
25552 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25554 /* ??? SSE cost should be used here. */
25555 *total = cost->fadd;
25556 return false;
25558 else if (X87_FLOAT_MODE_P (mode))
25560 *total = cost->fadd;
25561 return false;
25563 else if (FLOAT_MODE_P (mode))
25565 /* ??? SSE vector cost should be used here. */
25566 *total = cost->fadd;
25567 return false;
25569 /* FALLTHRU */
25571 case AND:
25572 case IOR:
25573 case XOR:
25574 if (!TARGET_64BIT && mode == DImode)
25576 *total = (cost->add * 2
25577 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25578 << (GET_MODE (XEXP (x, 0)) != DImode))
25579 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25580 << (GET_MODE (XEXP (x, 1)) != DImode)));
25581 return true;
25583 /* FALLTHRU */
25585 case NEG:
25586 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25588 /* ??? SSE cost should be used here. */
25589 *total = cost->fchs;
25590 return false;
25592 else if (X87_FLOAT_MODE_P (mode))
25594 *total = cost->fchs;
25595 return false;
25597 else if (FLOAT_MODE_P (mode))
25599 /* ??? SSE vector cost should be used here. */
25600 *total = cost->fchs;
25601 return false;
25603 /* FALLTHRU */
25605 case NOT:
25606 if (!TARGET_64BIT && mode == DImode)
25607 *total = cost->add * 2;
25608 else
25609 *total = cost->add;
25610 return false;
25612 case COMPARE:
25613 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25614 && XEXP (XEXP (x, 0), 1) == const1_rtx
25615 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25616 && XEXP (x, 1) == const0_rtx)
25618 /* This kind of construct is implemented using test[bwl].
25619 Treat it as if we had an AND. */
25620 *total = (cost->add
25621 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25622 + rtx_cost (const1_rtx, outer_code, speed));
25623 return true;
25625 return false;
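/* E.g.: the ZERO_EXTRACT form arises from bit tests such as
   if (x & (1 << 5)) ..., which the backend emits as testl $32, %eax;
   costing it like an AND keeps such tests cheap.  */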
25627 case FLOAT_EXTEND:
25628 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25629 *total = 0;
25630 return false;
25632 case ABS:
25633 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25634 /* ??? SSE cost should be used here. */
25635 *total = cost->fabs;
25636 else if (X87_FLOAT_MODE_P (mode))
25637 *total = cost->fabs;
25638 else if (FLOAT_MODE_P (mode))
25639 /* ??? SSE vector cost should be used here. */
25640 *total = cost->fabs;
25641 return false;
25643 case SQRT:
25644 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25645 /* ??? SSE cost should be used here. */
25646 *total = cost->fsqrt;
25647 else if (X87_FLOAT_MODE_P (mode))
25648 *total = cost->fsqrt;
25649 else if (FLOAT_MODE_P (mode))
25650 /* ??? SSE vector cost should be used here. */
25651 *total = cost->fsqrt;
25652 return false;
25654 case UNSPEC:
25655 if (XINT (x, 1) == UNSPEC_TP)
25656 *total = 0;
25657 return false;
25659 default:
25660 return false;
25664 #if TARGET_MACHO
25666 static int current_machopic_label_num;
25668 /* Given a symbol name and its associated stub, write out the
25669 definition of the stub. */
25671 void
25672 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25674 unsigned int length;
25675 char *binder_name, *symbol_name, lazy_ptr_name[32];
25676 int label = ++current_machopic_label_num;
25678 /* For 64-bit we shouldn't get here. */
25679 gcc_assert (!TARGET_64BIT);
25681 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25682 symb = (*targetm.strip_name_encoding) (symb);
25684 length = strlen (stub);
25685 binder_name = XALLOCAVEC (char, length + 32);
25686 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
25688 length = strlen (symb);
25689 symbol_name = XALLOCAVEC (char, length + 32);
25690 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25692 sprintf (lazy_ptr_name, "L%d$lz", label);
25694 if (MACHOPIC_PURE)
25695 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
25696 else
25697 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
25699 fprintf (file, "%s:\n", stub);
25700 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25702 if (MACHOPIC_PURE)
25704 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
25705 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
25706 fprintf (file, "\tjmp\t*%%edx\n");
25708 else
25709 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
25711 fprintf (file, "%s:\n", binder_name);
25713 if (MACHOPIC_PURE)
25715 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
25716 fprintf (file, "\tpushl\t%%eax\n");
25718 else
25719 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
25721 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
25723 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25724 fprintf (file, "%s:\n", lazy_ptr_name);
25725 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25726 fprintf (file, "\t.long %s\n", binder_name);
25729 void
25730 darwin_x86_file_end (void)
25732 darwin_file_end ();
25733 ix86_file_end ();
25735 #endif /* TARGET_MACHO */
25737 /* Order the registers for the register allocator. */
25739 void
25740 x86_order_regs_for_local_alloc (void)
25742 int pos = 0;
25743 int i;
25745 /* First allocate the local general purpose registers. */
25746 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25747 if (GENERAL_REGNO_P (i) && call_used_regs[i])
25748 reg_alloc_order [pos++] = i;
25750 /* Global general purpose registers. */
25751 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25752 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
25753 reg_alloc_order [pos++] = i;
25755 /* x87 registers come first in case we are doing FP math
25756 using them. */
25757 if (!TARGET_SSE_MATH)
25758 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25759 reg_alloc_order [pos++] = i;
25761 /* SSE registers. */
25762 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
25763 reg_alloc_order [pos++] = i;
25764 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
25765 reg_alloc_order [pos++] = i;
25767 /* x87 registers. */
25768 if (TARGET_SSE_MATH)
25769 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25770 reg_alloc_order [pos++] = i;
25772 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
25773 reg_alloc_order [pos++] = i;
25775 /* Initialize the rest of array as we do not allocate some registers
25776 at all. */
25777 while (pos < FIRST_PSEUDO_REGISTER)
25778 reg_alloc_order [pos++] = 0;
25781 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
25782 struct attribute_spec.handler. */
25783 static tree
25784 ix86_handle_abi_attribute (tree *node, tree name,
25785 tree args ATTRIBUTE_UNUSED,
25786 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25788 if (TREE_CODE (*node) != FUNCTION_TYPE
25789 && TREE_CODE (*node) != METHOD_TYPE
25790 && TREE_CODE (*node) != FIELD_DECL
25791 && TREE_CODE (*node) != TYPE_DECL)
25793 warning (OPT_Wattributes, "%qs attribute only applies to functions",
25794 IDENTIFIER_POINTER (name));
25795 *no_add_attrs = true;
25796 return NULL_TREE;
25798 if (!TARGET_64BIT)
25800 warning (OPT_Wattributes, "%qs attribute only available for 64-bit",
25801 IDENTIFIER_POINTER (name));
25802 *no_add_attrs = true;
25803 return NULL_TREE;
25806 /* The ms_abi and sysv_abi attributes are mutually exclusive. */
25807 if (is_attribute_p ("ms_abi", name))
25809 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
25811 error ("ms_abi and sysv_abi attributes are not compatible");
25814 return NULL_TREE;
25816 else if (is_attribute_p ("sysv_abi", name))
25818 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
25820 error ("ms_abi and sysv_abi attributes are not compatible");
25823 return NULL_TREE;
25826 return NULL_TREE;
25829 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
25830 struct attribute_spec.handler. */
25831 static tree
25832 ix86_handle_struct_attribute (tree *node, tree name,
25833 tree args ATTRIBUTE_UNUSED,
25834 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25836 tree *type = NULL;
25837 if (DECL_P (*node))
25839 if (TREE_CODE (*node) == TYPE_DECL)
25840 type = &TREE_TYPE (*node);
25842 else
25843 type = node;
25845 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
25846 || TREE_CODE (*type) == UNION_TYPE)))
25848 warning (OPT_Wattributes, "%qs attribute ignored",
25849 IDENTIFIER_POINTER (name));
25850 *no_add_attrs = true;
25853 else if ((is_attribute_p ("ms_struct", name)
25854 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
25855 || ((is_attribute_p ("gcc_struct", name)
25856 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
25858 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
25859 IDENTIFIER_POINTER (name));
25860 *no_add_attrs = true;
25863 return NULL_TREE;
25866 static bool
25867 ix86_ms_bitfield_layout_p (const_tree record_type)
25869 return ((TARGET_MS_BITFIELD_LAYOUT
25870 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
25871 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
25874 /* Returns an expression indicating where the this parameter is
25875 located on entry to the FUNCTION. */
25877 static rtx
25878 x86_this_parameter (tree function)
25880 tree type = TREE_TYPE (function);
25881 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
25882 int nregs;
25884 if (TARGET_64BIT)
25886 const int *parm_regs;
25888 if (ix86_function_type_abi (type) == MS_ABI)
25889 parm_regs = x86_64_ms_abi_int_parameter_registers;
25890 else
25891 parm_regs = x86_64_int_parameter_registers;
25892 return gen_rtx_REG (DImode, parm_regs[aggr]);
25895 nregs = ix86_function_regparm (type, function);
25897 if (nregs > 0 && !stdarg_p (type))
25899 int regno;
25901 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
25902 regno = aggr ? DX_REG : CX_REG;
25903 else
25905 regno = AX_REG;
25906 if (aggr)
25908 regno = DX_REG;
25909 if (nregs == 1)
25910 return gen_rtx_MEM (SImode,
25911 plus_constant (stack_pointer_rtx, 4));
25914 return gen_rtx_REG (SImode, regno);
25917 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
25920 /* Determine whether x86_output_mi_thunk can succeed. */
25922 static bool
25923 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
25924 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
25925 HOST_WIDE_INT vcall_offset, const_tree function)
25927 /* 64-bit can handle anything. */
25928 if (TARGET_64BIT)
25929 return true;
25931 /* For 32-bit, everything's fine if we have one free register. */
25932 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
25933 return true;
25935 /* Need a free register for vcall_offset. */
25936 if (vcall_offset)
25937 return false;
25939 /* Need a free register for GOT references. */
25940 if (flag_pic && !(*targetm.binds_local_p) (function))
25941 return false;
25943 /* Otherwise ok. */
25944 return true;
25947 /* Output the assembler code for a thunk function. THUNK_DECL is the
25948 declaration for the thunk function itself, FUNCTION is the decl for
25949 the target function. DELTA is an immediate constant offset to be
25950 added to THIS. If VCALL_OFFSET is nonzero, the word at
25951 *(*this + vcall_offset) should be added to THIS. */
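/* As an illustration, for a simple 32-bit case (THIS on the stack,
   DELTA == -4, no VCALL_OFFSET, non-PIC) the code below boils down to
   roughly:

	addl	$-4, 4(%esp)
	jmp	function

   i.e. adjust THIS in place and tail-jump to the real function.  */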
25953 static void
25954 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
25955 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
25956 HOST_WIDE_INT vcall_offset, tree function)
25958 rtx xops[3];
25959 rtx this_param = x86_this_parameter (function);
25960 rtx this_reg, tmp;
25962 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
25963 pull it in now and let DELTA benefit. */
25964 if (REG_P (this_param))
25965 this_reg = this_param;
25966 else if (vcall_offset)
25968 /* Put the this parameter into %eax. */
25969 xops[0] = this_param;
25970 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
25971 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
25973 else
25974 this_reg = NULL_RTX;
25976 /* Adjust the this parameter by a fixed constant. */
25977 if (delta)
25979 xops[0] = GEN_INT (delta);
25980 xops[1] = this_reg ? this_reg : this_param;
25981 if (TARGET_64BIT)
25983 if (!x86_64_general_operand (xops[0], DImode))
25985 tmp = gen_rtx_REG (DImode, R10_REG);
25986 xops[1] = tmp;
25987 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
25988 xops[0] = tmp;
25989 xops[1] = this_param;
25991 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
25993 else
25994 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
25997 /* Adjust the this parameter by a value stored in the vtable. */
25998 if (vcall_offset)
26000 if (TARGET_64BIT)
26001 tmp = gen_rtx_REG (DImode, R10_REG);
26002 else
26004 int tmp_regno = CX_REG;
26005 if (lookup_attribute ("fastcall",
26006 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26007 tmp_regno = AX_REG;
26008 tmp = gen_rtx_REG (SImode, tmp_regno);
26011 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26012 xops[1] = tmp;
26013 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26015 /* Adjust the this parameter. */
26016 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26017 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26019 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26020 xops[0] = GEN_INT (vcall_offset);
26021 xops[1] = tmp2;
26022 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26023 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26025 xops[1] = this_reg;
26026 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26029 /* If necessary, drop THIS back to its stack slot. */
26030 if (this_reg && this_reg != this_param)
26032 xops[0] = this_reg;
26033 xops[1] = this_param;
26034 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26037 xops[0] = XEXP (DECL_RTL (function), 0);
26038 if (TARGET_64BIT)
26040 if (!flag_pic || (*targetm.binds_local_p) (function))
26041 output_asm_insn ("jmp\t%P0", xops);
26042 /* All thunks should be in the same object as their target,
26043 and thus binds_local_p should be true. */
26044 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26045 gcc_unreachable ();
26046 else
26048 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26049 tmp = gen_rtx_CONST (Pmode, tmp);
26050 tmp = gen_rtx_MEM (QImode, tmp);
26051 xops[0] = tmp;
26052 output_asm_insn ("jmp\t%A0", xops);
26055 else
26057 if (!flag_pic || (*targetm.binds_local_p) (function))
26058 output_asm_insn ("jmp\t%P0", xops);
26059 else
26060 #if TARGET_MACHO
26061 if (TARGET_MACHO)
26063 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26064 tmp = (gen_rtx_SYMBOL_REF
26065 (Pmode,
26066 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26067 tmp = gen_rtx_MEM (QImode, tmp);
26068 xops[0] = tmp;
26069 output_asm_insn ("jmp\t%0", xops);
26071 else
26072 #endif /* TARGET_MACHO */
26074 tmp = gen_rtx_REG (SImode, CX_REG);
26075 output_set_got (tmp, NULL_RTX);
26077 xops[1] = tmp;
26078 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26079 output_asm_insn ("jmp\t{*}%1", xops);
26084 static void
26085 x86_file_start (void)
26087 default_file_start ();
26088 #if TARGET_MACHO
26089 darwin_file_start ();
26090 #endif
26091 if (X86_FILE_START_VERSION_DIRECTIVE)
26092 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26093 if (X86_FILE_START_FLTUSED)
26094 fputs ("\t.global\t__fltused\n", asm_out_file);
26095 if (ix86_asm_dialect == ASM_INTEL)
26096 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26100 x86_field_alignment (tree field, int computed)
26102 enum machine_mode mode;
26103 tree type = TREE_TYPE (field);
26105 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26106 return computed;
26107 mode = TYPE_MODE (strip_array_types (type));
26108 if (mode == DFmode || mode == DCmode
26109 || GET_MODE_CLASS (mode) == MODE_INT
26110 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26111 return MIN (32, computed);
26112 return computed;
26115 /* Output assembler code to FILE to increment profiler label # LABELNO
26116 for profiling a function entry. */
26117 void
26118 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26120 if (TARGET_64BIT)
26122 #ifndef NO_PROFILE_COUNTERS
26123 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
26124 #endif
26126 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26127 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
26128 else
26129 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
26131 else if (flag_pic)
26133 #ifndef NO_PROFILE_COUNTERS
26134 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
26135 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
26136 #endif
26137 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
26139 else
26141 #ifndef NO_PROFILE_COUNTERS
26142 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
26143 PROFILE_COUNT_REGISTER);
26144 #endif
26145 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
26149 /* We don't have exact information about the insn sizes, but we may assume
26150 quite safely that we are informed about all 1-byte insns and memory
26151 address sizes. This is enough to eliminate unnecessary padding in
26152 99% of cases. */
26154 static int
26155 min_insn_size (rtx insn)
26157 int l = 0;
26159 if (!INSN_P (insn) || !active_insn_p (insn))
26160 return 0;
26162 /* Discard the alignments we've emitted ourselves, and jump tables. */
26163 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26164 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26165 return 0;
26166 if (JUMP_P (insn)
26167 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
26168 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
26169 return 0;
26171 /* Important case: calls are always 5 bytes.
26172 It is common to have many calls in a row. */
26173 if (CALL_P (insn)
26174 && symbolic_reference_mentioned_p (PATTERN (insn))
26175 && !SIBLING_CALL_P (insn))
26176 return 5;
26177 if (get_attr_length (insn) <= 1)
26178 return 1;
26180 /* For normal instructions we may rely on the sizes of addresses
26181 and the presence of a symbol to require 4 bytes of encoding.
26182 This is not the case for jumps, where references are PC-relative. */
26183 if (!JUMP_P (insn))
26185 l = get_attr_length_address (insn);
26186 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26187 l = 4;
26189 if (l)
26190 return 1 + l;
26191 else
26192 return 2;
26195 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
26196 16-byte window. */
26198 static void
26199 ix86_avoid_jump_misspredicts (void)
26201 rtx insn, start = get_insns ();
26202 int nbytes = 0, njumps = 0;
26203 int isjump = 0;
26205 /* Look for all minimal intervals of instructions containing 4 jumps.
26206 The intervals are bounded by START and INSN. NBYTES is the total
26207 size of the instructions in the interval including INSN and not including
26208 START. When NBYTES is smaller than 16 bytes, it is possible
26209 that the end of START and INSN end up in the same 16-byte page.
26211 The smallest offset in the page at which INSN can start is the case where
26212 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
26213 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN). */
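/* For example, if the interval START..INSN already contains three jumps,
   INSN is itself a 2-byte jump and NBYTES == 12, we emit an align of
   padsize == 15 - 12 + 2 == 5 bytes, padding INSN away from the 16-byte
   window holding the three preceding jumps.  */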
26215 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
26218 nbytes += min_insn_size (insn);
26219 if (dump_file)
26220 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26221 INSN_UID (insn), min_insn_size (insn));
26222 if ((JUMP_P (insn)
26223 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26224 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26225 || CALL_P (insn))
26226 njumps++;
26227 else
26228 continue;
26230 while (njumps > 3)
26232 start = NEXT_INSN (start);
26233 if ((JUMP_P (start)
26234 && GET_CODE (PATTERN (start)) != ADDR_VEC
26235 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26236 || CALL_P (start))
26237 njumps--, isjump = 1;
26238 else
26239 isjump = 0;
26240 nbytes -= min_insn_size (start);
26242 gcc_assert (njumps >= 0);
26243 if (dump_file)
26244 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26245 INSN_UID (start), INSN_UID (insn), nbytes);
26247 if (njumps == 3 && isjump && nbytes < 16)
26249 int padsize = 15 - nbytes + min_insn_size (insn);
26251 if (dump_file)
26252 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26253 INSN_UID (insn), padsize);
26254 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
26259 /* AMD Athlon works faster
26260 when RET is not the destination of a conditional jump and is not directly
26261 preceded by another jump instruction. We avoid the penalty by replacing
26262 such RET instructions with a rep-prefixed RET (return_internal_long). */
26263 static void
26264 ix86_pad_returns (void)
26266 edge e;
26267 edge_iterator ei;
26269 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26271 basic_block bb = e->src;
26272 rtx ret = BB_END (bb);
26273 rtx prev;
26274 bool replace = false;
26276 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26277 || optimize_bb_for_size_p (bb))
26278 continue;
26279 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26280 if (active_insn_p (prev) || LABEL_P (prev))
26281 break;
26282 if (prev && LABEL_P (prev))
26284 edge e;
26285 edge_iterator ei;
26287 FOR_EACH_EDGE (e, ei, bb->preds)
26288 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26289 && !(e->flags & EDGE_FALLTHRU))
26290 replace = true;
26292 if (!replace)
26294 prev = prev_active_insn (ret);
26295 if (prev
26296 && ((JUMP_P (prev) && any_condjump_p (prev))
26297 || CALL_P (prev)))
26298 replace = true;
26299 /* Empty functions get a branch mispredict even when the jump destination
26300 is not visible to us. */
26301 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
26302 replace = true;
26304 if (replace)
26306 emit_insn_before (gen_return_internal_long (), ret);
26307 delete_insn (ret);
26312 /* Implement machine-specific optimizations. We implement padding of returns
26313 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
26314 static void
26315 ix86_reorg (void)
26317 if (TARGET_PAD_RETURNS && optimize
26318 && optimize_function_for_speed_p (cfun))
26319 ix86_pad_returns ();
26320 if (TARGET_FOUR_JUMP_LIMIT && optimize
26321 && optimize_function_for_speed_p (cfun))
26322 ix86_avoid_jump_misspredicts ();
26325 /* Return nonzero when a QImode register that must be represented via a REX
26326 prefix is used. */
26327 bool
26328 x86_extended_QIreg_mentioned_p (rtx insn)
26330 int i;
26331 extract_insn_cached (insn);
26332 for (i = 0; i < recog_data.n_operands; i++)
26333 if (REG_P (recog_data.operand[i])
26334 && REGNO (recog_data.operand[i]) >= 4)
26335 return true;
26336 return false;
26339 /* Return nonzero when P points to a register encoded via a REX prefix.
26340 Called via for_each_rtx. */
26341 static int
26342 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26344 unsigned int regno;
26345 if (!REG_P (*p))
26346 return 0;
26347 regno = REGNO (*p);
26348 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26351 /* Return true when INSN mentions a register that must be encoded using a
26352 REX prefix. */
26353 bool
26354 x86_extended_reg_mentioned_p (rtx insn)
26356 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26357 extended_reg_mentioned_1, NULL);
26360 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26361 optabs would emit if we didn't have TFmode patterns. */
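/* The trick used below for inputs with the sign bit set: convert
   (x >> 1) | (x & 1) as a signed value and double the result.  OR-ing
   the shifted-out bit back in acts as a sticky bit, so the doubled
   result is still the correctly rounded unsigned conversion.  */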
26363 void
26364 x86_emit_floatuns (rtx operands[2])
26366 rtx neglab, donelab, i0, i1, f0, in, out;
26367 enum machine_mode mode, inmode;
26369 inmode = GET_MODE (operands[1]);
26370 gcc_assert (inmode == SImode || inmode == DImode);
26372 out = operands[0];
26373 in = force_reg (inmode, operands[1]);
26374 mode = GET_MODE (out);
26375 neglab = gen_label_rtx ();
26376 donelab = gen_label_rtx ();
26377 f0 = gen_reg_rtx (mode);
26379 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26381 expand_float (out, in, 0);
26383 emit_jump_insn (gen_jump (donelab));
26384 emit_barrier ();
26386 emit_label (neglab);
26388 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26389 1, OPTAB_DIRECT);
26390 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26391 1, OPTAB_DIRECT);
26392 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26394 expand_float (f0, i0, 0);
26396 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26398 emit_label (donelab);
26401 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26402 with all elements equal to VAR. Return true if successful. */
26404 static bool
26405 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26406 rtx target, rtx val)
26408 enum machine_mode hmode, smode, wsmode, wvmode;
26409 rtx x;
26411 switch (mode)
26413 case V2SImode:
26414 case V2SFmode:
26415 if (!mmx_ok)
26416 return false;
26417 /* FALLTHRU */
26419 case V2DFmode:
26420 case V2DImode:
26421 case V4SFmode:
26422 case V4SImode:
26423 val = force_reg (GET_MODE_INNER (mode), val);
26424 x = gen_rtx_VEC_DUPLICATE (mode, val);
26425 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26426 return true;
26428 case V4HImode:
26429 if (!mmx_ok)
26430 return false;
26431 if (TARGET_SSE || TARGET_3DNOW_A)
26433 val = gen_lowpart (SImode, val);
26434 x = gen_rtx_TRUNCATE (HImode, val);
26435 x = gen_rtx_VEC_DUPLICATE (mode, x);
26436 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26437 return true;
26439 else
26441 smode = HImode;
26442 wsmode = SImode;
26443 wvmode = V2SImode;
26444 goto widen;
26447 case V8QImode:
26448 if (!mmx_ok)
26449 return false;
26450 smode = QImode;
26451 wsmode = HImode;
26452 wvmode = V4HImode;
26453 goto widen;
26454 case V8HImode:
26455 if (TARGET_SSE2)
26457 rtx tmp1, tmp2;
26458 /* Extend HImode to SImode using a paradoxical SUBREG. */
26459 tmp1 = gen_reg_rtx (SImode);
26460 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26461 /* Insert the SImode value as low element of V4SImode vector. */
26462 tmp2 = gen_reg_rtx (V4SImode);
26463 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
26464 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
26465 CONST0_RTX (V4SImode),
26466 const1_rtx);
26467 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
26468 /* Cast the V4SImode vector back to a V8HImode vector. */
26469 tmp1 = gen_reg_rtx (V8HImode);
26470 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
26471 /* Duplicate the low short through the whole low SImode word. */
26472 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
26473 /* Cast the V8HImode vector back to a V4SImode vector. */
26474 tmp2 = gen_reg_rtx (V4SImode);
26475 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
26476 /* Replicate the low element of the V4SImode vector. */
26477 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
26478 /* Cast the V4SImode vector back to V8HImode, and store in target. */
26479 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
26480 return true;
26482 smode = HImode;
26483 wsmode = SImode;
26484 wvmode = V4SImode;
26485 goto widen;
26486 case V16QImode:
26487 if (TARGET_SSE2)
26489 rtx tmp1, tmp2;
26490 /* Extend QImode to SImode using a paradoxical SUBREG. */
26491 tmp1 = gen_reg_rtx (SImode);
26492 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26493 /* Insert the SImode value as low element of V4SImode vector. */
26494 tmp2 = gen_reg_rtx (V4SImode);
26495 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
26496 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
26497 CONST0_RTX (V4SImode),
26498 const1_rtx);
26499 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
26500 /* Cast the V4SImode vector back to a V16QImode vector. */
26501 tmp1 = gen_reg_rtx (V16QImode);
26502 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
26503 /* Duplicate the low byte through the whole low SImode word. */
26504 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
26505 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
26506 /* Cast the V16QImode vector back to a V4SImode vector. */
26507 tmp2 = gen_reg_rtx (V4SImode);
26508 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
26509 /* Replicate the low element of the V4SImode vector. */
26510 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
26511 /* Cast the V4SImode vector back to V16QImode, and store in target. */
26512 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
26513 return true;
26515 smode = QImode;
26516 wsmode = HImode;
26517 wvmode = V8HImode;
26518 goto widen;
26519 widen:
26520 /* Replicate the value once into the next wider mode and recurse. */
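/* E.g. a QImode value 0xab becomes the HImode value 0xabab, and
   duplicating that through V4HImode fills all eight bytes of a
   V8QImode vector with 0xab.  */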
26521 val = convert_modes (wsmode, smode, val, true);
26522 x = expand_simple_binop (wsmode, ASHIFT, val,
26523 GEN_INT (GET_MODE_BITSIZE (smode)),
26524 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26525 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
26527 x = gen_reg_rtx (wvmode);
26528 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
26529 gcc_unreachable ();
26530 emit_move_insn (target, gen_lowpart (mode, x));
26531 return true;
26533 case V4DFmode:
26534 hmode = V2DFmode;
26535 goto half;
26536 case V4DImode:
26537 hmode = V2DImode;
26538 goto half;
26539 case V8SFmode:
26540 hmode = V4SFmode;
26541 goto half;
26542 case V8SImode:
26543 hmode = V4SImode;
26544 goto half;
26545 case V16HImode:
26546 hmode = V8HImode;
26547 goto half;
26548 case V32QImode:
26549 hmode = V16QImode;
26550 goto half;
26551 half:
26553 rtx tmp = gen_reg_rtx (hmode);
26554 ix86_expand_vector_init_duplicate (mmx_ok, hmode, tmp, val);
26555 emit_insn (gen_rtx_SET (VOIDmode, target,
26556 gen_rtx_VEC_CONCAT (mode, tmp, tmp)));
26558 return true;
26560 default:
26561 return false;
26565 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26566 whose ONE_VAR element is VAR, and other elements are zero. Return true
26567 if successful. */
26569 static bool
26570 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
26571 rtx target, rtx var, int one_var)
26573 enum machine_mode vsimode;
26574 rtx new_target;
26575 rtx x, tmp;
26576 bool use_vector_set = false;
26578 switch (mode)
26580 case V2DImode:
26581 /* For SSE4.1, we normally use vector set. But if the second
26582 element is zero and inter-unit moves are OK, we use movq
26583 instead. */
26584 use_vector_set = (TARGET_64BIT
26585 && TARGET_SSE4_1
26586 && !(TARGET_INTER_UNIT_MOVES
26587 && one_var == 0));
26588 break;
26589 case V16QImode:
26590 case V4SImode:
26591 case V4SFmode:
26592 use_vector_set = TARGET_SSE4_1;
26593 break;
26594 case V8HImode:
26595 use_vector_set = TARGET_SSE2;
26596 break;
26597 case V4HImode:
26598 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
26599 break;
26600 case V32QImode:
26601 case V16HImode:
26602 case V8SImode:
26603 case V8SFmode:
26604 case V4DImode:
26605 case V4DFmode:
26606 use_vector_set = TARGET_AVX;
26607 break;
26608 default:
26609 break;
26612 if (use_vector_set)
26614 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
26615 var = force_reg (GET_MODE_INNER (mode), var);
26616 ix86_expand_vector_set (mmx_ok, target, var, one_var);
26617 return true;
26620 switch (mode)
26622 case V2SFmode:
26623 case V2SImode:
26624 if (!mmx_ok)
26625 return false;
26626 /* FALLTHRU */
26628 case V2DFmode:
26629 case V2DImode:
26630 if (one_var != 0)
26631 return false;
26632 var = force_reg (GET_MODE_INNER (mode), var);
26633 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
26634 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26635 return true;
26637 case V4SFmode:
26638 case V4SImode:
26639 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
26640 new_target = gen_reg_rtx (mode);
26641 else
26642 new_target = target;
26643 var = force_reg (GET_MODE_INNER (mode), var);
26644 x = gen_rtx_VEC_DUPLICATE (mode, var);
26645 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
26646 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
26647 if (one_var != 0)
26649 /* We need to shuffle the value to the correct position, so
26650 create a new pseudo to store the intermediate result. */
26652 /* With SSE2, we can use the integer shuffle insns. */
26653 if (mode != V4SFmode && TARGET_SSE2)
26655 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
26656 GEN_INT (1),
26657 GEN_INT (one_var == 1 ? 0 : 1),
26658 GEN_INT (one_var == 2 ? 0 : 1),
26659 GEN_INT (one_var == 3 ? 0 : 1)));
26660 if (target != new_target)
26661 emit_move_insn (target, new_target);
26662 return true;
26665 /* Otherwise convert the intermediate result to V4SFmode and
26666 use the SSE1 shuffle instructions. */
26667 if (mode != V4SFmode)
26669 tmp = gen_reg_rtx (V4SFmode);
26670 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
26672 else
26673 tmp = new_target;
26675 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
26676 GEN_INT (1),
26677 GEN_INT (one_var == 1 ? 0 : 1),
26678 GEN_INT (one_var == 2 ? 0+4 : 1+4),
26679 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
26681 if (mode != V4SFmode)
26682 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
26683 else if (tmp != target)
26684 emit_move_insn (target, tmp);
26686 else if (target != new_target)
26687 emit_move_insn (target, new_target);
26688 return true;
26690 case V8HImode:
26691 case V16QImode:
26692 vsimode = V4SImode;
26693 goto widen;
26694 case V4HImode:
26695 case V8QImode:
26696 if (!mmx_ok)
26697 return false;
26698 vsimode = V2SImode;
26699 goto widen;
26700 widen:
26701 if (one_var != 0)
26702 return false;
26704 /* Zero extend the variable element to SImode and recurse. */
26705 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
26707 x = gen_reg_rtx (vsimode);
26708 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
26709 var, one_var))
26710 gcc_unreachable ();
26712 emit_move_insn (target, gen_lowpart (mode, x));
26713 return true;
26715 default:
26716 return false;
26720 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26721 consisting of the values in VALS. It is known that all elements
26722 except ONE_VAR are constants. Return true if successful. */
26724 static bool
26725 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
26726 rtx target, rtx vals, int one_var)
26728 rtx var = XVECEXP (vals, 0, one_var);
26729 enum machine_mode wmode;
26730 rtx const_vec, x;
26732 const_vec = copy_rtx (vals);
26733 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
26734 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
26736 switch (mode)
26738 case V2DFmode:
26739 case V2DImode:
26740 case V2SFmode:
26741 case V2SImode:
26742 /* For the two element vectors, it's just as easy to use
26743 the general case. */
26744 return false;
26746 case V4DFmode:
26747 case V4DImode:
26748 case V8SFmode:
26749 case V8SImode:
26750 case V16HImode:
26751 case V32QImode:
26752 case V4SFmode:
26753 case V4SImode:
26754 case V8HImode:
26755 case V4HImode:
26756 break;
26758 case V16QImode:
26759 if (TARGET_SSE4_1)
26760 break;
26761 wmode = V8HImode;
26762 goto widen;
26763 case V8QImode:
26764 wmode = V4HImode;
26765 goto widen;
26766 widen:
26767 /* There's no way to set one QImode entry easily. Combine
26768 the variable value with its adjacent constant value, and
26769 promote to an HImode set. */
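/* E.g. for an odd ONE_VAR the variable byte lands in the high half of
   the HImode value, (var << 8) | (const & 0xff); for an even ONE_VAR it
   is the constant that is shifted into the high half instead.  */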
26770 x = XVECEXP (vals, 0, one_var ^ 1);
26771 if (one_var & 1)
26773 var = convert_modes (HImode, QImode, var, true);
26774 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
26775 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26776 x = GEN_INT (INTVAL (x) & 0xff);
26778 else
26780 var = convert_modes (HImode, QImode, var, true);
26781 x = gen_int_mode (INTVAL (x) << 8, HImode);
26783 if (x != const0_rtx)
26784 var = expand_simple_binop (HImode, IOR, var, x, var,
26785 1, OPTAB_LIB_WIDEN);
26787 x = gen_reg_rtx (wmode);
26788 emit_move_insn (x, gen_lowpart (wmode, const_vec));
26789 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
26791 emit_move_insn (target, gen_lowpart (mode, x));
26792 return true;
26794 default:
26795 return false;
26798 emit_move_insn (target, const_vec);
26799 ix86_expand_vector_set (mmx_ok, target, var, one_var);
26800 return true;
26803 /* A subroutine of ix86_expand_vector_init_general. Use vector
26804 concatenate to handle the most general case: all values variable,
26805 and none identical. */
26807 static void
26808 ix86_expand_vector_init_concat (enum machine_mode mode,
26809 rtx target, rtx *ops, int n)
26811 enum machine_mode cmode, hmode = VOIDmode;
26812 rtx first[8], second[4];
26813 rtvec v;
26814 int i, j;
26816 switch (n)
26818 case 2:
26819 switch (mode)
26821 case V8SImode:
26822 cmode = V4SImode;
26823 break;
26824 case V8SFmode:
26825 cmode = V4SFmode;
26826 break;
26827 case V4DImode:
26828 cmode = V2DImode;
26829 break;
26830 case V4DFmode:
26831 cmode = V2DFmode;
26832 break;
26833 case V4SImode:
26834 cmode = V2SImode;
26835 break;
26836 case V4SFmode:
26837 cmode = V2SFmode;
26838 break;
26839 case V2DImode:
26840 cmode = DImode;
26841 break;
26842 case V2SImode:
26843 cmode = SImode;
26844 break;
26845 case V2DFmode:
26846 cmode = DFmode;
26847 break;
26848 case V2SFmode:
26849 cmode = SFmode;
26850 break;
26851 default:
26852 gcc_unreachable ();
26855 if (!register_operand (ops[1], cmode))
26856 ops[1] = force_reg (cmode, ops[1]);
26857 if (!register_operand (ops[0], cmode))
26858 ops[0] = force_reg (cmode, ops[0]);
26859 emit_insn (gen_rtx_SET (VOIDmode, target,
26860 gen_rtx_VEC_CONCAT (mode, ops[0],
26861 ops[1])));
26862 break;
26864 case 4:
26865 switch (mode)
26867 case V4DImode:
26868 cmode = V2DImode;
26869 break;
26870 case V4DFmode:
26871 cmode = V2DFmode;
26872 break;
26873 case V4SImode:
26874 cmode = V2SImode;
26875 break;
26876 case V4SFmode:
26877 cmode = V2SFmode;
26878 break;
26879 default:
26880 gcc_unreachable ();
26882 goto half;
26884 case 8:
26885 switch (mode)
26887 case V8SImode:
26888 cmode = V2SImode;
26889 hmode = V4SImode;
26890 break;
26891 case V8SFmode:
26892 cmode = V2SFmode;
26893 hmode = V4SFmode;
26894 break;
26895 default:
26896 gcc_unreachable ();
26898 goto half;
26900 half:
26901 /* FIXME: We process inputs backward to help RA. PR 36222. */
26902 i = n - 1;
26903 j = (n >> 1) - 1;
26904 for (; i > 0; i -= 2, j--)
26906 first[j] = gen_reg_rtx (cmode);
26907 v = gen_rtvec (2, ops[i - 1], ops[i]);
26908 ix86_expand_vector_init (false, first[j],
26909 gen_rtx_PARALLEL (cmode, v));
26912 n >>= 1;
26913 if (n > 2)
26915 gcc_assert (hmode != VOIDmode);
26916 for (i = j = 0; i < n; i += 2, j++)
26918 second[j] = gen_reg_rtx (hmode);
26919 ix86_expand_vector_init_concat (hmode, second [j],
26920 &first [i], 2);
26922 n >>= 1;
26923 ix86_expand_vector_init_concat (mode, target, second, n);
26925 else
26926 ix86_expand_vector_init_concat (mode, target, first, n);
26927 break;
26929 default:
26930 gcc_unreachable ();
26934 /* A subroutine of ix86_expand_vector_init_general. Use vector
26935 interleave to handle the most general case: all values variable,
26936 and none identical. */
26938 static void
26939 ix86_expand_vector_init_interleave (enum machine_mode mode,
26940 rtx target, rtx *ops, int n)
26942 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
26943 int i, j;
26944 rtx op0, op1;
26945 rtx (*gen_load_even) (rtx, rtx, rtx);
26946 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
26947 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
26949 switch (mode)
26951 case V8HImode:
26952 gen_load_even = gen_vec_setv8hi;
26953 gen_interleave_first_low = gen_vec_interleave_lowv4si;
26954 gen_interleave_second_low = gen_vec_interleave_lowv2di;
26955 inner_mode = HImode;
26956 first_imode = V4SImode;
26957 second_imode = V2DImode;
26958 third_imode = VOIDmode;
26959 break;
26960 case V16QImode:
26961 gen_load_even = gen_vec_setv16qi;
26962 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
26963 gen_interleave_second_low = gen_vec_interleave_lowv4si;
26964 inner_mode = QImode;
26965 first_imode = V8HImode;
26966 second_imode = V4SImode;
26967 third_imode = V2DImode;
26968 break;
26969 default:
26970 gcc_unreachable ();
26973 for (i = 0; i < n; i++)
26975 /* Extend the odd element to SImode using a paradoxical SUBREG. */
26976 op0 = gen_reg_rtx (SImode);
26977 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
26979 /* Insert the SImode value as low element of V4SImode vector. */
26980 op1 = gen_reg_rtx (V4SImode);
26981 op0 = gen_rtx_VEC_MERGE (V4SImode,
26982 gen_rtx_VEC_DUPLICATE (V4SImode,
26983 op0),
26984 CONST0_RTX (V4SImode),
26985 const1_rtx);
26986 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
26988 /* Cast the V4SImode vector back to a vector in the original mode. */
26989 op0 = gen_reg_rtx (mode);
26990 emit_move_insn (op0, gen_lowpart (mode, op1));
26992 /* Load even elements into the second position. */
26993 emit_insn ((*gen_load_even) (op0,
26994 force_reg (inner_mode,
26995 ops [i + i + 1]),
26996 const1_rtx));
26998 /* Cast vector to FIRST_IMODE vector. */
26999 ops[i] = gen_reg_rtx (first_imode);
27000 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27003 /* Interleave low FIRST_IMODE vectors. */
27004 for (i = j = 0; i < n; i += 2, j++)
27006 op0 = gen_reg_rtx (first_imode);
27007 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27009 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27010 ops[j] = gen_reg_rtx (second_imode);
27011 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27014 /* Interleave low SECOND_IMODE vectors. */
27015 switch (second_imode)
27017 case V4SImode:
27018 for (i = j = 0; i < n / 2; i += 2, j++)
27020 op0 = gen_reg_rtx (second_imode);
27021 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27022 ops[i + 1]));
27024 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27025 vector. */
27026 ops[j] = gen_reg_rtx (third_imode);
27027 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27029 second_imode = V2DImode;
27030 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27031 /* FALLTHRU */
27033 case V2DImode:
27034 op0 = gen_reg_rtx (second_imode);
27035 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27036 ops[1]));
27038 /* Cast the SECOND_IMODE vector back to a vector in the original
27039 mode. */
27040 emit_insn (gen_rtx_SET (VOIDmode, target,
27041 gen_lowpart (mode, op0)));
27042 break;
27044 default:
27045 gcc_unreachable ();
27049 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27050 all values variable, and none identical. */
27052 static void
27053 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27054 rtx target, rtx vals)
27056 rtx ops[32], op0, op1;
27057 enum machine_mode half_mode = VOIDmode;
27058 int n, i;
27060 switch (mode)
27062 case V2SFmode:
27063 case V2SImode:
27064 if (!mmx_ok && !TARGET_SSE)
27065 break;
27066 /* FALLTHRU */
27068 case V8SFmode:
27069 case V8SImode:
27070 case V4DFmode:
27071 case V4DImode:
27072 case V4SFmode:
27073 case V4SImode:
27074 case V2DFmode:
27075 case V2DImode:
27076 n = GET_MODE_NUNITS (mode);
27077 for (i = 0; i < n; i++)
27078 ops[i] = XVECEXP (vals, 0, i);
27079 ix86_expand_vector_init_concat (mode, target, ops, n);
27080 return;
27082 case V32QImode:
27083 half_mode = V16QImode;
27084 goto half;
27086 case V16HImode:
27087 half_mode = V8HImode;
27088 goto half;
27090 half:
27091 n = GET_MODE_NUNITS (mode);
27092 for (i = 0; i < n; i++)
27093 ops[i] = XVECEXP (vals, 0, i);
27094 op0 = gen_reg_rtx (half_mode);
27095 op1 = gen_reg_rtx (half_mode);
27096 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27097 n >> 2);
27098 ix86_expand_vector_init_interleave (half_mode, op1,
27099 &ops [n >> 1], n >> 2);
27100 emit_insn (gen_rtx_SET (VOIDmode, target,
27101 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27102 return;
27104 case V16QImode:
27105 if (!TARGET_SSE4_1)
27106 break;
27107 /* FALLTHRU */
27109 case V8HImode:
27110 if (!TARGET_SSE2)
27111 break;
27113 /* Don't use ix86_expand_vector_init_interleave if we can't
27114 move from GPR to SSE register directly. */
27115 if (!TARGET_INTER_UNIT_MOVES)
27116 break;
27118 n = GET_MODE_NUNITS (mode);
27119 for (i = 0; i < n; i++)
27120 ops[i] = XVECEXP (vals, 0, i);
27121 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27122 return;
27124 case V4HImode:
27125 case V8QImode:
27126 break;
27128 default:
27129 gcc_unreachable ();
27133 int i, j, n_elts, n_words, n_elt_per_word;
27134 enum machine_mode inner_mode;
27135 rtx words[4], shift;
27137 inner_mode = GET_MODE_INNER (mode);
27138 n_elts = GET_MODE_NUNITS (mode);
27139 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27140 n_elt_per_word = n_elts / n_words;
27141 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
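/* Pack N_ELT_PER_WORD elements into each word-sized register,
   processing the elements of a word last-to-first so that the
   lowest-indexed element ends up in the least significant bits,
   matching the little-endian vector layout.  */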
27143 for (i = 0; i < n_words; ++i)
27145 rtx word = NULL_RTX;
27147 for (j = 0; j < n_elt_per_word; ++j)
27149 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27150 elt = convert_modes (word_mode, inner_mode, elt, true);
27152 if (j == 0)
27153 word = elt;
27154 else
27156 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27157 word, 1, OPTAB_LIB_WIDEN);
27158 word = expand_simple_binop (word_mode, IOR, word, elt,
27159 word, 1, OPTAB_LIB_WIDEN);
27163 words[i] = word;
27166 if (n_words == 1)
27167 emit_move_insn (target, gen_lowpart (mode, words[0]));
27168 else if (n_words == 2)
27170 rtx tmp = gen_reg_rtx (mode);
27171 emit_clobber (tmp);
27172 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27173 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27174 emit_move_insn (target, tmp);
27176 else if (n_words == 4)
27178 rtx tmp = gen_reg_rtx (V4SImode);
27179 gcc_assert (word_mode == SImode);
27180 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27181 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27182 emit_move_insn (target, gen_lowpart (mode, tmp));
27184 else
27185 gcc_unreachable ();
27189 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27190 instructions unless MMX_OK is true. */
27192 void
27193 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27195 enum machine_mode mode = GET_MODE (target);
27196 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27197 int n_elts = GET_MODE_NUNITS (mode);
27198 int n_var = 0, one_var = -1;
27199 bool all_same = true, all_const_zero = true;
27200 int i;
27201 rtx x;
27203 for (i = 0; i < n_elts; ++i)
27205 x = XVECEXP (vals, 0, i);
27206 if (!(CONST_INT_P (x)
27207 || GET_CODE (x) == CONST_DOUBLE
27208 || GET_CODE (x) == CONST_FIXED))
27209 n_var++, one_var = i;
27210 else if (x != CONST0_RTX (inner_mode))
27211 all_const_zero = false;
27212 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27213 all_same = false;
27216 /* Constants are best loaded from the constant pool. */
27217 if (n_var == 0)
27219 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27220 return;
27223 /* If all values are identical, broadcast the value. */
27224 if (all_same
27225 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27226 XVECEXP (vals, 0, 0)))
27227 return;
27229 /* Values where only one field is non-constant are best loaded from
27230 the pool and overwritten via move later. */
27231 if (n_var == 1)
27233 if (all_const_zero
27234 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27235 XVECEXP (vals, 0, one_var),
27236 one_var))
27237 return;
27239 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27240 return;
27243 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27246 void
27247 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27249 enum machine_mode mode = GET_MODE (target);
27250 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27251 enum machine_mode half_mode;
27252 bool use_vec_merge = false;
27253 rtx tmp;
27254 static rtx (*gen_extract[6][2]) (rtx, rtx)
27256 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27257 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27258 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27259 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27260 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27261 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27263 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27265 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27266 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27267 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27268 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27269 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27270 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27272 int i, j, n;
27274 switch (mode)
27276 case V2SFmode:
27277 case V2SImode:
27278 if (mmx_ok)
27280 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27281 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27282 if (elt == 0)
27283 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27284 else
27285 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27286 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27287 return;
27289 break;
27291 case V2DImode:
27292 use_vec_merge = TARGET_SSE4_1;
27293 if (use_vec_merge)
27294 break;
27296 case V2DFmode:
27298 rtx op0, op1;
27300 /* For the two element vectors, we implement a VEC_CONCAT with
27301 the extraction of the other element. */
27303 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27304 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27306 if (elt == 0)
27307 op0 = val, op1 = tmp;
27308 else
27309 op0 = tmp, op1 = val;
27311 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27312 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27314 return;
27316 case V4SFmode:
27317 use_vec_merge = TARGET_SSE4_1;
27318 if (use_vec_merge)
27319 break;
27321 switch (elt)
27323 case 0:
27324 use_vec_merge = true;
27325 break;
27327 case 1:
27328 /* tmp = target = A B C D */
27329 tmp = copy_to_reg (target);
27330 /* target = A A B B */
27331 emit_insn (gen_sse_unpcklps (target, target, target));
27332 /* target = X A B B */
27333 ix86_expand_vector_set (false, target, val, 0);
27334 /* target = A X C D */
27335 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27336 GEN_INT (1), GEN_INT (0),
27337 GEN_INT (2+4), GEN_INT (3+4)));
27338 return;
27340 case 2:
27341 /* tmp = target = A B C D */
27342 tmp = copy_to_reg (target);
27343 /* tmp = X B C D */
27344 ix86_expand_vector_set (false, tmp, val, 0);
27345 /* target = A B X D */
27346 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27347 GEN_INT (0), GEN_INT (1),
27348 GEN_INT (0+4), GEN_INT (3+4)));
27349 return;
27351 case 3:
27352 /* tmp = target = A B C D */
27353 tmp = copy_to_reg (target);
27354 /* tmp = X B C D */
27355 ix86_expand_vector_set (false, tmp, val, 0);
27356 /* target = A B X D */
27357 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27358 GEN_INT (0), GEN_INT (1),
27359 GEN_INT (2+4), GEN_INT (0+4)));
27360 return;
27362 default:
27363 gcc_unreachable ();
27365 break;
27367 case V4SImode:
27368 use_vec_merge = TARGET_SSE4_1;
27369 if (use_vec_merge)
27370 break;
27372 /* Element 0 handled by vec_merge below. */
27373 if (elt == 0)
27375 use_vec_merge = true;
27376 break;
27379 if (TARGET_SSE2)
27381 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27382 store into element 0, then shuffle them back. */
27384 rtx order[4];
27386 order[0] = GEN_INT (elt);
27387 order[1] = const1_rtx;
27388 order[2] = const2_rtx;
27389 order[3] = GEN_INT (3);
27390 order[elt] = const0_rtx;
27392 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27393 order[1], order[2], order[3]));
27395 ix86_expand_vector_set (false, target, val, 0);
27397 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27398 order[1], order[2], order[3]));
27400 else
27402 /* For SSE1, we have to reuse the V4SF code. */
27403 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27404 gen_lowpart (SFmode, val), elt);
27406 return;
27408 case V8HImode:
27409 use_vec_merge = TARGET_SSE2;
27410 break;
27411 case V4HImode:
27412 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27413 break;
27415 case V16QImode:
27416 use_vec_merge = TARGET_SSE4_1;
27417 break;
27419 case V8QImode:
27420 break;
27422 case V32QImode:
27423 half_mode = V16QImode;
27424 j = 0;
27425 n = 16;
27426 goto half;
27428 case V16HImode:
27429 half_mode = V8HImode;
27430 j = 1;
27431 n = 8;
27432 goto half;
27434 case V8SImode:
27435 half_mode = V4SImode;
27436 j = 2;
27437 n = 4;
27438 goto half;
27440 case V4DImode:
27441 half_mode = V2DImode;
27442 j = 3;
27443 n = 2;
27444 goto half;
27446 case V8SFmode:
27447 half_mode = V4SFmode;
27448 j = 4;
27449 n = 4;
27450 goto half;
27452 case V4DFmode:
27453 half_mode = V2DFmode;
27454 j = 5;
27455 n = 2;
27456 goto half;
27458 half:
27459 /* Compute offset. */
27460 i = elt / n;
27461 elt %= n;
27463 gcc_assert (i <= 1);
27465 /* Extract the half. */
27466 tmp = gen_reg_rtx (half_mode);
27467 emit_insn ((*gen_extract[j][i]) (tmp, target));
27469 /* Put val in tmp at elt. */
27470 ix86_expand_vector_set (false, tmp, val, elt);
27472 /* Put it back. */
27473 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27474 return;
27476 default:
27477 break;
27480 if (use_vec_merge)
27482 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27483 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27484 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27486 else
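/* No single-insn way to do the insert: spill the vector to a stack
   temporary, store VAL into the element's slot in memory, and reload
   the whole vector.  */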
27488 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27490 emit_move_insn (mem, target);
27492 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27493 emit_move_insn (tmp, val);
27495 emit_move_insn (target, mem);
27499 void
27500 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27502 enum machine_mode mode = GET_MODE (vec);
27503 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27504 bool use_vec_extr = false;
27505 rtx tmp;
27507 switch (mode)
27509 case V2SImode:
27510 case V2SFmode:
27511 if (!mmx_ok)
27512 break;
27513 /* FALLTHRU */
27515 case V2DFmode:
27516 case V2DImode:
27517 use_vec_extr = true;
27518 break;
27520 case V4SFmode:
27521 use_vec_extr = TARGET_SSE4_1;
27522 if (use_vec_extr)
27523 break;
27525 switch (elt)
27527 case 0:
27528 tmp = vec;
27529 break;
27531 case 1:
27532 case 3:
27533 tmp = gen_reg_rtx (mode);
27534 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
27535 GEN_INT (elt), GEN_INT (elt),
27536 GEN_INT (elt+4), GEN_INT (elt+4)));
27537 break;
27539 case 2:
27540 tmp = gen_reg_rtx (mode);
27541 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
27542 break;
27544 default:
27545 gcc_unreachable ();
27547 vec = tmp;
27548 use_vec_extr = true;
27549 elt = 0;
27550 break;
27552 case V4SImode:
27553 use_vec_extr = TARGET_SSE4_1;
27554 if (use_vec_extr)
27555 break;
27557 if (TARGET_SSE2)
27559 switch (elt)
27561 case 0:
27562 tmp = vec;
27563 break;
27565 case 1:
27566 case 3:
27567 tmp = gen_reg_rtx (mode);
27568 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
27569 GEN_INT (elt), GEN_INT (elt),
27570 GEN_INT (elt), GEN_INT (elt)));
27571 break;
27573 case 2:
27574 tmp = gen_reg_rtx (mode);
27575 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
27576 break;
27578 default:
27579 gcc_unreachable ();
27581 vec = tmp;
27582 use_vec_extr = true;
27583 elt = 0;
27585 else
27587 /* For SSE1, we have to reuse the V4SF code. */
27588 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
27589 gen_lowpart (V4SFmode, vec), elt);
27590 return;
27592 break;
27594 case V8HImode:
27595 use_vec_extr = TARGET_SSE2;
27596 break;
27597 case V4HImode:
27598 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27599 break;
27601 case V16QImode:
27602 use_vec_extr = TARGET_SSE4_1;
27603 break;
27605 case V8QImode:
27606 /* ??? Could extract the appropriate HImode element and shift. */
27607 default:
27608 break;
27611 if (use_vec_extr)
27613 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
27614 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
27616 /* Let the rtl optimizers know about the zero extension performed. */
27617 if (inner_mode == QImode || inner_mode == HImode)
27619 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
27620 target = gen_lowpart (SImode, target);
27623 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27625 else
27627 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27629 emit_move_insn (mem, vec);
27631 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27632 emit_move_insn (target, tmp);
27636 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
27637 pattern to reduce; DEST is the destination; IN is the input vector. */
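/* E.g. for an addition reduction of IN = [a b c d] (element 0 listed
   first): movhlps gives tmp1 = [c d c d], the first FN gives
   tmp2 = [a+c b+d . .], the shufps broadcasts element 1 so that
   tmp3 = [b+d b+d b+d b+d], and the final FN leaves a+b+c+d in
   element 0 of DEST.  */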
27639 void
27640 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
27642 rtx tmp1, tmp2, tmp3;
27644 tmp1 = gen_reg_rtx (V4SFmode);
27645 tmp2 = gen_reg_rtx (V4SFmode);
27646 tmp3 = gen_reg_rtx (V4SFmode);
27648 emit_insn (gen_sse_movhlps (tmp1, in, in));
27649 emit_insn (fn (tmp2, tmp1, in));
27651 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
27652 GEN_INT (1), GEN_INT (1),
27653 GEN_INT (1+4), GEN_INT (1+4)));
27654 emit_insn (fn (dest, tmp2, tmp3));
27657 /* Target hook for scalar_mode_supported_p. */
27658 static bool
27659 ix86_scalar_mode_supported_p (enum machine_mode mode)
27661 if (DECIMAL_FLOAT_MODE_P (mode))
27662 return true;
27663 else if (mode == TFmode)
27664 return true;
27665 else
27666 return default_scalar_mode_supported_p (mode);
27669 /* Implements target hook vector_mode_supported_p. */
27670 static bool
27671 ix86_vector_mode_supported_p (enum machine_mode mode)
27673 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
27674 return true;
27675 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
27676 return true;
27677 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
27678 return true;
27679 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
27680 return true;
27681 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
27682 return true;
27683 return false;
27686 /* Target hook for c_mode_for_suffix. */
27687 static enum machine_mode
27688 ix86_c_mode_for_suffix (char suffix)
27690 if (suffix == 'q')
27691 return TFmode;
27692 if (suffix == 'w')
27693 return XFmode;
27695 return VOIDmode;
27698 /* Worker function for TARGET_MD_ASM_CLOBBERS.
27700 We do this in the new i386 backend to maintain source compatibility
27701 with the old cc0-based compiler. */
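/* In effect every asm is treated as if the user had written "flags"
   and "fpsr" in its clobber list explicitly.  */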
27703 static tree
27704 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
27705 tree inputs ATTRIBUTE_UNUSED,
27706 tree clobbers)
27708 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
27709 clobbers);
27710 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
27711 clobbers);
27712 return clobbers;
27715 /* Implements target vector targetm.asm.encode_section_info. This
27716 is not used by NetWare. */
27718 static void ATTRIBUTE_UNUSED
27719 ix86_encode_section_info (tree decl, rtx rtl, int first)
27721 default_encode_section_info (decl, rtl, first);
27723 if (TREE_CODE (decl) == VAR_DECL
27724 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
27725 && ix86_in_large_data_p (decl))
27726 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
27729 /* Worker function for REVERSE_CONDITION. */
27731 enum rtx_code
27732 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
27734 return (mode != CCFPmode && mode != CCFPUmode
27735 ? reverse_condition (code)
27736 : reverse_condition_maybe_unordered (code));
27739 /* Output code to perform an x87 FP register move, from OPERANDS[1]
27740 to OPERANDS[0]. */
27742 const char *
27743 output_387_reg_move (rtx insn, rtx *operands)
27745 if (REG_P (operands[0]))
27747 if (REG_P (operands[1])
27748 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
27750 if (REGNO (operands[0]) == FIRST_STACK_REG)
27751 return output_387_ffreep (operands, 0);
27752 return "fstp\t%y0";
27754 if (STACK_TOP_P (operands[0]))
27755 return "fld%z1\t%y1";
27756 return "fst\t%y0";
27758 else if (MEM_P (operands[0]))
27760 gcc_assert (REG_P (operands[1]));
27761 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
27762 return "fstp%z0\t%y0";
27763 else
27765 /* There is no non-popping store to memory for XFmode.
27766 So if we need one, follow the store with a load. */
27767 if (GET_MODE (operands[0]) == XFmode)
27768 return "fstp%z0\t%y0\n\tfld%z0\t%y0";
27769 else
27770 return "fst%z0\t%y0";
27773 else
27774 gcc_unreachable();
27777 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
27778 the FP status register is set. */
27780 void
27781 ix86_emit_fp_unordered_jump (rtx label)
27783 rtx reg = gen_reg_rtx (HImode);
27784 rtx temp;
27786 emit_insn (gen_x86_fnstsw_1 (reg));
27788 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
27790 emit_insn (gen_x86_sahf_1 (reg));
27792 temp = gen_rtx_REG (CCmode, FLAGS_REG);
27793 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
27795 else
27797 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
27799 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
27800 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
27803 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
27804 gen_rtx_LABEL_REF (VOIDmode, label),
27805 pc_rtx);
27806 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
27808 emit_jump_insn (temp);
27809 predict_jump (REG_BR_PROB_BASE * 10 / 100);
27812 /* Output code to perform a log1p XFmode calculation. */
27814 void ix86_emit_i387_log1p (rtx op0, rtx op1)
27816 rtx label1 = gen_label_rtx ();
27817 rtx label2 = gen_label_rtx ();
27819 rtx tmp = gen_reg_rtx (XFmode);
27820 rtx tmp2 = gen_reg_rtx (XFmode);
27822 emit_insn (gen_absxf2 (tmp, op1));
27823 emit_insn (gen_cmpxf (tmp,
27824 CONST_DOUBLE_FROM_REAL_VALUE (
27825 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
27826 XFmode)));
27827 emit_jump_insn (gen_bge (label1));
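/* fyl2xp1 is only specified for |x| < 1 - sqrt(2)/2 ~= 0.29289, hence
   the comparison above: within that range we compute ln(1 + op1) with
   fyl2xp1 and y = ln(2) directly; otherwise we branch to label1 below
   and compute it as fyl2x of (1 + op1).  */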
27829 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
27830 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
27831 emit_jump (label2);
27833 emit_label (label1);
27834 emit_move_insn (tmp, CONST1_RTX (XFmode));
27835 emit_insn (gen_addxf3 (tmp, op1, tmp));
27836 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
27837 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
27839 emit_label (label2);
27842 /* Output code to perform a Newton-Raphson approximation of a single precision
27843 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
27845 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
27847 rtx x0, x1, e0, e1, two;
27849 x0 = gen_reg_rtx (mode);
27850 e0 = gen_reg_rtx (mode);
27851 e1 = gen_reg_rtx (mode);
27852 x1 = gen_reg_rtx (mode);
27854 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
27856 if (VECTOR_MODE_P (mode))
27857 two = ix86_build_const_vector (SFmode, true, two);
27859 two = force_reg (mode, two);
27861 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
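/* This is one Newton-Raphson step for f(x) = 1/x - b, i.e.
   x1 = x0 * (2 - b * x0).  The relative error roughly squares with
   each step, so a single step on the ~12-bit rcpss/rcpps estimate
   yields roughly 24 good bits, enough for SFmode.  */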
27863 /* x0 = rcp(b) estimate */
27864 emit_insn (gen_rtx_SET (VOIDmode, x0,
27865 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
27866 UNSPEC_RCP)));
27867 /* e0 = x0 * b */
27868 emit_insn (gen_rtx_SET (VOIDmode, e0,
27869 gen_rtx_MULT (mode, x0, b)));
27870 /* e1 = 2. - e0 */
27871 emit_insn (gen_rtx_SET (VOIDmode, e1,
27872 gen_rtx_MINUS (mode, two, e0)));
27873 /* x1 = x0 * e1 */
27874 emit_insn (gen_rtx_SET (VOIDmode, x1,
27875 gen_rtx_MULT (mode, x0, e1)));
27876 /* res = a * x1 */
27877 emit_insn (gen_rtx_SET (VOIDmode, res,
27878 gen_rtx_MULT (mode, a, x1)));
27881 /* Output code to perform a Newton-Raphson approximation of a
27882 single precision floating point [reciprocal] square root. */
27884 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
27885 bool recip)
27887 rtx x0, e0, e1, e2, e3, mthree, mhalf;
27888 REAL_VALUE_TYPE r;
27890 x0 = gen_reg_rtx (mode);
27891 e0 = gen_reg_rtx (mode);
27892 e1 = gen_reg_rtx (mode);
27893 e2 = gen_reg_rtx (mode);
27894 e3 = gen_reg_rtx (mode);
27896 real_from_integer (&r, VOIDmode, -3, -1, 0);
27897 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
27899 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
27900 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
27902 if (VECTOR_MODE_P (mode))
27904 mthree = ix86_build_const_vector (SFmode, true, mthree);
27905 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
27908 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
27909 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
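/* One Newton-Raphson step for f(x) = 1/x^2 - a is
   x1 = 0.5 * x0 * (3 - a * x0 * x0); the signs are folded into the
   constants -3.0 and -0.5 below so that the step maps onto plain
   multiplies and an add.  */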
27911 /* x0 = rsqrt(a) estimate */
27912 emit_insn (gen_rtx_SET (VOIDmode, x0,
27913 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
27914 UNSPEC_RSQRT)));
27916 /* If a == 0.0, zero out the rsqrt estimate (which is +Inf there) to prevent a NaN result for sqrt(0.0). */
27917 if (!recip)
27919 rtx zero, mask;
27922 mask = gen_reg_rtx (mode);
27924 zero = force_reg (mode, CONST0_RTX (mode));
27925 emit_insn (gen_rtx_SET (VOIDmode, mask,
27926 gen_rtx_NE (mode, zero, a)));
27928 emit_insn (gen_rtx_SET (VOIDmode, x0,
27929 gen_rtx_AND (mode, x0, mask)));
27932 /* e0 = x0 * a */
27933 emit_insn (gen_rtx_SET (VOIDmode, e0,
27934 gen_rtx_MULT (mode, x0, a)));
27935 /* e1 = e0 * x0 */
27936 emit_insn (gen_rtx_SET (VOIDmode, e1,
27937 gen_rtx_MULT (mode, e0, x0)));
27939 /* e2 = e1 - 3. */
27940 mthree = force_reg (mode, mthree);
27941 emit_insn (gen_rtx_SET (VOIDmode, e2,
27942 gen_rtx_PLUS (mode, e1, mthree)));
27944 mhalf = force_reg (mode, mhalf);
27945 if (recip)
27946 /* e3 = -.5 * x0 */
27947 emit_insn (gen_rtx_SET (VOIDmode, e3,
27948 gen_rtx_MULT (mode, x0, mhalf)));
27949 else
27950 /* e3 = -.5 * e0 */
27951 emit_insn (gen_rtx_SET (VOIDmode, e3,
27952 gen_rtx_MULT (mode, e0, mhalf)));
27953 /* ret = e2 * e3 */
27954 emit_insn (gen_rtx_SET (VOIDmode, res,
27955 gen_rtx_MULT (mode, e2, e3)));
27958 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
27960 static void ATTRIBUTE_UNUSED
27961 i386_solaris_elf_named_section (const char *name, unsigned int flags,
27962 tree decl)
27964 /* With Binutils 2.15, the "@unwind" marker must be specified on
27965 every occurrence of the ".eh_frame" section, not just the first
27966 one. */
27967 if (TARGET_64BIT
27968 && strcmp (name, ".eh_frame") == 0)
27970 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
27971 flags & SECTION_WRITE ? "aw" : "a");
27972 return;
27974 default_elf_asm_named_section (name, flags, decl);
27977 /* Return the mangling of TYPE if it is an extended fundamental type. */
27979 static const char *
27980 ix86_mangle_type (const_tree type)
27982 type = TYPE_MAIN_VARIANT (type);
27984 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
27985 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
27986 return NULL;
27988 switch (TYPE_MODE (type))
27990 case TFmode:
27991 /* __float128 is "g". */
27992 return "g";
27993 case XFmode:
27994 /* "long double" or __float80 is "e". */
27995 return "e";
27996 default:
27997 return NULL;
28001 /* For 32-bit code we can save PIC register setup by using the
28002 __stack_chk_fail_local hidden function instead of calling
28003 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28004 register, so it is better to call __stack_chk_fail directly. */
28006 static tree
28007 ix86_stack_protect_fail (void)
28009 return TARGET_64BIT
28010 ? default_external_stack_protect_fail ()
28011 : default_hidden_stack_protect_fail ();
28014 /* Select a format to encode pointers in exception handling data. CODE
28015 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28016 true if the symbol may be affected by dynamic relocations.
28018 ??? All x86 object file formats are capable of representing this.
28019 After all, the relocation needed is the same as for the call insn.
28020 Whether or not a particular assembler allows us to enter such, I
28021 guess we'll have to see. */
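/* E.g. 32-bit PIC code gets DW_EH_PE_pcrel | DW_EH_PE_sdata4 (wrapped
   in DW_EH_PE_indirect for symbols that dynamic relocations may
   affect), while non-PIC small-model code can use plain
   DW_EH_PE_udata4.  */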
28023 asm_preferred_eh_data_format (int code, int global)
28025 if (flag_pic)
28027 int type = DW_EH_PE_sdata8;
28028 if (!TARGET_64BIT
28029 || ix86_cmodel == CM_SMALL_PIC
28030 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28031 type = DW_EH_PE_sdata4;
28032 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28034 if (ix86_cmodel == CM_SMALL
28035 || (ix86_cmodel == CM_MEDIUM && code))
28036 return DW_EH_PE_udata4;
28037 return DW_EH_PE_absptr;
28040 /* Expand copysign from SIGN to the positive value ABS_VALUE
28041 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
28042 the sign-bit. */
28043 static void
28044 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28046 enum machine_mode mode = GET_MODE (sign);
28047 rtx sgn = gen_reg_rtx (mode);
28048 if (mask == NULL_RTX)
28050 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28051 if (!VECTOR_MODE_P (mode))
28053 /* We need to generate a scalar mode mask in this case. */
28054 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28055 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28056 mask = gen_reg_rtx (mode);
28057 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28060 else
28061 mask = gen_rtx_NOT (mode, mask);
28062 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28063 gen_rtx_AND (mode, mask, sign)));
28064 emit_insn (gen_rtx_SET (VOIDmode, result,
28065 gen_rtx_IOR (mode, abs_value, sgn)));
28068 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28069 mask for masking out the sign-bit is stored in *SMASK, if that is
28070 non-null. */
28071 static rtx
28072 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28074 enum machine_mode mode = GET_MODE (op0);
28075 rtx xa, mask;
28077 xa = gen_reg_rtx (mode);
28078 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28079 if (!VECTOR_MODE_P (mode))
28081 /* We need to generate a scalar mode mask in this case. */
28082 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28083 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28084 mask = gen_reg_rtx (mode);
28085 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28087 emit_insn (gen_rtx_SET (VOIDmode, xa,
28088 gen_rtx_AND (mode, op0, mask)));
28090 if (smask)
28091 *smask = mask;
28093 return xa;
28096 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28097 swapping the operands if SWAP_OPERANDS is true. The expanded
28098 code is a forward jump to a newly created label in case the
28099 comparison is true. The generated label rtx is returned. */
28100 static rtx
28101 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28102 bool swap_operands)
28104 rtx label, tmp;
28106 if (swap_operands)
28108 tmp = op0;
28109 op0 = op1;
28110 op1 = tmp;
28113 label = gen_label_rtx ();
28114 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28115 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28116 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28117 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28118 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28119 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28120 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28121 JUMP_LABEL (tmp) = label;
28123 return label;
28126 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28127 using comparison code CODE. Operands are swapped for the comparison if
28128 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28129 static rtx
28130 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28131 bool swap_operands)
28133 enum machine_mode mode = GET_MODE (op0);
28134 rtx mask = gen_reg_rtx (mode);
28136 if (swap_operands)
28138 rtx tmp = op0;
28139 op0 = op1;
28140 op1 = tmp;
28143 if (mode == DFmode)
28144 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28145 gen_rtx_fmt_ee (code, mode, op0, op1)));
28146 else
28147 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28148 gen_rtx_fmt_ee (code, mode, op0, op1)));
28150 return mask;
28153 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28154 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28155 static rtx
28156 ix86_gen_TWO52 (enum machine_mode mode)
28158 REAL_VALUE_TYPE TWO52r;
28159 rtx TWO52;
28161 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28162 TWO52 = const_double_from_real_value (TWO52r, mode);
28163 TWO52 = force_reg (mode, TWO52);
28165 return TWO52;
28168 /* Expand SSE sequence for computing lround from OP1 storing
28169 into OP0. */
28170 void
28171 ix86_expand_lround (rtx op0, rtx op1)
28173 /* C code for the stuff we're doing below:
28174 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28175 return (long)tmp;
28177 enum machine_mode mode = GET_MODE (op1);
28178 const struct real_format *fmt;
28179 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28180 rtx adj;
28182 /* load nextafter (0.5, 0.0) */
28183 fmt = REAL_MODE_FORMAT (mode);
28184 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28185 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28187 /* adj = copysign (0.5, op1) */
28188 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28189 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28191 /* adj = op1 + adj */
28192 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28194 /* op0 = (imode)adj */
28195 expand_fix (op0, adj, 0);
28198 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1
28199 storing into OPERAND0. */
28200 void
28201 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28203 /* C code for the stuff we're doing below (for do_floor):
28204 xi = (long)op1;
28205 xi -= (double)xi > op1 ? 1 : 0;
28206 return xi;
28208 enum machine_mode fmode = GET_MODE (op1);
28209 enum machine_mode imode = GET_MODE (op0);
28210 rtx ireg, freg, label, tmp;
28212 /* reg = (long)op1 */
28213 ireg = gen_reg_rtx (imode);
28214 expand_fix (ireg, op1, 0);
28216 /* freg = (double)reg */
28217 freg = gen_reg_rtx (fmode);
28218 expand_float (freg, ireg, 0);
28220 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28221 label = ix86_expand_sse_compare_and_jump (UNLE,
28222 freg, op1, !do_floor);
28223 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28224 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28225 emit_move_insn (ireg, tmp);
28227 emit_label (label);
28228 LABEL_NUSES (label) = 1;
28230 emit_move_insn (op0, ireg);
28233 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28234 result in OPERAND0. */
28235 void
28236 ix86_expand_rint (rtx operand0, rtx operand1)
28238 /* C code for the stuff we're doing below:
28239 xa = fabs (operand1);
28240 if (!isless (xa, 2**52))
28241 return operand1;
28242 xa = xa + 2**52 - 2**52;
28243 return copysign (xa, operand1);
28245 enum machine_mode mode = GET_MODE (operand0);
28246 rtx res, xa, label, TWO52, mask;
28248 res = gen_reg_rtx (mode);
28249 emit_move_insn (res, operand1);
28251 /* xa = abs (operand1) */
28252 xa = ix86_expand_sse_fabs (res, &mask);
28254 /* if (!isless (xa, TWO52)) goto label; */
28255 TWO52 = ix86_gen_TWO52 (mode);
28256 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28258 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28259 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28261 ix86_sse_copysign_to_positive (res, xa, res, mask);
28263 emit_label (label);
28264 LABEL_NUSES (label) = 1;
28266 emit_move_insn (operand0, res);
28269 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28270 into OPERAND0. */
28271 void
28272 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28274 /* C code for the stuff we expand below.
28275 double xa = fabs (x), x2;
28276 if (!isless (xa, TWO52))
28277 return x;
28278 xa = xa + TWO52 - TWO52;
28279 x2 = copysign (xa, x);
28280 Compensate. Floor:
28281 if (x2 > x)
28282 x2 -= 1;
28283 Compensate. Ceil:
28284 if (x2 < x)
28285 x2 -= -1;
28286 return x2;
28288 enum machine_mode mode = GET_MODE (operand0);
28289 rtx xa, TWO52, tmp, label, one, res, mask;
28291 TWO52 = ix86_gen_TWO52 (mode);
28293 /* Temporary for holding the result, initialized to the input
28294 operand to ease control flow. */
28295 res = gen_reg_rtx (mode);
28296 emit_move_insn (res, operand1);
28298 /* xa = abs (operand1) */
28299 xa = ix86_expand_sse_fabs (res, &mask);
28301 /* if (!isless (xa, TWO52)) goto label; */
28302 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28304 /* xa = xa + TWO52 - TWO52; */
28305 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28306 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28308 /* xa = copysign (xa, operand1) */
28309 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28311 /* generate 1.0 or -1.0 */
28312 one = force_reg (mode,
28313 const_double_from_real_value (do_floor
28314 ? dconst1 : dconstm1, mode));
28316 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28317 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28318 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28319 gen_rtx_AND (mode, one, tmp)));
28320 /* We always need to subtract here to preserve signed zero. */
28321 tmp = expand_simple_binop (mode, MINUS,
28322 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28323 emit_move_insn (res, tmp);
28325 emit_label (label);
28326 LABEL_NUSES (label) = 1;
28328 emit_move_insn (operand0, res);
28331 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28332 into OPERAND0. */
28333 void
28334 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28336 /* C code for the stuff we expand below.
28337 double xa = fabs (x), x2;
28338 if (!isless (xa, TWO52))
28339 return x;
28340 x2 = (double)(long)x;
28341 Compensate. Floor:
28342 if (x2 > x)
28343 x2 -= 1;
28344 Compensate. Ceil:
28345 if (x2 < x)
28346 x2 += 1;
28347 if (HONOR_SIGNED_ZEROS (mode))
28348 return copysign (x2, x);
28349 return x2;
28351 enum machine_mode mode = GET_MODE (operand0);
28352 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28354 TWO52 = ix86_gen_TWO52 (mode);
28356 /* Temporary for holding the result, initialized to the input
28357 operand to ease control flow. */
28358 res = gen_reg_rtx (mode);
28359 emit_move_insn (res, operand1);
28361 /* xa = abs (operand1) */
28362 xa = ix86_expand_sse_fabs (res, &mask);
28364 /* if (!isless (xa, TWO52)) goto label; */
28365 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28367 /* xa = (double)(long)x */
28368 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28369 expand_fix (xi, res, 0);
28370 expand_float (xa, xi, 0);
28372 /* generate 1.0 */
28373 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28375 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28376 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28377 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28378 gen_rtx_AND (mode, one, tmp)));
28379 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28380 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28381 emit_move_insn (res, tmp);
28383 if (HONOR_SIGNED_ZEROS (mode))
28384 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28386 emit_label (label);
28387 LABEL_NUSES (label) = 1;
28389 emit_move_insn (operand0, res);
28392 /* Expand SSE sequence for computing round from OPERAND1 storing
28393 into OPERAND0, using a sequence that does not rely on the DImode
28394 truncation via cvttsd2siq, which is only available on 64-bit targets. */
28395 void
28396 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28398 /* C code for the stuff we expand below.
28399 double xa = fabs (x), xa2, x2;
28400 if (!isless (xa, TWO52))
28401 return x;
28402 Using the absolute value and copying the sign back afterwards
28403 makes -0.0 -> -0.0 come out correctly.
28404 xa2 = xa + TWO52 - TWO52;
28405 Compensate.
28406 dxa = xa2 - xa;
28407 if (dxa <= -0.5)
28408 xa2 += 1;
28409 else if (dxa > 0.5)
28410 xa2 -= 1;
28411 x2 = copysign (xa2, x);
28412 return x2;
28414 enum machine_mode mode = GET_MODE (operand0);
28415 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28417 TWO52 = ix86_gen_TWO52 (mode);
28419 /* Temporary for holding the result, initialized to the input
28420 operand to ease control flow. */
28421 res = gen_reg_rtx (mode);
28422 emit_move_insn (res, operand1);
28424 /* xa = abs (operand1) */
28425 xa = ix86_expand_sse_fabs (res, &mask);
28427 /* if (!isless (xa, TWO52)) goto label; */
28428 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28430 /* xa2 = xa + TWO52 - TWO52; */
28431 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28432 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28434 /* dxa = xa2 - xa; */
28435 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28437 /* generate 0.5, 1.0 and -0.5 */
28438 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28439 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28440 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28441 0, OPTAB_DIRECT);
28443 /* Compensate. */
28445 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28446 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28447 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28448 gen_rtx_AND (mode, one, tmp)));
28449 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28450 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28451 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28452 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28453 gen_rtx_AND (mode, one, tmp)));
28454 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28456 /* res = copysign (xa2, operand1) */
28457 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28459 emit_label (label);
28460 LABEL_NUSES (label) = 1;
28462 emit_move_insn (operand0, res);
28465 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28466 into OPERAND0. */
28467 void
28468 ix86_expand_trunc (rtx operand0, rtx operand1)
28470 /* C code for SSE variant we expand below.
28471 double xa = fabs (x), x2;
28472 if (!isless (xa, TWO52))
28473 return x;
28474 x2 = (double)(long)x;
28475 if (HONOR_SIGNED_ZEROS (mode))
28476 return copysign (x2, x);
28477 return x2;
28479 enum machine_mode mode = GET_MODE (operand0);
28480 rtx xa, xi, TWO52, label, res, mask;
28482 TWO52 = ix86_gen_TWO52 (mode);
28484 /* Temporary for holding the result, initialized to the input
28485 operand to ease control flow. */
28486 res = gen_reg_rtx (mode);
28487 emit_move_insn (res, operand1);
28489 /* xa = abs (operand1) */
28490 xa = ix86_expand_sse_fabs (res, &mask);
28492 /* if (!isless (xa, TWO52)) goto label; */
28493 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28495 /* x = (double)(long)x */
28496 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28497 expand_fix (xi, res, 0);
28498 expand_float (res, xi, 0);
28500 if (HONOR_SIGNED_ZEROS (mode))
28501 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28503 emit_label (label);
28504 LABEL_NUSES (label) = 1;
28506 emit_move_insn (operand0, res);
28509 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28510 into OPERAND0, without relying on the 64-bit-only cvttsd2siq. */
28511 void
28512 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28514 enum machine_mode mode = GET_MODE (operand0);
28515 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28517 /* C code for SSE variant we expand below.
28518 double xa = fabs (x), x2;
28519 if (!isless (xa, TWO52))
28520 return x;
28521 xa2 = xa + TWO52 - TWO52;
28522 Compensate:
28523 if (xa2 > xa)
28524 xa2 -= 1.0;
28525 x2 = copysign (xa2, x);
28526 return x2;
28529 TWO52 = ix86_gen_TWO52 (mode);
28531 /* Temporary for holding the result, initialized to the input
28532 operand to ease control flow. */
28533 res = gen_reg_rtx (mode);
28534 emit_move_insn (res, operand1);
28536 /* xa = abs (operand1) */
28537 xa = ix86_expand_sse_fabs (res, &smask);
28539 /* if (!isless (xa, TWO52)) goto label; */
28540 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28542 /* res = xa + TWO52 - TWO52; */
28543 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28544 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
28545 emit_move_insn (res, tmp);
28547 /* generate 1.0 */
28548 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28550 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
28551 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
28552 emit_insn (gen_rtx_SET (VOIDmode, mask,
28553 gen_rtx_AND (mode, mask, one)));
28554 tmp = expand_simple_binop (mode, MINUS,
28555 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
28556 emit_move_insn (res, tmp);
28558 /* res = copysign (res, operand1) */
28559 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
28561 emit_label (label);
28562 LABEL_NUSES (label) = 1;
28564 emit_move_insn (operand0, res);
28567 /* Expand SSE sequence for computing round from OPERAND1 storing
28568 into OPERAND0. */
28569 void
28570 ix86_expand_round (rtx operand0, rtx operand1)
28572 /* C code for the stuff we're doing below:
28573 double xa = fabs (x);
28574 if (!isless (xa, TWO52))
28575 return x;
28576 xa = (double)(long)(xa + nextafter (0.5, 0.0));
28577 return copysign (xa, x);
28579 enum machine_mode mode = GET_MODE (operand0);
28580 rtx res, TWO52, xa, label, xi, half, mask;
28581 const struct real_format *fmt;
28582 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28584 /* Temporary for holding the result, initialized to the input
28585 operand to ease control flow. */
28586 res = gen_reg_rtx (mode);
28587 emit_move_insn (res, operand1);
28589 TWO52 = ix86_gen_TWO52 (mode);
28590 xa = ix86_expand_sse_fabs (res, &mask);
28591 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28593 /* load nextafter (0.5, 0.0) */
28594 fmt = REAL_MODE_FORMAT (mode);
28595 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28596 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28598 /* xa = xa + 0.5 */
28599 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
28600 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
28602 /* xa = (double)(int64_t)xa */
28603 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28604 expand_fix (xi, xa, 0);
28605 expand_float (xa, xi, 0);
28607 /* res = copysign (xa, operand1) */
28608 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
28610 emit_label (label);
28611 LABEL_NUSES (label) = 1;
28613 emit_move_insn (operand0, res);
28617 /* Return true if the operand combination is valid for an SSE5 instruction.
28618 OPERANDS is the array of operands.
28619 NUM is the number of operands.
28620 USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
28621 NUM_MEMORY is the maximum number of memory operands to accept.
28622 When COMMUTATIVE is set, operands 1 and 2 can be swapped. */
28624 bool
28625 ix86_sse5_valid_op_p (rtx operands[], rtx insn ATTRIBUTE_UNUSED, int num,
28626 bool uses_oc0, int num_memory, bool commutative)
28628 int mem_mask;
28629 int mem_count;
28630 int i;
28632 /* Count the number of memory arguments */
28633 mem_mask = 0;
28634 mem_count = 0;
28635 for (i = 0; i < num; i++)
28637 enum machine_mode mode = GET_MODE (operands[i]);
28638 if (register_operand (operands[i], mode))
28641 else if (memory_operand (operands[i], mode))
28643 mem_mask |= (1 << i);
28644 mem_count++;
28647 else
28649 rtx pattern = PATTERN (insn);
28651 /* allow 0 for pcmov */
28652 if (GET_CODE (pattern) != SET
28653 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE
28654 || i < 2
28655 || operands[i] != CONST0_RTX (mode))
28656 return false;
28660 /* Special case pmacsdq{l,h} where we allow the 3rd argument to be
28661 a memory operation. */
28662 if (num_memory < 0)
28664 num_memory = -num_memory;
28665 if ((mem_mask & (1 << (num-1))) != 0)
28667 mem_mask &= ~(1 << (num-1));
28668 mem_count--;
28672 /* If there were no memory operations, allow the insn */
28673 if (mem_mask == 0)
28674 return true;
28676 /* Do not allow the destination register to be a memory operand. */
28677 else if (mem_mask & (1 << 0))
28678 return false;
28680 /* If there are too many memory operations, disallow the instruction. While
28681 the hardware only allows 1 memory reference, before register allocation
28682 we sometimes allow two memory operations for some insns so that
28683 code like the following can be optimized:
28685 float fmadd (float *a, float *b, float *c) { return (*a * *b) + *c; }
28687 or similar cases that are vectorized into using the fmaddss
28688 instruction. */
28689 else if (mem_count > num_memory)
28690 return false;
28692 /* Don't allow more than one memory operation if not optimizing. */
28693 else if (mem_count > 1 && !optimize)
28694 return false;
28696 else if (num == 4 && mem_count == 1)
28698 /* formats (destination is the first argument), example fmaddss:
28699 xmm1, xmm1, xmm2, xmm3/mem
28700 xmm1, xmm1, xmm2/mem, xmm3
28701 xmm1, xmm2, xmm3/mem, xmm1
28702 xmm1, xmm2/mem, xmm3, xmm1 */
28703 if (uses_oc0)
28704 return ((mem_mask == (1 << 1))
28705 || (mem_mask == (1 << 2))
28706 || (mem_mask == (1 << 3)));
28708 /* format, example pmacsdd:
28709 xmm1, xmm2, xmm3/mem, xmm1 */
28710 if (commutative)
28711 return (mem_mask == (1 << 2) || mem_mask == (1 << 1));
28712 else
28713 return (mem_mask == (1 << 2));
28716 else if (num == 4 && num_memory == 2)
28718 /* If there are two memory operations, we can load one of the memory ops
28719 into the destination register. This is for optimizing the
28720 multiply/add ops, where the combiner has given both the multiply
28721 and the add insns a memory operand. We have to be careful
28722 that the destination doesn't overlap with the inputs. */
28723 rtx op0 = operands[0];
28725 if (reg_mentioned_p (op0, operands[1])
28726 || reg_mentioned_p (op0, operands[2])
28727 || reg_mentioned_p (op0, operands[3]))
28728 return false;
28730 /* formats (destination is the first argument), example fmaddss:
28731 xmm1, xmm1, xmm2, xmm3/mem
28732 xmm1, xmm1, xmm2/mem, xmm3
28733 xmm1, xmm2, xmm3/mem, xmm1
28734 xmm1, xmm2/mem, xmm3, xmm1
28736 For the oc0 case, we will load either operands[1] or operands[3] into
28737 operands[0], so any combination of 2 memory operands is ok. */
28738 if (uses_oc0)
28739 return true;
28741 /* format, example pmacsdd:
28742 xmm1, xmm2, xmm3/mem, xmm1
28744 For the integer multiply/add instructions be more restrictive and
28745 require operands[2] and operands[3] to be the memory operands. */
28746 if (commutative)
28747 return (mem_mask == ((1 << 1) | (1 << 3)) || mem_mask == ((1 << 2) | (1 << 3)));
28748 else
28749 return (mem_mask == ((1 << 2) | (1 << 3)));
28752 else if (num == 3 && num_memory == 1)
28754 /* formats, example protb:
28755 xmm1, xmm2, xmm3/mem
28756 xmm1, xmm2/mem, xmm3 */
28757 if (uses_oc0)
28758 return ((mem_mask == (1 << 1)) || (mem_mask == (1 << 2)));
28760 /* format, example comeq:
28761 xmm1, xmm2, xmm3/mem */
28762 else
28763 return (mem_mask == (1 << 2));
28766 else
28767 gcc_unreachable ();
28769 return false;
28773 /* Fix up an SSE5 instruction that has 2 memory input references into a form the
28774 hardware will allow by using the destination register to load one of the
28775 memory operations. Presently this is used by the multiply/add routines to
28776 allow 2 memory references. */
28778 void
28779 ix86_expand_sse5_multiple_memory (rtx operands[],
28780 int num,
28781 enum machine_mode mode)
28783 rtx op0 = operands[0];
28784 if (num != 4
28785 || memory_operand (op0, mode)
28786 || reg_mentioned_p (op0, operands[1])
28787 || reg_mentioned_p (op0, operands[2])
28788 || reg_mentioned_p (op0, operands[3]))
28789 gcc_unreachable ();
28791 /* For 2 memory operands, pick either operands[1] or operands[3] to move into
28792 the destination register. */
28793 if (memory_operand (operands[1], mode))
28795 emit_move_insn (op0, operands[1]);
28796 operands[1] = op0;
28798 else if (memory_operand (operands[3], mode))
28800 emit_move_insn (op0, operands[3]);
28801 operands[3] = op0;
28803 else
28804 gcc_unreachable ();
28806 return;
28810 /* Table of valid machine attributes. */
28811 static const struct attribute_spec ix86_attribute_table[] =
28813 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
28814 /* Stdcall attribute says callee is responsible for popping arguments
28815 if they are not variable. */
28816 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28817 /* Fastcall attribute says callee is responsible for popping arguments
28818 if they are not variable. */
28819 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28820 /* Cdecl attribute says the callee is a normal C declaration */
28821 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28822 /* Regparm attribute specifies how many integer arguments are to be
28823 passed in registers. */
28824 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
28825 /* Sseregparm attribute says we are using x86_64 calling conventions
28826 for FP arguments. */
28827 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28828 /* force_align_arg_pointer says this function realigns the stack at entry. */
28829 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
28830 false, true, true, ix86_handle_cconv_attribute },
28831 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
28832 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
28833 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
28834 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
28835 #endif
28836 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28837 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28838 #ifdef SUBTARGET_ATTRIBUTE_TABLE
28839 SUBTARGET_ATTRIBUTE_TABLE,
28840 #endif
28841 /* ms_abi and sysv_abi calling convention function attributes. */
28842 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28843 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28844 /* End element. */
28845 { NULL, 0, 0, false, false, false, NULL }
28848 /* Implement targetm.vectorize.builtin_vectorization_cost. */
28849 static int
28850 x86_builtin_vectorization_cost (bool runtime_test)
28852 /* If the branch of the runtime test is taken - i.e. the vectorized
28853 version is skipped - this incurs a misprediction cost (because the
28854 vectorized version is expected to be the fall-through). So we subtract
28855 the latency of a mispredicted branch from the costs that are incurred
28856 when the vectorized version is executed.
28858 TODO: The values in individual target tables have to be tuned or new
28859 fields may be needed. For example, on K8, the default branch path is the
28860 not-taken path. If the taken path is predicted correctly, the minimum
28861 penalty of going down the taken-path is 1 cycle. If the taken-path is
28862 not predicted correctly, then the minimum penalty is 10 cycles. */
28864 if (runtime_test)
28866 return (-(ix86_cost->cond_taken_branch_cost));
28868 else
28869 return 0;
28872 /* Return the va_list type node that matches the calling ABI of
28873 FNDECL. */
28875 tree
28876 ix86_fn_abi_va_list (tree fndecl)
28878 int abi;
28880 if (!TARGET_64BIT)
28881 return va_list_type_node;
28882 gcc_assert (fndecl != NULL_TREE);
28883 abi = ix86_function_abi ((const_tree) fndecl);
28885 if (abi == MS_ABI)
28886 return ms_va_list_type_node;
28887 else
28888 return sysv_va_list_type_node;
28891 /* Return the canonical va_list type specified by TYPE. If no
28892 valid TYPE is provided, return NULL_TREE. */
28894 tree
28895 ix86_canonical_va_list_type (tree type)
28897 tree wtype, htype;
28899 /* Resolve references and pointers to va_list type. */
28900 if (INDIRECT_REF_P (type))
28901 type = TREE_TYPE (type);
28902 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
28903 type = TREE_TYPE (type);
28905 if (TARGET_64BIT)
28907 wtype = va_list_type_node;
28908 gcc_assert (wtype != NULL_TREE);
28909 htype = type;
28910 if (TREE_CODE (wtype) == ARRAY_TYPE)
28912 /* If va_list is an array type, the argument may have decayed
28913 to a pointer type, e.g. by being passed to another function.
28914 In that case, unwrap both types so that we can compare the
28915 underlying records. */
28916 if (TREE_CODE (htype) == ARRAY_TYPE
28917 || POINTER_TYPE_P (htype))
28919 wtype = TREE_TYPE (wtype);
28920 htype = TREE_TYPE (htype);
28923 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
28924 return va_list_type_node;
28925 wtype = sysv_va_list_type_node;
28926 gcc_assert (wtype != NULL_TREE);
28927 htype = type;
28928 if (TREE_CODE (wtype) == ARRAY_TYPE)
28930 /* If va_list is an array type, the argument may have decayed
28931 to a pointer type, e.g. by being passed to another function.
28932 In that case, unwrap both types so that we can compare the
28933 underlying records. */
28934 if (TREE_CODE (htype) == ARRAY_TYPE
28935 || POINTER_TYPE_P (htype))
28937 wtype = TREE_TYPE (wtype);
28938 htype = TREE_TYPE (htype);
28941 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
28942 return sysv_va_list_type_node;
28943 wtype = ms_va_list_type_node;
28944 gcc_assert (wtype != NULL_TREE);
28945 htype = type;
28946 if (TREE_CODE (wtype) == ARRAY_TYPE)
28948 /* If va_list is an array type, the argument may have decayed
28949 to a pointer type, e.g. by being passed to another function.
28950 In that case, unwrap both types so that we can compare the
28951 underlying records. */
28952 if (TREE_CODE (htype) == ARRAY_TYPE
28953 || POINTER_TYPE_P (htype))
28955 wtype = TREE_TYPE (wtype);
28956 htype = TREE_TYPE (htype);
28959 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
28960 return ms_va_list_type_node;
28961 return NULL_TREE;
28963 return std_canonical_va_list_type (type);
28966 /* Iterate through the target-specific builtin types for va_list.
28967 IDX denotes the iterator, *PTREE is set to the type node of the
28968 va_list builtin, and *PNAME to its built-in name.
28969 Returns zero if there is no element for this index, otherwise
28970 IDX should be increased upon the next call.
28971 Note, do not iterate a base builtin's name like __builtin_va_list.
28972 Used from c_common_nodes_and_builtins. */
28975 int ix86_enum_va_list (int idx, const char **pname, tree *ptree)
28977 if (!TARGET_64BIT)
28978 return 0;
28979 switch (idx) {
28980 case 0:
28981 *ptree = ms_va_list_type_node;
28982 *pname = "__builtin_ms_va_list";
28983 break;
28984 case 1:
28985 *ptree = sysv_va_list_type_node;
28986 *pname = "__builtin_sysv_va_list";
28987 break;
28988 default:
28989 return 0;
28991 return 1;
28994 /* Initialize the GCC target structure. */
28995 #undef TARGET_RETURN_IN_MEMORY
28996 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
28998 #undef TARGET_ATTRIBUTE_TABLE
28999 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
29000 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29001 # undef TARGET_MERGE_DECL_ATTRIBUTES
29002 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
29003 #endif
29005 #undef TARGET_COMP_TYPE_ATTRIBUTES
29006 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
29008 #undef TARGET_INIT_BUILTINS
29009 #define TARGET_INIT_BUILTINS ix86_init_builtins
29010 #undef TARGET_EXPAND_BUILTIN
29011 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
29013 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
29014 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
29015 ix86_builtin_vectorized_function
29017 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
29018 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
29020 #undef TARGET_BUILTIN_RECIPROCAL
29021 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
29023 #undef TARGET_ASM_FUNCTION_EPILOGUE
29024 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
29026 #undef TARGET_ENCODE_SECTION_INFO
29027 #ifndef SUBTARGET_ENCODE_SECTION_INFO
29028 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
29029 #else
29030 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
29031 #endif
29033 #undef TARGET_ASM_OPEN_PAREN
29034 #define TARGET_ASM_OPEN_PAREN ""
29035 #undef TARGET_ASM_CLOSE_PAREN
29036 #define TARGET_ASM_CLOSE_PAREN ""
29038 #undef TARGET_ASM_ALIGNED_HI_OP
29039 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
29040 #undef TARGET_ASM_ALIGNED_SI_OP
29041 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
29042 #ifdef ASM_QUAD
29043 #undef TARGET_ASM_ALIGNED_DI_OP
29044 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
29045 #endif
29047 #undef TARGET_ASM_UNALIGNED_HI_OP
29048 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
29049 #undef TARGET_ASM_UNALIGNED_SI_OP
29050 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
29051 #undef TARGET_ASM_UNALIGNED_DI_OP
29052 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
29054 #undef TARGET_SCHED_ADJUST_COST
29055 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
29056 #undef TARGET_SCHED_ISSUE_RATE
29057 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
29058 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
29059 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
29060 ia32_multipass_dfa_lookahead
29062 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
29063 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
29065 #ifdef HAVE_AS_TLS
29066 #undef TARGET_HAVE_TLS
29067 #define TARGET_HAVE_TLS true
29068 #endif
29069 #undef TARGET_CANNOT_FORCE_CONST_MEM
29070 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
29071 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
29072 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
29074 #undef TARGET_DELEGITIMIZE_ADDRESS
29075 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
29077 #undef TARGET_MS_BITFIELD_LAYOUT_P
29078 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
29080 #if TARGET_MACHO
29081 #undef TARGET_BINDS_LOCAL_P
29082 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
29083 #endif
29084 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29085 #undef TARGET_BINDS_LOCAL_P
29086 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
29087 #endif
29089 #undef TARGET_ASM_OUTPUT_MI_THUNK
29090 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
29091 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
29092 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
29094 #undef TARGET_ASM_FILE_START
29095 #define TARGET_ASM_FILE_START x86_file_start
29097 #undef TARGET_DEFAULT_TARGET_FLAGS
29098 #define TARGET_DEFAULT_TARGET_FLAGS \
29099 (TARGET_DEFAULT \
29100 | TARGET_SUBTARGET_DEFAULT \
29101 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
29103 #undef TARGET_HANDLE_OPTION
29104 #define TARGET_HANDLE_OPTION ix86_handle_option
29106 #undef TARGET_RTX_COSTS
29107 #define TARGET_RTX_COSTS ix86_rtx_costs
29108 #undef TARGET_ADDRESS_COST
29109 #define TARGET_ADDRESS_COST ix86_address_cost
29111 #undef TARGET_FIXED_CONDITION_CODE_REGS
29112 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
29113 #undef TARGET_CC_MODES_COMPATIBLE
29114 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
29116 #undef TARGET_MACHINE_DEPENDENT_REORG
29117 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
29119 #undef TARGET_BUILD_BUILTIN_VA_LIST
29120 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
29122 #undef TARGET_FN_ABI_VA_LIST
29123 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
29125 #undef TARGET_CANONICAL_VA_LIST_TYPE
29126 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
29128 #undef TARGET_EXPAND_BUILTIN_VA_START
29129 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
29131 #undef TARGET_MD_ASM_CLOBBERS
29132 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
29134 #undef TARGET_PROMOTE_PROTOTYPES
29135 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
29136 #undef TARGET_STRUCT_VALUE_RTX
29137 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
29138 #undef TARGET_SETUP_INCOMING_VARARGS
29139 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
29140 #undef TARGET_MUST_PASS_IN_STACK
29141 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
29142 #undef TARGET_PASS_BY_REFERENCE
29143 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
29144 #undef TARGET_INTERNAL_ARG_POINTER
29145 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
29146 #undef TARGET_UPDATE_STACK_BOUNDARY
29147 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
29148 #undef TARGET_GET_DRAP_RTX
29149 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
29150 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
29151 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
29152 #undef TARGET_STRICT_ARGUMENT_NAMING
29153 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
29155 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
29156 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
29158 #undef TARGET_SCALAR_MODE_SUPPORTED_P
29159 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
29161 #undef TARGET_VECTOR_MODE_SUPPORTED_P
29162 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
29164 #undef TARGET_C_MODE_FOR_SUFFIX
29165 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
29167 #ifdef HAVE_AS_TLS
29168 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
29169 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
29170 #endif
29172 #ifdef SUBTARGET_INSERT_ATTRIBUTES
29173 #undef TARGET_INSERT_ATTRIBUTES
29174 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
29175 #endif
29177 #undef TARGET_MANGLE_TYPE
29178 #define TARGET_MANGLE_TYPE ix86_mangle_type
29180 #undef TARGET_STACK_PROTECT_FAIL
29181 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
29183 #undef TARGET_FUNCTION_VALUE
29184 #define TARGET_FUNCTION_VALUE ix86_function_value
29186 #undef TARGET_SECONDARY_RELOAD
29187 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
29189 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
29190 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST x86_builtin_vectorization_cost
29192 #undef TARGET_SET_CURRENT_FUNCTION
29193 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
29195 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
29196 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
29198 #undef TARGET_OPTION_SAVE
29199 #define TARGET_OPTION_SAVE ix86_function_specific_save
29201 #undef TARGET_OPTION_RESTORE
29202 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
29204 #undef TARGET_OPTION_PRINT
29205 #define TARGET_OPTION_PRINT ix86_function_specific_print
29207 #undef TARGET_OPTION_CAN_INLINE_P
29208 #define TARGET_OPTION_CAN_INLINE_P ix86_can_inline_p
29210 struct gcc_target targetm = TARGET_INITIALIZER;
29212 #include "gt-i386.h"