/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "c-common.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
#include "params.h"
#include "cselib.h"
static int x86_builtin_vectorization_cost (bool);
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)		\
  ((mode) == QImode ? 0			\
   : (mode) == HImode ? 1		\
   : (mode) == SImode ? 2		\
   : (mode) == DImode ? 3		\
   : 4)
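/* For example, the SImode multiply startup cost for the active tuning can
   be looked up as ix86_cost->mult_init[MODE_INDEX (SImode)], i.e. slot 2
   of the five-entry tables below (illustrative; field names as in
   struct processor_costs).  */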
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
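/* Under that assumption the two scales agree: COSTS_N_INSNS (1) == 4 and
   an add is 2 bytes, so COSTS_N_BYTES (2) == 4 prices a 2-byte add the
   same as one insn on the COSTS_N_INSNS scale.  */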
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
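/* Each stringop_algs initializer below reads as {unknown_size_alg,
   {{max, alg}, ...}}: the first member picks the algorithm when the block
   size is unknown at compile time, and each {max, alg} pair covers known
   sizes up to MAX, with -1 terminating the list (layout as declared in
   i386.h; the 32-bit table entry comes first, the 64-bit one second).
   DUMMY_STRINGOP_ALGS simply fills the variant a given tuning never
   consults.  */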
const
struct processor_costs ix86_size_cost = {  /* costs for tuning for size */
  COSTS_N_BYTES (2),	/* cost of an add instruction */
  COSTS_N_BYTES (3),	/* cost of a lea instruction */
  COSTS_N_BYTES (2),	/* variable shift costs */
  COSTS_N_BYTES (3),	/* constant shift costs */
  {COSTS_N_BYTES (3),	/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),	/* HI */
   COSTS_N_BYTES (3),	/* SI */
   COSTS_N_BYTES (3),	/* DI */
   COSTS_N_BYTES (5)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),	/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),	/* HI */
   COSTS_N_BYTES (3),	/* SI */
   COSTS_N_BYTES (3),	/* DI */
   COSTS_N_BYTES (5)},	/* other */
  COSTS_N_BYTES (3),	/* cost of movsx */
  COSTS_N_BYTES (3),	/* cost of movzx */
  0,			/* "large" insn */
  2,			/* MOVE_RATIO */
  2,			/* cost for loading QImode using movzbl */
  {2, 2, 2},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {2, 2, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {2, 2, 2},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {2, 2, 2},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  3,			/* cost of moving MMX register */
  {3, 3},		/* cost of loading MMX registers
			   in SImode and DImode */
  {3, 3},		/* cost of storing MMX registers
			   in SImode and DImode */
  3,			/* cost of moving SSE register */
  {3, 3, 3},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {3, 3, 3},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  0,			/* size of l1 cache  */
  0,			/* size of l2 cache  */
  0,			/* size of prefetch block */
  0,			/* number of parallel prefetches */
  2,			/* Branch cost */
  COSTS_N_BYTES (2),	/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),	/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),	/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),	/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),	/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),	/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  1,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  1,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (1),	/* cost of a lea instruction */
  COSTS_N_INSNS (3),	/* variable shift costs */
  COSTS_N_INSNS (2),	/* constant shift costs */
  {COSTS_N_INSNS (6),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),	/* HI */
   COSTS_N_INSNS (6),	/* SI */
   COSTS_N_INSNS (6),	/* DI */
   COSTS_N_INSNS (6)},	/* other */
  COSTS_N_INSNS (1),	/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),	/* HI */
   COSTS_N_INSNS (23),	/* SI */
   COSTS_N_INSNS (23),	/* DI */
   COSTS_N_INSNS (23)},	/* other */
  COSTS_N_INSNS (3),	/* cost of movsx */
  COSTS_N_INSNS (2),	/* cost of movzx */
  15,			/* "large" insn */
  3,			/* MOVE_RATIO */
  4,			/* cost for loading QImode using movzbl */
  {2, 4, 2},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {2, 4, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {8, 8, 8},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {8, 8, 8},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {4, 8},		/* cost of loading MMX registers
			   in SImode and DImode */
  {4, 8},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 8, 16},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {4, 8, 16},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  0,			/* size of l1 cache  */
  0,			/* size of l2 cache  */
  0,			/* size of prefetch block */
  0,			/* number of parallel prefetches */
  1,			/* Branch cost */
  COSTS_N_INSNS (23),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),	/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (1),	/* cost of a lea instruction */
  COSTS_N_INSNS (3),	/* variable shift costs */
  COSTS_N_INSNS (2),	/* constant shift costs */
  {COSTS_N_INSNS (12),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),	/* HI */
   COSTS_N_INSNS (12),	/* SI */
   COSTS_N_INSNS (12),	/* DI */
   COSTS_N_INSNS (12)},	/* other */
  1,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),	/* HI */
   COSTS_N_INSNS (40),	/* SI */
   COSTS_N_INSNS (40),	/* DI */
   COSTS_N_INSNS (40)},	/* other */
  COSTS_N_INSNS (3),	/* cost of movsx */
  COSTS_N_INSNS (2),	/* cost of movzx */
  15,			/* "large" insn */
  3,			/* MOVE_RATIO */
  4,			/* cost for loading QImode using movzbl */
  {2, 4, 2},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {2, 4, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {8, 8, 8},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {8, 8, 8},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {4, 8},		/* cost of loading MMX registers
			   in SImode and DImode */
  {4, 8},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 8, 16},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {4, 8, 16},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  4,			/* size of l1 cache.  486 has 8kB cache
			   shared for code and data, so 4kB is
			   not really precise.  */
  4,			/* size of l2 cache  */
  0,			/* size of prefetch block */
  0,			/* number of parallel prefetches */
  1,			/* Branch cost */
  COSTS_N_INSNS (8),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),	/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (1),	/* cost of a lea instruction */
  COSTS_N_INSNS (4),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (11),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),	/* HI */
   COSTS_N_INSNS (11),	/* SI */
   COSTS_N_INSNS (11),	/* DI */
   COSTS_N_INSNS (11)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),	/* HI */
   COSTS_N_INSNS (25),	/* SI */
   COSTS_N_INSNS (25),	/* DI */
   COSTS_N_INSNS (25)},	/* other */
  COSTS_N_INSNS (3),	/* cost of movsx */
  COSTS_N_INSNS (2),	/* cost of movzx */
  8,			/* "large" insn */
  6,			/* MOVE_RATIO */
  6,			/* cost for loading QImode using movzbl */
  {2, 4, 2},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {2, 4, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {2, 2, 6},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {4, 4, 6},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  8,			/* cost of moving MMX register */
  {8, 8},		/* cost of loading MMX registers
			   in SImode and DImode */
  {8, 8},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 8, 16},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {4, 8, 16},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  8,			/* size of l1 cache.  */
  8,			/* size of l2 cache  */
  0,			/* size of prefetch block */
  0,			/* number of parallel prefetches */
  2,			/* Branch cost */
  COSTS_N_INSNS (3),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),	/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (1),	/* cost of a lea instruction */
  COSTS_N_INSNS (1),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (4),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),	/* HI */
   COSTS_N_INSNS (4),	/* SI */
   COSTS_N_INSNS (4),	/* DI */
   COSTS_N_INSNS (4)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),	/* HI */
   COSTS_N_INSNS (17),	/* SI */
   COSTS_N_INSNS (17),	/* DI */
   COSTS_N_INSNS (17)},	/* other */
  COSTS_N_INSNS (1),	/* cost of movsx */
  COSTS_N_INSNS (1),	/* cost of movzx */
  8,			/* "large" insn */
  6,			/* MOVE_RATIO */
  2,			/* cost for loading QImode using movzbl */
  {4, 4, 4},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {2, 2, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {2, 2, 6},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {4, 4, 6},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {2, 2},		/* cost of loading MMX registers
			   in SImode and DImode */
  {2, 2},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {2, 2, 8},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {2, 2, 8},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  8,			/* size of l1 cache.  */
  256,			/* size of l2 cache  */
  32,			/* size of prefetch block */
  6,			/* number of parallel prefetches */
  2,			/* Branch cost */
  COSTS_N_INSNS (3),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),	/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (1),	/* cost of a lea instruction */
  COSTS_N_INSNS (2),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (3),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),	/* HI */
   COSTS_N_INSNS (7),	/* SI */
   COSTS_N_INSNS (7),	/* DI */
   COSTS_N_INSNS (7)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),	/* HI */
   COSTS_N_INSNS (39),	/* SI */
   COSTS_N_INSNS (39),	/* DI */
   COSTS_N_INSNS (39)},	/* other */
  COSTS_N_INSNS (1),	/* cost of movsx */
  COSTS_N_INSNS (1),	/* cost of movzx */
  8,			/* "large" insn */
  4,			/* MOVE_RATIO */
  1,			/* cost for loading QImode using movzbl */
  {1, 1, 1},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {1, 1, 1},		/* cost of storing integer registers */
  1,			/* cost of reg,reg fld/fst */
  {1, 1, 1},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {4, 6, 6},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */

  1,			/* cost of moving MMX register */
  {1, 1},		/* cost of loading MMX registers
			   in SImode and DImode */
  {1, 1},		/* cost of storing MMX registers
			   in SImode and DImode */
  1,			/* cost of moving SSE register */
  {1, 1, 1},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {1, 1, 1},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  1,			/* MMX or SSE register to integer */
  64,			/* size of l1 cache.  */
  128,			/* size of l2 cache.  */
  32,			/* size of prefetch block */
  1,			/* number of parallel prefetches */
  1,			/* Branch cost */
  COSTS_N_INSNS (6),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),	/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (2),	/* cost of a lea instruction */
  COSTS_N_INSNS (1),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (3),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),	/* HI */
   COSTS_N_INSNS (3),	/* SI */
   COSTS_N_INSNS (3),	/* DI */
   COSTS_N_INSNS (3)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),	/* HI */
   COSTS_N_INSNS (18),	/* SI */
   COSTS_N_INSNS (18),	/* DI */
   COSTS_N_INSNS (18)},	/* other */
  COSTS_N_INSNS (2),	/* cost of movsx */
  COSTS_N_INSNS (2),	/* cost of movzx */
  8,			/* "large" insn */
  4,			/* MOVE_RATIO */
  3,			/* cost for loading QImode using movzbl */
  {4, 5, 4},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {2, 3, 2},		/* cost of storing integer registers */
  4,			/* cost of reg,reg fld/fst */
  {6, 6, 6},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {4, 4, 4},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {2, 2},		/* cost of loading MMX registers
			   in SImode and DImode */
  {2, 2},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {2, 2, 8},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {2, 2, 8},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  6,			/* MMX or SSE register to integer */
  32,			/* size of l1 cache.  */
  32,			/* size of l2 cache.  Some models
			   have integrated l2 cache, but
			   optimizing for k6 is not important
			   enough to worry about that.  */
  32,			/* size of prefetch block */
  1,			/* number of parallel prefetches */
  1,			/* Branch cost */
  COSTS_N_INSNS (2),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),	/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (2),	/* cost of a lea instruction */
  COSTS_N_INSNS (1),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (5),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),	/* HI */
   COSTS_N_INSNS (5),	/* SI */
   COSTS_N_INSNS (5),	/* DI */
   COSTS_N_INSNS (5)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),	/* HI */
   COSTS_N_INSNS (42),	/* SI */
   COSTS_N_INSNS (74),	/* DI */
   COSTS_N_INSNS (74)},	/* other */
  COSTS_N_INSNS (1),	/* cost of movsx */
  COSTS_N_INSNS (1),	/* cost of movzx */
  8,			/* "large" insn */
  9,			/* MOVE_RATIO */
  4,			/* cost for loading QImode using movzbl */
  {3, 4, 3},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {3, 4, 3},		/* cost of storing integer registers */
  4,			/* cost of reg,reg fld/fst */
  {4, 4, 12},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {6, 6, 8},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {4, 4},		/* cost of loading MMX registers
			   in SImode and DImode */
  {4, 4},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 4, 6},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {4, 4, 5},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  5,			/* MMX or SSE register to integer */
  64,			/* size of l1 cache.  */
  256,			/* size of l2 cache.  */
  64,			/* size of prefetch block */
  6,			/* number of parallel prefetches */
  5,			/* Branch cost */
  COSTS_N_INSNS (4),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),	/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (2),	/* cost of a lea instruction */
  COSTS_N_INSNS (1),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (3),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),	/* HI */
   COSTS_N_INSNS (3),	/* SI */
   COSTS_N_INSNS (4),	/* DI */
   COSTS_N_INSNS (5)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),	/* HI */
   COSTS_N_INSNS (42),	/* SI */
   COSTS_N_INSNS (74),	/* DI */
   COSTS_N_INSNS (74)},	/* other */
  COSTS_N_INSNS (1),	/* cost of movsx */
  COSTS_N_INSNS (1),	/* cost of movzx */
  8,			/* "large" insn */
  9,			/* MOVE_RATIO */
  4,			/* cost for loading QImode using movzbl */
  {3, 4, 3},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {3, 4, 3},		/* cost of storing integer registers */
  4,			/* cost of reg,reg fld/fst */
  {4, 4, 12},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {6, 6, 8},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {3, 3},		/* cost of loading MMX registers
			   in SImode and DImode */
  {4, 4},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 3, 6},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {4, 4, 5},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  5,			/* MMX or SSE register to integer */
  64,			/* size of l1 cache.  */
  512,			/* size of l2 cache.  */
  64,			/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,			/* number of parallel prefetches */
  3,			/* Branch cost */
  COSTS_N_INSNS (4),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),	/* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use a loop.  For large blocks, a libcall
     can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
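  /* Decoded per the stringop_algs layout noted above: e.g. the 32-bit
     memcpy entry uses an inline loop for known sizes up to 6 bytes, an
     unrolled loop up to 14, and rep movsl beyond that, while an unknown
     size goes straight to a libcall (an illustrative reading, not a
     separate tuning knob).  */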
  4,			/* scalar_stmt_cost.  */
  2,			/* scalar load_cost.  */
  2,			/* scalar_store_cost.  */
  5,			/* vec_stmt_cost.  */
  0,			/* vec_to_scalar_cost.  */
  2,			/* scalar_to_vec_cost.  */
  2,			/* vec_align_load_cost.  */
  3,			/* vec_unalign_load_cost.  */
  3,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  2,			/* cond_not_taken_branch_cost.  */
};
struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (2),	/* cost of a lea instruction */
  COSTS_N_INSNS (1),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (3),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),	/* HI */
   COSTS_N_INSNS (3),	/* SI */
   COSTS_N_INSNS (4),	/* DI */
   COSTS_N_INSNS (5)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),	/* HI */
   COSTS_N_INSNS (51),	/* SI */
   COSTS_N_INSNS (83),	/* DI */
   COSTS_N_INSNS (83)},	/* other */
  COSTS_N_INSNS (1),	/* cost of movsx */
  COSTS_N_INSNS (1),	/* cost of movzx */
  8,			/* "large" insn */
  9,			/* MOVE_RATIO */
  4,			/* cost for loading QImode using movzbl */
  {3, 4, 3},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {3, 4, 3},		/* cost of storing integer registers */
  4,			/* cost of reg,reg fld/fst */
  {4, 4, 12},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {6, 6, 8},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {3, 3},		/* cost of loading MMX registers
			   in SImode and DImode */
  {4, 4},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 4, 3},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {4, 4, 5},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  /* On K8:
       MOVD reg64, xmmreg	Double	FSTORE	4
       MOVD reg32, xmmreg	Double	FSTORE	4
     On AMDFAM10:
       MOVD reg64, xmmreg	Double	FADD	3
				1/1  1/1
       MOVD reg32, xmmreg	Double	FADD	3
				1/1  1/1 */
  64,			/* size of l1 cache.  */
  512,			/* size of l2 cache.  */
  64,			/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,			/* number of parallel prefetches */
  2,			/* Branch cost */
  COSTS_N_INSNS (4),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),	/* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,			/* scalar_stmt_cost.  */
  2,			/* scalar load_cost.  */
  2,			/* scalar_store_cost.  */
  6,			/* vec_stmt_cost.  */
  0,			/* vec_to_scalar_cost.  */
  2,			/* scalar_to_vec_cost.  */
  2,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  2,			/* vec_store_cost.  */
  2,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (3),	/* cost of a lea instruction */
  COSTS_N_INSNS (4),	/* variable shift costs */
  COSTS_N_INSNS (4),	/* constant shift costs */
  {COSTS_N_INSNS (15),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),	/* HI */
   COSTS_N_INSNS (15),	/* SI */
   COSTS_N_INSNS (15),	/* DI */
   COSTS_N_INSNS (15)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),	/* HI */
   COSTS_N_INSNS (56),	/* SI */
   COSTS_N_INSNS (56),	/* DI */
   COSTS_N_INSNS (56)},	/* other */
  COSTS_N_INSNS (1),	/* cost of movsx */
  COSTS_N_INSNS (1),	/* cost of movzx */
  16,			/* "large" insn */
  6,			/* MOVE_RATIO */
  2,			/* cost for loading QImode using movzbl */
  {4, 5, 4},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {2, 3, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {2, 2, 6},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {4, 4, 6},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {2, 2},		/* cost of loading MMX registers
			   in SImode and DImode */
  {2, 2},		/* cost of storing MMX registers
			   in SImode and DImode */
  12,			/* cost of moving SSE register */
  {12, 12, 12},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {2, 2, 8},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  10,			/* MMX or SSE register to integer */
  8,			/* size of l1 cache.  */
  256,			/* size of l2 cache.  */
  64,			/* size of prefetch block */
  6,			/* number of parallel prefetches */
  2,			/* Branch cost */
  COSTS_N_INSNS (5),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),	/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (1),	/* cost of a lea instruction */
  COSTS_N_INSNS (1),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (10),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),	/* HI */
   COSTS_N_INSNS (10),	/* SI */
   COSTS_N_INSNS (10),	/* DI */
   COSTS_N_INSNS (10)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),	/* HI */
   COSTS_N_INSNS (66),	/* SI */
   COSTS_N_INSNS (66),	/* DI */
   COSTS_N_INSNS (66)},	/* other */
  COSTS_N_INSNS (1),	/* cost of movsx */
  COSTS_N_INSNS (1),	/* cost of movzx */
  16,			/* "large" insn */
  17,			/* MOVE_RATIO */
  4,			/* cost for loading QImode using movzbl */
  {4, 4, 4},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {4, 4, 4},		/* cost of storing integer registers */
  3,			/* cost of reg,reg fld/fst */
  {12, 12, 12},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {4, 4, 4},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  6,			/* cost of moving MMX register */
  {12, 12},		/* cost of loading MMX registers
			   in SImode and DImode */
  {12, 12},		/* cost of storing MMX registers
			   in SImode and DImode */
  6,			/* cost of moving SSE register */
  {12, 12, 12},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {12, 12, 12},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  8,			/* MMX or SSE register to integer */
  8,			/* size of l1 cache.  */
  1024,			/* size of l2 cache.  */
  128,			/* size of prefetch block */
  8,			/* number of parallel prefetches */
  1,			/* Branch cost */
  COSTS_N_INSNS (6),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),	/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
static const
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,/* cost of a lea instruction */
  COSTS_N_INSNS (1),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (3),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),	/* HI */
   COSTS_N_INSNS (3),	/* SI */
   COSTS_N_INSNS (3),	/* DI */
   COSTS_N_INSNS (3)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),	/* HI */
   COSTS_N_INSNS (22),	/* SI */
   COSTS_N_INSNS (22),	/* DI */
   COSTS_N_INSNS (22)},	/* other */
  COSTS_N_INSNS (1),	/* cost of movsx */
  COSTS_N_INSNS (1),	/* cost of movzx */
  8,			/* "large" insn */
  16,			/* MOVE_RATIO */
  2,			/* cost for loading QImode using movzbl */
  {6, 6, 6},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {4, 4, 4},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {6, 6, 6},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {4, 4, 4},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {6, 6},		/* cost of loading MMX registers
			   in SImode and DImode */
  {4, 4},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {6, 6, 6},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {4, 4, 4},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  2,			/* MMX or SSE register to integer */
  32,			/* size of l1 cache.  */
  2048,			/* size of l2 cache.  */
  128,			/* size of prefetch block */
  8,			/* number of parallel prefetches */
  3,			/* Branch cost */
  COSTS_N_INSNS (3),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),	/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
/* Generic64 should produce code tuned for Nocona and K8.  */
static const
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  /* On all chips taken into consideration, lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results
     in the use of unnecessary temporary registers, causing a regression on
     several SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,/* cost of a lea instruction */
  COSTS_N_INSNS (1),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (3),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),	/* HI */
   COSTS_N_INSNS (3),	/* SI */
   COSTS_N_INSNS (4),	/* DI */
   COSTS_N_INSNS (2)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),	/* HI */
   COSTS_N_INSNS (42),	/* SI */
   COSTS_N_INSNS (74),	/* DI */
   COSTS_N_INSNS (74)},	/* other */
  COSTS_N_INSNS (1),	/* cost of movsx */
  COSTS_N_INSNS (1),	/* cost of movzx */
  8,			/* "large" insn */
  17,			/* MOVE_RATIO */
  4,			/* cost for loading QImode using movzbl */
  {4, 4, 4},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {4, 4, 4},		/* cost of storing integer registers */
  4,			/* cost of reg,reg fld/fst */
  {12, 12, 12},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {6, 6, 8},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {8, 8},		/* cost of loading MMX registers
			   in SImode and DImode */
  {8, 8},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {8, 8, 8},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {8, 8, 8},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  5,			/* MMX or SSE register to integer */
  32,			/* size of l1 cache.  */
  512,			/* size of l2 cache.  */
  64,			/* size of prefetch block */
  6,			/* number of parallel prefetches */
  /* Benchmark runs show large regressions on the K8 sixtrack benchmark when
     this value is increased to the perhaps more appropriate value of 5.  */
  3,			/* Branch cost */
  COSTS_N_INSNS (8),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),	/* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
/* Generic32 should produce code tuned for PPro, Pentium4, Nocona,
   Core 2, Athlon and K8.  */
static const
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),	/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,/* cost of a lea instruction */
  COSTS_N_INSNS (1),	/* variable shift costs */
  COSTS_N_INSNS (1),	/* constant shift costs */
  {COSTS_N_INSNS (3),	/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),	/* HI */
   COSTS_N_INSNS (3),	/* SI */
   COSTS_N_INSNS (4),	/* DI */
   COSTS_N_INSNS (2)},	/* other */
  0,			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),	/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),	/* HI */
   COSTS_N_INSNS (42),	/* SI */
   COSTS_N_INSNS (74),	/* DI */
   COSTS_N_INSNS (74)},	/* other */
  COSTS_N_INSNS (1),	/* cost of movsx */
  COSTS_N_INSNS (1),	/* cost of movzx */
  8,			/* "large" insn */
  17,			/* MOVE_RATIO */
  4,			/* cost for loading QImode using movzbl */
  {4, 4, 4},		/* cost of loading integer registers
			   in QImode, HImode and SImode.
			   Relative to reg-reg move (2).  */
  {4, 4, 4},		/* cost of storing integer registers */
  4,			/* cost of reg,reg fld/fst */
  {12, 12, 12},		/* cost of loading fp registers
			   in SFmode, DFmode and XFmode */
  {6, 6, 8},		/* cost of storing fp registers
			   in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {8, 8},		/* cost of loading MMX registers
			   in SImode and DImode */
  {8, 8},		/* cost of storing MMX registers
			   in SImode and DImode */
  2,			/* cost of moving SSE register */
  {8, 8, 8},		/* cost of loading SSE registers
			   in SImode, DImode and TImode */
  {8, 8, 8},		/* cost of storing SSE registers
			   in SImode, DImode and TImode */
  5,			/* MMX or SSE register to integer */
  32,			/* size of l1 cache.  */
  256,			/* size of l2 cache.  */
  64,			/* size of prefetch block */
  6,			/* number of parallel prefetches */
  3,			/* Branch cost */
  COSTS_N_INSNS (8),	/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),	/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),	/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),	/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),	/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),	/* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,			/* scalar_stmt_cost.  */
  1,			/* scalar load_cost.  */
  1,			/* scalar_store_cost.  */
  1,			/* vec_stmt_cost.  */
  1,			/* vec_to_scalar_cost.  */
  1,			/* scalar_to_vec_cost.  */
  1,			/* vec_align_load_cost.  */
  2,			/* vec_unalign_load_cost.  */
  1,			/* vec_store_cost.  */
  3,			/* cond_taken_branch_cost.  */
  1,			/* cond_not_taken_branch_cost.  */
};
const struct processor_costs *ix86_cost = &pentium_cost;
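/* The initializer above is only a placeholder: option processing later in
   this file repoints ix86_cost at the cost table matching the selected
   -mtune processor (or at ix86_size_cost when optimizing for size).  */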
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
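/* These masks combine and test like ordinary bit flags: a tuning table
   entry of, say, (m_K8 | m_GENERIC64) enables its feature whenever the
   active processor's bit (1 << ix86_tune) falls in that set.  */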
/* Generic instruction choice should be common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];
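/* A minimal sketch of how this array gets filled in (done during option
   processing later in this file; shown here for orientation):

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
	 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);  */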
/* Feature tests against the various tunings used to create ix86_tune_features
   based on the processor mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling it for Generic64 seems like a good code size
     tradeoff.  We can't enable it for 32bit generic because it does not
     work well with PPro base chips.  */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,

  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ZERO_EXTEND_WITH_AND */
  m_486 | m_PENT,

  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_PPRO | m_AMD_MULTIPLE | m_K6 | m_CORE2 | m_GENERIC,

  /* X86_TUNE_DEEP_BRANCH_PREDICTION */
  m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,

  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
     on simulation result.  But after P4 was made, no performance benefit
     was observed with branch hints.  It also increases the code size.
     As a result, icc never generates branch hints.  */
  0,

  /* X86_TUNE_DOUBLE_WITH_ADD */
  ~m_386,

  /* X86_TUNE_USE_SAHF */
  m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,

  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on the Generic32 compilation setting as well.  However,
     in the current implementation the partial register stalls are not
     eliminated very well - they can be introduced via subregs synthesized
     by combine and can happen in caller/callee saving sequences.  Because
     this option pays back little on PPro based chips and is in conflict
     with partial reg dependencies used by Athlon/P4 based chips, it is
     better to leave it off for generic32 for now.  */
  m_PPRO,

  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_MOV0 */
  m_K6,

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
  m_PENT4,

  /* X86_TUNE_SPLIT_LONG_MOVES */
  m_PPRO,

  /* X86_TUNE_READ_MODIFY_WRITE */
  ~m_PENT,

  /* X86_TUNE_READ_MODIFY */
  ~(m_PENT | m_PPRO),

  /* X86_TUNE_PROMOTE_QIMODE */
  m_K6_GEODE | m_PENT | m_386 | m_486 | m_AMD_MULTIPLE | m_CORE2
  | m_GENERIC /* | m_PENT4 ? */,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_PENT | m_486 | m_386),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_PENT4 | m_NOCONA,

  /* X86_TUNE_QIMODE_MATH */
  ~0,

  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls was more effective.  */
  ~m_PPRO,

  /* X86_TUNE_PROMOTE_QI_REGS */
  0,

  /* X86_TUNE_PROMOTE_HI_REGS */
  m_PPRO,

  /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop.  */
  m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ADD_ESP_8 */
  m_AMD_MULTIPLE | m_PPRO | m_K6_GEODE | m_386
  | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_4 */
  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_8 */
  m_AMD_MULTIPLE | m_PPRO | m_386 | m_486
  | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
     for DFmode copies */
  ~(m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC | m_GEODE),

  /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
     conflict here between PPro/Pentium4 based chips that treat 128bit
     SSE registers as single units and K8 based chips that divide SSE
     registers into two 64bit halves.  This knob promotes all store
     destinations to be 128bit so as to allow register renaming on 128bit
     SSE units, but usually results in one extra microop on 64bit SSE
     units.  Experimental results show that disabling this option on P4
     brings over 20% SPECfp regression, while enabling it on K8 brings
     roughly 2.4% regression that can be partly masked by careful scheduling
     of moves.  */
  m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_AMDFAM10,

  /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
  m_AMDFAM10,

  /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
     are resolved on SSE register parts instead of whole registers, so we may
     maintain just the lower part of scalar values in proper format, leaving
     the upper part undefined.  */
  m_ATHLON_K8,

  /* X86_TUNE_SSE_TYPELESS_STORES */
  m_AMD_MULTIPLE,

  /* X86_TUNE_SSE_LOAD0_BY_PXOR */
  m_PPRO | m_PENT4 | m_NOCONA,

  /* X86_TUNE_MEMORY_MISMATCH_STALL */
  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_PROLOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EPILOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHIFT1 */
  ~m_486,

  /* X86_TUNE_USE_FFREEP */
  m_AMD_MULTIPLE,

  /* X86_TUNE_INTER_UNIT_MOVES */
  ~(m_AMD_MULTIPLE | m_GENERIC),

  /* X86_TUNE_INTER_UNIT_CONVERSIONS */
  ~(m_AMDFAM10),

  /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
     than 4 branch instructions in the 16 byte window.  */
  m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SCHEDULE */
  m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_BT */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_INCDEC */
  ~(m_PENT4 | m_NOCONA | m_GENERIC),

  /* X86_TUNE_PAD_RETURNS */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EXT_80387_CONSTANTS */
  m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHORTEN_X87_SSE */
  ~m_K8,

  /* X86_TUNE_AVOID_VECTOR_DECODE */
  m_K8 | m_GENERIC64,

  /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
     HImode and SImode multiply, but 386 and 486 do HImode multiply
     faster.  */
  ~(m_386 | m_486),

  /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
     vector path on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
     machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
     than a MOV.  */
  m_PENT,

  /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
     but one byte longer.  */
  m_PENT,

  /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
     operand that cannot be represented using a modRM byte.  The XOR
     replacement is long decoded, so this split helps here as well.  */
  m_K6,

  /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
     from FP to FP.  */
  m_AMDFAM10 | m_GENERIC,

  /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
     from integer to FP.  */
  m_AMDFAM10,

  /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
     with a subsequent conditional jump instruction into a single
     compare-and-branch uop.  */
  m_CORE2,
};
/* Feature tests against the various architecture variations.  */
unsigned char ix86_arch_features[X86_ARCH_LAST];

/* Feature tests against the various architecture variations, used to create
   ix86_arch_features based on the processor mask.  */
1457 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1458 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1459 ~(m_386 | m_486 | m_PENT | m_K6),
1461 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1462 ~m_386,
1464 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1465 ~(m_386 | m_486),
1467 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1468 ~m_386,
1470 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1471 ~m_386,
1474 static const unsigned int x86_accumulate_outgoing_args
1475 = m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1477 static const unsigned int x86_arch_always_fancy_math_387
1478 = m_PENT | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1479 | m_NOCONA | m_CORE2 | m_GENERIC;
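/* A purely illustrative sketch, not part of the original logic: it
   mirrors the expansion loops in override_options below. Each packed
   mask such as initial_ix86_arch_features is tested against the single
   bit for the selected processor, yielding one boolean per feature. */
static void ATTRIBUTE_UNUSED
sketch_expand_arch_features (enum processor_type arch)
{
  unsigned int arch_mask = 1u << arch;
  int i;

  for (i = 0; i < X86_ARCH_LAST; ++i)
    ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & arch_mask);
}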
1481 static enum stringop_alg stringop_alg = no_stringop;
1483 /* In case the average insn count for a single function invocation is
1484 lower than this constant, emit fast (but longer) prologue and
1485 epilogue code. */
1486 #define FAST_PROLOGUE_INSN_COUNT 20
1488 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1489 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1490 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1491 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1493 /* Array of the smallest class containing reg number REGNO, indexed by
1494 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1496 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1498 /* ax, dx, cx, bx */
1499 AREG, DREG, CREG, BREG,
1500 /* si, di, bp, sp */
1501 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1502 /* FP registers */
1503 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1504 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1505 /* arg pointer */
1506 NON_Q_REGS,
1507 /* flags, fpsr, fpcr, frame */
1508 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1509 /* SSE registers */
1510 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1511 SSE_REGS, SSE_REGS,
1512 /* MMX registers */
1513 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1514 MMX_REGS, MMX_REGS,
1515 /* REX registers */
1516 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1517 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1518 /* SSE REX registers */
1519 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1520 SSE_REGS, SSE_REGS,
1523 /* The "default" register map used in 32bit mode. */
1525 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1527 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1528 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1529 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1530 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1531 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1532 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1533 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1536 static int const x86_64_int_parameter_registers[6] =
1538 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
1539 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1542 static int const x86_64_ms_abi_int_parameter_registers[4] =
1544 2 /*RCX*/, 1 /*RDX*/,
1545 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1548 static int const x86_64_int_return_registers[4] =
1550 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
1553 /* The "default" register map used in 64bit mode. */
1554 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1556 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1557 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1558 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1559 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1560 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1561 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
1562 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1565 /* Define the register numbers to be used in Dwarf debugging information.
1566 The SVR4 reference port C compiler uses the following register numbers
1567 in its Dwarf output code:
1568 0 for %eax (gcc regno = 0)
1569 1 for %ecx (gcc regno = 2)
1570 2 for %edx (gcc regno = 1)
1571 3 for %ebx (gcc regno = 3)
1572 4 for %esp (gcc regno = 7)
1573 5 for %ebp (gcc regno = 6)
1574 6 for %esi (gcc regno = 4)
1575 7 for %edi (gcc regno = 5)
1576 The following three DWARF register numbers are never generated by
1577 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1578 believes these numbers have these meanings.
1579 8 for %eip (no gcc equivalent)
1580 9 for %eflags (gcc regno = 17)
1581 10 for %trapno (no gcc equivalent)
1582 It is not at all clear how we should number the FP stack registers
1583 for the x86 architecture. If the version of SDB on x86/svr4 were
1584 a bit less brain dead with respect to floating-point then we would
1585 have a precedent to follow with respect to DWARF register numbers
1586 for x86 FP registers, but the SDB on x86/svr4 is so completely
1587 broken with respect to FP registers that it is hardly worth thinking
1588 of it as something to strive for compatibility with.
1589 The version of x86/svr4 SDB I have at the moment does (partially)
1590 seem to believe that DWARF register number 11 is associated with
1591 the x86 register %st(0), but that's about all. Higher DWARF
1592 register numbers don't seem to be associated with anything in
1593 particular, and even for DWARF regno 11, SDB only seems to under-
1594 stand that it should say that a variable lives in %st(0) (when
1595 asked via an `=' command) if we said it was in DWARF regno 11,
1596 but SDB still prints garbage when asked for the value of the
1597 variable in question (via a `/' command).
1598 (Also note that the labels SDB prints for various FP stack regs
1599 when doing an `x' command are all wrong.)
1600 Note that these problems generally don't affect the native SVR4
1601 C compiler because it doesn't allow the use of -O with -g and
1602 because when it is *not* optimizing, it allocates a memory
1603 location for each floating-point variable, and the memory
1604 location is what gets described in the DWARF AT_location
1605 attribute for the variable in question.
1606 Regardless of the severe mental illness of the x86/svr4 SDB, we
1607 do something sensible here and we use the following DWARF
1608 register numbers. Note that these are all stack-top-relative
1609 numbers.
1610 11 for %st(0) (gcc regno = 8)
1611 12 for %st(1) (gcc regno = 9)
1612 13 for %st(2) (gcc regno = 10)
1613 14 for %st(3) (gcc regno = 11)
1614 15 for %st(4) (gcc regno = 12)
1615 16 for %st(5) (gcc regno = 13)
1616 17 for %st(6) (gcc regno = 14)
1617 18 for %st(7) (gcc regno = 15)
1619 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1621 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1622 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1623 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1624 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1625 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1626 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1627 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
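/* Worked example (illustrative): gcc regno 1 is %edx and
   svr4_dbx_register_map[1] == 2, matching the SVR4 numbering listed
   above; %st(0) is gcc regno 8 and svr4_dbx_register_map[8] == 11. */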
1630 /* Test and compare insns in i386.md store the information needed to
1631 generate branch and scc insns here. */
1633 rtx ix86_compare_op0 = NULL_RTX;
1634 rtx ix86_compare_op1 = NULL_RTX;
1635 rtx ix86_compare_emitted = NULL_RTX;
1637 /* Define the structure for the machine field in struct function. */
1639 struct stack_local_entry GTY(())
1641 unsigned short mode;
1642 unsigned short n;
1643 rtx rtl;
1644 struct stack_local_entry *next;
1647 /* Structure describing stack frame layout.
1648 Stack grows downward:
1650 [arguments]
1651 <- ARG_POINTER
1652 saved pc
1654 saved frame pointer if frame_pointer_needed
1655 <- HARD_FRAME_POINTER
1656 [saved regs]
1658 [padding1] \
1660 [va_arg registers] (
1661 > to_allocate <- FRAME_POINTER
1662 [frame] (
1664 [padding2] /
1666 struct ix86_frame
1668 int nregs;
1669 int padding1;
1670 int va_arg_size;
1671 HOST_WIDE_INT frame;
1672 int padding2;
1673 int outgoing_arguments_size;
1674 int red_zone_size;
1676 HOST_WIDE_INT to_allocate;
1677 /* The offsets relative to ARG_POINTER. */
1678 HOST_WIDE_INT frame_pointer_offset;
1679 HOST_WIDE_INT hard_frame_pointer_offset;
1680 HOST_WIDE_INT stack_pointer_offset;
1682 /* When save_regs_using_mov is set, emit prologue using
1683 move instead of push instructions. */
1684 bool save_regs_using_mov;
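/* Illustrative sketch only; the real computation lives in
   ix86_compute_frame_layout. Per the diagram above, the to_allocate
   member covers padding1, the va_arg register save area, the frame
   proper and padding2. */
static HOST_WIDE_INT ATTRIBUTE_UNUSED
sketch_frame_to_allocate (const struct ix86_frame *frame)
{
  return frame->padding1 + frame->va_arg_size
	 + frame->frame + frame->padding2;
}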
1687 /* Code model option. */
1688 enum cmodel ix86_cmodel;
1689 /* Asm dialect. */
1690 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1691 /* TLS dialects. */
1692 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1694 /* Which unit we are generating floating point math for. */
1695 enum fpmath_unit ix86_fpmath;
1697 /* Which cpu we are scheduling for. */
1698 enum attr_cpu ix86_schedule;
1700 /* Which cpu we are optimizing for. */
1701 enum processor_type ix86_tune;
1703 /* Which instruction set architecture to use. */
1704 enum processor_type ix86_arch;
1706 /* True if the SSE prefetch instruction is not a NOP. */
1707 int x86_prefetch_sse;
1709 /* ix86_regparm_string as a number */
1710 static int ix86_regparm;
1712 /* -mstackrealign option */
1713 extern int ix86_force_align_arg_pointer;
1714 static const char ix86_force_align_arg_pointer_string[]
1715 = "force_align_arg_pointer";
1717 static rtx (*ix86_gen_leave) (void);
1718 static rtx (*ix86_gen_pop1) (rtx);
1719 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1720 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1721 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx);
1722 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1723 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1724 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1726 /* Preferred alignment for stack boundary in bits. */
1727 unsigned int ix86_preferred_stack_boundary;
1729 /* Alignment for incoming stack boundary in bits, as specified on the
1730 command line. */
1731 static unsigned int ix86_user_incoming_stack_boundary;
1733 /* Default alignment for incoming stack boundary in bits. */
1734 static unsigned int ix86_default_incoming_stack_boundary;
1736 /* Alignment for incoming stack boundary in bits. */
1737 unsigned int ix86_incoming_stack_boundary;
1739 /* Values 1-5: see jump.c */
1740 int ix86_branch_cost;
1742 /* Calling-ABI-specific va_list type nodes. */
1743 static GTY(()) tree sysv_va_list_type_node;
1744 static GTY(()) tree ms_va_list_type_node;
1746 /* Variables which are this size or smaller are put in the data/bss
1747 or ldata/lbss sections. */
1749 int ix86_section_threshold = 65536;
1751 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1752 char internal_label_prefix[16];
1753 int internal_label_prefix_len;
1755 /* Fence to use after a loop using movnt. */
1756 tree x86_mfence;
1758 /* Register class used for passing a given 64bit part of the argument.
1759 These represent classes as documented by the PS ABI, with the exception
1760 of the SSESF and SSEDF classes, which are basically the SSE class; gcc
1761 will just use an SFmode or DFmode move instead of DImode to avoid
1763 reformatting penalties. Similarly we play games with INTEGERSI_CLASS
1764 to use cheaper SImode moves whenever possible (the upper half then holds only padding). */
1765 enum x86_64_reg_class
1767 X86_64_NO_CLASS,
1768 X86_64_INTEGER_CLASS,
1769 X86_64_INTEGERSI_CLASS,
1770 X86_64_AVX_CLASS,
1771 X86_64_SSE_CLASS,
1772 X86_64_SSESF_CLASS,
1773 X86_64_SSEDF_CLASS,
1774 X86_64_SSEUP_CLASS,
1775 X86_64_X87_CLASS,
1776 X86_64_X87UP_CLASS,
1777 X86_64_COMPLEX_X87_CLASS,
1778 X86_64_MEMORY_CLASS
1780 static const char * const x86_64_reg_class_name[] =
1782 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1783 "sseup", "x87", "x87up", "cplx87", "no"
1786 #define MAX_CLASSES 4
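/* Worked example (illustrative, following the psABI rules the classes
   above model): struct { double x; double y; } spans two eightbytes,
   each classified as X86_64_SSEDF_CLASS, so the whole struct is passed
   in two SSE registers. */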
1788 /* Table of constants used by fldpi, fldln2, etc.... */
1789 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1790 static bool ext_80387_constants_init = 0;
1793 static struct machine_function * ix86_init_machine_status (void);
1794 static rtx ix86_function_value (const_tree, const_tree, bool);
1795 static int ix86_function_regparm (const_tree, const_tree);
1796 static void ix86_compute_frame_layout (struct ix86_frame *);
1797 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1798 rtx, rtx, int);
1799 static void ix86_add_new_builtins (int);
1801 enum ix86_function_specific_strings
1803 IX86_FUNCTION_SPECIFIC_ARCH,
1804 IX86_FUNCTION_SPECIFIC_TUNE,
1805 IX86_FUNCTION_SPECIFIC_FPMATH,
1806 IX86_FUNCTION_SPECIFIC_MAX
1809 static char *ix86_target_string (int, int, const char *, const char *,
1810 const char *, bool);
1811 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1812 static void ix86_function_specific_save (struct cl_target_option *);
1813 static void ix86_function_specific_restore (struct cl_target_option *);
1814 static void ix86_function_specific_print (FILE *, int,
1815 struct cl_target_option *);
1816 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1817 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1818 static bool ix86_can_inline_p (tree, tree);
1819 static void ix86_set_current_function (tree);
1822 /* The svr4 ABI for the i386 says that records and unions are returned
1823 in memory. */
1824 #ifndef DEFAULT_PCC_STRUCT_RETURN
1825 #define DEFAULT_PCC_STRUCT_RETURN 1
1826 #endif
1828 /* Whether -mtune= or -march= were specified */
1829 static int ix86_tune_defaulted;
1830 static int ix86_arch_specified;
1832 /* Bit flags that specify the ISA we are compiling for. */
1833 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1835 /* A mask of ix86_isa_flags that includes bit X if X
1836 was set or cleared on the command line. */
1837 static int ix86_isa_flags_explicit;
1839 /* Define a set of ISAs which are available when a given ISA is
1840 enabled. MMX and SSE ISAs are handled separately. */
1842 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1843 #define OPTION_MASK_ISA_3DNOW_SET \
1844 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1846 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1847 #define OPTION_MASK_ISA_SSE2_SET \
1848 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1849 #define OPTION_MASK_ISA_SSE3_SET \
1850 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1851 #define OPTION_MASK_ISA_SSSE3_SET \
1852 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1853 #define OPTION_MASK_ISA_SSE4_1_SET \
1854 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1855 #define OPTION_MASK_ISA_SSE4_2_SET \
1856 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1857 #define OPTION_MASK_ISA_AVX_SET \
1858 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1859 #define OPTION_MASK_ISA_FMA_SET \
1860 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1862 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1863 as -msse4.2. */
1864 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1866 #define OPTION_MASK_ISA_SSE4A_SET \
1867 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1868 #define OPTION_MASK_ISA_SSE5_SET \
1869 (OPTION_MASK_ISA_SSE5 | OPTION_MASK_ISA_SSE4A_SET)
1871 /* AES and PCLMUL need SSE2 because they use xmm registers. */
1872 #define OPTION_MASK_ISA_AES_SET \
1873 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1874 #define OPTION_MASK_ISA_PCLMUL_SET \
1875 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1877 #define OPTION_MASK_ISA_ABM_SET \
1878 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1879 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1880 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1881 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1883 /* Define a set of ISAs which aren't available when a given ISA is
1884 disabled. MMX and SSE ISAs are handled separately. */
1886 #define OPTION_MASK_ISA_MMX_UNSET \
1887 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1888 #define OPTION_MASK_ISA_3DNOW_UNSET \
1889 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1890 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1892 #define OPTION_MASK_ISA_SSE_UNSET \
1893 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1894 #define OPTION_MASK_ISA_SSE2_UNSET \
1895 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
1896 #define OPTION_MASK_ISA_SSE3_UNSET \
1897 (OPTION_MASK_ISA_SSE3 \
1898 | OPTION_MASK_ISA_SSSE3_UNSET \
1899 | OPTION_MASK_ISA_SSE4A_UNSET)
1900 #define OPTION_MASK_ISA_SSSE3_UNSET \
1901 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
1902 #define OPTION_MASK_ISA_SSE4_1_UNSET \
1903 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
1904 #define OPTION_MASK_ISA_SSE4_2_UNSET \
1905 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET)
1906 #define OPTION_MASK_ISA_AVX_UNSET \
1907 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET)
1908 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
1910 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
1911 as -mno-sse4.1. */
1912 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
1914 #define OPTION_MASK_ISA_SSE4A_UNSET \
1915 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE5_UNSET)
1916 #define OPTION_MASK_ISA_SSE5_UNSET OPTION_MASK_ISA_SSE5
1917 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
1918 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
1919 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
1920 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
1921 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
1922 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
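/* A purely illustrative sketch of how the SET/UNSET masks above are
   meant to be used; the real handlers in ix86_handle_option below
   follow exactly this shape. Enabling an ISA also enables everything
   it implies, while disabling one also disables everything that
   depends on it. */
static void ATTRIBUTE_UNUSED
sketch_toggle_sse3 (int value)
{
  if (value)
    {
      ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
    }
  else
    {
      ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
      ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
    }
}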
1924 /* Vectorization library interface and handlers. */
1925 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
1926 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
1927 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
1929 /* Processor target table, indexed by processor number */
1930 struct ptt
1932 const struct processor_costs *cost; /* Processor costs */
1933 const int align_loop; /* Default alignments. */
1934 const int align_loop_max_skip;
1935 const int align_jump;
1936 const int align_jump_max_skip;
1937 const int align_func;
1940 static const struct ptt processor_target_table[PROCESSOR_max] =
1942 {&i386_cost, 4, 3, 4, 3, 4},
1943 {&i486_cost, 16, 15, 16, 15, 16},
1944 {&pentium_cost, 16, 7, 16, 7, 16},
1945 {&pentiumpro_cost, 16, 15, 16, 10, 16},
1946 {&geode_cost, 0, 0, 0, 0, 0},
1947 {&k6_cost, 32, 7, 32, 7, 32},
1948 {&athlon_cost, 16, 7, 16, 7, 16},
1949 {&pentium4_cost, 0, 0, 0, 0, 0},
1950 {&k8_cost, 16, 7, 16, 7, 16},
1951 {&nocona_cost, 0, 0, 0, 0, 0},
1952 {&core2_cost, 16, 10, 16, 10, 16},
1953 {&generic32_cost, 16, 7, 16, 7, 16},
1954 {&generic64_cost, 16, 10, 16, 10, 16},
1955 {&amdfam10_cost, 32, 24, 32, 7, 32}
1958 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
1960 "generic",
1961 "i386",
1962 "i486",
1963 "pentium",
1964 "pentium-mmx",
1965 "pentiumpro",
1966 "pentium2",
1967 "pentium3",
1968 "pentium4",
1969 "pentium-m",
1970 "prescott",
1971 "nocona",
1972 "core2",
1973 "geode",
1974 "k6",
1975 "k6-2",
1976 "k6-3",
1977 "athlon",
1978 "athlon-4",
1979 "k8",
1980 "amdfam10"
1983 /* Implement TARGET_HANDLE_OPTION. */
1985 static bool
1986 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1988 switch (code)
1990 case OPT_mmmx:
1991 if (value)
1993 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
1994 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
1996 else
1998 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
1999 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2001 return true;
2003 case OPT_m3dnow:
2004 if (value)
2006 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2007 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2009 else
2011 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2012 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2014 return true;
2016 case OPT_m3dnowa:
2017 return false;
2019 case OPT_msse:
2020 if (value)
2022 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2023 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2025 else
2027 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2028 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2030 return true;
2032 case OPT_msse2:
2033 if (value)
2035 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2036 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2038 else
2040 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2041 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2043 return true;
2045 case OPT_msse3:
2046 if (value)
2048 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2049 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2051 else
2053 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2054 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2056 return true;
2058 case OPT_mssse3:
2059 if (value)
2061 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2062 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2064 else
2066 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2067 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2069 return true;
2071 case OPT_msse4_1:
2072 if (value)
2074 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2075 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2077 else
2079 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2080 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2082 return true;
2084 case OPT_msse4_2:
2085 if (value)
2087 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2088 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2090 else
2092 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2093 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2095 return true;
2097 case OPT_mavx:
2098 if (value)
2100 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2101 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2103 else
2105 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2106 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2108 return true;
2110 case OPT_mfma:
2111 if (value)
2113 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2114 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2116 else
2118 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2119 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2121 return true;
2123 case OPT_msse4:
2124 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2125 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2126 return true;
2128 case OPT_mno_sse4:
2129 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2130 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2131 return true;
2133 case OPT_msse4a:
2134 if (value)
2136 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2137 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2139 else
2141 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2142 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2144 return true;
2146 case OPT_msse5:
2147 if (value)
2149 ix86_isa_flags |= OPTION_MASK_ISA_SSE5_SET;
2150 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_SET;
2152 else
2154 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE5_UNSET;
2155 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_UNSET;
2157 return true;
2159 case OPT_mabm:
2160 if (value)
2162 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2163 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2165 else
2167 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2168 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2170 return true;
2172 case OPT_mpopcnt:
2173 if (value)
2175 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2176 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2178 else
2180 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2181 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2183 return true;
2185 case OPT_msahf:
2186 if (value)
2188 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2189 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2191 else
2193 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2194 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2196 return true;
2198 case OPT_mcx16:
2199 if (value)
2201 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2202 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2204 else
2206 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2207 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2209 return true;
2211 case OPT_maes:
2212 if (value)
2214 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2215 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2217 else
2219 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2220 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2222 return true;
2224 case OPT_mpclmul:
2225 if (value)
2227 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2228 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2230 else
2232 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2233 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2235 return true;
2237 default:
2238 return true;
2242 /* Return a string that documents the current -m options. The caller is
2243 responsible for freeing the string. */
2245 static char *
2246 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2247 const char *fpmath, bool add_nl_p)
2249 struct ix86_target_opts
2251 const char *option; /* option string */
2252 int mask; /* isa mask options */
2255 /* This table is ordered so that options like -msse5 or -msse4.2 that imply
2256 preceding options are matched first. */
2257 static struct ix86_target_opts isa_opts[] =
2259 { "-m64", OPTION_MASK_ISA_64BIT },
2260 { "-msse5", OPTION_MASK_ISA_SSE5 },
2261 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2262 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2263 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2264 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2265 { "-msse3", OPTION_MASK_ISA_SSE3 },
2266 { "-msse2", OPTION_MASK_ISA_SSE2 },
2267 { "-msse", OPTION_MASK_ISA_SSE },
2268 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2269 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2270 { "-mmmx", OPTION_MASK_ISA_MMX },
2271 { "-mabm", OPTION_MASK_ISA_ABM },
2272 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2273 { "-maes", OPTION_MASK_ISA_AES },
2274 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2277 /* Flag options. */
2278 static struct ix86_target_opts flag_opts[] =
2280 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2281 { "-m80387", MASK_80387 },
2282 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2283 { "-malign-double", MASK_ALIGN_DOUBLE },
2284 { "-mcld", MASK_CLD },
2285 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2286 { "-mieee-fp", MASK_IEEE_FP },
2287 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2288 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2289 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2290 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2291 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2292 { "-mno-fused-madd", MASK_NO_FUSED_MADD },
2293 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2294 { "-mno-red-zone", MASK_NO_RED_ZONE },
2295 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2296 { "-mrecip", MASK_RECIP },
2297 { "-mrtd", MASK_RTD },
2298 { "-msseregparm", MASK_SSEREGPARM },
2299 { "-mstack-arg-probe", MASK_STACK_PROBE },
2300 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2303 const char *opts[ (sizeof (isa_opts) / sizeof (isa_opts[0])
2304 + sizeof (flag_opts) / sizeof (flag_opts[0])
2305 + 6)][2];
2307 char isa_other[40];
2308 char target_other[40];
2309 unsigned num = 0;
2310 unsigned i, j;
2311 char *ret;
2312 char *ptr;
2313 size_t len;
2314 size_t line_len;
2315 size_t sep_len;
2317 memset (opts, '\0', sizeof (opts));
2319 /* Add -march= option. */
2320 if (arch)
2322 opts[num][0] = "-march=";
2323 opts[num++][1] = arch;
2326 /* Add -mtune= option. */
2327 if (tune)
2329 opts[num][0] = "-mtune=";
2330 opts[num++][1] = tune;
2333 /* Pick out the options that are set in the ISA options table. */
2334 for (i = 0; i < sizeof (isa_opts) / sizeof (isa_opts[0]); i++)
2336 if ((isa & isa_opts[i].mask) != 0)
2338 opts[num++][0] = isa_opts[i].option;
2339 isa &= ~ isa_opts[i].mask;
2343 if (isa && add_nl_p)
2345 opts[num++][0] = isa_other;
2346 sprintf (isa_other, "(other isa: 0x%x)", isa);
2349 /* Add flag options. */
2350 for (i = 0; i < sizeof (flag_opts) / sizeof (flag_opts[0]); i++)
2352 if ((flags & flag_opts[i].mask) != 0)
2354 opts[num++][0] = flag_opts[i].option;
2355 flags &= ~ flag_opts[i].mask;
2359 if (flags && add_nl_p)
2361 opts[num++][0] = target_other;
2362 sprintf (target_other, "(other flags: 0x%x)", flags);
2365 /* Add -fpmath= option. */
2366 if (fpmath)
2368 opts[num][0] = "-mfpmath=";
2369 opts[num++][1] = fpmath;
2372 /* Any options? */
2373 if (num == 0)
2374 return NULL;
2376 gcc_assert (num < sizeof (opts) / sizeof (opts[0]));
2378 /* Size the string. */
2379 len = 0;
2380 sep_len = (add_nl_p) ? 3 : 1;
2381 for (i = 0; i < num; i++)
2383 len += sep_len;
2384 for (j = 0; j < 2; j++)
2385 if (opts[i][j])
2386 len += strlen (opts[i][j]);
2389 /* Build the string. */
2390 ret = ptr = (char *) xmalloc (len);
2391 line_len = 0;
2393 for (i = 0; i < num; i++)
2395 size_t len2[2];
2397 for (j = 0; j < 2; j++)
2398 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2400 if (i != 0)
2402 *ptr++ = ' ';
2403 line_len++;
2405 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2407 *ptr++ = '\\';
2408 *ptr++ = '\n';
2409 line_len = 0;
2413 for (j = 0; j < 2; j++)
2414 if (opts[i][j])
2416 memcpy (ptr, opts[i][j], len2[j]);
2417 ptr += len2[j];
2418 line_len += len2[j];
2422 *ptr = '\0';
2423 gcc_assert (ret + len >= ptr);
2425 return ret;
2428 /* Function that is callable from the debugger to print the current
2429 options. */
2430 void
2431 ix86_debug_options (void)
2433 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2434 ix86_arch_string, ix86_tune_string,
2435 ix86_fpmath_string, true);
2437 if (opts)
2439 fprintf (stderr, "%s\n\n", opts);
2440 free (opts);
2442 else
2443 fprintf (stderr, "<no options>\n\n");
2445 return;
2448 /* Sometimes certain combinations of command options do not make
2449 sense on a particular target machine. You can define a macro
2450 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2451 defined, is executed once just after all the command options have
2452 been parsed.
2454 Don't use this macro to turn on various extra optimizations for
2455 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2457 void
2458 override_options (bool main_args_p)
2460 int i;
2461 unsigned int ix86_arch_mask, ix86_tune_mask;
2462 const char *prefix;
2463 const char *suffix;
2464 const char *sw;
2466 /* Comes from final.c -- no real reason to change it. */
2467 #define MAX_CODE_ALIGN 16
2469 enum pta_flags
2471 PTA_SSE = 1 << 0,
2472 PTA_SSE2 = 1 << 1,
2473 PTA_SSE3 = 1 << 2,
2474 PTA_MMX = 1 << 3,
2475 PTA_PREFETCH_SSE = 1 << 4,
2476 PTA_3DNOW = 1 << 5,
2477 PTA_3DNOW_A = 1 << 6,
2478 PTA_64BIT = 1 << 7,
2479 PTA_SSSE3 = 1 << 8,
2480 PTA_CX16 = 1 << 9,
2481 PTA_POPCNT = 1 << 10,
2482 PTA_ABM = 1 << 11,
2483 PTA_SSE4A = 1 << 12,
2484 PTA_NO_SAHF = 1 << 13,
2485 PTA_SSE4_1 = 1 << 14,
2486 PTA_SSE4_2 = 1 << 15,
2487 PTA_SSE5 = 1 << 16,
2488 PTA_AES = 1 << 17,
2489 PTA_PCLMUL = 1 << 18,
2490 PTA_AVX = 1 << 19,
2491 PTA_FMA = 1 << 20
2494 static struct pta
2496 const char *const name; /* processor name or nickname. */
2497 const enum processor_type processor;
2498 const enum attr_cpu schedule;
2499 const unsigned /*enum pta_flags*/ flags;
2501 const processor_alias_table[] =
2503 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2504 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2505 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2506 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2507 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2508 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2509 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2510 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2511 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2512 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2513 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2514 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2515 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2516 PTA_MMX | PTA_SSE},
2517 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2518 PTA_MMX | PTA_SSE},
2519 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2520 PTA_MMX | PTA_SSE | PTA_SSE2},
2521 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2522 PTA_MMX | PTA_SSE | PTA_SSE2},
2523 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2524 PTA_MMX | PTA_SSE | PTA_SSE2},
2525 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2526 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2527 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2528 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2529 | PTA_CX16 | PTA_NO_SAHF},
2530 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2531 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2532 | PTA_SSSE3 | PTA_CX16},
2533 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2534 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2535 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2536 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2537 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2538 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2539 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2540 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2541 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2542 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2543 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2544 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2545 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2546 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2547 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2548 {"x86-64", PROCESSOR_K8, CPU_K8,
2549 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2550 {"k8", PROCESSOR_K8, CPU_K8,
2551 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2552 | PTA_SSE2 | PTA_NO_SAHF},
2553 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2554 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2555 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2556 {"opteron", PROCESSOR_K8, CPU_K8,
2557 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2558 | PTA_SSE2 | PTA_NO_SAHF},
2559 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2560 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2561 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2562 {"athlon64", PROCESSOR_K8, CPU_K8,
2563 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2564 | PTA_SSE2 | PTA_NO_SAHF},
2565 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2566 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2567 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2568 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2569 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2570 | PTA_SSE2 | PTA_NO_SAHF},
2571 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2572 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2573 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2574 {"barcelona", PROCESSOR_AMDFAM10, PROCESSOR_AMDFAM10,
2575 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2576 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2577 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2578 0 /* flags are only used for -march switch. */ },
2579 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2580 PTA_64BIT /* flags are only used for -march switch. */ },
2583 int const pta_size = ARRAY_SIZE (processor_alias_table);
2585 /* Set up prefix/suffix so the error messages refer to either the command
2586 line argument, or the attribute(target). */
2587 if (main_args_p)
2589 prefix = "-m";
2590 suffix = "";
2591 sw = "switch";
2593 else
2595 prefix = "option(\"";
2596 suffix = "\")";
2597 sw = "attribute";
2600 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2601 SUBTARGET_OVERRIDE_OPTIONS;
2602 #endif
2604 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2605 SUBSUBTARGET_OVERRIDE_OPTIONS;
2606 #endif
2608 /* -fPIC is the default for 64-bit Mach-O. */
2609 if (TARGET_MACHO && TARGET_64BIT)
2610 flag_pic = 2;
2612 /* Set the default values for switches whose default depends on TARGET_64BIT
2613 in case they weren't overwritten by command line options. */
2614 if (TARGET_64BIT)
2616 /* Mach-O doesn't support omitting the frame pointer for now. */
2617 if (flag_omit_frame_pointer == 2)
2618 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2619 if (flag_asynchronous_unwind_tables == 2)
2620 flag_asynchronous_unwind_tables = 1;
2621 if (flag_pcc_struct_return == 2)
2622 flag_pcc_struct_return = 0;
2624 else
2626 if (flag_omit_frame_pointer == 2)
2627 flag_omit_frame_pointer = 0;
2628 if (flag_asynchronous_unwind_tables == 2)
2629 flag_asynchronous_unwind_tables = 0;
2630 if (flag_pcc_struct_return == 2)
2631 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2634 /* Need to check -mtune=generic first. */
2635 if (ix86_tune_string)
2637 if (!strcmp (ix86_tune_string, "generic")
2638 || !strcmp (ix86_tune_string, "i686")
2639 /* As special support for cross compilers we read -mtune=native
2640 as -mtune=generic. With native compilers we won't see
2641 -mtune=native, as it was changed by the driver. */
2642 || !strcmp (ix86_tune_string, "native"))
2644 if (TARGET_64BIT)
2645 ix86_tune_string = "generic64";
2646 else
2647 ix86_tune_string = "generic32";
2649 /* If this call is for setting the option attribute, allow the
2650 generic32/generic64 that was previously set. */
2651 else if (!main_args_p
2652 && (!strcmp (ix86_tune_string, "generic32")
2653 || !strcmp (ix86_tune_string, "generic64")))
2655 else if (!strncmp (ix86_tune_string, "generic", 7))
2656 error ("bad value (%s) for %stune=%s %s",
2657 ix86_tune_string, prefix, suffix, sw);
2659 else
2661 if (ix86_arch_string)
2662 ix86_tune_string = ix86_arch_string;
2663 if (!ix86_tune_string)
2665 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2666 ix86_tune_defaulted = 1;
2669 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2670 need to use a sensible tune option. */
2671 if (!strcmp (ix86_tune_string, "generic")
2672 || !strcmp (ix86_tune_string, "x86-64")
2673 || !strcmp (ix86_tune_string, "i686"))
2675 if (TARGET_64BIT)
2676 ix86_tune_string = "generic64";
2677 else
2678 ix86_tune_string = "generic32";
2681 if (ix86_stringop_string)
2683 if (!strcmp (ix86_stringop_string, "rep_byte"))
2684 stringop_alg = rep_prefix_1_byte;
2685 else if (!strcmp (ix86_stringop_string, "libcall"))
2686 stringop_alg = libcall;
2687 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2688 stringop_alg = rep_prefix_4_byte;
2689 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
2690 stringop_alg = rep_prefix_8_byte;
2691 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2692 stringop_alg = loop_1_byte;
2693 else if (!strcmp (ix86_stringop_string, "loop"))
2694 stringop_alg = loop;
2695 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2696 stringop_alg = unrolled_loop;
2697 else
2698 error ("bad value (%s) for %sstringop-strategy=%s %s",
2699 ix86_stringop_string, prefix, suffix, sw);
2701 if (!strcmp (ix86_tune_string, "x86-64"))
2702 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2703 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2704 prefix, suffix, prefix, suffix, prefix, suffix);
2706 if (!ix86_arch_string)
2707 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
2708 else
2709 ix86_arch_specified = 1;
2711 if (!strcmp (ix86_arch_string, "generic"))
2712 error ("generic CPU can be used only for %stune=%s %s",
2713 prefix, suffix, sw);
2714 if (!strncmp (ix86_arch_string, "generic", 7))
2715 error ("bad value (%s) for %sarch=%s %s",
2716 ix86_arch_string, prefix, suffix, sw);
2718 if (ix86_cmodel_string != 0)
2720 if (!strcmp (ix86_cmodel_string, "small"))
2721 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2722 else if (!strcmp (ix86_cmodel_string, "medium"))
2723 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2724 else if (!strcmp (ix86_cmodel_string, "large"))
2725 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2726 else if (flag_pic)
2727 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2728 else if (!strcmp (ix86_cmodel_string, "32"))
2729 ix86_cmodel = CM_32;
2730 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2731 ix86_cmodel = CM_KERNEL;
2732 else
2733 error ("bad value (%s) for %scmodel=%s %s",
2734 ix86_cmodel_string, prefix, suffix, sw);
2736 else
2738 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2739 use of rip-relative addressing. This eliminates fixups that
2740 would otherwise be needed if this object is to be placed in a
2741 DLL, and is essentially just as efficient as direct addressing. */
2742 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2743 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2744 else if (TARGET_64BIT)
2745 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2746 else
2747 ix86_cmodel = CM_32;
2749 if (ix86_asm_string != 0)
2751 if (! TARGET_MACHO
2752 && !strcmp (ix86_asm_string, "intel"))
2753 ix86_asm_dialect = ASM_INTEL;
2754 else if (!strcmp (ix86_asm_string, "att"))
2755 ix86_asm_dialect = ASM_ATT;
2756 else
2757 error ("bad value (%s) for %sasm=%s %s",
2758 ix86_asm_string, prefix, suffix, sw);
2760 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2761 error ("code model %qs not supported in the %s bit mode",
2762 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2763 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2764 sorry ("%i-bit mode not compiled in",
2765 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2767 for (i = 0; i < pta_size; i++)
2768 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2770 ix86_schedule = processor_alias_table[i].schedule;
2771 ix86_arch = processor_alias_table[i].processor;
2772 /* Default cpu tuning to the architecture. */
2773 ix86_tune = ix86_arch;
2775 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2776 error ("CPU you selected does not support x86-64 "
2777 "instruction set");
2779 if (processor_alias_table[i].flags & PTA_MMX
2780 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2781 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2782 if (processor_alias_table[i].flags & PTA_3DNOW
2783 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2784 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2785 if (processor_alias_table[i].flags & PTA_3DNOW_A
2786 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2787 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2788 if (processor_alias_table[i].flags & PTA_SSE
2789 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2790 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2791 if (processor_alias_table[i].flags & PTA_SSE2
2792 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2793 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2794 if (processor_alias_table[i].flags & PTA_SSE3
2795 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2796 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2797 if (processor_alias_table[i].flags & PTA_SSSE3
2798 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2799 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2800 if (processor_alias_table[i].flags & PTA_SSE4_1
2801 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2802 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2803 if (processor_alias_table[i].flags & PTA_SSE4_2
2804 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2805 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2806 if (processor_alias_table[i].flags & PTA_AVX
2807 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2808 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2809 if (processor_alias_table[i].flags & PTA_FMA
2810 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2811 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2812 if (processor_alias_table[i].flags & PTA_SSE4A
2813 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
2814 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
2815 if (processor_alias_table[i].flags & PTA_SSE5
2816 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE5))
2817 ix86_isa_flags |= OPTION_MASK_ISA_SSE5;
2818 if (processor_alias_table[i].flags & PTA_ABM
2819 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
2820 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
2821 if (processor_alias_table[i].flags & PTA_CX16
2822 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
2823 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
2824 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
2825 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
2826 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
2827 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
2828 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
2829 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
2830 if (processor_alias_table[i].flags & PTA_AES
2831 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
2832 ix86_isa_flags |= OPTION_MASK_ISA_AES;
2833 if (processor_alias_table[i].flags & PTA_PCLMUL
2834 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
2835 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
2836 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
2837 x86_prefetch_sse = true;
2839 break;
2842 if (i == pta_size)
2843 error ("bad value (%s) for %sarch=%s %s",
2844 ix86_arch_string, prefix, suffix, sw);
2846 ix86_arch_mask = 1u << ix86_arch;
2847 for (i = 0; i < X86_ARCH_LAST; ++i)
2848 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
2850 for (i = 0; i < pta_size; i++)
2851 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
2853 ix86_schedule = processor_alias_table[i].schedule;
2854 ix86_tune = processor_alias_table[i].processor;
2855 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2857 if (ix86_tune_defaulted)
2859 ix86_tune_string = "x86-64";
2860 for (i = 0; i < pta_size; i++)
2861 if (! strcmp (ix86_tune_string,
2862 processor_alias_table[i].name))
2863 break;
2864 ix86_schedule = processor_alias_table[i].schedule;
2865 ix86_tune = processor_alias_table[i].processor;
2867 else
2868 error ("CPU you selected does not support x86-64 "
2869 "instruction set");
2871 /* Intel CPUs have always interpreted SSE prefetch instructions as
2872 NOPs; so, we can enable SSE prefetch instructions even when
2873 -mtune (rather than -march) points us to a processor that has them.
2874 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
2875 higher processors. */
2876 if (TARGET_CMOVE
2877 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
2878 x86_prefetch_sse = true;
2879 break;
2881 if (i == pta_size)
2882 error ("bad value (%s) for %stune=%s %s",
2883 ix86_tune_string, prefix, suffix, sw);
2885 ix86_tune_mask = 1u << ix86_tune;
2886 for (i = 0; i < X86_TUNE_LAST; ++i)
2887 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
2889 if (optimize_size)
2890 ix86_cost = &ix86_size_cost;
2891 else
2892 ix86_cost = processor_target_table[ix86_tune].cost;
2894 /* Arrange to set up i386_stack_locals for all functions. */
2895 init_machine_status = ix86_init_machine_status;
2897 /* Validate -mregparm= value. */
2898 if (ix86_regparm_string)
2900 if (TARGET_64BIT)
2901 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
2902 i = atoi (ix86_regparm_string);
2903 if (i < 0 || i > REGPARM_MAX)
2904 error ("%sregparm=%d%s is not between 0 and %d",
2905 prefix, i, suffix, REGPARM_MAX);
2906 else
2907 ix86_regparm = i;
2909 if (TARGET_64BIT)
2910 ix86_regparm = REGPARM_MAX;
2912 /* If the user has provided any of the -malign-* options,
2913 warn and use that value only if -falign-* is not set.
2914 Remove this code in GCC 3.2 or later. */
2915 if (ix86_align_loops_string)
2917 warning (0, "%salign-loops%s is obsolete, use -falign-loops",
2918 prefix, suffix);
2919 if (align_loops == 0)
2921 i = atoi (ix86_align_loops_string);
2922 if (i < 0 || i > MAX_CODE_ALIGN)
2923 error ("%salign-loops=%d%s is not between 0 and %d",
2924 prefix, i, suffix, MAX_CODE_ALIGN);
2925 else
2926 align_loops = 1 << i;
2930 if (ix86_align_jumps_string)
2932 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps",
2933 prefix, suffix);
2934 if (align_jumps == 0)
2936 i = atoi (ix86_align_jumps_string);
2937 if (i < 0 || i > MAX_CODE_ALIGN)
2938 error ("%salign-jumps=%d%s is not between 0 and %d",
2939 prefix, i, suffix, MAX_CODE_ALIGN);
2940 else
2941 align_jumps = 1 << i;
2945 if (ix86_align_funcs_string)
2947 warning (0, "%salign-functions%s is obsolete, use -falign-functions",
2948 prefix, suffix);
2949 if (align_functions == 0)
2951 i = atoi (ix86_align_funcs_string);
2952 if (i < 0 || i > MAX_CODE_ALIGN)
2953 error ("%salign-functions=%d%s is not between 0 and %d",
2954 prefix, i, suffix, MAX_CODE_ALIGN);
2955 else
2956 align_functions = 1 << i;
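/* Worked example (illustrative): the -malign-* options take an
   exponent, so i == 4 above becomes align_functions == 1 << 4 == 16
   bytes, whereas the -falign-* options take the byte count directly. */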
2960 /* Default align_* from the processor table. */
2961 if (align_loops == 0)
2963 align_loops = processor_target_table[ix86_tune].align_loop;
2964 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2966 if (align_jumps == 0)
2968 align_jumps = processor_target_table[ix86_tune].align_jump;
2969 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2971 if (align_functions == 0)
2973 align_functions = processor_target_table[ix86_tune].align_func;
2976 /* Validate -mbranch-cost= value, or provide default. */
2977 ix86_branch_cost = ix86_cost->branch_cost;
2978 if (ix86_branch_cost_string)
2980 i = atoi (ix86_branch_cost_string);
2981 if (i < 0 || i > 5)
2982 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
2983 else
2984 ix86_branch_cost = i;
2986 if (ix86_section_threshold_string)
2988 i = atoi (ix86_section_threshold_string);
2989 if (i < 0)
2990 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
2991 else
2992 ix86_section_threshold = i;
2995 if (ix86_tls_dialect_string)
2997 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2998 ix86_tls_dialect = TLS_DIALECT_GNU;
2999 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3000 ix86_tls_dialect = TLS_DIALECT_GNU2;
3001 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
3002 ix86_tls_dialect = TLS_DIALECT_SUN;
3003 else
3004 error ("bad value (%s) for %stls-dialect=%s %s",
3005 ix86_tls_dialect_string, prefix, suffix, sw);
3008 if (ix87_precision_string)
3010 i = atoi (ix87_precision_string);
3011 if (i != 32 && i != 64 && i != 80)
3012 error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
3015 if (TARGET_64BIT)
3017 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3019 /* Enable by default the SSE and MMX builtins. Do allow the user to
3020 explicitly disable any of these. In particular, disabling SSE and
3021 MMX for kernel code is extremely useful. */
3022 if (!ix86_arch_specified)
3023 ix86_isa_flags
3024 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3025 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3027 if (TARGET_RTD)
3028 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3030 else
3032 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3034 if (!ix86_arch_specified)
3035 ix86_isa_flags
3036 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3038 /* The i386 ABI does not specify a red zone. It still makes sense to use it
3039 when the programmer takes care to keep the stack from being destroyed. */
3040 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3041 target_flags |= MASK_NO_RED_ZONE;
3044 /* Keep nonleaf frame pointers. */
3045 if (flag_omit_frame_pointer)
3046 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3047 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3048 flag_omit_frame_pointer = 1;
3050 /* If we're doing fast math, we don't care about comparison order
3051 wrt NaNs. This lets us use a shorter comparison sequence. */
3052 if (flag_finite_math_only)
3053 target_flags &= ~MASK_IEEE_FP;
3055 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3056 since the insns won't need emulation. */
3057 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3058 target_flags &= ~MASK_NO_FANCY_MATH_387;
3060 /* Likewise, if the target doesn't have a 387, or we've specified
3061 software floating point, don't use 387 inline intrinsics. */
3062 if (!TARGET_80387)
3063 target_flags |= MASK_NO_FANCY_MATH_387;
3065 /* Turn on MMX builtins for -msse. */
3066 if (TARGET_SSE)
3068 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3069 x86_prefetch_sse = true;
3072 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3073 if (TARGET_SSE4_2 || TARGET_ABM)
3074 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3076 /* Validate -mpreferred-stack-boundary= value or default it to
3077 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3078 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3079 if (ix86_preferred_stack_boundary_string)
3081 i = atoi (ix86_preferred_stack_boundary_string);
3082 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3083 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3084 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3085 else
3086 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
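/* Worked example (illustrative): -mpreferred-stack-boundary=4 yields
   (1 << 4) * BITS_PER_UNIT == 128 bits, i.e. a 16-byte boundary,
   assuming the usual BITS_PER_UNIT of 8. */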
3089 /* Set the default value for -mstackrealign. */
3090 if (ix86_force_align_arg_pointer == -1)
3091 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3093 /* Validate -mincoming-stack-boundary= value or default it to
3094 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3095 if (ix86_force_align_arg_pointer)
3096 ix86_default_incoming_stack_boundary = MIN_STACK_BOUNDARY;
3097 else
3098 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3099 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3100 if (ix86_incoming_stack_boundary_string)
3102 i = atoi (ix86_incoming_stack_boundary_string);
3103 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3104 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3105 i, TARGET_64BIT ? 4 : 2);
3106 else
3108 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3109 ix86_incoming_stack_boundary
3110 = ix86_user_incoming_stack_boundary;
3114 /* Accept -msseregparm only if at least SSE support is enabled. */
3115 if (TARGET_SSEREGPARM
3116 && ! TARGET_SSE)
3117 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3119 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3120 if (ix86_fpmath_string != 0)
3122 if (! strcmp (ix86_fpmath_string, "387"))
3123 ix86_fpmath = FPMATH_387;
3124 else if (! strcmp (ix86_fpmath_string, "sse"))
3126 if (!TARGET_SSE)
3128 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3129 ix86_fpmath = FPMATH_387;
3131 else
3132 ix86_fpmath = FPMATH_SSE;
3134 else if (! strcmp (ix86_fpmath_string, "387,sse")
3135 || ! strcmp (ix86_fpmath_string, "387+sse")
3136 || ! strcmp (ix86_fpmath_string, "sse,387")
3137 || ! strcmp (ix86_fpmath_string, "sse+387")
3138 || ! strcmp (ix86_fpmath_string, "both"))
3140 if (!TARGET_SSE)
3142 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3143 ix86_fpmath = FPMATH_387;
3145 else if (!TARGET_80387)
3147 warning (0, "387 instruction set disabled, using SSE arithmetics");
3148 ix86_fpmath = FPMATH_SSE;
3150 else
3151 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3153 else
3154 error ("bad value (%s) for %sfpmath=%s %s",
3155 ix86_fpmath_string, prefix, suffix, sw);
3158 /* If the i387 is disabled, then do not return values in it. */
3159 if (!TARGET_80387)
3160 target_flags &= ~MASK_FLOAT_RETURNS;
3162 /* Use an external vectorized library when vectorizing intrinsics. */
3163 if (ix86_veclibabi_string)
3165 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3166 ix86_veclib_handler = ix86_veclibabi_svml;
3167 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3168 ix86_veclib_handler = ix86_veclibabi_acml;
3169 else
3170 error ("unknown vectorization library ABI type (%s) for "
3171 "%sveclibabi=%s %s", ix86_veclibabi_string,
3172 prefix, suffix, sw);
3175 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3176 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3177 && !optimize_size)
3178 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3180 /* ??? Unwind info is not correct around the CFG unless either a frame
3181 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3182 unwind info generation to be aware of the CFG and propagating states
3183 around edges. */
3184 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3185 || flag_exceptions || flag_non_call_exceptions)
3186 && flag_omit_frame_pointer
3187 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3189 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3190 warning (0, "unwind tables currently require either a frame pointer "
3191 "or %saccumulate-outgoing-args%s for correctness",
3192 prefix, suffix);
3193 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3196 /* If stack probes are required, the space used for large function
3197 arguments on the stack must also be probed, so enable
3198 -maccumulate-outgoing-args so this happens in the prologue. */
3199 if (TARGET_STACK_PROBE
3200 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3202 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3203 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3204 "for correctness", prefix, suffix);
3205 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3208 /* For sane SSE instruction set generation we need the fcomi instruction.
3209 It is safe to enable all CMOVE instructions. */
3210 if (TARGET_SSE)
3211 TARGET_CMOVE = 1;
3213 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3215 char *p;
3216 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3217 p = strchr (internal_label_prefix, 'X');
3218 internal_label_prefix_len = p - internal_label_prefix;
3219 *p = '\0';
3222 /* When a scheduling description is not available, disable the scheduler pass
3223 so it won't slow down the compilation and make x87 code slower. */
3224 if (!TARGET_SCHEDULE)
3225 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3227 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3228 set_param_value ("simultaneous-prefetches",
3229 ix86_cost->simultaneous_prefetches);
3230 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3231 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3232 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3233 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3234 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3235 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3237 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3238 can be optimized to ap = __builtin_next_arg (0). */
3239 if (!TARGET_64BIT)
3240 targetm.expand_builtin_va_start = NULL;
3242 if (TARGET_64BIT)
3244 ix86_gen_leave = gen_leave_rex64;
3245 ix86_gen_pop1 = gen_popdi1;
3246 ix86_gen_add3 = gen_adddi3;
3247 ix86_gen_sub3 = gen_subdi3;
3248 ix86_gen_sub3_carry = gen_subdi3_carry_rex64;
3249 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3250 ix86_gen_monitor = gen_sse3_monitor64;
3251 ix86_gen_andsp = gen_anddi3;
3253 else
3255 ix86_gen_leave = gen_leave;
3256 ix86_gen_pop1 = gen_popsi1;
3257 ix86_gen_add3 = gen_addsi3;
3258 ix86_gen_sub3 = gen_subsi3;
3259 ix86_gen_sub3_carry = gen_subsi3_carry;
3260 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3261 ix86_gen_monitor = gen_sse3_monitor;
3262 ix86_gen_andsp = gen_andsi3;
3265 #ifdef USE_IX86_CLD
3266 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3267 if (!TARGET_64BIT)
3268 target_flags |= MASK_CLD & ~target_flags_explicit;
3269 #endif
3271 /* Save the initial options in case the user uses function-specific options. */
3272 if (main_args_p)
3273 target_option_default_node = target_option_current_node
3274 = build_target_option_node ();
3277 /* Save the current options */
3279 static void
3280 ix86_function_specific_save (struct cl_target_option *ptr)
3282 gcc_assert (IN_RANGE (ix86_arch, 0, 255));
3283 gcc_assert (IN_RANGE (ix86_schedule, 0, 255));
3284 gcc_assert (IN_RANGE (ix86_tune, 0, 255));
3285 gcc_assert (IN_RANGE (ix86_fpmath, 0, 255));
3286 gcc_assert (IN_RANGE (ix86_branch_cost, 0, 255));
3288 ptr->arch = ix86_arch;
3289 ptr->schedule = ix86_schedule;
3290 ptr->tune = ix86_tune;
3291 ptr->fpmath = ix86_fpmath;
3292 ptr->branch_cost = ix86_branch_cost;
3293 ptr->tune_defaulted = ix86_tune_defaulted;
3294 ptr->arch_specified = ix86_arch_specified;
3295 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3296 ptr->target_flags_explicit = target_flags_explicit;
3299 /* Restore the current options */
3301 static void
3302 ix86_function_specific_restore (struct cl_target_option *ptr)
3304 enum processor_type old_tune = ix86_tune;
3305 enum processor_type old_arch = ix86_arch;
3306 unsigned int ix86_arch_mask, ix86_tune_mask;
3307 int i;
3309 ix86_arch = ptr->arch;
3310 ix86_schedule = ptr->schedule;
3311 ix86_tune = ptr->tune;
3312 ix86_fpmath = ptr->fpmath;
3313 ix86_branch_cost = ptr->branch_cost;
3314 ix86_tune_defaulted = ptr->tune_defaulted;
3315 ix86_arch_specified = ptr->arch_specified;
3316 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3317 target_flags_explicit = ptr->target_flags_explicit;
3319 /* Recreate the arch feature tests if the arch changed */
3320 if (old_arch != ix86_arch)
3322 ix86_arch_mask = 1u << ix86_arch;
3323 for (i = 0; i < X86_ARCH_LAST; ++i)
3324 ix86_arch_features[i]
3325 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3328 /* Recreate the tune optimization tests */
3329 if (old_tune != ix86_tune)
3331 ix86_tune_mask = 1u << ix86_tune;
3332 for (i = 0; i < X86_TUNE_LAST; ++i)
3333 ix86_tune_features[i]
3334 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3338 /* Print the current options */
3340 static void
3341 ix86_function_specific_print (FILE *file, int indent,
3342 struct cl_target_option *ptr)
3344 char *target_string
3345 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3346 NULL, NULL, NULL, false);
3348 fprintf (file, "%*sarch = %d (%s)\n",
3349 indent, "",
3350 ptr->arch,
3351 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3352 ? cpu_names[ptr->arch]
3353 : "<unknown>"));
3355 fprintf (file, "%*stune = %d (%s)\n",
3356 indent, "",
3357 ptr->tune,
3358 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3359 ? cpu_names[ptr->tune]
3360 : "<unknown>"));
3362 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3363 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3364 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3365 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3367 if (target_string)
3369 fprintf (file, "%*s%s\n", indent, "", target_string);
3370 free (target_string);
3375 /* Inner function to process attribute((target(...))): take one argument
3376 and set the current options from it. If we have a list, recursively go
3377 over the list. */
3379 static bool
3380 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3382 char *next_optstr;
3383 bool ret = true;
3385 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3386 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3387 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3388 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3390 enum ix86_opt_type
3392 ix86_opt_unknown,
3393 ix86_opt_yes,
3394 ix86_opt_no,
3395 ix86_opt_str,
3396 ix86_opt_isa
3399 static const struct
3401 const char *string;
3402 size_t len;
3403 enum ix86_opt_type type;
3404 int opt;
3405 int mask;
3406 } attrs[] = {
3407 /* isa options */
3408 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3409 IX86_ATTR_ISA ("abm", OPT_mabm),
3410 IX86_ATTR_ISA ("aes", OPT_maes),
3411 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3412 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3413 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3414 IX86_ATTR_ISA ("sse", OPT_msse),
3415 IX86_ATTR_ISA ("sse2", OPT_msse2),
3416 IX86_ATTR_ISA ("sse3", OPT_msse3),
3417 IX86_ATTR_ISA ("sse4", OPT_msse4),
3418 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3419 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3420 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3421 IX86_ATTR_ISA ("sse5", OPT_msse5),
3422 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3424 /* string options */
3425 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3426 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3427 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3429 /* flag options */
3430 IX86_ATTR_YES ("cld",
3431 OPT_mcld,
3432 MASK_CLD),
3434 IX86_ATTR_NO ("fancy-math-387",
3435 OPT_mfancy_math_387,
3436 MASK_NO_FANCY_MATH_387),
3438 IX86_ATTR_NO ("fused-madd",
3439 OPT_mfused_madd,
3440 MASK_NO_FUSED_MADD),
3442 IX86_ATTR_YES ("ieee-fp",
3443 OPT_mieee_fp,
3444 MASK_IEEE_FP),
3446 IX86_ATTR_YES ("inline-all-stringops",
3447 OPT_minline_all_stringops,
3448 MASK_INLINE_ALL_STRINGOPS),
3450 IX86_ATTR_YES ("inline-stringops-dynamically",
3451 OPT_minline_stringops_dynamically,
3452 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3454 IX86_ATTR_NO ("align-stringops",
3455 OPT_mno_align_stringops,
3456 MASK_NO_ALIGN_STRINGOPS),
3458 IX86_ATTR_YES ("recip",
3459 OPT_mrecip,
3460 MASK_RECIP),
3464 /* If this is a list, recurse to get the options. */
3465 if (TREE_CODE (args) == TREE_LIST)
3467 bool ret = true;
3469 for (; args; args = TREE_CHAIN (args))
3470 if (TREE_VALUE (args)
3471 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3472 ret = false;
3474 return ret;
3477 else if (TREE_CODE (args) != STRING_CST)
3478 gcc_unreachable ();
3480 /* Handle multiple arguments separated by commas. */
3481 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3483 while (next_optstr && *next_optstr != '\0')
3485 char *p = next_optstr;
3486 char *orig_p = p;
3487 char *comma = strchr (next_optstr, ',');
3488 const char *opt_string;
3489 size_t len, opt_len;
3490 int opt;
3491 bool opt_set_p;
3492 char ch;
3493 unsigned i;
3494 enum ix86_opt_type type = ix86_opt_unknown;
3495 int mask = 0;
3497 if (comma)
3499 *comma = '\0';
3500 len = comma - next_optstr;
3501 next_optstr = comma + 1;
3503 else
3505 len = strlen (p);
3506 next_optstr = NULL;
3509 /* Recognize no-xxx. */
3510 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3512 opt_set_p = false;
3513 p += 3;
3514 len -= 3;
3516 else
3517 opt_set_p = true;
3519 /* Find the option. */
3520 ch = *p;
3521 opt = N_OPTS;
3522 for (i = 0; i < sizeof (attrs) / sizeof (attrs[0]); i++)
3524 type = attrs[i].type;
3525 opt_len = attrs[i].len;
3526 if (ch == attrs[i].string[0]
3527 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3528 && memcmp (p, attrs[i].string, opt_len) == 0)
3530 opt = attrs[i].opt;
3531 mask = attrs[i].mask;
3532 opt_string = attrs[i].string;
3533 break;
3537 /* Process the option. */
3538 if (opt == N_OPTS)
3540 error ("attribute(target(\"%s\")) is unknown", orig_p);
3541 ret = false;
3544 else if (type == ix86_opt_isa)
3545 ix86_handle_option (opt, p, opt_set_p);
3547 else if (type == ix86_opt_yes || type == ix86_opt_no)
3549 if (type == ix86_opt_no)
3550 opt_set_p = !opt_set_p;
3552 if (opt_set_p)
3553 target_flags |= mask;
3554 else
3555 target_flags &= ~mask;
3558 else if (type == ix86_opt_str)
3560 if (p_strings[opt])
3562 error ("option(\"%s\") was already specified", opt_string);
3563 ret = false;
3565 else
3566 p_strings[opt] = xstrdup (p + opt_len);
3569 else
3570 gcc_unreachable ();
3573 return ret;
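/* Editorial example (a sketch, not part of the original source): the
   strings parsed above combine ISA names, "no-" negations and string
   options, as in

       __attribute__((target("sse4.2,no-fused-madd,arch=core2")))
       int foo (void);

   Each comma-separated item is looked up in attrs[], and a leading
   "no-" flips opt_set_p as handled above.  */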
3576 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3578 tree
3579 ix86_valid_target_attribute_tree (tree args)
3581 const char *orig_arch_string = ix86_arch_string;
3582 const char *orig_tune_string = ix86_tune_string;
3583 const char *orig_fpmath_string = ix86_fpmath_string;
3584 int orig_tune_defaulted = ix86_tune_defaulted;
3585 int orig_arch_specified = ix86_arch_specified;
3586 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3587 tree t = NULL_TREE;
3588 int i;
3589 struct cl_target_option *def
3590 = TREE_TARGET_OPTION (target_option_default_node);
3592 /* Process each of the options on the chain. */
3593 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3594 return NULL_TREE;
3596 /* If the changed options are different from the default, rerun override_options,
3597 and then save the options away. The string options are attribute options,
3598 and will be undone when we copy the save structure. */
3599 if (ix86_isa_flags != def->ix86_isa_flags
3600 || target_flags != def->target_flags
3601 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3602 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3603 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3605 /* If we are using the default tune= or arch=, undo the string assigned,
3606 and use the default. */
3607 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3608 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3609 else if (!orig_arch_specified)
3610 ix86_arch_string = NULL;
3612 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3613 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3614 else if (orig_tune_defaulted)
3615 ix86_tune_string = NULL;
3617 /* If fpmath= is not set, and we now have SSE on 32-bit, use it. */
3618 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3619 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3620 else if (!TARGET_64BIT && TARGET_SSE)
3621 ix86_fpmath_string = "sse,387";
3623 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3624 override_options (false);
3626 /* Add any builtin functions with the new isa if any. */
3627 ix86_add_new_builtins (ix86_isa_flags);
3629 /* Save the current options unless we are validating options for
3630 #pragma. */
3631 t = build_target_option_node ();
3633 ix86_arch_string = orig_arch_string;
3634 ix86_tune_string = orig_tune_string;
3635 ix86_fpmath_string = orig_fpmath_string;
3637 /* Free up memory allocated to hold the strings */
3638 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3639 if (option_strings[i])
3640 free (option_strings[i]);
3643 return t;
3646 /* Hook to validate attribute((target("string"))). */
3648 static bool
3649 ix86_valid_target_attribute_p (tree fndecl,
3650 tree ARG_UNUSED (name),
3651 tree args,
3652 int ARG_UNUSED (flags))
3654 struct cl_target_option cur_target;
3655 bool ret = true;
3656 tree old_optimize = build_optimization_node ();
3657 tree new_target, new_optimize;
3658 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3660 /* If the function changed the optimization levels as well as setting target
3661 options, start with the optimizations specified. */
3662 if (func_optimize && func_optimize != old_optimize)
3663 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3665 /* The target attributes may also change some optimization flags, so update
3666 the optimization options if necessary. */
3667 cl_target_option_save (&cur_target);
3668 new_target = ix86_valid_target_attribute_tree (args);
3669 new_optimize = build_optimization_node ();
3671 if (!new_target)
3672 ret = false;
3674 else if (fndecl)
3676 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3678 if (old_optimize != new_optimize)
3679 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3682 cl_target_option_restore (&cur_target);
3684 if (old_optimize != new_optimize)
3685 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3687 return ret;
3691 /* Hook to determine if one function can safely inline another. */
3693 static bool
3694 ix86_can_inline_p (tree caller, tree callee)
3696 bool ret = false;
3697 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3698 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3700 /* If callee has no option attributes, then it is ok to inline. */
3701 if (!callee_tree)
3702 ret = true;
4704 /* If the caller has no option attributes but the callee does, then it
4705 is not ok to inline. */
3706 else if (!caller_tree)
3707 ret = false;
3709 else
3711 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3712 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4714 /* Callee's isa options should be a subset of the caller's, i.e. an SSE5
4715 function can inline an SSE2 function but an SSE2 function can't inline
4716 an SSE5 function. */
3717 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3718 != callee_opts->ix86_isa_flags)
3719 ret = false;
3721 /* See if we have the same non-isa options. */
3722 else if (caller_opts->target_flags != callee_opts->target_flags)
3723 ret = false;
3725 /* See if arch, tune, etc. are the same. */
3726 else if (caller_opts->arch != callee_opts->arch)
3727 ret = false;
3729 else if (caller_opts->tune != callee_opts->tune)
3730 ret = false;
3732 else if (caller_opts->fpmath != callee_opts->fpmath)
3733 ret = false;
3735 else if (caller_opts->branch_cost != callee_opts->branch_cost)
3736 ret = false;
3738 else
3739 ret = true;
3742 return ret;
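/* Editorial example (a sketch, not part of the original source): under
   the subset rule above, a caller built with
   attribute((target("sse5"))) may inline a callee restricted to
   attribute((target("sse2"))), since the callee's ISA flags are a
   subset of the caller's; the reverse inlining is refused.  */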
3746 /* Remember the last target of ix86_set_current_function. */
3747 static GTY(()) tree ix86_previous_fndecl;
3749 /* Establish appropriate back-end context for processing the function
3750 FNDECL. The argument might be NULL to indicate processing at top
3751 level, outside of any function scope. */
3752 static void
3753 ix86_set_current_function (tree fndecl)
3755 /* Only change the context if the function changes. This hook is called
3756 several times in the course of compiling a function, and we don't want to
3757 slow things down too much or call target_reinit when it isn't safe. */
3758 if (fndecl && fndecl != ix86_previous_fndecl)
3760 tree old_tree = (ix86_previous_fndecl
3761 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
3762 : NULL_TREE);
3764 tree new_tree = (fndecl
3765 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
3766 : NULL_TREE);
3768 ix86_previous_fndecl = fndecl;
3769 if (old_tree == new_tree)
3772 else if (new_tree)
3774 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
3775 target_reinit ();
3778 else if (old_tree)
3780 struct cl_target_option *def
3781 = TREE_TARGET_OPTION (target_option_current_node);
3783 cl_target_option_restore (def);
3784 target_reinit ();
3790 /* Return true if this goes in large data/bss. */
3792 static bool
3793 ix86_in_large_data_p (tree exp)
3795 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
3796 return false;
3798 /* Functions are never large data. */
3799 if (TREE_CODE (exp) == FUNCTION_DECL)
3800 return false;
3802 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
3804 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
3805 if (strcmp (section, ".ldata") == 0
3806 || strcmp (section, ".lbss") == 0)
3807 return true;
3808 return false;
3810 else
3812 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
3814 /* If this is an incomplete type with size 0, then we can't put it
3815 in data because it might be too big when completed. */
3816 if (!size || size > ix86_section_threshold)
3817 return true;
3820 return false;
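/* Editorial example (a sketch, not part of the original source): with
   -mcmodel=medium, a definition such as

       static char big_table[1 << 20];

   whose size exceeds ix86_section_threshold is treated as large data
   by the test above and is therefore placed in .ldata/.lbss instead of
   the default sections.  */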
3823 /* Switch to the appropriate section for output of DECL.
3824 DECL is either a `VAR_DECL' node or a constant of some sort.
3825 RELOC indicates whether forming the initial value of DECL requires
3826 link-time relocations. */
3828 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
3829 ATTRIBUTE_UNUSED;
3831 static section *
3832 x86_64_elf_select_section (tree decl, int reloc,
3833 unsigned HOST_WIDE_INT align)
3835 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3836 && ix86_in_large_data_p (decl))
3838 const char *sname = NULL;
3839 unsigned int flags = SECTION_WRITE;
3840 switch (categorize_decl_for_section (decl, reloc))
3842 case SECCAT_DATA:
3843 sname = ".ldata";
3844 break;
3845 case SECCAT_DATA_REL:
3846 sname = ".ldata.rel";
3847 break;
3848 case SECCAT_DATA_REL_LOCAL:
3849 sname = ".ldata.rel.local";
3850 break;
3851 case SECCAT_DATA_REL_RO:
3852 sname = ".ldata.rel.ro";
3853 break;
3854 case SECCAT_DATA_REL_RO_LOCAL:
3855 sname = ".ldata.rel.ro.local";
3856 break;
3857 case SECCAT_BSS:
3858 sname = ".lbss";
3859 flags |= SECTION_BSS;
3860 break;
3861 case SECCAT_RODATA:
3862 case SECCAT_RODATA_MERGE_STR:
3863 case SECCAT_RODATA_MERGE_STR_INIT:
3864 case SECCAT_RODATA_MERGE_CONST:
3865 sname = ".lrodata";
3866 flags = 0;
3867 break;
3868 case SECCAT_SRODATA:
3869 case SECCAT_SDATA:
3870 case SECCAT_SBSS:
3871 gcc_unreachable ();
3872 case SECCAT_TEXT:
3873 case SECCAT_TDATA:
3874 case SECCAT_TBSS:
3875 /* We don't split these for the medium model. Place them into
3876 default sections and hope for the best. */
3877 break;
3878 case SECCAT_EMUTLS_VAR:
3879 case SECCAT_EMUTLS_TMPL:
3880 gcc_unreachable ();
3882 if (sname)
3884 /* We might get called with string constants, but get_named_section
3885 doesn't like them as they are not DECLs. Also, we need to set
3886 flags in that case. */
3887 if (!DECL_P (decl))
3888 return get_section (sname, flags, NULL);
3889 return get_named_section (decl, sname, reloc);
3892 return default_elf_select_section (decl, reloc, align);
3895 /* Build up a unique section name, expressed as a
3896 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
3897 RELOC indicates whether the initial value of DECL requires
3898 link-time relocations. */
3900 static void ATTRIBUTE_UNUSED
3901 x86_64_elf_unique_section (tree decl, int reloc)
3903 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3904 && ix86_in_large_data_p (decl))
3906 const char *prefix = NULL;
3907 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
3908 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
3910 switch (categorize_decl_for_section (decl, reloc))
3912 case SECCAT_DATA:
3913 case SECCAT_DATA_REL:
3914 case SECCAT_DATA_REL_LOCAL:
3915 case SECCAT_DATA_REL_RO:
3916 case SECCAT_DATA_REL_RO_LOCAL:
3917 prefix = one_only ? ".ld" : ".ldata";
3918 break;
3919 case SECCAT_BSS:
3920 prefix = one_only ? ".lb" : ".lbss";
3921 break;
3922 case SECCAT_RODATA:
3923 case SECCAT_RODATA_MERGE_STR:
3924 case SECCAT_RODATA_MERGE_STR_INIT:
3925 case SECCAT_RODATA_MERGE_CONST:
3926 prefix = one_only ? ".lr" : ".lrodata";
3927 break;
3928 case SECCAT_SRODATA:
3929 case SECCAT_SDATA:
3930 case SECCAT_SBSS:
3931 gcc_unreachable ();
3932 case SECCAT_TEXT:
3933 case SECCAT_TDATA:
3934 case SECCAT_TBSS:
3935 /* We don't split these for the medium model. Place them into
3936 default sections and hope for the best. */
3937 break;
3938 case SECCAT_EMUTLS_VAR:
3939 prefix = targetm.emutls.var_section;
3940 break;
3941 case SECCAT_EMUTLS_TMPL:
3942 prefix = targetm.emutls.tmpl_section;
3943 break;
3945 if (prefix)
3947 const char *name, *linkonce;
3948 char *string;
3950 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3951 name = targetm.strip_name_encoding (name);
3953 /* If we're using one_only, then there needs to be a .gnu.linkonce
3954 prefix to the section name. */
3955 linkonce = one_only ? ".gnu.linkonce" : "";
3957 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
3959 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
3960 return;
3963 default_unique_section (decl, reloc);
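/* Editorial example (a sketch, not part of the original source): for a
   large-data variable foo in the medium model, the code above builds
   section names such as

       .ldata.foo            for an ordinary definition
       .gnu.linkonce.ld.foo  for a one-only definition without COMDAT

   mirroring default_unique_section but with the .l* prefixes.  */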
3966 #ifdef COMMON_ASM_OP
3967 /* This says how to output assembler code to declare an
3968 uninitialized external linkage data object.
3970 For medium-model x86-64 we need to use the .largecomm pseudo-op for
3971 large objects. */
3972 void
3973 x86_elf_aligned_common (FILE *file,
3974 const char *name, unsigned HOST_WIDE_INT size,
3975 int align)
3977 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3978 && size > (unsigned int)ix86_section_threshold)
3979 fprintf (file, ".largecomm\t");
3980 else
3981 fprintf (file, "%s", COMMON_ASM_OP);
3982 assemble_name (file, name);
3983 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
3984 size, align / BITS_PER_UNIT);
3986 #endif
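/* Editorial example (a sketch, not part of the original source):
   assuming a 128 KiB common object foo with 32-byte alignment in the
   medium model, the routine above emits

       .largecomm	foo,131072,32

   while smaller objects take the ordinary COMMON_ASM_OP path.  */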
3988 /* Utility function for targets to use in implementing
3989 ASM_OUTPUT_ALIGNED_BSS. */
3991 void
3992 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
3993 const char *name, unsigned HOST_WIDE_INT size,
3994 int align)
3996 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3997 && size > (unsigned int)ix86_section_threshold)
3998 switch_to_section (get_named_section (decl, ".lbss", 0));
3999 else
4000 switch_to_section (bss_section);
4001 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4002 #ifdef ASM_DECLARE_OBJECT_NAME
4003 last_assemble_variable_decl = decl;
4004 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4005 #else
4006 /* The standard thing is to just output a label for the object. */
4007 ASM_OUTPUT_LABEL (file, name);
4008 #endif /* ASM_DECLARE_OBJECT_NAME */
4009 ASM_OUTPUT_SKIP (file, size ? size : 1);
4012 void
4013 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4015 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4016 make the problem with not enough registers even worse. */
4017 #ifdef INSN_SCHEDULING
4018 if (level > 1)
4019 flag_schedule_insns = 0;
4020 #endif
4022 if (TARGET_MACHO)
4023 /* The Darwin libraries never set errno, so we might as well
4024 avoid calling them when that's the only reason we would. */
4025 flag_errno_math = 0;
4027 /* The default values of these switches depend on TARGET_64BIT, which is
4028 not known at this moment. Mark these values with 2 and let the user
4029 override them. In case there is no command line option
4030 specifying them, we will set the defaults in override_options. */
4031 if (optimize >= 1)
4032 flag_omit_frame_pointer = 2;
4033 flag_pcc_struct_return = 2;
4034 flag_asynchronous_unwind_tables = 2;
4035 flag_vect_cost_model = 1;
4036 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4037 SUBTARGET_OPTIMIZATION_OPTIONS;
4038 #endif
4041 /* Decide whether we can make a sibling call to a function. DECL is the
4042 declaration of the function being targeted by the call and EXP is the
4043 CALL_EXPR representing the call. */
4045 static bool
4046 ix86_function_ok_for_sibcall (tree decl, tree exp)
4048 tree func;
4049 rtx a, b;
4051 /* If we are generating position-independent code, we cannot sibcall
4052 optimize any indirect call, or a direct call to a global function,
4053 as the PLT requires %ebx be live. */
4054 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4055 return false;
4057 if (decl)
4058 func = decl;
4059 else
4061 func = TREE_TYPE (CALL_EXPR_FN (exp));
4062 if (POINTER_TYPE_P (func))
4063 func = TREE_TYPE (func);
4066 /* Check that the return value locations are the same. For example,
4067 if we are returning floats on the 80387 register stack, we cannot
4068 make a sibcall from a function that doesn't return a float to a
4069 function that does or, conversely, from a function that does return
4070 a float to a function that doesn't; the necessary stack adjustment
4071 would not be executed. This is also the place we notice
4072 differences in the return value ABI. Note that it is ok for one
4073 of the functions to have void return type as long as the return
4074 value of the other is passed in a register. */
4075 a = ix86_function_value (TREE_TYPE (exp), func, false);
4076 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4077 cfun->decl, false);
4078 if (STACK_REG_P (a) || STACK_REG_P (b))
4080 if (!rtx_equal_p (a, b))
4081 return false;
4083 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4085 else if (!rtx_equal_p (a, b))
4086 return false;
4088 /* If this call is indirect, we'll need to be able to use a call-clobbered
4089 register for the address of the target function. Make sure that all
4090 such registers are not used for passing parameters. */
4091 if (!decl && !TARGET_64BIT)
4093 tree type;
4095 /* We're looking at the CALL_EXPR, we need the type of the function. */
4096 type = CALL_EXPR_FN (exp); /* pointer expression */
4097 type = TREE_TYPE (type); /* pointer type */
4098 type = TREE_TYPE (type); /* function type */
4100 if (ix86_function_regparm (type, NULL) >= 3)
4102 /* ??? Need to count the actual number of registers to be used,
4103 not the possible number of registers. Fix later. */
4104 return false;
4108 /* Dllimport'd functions are also called indirectly. */
4109 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
4110 && !TARGET_64BIT
4111 && decl && DECL_DLLIMPORT_P (decl)
4112 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
4113 return false;
4115 /* If we need to align the outgoing stack, then sibcalling would
4116 unalign the stack, which may break the called function. */
4117 if (ix86_incoming_stack_boundary < PREFERRED_STACK_BOUNDARY)
4118 return false;
4120 /* Otherwise okay. That also includes certain types of indirect calls. */
4121 return true;
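/* Editorial example (a sketch, not part of the original source): in
   32-bit PIC code the first test above rejects sibcalls through the
   PLT because the PLT requires %ebx to be live, so

       extern int bar (int);
       int foo (int x) { return bar (x); }

   compiled with -m32 -fpic uses a normal call unless bar binds
   locally.  */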
4124 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
4125 calling convention attributes;
4126 arguments as in struct attribute_spec.handler. */
4128 static tree
4129 ix86_handle_cconv_attribute (tree *node, tree name,
4130 tree args,
4131 int flags ATTRIBUTE_UNUSED,
4132 bool *no_add_attrs)
4134 if (TREE_CODE (*node) != FUNCTION_TYPE
4135 && TREE_CODE (*node) != METHOD_TYPE
4136 && TREE_CODE (*node) != FIELD_DECL
4137 && TREE_CODE (*node) != TYPE_DECL)
4139 warning (OPT_Wattributes, "%qs attribute only applies to functions",
4140 IDENTIFIER_POINTER (name));
4141 *no_add_attrs = true;
4142 return NULL_TREE;
4145 /* Can combine regparm with all attributes but fastcall. */
4146 if (is_attribute_p ("regparm", name))
4148 tree cst;
4150 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4152 error ("fastcall and regparm attributes are not compatible");
4155 cst = TREE_VALUE (args);
4156 if (TREE_CODE (cst) != INTEGER_CST)
4158 warning (OPT_Wattributes,
4159 "%qs attribute requires an integer constant argument",
4160 IDENTIFIER_POINTER (name));
4161 *no_add_attrs = true;
4163 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4165 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
4166 IDENTIFIER_POINTER (name), REGPARM_MAX);
4167 *no_add_attrs = true;
4170 return NULL_TREE;
4173 if (TARGET_64BIT)
4175 /* Do not warn when emulating the MS ABI. */
4176 if (TREE_CODE (*node) != FUNCTION_TYPE || ix86_function_type_abi (*node) != MS_ABI)
4177 warning (OPT_Wattributes, "%qs attribute ignored",
4178 IDENTIFIER_POINTER (name));
4179 *no_add_attrs = true;
4180 return NULL_TREE;
4183 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4184 if (is_attribute_p ("fastcall", name))
4186 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4188 error ("fastcall and cdecl attributes are not compatible");
4190 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4192 error ("fastcall and stdcall attributes are not compatible");
4194 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4196 error ("fastcall and regparm attributes are not compatible");
4200 /* Can combine stdcall with fastcall (redundant), regparm and
4201 sseregparm. */
4202 else if (is_attribute_p ("stdcall", name))
4204 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4206 error ("stdcall and cdecl attributes are not compatible");
4208 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4210 error ("stdcall and fastcall attributes are not compatible");
4214 /* Can combine cdecl with regparm and sseregparm. */
4215 else if (is_attribute_p ("cdecl", name))
4217 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4219 error ("stdcall and cdecl attributes are not compatible");
4221 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4223 error ("fastcall and cdecl attributes are not compatible");
4227 /* Can combine sseregparm with all attributes. */
4229 return NULL_TREE;
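/* Editorial example (a sketch, not part of the original source): the
   checks above permit combinations such as

       int __attribute__((stdcall, regparm(2))) f (int, int);

   but reject fastcall together with cdecl, stdcall or regparm, since
   fastcall already fixes its own register-passing convention.  */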
4232 /* Return 0 if the attributes for two types are incompatible, 1 if they
4233 are compatible, and 2 if they are nearly compatible (which causes a
4234 warning to be generated). */
4236 static int
4237 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4239 /* Check for mismatch of non-default calling convention. */
4240 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4242 if (TREE_CODE (type1) != FUNCTION_TYPE
4243 && TREE_CODE (type1) != METHOD_TYPE)
4244 return 1;
4246 /* Check for mismatched fastcall/regparm types. */
4247 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4248 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4249 || (ix86_function_regparm (type1, NULL)
4250 != ix86_function_regparm (type2, NULL)))
4251 return 0;
4253 /* Check for mismatched sseregparm types. */
4254 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4255 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4256 return 0;
4258 /* Check for mismatched return types (cdecl vs stdcall). */
4259 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4260 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4261 return 0;
4263 return 1;
4266 /* Return the regparm value for a function with the indicated TYPE and DECL.
4267 DECL may be NULL when calling a function indirectly
4268 or considering a libcall. */
4270 static int
4271 ix86_function_regparm (const_tree type, const_tree decl)
4273 tree attr;
4274 int regparm = ix86_regparm;
4276 static bool error_issued;
4278 if (TARGET_64BIT)
4280 if (ix86_function_type_abi (type) == DEFAULT_ABI)
4281 return regparm;
4282 return DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX : X64_REGPARM_MAX;
4285 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4286 if (attr)
4288 regparm
4289 = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4291 if (decl && TREE_CODE (decl) == FUNCTION_DECL)
4293 /* We can't use regparm(3) for nested functions because
4294 these pass the static chain pointer in the %ecx register. */
4295 if (!error_issued && regparm == 3
4296 && decl_function_context (decl)
4297 && !DECL_NO_STATIC_CHAIN (decl))
4299 error ("nested functions are limited to 2 register parameters");
4300 error_issued = true;
4301 return 0;
4305 return regparm;
4308 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4309 return 2;
4311 /* Use register calling convention for local functions when possible. */
4312 if (decl && TREE_CODE (decl) == FUNCTION_DECL
4313 && !profile_flag)
4315 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4316 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4317 if (i && i->local)
4319 int local_regparm, globals = 0, regno;
4320 struct function *f;
4322 /* Make sure no regparm register is taken by a
4323 fixed register variable. */
4324 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4325 if (fixed_regs[local_regparm])
4326 break;
4328 /* We can't use regparm(3) for nested functions as these use
4329 the static chain pointer in the third argument. */
4330 if (local_regparm == 3
4331 && decl_function_context (decl)
4332 && !DECL_NO_STATIC_CHAIN (decl))
4333 local_regparm = 2;
4335 /* If the function realigns its stack pointer, the prologue will
4336 clobber %ecx. If we've already generated code for the callee,
4337 the callee DECL_STRUCT_FUNCTION is gone, so we fall back to
4338 scanning the attributes for the self-realigning property. */
4339 f = DECL_STRUCT_FUNCTION (decl);
4340 /* Since the current internal arg pointer won't conflict with
4341 parameter-passing regs, there is no need to change the stack
4342 realignment or adjust the regparm number.
4344 Each fixed register usage increases register pressure,
4345 so fewer registers should be used for argument passing.
4346 This behavior can be overridden by an explicit
4347 regparm value. */
4348 for (regno = 0; regno <= DI_REG; regno++)
4349 if (fixed_regs[regno])
4350 globals++;
4352 local_regparm
4353 = globals < local_regparm ? local_regparm - globals : 0;
4355 if (local_regparm > regparm)
4356 regparm = local_regparm;
4360 return regparm;
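/* Editorial example (a sketch, not part of the original source): with

       int __attribute__((regparm(3))) add3 (int a, int b, int c);

   the first three integer arguments are passed in registers
   (conventionally %eax, %edx and %ecx on IA-32), which is why nested
   functions, whose static chain occupies %ecx, are limited to 2 by the
   diagnostic above.  */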
4363 /* Return 1 or 2 if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4364 DFmode (2) arguments in SSE registers for a function with the
4365 indicated TYPE and DECL. DECL may be NULL when calling a function
4366 indirectly or considering a libcall. Otherwise return 0. */
4368 static int
4369 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4371 gcc_assert (!TARGET_64BIT);
4373 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4374 by the sseregparm attribute. */
4375 if (TARGET_SSEREGPARM
4376 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4378 if (!TARGET_SSE)
4380 if (warn)
4382 if (decl)
4383 error ("Calling %qD with attribute sseregparm without "
4384 "SSE/SSE2 enabled", decl);
4385 else
4386 error ("Calling %qT with attribute sseregparm without "
4387 "SSE/SSE2 enabled", type);
4389 return 0;
4392 return 2;
4395 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4396 (and DFmode for SSE2) arguments in SSE registers. */
4397 if (decl && TARGET_SSE_MATH && !profile_flag)
4399 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4400 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4401 if (i && i->local)
4402 return TARGET_SSE2 ? 2 : 1;
4405 return 0;
4408 /* Return true if EAX is live at the start of the function. Used by
4409 ix86_expand_prologue to determine if we need special help before
4410 calling allocate_stack_worker. */
4412 static bool
4413 ix86_eax_live_at_start_p (void)
4415 /* Cheat. Don't bother working forward from ix86_function_regparm
4416 to the function type to whether an actual argument is located in
4417 eax. Instead just look at cfg info, which is still close enough
4418 to correct at this point. This gives false positives for broken
4419 functions that might use uninitialized data that happens to be
4420 allocated in eax, but who cares? */
4421 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4424 /* Value is the number of bytes of arguments automatically
4425 popped when returning from a subroutine call.
4426 FUNDECL is the declaration node of the function (as a tree),
4427 FUNTYPE is the data type of the function (as a tree),
4428 or for a library call it is an identifier node for the subroutine name.
4429 SIZE is the number of bytes of arguments passed on the stack.
4431 On the 80386, the RTD insn may be used to pop them if the number
4432 of args is fixed, but if the number is variable then the caller
4433 must pop them all. RTD can't be used for library calls now
4434 because the library is compiled with the Unix compiler.
4435 Use of RTD is a selectable option, since it is incompatible with
4436 standard Unix calling sequences. If the option is not selected,
4437 the caller must always pop the args.
4439 The attribute stdcall is equivalent to RTD on a per module basis. */
4441 int
4442 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4444 int rtd;
4446 /* None of the 64-bit ABIs pop arguments. */
4447 if (TARGET_64BIT)
4448 return 0;
4450 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4452 /* Cdecl functions override -mrtd, and never pop the stack. */
4453 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4455 /* Stdcall and fastcall functions will pop the stack if they do not
4456 take variable args. */
4457 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4458 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
4459 rtd = 1;
4461 if (rtd && ! stdarg_p (funtype))
4462 return size;
4465 /* Lose any fake structure return argument if it is passed on the stack. */
4466 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4467 && !KEEP_AGGREGATE_RETURN_POINTER)
4469 int nregs = ix86_function_regparm (funtype, fundecl);
4470 if (nregs == 0)
4471 return GET_MODE_SIZE (Pmode);
4474 return 0;
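/* Editorial example (a sketch, not part of the original source): a
   32-bit stdcall function taking two int arguments pops its own 8
   bytes of stack, returning with something like

       ret	$8

   whereas its cdecl counterpart returns with a plain ret and leaves
   the cleanup to the caller, as computed above.  */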
4477 /* Argument support functions. */
4479 /* Return true when register may be used to pass function parameters. */
4480 bool
4481 ix86_function_arg_regno_p (int regno)
4483 int i;
4484 const int *parm_regs;
4486 if (!TARGET_64BIT)
4488 if (TARGET_MACHO)
4489 return (regno < REGPARM_MAX
4490 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4491 else
4492 return (regno < REGPARM_MAX
4493 || (TARGET_MMX && MMX_REGNO_P (regno)
4494 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4495 || (TARGET_SSE && SSE_REGNO_P (regno)
4496 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4499 if (TARGET_MACHO)
4501 if (SSE_REGNO_P (regno) && TARGET_SSE)
4502 return true;
4504 else
4506 if (TARGET_SSE && SSE_REGNO_P (regno)
4507 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4508 return true;
4511 /* TODO: The function should depend on the current function ABI but
4512 builtins.c would need updating then. Therefore we use the
4513 default ABI. */
4515 /* RAX is used as hidden argument to va_arg functions. */
4516 if (DEFAULT_ABI == SYSV_ABI && regno == AX_REG)
4517 return true;
4519 if (DEFAULT_ABI == MS_ABI)
4520 parm_regs = x86_64_ms_abi_int_parameter_registers;
4521 else
4522 parm_regs = x86_64_int_parameter_registers;
4523 for (i = 0; i < (DEFAULT_ABI == MS_ABI ? X64_REGPARM_MAX
4524 : X86_64_REGPARM_MAX); i++)
4525 if (regno == parm_regs[i])
4526 return true;
4527 return false;
4530 /* Return true if we do not know how to pass TYPE solely in registers. */
4532 static bool
4533 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4535 if (must_pass_in_stack_var_size_or_pad (mode, type))
4536 return true;
4538 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4539 The layout_type routine is crafty and tries to trick us into passing
4540 currently unsupported vector types on the stack by using TImode. */
4541 return (!TARGET_64BIT && mode == TImode
4542 && type && TREE_CODE (type) != VECTOR_TYPE);
4545 /* Return the size, in bytes, of the area reserved for arguments passed
4546 in registers for the function represented by FNDECL, depending on the
4547 ABI format used. */
4548 int
4549 ix86_reg_parm_stack_space (const_tree fndecl)
4551 int call_abi = 0;
4552 /* For libcalls it is possible that there is no fndecl at hand.
4553 Therefore assume the default ABI of the target in this case. */
4554 if (!fndecl)
4555 call_abi = (cfun ? cfun->machine->call_abi : DEFAULT_ABI);
4556 else if (TREE_CODE (fndecl) == FUNCTION_DECL)
4557 call_abi = ix86_function_abi (fndecl);
4558 else
4559 call_abi = ix86_function_type_abi (fndecl);
4560 if (call_abi == 1)
4561 return 32;
4562 return 0;
4565 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
4566 call ABI used. */
4567 int
4568 ix86_function_type_abi (const_tree fntype)
4570 if (TARGET_64BIT && fntype != NULL)
4572 int abi;
4573 if (DEFAULT_ABI == SYSV_ABI)
4574 abi = lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)) ? MS_ABI : SYSV_ABI;
4575 else
4576 abi = lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)) ? SYSV_ABI : MS_ABI;
4578 return abi;
4580 return DEFAULT_ABI;
4583 int
4584 ix86_function_abi (const_tree fndecl)
4586 if (! fndecl)
4587 return DEFAULT_ABI;
4588 return ix86_function_type_abi (TREE_TYPE (fndecl));
4591 /* Return SYSV_ABI or MS_ABI, depending on cfun, specifying the
4592 call ABI used. */
4593 int
4594 ix86_cfun_abi (void)
4596 if (! cfun || ! TARGET_64BIT)
4597 return DEFAULT_ABI;
4598 return cfun->machine->call_abi;
4601 /* regclass.c */
4602 extern void init_regs (void);
4604 /* Implementation of the call ABI switching target hook. The call
4605 register sets specific to FNDECL are set up. See also
4606 CONDITIONAL_REGISTER_USAGE for more details.
4607 To prevent redundant calls to the costly function init_regs (), it
4608 does not reset register usage for the default ABI. */
4609 void
4610 ix86_call_abi_override (const_tree fndecl)
4612 if (fndecl == NULL_TREE)
4613 cfun->machine->call_abi = DEFAULT_ABI;
4614 else
4615 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4616 if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
4618 if (call_used_regs[4 /*RSI*/] != 0 || call_used_regs[5 /*RDI*/] != 0)
4620 call_used_regs[4 /*RSI*/] = 0;
4621 call_used_regs[5 /*RDI*/] = 0;
4622 init_regs ();
4625 else if (TARGET_64BIT)
4627 if (call_used_regs[4 /*RSI*/] != 1 || call_used_regs[5 /*RDI*/] != 1)
4629 call_used_regs[4 /*RSI*/] = 1;
4630 call_used_regs[5 /*RDI*/] = 1;
4631 init_regs ();
4636 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4637 for a call to a function whose data type is FNTYPE.
4638 For a library call, FNTYPE is 0. */
4640 void
4641 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4642 tree fntype, /* tree ptr for function decl */
4643 rtx libname, /* SYMBOL_REF of library name or 0 */
4644 tree fndecl)
4646 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4647 memset (cum, 0, sizeof (*cum));
4649 cum->call_abi = ix86_function_type_abi (fntype);
4650 /* Set up the number of registers to use for passing arguments. */
4651 cum->nregs = ix86_regparm;
4652 if (TARGET_64BIT)
4654 if (cum->call_abi != DEFAULT_ABI)
4655 cum->nregs = DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX
4656 : X64_REGPARM_MAX;
4658 if (TARGET_SSE)
4660 cum->sse_nregs = SSE_REGPARM_MAX;
4661 if (TARGET_64BIT)
4663 if (cum->call_abi != DEFAULT_ABI)
4664 cum->sse_nregs = DEFAULT_ABI != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
4665 : X64_SSE_REGPARM_MAX;
4668 if (TARGET_MMX)
4669 cum->mmx_nregs = MMX_REGPARM_MAX;
4670 cum->warn_avx = true;
4671 cum->warn_sse = true;
4672 cum->warn_mmx = true;
4674 /* Because the type might mismatch between caller and callee, we need to
4675 use the actual type of the function for local calls.
4676 FIXME: cgraph_analyze can be told to actually record whether a function
4677 uses va_start, so for local functions maybe_vaarg can be made more
4678 aggressive, helping K&R code.
4679 FIXME: once the type system is fixed, we won't need this code anymore. */
4680 if (i && i->local)
4681 fntype = TREE_TYPE (fndecl);
4682 cum->maybe_vaarg = (fntype
4683 ? (!prototype_p (fntype) || stdarg_p (fntype))
4684 : !libname);
4686 if (!TARGET_64BIT)
4688 /* If there are variable arguments, then we won't pass anything
4689 in registers in 32-bit mode. */
4690 if (stdarg_p (fntype))
4692 cum->nregs = 0;
4693 cum->sse_nregs = 0;
4694 cum->mmx_nregs = 0;
4695 cum->warn_avx = 0;
4696 cum->warn_sse = 0;
4697 cum->warn_mmx = 0;
4698 return;
4701 /* Use ecx and edx registers if the function has the fastcall attribute,
4702 else look for regparm information. */
4703 if (fntype)
4705 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
4707 cum->nregs = 2;
4708 cum->fastcall = 1;
4710 else
4711 cum->nregs = ix86_function_regparm (fntype, fndecl);
4714 /* Set up the number of SSE registers used for passing SFmode
4715 and DFmode arguments. Warn for mismatching ABI. */
4716 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
4720 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
4721 But in the case of vector types, it is some vector mode.
4723 When we have only some of our vector isa extensions enabled, then there
4724 are some modes for which vector_mode_supported_p is false. For these
4725 modes, the generic vector support in gcc will choose some non-vector mode
4726 in order to implement the type. By computing the natural mode, we'll
4727 select the proper ABI location for the operand and not depend on whatever
4728 the middle-end decides to do with these vector types. */
4730 static enum machine_mode
4731 type_natural_mode (const_tree type)
4733 enum machine_mode mode = TYPE_MODE (type);
4735 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
4737 HOST_WIDE_INT size = int_size_in_bytes (type);
4738 if ((size == 8 || size == 16)
4739 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
4740 && TYPE_VECTOR_SUBPARTS (type) > 1)
4742 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
4744 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4745 mode = MIN_MODE_VECTOR_FLOAT;
4746 else
4747 mode = MIN_MODE_VECTOR_INT;
4749 /* Get the mode which has this inner mode and number of units. */
4750 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
4751 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
4752 && GET_MODE_INNER (mode) == innermode)
4753 return mode;
4755 gcc_unreachable ();
4759 return mode;
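/* Editorial example (a sketch, not part of the original source): for a
   generic vector type such as

       typedef int v4si __attribute__((vector_size (16)));

   type_natural_mode returns V4SImode even when the middle end, lacking
   the matching ISA, would fall back to a non-vector mode, keeping the
   ABI location of the operand stable.  */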
4762 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
4763 this may not agree with the mode that the type system has chosen for the
4764 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
4765 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
4767 static rtx
4768 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
4769 unsigned int regno)
4771 rtx tmp;
4773 if (orig_mode != BLKmode)
4774 tmp = gen_rtx_REG (orig_mode, regno);
4775 else
4777 tmp = gen_rtx_REG (mode, regno);
4778 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
4779 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
4782 return tmp;
4785 /* x86-64 register passing implementation. See the x86-64 ABI for details.
4786 The goal of this code is to classify each 8 bytes of an incoming argument
4787 by register class and assign registers accordingly. */
4789 /* Return the union class of CLASS1 and CLASS2.
4790 See the x86-64 PS ABI for details. */
4792 static enum x86_64_reg_class
4793 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
4795 /* Rule #1: If both classes are equal, this is the resulting class. */
4796 if (class1 == class2)
4797 return class1;
4799 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
4800 the other class. */
4801 if (class1 == X86_64_NO_CLASS)
4802 return class2;
4803 if (class2 == X86_64_NO_CLASS)
4804 return class1;
4806 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
4807 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
4808 return X86_64_MEMORY_CLASS;
4810 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
4811 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
4812 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
4813 return X86_64_INTEGERSI_CLASS;
4814 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
4815 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
4816 return X86_64_INTEGER_CLASS;
4818 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
4819 MEMORY is used. */
4820 if (class1 == X86_64_X87_CLASS
4821 || class1 == X86_64_X87UP_CLASS
4822 || class1 == X86_64_COMPLEX_X87_CLASS
4823 || class2 == X86_64_X87_CLASS
4824 || class2 == X86_64_X87UP_CLASS
4825 || class2 == X86_64_COMPLEX_X87_CLASS)
4826 return X86_64_MEMORY_CLASS;
4828 /* Rule #6: Otherwise class SSE is used. */
4829 return X86_64_SSE_CLASS;
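/* Editorial worked example (not part of the original source): for

       struct { int i; float f; };

   both fields share one eightbyte; the int classifies as INTEGERSI and
   the float as SSESF, and rule #4 above merges them to
   X86_64_INTEGERSI_CLASS, so the pair is passed in a general
   register.  */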
4832 /* Classify the argument of type TYPE and mode MODE.
4833 CLASSES will be filled by the register class used to pass each word
4834 of the operand. The number of words is returned. In case the parameter
4835 should be passed in memory, 0 is returned. As a special case for zero
4836 sized containers, classes[0] will be NO_CLASS and 1 is returned.
4838 BIT_OFFSET is used internally for handling records and specifies the
4839 offset in bits modulo 256 to avoid overflow cases.
4841 See the x86-64 PS ABI for details.
4844 static int
4845 classify_argument (enum machine_mode mode, const_tree type,
4846 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
4848 HOST_WIDE_INT bytes =
4849 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
4850 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4852 /* Variable sized entities are always passed/returned in memory. */
4853 if (bytes < 0)
4854 return 0;
4856 if (mode != VOIDmode
4857 && targetm.calls.must_pass_in_stack (mode, type))
4858 return 0;
4860 if (type && AGGREGATE_TYPE_P (type))
4862 int i;
4863 tree field;
4864 enum x86_64_reg_class subclasses[MAX_CLASSES];
4866 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
4867 if (bytes > 16)
4868 return 0;
4870 for (i = 0; i < words; i++)
4871 classes[i] = X86_64_NO_CLASS;
4873 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
4874 signal the memory class, so handle them as a special case. */
4875 if (!words)
4877 classes[0] = X86_64_NO_CLASS;
4878 return 1;
4881 /* Classify each field of record and merge classes. */
4882 switch (TREE_CODE (type))
4884 case RECORD_TYPE:
4885 /* And now merge the fields of structure. */
4886 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4888 if (TREE_CODE (field) == FIELD_DECL)
4890 int num;
4892 if (TREE_TYPE (field) == error_mark_node)
4893 continue;
4895 /* Bitfields are always classified as integer. Handle them
4896 early, since later code would consider them to be
4897 misaligned integers. */
4898 if (DECL_BIT_FIELD (field))
4900 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
4901 i < ((int_bit_position (field) + (bit_offset % 64))
4902 + tree_low_cst (DECL_SIZE (field), 0)
4903 + 63) / 8 / 8; i++)
4904 classes[i] =
4905 merge_classes (X86_64_INTEGER_CLASS,
4906 classes[i]);
4908 else
4910 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
4911 TREE_TYPE (field), subclasses,
4912 (int_bit_position (field)
4913 + bit_offset) % 256);
4914 if (!num)
4915 return 0;
4916 for (i = 0; i < num; i++)
4918 int pos =
4919 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
4920 classes[i + pos] =
4921 merge_classes (subclasses[i], classes[i + pos]);
4926 break;
4928 case ARRAY_TYPE:
4929 /* Arrays are handled as small records. */
4931 int num;
4932 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
4933 TREE_TYPE (type), subclasses, bit_offset);
4934 if (!num)
4935 return 0;
4937 /* The partial classes are now full classes. */
4938 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
4939 subclasses[0] = X86_64_SSE_CLASS;
4940 if (subclasses[0] == X86_64_INTEGERSI_CLASS
4941 && !((bit_offset % 64) == 0 && bytes == 4))
4942 subclasses[0] = X86_64_INTEGER_CLASS;
4944 for (i = 0; i < words; i++)
4945 classes[i] = subclasses[i % num];
4947 break;
4949 case UNION_TYPE:
4950 case QUAL_UNION_TYPE:
4951 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
4953 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4955 if (TREE_CODE (field) == FIELD_DECL)
4957 int num;
4959 if (TREE_TYPE (field) == error_mark_node)
4960 continue;
4962 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
4963 TREE_TYPE (field), subclasses,
4964 bit_offset);
4965 if (!num)
4966 return 0;
4967 for (i = 0; i < num; i++)
4968 classes[i] = merge_classes (subclasses[i], classes[i]);
4971 break;
4973 default:
4974 gcc_unreachable ();
4977 /* Final merger cleanup. */
4978 for (i = 0; i < words; i++)
4980 /* If one class is MEMORY, everything should be passed in
4981 memory. */
4982 if (classes[i] == X86_64_MEMORY_CLASS)
4983 return 0;
4985 /* The X86_64_SSEUP_CLASS should always be preceded by
4986 X86_64_SSE_CLASS. */
4987 if (classes[i] == X86_64_SSEUP_CLASS
4988 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
4989 classes[i] = X86_64_SSE_CLASS;
4991 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
4992 if (classes[i] == X86_64_X87UP_CLASS
4993 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
4994 classes[i] = X86_64_SSE_CLASS;
4996 return words;
4999 /* Compute the alignment needed. We align all types to their natural
5000 boundaries, except that XFmode and XCmode get special alignment below. */
5001 if (mode != VOIDmode && mode != BLKmode)
5003 int mode_alignment = GET_MODE_BITSIZE (mode);
5005 if (mode == XFmode)
5006 mode_alignment = 128;
5007 else if (mode == XCmode)
5008 mode_alignment = 256;
5009 if (COMPLEX_MODE_P (mode))
5010 mode_alignment /= 2;
5011 /* Misaligned fields are always returned in memory. */
5012 if (bit_offset % mode_alignment)
5013 return 0;
5016 /* For V1xx modes, just use the base mode. */
5017 if (VECTOR_MODE_P (mode) && mode != V1DImode
5018 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5019 mode = GET_MODE_INNER (mode);
5021 /* Classification of atomic types. */
5022 switch (mode)
5024 case SDmode:
5025 case DDmode:
5026 classes[0] = X86_64_SSE_CLASS;
5027 return 1;
5028 case TDmode:
5029 classes[0] = X86_64_SSE_CLASS;
5030 classes[1] = X86_64_SSEUP_CLASS;
5031 return 2;
5032 case DImode:
5033 case SImode:
5034 case HImode:
5035 case QImode:
5036 case CSImode:
5037 case CHImode:
5038 case CQImode:
5040 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5042 if (size <= 32)
5044 classes[0] = X86_64_INTEGERSI_CLASS;
5045 return 1;
5047 else if (size <= 64)
5049 classes[0] = X86_64_INTEGER_CLASS;
5050 return 1;
5052 else if (size <= 64+32)
5054 classes[0] = X86_64_INTEGER_CLASS;
5055 classes[1] = X86_64_INTEGERSI_CLASS;
5056 return 2;
5058 else if (size <= 64+64)
5060 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5061 return 2;
5063 else
5064 gcc_unreachable ();
5066 case CDImode:
5067 case TImode:
5068 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5069 return 2;
5070 case CTImode:
5071 case COImode:
5072 case OImode:
5073 return 0;
5074 case SFmode:
5075 if (!(bit_offset % 64))
5076 classes[0] = X86_64_SSESF_CLASS;
5077 else
5078 classes[0] = X86_64_SSE_CLASS;
5079 return 1;
5080 case DFmode:
5081 classes[0] = X86_64_SSEDF_CLASS;
5082 return 1;
5083 case XFmode:
5084 classes[0] = X86_64_X87_CLASS;
5085 classes[1] = X86_64_X87UP_CLASS;
5086 return 2;
5087 case TFmode:
5088 classes[0] = X86_64_SSE_CLASS;
5089 classes[1] = X86_64_SSEUP_CLASS;
5090 return 2;
5091 case SCmode:
5092 classes[0] = X86_64_SSE_CLASS;
5093 return 1;
5094 case DCmode:
5095 classes[0] = X86_64_SSEDF_CLASS;
5096 classes[1] = X86_64_SSEDF_CLASS;
5097 return 2;
5098 case XCmode:
5099 classes[0] = X86_64_COMPLEX_X87_CLASS;
5100 return 1;
5101 case TCmode:
5102 /* This mode is larger than 16 bytes. */
5103 return 0;
5104 case V8SFmode:
5105 case V8SImode:
5106 case V32QImode:
5107 case V16HImode:
5108 case V4DFmode:
5109 case V4DImode:
5110 classes[0] = X86_64_AVX_CLASS;
5111 return 1;
5112 case V4SFmode:
5113 case V4SImode:
5114 case V16QImode:
5115 case V8HImode:
5116 case V2DFmode:
5117 case V2DImode:
5118 classes[0] = X86_64_SSE_CLASS;
5119 classes[1] = X86_64_SSEUP_CLASS;
5120 return 2;
5121 case V1DImode:
5122 case V2SFmode:
5123 case V2SImode:
5124 case V4HImode:
5125 case V8QImode:
5126 classes[0] = X86_64_SSE_CLASS;
5127 return 1;
5128 case BLKmode:
5129 case VOIDmode:
5130 return 0;
5131 default:
5132 gcc_assert (VECTOR_MODE_P (mode));
5134 if (bytes > 16)
5135 return 0;
5137 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5139 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5140 classes[0] = X86_64_INTEGERSI_CLASS;
5141 else
5142 classes[0] = X86_64_INTEGER_CLASS;
5143 classes[1] = X86_64_INTEGER_CLASS;
5144 return 1 + (bytes > 8);
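/* As an illustration of the classification above (per the SysV x86-64
   psABI), a structure such as

       struct example { long l; double d; };

   occupies two eightbytes; the first classifies as X86_64_INTEGER_CLASS
   and the second as X86_64_SSEDF_CLASS, so it travels in one general
   purpose and one SSE register.  Aggregates larger than two eightbytes
   are, with few exceptions such as __m256, classified as memory.  */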
5148 /* Examine the argument and set the number of registers required in each
5149 class. Return 0 iff the parameter should be passed in memory. */
5150 static int
5151 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5152 int *int_nregs, int *sse_nregs)
5154 enum x86_64_reg_class regclass[MAX_CLASSES];
5155 int n = classify_argument (mode, type, regclass, 0);
5157 *int_nregs = 0;
5158 *sse_nregs = 0;
5159 if (!n)
5160 return 0;
5161 for (n--; n >= 0; n--)
5162 switch (regclass[n])
5164 case X86_64_INTEGER_CLASS:
5165 case X86_64_INTEGERSI_CLASS:
5166 (*int_nregs)++;
5167 break;
5168 case X86_64_AVX_CLASS:
5169 case X86_64_SSE_CLASS:
5170 case X86_64_SSESF_CLASS:
5171 case X86_64_SSEDF_CLASS:
5172 (*sse_nregs)++;
5173 break;
5174 case X86_64_NO_CLASS:
5175 case X86_64_SSEUP_CLASS:
5176 break;
5177 case X86_64_X87_CLASS:
5178 case X86_64_X87UP_CLASS:
5179 if (!in_return)
5180 return 0;
5181 break;
5182 case X86_64_COMPLEX_X87_CLASS:
5183 return in_return ? 2 : 0;
5184 case X86_64_MEMORY_CLASS:
5185 gcc_unreachable ();
5187 return 1;
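/* For example, the struct { long l; double d; } case above yields
   *int_nregs == 1 and *sse_nregs == 1.  An XFmode long double
   classifies as X87/X87UP, so examine_argument returns 0 for it unless
   IN_RETURN is set: it is always passed in memory but may be returned
   in %st(0).  */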
5190 /* Construct container for the argument used by GCC interface. See
5191 FUNCTION_ARG for the detailed description. */
5193 static rtx
5194 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5195 const_tree type, int in_return, int nintregs, int nsseregs,
5196 const int *intreg, int sse_regno)
5198 /* The following variables hold the static issued_error state. */
5199 static bool issued_sse_arg_error;
5200 static bool issued_sse_ret_error;
5201 static bool issued_x87_ret_error;
5203 enum machine_mode tmpmode;
5204 int bytes =
5205 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5206 enum x86_64_reg_class regclass[MAX_CLASSES];
5207 int n;
5208 int i;
5209 int nexps = 0;
5210 int needed_sseregs, needed_intregs;
5211 rtx exp[MAX_CLASSES];
5212 rtx ret;
5214 n = classify_argument (mode, type, regclass, 0);
5215 if (!n)
5216 return NULL;
5217 if (!examine_argument (mode, type, in_return, &needed_intregs,
5218 &needed_sseregs))
5219 return NULL;
5220 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5221 return NULL;
5223 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5224 some less clueful developer tries to use floating-point anyway. */
5225 if (needed_sseregs && !TARGET_SSE)
5227 if (in_return)
5229 if (!issued_sse_ret_error)
5231 error ("SSE register return with SSE disabled");
5232 issued_sse_ret_error = true;
5235 else if (!issued_sse_arg_error)
5237 error ("SSE register argument with SSE disabled");
5238 issued_sse_arg_error = true;
5240 return NULL;
5243 /* Likewise, error if the ABI requires us to return values in the
5244 x87 registers and the user specified -mno-80387. */
5245 if (!TARGET_80387 && in_return)
5246 for (i = 0; i < n; i++)
5247 if (regclass[i] == X86_64_X87_CLASS
5248 || regclass[i] == X86_64_X87UP_CLASS
5249 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5251 if (!issued_x87_ret_error)
5253 error ("x87 register return with x87 disabled");
5254 issued_x87_ret_error = true;
5256 return NULL;
5259 /* First construct simple cases. Avoid SCmode, since we want to use
5260 a single register to pass this type. */
5261 if (n == 1 && mode != SCmode)
5262 switch (regclass[0])
5264 case X86_64_INTEGER_CLASS:
5265 case X86_64_INTEGERSI_CLASS:
5266 return gen_rtx_REG (mode, intreg[0]);
5267 case X86_64_AVX_CLASS:
5268 case X86_64_SSE_CLASS:
5269 case X86_64_SSESF_CLASS:
5270 case X86_64_SSEDF_CLASS:
5271 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
5272 case X86_64_X87_CLASS:
5273 case X86_64_COMPLEX_X87_CLASS:
5274 return gen_rtx_REG (mode, FIRST_STACK_REG);
5275 case X86_64_NO_CLASS:
5276 /* Zero sized array, struct or class. */
5277 return NULL;
5278 default:
5279 gcc_unreachable ();
5281 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5282 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5283 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5285 if (n == 2
5286 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5287 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5288 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5289 && regclass[1] == X86_64_INTEGER_CLASS
5290 && (mode == CDImode || mode == TImode || mode == TFmode)
5291 && intreg[0] + 1 == intreg[1])
5292 return gen_rtx_REG (mode, intreg[0]);
5294 /* Otherwise figure out the entries of the PARALLEL. */
5295 for (i = 0; i < n; i++)
5297 switch (regclass[i])
5299 case X86_64_NO_CLASS:
5300 break;
5301 case X86_64_INTEGER_CLASS:
5302 case X86_64_INTEGERSI_CLASS:
5303 /* Merge TImodes on aligned occasions here too. */
5304 if (i * 8 + 8 > bytes)
5305 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5306 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5307 tmpmode = SImode;
5308 else
5309 tmpmode = DImode;
5310 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
5311 if (tmpmode == BLKmode)
5312 tmpmode = DImode;
5313 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5314 gen_rtx_REG (tmpmode, *intreg),
5315 GEN_INT (i*8));
5316 intreg++;
5317 break;
5318 case X86_64_SSESF_CLASS:
5319 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5320 gen_rtx_REG (SFmode,
5321 SSE_REGNO (sse_regno)),
5322 GEN_INT (i*8));
5323 sse_regno++;
5324 break;
5325 case X86_64_SSEDF_CLASS:
5326 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5327 gen_rtx_REG (DFmode,
5328 SSE_REGNO (sse_regno)),
5329 GEN_INT (i*8));
5330 sse_regno++;
5331 break;
5332 case X86_64_SSE_CLASS:
5333 if (i < n - 1 && regclass[i + 1] == X86_64_SSEUP_CLASS)
5334 tmpmode = TImode;
5335 else
5336 tmpmode = DImode;
5337 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5338 gen_rtx_REG (tmpmode,
5339 SSE_REGNO (sse_regno)),
5340 GEN_INT (i*8));
5341 if (tmpmode == TImode)
5342 i++;
5343 sse_regno++;
5344 break;
5345 default:
5346 gcc_unreachable ();
5350 /* Empty aligned struct, union or class. */
5351 if (nexps == 0)
5352 return NULL;
5354 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5355 for (i = 0; i < nexps; i++)
5356 XVECEXP (ret, 0, i) = exp [i];
5357 return ret;
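/* For the two-eightbyte struct used as an example above, the PARALLEL
   built here looks roughly like

       (parallel [(expr_list (reg:DI di) (const_int 0))
                  (expr_list (reg:DF xmm0) (const_int 8))])

   with each EXPR_LIST pairing a register with the byte offset of the
   eightbyte it carries.  */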
5360 /* Update the data in CUM to advance over an argument of mode MODE
5361 and data type TYPE. (TYPE is null for libcalls where that information
5362 may not be available.) */
5364 static void
5365 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5366 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5368 switch (mode)
5370 default:
5371 break;
5373 case BLKmode:
5374 if (bytes < 0)
5375 break;
5376 /* FALLTHRU */
5378 case DImode:
5379 case SImode:
5380 case HImode:
5381 case QImode:
5382 cum->words += words;
5383 cum->nregs -= words;
5384 cum->regno += words;
5386 if (cum->nregs <= 0)
5388 cum->nregs = 0;
5389 cum->regno = 0;
5391 break;
5393 case DFmode:
5394 if (cum->float_in_sse < 2)
5395 break;
5396 case SFmode:
5397 if (cum->float_in_sse < 1)
5398 break;
5399 /* FALLTHRU */
5401 case OImode:
5402 case V8SFmode:
5403 case V8SImode:
5404 case V32QImode:
5405 case V16HImode:
5406 case V4DFmode:
5407 case V4DImode:
5408 case TImode:
5409 case V16QImode:
5410 case V8HImode:
5411 case V4SImode:
5412 case V2DImode:
5413 case V4SFmode:
5414 case V2DFmode:
5415 if (!type || !AGGREGATE_TYPE_P (type))
5417 cum->sse_words += words;
5418 cum->sse_nregs -= 1;
5419 cum->sse_regno += 1;
5420 if (cum->sse_nregs <= 0)
5422 cum->sse_nregs = 0;
5423 cum->sse_regno = 0;
5426 break;
5428 case V8QImode:
5429 case V4HImode:
5430 case V2SImode:
5431 case V2SFmode:
5432 case V1DImode:
5433 if (!type || !AGGREGATE_TYPE_P (type))
5435 cum->mmx_words += words;
5436 cum->mmx_nregs -= 1;
5437 cum->mmx_regno += 1;
5438 if (cum->mmx_nregs <= 0)
5440 cum->mmx_nregs = 0;
5441 cum->mmx_regno = 0;
5444 break;
5448 static void
5449 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5450 tree type, HOST_WIDE_INT words, int named)
5452 int int_nregs, sse_nregs;
5454 /* Unnamed 256bit vector mode parameters are passed on stack. */
5455 if (!named && VALID_AVX256_REG_MODE (mode))
5456 return;
5458 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5459 cum->words += words;
5460 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5462 cum->nregs -= int_nregs;
5463 cum->sse_nregs -= sse_nregs;
5464 cum->regno += int_nregs;
5465 cum->sse_regno += sse_nregs;
5467 else
5468 cum->words += words;
5471 static void
5472 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5473 HOST_WIDE_INT words)
5475 /* Otherwise, this should be passed indirectly. */
5476 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5478 cum->words += words;
5479 if (cum->nregs > 0)
5481 cum->nregs -= 1;
5482 cum->regno += 1;
5486 void
5487 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5488 tree type, int named)
5490 HOST_WIDE_INT bytes, words;
5492 if (mode == BLKmode)
5493 bytes = int_size_in_bytes (type);
5494 else
5495 bytes = GET_MODE_SIZE (mode);
5496 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5498 if (type)
5499 mode = type_natural_mode (type);
5501 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5502 function_arg_advance_ms_64 (cum, bytes, words);
5503 else if (TARGET_64BIT)
5504 function_arg_advance_64 (cum, mode, type, words, named);
5505 else
5506 function_arg_advance_32 (cum, mode, type, bytes, words);
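/* E.g. on 64-bit SysV, advancing past a double consumes one SSE
   register (cum->sse_nregs--, cum->sse_regno++); once the six integer
   or eight SSE argument registers are exhausted, the argument goes on
   the stack and only cum->words grows.  */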
5509 /* Define where to put the arguments to a function.
5510 Value is zero to push the argument on the stack,
5511 or a hard register in which to store the argument.
5513 MODE is the argument's machine mode.
5514 TYPE is the data type of the argument (as a tree).
5515 This is null for libcalls where that information may
5516 not be available.
5517 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5518 the preceding args and about the function being called.
5519 NAMED is nonzero if this argument is a named parameter
5520 (otherwise it is an extra parameter matching an ellipsis). */
5522 static rtx
5523 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5524 enum machine_mode orig_mode, tree type,
5525 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5527 static bool warnedavx, warnedsse, warnedmmx;
5529 /* Avoid the AL settings for the Unix64 ABI. */
5530 if (mode == VOIDmode)
5531 return constm1_rtx;
5533 switch (mode)
5535 default:
5536 break;
5538 case BLKmode:
5539 if (bytes < 0)
5540 break;
5541 /* FALLTHRU */
5542 case DImode:
5543 case SImode:
5544 case HImode:
5545 case QImode:
5546 if (words <= cum->nregs)
5548 int regno = cum->regno;
5550 /* Fastcall allocates the first two DWORD (SImode) or
5551 smaller arguments to ECX and EDX if it isn't an
5552 aggregate type. */
5553 if (cum->fastcall)
5555 if (mode == BLKmode
5556 || mode == DImode
5557 || (type && AGGREGATE_TYPE_P (type)))
5558 break;
5560 /* ECX, not EAX, is the first allocated register. */
5561 if (regno == AX_REG)
5562 regno = CX_REG;
5564 return gen_rtx_REG (mode, regno);
5566 break;
5568 case DFmode:
5569 if (cum->float_in_sse < 2)
5570 break;
5571 case SFmode:
5572 if (cum->float_in_sse < 1)
5573 break;
5574 /* FALLTHRU */
5575 case TImode:
5576 /* In 32bit, we pass TImode in xmm registers. */
5577 case V16QImode:
5578 case V8HImode:
5579 case V4SImode:
5580 case V2DImode:
5581 case V4SFmode:
5582 case V2DFmode:
5583 if (!type || !AGGREGATE_TYPE_P (type))
5585 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
5587 warnedsse = true;
5588 warning (0, "SSE vector argument without SSE enabled "
5589 "changes the ABI");
5591 if (cum->sse_nregs)
5592 return gen_reg_or_parallel (mode, orig_mode,
5593 cum->sse_regno + FIRST_SSE_REG);
5595 break;
5597 case OImode:
5598 /* In 32bit, we pass OImode in ymm registers. */
5599 case V8SFmode:
5600 case V8SImode:
5601 case V32QImode:
5602 case V16HImode:
5603 case V4DFmode:
5604 case V4DImode:
5605 if (!type || !AGGREGATE_TYPE_P (type))
5607 if (!TARGET_AVX && !warnedavx && cum->warn_avx)
5609 warnedavx = true;
5610 warning (0, "AVX vector argument without AVX enabled "
5611 "changes the ABI");
5613 if (cum->sse_nregs)
5614 return gen_reg_or_parallel (mode, orig_mode,
5615 cum->sse_regno + FIRST_SSE_REG);
5617 break;
5619 case V8QImode:
5620 case V4HImode:
5621 case V2SImode:
5622 case V2SFmode:
5623 case V1DImode:
5624 if (!type || !AGGREGATE_TYPE_P (type))
5626 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
5628 warnedmmx = true;
5629 warning (0, "MMX vector argument without MMX enabled "
5630 "changes the ABI");
5632 if (cum->mmx_nregs)
5633 return gen_reg_or_parallel (mode, orig_mode,
5634 cum->mmx_regno + FIRST_MMX_REG);
5636 break;
5639 return NULL_RTX;
5642 static rtx
5643 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5644 enum machine_mode orig_mode, tree type, int named)
5646 static bool warnedavx;
5648 /* Handle a hidden AL argument containing the number of registers
5649 for varargs x86-64 functions. */
5650 if (mode == VOIDmode)
5651 return GEN_INT (cum->maybe_vaarg
5652 ? (cum->sse_nregs < 0
5653 ? (cum->call_abi == DEFAULT_ABI
5654 ? SSE_REGPARM_MAX
5655 : (DEFAULT_ABI != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
5656 : X64_SSE_REGPARM_MAX))
5657 : cum->sse_regno)
5658 : -1);
5660 switch (mode)
5662 default:
5663 break;
5665 case V8SFmode:
5666 case V8SImode:
5667 case V32QImode:
5668 case V16HImode:
5669 case V4DFmode:
5670 case V4DImode:
5671 /* In 64bit, we pass TImode in integer registers and OImode on the
5672 stack. */
5673 if (!type || !AGGREGATE_TYPE_P (type))
5675 if (!TARGET_AVX && !warnedavx && cum->warn_avx)
5677 warnedavx = true;
5678 warning (0, "AVX vector argument without AVX enabled "
5679 "changes the ABI");
5683 /* Unnamed 256bit vector mode parameters are passed on stack. */
5684 if (!named)
5685 return NULL;
5686 break;
5689 return construct_container (mode, orig_mode, type, 0, cum->nregs,
5690 cum->sse_nregs,
5691 &x86_64_int_parameter_registers [cum->regno],
5692 cum->sse_regno);
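/* The VOIDmode case above implements the SysV convention of passing
   the number of SSE registers used in %al when calling a varargs
   function: e.g. a call like printf ("%f", d) is preceded by
   movl $1, %eax, telling the callee's prologue (see
   setup_incoming_varargs_64) how many SSE save slots it must fill.  */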
5695 static rtx
5696 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5697 enum machine_mode orig_mode, int named,
5698 HOST_WIDE_INT bytes)
5700 unsigned int regno;
5702 /* Avoid the AL settings for the Unix64 ABI. */
5703 if (mode == VOIDmode)
5704 return constm1_rtx;
5706 /* If we've run out of registers, it goes on the stack. */
5707 if (cum->nregs == 0)
5708 return NULL_RTX;
5710 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
5712 /* Only floating point modes are passed in anything but integer regs. */
5713 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
5715 if (named)
5716 regno = cum->regno + FIRST_SSE_REG;
5717 else
5719 rtx t1, t2;
5721 /* Unnamed floating parameters are passed in both the
5722 SSE and integer registers. */
5723 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
5724 t2 = gen_rtx_REG (mode, regno);
5725 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
5726 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
5727 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
5730 /* Handle aggregate types passed in registers. */
5731 if (orig_mode == BLKmode)
5733 if (bytes > 0 && bytes <= 8)
5734 mode = (bytes > 4 ? DImode : SImode);
5735 if (mode == BLKmode)
5736 mode = DImode;
5739 return gen_reg_or_parallel (mode, orig_mode, regno);
5743 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
5744 tree type, int named)
5746 enum machine_mode mode = omode;
5747 HOST_WIDE_INT bytes, words;
5749 if (mode == BLKmode)
5750 bytes = int_size_in_bytes (type);
5751 else
5752 bytes = GET_MODE_SIZE (mode);
5753 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5755 /* To simplify the code below, represent vector types with a vector mode
5756 even if MMX/SSE are not active. */
5757 if (type && TREE_CODE (type) == VECTOR_TYPE)
5758 mode = type_natural_mode (type);
5760 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5761 return function_arg_ms_64 (cum, mode, omode, named, bytes);
5762 else if (TARGET_64BIT)
5763 return function_arg_64 (cum, mode, omode, type, named);
5764 else
5765 return function_arg_32 (cum, mode, omode, type, bytes, words);
5768 /* A C expression that indicates when an argument must be passed by
5769 reference. If nonzero for an argument, a copy of that argument is
5770 made in memory and a pointer to the argument is passed instead of
5771 the argument itself. The pointer is passed in whatever way is
5772 appropriate for passing a pointer to that type. */
5774 static bool
5775 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5776 enum machine_mode mode ATTRIBUTE_UNUSED,
5777 const_tree type, bool named ATTRIBUTE_UNUSED)
5779 /* See Windows x64 Software Convention. */
5780 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5782 int msize = (int) GET_MODE_SIZE (mode);
5783 if (type)
5785 /* Arrays are passed by reference. */
5786 if (TREE_CODE (type) == ARRAY_TYPE)
5787 return true;
5789 if (AGGREGATE_TYPE_P (type))
5791 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
5792 are passed by reference. */
5793 msize = int_size_in_bytes (type);
5797 /* __m128 is passed by reference. */
5798 switch (msize) {
5799 case 1: case 2: case 4: case 8:
5800 break;
5801 default:
5802 return true;
5805 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
5806 return true;
5808 return false;
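/* Under the Windows x64 convention checked above, for example, a
   3 byte struct and a 16 byte __m128 are both passed by reference,
   while an 8 byte struct { int i; int j; } is passed by value in a
   single integer register.  */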
5811 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
5812 ABI. */
5813 static bool
5814 contains_aligned_value_p (tree type)
5816 enum machine_mode mode = TYPE_MODE (type);
5817 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
5818 || mode == TDmode
5819 || mode == TFmode
5820 || mode == TCmode)
5821 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
5822 return true;
5823 if (TYPE_ALIGN (type) < 128)
5824 return false;
5826 if (AGGREGATE_TYPE_P (type))
5828 /* Walk the aggregates recursively. */
5829 switch (TREE_CODE (type))
5831 case RECORD_TYPE:
5832 case UNION_TYPE:
5833 case QUAL_UNION_TYPE:
5835 tree field;
5837 /* Walk all the structure fields. */
5838 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5840 if (TREE_CODE (field) == FIELD_DECL
5841 && contains_aligned_value_p (TREE_TYPE (field)))
5842 return true;
5844 break;
5847 case ARRAY_TYPE:
5848 /* Just in case some language passes arrays by value. */
5849 if (contains_aligned_value_p (TREE_TYPE (type)))
5850 return true;
5851 break;
5853 default:
5854 gcc_unreachable ();
5857 return false;
5860 /* Gives the alignment boundary, in bits, of an argument with the
5861 specified mode and type. */
5864 ix86_function_arg_boundary (enum machine_mode mode, tree type)
5866 int align;
5867 if (type)
5869 /* Since the canonical type is used for the call, convert the
5870 type to its canonical form if needed. */
5871 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
5872 type = TYPE_CANONICAL (type);
5873 align = TYPE_ALIGN (type);
5875 else
5876 align = GET_MODE_ALIGNMENT (mode);
5877 if (align < PARM_BOUNDARY)
5878 align = PARM_BOUNDARY;
5879 /* In 32bit, only _Decimal128 and __float128 are aligned to their
5880 natural boundaries. */
5881 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
5883 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
5884 make an exception for SSE modes since these require 128bit
5885 alignment.
5887 The handling here differs from field_alignment. ICC aligns MMX
5888 arguments to 4 byte boundaries, while structure fields are aligned
5889 to 8 byte boundaries. */
5890 if (!type)
5892 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
5893 align = PARM_BOUNDARY;
5895 else
5897 if (!contains_aligned_value_p (type))
5898 align = PARM_BOUNDARY;
5901 if (align > BIGGEST_ALIGNMENT)
5902 align = BIGGEST_ALIGNMENT;
5903 return align;
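/* For example, on 32-bit a double argument gets the 4 byte
   PARM_BOUNDARY despite its natural 8 byte alignment, while an __m128
   argument keeps its 128 bit alignment; on 64-bit every argument uses
   its natural boundary, capped at BIGGEST_ALIGNMENT.  */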
5906 /* Return true if N is a possible register number of function value. */
5908 bool
5909 ix86_function_value_regno_p (int regno)
5911 switch (regno)
5913 case 0:
5914 return true;
5916 case FIRST_FLOAT_REG:
5917 /* TODO: The function should depend on current function ABI but
5918 builtins.c would need updating then. Therefore we use the
5919 default ABI. */
5920 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
5921 return false;
5922 return TARGET_FLOAT_RETURNS_IN_80387;
5924 case FIRST_SSE_REG:
5925 return TARGET_SSE;
5927 case FIRST_MMX_REG:
5928 if (TARGET_MACHO || TARGET_64BIT)
5929 return false;
5930 return TARGET_MMX;
5933 return false;
5936 /* Define how to find the value returned by a function.
5937 VALTYPE is the data type of the value (as a tree).
5938 If the precise function being called is known, FUNC is its FUNCTION_DECL;
5939 otherwise, FUNC is 0. */
5941 static rtx
5942 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
5943 const_tree fntype, const_tree fn)
5945 unsigned int regno;
5947 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
5948 we normally prevent this case when mmx is not available. However
5949 some ABIs may require the result to be returned like DImode. */
5950 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
5951 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
5953 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
5954 we prevent this case when sse is not available. However some ABIs
5955 may require the result to be returned like integer TImode. */
5956 else if (mode == TImode
5957 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
5958 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
5960 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
5961 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
5962 regno = FIRST_FLOAT_REG;
5963 else
5964 /* Most things go in %eax. */
5965 regno = AX_REG;
5967 /* Override FP return register with %xmm0 for local functions when
5968 SSE math is enabled or for functions with sseregparm attribute. */
5969 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
5971 int sse_level = ix86_function_sseregparm (fntype, fn, false);
5972 if ((sse_level >= 1 && mode == SFmode)
5973 || (sse_level == 2 && mode == DFmode))
5974 regno = FIRST_SSE_REG;
5977 return gen_rtx_REG (orig_mode, regno);
5980 static rtx
5981 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
5982 const_tree valtype)
5984 rtx ret;
5986 /* Handle libcalls, which don't provide a type node. */
5987 if (valtype == NULL)
5989 switch (mode)
5991 case SFmode:
5992 case SCmode:
5993 case DFmode:
5994 case DCmode:
5995 case TFmode:
5996 case SDmode:
5997 case DDmode:
5998 case TDmode:
5999 return gen_rtx_REG (mode, FIRST_SSE_REG);
6000 case XFmode:
6001 case XCmode:
6002 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6003 case TCmode:
6004 return NULL;
6005 default:
6006 return gen_rtx_REG (mode, AX_REG);
6010 ret = construct_container (mode, orig_mode, valtype, 1,
6011 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6012 x86_64_int_return_registers, 0);
6014 /* For zero sized structures, construct_container returns NULL, but we
6015 need to keep the rest of the compiler happy by returning a meaningful value. */
6016 if (!ret)
6017 ret = gen_rtx_REG (orig_mode, AX_REG);
6019 return ret;
6022 static rtx
6023 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6025 unsigned int regno = AX_REG;
6027 if (TARGET_SSE)
6029 switch (GET_MODE_SIZE (mode))
6031 case 16:
6032 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6033 && !COMPLEX_MODE_P (mode))
6034 regno = FIRST_SSE_REG;
6035 break;
6036 case 8:
6037 case 4:
6038 if (mode == SFmode || mode == DFmode)
6039 regno = FIRST_SSE_REG;
6040 break;
6041 default:
6042 break;
6045 return gen_rtx_REG (orig_mode, regno);
6048 static rtx
6049 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6050 enum machine_mode orig_mode, enum machine_mode mode)
6052 const_tree fn, fntype;
6054 fn = NULL_TREE;
6055 if (fntype_or_decl && DECL_P (fntype_or_decl))
6056 fn = fntype_or_decl;
6057 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6059 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6060 return function_value_ms_64 (orig_mode, mode);
6061 else if (TARGET_64BIT)
6062 return function_value_64 (orig_mode, mode, valtype);
6063 else
6064 return function_value_32 (orig_mode, mode, fntype, fn);
6067 static rtx
6068 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6069 bool outgoing ATTRIBUTE_UNUSED)
6071 enum machine_mode mode, orig_mode;
6073 orig_mode = TYPE_MODE (valtype);
6074 mode = type_natural_mode (valtype);
6075 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6079 ix86_libcall_value (enum machine_mode mode)
6081 return ix86_function_value_1 (NULL, NULL, mode, mode);
6084 /* Return true iff type is returned in memory. */
6086 static int ATTRIBUTE_UNUSED
6087 return_in_memory_32 (const_tree type, enum machine_mode mode)
6089 HOST_WIDE_INT size;
6091 if (mode == BLKmode)
6092 return 1;
6094 size = int_size_in_bytes (type);
6096 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6097 return 0;
6099 if (VECTOR_MODE_P (mode) || mode == TImode)
6101 /* User-created vectors small enough to fit in EAX. */
6102 if (size < 8)
6103 return 0;
6105 /* MMX/3dNow values are returned in MM0,
6106 except when it doesn't exist. */
6107 if (size == 8)
6108 return (TARGET_MMX ? 0 : 1);
6110 /* SSE values are returned in XMM0, except when it doesn't exist. */
6111 if (size == 16)
6112 return (TARGET_SSE ? 0 : 1);
6115 if (mode == XFmode)
6116 return 0;
6118 if (size > 12)
6119 return 1;
6120 return 0;
6123 static int ATTRIBUTE_UNUSED
6124 return_in_memory_64 (const_tree type, enum machine_mode mode)
6126 int needed_intregs, needed_sseregs;
6127 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6130 static int ATTRIBUTE_UNUSED
6131 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6133 HOST_WIDE_INT size = int_size_in_bytes (type);
6135 /* __m128 is returned in xmm0. */
6136 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6137 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6138 return 0;
6140 /* Otherwise, the size must be exactly in [1248]. */
6141 return (size != 1 && size != 2 && size != 4 && size != 8);
6144 static bool
6145 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6147 #ifdef SUBTARGET_RETURN_IN_MEMORY
6148 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6149 #else
6150 const enum machine_mode mode = type_natural_mode (type);
6152 if (TARGET_64BIT_MS_ABI)
6153 return return_in_memory_ms_64 (type, mode);
6154 else if (TARGET_64BIT)
6155 return return_in_memory_64 (type, mode);
6156 else
6157 return return_in_memory_32 (type, mode);
6158 #endif
6161 /* Return true iff TYPE is returned in memory. This version is used
6162 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6163 but differs notably in that when MMX is available, 8-byte vectors
6164 are returned in memory, rather than in MMX registers. */
6166 bool
6167 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6169 int size;
6170 enum machine_mode mode = type_natural_mode (type);
6172 if (TARGET_64BIT)
6173 return return_in_memory_64 (type, mode);
6175 if (mode == BLKmode)
6176 return 1;
6178 size = int_size_in_bytes (type);
6180 if (VECTOR_MODE_P (mode))
6182 /* Return in memory only if MMX registers *are* available. This
6183 seems backwards, but it is consistent with the existing
6184 Solaris x86 ABI. */
6185 if (size == 8)
6186 return TARGET_MMX;
6187 if (size == 16)
6188 return !TARGET_SSE;
6190 else if (mode == TImode)
6191 return !TARGET_SSE;
6192 else if (mode == XFmode)
6193 return 0;
6195 return size > 12;
6198 /* When returning SSE vector types, we have a choice of either
6199 (1) being ABI incompatible with a -march switch, or
6200 (2) generating an error.
6201 Given no good solution, I think the safest thing is one warning.
6202 The user won't be able to use -Werror, but....
6204 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6205 called in response to actually generating a caller or callee that
6206 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6207 via aggregate_value_p for general type probing from tree-ssa. */
6209 static rtx
6210 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6212 static bool warnedsse, warnedmmx;
6214 if (!TARGET_64BIT && type)
6216 /* Look at the return type of the function, not the function type. */
6217 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6219 if (!TARGET_SSE && !warnedsse)
6221 if (mode == TImode
6222 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6224 warnedsse = true;
6225 warning (0, "SSE vector return without SSE enabled "
6226 "changes the ABI");
6230 if (!TARGET_MMX && !warnedmmx)
6232 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6234 warnedmmx = true;
6235 warning (0, "MMX vector return without MMX enabled "
6236 "changes the ABI");
6241 return NULL;
6245 /* Create the va_list data type. */
6247 /* Returns the calling convention specific va_list data type.
6248 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6250 static tree
6251 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6253 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6255 /* For i386 we use a plain pointer to the argument area. */
6256 if (!TARGET_64BIT || abi == MS_ABI)
6257 return build_pointer_type (char_type_node);
6259 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6260 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
6262 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
6263 unsigned_type_node);
6264 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
6265 unsigned_type_node);
6266 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
6267 ptr_type_node);
6268 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
6269 ptr_type_node);
6271 va_list_gpr_counter_field = f_gpr;
6272 va_list_fpr_counter_field = f_fpr;
6274 DECL_FIELD_CONTEXT (f_gpr) = record;
6275 DECL_FIELD_CONTEXT (f_fpr) = record;
6276 DECL_FIELD_CONTEXT (f_ovf) = record;
6277 DECL_FIELD_CONTEXT (f_sav) = record;
6279 TREE_CHAIN (record) = type_decl;
6280 TYPE_NAME (record) = type_decl;
6281 TYPE_FIELDS (record) = f_gpr;
6282 TREE_CHAIN (f_gpr) = f_fpr;
6283 TREE_CHAIN (f_fpr) = f_ovf;
6284 TREE_CHAIN (f_ovf) = f_sav;
6286 layout_type (record);
6288 /* The correct type is an array type of one element. */
6289 return build_array_type (record, build_index_type (size_zero_node));
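/* The 64-bit record built above corresponds to the familiar SysV
   va_list layout, roughly

       typedef struct __va_list_tag {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } va_list[1];  */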
6292 /* Setup the builtin va_list data type and for 64-bit the additional
6293 calling convention specific va_list data types. */
6295 static tree
6296 ix86_build_builtin_va_list (void)
6298 tree ret = ix86_build_builtin_va_list_abi (DEFAULT_ABI);
6300 /* Initialize abi specific va_list builtin types. */
6301 if (TARGET_64BIT)
6303 tree t;
6304 if (DEFAULT_ABI == MS_ABI)
6306 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6307 if (TREE_CODE (t) != RECORD_TYPE)
6308 t = build_variant_type_copy (t);
6309 sysv_va_list_type_node = t;
6311 else
6313 t = ret;
6314 if (TREE_CODE (t) != RECORD_TYPE)
6315 t = build_variant_type_copy (t);
6316 sysv_va_list_type_node = t;
6318 if (DEFAULT_ABI != MS_ABI)
6320 t = ix86_build_builtin_va_list_abi (MS_ABI);
6321 if (TREE_CODE (t) != RECORD_TYPE)
6322 t = build_variant_type_copy (t);
6323 ms_va_list_type_node = t;
6325 else
6327 t = ret;
6328 if (TREE_CODE (t) != RECORD_TYPE)
6329 t = build_variant_type_copy (t);
6330 ms_va_list_type_node = t;
6334 return ret;
6337 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6339 static void
6340 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6342 rtx save_area, mem;
6343 rtx label;
6344 rtx label_ref;
6345 rtx tmp_reg;
6346 rtx nsse_reg;
6347 alias_set_type set;
6348 int i;
6349 int regparm = ix86_regparm;
6351 if (cum->call_abi != DEFAULT_ABI)
6352 regparm = DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX : X64_REGPARM_MAX;
6354 /* GPR size of varargs save area. */
6355 if (cfun->va_list_gpr_size)
6356 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6357 else
6358 ix86_varargs_gpr_size = 0;
6360 /* FPR size of varargs save area. We don't need it if we don't pass
6361 anything in SSE registers. */
6362 if (cum->sse_nregs && cfun->va_list_fpr_size)
6363 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6364 else
6365 ix86_varargs_fpr_size = 0;
6367 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6368 return;
6370 save_area = frame_pointer_rtx;
6371 set = get_varargs_alias_set ();
6373 for (i = cum->regno;
6374 i < regparm
6375 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6376 i++)
6378 mem = gen_rtx_MEM (Pmode,
6379 plus_constant (save_area, i * UNITS_PER_WORD));
6380 MEM_NOTRAP_P (mem) = 1;
6381 set_mem_alias_set (mem, set);
6382 emit_move_insn (mem, gen_rtx_REG (Pmode,
6383 x86_64_int_parameter_registers[i]));
6386 if (ix86_varargs_fpr_size)
6388 /* Now emit code to save SSE registers. The AX parameter contains the
6389 number of SSE parameter registers used to call this function. We use
6390 the sse_prologue_save insn template, which produces a computed jump
6391 across the SSE saves. We need some preparation work to get this working. */
6393 label = gen_label_rtx ();
6394 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6396 /* Compute the address to jump to:
6397 label - eax*4 + nnamed_sse_arguments*4, or
6398 label - eax*5 + nnamed_sse_arguments*5 for AVX. */
6399 tmp_reg = gen_reg_rtx (Pmode);
6400 nsse_reg = gen_reg_rtx (Pmode);
6401 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6402 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6403 gen_rtx_MULT (Pmode, nsse_reg,
6404 GEN_INT (4))));
6406 /* vmovaps is one byte longer than movaps. */
6407 if (TARGET_AVX)
6408 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6409 gen_rtx_PLUS (Pmode, tmp_reg,
6410 nsse_reg)));
6412 if (cum->sse_regno)
6413 emit_move_insn
6414 (nsse_reg,
6415 gen_rtx_CONST (DImode,
6416 gen_rtx_PLUS (DImode,
6417 label_ref,
6418 GEN_INT (cum->sse_regno
6419 * (TARGET_AVX ? 5 : 4)))));
6420 else
6421 emit_move_insn (nsse_reg, label_ref);
6422 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6424 /* Compute the address of the memory block we save into. We always use a
6425 pointer pointing 127 bytes after the first byte to store - this keeps
6426 the instruction size limited to 4 bytes (5 bytes for AVX) with a one
6427 byte displacement. */
6428 tmp_reg = gen_reg_rtx (Pmode);
6429 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6430 plus_constant (save_area,
6431 ix86_varargs_gpr_size + 127)));
6432 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6433 MEM_NOTRAP_P (mem) = 1;
6434 set_mem_alias_set (mem, set);
6435 set_mem_align (mem, BITS_PER_WORD);
6437 /* And finally do the dirty job! */
6438 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6439 GEN_INT (cum->sse_regno), label));
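/* The register save area laid out above, as consumed by ix86_va_start
   and ix86_gimplify_va_arg below, is roughly

       bytes   0 ..  47   %rdi, %rsi, %rdx, %rcx, %r8, %r9 (8 each)
       bytes  48 .. 175   %xmm0 .. %xmm7 (16 each)

   with the SSE half filled only as far as %al indicated at the call.  */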
6443 static void
6444 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6446 alias_set_type set = get_varargs_alias_set ();
6447 int i;
6449 for (i = cum->regno; i < X64_REGPARM_MAX; i++)
6451 rtx reg, mem;
6453 mem = gen_rtx_MEM (Pmode,
6454 plus_constant (virtual_incoming_args_rtx,
6455 i * UNITS_PER_WORD));
6456 MEM_NOTRAP_P (mem) = 1;
6457 set_mem_alias_set (mem, set);
6459 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6460 emit_move_insn (mem, reg);
6464 static void
6465 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6466 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6467 int no_rtl)
6469 CUMULATIVE_ARGS next_cum;
6470 tree fntype;
6472 /* This argument doesn't appear to be used anymore. Which is good,
6473 because the old code here didn't suppress rtl generation. */
6474 gcc_assert (!no_rtl);
6476 if (!TARGET_64BIT)
6477 return;
6479 fntype = TREE_TYPE (current_function_decl);
6481 /* For varargs, we do not want to skip the dummy va_dcl argument.
6482 For stdargs, we do want to skip the last named argument. */
6483 next_cum = *cum;
6484 if (stdarg_p (fntype))
6485 function_arg_advance (&next_cum, mode, type, 1);
6487 if (cum->call_abi == MS_ABI)
6488 setup_incoming_varargs_ms_64 (&next_cum);
6489 else
6490 setup_incoming_varargs_64 (&next_cum);
6493 /* Checks if TYPE is of kind va_list char *. */
6495 static bool
6496 is_va_list_char_pointer (tree type)
6498 tree canonic;
6500 /* For 32-bit it is always true. */
6501 if (!TARGET_64BIT)
6502 return true;
6503 canonic = ix86_canonical_va_list_type (type);
6504 return (canonic == ms_va_list_type_node
6505 || (DEFAULT_ABI == MS_ABI && canonic == va_list_type_node));
6508 /* Implement va_start. */
6510 static void
6511 ix86_va_start (tree valist, rtx nextarg)
6513 HOST_WIDE_INT words, n_gpr, n_fpr;
6514 tree f_gpr, f_fpr, f_ovf, f_sav;
6515 tree gpr, fpr, ovf, sav, t;
6516 tree type;
6518 /* Only 64bit target needs something special. */
6519 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6521 std_expand_builtin_va_start (valist, nextarg);
6522 return;
6525 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6526 f_fpr = TREE_CHAIN (f_gpr);
6527 f_ovf = TREE_CHAIN (f_fpr);
6528 f_sav = TREE_CHAIN (f_ovf);
6530 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6531 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6532 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6533 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6534 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6536 /* Count number of gp and fp argument registers used. */
6537 words = crtl->args.info.words;
6538 n_gpr = crtl->args.info.regno;
6539 n_fpr = crtl->args.info.sse_regno;
6541 if (cfun->va_list_gpr_size)
6543 type = TREE_TYPE (gpr);
6544 t = build2 (MODIFY_EXPR, type,
6545 gpr, build_int_cst (type, n_gpr * 8));
6546 TREE_SIDE_EFFECTS (t) = 1;
6547 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6550 if (TARGET_SSE && cfun->va_list_fpr_size)
6552 type = TREE_TYPE (fpr);
6553 t = build2 (MODIFY_EXPR, type, fpr,
6554 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6555 TREE_SIDE_EFFECTS (t) = 1;
6556 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6559 /* Find the overflow area. */
6560 type = TREE_TYPE (ovf);
6561 t = make_tree (type, crtl->args.internal_arg_pointer);
6562 if (words != 0)
6563 t = build2 (POINTER_PLUS_EXPR, type, t,
6564 size_int (words * UNITS_PER_WORD));
6565 t = build2 (MODIFY_EXPR, type, ovf, t);
6566 TREE_SIDE_EFFECTS (t) = 1;
6567 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6569 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6571 /* Find the register save area.
6572 The function prologue saves it right above the stack frame. */
6573 type = TREE_TYPE (sav);
6574 t = make_tree (type, frame_pointer_rtx);
6575 if (!ix86_varargs_gpr_size)
6576 t = build2 (POINTER_PLUS_EXPR, type, t,
6577 size_int (-8 * X86_64_REGPARM_MAX));
6578 t = build2 (MODIFY_EXPR, type, sav, t);
6579 TREE_SIDE_EFFECTS (t) = 1;
6580 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
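/* As an example, in a function declared f (int i, ...), va_start
   leaves gp_offset == 8 (one GPR consumed by the named argument),
   fp_offset == 48 (no named SSE arguments), overflow_arg_area pointing
   just past any named stack words, and reg_save_area pointing at the
   block described above.  */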
6584 /* Implement va_arg. */
6586 static tree
6587 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6588 gimple_seq *post_p)
6590 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
6591 tree f_gpr, f_fpr, f_ovf, f_sav;
6592 tree gpr, fpr, ovf, sav, t;
6593 int size, rsize;
6594 tree lab_false, lab_over = NULL_TREE;
6595 tree addr, t2;
6596 rtx container;
6597 int indirect_p = 0;
6598 tree ptrtype;
6599 enum machine_mode nat_mode;
6600 int arg_boundary;
6602 /* Only 64bit target needs something special. */
6603 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6604 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6606 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6607 f_fpr = TREE_CHAIN (f_gpr);
6608 f_ovf = TREE_CHAIN (f_fpr);
6609 f_sav = TREE_CHAIN (f_ovf);
6611 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
6612 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
6613 valist = build_va_arg_indirect_ref (valist);
6614 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6615 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6616 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6618 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6619 if (indirect_p)
6620 type = build_pointer_type (type);
6621 size = int_size_in_bytes (type);
6622 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6624 nat_mode = type_natural_mode (type);
6625 switch (nat_mode)
6627 case V8SFmode:
6628 case V8SImode:
6629 case V32QImode:
6630 case V16HImode:
6631 case V4DFmode:
6632 case V4DImode:
6633 /* Unnamed 256bit vector mode parameters are passed on stack. */
6634 if (ix86_cfun_abi () == SYSV_ABI)
6636 container = NULL;
6637 break;
6640 default:
6641 container = construct_container (nat_mode, TYPE_MODE (type),
6642 type, 0, X86_64_REGPARM_MAX,
6643 X86_64_SSE_REGPARM_MAX, intreg,
6645 break;
6648 /* Pull the value out of the saved registers. */
6650 addr = create_tmp_var (ptr_type_node, "addr");
6651 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
6653 if (container)
6655 int needed_intregs, needed_sseregs;
6656 bool need_temp;
6657 tree int_addr, sse_addr;
6659 lab_false = create_artificial_label ();
6660 lab_over = create_artificial_label ();
6662 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
6664 need_temp = (!REG_P (container)
6665 && ((needed_intregs && TYPE_ALIGN (type) > 64)
6666 || TYPE_ALIGN (type) > 128));
6668 /* If we are passing a structure, verify that it is a consecutive block
6669 in the register save area. If not, we need to do moves. */
6670 if (!need_temp && !REG_P (container))
6672 /* Verify that all registers are strictly consecutive. */
6673 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
6675 int i;
6677 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
6679 rtx slot = XVECEXP (container, 0, i);
6680 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
6681 || INTVAL (XEXP (slot, 1)) != i * 16)
6682 need_temp = 1;
6685 else
6687 int i;
6689 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
6691 rtx slot = XVECEXP (container, 0, i);
6692 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
6693 || INTVAL (XEXP (slot, 1)) != i * 8)
6694 need_temp = 1;
6698 if (!need_temp)
6700 int_addr = addr;
6701 sse_addr = addr;
6703 else
6705 int_addr = create_tmp_var (ptr_type_node, "int_addr");
6706 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
6707 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
6708 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
6711 /* First ensure that we fit completely in registers. */
6712 if (needed_intregs)
6714 t = build_int_cst (TREE_TYPE (gpr),
6715 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
6716 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
6717 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
6718 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
6719 gimplify_and_add (t, pre_p);
6721 if (needed_sseregs)
6723 t = build_int_cst (TREE_TYPE (fpr),
6724 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
6725 + X86_64_REGPARM_MAX * 8);
6726 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
6727 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
6728 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
6729 gimplify_and_add (t, pre_p);
6732 /* Compute index to start of area used for integer regs. */
6733 if (needed_intregs)
6735 /* int_addr = gpr + sav; */
6736 t = fold_convert (sizetype, gpr);
6737 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
6738 gimplify_assign (int_addr, t, pre_p);
6740 if (needed_sseregs)
6742 /* sse_addr = fpr + sav; */
6743 t = fold_convert (sizetype, fpr);
6744 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
6745 gimplify_assign (sse_addr, t, pre_p);
6747 if (need_temp)
6749 int i;
6750 tree temp = create_tmp_var (type, "va_arg_tmp");
6752 /* addr = &temp; */
6753 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
6754 gimplify_assign (addr, t, pre_p);
6756 for (i = 0; i < XVECLEN (container, 0); i++)
6758 rtx slot = XVECEXP (container, 0, i);
6759 rtx reg = XEXP (slot, 0);
6760 enum machine_mode mode = GET_MODE (reg);
6761 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
6762 tree addr_type = build_pointer_type (piece_type);
6763 tree daddr_type = build_pointer_type_for_mode (piece_type,
6764 ptr_mode, true);
6765 tree src_addr, src;
6766 int src_offset;
6767 tree dest_addr, dest;
6769 if (SSE_REGNO_P (REGNO (reg)))
6771 src_addr = sse_addr;
6772 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
6774 else
6776 src_addr = int_addr;
6777 src_offset = REGNO (reg) * 8;
6779 src_addr = fold_convert (addr_type, src_addr);
6780 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
6781 size_int (src_offset));
6782 src = build_va_arg_indirect_ref (src_addr);
6784 dest_addr = fold_convert (daddr_type, addr);
6785 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
6786 size_int (INTVAL (XEXP (slot, 1))));
6787 dest = build_va_arg_indirect_ref (dest_addr);
6789 gimplify_assign (dest, src, pre_p);
6793 if (needed_intregs)
6795 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
6796 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
6797 gimplify_assign (gpr, t, pre_p);
6800 if (needed_sseregs)
6802 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
6803 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
6804 gimplify_assign (fpr, t, pre_p);
6807 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
6809 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
6812 /* ... otherwise out of the overflow area. */
6814 /* When the caller aligns a parameter on the stack, any alignment
6815 beyond MAX_SUPPORTED_STACK_ALIGNMENT is capped at
6816 MAX_SUPPORTED_STACK_ALIGNMENT. Match the callee with the caller
6817 here. */
6818 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
6819 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
6820 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
6822 /* Care for on-stack alignment if needed. */
6823 if (arg_boundary <= 64
6824 || integer_zerop (TYPE_SIZE (type)))
6825 t = ovf;
6826 else
6828 HOST_WIDE_INT align = arg_boundary / 8;
6829 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
6830 size_int (align - 1));
6831 t = fold_convert (sizetype, t);
6832 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
6833 size_int (-align));
6834 t = fold_convert (TREE_TYPE (ovf), t);
6836 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
6837 gimplify_assign (addr, t, pre_p);
6839 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
6840 size_int (rsize * UNITS_PER_WORD));
6841 gimplify_assign (unshare_expr (ovf), t, pre_p);
6843 if (container)
6844 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
6846 ptrtype = build_pointer_type (type);
6847 addr = fold_convert (ptrtype, addr);
6849 if (indirect_p)
6850 addr = build_va_arg_indirect_ref (addr);
6851 return build_va_arg_indirect_ref (addr);
6854 /* Return nonzero if OPNUM's MEM should be matched
6855 in movabs* patterns. */
6858 ix86_check_movabs (rtx insn, int opnum)
6860 rtx set, mem;
6862 set = PATTERN (insn);
6863 if (GET_CODE (set) == PARALLEL)
6864 set = XVECEXP (set, 0, 0);
6865 gcc_assert (GET_CODE (set) == SET);
6866 mem = XEXP (set, opnum);
6867 while (GET_CODE (mem) == SUBREG)
6868 mem = SUBREG_REG (mem);
6869 gcc_assert (MEM_P (mem));
6870 return (volatile_ok || !MEM_VOLATILE_P (mem));
6873 /* Initialize the table of extra 80387 mathematical constants. */
6875 static void
6876 init_ext_80387_constants (void)
6878 static const char * cst[5] =
6880 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
6881 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
6882 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
6883 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
6884 "3.1415926535897932385128089594061862044", /* 4: fldpi */
6886 int i;
6888 for (i = 0; i < 5; i++)
6890 real_from_string (&ext_80387_constants_table[i], cst[i]);
6891 /* Ensure each constant is rounded to XFmode precision. */
6892 real_convert (&ext_80387_constants_table[i],
6893 XFmode, &ext_80387_constants_table[i]);
6896 ext_80387_constants_init = 1;
6899 /* Return true if the constant is something that can be loaded with
6900 a special instruction. */
6903 standard_80387_constant_p (rtx x)
6905 enum machine_mode mode = GET_MODE (x);
6907 REAL_VALUE_TYPE r;
6909 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
6910 return -1;
6912 if (x == CONST0_RTX (mode))
6913 return 1;
6914 if (x == CONST1_RTX (mode))
6915 return 2;
6917 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6919 /* For XFmode constants, try to find a special 80387 instruction when
6920 optimizing for size or on those CPUs that benefit from them. */
6921 if (mode == XFmode
6922 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
6924 int i;
6926 if (! ext_80387_constants_init)
6927 init_ext_80387_constants ();
6929 for (i = 0; i < 5; i++)
6930 if (real_identical (&r, &ext_80387_constants_table[i]))
6931 return i + 3;
6934 /* A load of the constant -0.0 or -1.0 will be split into an
6935 fldz;fchs or fld1;fchs sequence. */
6936 if (real_isnegzero (&r))
6937 return 8;
6938 if (real_identical (&r, &dconstm1))
6939 return 9;
6941 return 0;
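/* E.g. an XFmode CONST0_RTX yields 1 (fldz), CONST1_RTX yields 2
   (fld1), and with the extended constants enabled pi yields 7 (fldpi);
   standard_80387_constant_opcode below maps these return values to
   the opcodes.  */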
6944 /* Return the opcode of the special instruction to be used to load
6945 the constant X. */
6947 const char *
6948 standard_80387_constant_opcode (rtx x)
6950 switch (standard_80387_constant_p (x))
6952 case 1:
6953 return "fldz";
6954 case 2:
6955 return "fld1";
6956 case 3:
6957 return "fldlg2";
6958 case 4:
6959 return "fldln2";
6960 case 5:
6961 return "fldl2e";
6962 case 6:
6963 return "fldl2t";
6964 case 7:
6965 return "fldpi";
6966 case 8:
6967 case 9:
6968 return "#";
6969 default:
6970 gcc_unreachable ();
6974 /* Return the CONST_DOUBLE representing the 80387 constant that is
6975 loaded by the specified special instruction. The argument IDX
6976 matches the return value from standard_80387_constant_p. */
6979 standard_80387_constant_rtx (int idx)
6981 int i;
6983 if (! ext_80387_constants_init)
6984 init_ext_80387_constants ();
6986 switch (idx)
6988 case 3:
6989 case 4:
6990 case 5:
6991 case 6:
6992 case 7:
6993 i = idx - 3;
6994 break;
6996 default:
6997 gcc_unreachable ();
7000 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7001 XFmode);
7004 /* Return 1 if mode is a valid mode for sse. */
7005 static int
7006 standard_sse_mode_p (enum machine_mode mode)
7008 switch (mode)
7010 case V16QImode:
7011 case V8HImode:
7012 case V4SImode:
7013 case V2DImode:
7014 case V4SFmode:
7015 case V2DFmode:
7016 return 1;
7018 default:
7019 return 0;
7023 /* Return 1 if X is all 0s. For all 1s, return 2 if X is in 128bit
7024 SSE modes and SSE2 is enabled, return 3 if X is in 256bit AVX
7025 modes and AVX is enabled. */
7028 standard_sse_constant_p (rtx x)
7030 enum machine_mode mode = GET_MODE (x);
7032 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7033 return 1;
7034 if (vector_all_ones_operand (x, mode))
7036 if (standard_sse_mode_p (mode))
7037 return TARGET_SSE2 ? 2 : -2;
7038 else if (VALID_AVX256_REG_MODE (mode))
7039 return TARGET_AVX ? 3 : -3;
7042 return 0;
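/* E.g. CONST0_RTX (V4SFmode) yields 1 and is materialized by a
   register-clearing xorps/vxorps, while an all-ones V4SImode vector
   yields 2 under SSE2 and is materialized by pcmpeqd; see
   standard_sse_constant_opcode below.  */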
7045 /* Return the opcode of the special instruction to be used to load
7046 the constant X. */
7048 const char *
7049 standard_sse_constant_opcode (rtx insn, rtx x)
7051 switch (standard_sse_constant_p (x))
7053 case 1:
7054 switch (get_attr_mode (insn))
7056 case MODE_V4SF:
7057 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7058 case MODE_V2DF:
7059 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7060 case MODE_TI:
7061 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7062 case MODE_V8SF:
7063 return "vxorps\t%x0, %x0, %x0";
7064 case MODE_V4DF:
7065 return "vxorpd\t%x0, %x0, %x0";
7066 case MODE_OI:
7067 return "vpxor\t%x0, %x0, %x0";
7068 default:
7069 gcc_unreachable ();
7071 case 2:
7072 if (TARGET_AVX)
7073 switch (get_attr_mode (insn))
7075 case MODE_V4SF:
7076 case MODE_V2DF:
7077 case MODE_TI:
7078 return "vpcmpeqd\t%0, %0, %0";
7079 break;
7080 default:
7081 gcc_unreachable ();
7083 else
7084 return "pcmpeqd\t%0, %0";
7086 gcc_unreachable ();
7089 /* Returns 1 if OP contains a symbol reference. */
7092 symbolic_reference_mentioned_p (rtx op)
7094 const char *fmt;
7095 int i;
7097 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7098 return 1;
7100 fmt = GET_RTX_FORMAT (GET_CODE (op));
7101 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7103 if (fmt[i] == 'E')
7105 int j;
7107 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7108 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7109 return 1;
7112 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7113 return 1;
7116 return 0;
7119 /* Return 1 if it is appropriate to emit `ret' instructions in the
7120 body of a function. Do this only if the epilogue is simple, needing a
7121 couple of insns. Prior to reloading, we can't tell how many registers
7122 must be saved, so return 0 then. Return 0 if there is no frame
7123 marker to de-allocate. */
7126 ix86_can_use_return_insn_p (void)
7128 struct ix86_frame frame;
7130 if (! reload_completed || frame_pointer_needed)
7131 return 0;
7133 /* Don't allow popping more than 32768 bytes of arguments, since
7134 that's all we can do with one instruction. */
7135 if (crtl->args.pops_args
7136 && crtl->args.size >= 32768)
7137 return 0;
7139 ix86_compute_frame_layout (&frame);
7140 return frame.to_allocate == 0 && frame.nregs == 0;
7143 /* Value should be nonzero if functions must have frame pointers.
7144 Zero means the frame pointer need not be set up (and parms may
7145 be accessed via the stack pointer) in functions that seem suitable. */
7148 ix86_frame_pointer_required (void)
7150 /* If we accessed previous frames, then the generated code expects
7151 to be able to access the saved ebp value in our frame. */
7152 if (cfun->machine->accesses_prev_frame)
7153 return 1;
7155 /* Several x86 OSes need a frame pointer for other reasons,
7156 usually pertaining to setjmp. */
7157 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7158 return 1;
7160 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7161 the frame pointer by default. Turn it back on now if we've not
7162 got a leaf function. */
7163 if (TARGET_OMIT_LEAF_FRAME_POINTER
7164 && (!current_function_is_leaf
7165 || ix86_current_function_calls_tls_descriptor))
7166 return 1;
7168 if (crtl->profile)
7169 return 1;
7171 return 0;
7174 /* Record that the current function accesses previous call frames. */
7176 void
7177 ix86_setup_frame_addresses (void)
7179 cfun->machine->accesses_prev_frame = 1;
7182 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7183 # define USE_HIDDEN_LINKONCE 1
7184 #else
7185 # define USE_HIDDEN_LINKONCE 0
7186 #endif
7188 static int pic_labels_used;
7190 /* Fills in the label name that should be used for a pc thunk for
7191 the given register. */
7193 static void
7194 get_pc_thunk_name (char name[32], unsigned int regno)
7196 gcc_assert (!TARGET_64BIT);
7198 if (USE_HIDDEN_LINKONCE)
7199 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7200 else
7201 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
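/* E.g. for the %ebx PIC register this produces the familiar
   "__i686.get_pc_thunk.bx" when hidden linkonce sections are usable;
   ix86_file_end below emits the matching thunk bodies.  */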
7205 /* This function generates code for -fpic that loads %ebx with
7206 the return address of the caller and then returns. */
7208 void
7209 ix86_file_end (void)
7211 rtx xops[2];
7212 int regno;
7214 for (regno = 0; regno < 8; ++regno)
7216 char name[32];
7218 if (! ((pic_labels_used >> regno) & 1))
7219 continue;
7221 get_pc_thunk_name (name, regno);
7223 #if TARGET_MACHO
7224 if (TARGET_MACHO)
7226 switch_to_section (darwin_sections[text_coal_section]);
7227 fputs ("\t.weak_definition\t", asm_out_file);
7228 assemble_name (asm_out_file, name);
7229 fputs ("\n\t.private_extern\t", asm_out_file);
7230 assemble_name (asm_out_file, name);
7231 fputs ("\n", asm_out_file);
7232 ASM_OUTPUT_LABEL (asm_out_file, name);
7234 else
7235 #endif
7236 if (USE_HIDDEN_LINKONCE)
7238 tree decl;
7240 decl = build_decl (FUNCTION_DECL, get_identifier (name),
7241 error_mark_node);
7242 TREE_PUBLIC (decl) = 1;
7243 TREE_STATIC (decl) = 1;
7244 DECL_ONE_ONLY (decl) = 1;
7246 (*targetm.asm_out.unique_section) (decl, 0);
7247 switch_to_section (get_named_section (decl, NULL, 0));
7249 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7250 fputs ("\t.hidden\t", asm_out_file);
7251 assemble_name (asm_out_file, name);
7252 fputc ('\n', asm_out_file);
7253 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7255 else
7257 switch_to_section (text_section);
7258 ASM_OUTPUT_LABEL (asm_out_file, name);
7261 xops[0] = gen_rtx_REG (Pmode, regno);
7262 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7263 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7264 output_asm_insn ("ret", xops);
7267 if (NEED_INDICATE_EXEC_STACK)
7268 file_end_indicate_exec_stack ();
7271 /* Emit code for the SET_GOT patterns. */
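/* For illustration, on ia32 with -fpic and without deep branch
   prediction the emitted sequence is essentially (a sketch):

	call	1f
   1:	popl	%reg
	addl	$_GLOBAL_OFFSET_TABLE_+[.-1b], %reg

   With deep branch prediction the call/pop pair is replaced by a call
   to a pc thunk, which keeps the return-address predictor balanced.  */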
7273 const char *
7274 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7276 rtx xops[3];
7278 xops[0] = dest;
7280 if (TARGET_VXWORKS_RTP && flag_pic)
7282 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7283 xops[2] = gen_rtx_MEM (Pmode,
7284 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7285 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7287 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7288 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7289 an unadorned address. */
7290 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7291 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7292 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7293 return "";
7296 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7298 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7300 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7302 if (!flag_pic)
7303 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7304 else
7305 output_asm_insn ("call\t%a2", xops);
7307 #if TARGET_MACHO
7308 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7309 is what will be referenced by the Mach-O PIC subsystem. */
7310 if (!label)
7311 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7312 #endif
7314 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7315 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7317 if (flag_pic)
7318 output_asm_insn ("pop%z0\t%0", xops);
7320 else
7322 char name[32];
7323 get_pc_thunk_name (name, REGNO (dest));
7324 pic_labels_used |= 1 << REGNO (dest);
7326 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7327 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7328 output_asm_insn ("call\t%X2", xops);
7329 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7330 is what will be referenced by the Mach-O PIC subsystem. */
7331 #if TARGET_MACHO
7332 if (!label)
7333 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7334 else
7335 targetm.asm_out.internal_label (asm_out_file, "L",
7336 CODE_LABEL_NUMBER (label));
7337 #endif
7340 if (TARGET_MACHO)
7341 return "";
7343 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7344 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7345 else
7346 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7348 return "";
7351 /* Generate a "push" pattern for input ARG. */
7353 static rtx
7354 gen_push (rtx arg)
7356 return gen_rtx_SET (VOIDmode,
7357 gen_rtx_MEM (Pmode,
7358 gen_rtx_PRE_DEC (Pmode,
7359 stack_pointer_rtx)),
7360 arg);
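/* For illustration, gen_push applied to the 32-bit frame pointer
   register yields RTL of the shape (a sketch):

	(set (mem:SI (pre_dec:SI (reg:SI sp)))
	     (reg:SI bp))

   which matches the backend's push patterns.  */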
7363 /* Return >= 0 if there is an unused call-clobbered register available
7364 for the entire function. */
7366 static unsigned int
7367 ix86_select_alt_pic_regnum (void)
7369 if (current_function_is_leaf && !crtl->profile
7370 && !ix86_current_function_calls_tls_descriptor)
7372 int i, drap;
7373 /* Can't use the same register for both PIC and DRAP. */
7374 if (crtl->drap_reg)
7375 drap = REGNO (crtl->drap_reg);
7376 else
7377 drap = -1;
7378 for (i = 2; i >= 0; --i)
7379 if (i != drap && !df_regs_ever_live_p (i))
7380 return i;
7383 return INVALID_REGNUM;
7386 /* Return 1 if we need to save REGNO. */
7387 static int
7388 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7390 if (pic_offset_table_rtx
7391 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7392 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7393 || crtl->profile
7394 || crtl->calls_eh_return
7395 || crtl->uses_const_pool))
7397 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7398 return 0;
7399 return 1;
7402 if (crtl->calls_eh_return && maybe_eh_return)
7404 unsigned i;
7405 for (i = 0; ; i++)
7407 unsigned test = EH_RETURN_DATA_REGNO (i);
7408 if (test == INVALID_REGNUM)
7409 break;
7410 if (test == regno)
7411 return 1;
7415 if (crtl->drap_reg
7416 && regno == REGNO (crtl->drap_reg))
7417 return 1;
7419 return (df_regs_ever_live_p (regno)
7420 && !call_used_regs[regno]
7421 && !fixed_regs[regno]
7422 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7425 /* Return number of registers to be saved on the stack. */
7427 static int
7428 ix86_nsaved_regs (void)
7430 int nregs = 0;
7431 int regno;
7433 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
7434 if (ix86_save_reg (regno, true))
7435 nregs++;
7436 return nregs;
7439 /* Given FROM and TO register numbers, say whether this elimination is
7440 allowed. If stack alignment is needed, we can only replace argument
7441 pointer with hard frame pointer, or replace frame pointer with stack
7442 pointer. Otherwise, frame pointer elimination is automatically
7443 handled and all other eliminations are valid. */
7446 ix86_can_eliminate (int from, int to)
7448 if (stack_realign_fp)
7449 return ((from == ARG_POINTER_REGNUM
7450 && to == HARD_FRAME_POINTER_REGNUM)
7451 || (from == FRAME_POINTER_REGNUM
7452 && to == STACK_POINTER_REGNUM));
7453 else
7454 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : 1;
7457 /* Return the offset between two registers, one to be eliminated, and the other
7458 its replacement, at the start of a routine. */
7460 HOST_WIDE_INT
7461 ix86_initial_elimination_offset (int from, int to)
7463 struct ix86_frame frame;
7464 ix86_compute_frame_layout (&frame);
7466 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7467 return frame.hard_frame_pointer_offset;
7468 else if (from == FRAME_POINTER_REGNUM
7469 && to == HARD_FRAME_POINTER_REGNUM)
7470 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7471 else
7473 gcc_assert (to == STACK_POINTER_REGNUM);
7475 if (from == ARG_POINTER_REGNUM)
7476 return frame.stack_pointer_offset;
7478 gcc_assert (from == FRAME_POINTER_REGNUM);
7479 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7483 /* Fill the ix86_frame structure with the frame layout of the function currently being compiled. */
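/* A sketch of the layout computed below, from higher toward lower
   addresses (any of the padding areas may be empty):

	return address
	saved frame pointer	<- hard_frame_pointer_offset
	register save area
	va-arg save area
	padding1
	local variables		<- frame_pointer_offset
	outgoing arguments
	padding2		<- stack_pointer_offset  */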
7485 static void
7486 ix86_compute_frame_layout (struct ix86_frame *frame)
7488 HOST_WIDE_INT total_size;
7489 unsigned int stack_alignment_needed;
7490 HOST_WIDE_INT offset;
7491 unsigned int preferred_alignment;
7492 HOST_WIDE_INT size = get_frame_size ();
7494 frame->nregs = ix86_nsaved_regs ();
7495 total_size = size;
7497 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7498 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7500 gcc_assert (!size || stack_alignment_needed);
7501 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
7502 gcc_assert (preferred_alignment <= stack_alignment_needed);
7504 /* During reload iteration the number of registers saved can change.
7505 Recompute the value as needed. Do not recompute when the number of
7506 registers didn't change, as reload does multiple calls to the
7507 function and does not expect the decision to change within a single iteration. */
7508 if (!optimize_function_for_size_p (cfun)
7509 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
7511 int count = frame->nregs;
7513 cfun->machine->use_fast_prologue_epilogue_nregs = count;
7514 /* The fast prologue uses move instead of push to save registers. This
7515 is significantly longer, but also executes faster as modern hardware
7516 can execute the moves in parallel, but can't do that for push/pop.
7518 Be careful about choosing which prologue to emit: when the function
7519 takes many instructions to execute, we may use the slow version, as
7520 well as when the function is known to be outside a hot spot (this is
7521 known with feedback only). Weight the size of the function by the
7522 number of registers to save, as it is cheap to use one or two push
7523 instructions but very slow to use many of them. */
7524 if (count)
7525 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
7526 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
7527 || (flag_branch_probabilities
7528 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
7529 cfun->machine->use_fast_prologue_epilogue = false;
7530 else
7531 cfun->machine->use_fast_prologue_epilogue
7532 = !expensive_function_p (count);
7534 if (TARGET_PROLOGUE_USING_MOVE
7535 && cfun->machine->use_fast_prologue_epilogue)
7536 frame->save_regs_using_mov = true;
7537 else
7538 frame->save_regs_using_mov = false;
7541 /* Skip return address and saved base pointer. */
7542 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
7544 frame->hard_frame_pointer_offset = offset;
7546 /* Round the offset up to the needed alignment, because the realigned
7547 frame starts from here. */
7548 if (stack_realign_fp)
7549 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
7551 /* Register save area */
7552 offset += frame->nregs * UNITS_PER_WORD;
7554 /* Va-arg area */
7555 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
7556 offset += frame->va_arg_size;
7558 /* Align start of frame for local function. */
7559 frame->padding1 = ((offset + stack_alignment_needed - 1)
7560 & -stack_alignment_needed) - offset;
7562 offset += frame->padding1;
7564 /* Frame pointer points here. */
7565 frame->frame_pointer_offset = offset;
7567 offset += size;
7569 /* Add the outgoing arguments area. This can be skipped if we
7570 eliminated all the function calls as dead code.
7571 Skipping is however impossible when the function calls alloca, as the
7572 alloca expander assumes that the last crtl->outgoing_args_size
7573 bytes of the stack frame are unused. */
7574 if (ACCUMULATE_OUTGOING_ARGS
7575 && (!current_function_is_leaf || cfun->calls_alloca
7576 || ix86_current_function_calls_tls_descriptor))
7578 offset += crtl->outgoing_args_size;
7579 frame->outgoing_arguments_size = crtl->outgoing_args_size;
7581 else
7582 frame->outgoing_arguments_size = 0;
7584 /* Align stack boundary. Only needed if we're calling another function
7585 or using alloca. */
7586 if (!current_function_is_leaf || cfun->calls_alloca
7587 || ix86_current_function_calls_tls_descriptor)
7588 frame->padding2 = ((offset + preferred_alignment - 1)
7589 & -preferred_alignment) - offset;
7590 else
7591 frame->padding2 = 0;
7593 offset += frame->padding2;
7595 /* We've reached end of stack frame. */
7596 frame->stack_pointer_offset = offset;
7598 /* Size prologue needs to allocate. */
7599 frame->to_allocate =
7600 (size + frame->padding1 + frame->padding2
7601 + frame->outgoing_arguments_size + frame->va_arg_size);
7603 if ((!frame->to_allocate && frame->nregs <= 1)
7604 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
7605 frame->save_regs_using_mov = false;
7607 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && current_function_sp_is_unchanging
7608 && current_function_is_leaf
7609 && !ix86_current_function_calls_tls_descriptor)
7611 frame->red_zone_size = frame->to_allocate;
7612 if (frame->save_regs_using_mov)
7613 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
7614 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
7615 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
7617 else
7618 frame->red_zone_size = 0;
7619 frame->to_allocate -= frame->red_zone_size;
7620 frame->stack_pointer_offset -= frame->red_zone_size;
7621 #if 0
7622 fprintf (stderr, "\n");
7623 fprintf (stderr, "nregs: %ld\n", (long)frame->nregs);
7624 fprintf (stderr, "size: %ld\n", (long)size);
7625 fprintf (stderr, "alignment1: %ld\n", (long)stack_alignment_needed);
7626 fprintf (stderr, "padding1: %ld\n", (long)frame->padding1);
7627 fprintf (stderr, "va_arg: %ld\n", (long)frame->va_arg_size);
7628 fprintf (stderr, "padding2: %ld\n", (long)frame->padding2);
7629 fprintf (stderr, "to_allocate: %ld\n", (long)frame->to_allocate);
7630 fprintf (stderr, "red_zone_size: %ld\n", (long)frame->red_zone_size);
7631 fprintf (stderr, "frame_pointer_offset: %ld\n", (long)frame->frame_pointer_offset);
7632 fprintf (stderr, "hard_frame_pointer_offset: %ld\n",
7633 (long)frame->hard_frame_pointer_offset);
7634 fprintf (stderr, "stack_pointer_offset: %ld\n", (long)frame->stack_pointer_offset);
7635 fprintf (stderr, "current_function_is_leaf: %ld\n", (long)current_function_is_leaf);
7636 fprintf (stderr, "cfun->calls_alloca: %ld\n", (long)cfun->calls_alloca);
7637 fprintf (stderr, "x86_current_function_calls_tls_descriptor: %ld\n", (long)ix86_current_function_calls_tls_descriptor);
7638 #endif
7641 /* Emit code to save registers in the prologue. */
7643 static void
7644 ix86_emit_save_regs (void)
7646 unsigned int regno;
7647 rtx insn;
7649 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
7650 if (ix86_save_reg (regno, true))
7652 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
7653 RTX_FRAME_RELATED_P (insn) = 1;
7657 /* Emit code to save registers using MOV insns. The first register
7658 is stored at POINTER + OFFSET. */
7659 static void
7660 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
7662 unsigned int regno;
7663 rtx insn;
7665 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7666 if (ix86_save_reg (regno, true))
7668 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
7669 Pmode, offset),
7670 gen_rtx_REG (Pmode, regno));
7671 RTX_FRAME_RELATED_P (insn) = 1;
7672 offset += UNITS_PER_WORD;
7676 /* Expand a prologue or epilogue stack adjustment.
7677 The pattern exists to put a dependency on all ebp-based memory accesses.
7678 STYLE should be negative if instructions should be marked as frame
7679 related, zero if the %r11 register is live and cannot be freely used,
7680 and positive otherwise. */
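/* For illustration, a small adjustment emits the equivalent of

	add	$N, %sp

   while a 64-bit offset that does not fit in a sign-extended 32-bit
   immediate is first materialized in %r11 (a sketch; the actual
   patterns also carry the memory dependency mentioned above):

	mov	$N, %r11
	add	%r11, %rsp  */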
7682 static void
7683 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
7685 rtx insn;
7687 if (! TARGET_64BIT)
7688 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
7689 else if (x86_64_immediate_operand (offset, DImode))
7690 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
7691 else
7693 rtx r11;
7694 /* r11 is used by indirect sibcall return as well, set before the
7695 epilogue and used after the epilogue. ATM indirect sibcall
7696 shouldn't be used together with huge frame sizes in one
7697 function because of the frame_size check in sibcall.c. */
7698 gcc_assert (style);
7699 r11 = gen_rtx_REG (DImode, R11_REG);
7700 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
7701 if (style < 0)
7702 RTX_FRAME_RELATED_P (insn) = 1;
7703 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
7704 offset));
7706 if (style < 0)
7707 RTX_FRAME_RELATED_P (insn) = 1;
7710 /* Find an available register to be used as the dynamic realign
7711 argument pointer register. Such a register will be written in the
7712 prologue and used at the beginning of the body, so it must not be
7713 1. a parameter passing register.
7714 2. the GOT pointer.
7715 We reuse the static-chain register if it is available. Otherwise, we
7716 use DI for i386 and R13 for x86-64. We chose R13 since it has
7717 a shorter encoding.
7719 Return: the regno of the chosen register. */
7721 static unsigned int
7722 find_drap_reg (void)
7724 tree decl = cfun->decl;
7726 if (TARGET_64BIT)
7728 /* Use R13 for a nested function or a function that needs a static
7729 chain. Since a function with a tail call may use any caller-saved
7730 registers in its epilogue, DRAP must not use a caller-saved
7731 register in such a case. */
7732 if ((decl_function_context (decl)
7733 && !DECL_NO_STATIC_CHAIN (decl))
7734 || crtl->tail_call_emit)
7735 return R13_REG;
7737 return R10_REG;
7739 else
7741 /* Use DI for a nested function or a function that needs a static
7742 chain. Since a function with a tail call may use any caller-saved
7743 registers in its epilogue, DRAP must not use a caller-saved
7744 register in such a case. */
7745 if ((decl_function_context (decl)
7746 && !DECL_NO_STATIC_CHAIN (decl))
7747 || crtl->tail_call_emit)
7748 return DI_REG;
7750 /* Reuse static chain register if it isn't used for parameter
7751 passing. */
7752 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
7753 && !lookup_attribute ("fastcall",
7754 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
7755 return CX_REG;
7756 else
7757 return DI_REG;
7761 /* Update incoming stack boundary and estimated stack alignment. */
7763 static void
7764 ix86_update_stack_boundary (void)
7766 /* Prefer the one specified at command line. */
7767 ix86_incoming_stack_boundary
7768 = (ix86_user_incoming_stack_boundary
7769 ? ix86_user_incoming_stack_boundary
7770 : ix86_default_incoming_stack_boundary);
7772 /* Incoming stack alignment can be changed on individual functions
7773 via force_align_arg_pointer attribute. We use the smallest
7774 incoming stack boundary. */
7775 if (ix86_incoming_stack_boundary > MIN_STACK_BOUNDARY
7776 && lookup_attribute (ix86_force_align_arg_pointer_string,
7777 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
7778 ix86_incoming_stack_boundary = MIN_STACK_BOUNDARY;
7780 /* The incoming stack frame has to be aligned at least at
7781 parm_stack_boundary. */
7782 if (ix86_incoming_stack_boundary < crtl->parm_stack_boundary)
7783 ix86_incoming_stack_boundary = crtl->parm_stack_boundary;
7785 /* Stack at entrance of main is aligned by runtime. We use the
7786 smallest incoming stack boundary. */
7787 if (ix86_incoming_stack_boundary > MAIN_STACK_BOUNDARY
7788 && DECL_NAME (current_function_decl)
7789 && MAIN_NAME_P (DECL_NAME (current_function_decl))
7790 && DECL_FILE_SCOPE_P (current_function_decl))
7791 ix86_incoming_stack_boundary = MAIN_STACK_BOUNDARY;
7793 /* x86_64 vararg needs 16byte stack alignment for register save
7794 area. */
7795 if (TARGET_64BIT
7796 && cfun->stdarg
7797 && crtl->stack_alignment_estimated < 128)
7798 crtl->stack_alignment_estimated = 128;
7801 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
7802 needed or an rtx for DRAP otherwise. */
7804 static rtx
7805 ix86_get_drap_rtx (void)
7807 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
7808 crtl->need_drap = true;
7810 if (stack_realign_drap)
7812 /* Assign DRAP to vDRAP and return vDRAP. */
7813 unsigned int regno = find_drap_reg ();
7814 rtx drap_vreg;
7815 rtx arg_ptr;
7816 rtx seq, insn;
7818 arg_ptr = gen_rtx_REG (Pmode, regno);
7819 crtl->drap_reg = arg_ptr;
7821 start_sequence ();
7822 drap_vreg = copy_to_reg (arg_ptr);
7823 seq = get_insns ();
7824 end_sequence ();
7826 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
7827 RTX_FRAME_RELATED_P (insn) = 1;
7828 return drap_vreg;
7830 else
7831 return NULL;
7834 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
7836 static rtx
7837 ix86_internal_arg_pointer (void)
7839 return virtual_incoming_args_rtx;
7842 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
7843 This is called from dwarf2out.c to emit call frame instructions
7844 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
7845 static void
7846 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
7848 rtx unspec = SET_SRC (pattern);
7849 gcc_assert (GET_CODE (unspec) == UNSPEC);
7851 switch (index)
7853 case UNSPEC_REG_SAVE:
7854 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
7855 SET_DEST (pattern));
7856 break;
7857 case UNSPEC_DEF_CFA:
7858 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
7859 INTVAL (XVECEXP (unspec, 0, 0)));
7860 break;
7861 default:
7862 gcc_unreachable ();
7866 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
7867 to be generated in correct form. */
7868 static void
7869 ix86_finalize_stack_realign_flags (void)
7871 /* Check whether stack realignment is really needed after reload, and
7872 store the result in crtl. */
7873 unsigned int incoming_stack_boundary
7874 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
7875 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
7876 unsigned int stack_realign = (incoming_stack_boundary
7877 < (current_function_is_leaf
7878 ? crtl->max_used_stack_slot_alignment
7879 : crtl->stack_alignment_needed));
7881 if (crtl->stack_realign_finalized)
7883 /* After stack_realign_needed is finalized, we can no longer
7884 change it. */
7885 gcc_assert (crtl->stack_realign_needed == stack_realign);
7887 else
7889 crtl->stack_realign_needed = stack_realign;
7890 crtl->stack_realign_finalized = true;
7894 /* Expand the prologue into a bunch of separate insns. */
7896 void
7897 ix86_expand_prologue (void)
7899 rtx insn;
7900 bool pic_reg_used;
7901 struct ix86_frame frame;
7902 HOST_WIDE_INT allocate;
7904 ix86_finalize_stack_realign_flags ();
7906 /* DRAP should not coexist with stack_realign_fp */
7907 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
7909 ix86_compute_frame_layout (&frame);
7911 /* Emit prologue code to adjust stack alignment and set up DRAP, in
7912 case DRAP is needed and stack realignment is really needed after reload. */
7913 if (crtl->drap_reg && crtl->stack_realign_needed)
7915 rtx x, y;
7916 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
7917 int param_ptr_offset = (call_used_regs[REGNO (crtl->drap_reg)]
7918 ? 0 : UNITS_PER_WORD);
7920 gcc_assert (stack_realign_drap);
7922 /* Grab the argument pointer. */
7923 x = plus_constant (stack_pointer_rtx,
7924 (UNITS_PER_WORD + param_ptr_offset));
7925 y = crtl->drap_reg;
7927 /* Only need to push the parameter pointer reg if it is a
7928 call-preserved (not call_used) register. */
7929 if (!call_used_regs[REGNO (crtl->drap_reg)])
7931 /* Push arg pointer reg */
7932 insn = emit_insn (gen_push (y));
7933 RTX_FRAME_RELATED_P (insn) = 1;
7936 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
7937 RTX_FRAME_RELATED_P (insn) = 1;
7939 /* Align the stack. */
7940 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
7941 stack_pointer_rtx,
7942 GEN_INT (-align_bytes)));
7943 RTX_FRAME_RELATED_P (insn) = 1;
7945 /* Replicate the return address on the stack so that return
7946 address can be reached via (argp - 1) slot. This is needed
7947 to implement macro RETURN_ADDR_RTX and intrinsic function
7948 expand_builtin_return_addr etc. */
7949 x = crtl->drap_reg;
7950 x = gen_frame_mem (Pmode,
7951 plus_constant (x, -UNITS_PER_WORD));
7952 insn = emit_insn (gen_push (x));
7953 RTX_FRAME_RELATED_P (insn) = 1;
7956 /* Note: AT&T enter does NOT have reversed args. Enter is probably
7957 slower on all targets. Also sdb doesn't like it. */
7959 if (frame_pointer_needed)
7961 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
7962 RTX_FRAME_RELATED_P (insn) = 1;
7964 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7965 RTX_FRAME_RELATED_P (insn) = 1;
7968 if (stack_realign_fp)
7970 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
7971 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
7973 /* Align the stack. */
7974 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
7975 stack_pointer_rtx,
7976 GEN_INT (-align_bytes)));
7977 RTX_FRAME_RELATED_P (insn) = 1;
7980 allocate = frame.to_allocate;
7982 if (!frame.save_regs_using_mov)
7983 ix86_emit_save_regs ();
7984 else
7985 allocate += frame.nregs * UNITS_PER_WORD;
7987 /* When using the red zone we may start register saving before
7988 allocating the stack frame, saving one cycle of the prologue.
7989 However, we avoid doing this if we will have to probe the stack,
7990 since at least on x86_64 the stack probe can turn into a call that
7991 clobbers a red zone location. */
7992 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
7993 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
7994 ix86_emit_save_regs_using_mov ((frame_pointer_needed
7995 && !crtl->stack_realign_needed)
7996 ? hard_frame_pointer_rtx
7997 : stack_pointer_rtx,
7998 -frame.nregs * UNITS_PER_WORD);
8000 if (allocate == 0)
8002 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8003 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8004 GEN_INT (-allocate), -1);
8005 else
8007 /* Only valid for Win32. */
8008 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8009 bool eax_live;
8010 rtx t;
8012 gcc_assert (!TARGET_64BIT || cfun->machine->call_abi == MS_ABI);
8014 if (cfun->machine->call_abi == MS_ABI)
8015 eax_live = false;
8016 else
8017 eax_live = ix86_eax_live_at_start_p ();
8019 if (eax_live)
8021 emit_insn (gen_push (eax));
8022 allocate -= UNITS_PER_WORD;
8025 emit_move_insn (eax, GEN_INT (allocate));
8027 if (TARGET_64BIT)
8028 insn = gen_allocate_stack_worker_64 (eax, eax);
8029 else
8030 insn = gen_allocate_stack_worker_32 (eax, eax);
8031 insn = emit_insn (insn);
8032 RTX_FRAME_RELATED_P (insn) = 1;
8033 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8034 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8035 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8036 t, REG_NOTES (insn));
8038 if (eax_live)
8040 if (frame_pointer_needed)
8041 t = plus_constant (hard_frame_pointer_rtx,
8042 allocate
8043 - frame.to_allocate
8044 - frame.nregs * UNITS_PER_WORD);
8045 else
8046 t = plus_constant (stack_pointer_rtx, allocate);
8047 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8051 if (frame.save_regs_using_mov
8052 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8053 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8055 if (!frame_pointer_needed
8056 || !frame.to_allocate
8057 || crtl->stack_realign_needed)
8058 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8059 frame.to_allocate);
8060 else
8061 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8062 -frame.nregs * UNITS_PER_WORD);
8065 pic_reg_used = false;
8066 if (pic_offset_table_rtx
8067 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8068 || crtl->profile))
8070 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8072 if (alt_pic_reg_used != INVALID_REGNUM)
8073 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8075 pic_reg_used = true;
8078 if (pic_reg_used)
8080 if (TARGET_64BIT)
8082 if (ix86_cmodel == CM_LARGE_PIC)
8084 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8085 rtx label = gen_label_rtx ();
8086 emit_label (label);
8087 LABEL_PRESERVE_P (label) = 1;
8088 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8089 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8090 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8091 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8092 pic_offset_table_rtx, tmp_reg));
8094 else
8095 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8097 else
8098 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8101 /* Prevent function calls from being scheduled before the call to mcount.
8102 In the pic_reg_used case, make sure that the got load isn't deleted. */
8103 if (crtl->profile)
8105 if (pic_reg_used)
8106 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8107 emit_insn (gen_blockage ());
8110 if (crtl->drap_reg && !crtl->stack_realign_needed)
8112 /* vDRAP is set up, but after reload it turns out stack realignment
8113 isn't necessary; here we emit the prologue to set up DRAP
8114 without the stack realignment adjustment. */
8115 int drap_bp_offset = UNITS_PER_WORD * 2;
8116 rtx x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8117 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8120 /* Emit cld instruction if stringops are used in the function. */
8121 if (TARGET_CLD && ix86_current_function_needs_cld)
8122 emit_insn (gen_cld ());
8125 /* Emit code to restore saved registers using MOV insns. First register
8126 is restored from POINTER + OFFSET. */
8127 static void
8128 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8129 int maybe_eh_return)
8131 int regno;
8132 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8134 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8135 if (ix86_save_reg (regno, maybe_eh_return))
8137 /* Ensure that adjust_address won't be forced to produce pointer
8138 out of range allowed by x86-64 instruction set. */
8139 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8141 rtx r11;
8143 r11 = gen_rtx_REG (DImode, R11_REG);
8144 emit_move_insn (r11, GEN_INT (offset));
8145 emit_insn (gen_adddi3 (r11, r11, pointer));
8146 base_address = gen_rtx_MEM (Pmode, r11);
8147 offset = 0;
8149 emit_move_insn (gen_rtx_REG (Pmode, regno),
8150 adjust_address (base_address, Pmode, offset));
8151 offset += UNITS_PER_WORD;
8155 /* Restore function stack, frame, and registers. */
8157 void
8158 ix86_expand_epilogue (int style)
8160 int regno;
8161 int sp_valid;
8162 struct ix86_frame frame;
8163 HOST_WIDE_INT offset;
8165 ix86_finalize_stack_realign_flags ();
8167 /* When stack is realigned, SP must be valid. */
8168 sp_valid = (!frame_pointer_needed
8169 || current_function_sp_is_unchanging
8170 || stack_realign_fp);
8172 ix86_compute_frame_layout (&frame);
8174 /* Calculate start of saved registers relative to ebp. Special care
8175 must be taken for the normal return case of a function using
8176 eh_return: the eax and edx registers are marked as saved, but not
8177 restored along this path. */
8178 offset = frame.nregs;
8179 if (crtl->calls_eh_return && style != 2)
8180 offset -= 2;
8181 offset *= -UNITS_PER_WORD;
8183 /* If we're only restoring one register and sp is not valid, then
8184 use a move instruction to restore the register, since it's
8185 less work than reloading sp and popping the register.
8187 The default code results in a stack adjustment using an add/lea
8188 instruction, while this code results in a LEAVE instruction (or
8189 discrete equivalent), so it is profitable in some other cases as
8190 well, especially when there are no registers to restore. We also
8191 use this code when TARGET_USE_LEAVE and there is exactly one
8192 register to pop. This heuristic may need some tuning in future. */
8193 if ((!sp_valid && frame.nregs <= 1)
8194 || (TARGET_EPILOGUE_USING_MOVE
8195 && cfun->machine->use_fast_prologue_epilogue
8196 && (frame.nregs > 1 || frame.to_allocate))
8197 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
8198 || (frame_pointer_needed && TARGET_USE_LEAVE
8199 && cfun->machine->use_fast_prologue_epilogue
8200 && frame.nregs == 1)
8201 || crtl->calls_eh_return)
8203 /* Restore registers. We can use ebp or esp to address the memory
8204 locations. If both are available, default to ebp, since offsets
8205 are known to be small. The only exception is esp pointing directly
8206 to the end of the block of saved registers, where we may simplify
8207 the addressing mode.
8209 If we are realigning the stack with bp and sp, the register restores
8210 can't be addressed by bp; sp must be used instead. */
8212 if (!frame_pointer_needed
8213 || (sp_valid && !frame.to_allocate)
8214 || stack_realign_fp)
8215 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
8216 frame.to_allocate, style == 2);
8217 else
8218 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
8219 offset, style == 2);
8221 /* eh_return epilogues need %ecx added to the stack pointer. */
8222 if (style == 2)
8224 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
8226 /* Stack align doesn't work with eh_return. */
8227 gcc_assert (!crtl->stack_realign_needed);
8229 if (frame_pointer_needed)
8231 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
8232 tmp = plus_constant (tmp, UNITS_PER_WORD);
8233 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
8235 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
8236 emit_move_insn (hard_frame_pointer_rtx, tmp);
8238 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
8239 const0_rtx, style);
8241 else
8243 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
8244 tmp = plus_constant (tmp, (frame.to_allocate
8245 + frame.nregs * UNITS_PER_WORD));
8246 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
8249 else if (!frame_pointer_needed)
8250 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8251 GEN_INT (frame.to_allocate
8252 + frame.nregs * UNITS_PER_WORD),
8253 style);
8254 /* If not an i386, mov & pop is faster than "leave". */
8255 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
8256 || !cfun->machine->use_fast_prologue_epilogue)
8257 emit_insn ((*ix86_gen_leave) ());
8258 else
8260 pro_epilogue_adjust_stack (stack_pointer_rtx,
8261 hard_frame_pointer_rtx,
8262 const0_rtx, style);
8264 emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8267 else
8269 /* The first step is to deallocate the stack frame so that we can
8270 pop the registers.
8272 If we realign the stack with the frame pointer, then the stack
8273 pointer can't be recovered via lea $offset(%bp), %sp, because
8274 there is a padding area between bp and sp for the realignment.
8275 "add $to_allocate, %sp" must be used instead. */
8276 if (!sp_valid)
8278 gcc_assert (frame_pointer_needed);
8279 gcc_assert (!stack_realign_fp);
8280 pro_epilogue_adjust_stack (stack_pointer_rtx,
8281 hard_frame_pointer_rtx,
8282 GEN_INT (offset), style);
8284 else if (frame.to_allocate)
8285 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8286 GEN_INT (frame.to_allocate), style);
8288 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8289 if (ix86_save_reg (regno, false))
8290 emit_insn ((*ix86_gen_pop1) (gen_rtx_REG (Pmode, regno)));
8291 if (frame_pointer_needed)
8293 /* Leave results in shorter dependency chains on CPUs that are
8294 able to grok it fast. */
8295 if (TARGET_USE_LEAVE)
8296 emit_insn ((*ix86_gen_leave) ());
8297 else
8299 /* If stack realignment really happened, the stack pointer must
8300 be recovered from the hard frame pointer before popping it,
8301 unless we use leave. */
8302 if (stack_realign_fp)
8303 pro_epilogue_adjust_stack (stack_pointer_rtx,
8304 hard_frame_pointer_rtx,
8305 const0_rtx, style);
8306 emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8311 if (crtl->drap_reg && crtl->stack_realign_needed)
8313 int param_ptr_offset = (call_used_regs[REGNO (crtl->drap_reg)]
8314 ? 0 : UNITS_PER_WORD);
8315 gcc_assert (stack_realign_drap);
8316 emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
8317 crtl->drap_reg,
8318 GEN_INT (-(UNITS_PER_WORD
8319 + param_ptr_offset))));
8320 if (!call_used_regs[REGNO (crtl->drap_reg)])
8321 emit_insn ((*ix86_gen_pop1) (crtl->drap_reg));
8325 /* Sibcall epilogues don't want a return instruction. */
8326 if (style == 0)
8327 return;
8329 if (crtl->args.pops_args && crtl->args.size)
8331 rtx popc = GEN_INT (crtl->args.pops_args);
8333 /* i386 can only pop 64K bytes. If asked to pop more, pop the
8334 return address, do an explicit add, and jump indirectly to the
8335 caller. */
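/* I.e. the emitted epilogue tail becomes, sketched in AT&T syntax
   with N standing for crtl->args.pops_args:

	popl	%ecx
	addl	$N, %esp
	jmp	*%ecx  */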
8337 if (crtl->args.pops_args >= 65536)
8339 rtx ecx = gen_rtx_REG (SImode, CX_REG);
8341 /* There is no "pascal" calling convention in any 64bit ABI. */
8342 gcc_assert (!TARGET_64BIT);
8344 emit_insn (gen_popsi1 (ecx));
8345 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
8346 emit_jump_insn (gen_return_indirect_internal (ecx));
8348 else
8349 emit_jump_insn (gen_return_pop_internal (popc));
8351 else
8352 emit_jump_insn (gen_return_internal ());
8355 /* Reset from the function's potential modifications. */
8357 static void
8358 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8359 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8361 if (pic_offset_table_rtx)
8362 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
8363 #if TARGET_MACHO
8364 /* Mach-O doesn't support labels at the end of objects, so if
8365 it looks like we might want one, insert a NOP. */
8367 rtx insn = get_last_insn ();
8368 while (insn
8369 && NOTE_P (insn)
8370 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
8371 insn = PREV_INSN (insn);
8372 if (insn
8373 && (LABEL_P (insn)
8374 || (NOTE_P (insn)
8375 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
8376 fputs ("\tnop\n", file);
8378 #endif
8382 /* Extract the parts of an RTL expression that is a valid memory address
8383 for an instruction. Return 0 if the structure of the address is
8384 grossly off. Return -1 if the address contains ASHIFT, so it is not
8385 strictly valid, but is still used for computing the length of the lea instruction. */
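/* For illustration, an address of the shape

	(plus:SI (reg:SI base)
		 (plus:SI (mult:SI (reg:SI index) (const_int 4))
			  (const_int 8)))

   decomposes into base BASE, index INDEX, scale 4 and displacement 8
   (a sketch; the exact nesting of the PLUS operands may vary).  */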
8388 ix86_decompose_address (rtx addr, struct ix86_address *out)
8390 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
8391 rtx base_reg, index_reg;
8392 HOST_WIDE_INT scale = 1;
8393 rtx scale_rtx = NULL_RTX;
8394 int retval = 1;
8395 enum ix86_address_seg seg = SEG_DEFAULT;
8397 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
8398 base = addr;
8399 else if (GET_CODE (addr) == PLUS)
8401 rtx addends[4], op;
8402 int n = 0, i;
8404 op = addr;
8407 if (n >= 4)
8408 return 0;
8409 addends[n++] = XEXP (op, 1);
8410 op = XEXP (op, 0);
8412 while (GET_CODE (op) == PLUS);
8413 if (n >= 4)
8414 return 0;
8415 addends[n] = op;
8417 for (i = n; i >= 0; --i)
8419 op = addends[i];
8420 switch (GET_CODE (op))
8422 case MULT:
8423 if (index)
8424 return 0;
8425 index = XEXP (op, 0);
8426 scale_rtx = XEXP (op, 1);
8427 break;
8429 case UNSPEC:
8430 if (XINT (op, 1) == UNSPEC_TP
8431 && TARGET_TLS_DIRECT_SEG_REFS
8432 && seg == SEG_DEFAULT)
8433 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
8434 else
8435 return 0;
8436 break;
8438 case REG:
8439 case SUBREG:
8440 if (!base)
8441 base = op;
8442 else if (!index)
8443 index = op;
8444 else
8445 return 0;
8446 break;
8448 case CONST:
8449 case CONST_INT:
8450 case SYMBOL_REF:
8451 case LABEL_REF:
8452 if (disp)
8453 return 0;
8454 disp = op;
8455 break;
8457 default:
8458 return 0;
8462 else if (GET_CODE (addr) == MULT)
8464 index = XEXP (addr, 0); /* index*scale */
8465 scale_rtx = XEXP (addr, 1);
8467 else if (GET_CODE (addr) == ASHIFT)
8469 rtx tmp;
8471 /* We're called for lea too, which implements ashift on occasion. */
8472 index = XEXP (addr, 0);
8473 tmp = XEXP (addr, 1);
8474 if (!CONST_INT_P (tmp))
8475 return 0;
8476 scale = INTVAL (tmp);
8477 if ((unsigned HOST_WIDE_INT) scale > 3)
8478 return 0;
8479 scale = 1 << scale;
8480 retval = -1;
8482 else
8483 disp = addr; /* displacement */
8485 /* Extract the integral value of scale. */
8486 if (scale_rtx)
8488 if (!CONST_INT_P (scale_rtx))
8489 return 0;
8490 scale = INTVAL (scale_rtx);
8493 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
8494 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
8496 /* Allow the arg pointer and the stack pointer as an index if there is no scaling. */
8497 if (base_reg && index_reg && scale == 1
8498 && (index_reg == arg_pointer_rtx
8499 || index_reg == frame_pointer_rtx
8500 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
8502 rtx tmp;
8503 tmp = base, base = index, index = tmp;
8504 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
8507 /* Special case: %ebp cannot be encoded as a base without a displacement. */
8508 if ((base_reg == hard_frame_pointer_rtx
8509 || base_reg == frame_pointer_rtx
8510 || base_reg == arg_pointer_rtx) && !disp)
8511 disp = const0_rtx;
8513 /* Special case: on K6, [%esi] makes the instruction vector decoded.
8514 Avoid this by transforming to [%esi+0].
8515 Reload calls address legitimization without cfun defined, so we need
8516 to test cfun for being non-NULL. */
8517 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
8518 && base_reg && !index_reg && !disp
8519 && REG_P (base_reg)
8520 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
8521 disp = const0_rtx;
8523 /* Special case: encode reg+reg instead of reg*2. */
8524 if (!base && index && scale && scale == 2)
8525 base = index, base_reg = index_reg, scale = 1;
8527 /* Special case: scaling cannot be encoded without base or displacement. */
8528 if (!base && !disp && index && scale != 1)
8529 disp = const0_rtx;
8531 out->base = base;
8532 out->index = index;
8533 out->disp = disp;
8534 out->scale = scale;
8535 out->seg = seg;
8537 return retval;
8540 /* Return the cost of the memory address x.
8541 For i386, it is better to use a complex address than let gcc copy
8542 the address into a reg and make a new pseudo. But not if the address
8543 requires two regs, since that would mean more pseudos with longer
8544 lifetimes. */
8545 static int
8546 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
8548 struct ix86_address parts;
8549 int cost = 1;
8550 int ok = ix86_decompose_address (x, &parts);
8552 gcc_assert (ok);
8554 if (parts.base && GET_CODE (parts.base) == SUBREG)
8555 parts.base = SUBREG_REG (parts.base);
8556 if (parts.index && GET_CODE (parts.index) == SUBREG)
8557 parts.index = SUBREG_REG (parts.index);
8559 /* Attempt to minimize number of registers in the address. */
8560 if ((parts.base
8561 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
8562 || (parts.index
8563 && (!REG_P (parts.index)
8564 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
8565 cost++;
8567 if (parts.base
8568 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
8569 && parts.index
8570 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
8571 && parts.base != parts.index)
8572 cost++;
8574 /* The AMD K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
8575 since its predecode logic can't detect the length of instructions
8576 and decoding degenerates to the vector decoder. Increase the cost of
8577 such addresses here. The penalty is at least 2 cycles. It may be
8578 worthwhile to split such addresses or even refuse them at all.
8580 The following addressing modes are affected:
8581 [base+scale*index]
8582 [scale*index+disp]
8583 [base+index]
8585 The first and last cases may be avoidable by explicitly coding the zero
8586 in the memory address, but I don't have an AMD K6 machine handy to check
8587 this theory. */
8589 if (TARGET_K6
8590 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
8591 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
8592 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
8593 cost += 10;
8595 return cost;
8598 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
8599 this is used to form addresses to local data when -fPIC is in
8600 use. */
8602 static bool
8603 darwin_local_data_pic (rtx disp)
8605 return (GET_CODE (disp) == UNSPEC
8606 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
8609 /* Determine if a given RTX is a valid constant. We already know this
8610 satisfies CONSTANT_P. */
8612 bool
8613 legitimate_constant_p (rtx x)
8615 switch (GET_CODE (x))
8617 case CONST:
8618 x = XEXP (x, 0);
8620 if (GET_CODE (x) == PLUS)
8622 if (!CONST_INT_P (XEXP (x, 1)))
8623 return false;
8624 x = XEXP (x, 0);
8627 if (TARGET_MACHO && darwin_local_data_pic (x))
8628 return true;
8630 /* Only some unspecs are valid as "constants". */
8631 if (GET_CODE (x) == UNSPEC)
8632 switch (XINT (x, 1))
8634 case UNSPEC_GOT:
8635 case UNSPEC_GOTOFF:
8636 case UNSPEC_PLTOFF:
8637 return TARGET_64BIT;
8638 case UNSPEC_TPOFF:
8639 case UNSPEC_NTPOFF:
8640 x = XVECEXP (x, 0, 0);
8641 return (GET_CODE (x) == SYMBOL_REF
8642 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
8643 case UNSPEC_DTPOFF:
8644 x = XVECEXP (x, 0, 0);
8645 return (GET_CODE (x) == SYMBOL_REF
8646 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
8647 default:
8648 return false;
8651 /* We must have drilled down to a symbol. */
8652 if (GET_CODE (x) == LABEL_REF)
8653 return true;
8654 if (GET_CODE (x) != SYMBOL_REF)
8655 return false;
8656 /* FALLTHRU */
8658 case SYMBOL_REF:
8659 /* TLS symbols are never valid. */
8660 if (SYMBOL_REF_TLS_MODEL (x))
8661 return false;
8663 /* DLLIMPORT symbols are never valid. */
8664 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
8665 && SYMBOL_REF_DLLIMPORT_P (x))
8666 return false;
8667 break;
8669 case CONST_DOUBLE:
8670 if (GET_MODE (x) == TImode
8671 && x != CONST0_RTX (TImode)
8672 && !TARGET_64BIT)
8673 return false;
8674 break;
8676 case CONST_VECTOR:
8677 if (x == CONST0_RTX (GET_MODE (x)))
8678 return true;
8679 return false;
8681 default:
8682 break;
8685 /* Otherwise we handle everything else in the move patterns. */
8686 return true;
8689 /* Determine if it's legal to put X into the constant pool. This
8690 is not possible for the address of thread-local symbols, which
8691 is checked above. */
8693 static bool
8694 ix86_cannot_force_const_mem (rtx x)
8696 /* We can always put integral constants and vectors in memory. */
8697 switch (GET_CODE (x))
8699 case CONST_INT:
8700 case CONST_DOUBLE:
8701 case CONST_VECTOR:
8702 return false;
8704 default:
8705 break;
8707 return !legitimate_constant_p (x);
8710 /* Determine if a given RTX is a valid constant address. */
8712 bool
8713 constant_address_p (rtx x)
8715 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
8718 /* Nonzero if the constant value X is a legitimate general operand
8719 when generating PIC code. It is given that flag_pic is on and
8720 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
8722 bool
8723 legitimate_pic_operand_p (rtx x)
8725 rtx inner;
8727 switch (GET_CODE (x))
8729 case CONST:
8730 inner = XEXP (x, 0);
8731 if (GET_CODE (inner) == PLUS
8732 && CONST_INT_P (XEXP (inner, 1)))
8733 inner = XEXP (inner, 0);
8735 /* Only some unspecs are valid as "constants". */
8736 if (GET_CODE (inner) == UNSPEC)
8737 switch (XINT (inner, 1))
8739 case UNSPEC_GOT:
8740 case UNSPEC_GOTOFF:
8741 case UNSPEC_PLTOFF:
8742 return TARGET_64BIT;
8743 case UNSPEC_TPOFF:
8744 x = XVECEXP (inner, 0, 0);
8745 return (GET_CODE (x) == SYMBOL_REF
8746 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
8747 case UNSPEC_MACHOPIC_OFFSET:
8748 return legitimate_pic_address_disp_p (x);
8749 default:
8750 return false;
8752 /* FALLTHRU */
8754 case SYMBOL_REF:
8755 case LABEL_REF:
8756 return legitimate_pic_address_disp_p (x);
8758 default:
8759 return true;
8763 /* Determine if a given CONST RTX is a valid memory displacement
8764 in PIC mode. */
8767 legitimate_pic_address_disp_p (rtx disp)
8769 bool saw_plus;
8771 /* In 64bit mode we can allow direct addresses of symbols and labels
8772 when they are not dynamic symbols. */
8773 if (TARGET_64BIT)
8775 rtx op0 = disp, op1;
8777 switch (GET_CODE (disp))
8779 case LABEL_REF:
8780 return true;
8782 case CONST:
8783 if (GET_CODE (XEXP (disp, 0)) != PLUS)
8784 break;
8785 op0 = XEXP (XEXP (disp, 0), 0);
8786 op1 = XEXP (XEXP (disp, 0), 1);
8787 if (!CONST_INT_P (op1)
8788 || INTVAL (op1) >= 16*1024*1024
8789 || INTVAL (op1) < -16*1024*1024)
8790 break;
8791 if (GET_CODE (op0) == LABEL_REF)
8792 return true;
8793 if (GET_CODE (op0) != SYMBOL_REF)
8794 break;
8795 /* FALLTHRU */
8797 case SYMBOL_REF:
8798 /* TLS references should always be enclosed in UNSPEC. */
8799 if (SYMBOL_REF_TLS_MODEL (op0))
8800 return false;
8801 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
8802 && ix86_cmodel != CM_LARGE_PIC)
8803 return true;
8804 break;
8806 default:
8807 break;
8810 if (GET_CODE (disp) != CONST)
8811 return 0;
8812 disp = XEXP (disp, 0);
8814 if (TARGET_64BIT)
8816 /* It is not safe to allow PLUS expressions here; doing so would limit
8817 the allowed distance of GOT table references. We should not need these anyway. */
8818 if (GET_CODE (disp) != UNSPEC
8819 || (XINT (disp, 1) != UNSPEC_GOTPCREL
8820 && XINT (disp, 1) != UNSPEC_GOTOFF
8821 && XINT (disp, 1) != UNSPEC_PLTOFF))
8822 return 0;
8824 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
8825 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
8826 return 0;
8827 return 1;
8830 saw_plus = false;
8831 if (GET_CODE (disp) == PLUS)
8833 if (!CONST_INT_P (XEXP (disp, 1)))
8834 return 0;
8835 disp = XEXP (disp, 0);
8836 saw_plus = true;
8839 if (TARGET_MACHO && darwin_local_data_pic (disp))
8840 return 1;
8842 if (GET_CODE (disp) != UNSPEC)
8843 return 0;
8845 switch (XINT (disp, 1))
8847 case UNSPEC_GOT:
8848 if (saw_plus)
8849 return false;
8850 /* We need to check for both symbols and labels because VxWorks loads
8851 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
8852 details. */
8853 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
8854 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
8855 case UNSPEC_GOTOFF:
8856 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
8857 While the ABI also specifies a 32bit relocation, we don't produce it
8858 in the small PIC model at all. */
8859 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
8860 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
8861 && !TARGET_64BIT)
8862 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
8863 return false;
8864 case UNSPEC_GOTTPOFF:
8865 case UNSPEC_GOTNTPOFF:
8866 case UNSPEC_INDNTPOFF:
8867 if (saw_plus)
8868 return false;
8869 disp = XVECEXP (disp, 0, 0);
8870 return (GET_CODE (disp) == SYMBOL_REF
8871 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
8872 case UNSPEC_NTPOFF:
8873 disp = XVECEXP (disp, 0, 0);
8874 return (GET_CODE (disp) == SYMBOL_REF
8875 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
8876 case UNSPEC_DTPOFF:
8877 disp = XVECEXP (disp, 0, 0);
8878 return (GET_CODE (disp) == SYMBOL_REF
8879 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
8882 return 0;
8885 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
8886 memory address for an instruction. The MODE argument is the machine mode
8887 for the MEM expression that wants to use this address.
8889 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
8890 convert common non-canonical forms to canonical form so that they will
8891 be recognized. */
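/* For example, (plus (mult (reg) (const_int 4)) (reg)) is the
   canonical form of a scaled index, while the equivalent
   (plus (ashift (reg) (const_int 2)) (reg)) is rejected here with
   "decomposition failed" (a sketch; see ix86_decompose_address).  */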
8894 legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
8895 rtx addr, int strict)
8897 struct ix86_address parts;
8898 rtx base, index, disp;
8899 HOST_WIDE_INT scale;
8900 const char *reason = NULL;
8901 rtx reason_rtx = NULL_RTX;
8903 if (ix86_decompose_address (addr, &parts) <= 0)
8905 reason = "decomposition failed";
8906 goto report_error;
8909 base = parts.base;
8910 index = parts.index;
8911 disp = parts.disp;
8912 scale = parts.scale;
8914 /* Validate base register.
8916 Don't allow SUBREG's that span more than a word here. It can lead to spill
8917 failures when the base is one word out of a two word structure, which is
8918 represented internally as a DImode int. */
8920 if (base)
8922 rtx reg;
8923 reason_rtx = base;
8925 if (REG_P (base))
8926 reg = base;
8927 else if (GET_CODE (base) == SUBREG
8928 && REG_P (SUBREG_REG (base))
8929 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
8930 <= UNITS_PER_WORD)
8931 reg = SUBREG_REG (base);
8932 else
8934 reason = "base is not a register";
8935 goto report_error;
8938 if (GET_MODE (base) != Pmode)
8940 reason = "base is not in Pmode";
8941 goto report_error;
8944 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
8945 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
8947 reason = "base is not valid";
8948 goto report_error;
8952 /* Validate index register.
8954 Don't allow SUBREG's that span more than a word here -- same as above. */
8956 if (index)
8958 rtx reg;
8959 reason_rtx = index;
8961 if (REG_P (index))
8962 reg = index;
8963 else if (GET_CODE (index) == SUBREG
8964 && REG_P (SUBREG_REG (index))
8965 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
8966 <= UNITS_PER_WORD)
8967 reg = SUBREG_REG (index);
8968 else
8970 reason = "index is not a register";
8971 goto report_error;
8974 if (GET_MODE (index) != Pmode)
8976 reason = "index is not in Pmode";
8977 goto report_error;
8980 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
8981 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
8983 reason = "index is not valid";
8984 goto report_error;
8988 /* Validate scale factor. */
8989 if (scale != 1)
8991 reason_rtx = GEN_INT (scale);
8992 if (!index)
8994 reason = "scale without index";
8995 goto report_error;
8998 if (scale != 2 && scale != 4 && scale != 8)
9000 reason = "scale is not a valid multiplier";
9001 goto report_error;
9005 /* Validate displacement. */
9006 if (disp)
9008 reason_rtx = disp;
9010 if (GET_CODE (disp) == CONST
9011 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9012 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9013 switch (XINT (XEXP (disp, 0), 1))
9015 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit when
9016 used. While the ABI also specifies 32bit relocations, we don't produce
9017 them at all and use IP-relative addressing instead. */
9018 case UNSPEC_GOT:
9019 case UNSPEC_GOTOFF:
9020 gcc_assert (flag_pic);
9021 if (!TARGET_64BIT)
9022 goto is_legitimate_pic;
9023 reason = "64bit address unspec";
9024 goto report_error;
9026 case UNSPEC_GOTPCREL:
9027 gcc_assert (flag_pic);
9028 goto is_legitimate_pic;
9030 case UNSPEC_GOTTPOFF:
9031 case UNSPEC_GOTNTPOFF:
9032 case UNSPEC_INDNTPOFF:
9033 case UNSPEC_NTPOFF:
9034 case UNSPEC_DTPOFF:
9035 break;
9037 default:
9038 reason = "invalid address unspec";
9039 goto report_error;
9042 else if (SYMBOLIC_CONST (disp)
9043 && (flag_pic
9044 || (TARGET_MACHO
9045 #if TARGET_MACHO
9046 && MACHOPIC_INDIRECT
9047 && !machopic_operand_p (disp)
9048 #endif
9052 is_legitimate_pic:
9053 if (TARGET_64BIT && (index || base))
9055 /* foo@dtpoff(%rX) is ok. */
9056 if (GET_CODE (disp) != CONST
9057 || GET_CODE (XEXP (disp, 0)) != PLUS
9058 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
9059 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
9060 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
9061 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
9063 reason = "non-constant pic memory reference";
9064 goto report_error;
9067 else if (! legitimate_pic_address_disp_p (disp))
9069 reason = "displacement is an invalid pic construct";
9070 goto report_error;
9073 /* This code used to verify that a symbolic pic displacement
9074 includes the pic_offset_table_rtx register.
9076 While this is a good idea, unfortunately these constructs may
9077 be created by the "adds using lea" optimization for incorrect
9078 code like:
9080 int a;
9081 int foo(int i)
9083 return *(&a+i);
9086 This code is nonsensical, but results in addressing the
9087 GOT table with a pic_offset_table_rtx base. We can't
9088 just refuse it easily, since it gets matched by the
9089 "addsi3" pattern, which later gets split to lea in the
9090 case where the output register differs from the input. While this
9091 could be handled by a separate addsi pattern for this case
9092 that never results in lea, disabling this test seems to be the
9093 easier and correct fix for the crash. */
9095 else if (GET_CODE (disp) != LABEL_REF
9096 && !CONST_INT_P (disp)
9097 && (GET_CODE (disp) != CONST
9098 || !legitimate_constant_p (disp))
9099 && (GET_CODE (disp) != SYMBOL_REF
9100 || !legitimate_constant_p (disp)))
9102 reason = "displacement is not constant";
9103 goto report_error;
9105 else if (TARGET_64BIT
9106 && !x86_64_immediate_operand (disp, VOIDmode))
9108 reason = "displacement is out of range";
9109 goto report_error;
9113 /* Everything looks valid. */
9114 return TRUE;
9116 report_error:
9117 return FALSE;
9120 /* Return a unique alias set for the GOT. */
9122 static alias_set_type
9123 ix86_GOT_alias_set (void)
9125 static alias_set_type set = -1;
9126 if (set == -1)
9127 set = new_alias_set ();
9128 return set;
9131 /* Return a legitimate reference for ORIG (an address) using the
9132 register REG. If REG is 0, a new pseudo is generated.
9134 There are two types of references that must be handled:
9136 1. Global data references must load the address from the GOT, via
9137 the PIC reg. An insn is emitted to do this load, and the reg is
9138 returned.
9140 2. Static data references, constant pool addresses, and code labels
9141 compute the address as an offset from the GOT, whose base is in
9142 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
9143 differentiate them from global data objects. The returned
9144 address is the PIC reg + an unspec constant.
9146 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
9147 reg also appears in the address. */
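/* For illustration, with -fpic on ia32 a global symbol x is loaded
   through the GOT, roughly

	movl	x@GOT(%ebx), %eax

   while a local symbol y is formed as an offset from the GOT base,

	leal	y@GOTOFF(%ebx), %eax

   (a sketch of the resulting assembly; x and y are placeholder names).  */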
9149 static rtx
9150 legitimize_pic_address (rtx orig, rtx reg)
9152 rtx addr = orig;
9153 rtx new_rtx = orig;
9154 rtx base;
9156 #if TARGET_MACHO
9157 if (TARGET_MACHO && !TARGET_64BIT)
9159 if (reg == 0)
9160 reg = gen_reg_rtx (Pmode);
9161 /* Use the generic Mach-O PIC machinery. */
9162 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
9164 #endif
9166 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
9167 new_rtx = addr;
9168 else if (TARGET_64BIT
9169 && ix86_cmodel != CM_SMALL_PIC
9170 && gotoff_operand (addr, Pmode))
9172 rtx tmpreg;
9173 /* This symbol may be referenced via a displacement from the PIC
9174 base address (@GOTOFF). */
9176 if (reload_in_progress)
9177 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9178 if (GET_CODE (addr) == CONST)
9179 addr = XEXP (addr, 0);
9180 if (GET_CODE (addr) == PLUS)
9182 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
9183 UNSPEC_GOTOFF);
9184 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
9186 else
9187 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
9188 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9189 if (!reg)
9190 tmpreg = gen_reg_rtx (Pmode);
9191 else
9192 tmpreg = reg;
9193 emit_move_insn (tmpreg, new_rtx);
9195 if (reg != 0)
9197 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
9198 tmpreg, 1, OPTAB_DIRECT);
9199 new_rtx = reg;
9201 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
9203 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
9205 /* This symbol may be referenced via a displacement from the PIC
9206 base address (@GOTOFF). */
9208 if (reload_in_progress)
9209 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9210 if (GET_CODE (addr) == CONST)
9211 addr = XEXP (addr, 0);
9212 if (GET_CODE (addr) == PLUS)
9214 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
9215 UNSPEC_GOTOFF);
9216 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
9218 else
9219 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
9220 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9221 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9223 if (reg != 0)
9225 emit_move_insn (reg, new_rtx);
9226 new_rtx = reg;
9229 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
9230 /* We can't use @GOTOFF for text labels on VxWorks;
9231 see gotoff_operand. */
9232 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
9234 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
9236 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
9237 return legitimize_dllimport_symbol (addr, true);
9238 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
9239 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
9240 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
9242 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
9243 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
9247 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
9249 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
9250 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9251 new_rtx = gen_const_mem (Pmode, new_rtx);
9252 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
9254 if (reg == 0)
9255 reg = gen_reg_rtx (Pmode);
9256 /* Use gen_movsi directly; otherwise the address is loaded
9257 into a register for CSE. We don't want to CSE these addresses;
9258 instead we CSE addresses from the GOT table, so skip this. */
9259 emit_insn (gen_movsi (reg, new_rtx));
9260 new_rtx = reg;
9262 else
9264 /* This symbol must be referenced via a load from the
9265 Global Offset Table (@GOT). */
9267 if (reload_in_progress)
9268 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9269 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
9270 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9271 if (TARGET_64BIT)
9272 new_rtx = force_reg (Pmode, new_rtx);
9273 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9274 new_rtx = gen_const_mem (Pmode, new_rtx);
9275 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
9277 if (reg == 0)
9278 reg = gen_reg_rtx (Pmode);
9279 emit_move_insn (reg, new_rtx);
9280 new_rtx = reg;
9283 else
9285 if (CONST_INT_P (addr)
9286 && !x86_64_immediate_operand (addr, VOIDmode))
9288 if (reg)
9290 emit_move_insn (reg, addr);
9291 new_rtx = reg;
9293 else
9294 new_rtx = force_reg (Pmode, addr);
9296 else if (GET_CODE (addr) == CONST)
9298 addr = XEXP (addr, 0);
9300 /* We must match the forms we generated earlier. Assume the only
9301 unspecs that can get here are ours. Not that we could do
9302 anything with them anyway.... */
9303 if (GET_CODE (addr) == UNSPEC
9304 || (GET_CODE (addr) == PLUS
9305 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
9306 return orig;
9307 gcc_assert (GET_CODE (addr) == PLUS);
9309 if (GET_CODE (addr) == PLUS)
9311 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
9313 /* Check first to see if this is a constant offset from a @GOTOFF
9314 symbol reference. */
9315 if (gotoff_operand (op0, Pmode)
9316 && CONST_INT_P (op1))
9318 if (!TARGET_64BIT)
9320 if (reload_in_progress)
9321 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9322 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
9323 UNSPEC_GOTOFF);
9324 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
9325 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9326 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9328 if (reg != 0)
9330 emit_move_insn (reg, new_rtx);
9331 new_rtx = reg;
9334 else
9336 if (INTVAL (op1) < -16*1024*1024
9337 || INTVAL (op1) >= 16*1024*1024)
9339 if (!x86_64_immediate_operand (op1, Pmode))
9340 op1 = force_reg (Pmode, op1);
9341 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
9345 else
9347 base = legitimize_pic_address (XEXP (addr, 0), reg);
9348 new_rtx = legitimize_pic_address (XEXP (addr, 1),
9349 base == reg ? NULL_RTX : reg);
9351 if (CONST_INT_P (new_rtx))
9352 new_rtx = plus_constant (base, INTVAL (new_rtx));
9353 else
9355 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
9357 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
9358 new_rtx = XEXP (new_rtx, 1);
9360 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
9365 return new_rtx;
9368 /* Load the thread pointer. If TO_REG is true, force it into a register. */
9370 static rtx
9371 get_thread_pointer (int to_reg)
9373 rtx tp, reg, insn;
9375 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
9376 if (!to_reg)
9377 return tp;
9379 reg = gen_reg_rtx (Pmode);
9380 insn = gen_rtx_SET (VOIDmode, reg, tp);
9381 insn = emit_insn (insn);
9383 return reg;
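/* On GNU/Linux targets, for instance, the UNSPEC_TP reference built
   above is printed as a load through the thread segment register,
   roughly "movl %gs:0, %reg" on ia32 and "movq %fs:0, %reg" on
   x86-64 (an illustrative sketch; the exact form comes from the
   i386.md patterns).  */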
9386 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
9387 false if we expect this to be used for a memory address and true if
9388 we expect to load the address into a register. */
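/* As a rough guide (illustrative only), these models map onto the
   usual ELF TLS sequences: global-dynamic calls __tls_get_addr on a
   @tlsgd operand, local-dynamic makes one such call for the module
   and then adds @dtpoff offsets, initial-exec loads a @gottpoff slot
   from the GOT, and local-exec adds a @tpoff constant directly to
   the thread pointer.  */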
9390 static rtx
9391 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
9393 rtx dest, base, off, pic, tp;
9394 int type;
9396 switch (model)
9398 case TLS_MODEL_GLOBAL_DYNAMIC:
9399 dest = gen_reg_rtx (Pmode);
9400 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
9402 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
9404 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
9406 start_sequence ();
9407 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
9408 insns = get_insns ();
9409 end_sequence ();
9411 RTL_CONST_CALL_P (insns) = 1;
9412 emit_libcall_block (insns, dest, rax, x);
9414 else if (TARGET_64BIT && TARGET_GNU2_TLS)
9415 emit_insn (gen_tls_global_dynamic_64 (dest, x));
9416 else
9417 emit_insn (gen_tls_global_dynamic_32 (dest, x));
9419 if (TARGET_GNU2_TLS)
9421 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
9423 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
9425 break;
9427 case TLS_MODEL_LOCAL_DYNAMIC:
9428 base = gen_reg_rtx (Pmode);
9429 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
9431 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
9433 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
9435 start_sequence ();
9436 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
9437 insns = get_insns ();
9438 end_sequence ();
9440 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
9441 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
9442 RTL_CONST_CALL_P (insns) = 1;
9443 emit_libcall_block (insns, base, rax, note);
9445 else if (TARGET_64BIT && TARGET_GNU2_TLS)
9446 emit_insn (gen_tls_local_dynamic_base_64 (base));
9447 else
9448 emit_insn (gen_tls_local_dynamic_base_32 (base));
9450 if (TARGET_GNU2_TLS)
9452 rtx x = ix86_tls_module_base ();
9454 set_unique_reg_note (get_last_insn (), REG_EQUIV,
9455 gen_rtx_MINUS (Pmode, x, tp));
9458 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
9459 off = gen_rtx_CONST (Pmode, off);
9461 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
9463 if (TARGET_GNU2_TLS)
9465 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
9467 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
9470 break;
9472 case TLS_MODEL_INITIAL_EXEC:
9473 if (TARGET_64BIT)
9475 pic = NULL;
9476 type = UNSPEC_GOTNTPOFF;
9478 else if (flag_pic)
9480 if (reload_in_progress)
9481 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9482 pic = pic_offset_table_rtx;
9483 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
9485 else if (!TARGET_ANY_GNU_TLS)
9487 pic = gen_reg_rtx (Pmode);
9488 emit_insn (gen_set_got (pic));
9489 type = UNSPEC_GOTTPOFF;
9491 else
9493 pic = NULL;
9494 type = UNSPEC_INDNTPOFF;
9497 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
9498 off = gen_rtx_CONST (Pmode, off);
9499 if (pic)
9500 off = gen_rtx_PLUS (Pmode, pic, off);
9501 off = gen_const_mem (Pmode, off);
9502 set_mem_alias_set (off, ix86_GOT_alias_set ());
9504 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9506 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
9507 off = force_reg (Pmode, off);
9508 return gen_rtx_PLUS (Pmode, base, off);
9510 else
9512 base = get_thread_pointer (true);
9513 dest = gen_reg_rtx (Pmode);
9514 emit_insn (gen_subsi3 (dest, base, off));
9516 break;
9518 case TLS_MODEL_LOCAL_EXEC:
9519 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
9520 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9521 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
9522 off = gen_rtx_CONST (Pmode, off);
9524 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9526 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
9527 return gen_rtx_PLUS (Pmode, base, off);
9529 else
9531 base = get_thread_pointer (true);
9532 dest = gen_reg_rtx (Pmode);
9533 emit_insn (gen_subsi3 (dest, base, off));
9535 break;
9537 default:
9538 gcc_unreachable ();
9541 return dest;
9544 /* Create or return the unique __imp_DECL dllimport symbol corresponding
9545 to symbol DECL. */
9547 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
9548 htab_t dllimport_map;
9550 static tree
9551 get_dllimport_decl (tree decl)
9553 struct tree_map *h, in;
9554 void **loc;
9555 const char *name;
9556 const char *prefix;
9557 size_t namelen, prefixlen;
9558 char *imp_name;
9559 tree to;
9560 rtx rtl;
9562 if (!dllimport_map)
9563 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
9565 in.hash = htab_hash_pointer (decl);
9566 in.base.from = decl;
9567 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
9568 h = (struct tree_map *) *loc;
9569 if (h)
9570 return h->to;
9572 *loc = h = GGC_NEW (struct tree_map);
9573 h->hash = in.hash;
9574 h->base.from = decl;
9575 h->to = to = build_decl (VAR_DECL, NULL, ptr_type_node);
9576 DECL_ARTIFICIAL (to) = 1;
9577 DECL_IGNORED_P (to) = 1;
9578 DECL_EXTERNAL (to) = 1;
9579 TREE_READONLY (to) = 1;
9581 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9582 name = targetm.strip_name_encoding (name);
9583 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
9584 ? "*__imp_" : "*__imp__";
9585 namelen = strlen (name);
9586 prefixlen = strlen (prefix);
9587 imp_name = (char *) alloca (namelen + prefixlen + 1);
9588 memcpy (imp_name, prefix, prefixlen);
9589 memcpy (imp_name + prefixlen, name, namelen + 1);
9591 name = ggc_alloc_string (imp_name, namelen + prefixlen);
9592 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
9593 SET_SYMBOL_REF_DECL (rtl, to);
9594 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
9596 rtl = gen_const_mem (Pmode, rtl);
9597 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
9599 SET_DECL_RTL (to, rtl);
9600 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
9602 return to;
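/* For example (a sketch), a decl assembled as "_func" on a target
   whose user label prefix is "_" is accessed through the read-only
   pointer "__imp__func" (the leading '*' in the prefix suppresses
   further decoration), presumably the import-table slot the linker
   fills in.  */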
9605 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
9606 true if we require the result be a register. */
9608 static rtx
9609 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
9611 tree imp_decl;
9612 rtx x;
9614 gcc_assert (SYMBOL_REF_DECL (symbol));
9615 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
9617 x = DECL_RTL (imp_decl);
9618 if (want_reg)
9619 x = force_reg (Pmode, x);
9620 return x;
9623 /* Try machine-dependent ways of modifying an illegitimate address
9624 to be legitimate. If we find one, return the new, valid address.
9625 This macro is used in only one place: `memory_address' in explow.c.
9627 OLDX is the address as it was before break_out_memory_refs was called.
9628 In some cases it is useful to look at this to decide what needs to be done.
9630 MODE and WIN are passed so that this macro can use
9631 GO_IF_LEGITIMATE_ADDRESS.
9633 It is always safe for this macro to do nothing. It exists to recognize
9634 opportunities to optimize the output.
9636 For the 80386, we handle X+REG by loading X into a register R and
9637 using R+REG. R will go in a general reg and indexing will be used.
9638 However, if REG is a broken-out memory address or multiplication,
9639 nothing needs to be done because REG can certainly go in a general reg.
9641 When -fpic is used, special handling is needed for symbolic references.
9642 See comments by legitimize_pic_address in i386.c for details. */
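/* E.g. (plus (symbol_ref "x") (reg A)) can become
   (plus (reg R) (reg A)) after loading "x" into R, which is a valid
   base+index form (an illustrative sketch).  */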
9644 rtx
9645 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
9647 int changed = 0;
9648 unsigned log;
9650 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
9651 if (log)
9652 return legitimize_tls_address (x, (enum tls_model) log, false);
9653 if (GET_CODE (x) == CONST
9654 && GET_CODE (XEXP (x, 0)) == PLUS
9655 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9656 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
9658 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
9659 (enum tls_model) log, false);
9660 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
9663 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
9665 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
9666 return legitimize_dllimport_symbol (x, true);
9667 if (GET_CODE (x) == CONST
9668 && GET_CODE (XEXP (x, 0)) == PLUS
9669 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9670 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
9672 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
9673 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
9677 if (flag_pic && SYMBOLIC_CONST (x))
9678 return legitimize_pic_address (x, 0);
9680 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
9681 if (GET_CODE (x) == ASHIFT
9682 && CONST_INT_P (XEXP (x, 1))
9683 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
9685 changed = 1;
9686 log = INTVAL (XEXP (x, 1));
9687 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
9688 GEN_INT (1 << log));
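/* For instance, (ashift (reg) (const_int 3)) becomes
   (mult (reg) (const_int 8)), the canonical form for a scaled
   index.  */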
9691 if (GET_CODE (x) == PLUS)
9693 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
9695 if (GET_CODE (XEXP (x, 0)) == ASHIFT
9696 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9697 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
9699 changed = 1;
9700 log = INTVAL (XEXP (XEXP (x, 0), 1));
9701 XEXP (x, 0) = gen_rtx_MULT (Pmode,
9702 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
9703 GEN_INT (1 << log));
9706 if (GET_CODE (XEXP (x, 1)) == ASHIFT
9707 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
9708 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
9710 changed = 1;
9711 log = INTVAL (XEXP (XEXP (x, 1), 1));
9712 XEXP (x, 1) = gen_rtx_MULT (Pmode,
9713 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
9714 GEN_INT (1 << log));
9717 /* Put multiply first if it isn't already. */
9718 if (GET_CODE (XEXP (x, 1)) == MULT)
9720 rtx tmp = XEXP (x, 0);
9721 XEXP (x, 0) = XEXP (x, 1);
9722 XEXP (x, 1) = tmp;
9723 changed = 1;
9726 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
9727 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
9728 created by virtual register instantiation, register elimination, and
9729 similar optimizations. */
9730 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
9732 changed = 1;
9733 x = gen_rtx_PLUS (Pmode,
9734 gen_rtx_PLUS (Pmode, XEXP (x, 0),
9735 XEXP (XEXP (x, 1), 0)),
9736 XEXP (XEXP (x, 1), 1));
9739 /* Canonicalize
9740 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
9741 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
9742 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
9743 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
9744 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
9745 && CONSTANT_P (XEXP (x, 1)))
9747 rtx constant;
9748 rtx other = NULL_RTX;
9750 if (CONST_INT_P (XEXP (x, 1)))
9752 constant = XEXP (x, 1);
9753 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
9755 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
9757 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
9758 other = XEXP (x, 1);
9760 else
9761 constant = 0;
9763 if (constant)
9765 changed = 1;
9766 x = gen_rtx_PLUS (Pmode,
9767 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
9768 XEXP (XEXP (XEXP (x, 0), 1), 0)),
9769 plus_constant (other, INTVAL (constant)));
9773 if (changed && legitimate_address_p (mode, x, FALSE))
9774 return x;
9776 if (GET_CODE (XEXP (x, 0)) == MULT)
9778 changed = 1;
9779 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
9782 if (GET_CODE (XEXP (x, 1)) == MULT)
9784 changed = 1;
9785 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
9788 if (changed
9789 && REG_P (XEXP (x, 1))
9790 && REG_P (XEXP (x, 0)))
9791 return x;
9793 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
9795 changed = 1;
9796 x = legitimize_pic_address (x, 0);
9799 if (changed && legitimate_address_p (mode, x, FALSE))
9800 return x;
9802 if (REG_P (XEXP (x, 0)))
9804 rtx temp = gen_reg_rtx (Pmode);
9805 rtx val = force_operand (XEXP (x, 1), temp);
9806 if (val != temp)
9807 emit_move_insn (temp, val);
9809 XEXP (x, 1) = temp;
9810 return x;
9813 else if (REG_P (XEXP (x, 1)))
9815 rtx temp = gen_reg_rtx (Pmode);
9816 rtx val = force_operand (XEXP (x, 0), temp);
9817 if (val != temp)
9818 emit_move_insn (temp, val);
9820 XEXP (x, 0) = temp;
9821 return x;
9825 return x;
9828 /* Print an integer constant expression in assembler syntax. Addition
9829 and subtraction are the only arithmetic that may appear in these
9830 expressions. FILE is the stdio stream to write to, X is the rtx, and
9831 CODE is the operand print code from the output string. */
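/* For example (a sketch), the local-data reference
   (const (plus (unspec [foo] UNSPEC_GOTOFF) (const_int 4)))
   is printed as "4+foo@GOTOFF"; the integer constant comes first
   because some assemblers require it (see the PLUS case below).  */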
9833 static void
9834 output_pic_addr_const (FILE *file, rtx x, int code)
9836 char buf[256];
9838 switch (GET_CODE (x))
9840 case PC:
9841 gcc_assert (flag_pic);
9842 putc ('.', file);
9843 break;
9845 case SYMBOL_REF:
9846 if (! TARGET_MACHO || TARGET_64BIT)
9847 output_addr_const (file, x);
9848 else
9850 const char *name = XSTR (x, 0);
9852 /* Mark the decl as referenced so that cgraph will
9853 output the function. */
9854 if (SYMBOL_REF_DECL (x))
9855 mark_decl_referenced (SYMBOL_REF_DECL (x));
9857 #if TARGET_MACHO
9858 if (MACHOPIC_INDIRECT
9859 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
9860 name = machopic_indirection_name (x, /*stub_p=*/true);
9861 #endif
9862 assemble_name (file, name);
9864 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
9865 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
9866 fputs ("@PLT", file);
9867 break;
9869 case LABEL_REF:
9870 x = XEXP (x, 0);
9871 /* FALLTHRU */
9872 case CODE_LABEL:
9873 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
9874 assemble_name (asm_out_file, buf);
9875 break;
9877 case CONST_INT:
9878 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
9879 break;
9881 case CONST:
9882 /* This used to output parentheses around the expression,
9883 but that does not work on the 386 (either ATT or BSD assembler). */
9884 output_pic_addr_const (file, XEXP (x, 0), code);
9885 break;
9887 case CONST_DOUBLE:
9888 if (GET_MODE (x) == VOIDmode)
9890 /* We can use %d if the number is <32 bits and positive. */
9891 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
9892 fprintf (file, "0x%lx%08lx",
9893 (unsigned long) CONST_DOUBLE_HIGH (x),
9894 (unsigned long) CONST_DOUBLE_LOW (x));
9895 else
9896 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
9898 else
9899 /* We can't handle floating point constants;
9900 PRINT_OPERAND must handle them. */
9901 output_operand_lossage ("floating constant misused");
9902 break;
9904 case PLUS:
9905 /* Some assemblers need integer constants to appear first. */
9906 if (CONST_INT_P (XEXP (x, 0)))
9908 output_pic_addr_const (file, XEXP (x, 0), code);
9909 putc ('+', file);
9910 output_pic_addr_const (file, XEXP (x, 1), code);
9912 else
9914 gcc_assert (CONST_INT_P (XEXP (x, 1)));
9915 output_pic_addr_const (file, XEXP (x, 1), code);
9916 putc ('+', file);
9917 output_pic_addr_const (file, XEXP (x, 0), code);
9919 break;
9921 case MINUS:
9922 if (!TARGET_MACHO)
9923 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
9924 output_pic_addr_const (file, XEXP (x, 0), code);
9925 putc ('-', file);
9926 output_pic_addr_const (file, XEXP (x, 1), code);
9927 if (!TARGET_MACHO)
9928 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
9929 break;
9931 case UNSPEC:
9932 gcc_assert (XVECLEN (x, 0) == 1);
9933 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
9934 switch (XINT (x, 1))
9936 case UNSPEC_GOT:
9937 fputs ("@GOT", file);
9938 break;
9939 case UNSPEC_GOTOFF:
9940 fputs ("@GOTOFF", file);
9941 break;
9942 case UNSPEC_PLTOFF:
9943 fputs ("@PLTOFF", file);
9944 break;
9945 case UNSPEC_GOTPCREL:
9946 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
9947 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
9948 break;
9949 case UNSPEC_GOTTPOFF:
9950 /* FIXME: This might be @TPOFF in Sun ld too. */
9951 fputs ("@GOTTPOFF", file);
9952 break;
9953 case UNSPEC_TPOFF:
9954 fputs ("@TPOFF", file);
9955 break;
9956 case UNSPEC_NTPOFF:
9957 if (TARGET_64BIT)
9958 fputs ("@TPOFF", file);
9959 else
9960 fputs ("@NTPOFF", file);
9961 break;
9962 case UNSPEC_DTPOFF:
9963 fputs ("@DTPOFF", file);
9964 break;
9965 case UNSPEC_GOTNTPOFF:
9966 if (TARGET_64BIT)
9967 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
9968 "@GOTTPOFF(%rip)": "@GOTTPOFF[rip]", file);
9969 else
9970 fputs ("@GOTNTPOFF", file);
9971 break;
9972 case UNSPEC_INDNTPOFF:
9973 fputs ("@INDNTPOFF", file);
9974 break;
9975 #if TARGET_MACHO
9976 case UNSPEC_MACHOPIC_OFFSET:
9977 putc ('-', file);
9978 machopic_output_function_base_name (file);
9979 break;
9980 #endif
9981 default:
9982 output_operand_lossage ("invalid UNSPEC as operand");
9983 break;
9985 break;
9987 default:
9988 output_operand_lossage ("invalid expression as operand");
9992 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
9993 We need to emit DTP-relative relocations. */
9995 static void ATTRIBUTE_UNUSED
9996 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
9998 fputs (ASM_LONG, file);
9999 output_addr_const (file, x);
10000 fputs ("@DTPOFF", file);
10001 switch (size)
10003 case 4:
10004 break;
10005 case 8:
10006 fputs (", 0", file);
10007 break;
10008 default:
10009 gcc_unreachable ();
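/* So for SIZE == 8 this emits, e.g., ".long sym@DTPOFF, 0" (a
   sketch assuming ASM_LONG is ".long"), writing the high word as
   zero on the assumption that the DTP-relative offset fits in 32
   bits.  */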
10013 /* Return true if X is a representation of the PIC register. This copes
10014 with calls from ix86_find_base_term, where the register might have
10015 been replaced by a cselib value. */
10017 static bool
10018 ix86_pic_register_p (rtx x)
10020 if (GET_CODE (x) == VALUE)
10021 return (pic_offset_table_rtx
10022 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10023 else
10024 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10027 /* In the name of slightly smaller debug output, and to cater to
10028 general assembler lossage, recognize PIC+GOTOFF and turn it back
10029 into a direct symbol reference.
10031 On Darwin, this is necessary to avoid a crash, because Darwin
10032 has a different PIC label for each routine but the DWARF debugging
10033 information is not associated with any particular routine, so it's
10034 necessary to remove references to the PIC label from RTL stored by
10035 the DWARF output code. */
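/* For example (a sketch),
   (plus (reg ebx) (const (unspec [foo] UNSPEC_GOTOFF)))
   is turned back into the plain (symbol_ref "foo").  */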
10037 static rtx
10038 ix86_delegitimize_address (rtx orig_x)
10040 rtx x = orig_x;
10041 /* reg_addend is NULL or a multiple of some register. */
10042 rtx reg_addend = NULL_RTX;
10043 /* const_addend is NULL or a const_int. */
10044 rtx const_addend = NULL_RTX;
10045 /* This is the result, or NULL. */
10046 rtx result = NULL_RTX;
10048 if (MEM_P (x))
10049 x = XEXP (x, 0);
10051 if (TARGET_64BIT)
10053 if (GET_CODE (x) != CONST
10054 || GET_CODE (XEXP (x, 0)) != UNSPEC
10055 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
10056 || !MEM_P (orig_x))
10057 return orig_x;
10058 return XVECEXP (XEXP (x, 0), 0, 0);
10061 if (GET_CODE (x) != PLUS
10062 || GET_CODE (XEXP (x, 1)) != CONST)
10063 return orig_x;
10065 if (ix86_pic_register_p (XEXP (x, 0)))
10066 /* %ebx + GOT/GOTOFF */
10068 else if (GET_CODE (XEXP (x, 0)) == PLUS)
10070 /* %ebx + %reg * scale + GOT/GOTOFF */
10071 reg_addend = XEXP (x, 0);
10072 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
10073 reg_addend = XEXP (reg_addend, 1);
10074 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
10075 reg_addend = XEXP (reg_addend, 0);
10076 else
10077 return orig_x;
10078 if (!REG_P (reg_addend)
10079 && GET_CODE (reg_addend) != MULT
10080 && GET_CODE (reg_addend) != ASHIFT)
10081 return orig_x;
10083 else
10084 return orig_x;
10086 x = XEXP (XEXP (x, 1), 0);
10087 if (GET_CODE (x) == PLUS
10088 && CONST_INT_P (XEXP (x, 1)))
10090 const_addend = XEXP (x, 1);
10091 x = XEXP (x, 0);
10094 if (GET_CODE (x) == UNSPEC
10095 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
10096 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
10097 result = XVECEXP (x, 0, 0);
10099 if (TARGET_MACHO && darwin_local_data_pic (x)
10100 && !MEM_P (orig_x))
10101 result = XVECEXP (x, 0, 0);
10103 if (! result)
10104 return orig_x;
10106 if (const_addend)
10107 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
10108 if (reg_addend)
10109 result = gen_rtx_PLUS (Pmode, reg_addend, result);
10110 return result;
10113 /* If X is a machine specific address (i.e. a symbol or label being
10114 referenced as a displacement from the GOT implemented using an
10115 UNSPEC), then return the base term. Otherwise return X. */
10117 rtx
10118 ix86_find_base_term (rtx x)
10120 rtx term;
10122 if (TARGET_64BIT)
10124 if (GET_CODE (x) != CONST)
10125 return x;
10126 term = XEXP (x, 0);
10127 if (GET_CODE (term) == PLUS
10128 && (CONST_INT_P (XEXP (term, 1))
10129 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
10130 term = XEXP (term, 0);
10131 if (GET_CODE (term) != UNSPEC
10132 || XINT (term, 1) != UNSPEC_GOTPCREL)
10133 return x;
10135 return XVECEXP (term, 0, 0);
10138 return ix86_delegitimize_address (x);
10141 static void
10142 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
10143 int fp, FILE *file)
10145 const char *suffix;
10147 if (mode == CCFPmode || mode == CCFPUmode)
10149 enum rtx_code second_code, bypass_code;
10150 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
10151 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
10152 code = ix86_fp_compare_code_to_integer (code);
10153 mode = CCmode;
10155 if (reverse)
10156 code = reverse_condition (code);
10158 switch (code)
10160 case EQ:
10161 switch (mode)
10163 case CCAmode:
10164 suffix = "a";
10165 break;
10167 case CCCmode:
10168 suffix = "c";
10169 break;
10171 case CCOmode:
10172 suffix = "o";
10173 break;
10175 case CCSmode:
10176 suffix = "s";
10177 break;
10179 default:
10180 suffix = "e";
10182 break;
10183 case NE:
10184 switch (mode)
10186 case CCAmode:
10187 suffix = "na";
10188 break;
10190 case CCCmode:
10191 suffix = "nc";
10192 break;
10194 case CCOmode:
10195 suffix = "no";
10196 break;
10198 case CCSmode:
10199 suffix = "ns";
10200 break;
10202 default:
10203 suffix = "ne";
10205 break;
10206 case GT:
10207 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
10208 suffix = "g";
10209 break;
10210 case GTU:
10211 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
10212 Those same assemblers have the same but opposite lossage on cmov. */
10213 if (mode == CCmode)
10214 suffix = fp ? "nbe" : "a";
10215 else if (mode == CCCmode)
10216 suffix = "b";
10217 else
10218 gcc_unreachable ();
10219 break;
10220 case LT:
10221 switch (mode)
10223 case CCNOmode:
10224 case CCGOCmode:
10225 suffix = "s";
10226 break;
10228 case CCmode:
10229 case CCGCmode:
10230 suffix = "l";
10231 break;
10233 default:
10234 gcc_unreachable ();
10236 break;
10237 case LTU:
10238 gcc_assert (mode == CCmode || mode == CCCmode);
10239 suffix = "b";
10240 break;
10241 case GE:
10242 switch (mode)
10244 case CCNOmode:
10245 case CCGOCmode:
10246 suffix = "ns";
10247 break;
10249 case CCmode:
10250 case CCGCmode:
10251 suffix = "ge";
10252 break;
10254 default:
10255 gcc_unreachable ();
10257 break;
10258 case GEU:
10259 /* ??? As above. */
10260 gcc_assert (mode == CCmode || mode == CCCmode);
10261 suffix = fp ? "nb" : "ae";
10262 break;
10263 case LE:
10264 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
10265 suffix = "le";
10266 break;
10267 case LEU:
10268 /* ??? As above. */
10269 if (mode == CCmode)
10270 suffix = "be";
10271 else if (mode == CCCmode)
10272 suffix = fp ? "nb" : "ae";
10273 else
10274 gcc_unreachable ();
10275 break;
10276 case UNORDERED:
10277 suffix = fp ? "u" : "p";
10278 break;
10279 case ORDERED:
10280 suffix = fp ? "nu" : "np";
10281 break;
10282 default:
10283 gcc_unreachable ();
10285 fputs (suffix, file);
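/* So, e.g., a GT comparison in CCmode yields the suffix "g", and an
   insn template like "j%C0" (illustrative) comes out as "jg".  */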
10288 /* Print the name of register X to FILE based on its machine mode and number.
10289 If CODE is 'w', pretend the mode is HImode.
10290 If CODE is 'b', pretend the mode is QImode.
10291 If CODE is 'k', pretend the mode is SImode.
10292 If CODE is 'q', pretend the mode is DImode.
10293 If CODE is 'x', pretend the mode is V4SFmode.
10294 If CODE is 't', pretend the mode is V8SFmode.
10295 If CODE is 'h', pretend the reg is the 'high' byte register.
10296 If CODE is 'y', print "st(0)" instead of "st" if the reg is a stack op.
10297 If CODE is 'd', duplicate the operand for an AVX instruction.
10300 void
10301 print_reg (rtx x, int code, FILE *file)
10303 const char *reg;
10304 bool duplicated = code == 'd' && TARGET_AVX;
10306 gcc_assert (x == pc_rtx
10307 || (REGNO (x) != ARG_POINTER_REGNUM
10308 && REGNO (x) != FRAME_POINTER_REGNUM
10309 && REGNO (x) != FLAGS_REG
10310 && REGNO (x) != FPSR_REG
10311 && REGNO (x) != FPCR_REG));
10313 if (ASSEMBLER_DIALECT == ASM_ATT)
10314 putc ('%', file);
10316 if (x == pc_rtx)
10318 gcc_assert (TARGET_64BIT);
10319 fputs ("rip", file);
10320 return;
10323 if (code == 'w' || MMX_REG_P (x))
10324 code = 2;
10325 else if (code == 'b')
10326 code = 1;
10327 else if (code == 'k')
10328 code = 4;
10329 else if (code == 'q')
10330 code = 8;
10331 else if (code == 'y')
10332 code = 3;
10333 else if (code == 'h')
10334 code = 0;
10335 else if (code == 'x')
10336 code = 16;
10337 else if (code == 't')
10338 code = 32;
10339 else
10340 code = GET_MODE_SIZE (GET_MODE (x));
10342 /* Irritatingly, AMD extended registers use a different naming convention
10343 from the normal registers. */
10344 if (REX_INT_REG_P (x))
10346 gcc_assert (TARGET_64BIT);
10347 switch (code)
10349 case 0:
10350 error ("extended registers have no high halves");
10351 break;
10352 case 1:
10353 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
10354 break;
10355 case 2:
10356 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
10357 break;
10358 case 4:
10359 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
10360 break;
10361 case 8:
10362 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
10363 break;
10364 default:
10365 error ("unsupported operand size for extended register");
10366 break;
10368 return;
10371 reg = NULL;
10372 switch (code)
10374 case 3:
10375 if (STACK_TOP_P (x))
10377 reg = "st(0)";
10378 break;
10380 /* FALLTHRU */
10381 case 8:
10382 case 4:
10383 case 12:
10384 if (! ANY_FP_REG_P (x))
10385 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
10386 /* FALLTHRU */
10387 case 16:
10388 case 2:
10389 normal:
10390 reg = hi_reg_name[REGNO (x)];
10391 break;
10392 case 1:
10393 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
10394 goto normal;
10395 reg = qi_reg_name[REGNO (x)];
10396 break;
10397 case 0:
10398 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
10399 goto normal;
10400 reg = qi_high_reg_name[REGNO (x)];
10401 break;
10402 case 32:
10403 if (SSE_REG_P (x))
10405 gcc_assert (!duplicated);
10406 putc ('y', file);
10407 fputs (hi_reg_name[REGNO (x)] + 1, file);
10408 return;
10410 break;
10411 default:
10412 gcc_unreachable ();
10415 fputs (reg, file);
10416 if (duplicated)
10418 if (ASSEMBLER_DIALECT == ASM_ATT)
10419 fprintf (file, ", %%%s", reg);
10420 else
10421 fprintf (file, ", %s", reg);
10425 /* Locate some local-dynamic symbol still in use by this function
10426 so that we can print its name in some tls_local_dynamic_base
10427 pattern. */
10429 static int
10430 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
10432 rtx x = *px;
10434 if (GET_CODE (x) == SYMBOL_REF
10435 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
10437 cfun->machine->some_ld_name = XSTR (x, 0);
10438 return 1;
10441 return 0;
10444 static const char *
10445 get_some_local_dynamic_name (void)
10447 rtx insn;
10449 if (cfun->machine->some_ld_name)
10450 return cfun->machine->some_ld_name;
10452 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
10453 if (INSN_P (insn)
10454 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
10455 return cfun->machine->some_ld_name;
10457 gcc_unreachable ();
10460 /* Meaning of CODE:
10461 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
10462 C -- print opcode suffix for set/cmov insn.
10463 c -- like C, but print reversed condition
10464 E,e -- likewise, but for compare-and-branch fused insn.
10465 F,f -- likewise, but for floating-point.
10466 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
10467 otherwise nothing
10468 R -- print the prefix for register names.
10469 z -- print the opcode suffix for the size of the current operand.
10470 * -- print a star (in certain assembler syntax)
10471 A -- print an absolute memory reference.
10472 w -- print the operand as if it's a "word" (HImode) even if it isn't.
10473 s -- print a shift double count, followed by the assembler's argument
10474 delimiter.
10475 b -- print the QImode name of the register for the indicated operand.
10476 %b0 would print %al if operands[0] is reg 0.
10477 w -- likewise, print the HImode name of the register.
10478 k -- likewise, print the SImode name of the register.
10479 q -- likewise, print the DImode name of the register.
10480 x -- likewise, print the V4SFmode name of the register.
10481 t -- likewise, print the V8SFmode name of the register.
10482 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
10483 y -- print "st(0)" instead of "st" as a register.
10484 d -- print duplicated register operand for AVX instruction.
10485 D -- print condition for SSE cmp instruction.
10486 P -- if PIC, print an @PLT suffix.
10487 X -- don't print any sort of PIC '@' suffix for a symbol.
10488 & -- print some in-use local-dynamic symbol name.
10489 H -- print a memory address offset by 8; used for sse high-parts
10490 Y -- print condition for SSE5 com* instruction.
10491 + -- print a branch hint as 'cs' or 'ds' prefix
10492 ; -- print a semicolon (after prefixes, due to a bug in older gas). */
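/* As one concrete example (illustrative), a template such as
   "add%z0\t{%1, %0|%0, %1}" prints an "addl" mnemonic for an SImode
   register operand in AT&T syntax, while the Intel dialect gets the
   bare "add".  */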
10495 void
10496 print_operand (FILE *file, rtx x, int code)
10498 if (code)
10500 switch (code)
10502 case '*':
10503 if (ASSEMBLER_DIALECT == ASM_ATT)
10504 putc ('*', file);
10505 return;
10507 case '&':
10508 assemble_name (file, get_some_local_dynamic_name ());
10509 return;
10511 case 'A':
10512 switch (ASSEMBLER_DIALECT)
10514 case ASM_ATT:
10515 putc ('*', file);
10516 break;
10518 case ASM_INTEL:
10519 /* Intel syntax. For absolute addresses, registers should not
10520 be surrounded by brackets. */
10521 if (!REG_P (x))
10523 putc ('[', file);
10524 PRINT_OPERAND (file, x, 0);
10525 putc (']', file);
10526 return;
10528 break;
10530 default:
10531 gcc_unreachable ();
10534 PRINT_OPERAND (file, x, 0);
10535 return;
10538 case 'L':
10539 if (ASSEMBLER_DIALECT == ASM_ATT)
10540 putc ('l', file);
10541 return;
10543 case 'W':
10544 if (ASSEMBLER_DIALECT == ASM_ATT)
10545 putc ('w', file);
10546 return;
10548 case 'B':
10549 if (ASSEMBLER_DIALECT == ASM_ATT)
10550 putc ('b', file);
10551 return;
10553 case 'Q':
10554 if (ASSEMBLER_DIALECT == ASM_ATT)
10555 putc ('l', file);
10556 return;
10558 case 'S':
10559 if (ASSEMBLER_DIALECT == ASM_ATT)
10560 putc ('s', file);
10561 return;
10563 case 'T':
10564 if (ASSEMBLER_DIALECT == ASM_ATT)
10565 putc ('t', file);
10566 return;
10568 case 'z':
10569 /* 387 opcodes don't get size suffixes if the operands are
10570 registers. */
10571 if (STACK_REG_P (x))
10572 return;
10574 /* Likewise if using Intel opcodes. */
10575 if (ASSEMBLER_DIALECT == ASM_INTEL)
10576 return;
10578 /* Derive the opcode suffix from the size of the operand. */
10579 switch (GET_MODE_SIZE (GET_MODE (x)))
10581 case 1:
10582 putc ('b', file);
10583 return;
10585 case 2:
10586 if (MEM_P (x))
10588 #ifdef HAVE_GAS_FILDS_FISTS
10589 putc ('s', file);
10590 #endif
10591 return;
10593 else
10594 putc ('w', file);
10595 return;
10597 case 4:
10598 if (GET_MODE (x) == SFmode)
10600 putc ('s', file);
10601 return;
10603 else
10604 putc ('l', file);
10605 return;
10607 case 12:
10608 case 16:
10609 putc ('t', file);
10610 return;
10612 case 8:
10613 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
10615 if (MEM_P (x))
10617 #ifdef GAS_MNEMONICS
10618 putc ('q', file);
10619 #else
10620 putc ('l', file);
10621 putc ('l', file);
10622 #endif
10624 else
10625 putc ('q', file);
10627 else
10628 putc ('l', file);
10629 return;
10631 default:
10632 gcc_unreachable ();
10635 case 'd':
10636 case 'b':
10637 case 'w':
10638 case 'k':
10639 case 'q':
10640 case 'h':
10641 case 't':
10642 case 'y':
10643 case 'x':
10644 case 'X':
10645 case 'P':
10646 break;
10648 case 's':
10649 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
10651 PRINT_OPERAND (file, x, 0);
10652 fputs (", ", file);
10654 return;
10656 case 'D':
10657 /* A little bit of brain damage here. The SSE compare instructions
10658 use completely different names for the comparisons than the
10659 fp conditional moves do. */
10660 if (TARGET_AVX)
10662 switch (GET_CODE (x))
10664 case EQ:
10665 fputs ("eq", file);
10666 break;
10667 case UNEQ:
10668 fputs ("eq_us", file);
10669 break;
10670 case LT:
10671 fputs ("lt", file);
10672 break;
10673 case UNLT:
10674 fputs ("nge", file);
10675 break;
10676 case LE:
10677 fputs ("le", file);
10678 break;
10679 case UNLE:
10680 fputs ("ngt", file);
10681 break;
10682 case UNORDERED:
10683 fputs ("unord", file);
10684 break;
10685 case NE:
10686 fputs ("neq", file);
10687 break;
10688 case LTGT:
10689 fputs ("neq_oq", file);
10690 break;
10691 case GE:
10692 fputs ("ge", file);
10693 break;
10694 case UNGE:
10695 fputs ("nlt", file);
10696 break;
10697 case GT:
10698 fputs ("gt", file);
10699 break;
10700 case UNGT:
10701 fputs ("nle", file);
10702 break;
10703 case ORDERED:
10704 fputs ("ord", file);
10705 break;
10706 default:
10707 gcc_unreachable ();
10710 else
10712 switch (GET_CODE (x))
10714 case EQ:
10715 case UNEQ:
10716 fputs ("eq", file);
10717 break;
10718 case LT:
10719 case UNLT:
10720 fputs ("lt", file);
10721 break;
10722 case LE:
10723 case UNLE:
10724 fputs ("le", file);
10725 break;
10726 case UNORDERED:
10727 fputs ("unord", file);
10728 break;
10729 case NE:
10730 case LTGT:
10731 fputs ("neq", file);
10732 break;
10733 case UNGE:
10734 case GE:
10735 fputs ("nlt", file);
10736 break;
10737 case UNGT:
10738 case GT:
10739 fputs ("nle", file);
10740 break;
10741 case ORDERED:
10742 fputs ("ord", file);
10743 break;
10744 default:
10745 gcc_unreachable ();
10748 return;
10749 case 'O':
10750 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
10751 if (ASSEMBLER_DIALECT == ASM_ATT)
10753 switch (GET_MODE (x))
10755 case HImode: putc ('w', file); break;
10756 case SImode:
10757 case SFmode: putc ('l', file); break;
10758 case DImode:
10759 case DFmode: putc ('q', file); break;
10760 default: gcc_unreachable ();
10762 putc ('.', file);
10764 #endif
10765 return;
10766 case 'C':
10767 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
10768 return;
10769 case 'F':
10770 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
10771 if (ASSEMBLER_DIALECT == ASM_ATT)
10772 putc ('.', file);
10773 #endif
10774 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
10775 return;
10777 /* Like above, but reverse condition */
10778 case 'c':
10779 /* Check to see if the argument to %c is really a constant
10780 and not a condition code which needs to be reversed. */
10781 if (!COMPARISON_P (x))
10783 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
10784 return;
10786 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
10787 return;
10788 case 'f':
10789 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
10790 if (ASSEMBLER_DIALECT == ASM_ATT)
10791 putc ('.', file);
10792 #endif
10793 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
10794 return;
10796 case 'E':
10797 put_condition_code (GET_CODE (x), CCmode, 0, 0, file);
10798 return;
10800 case 'e':
10801 put_condition_code (GET_CODE (x), CCmode, 1, 0, file);
10802 return;
10804 case 'H':
10805 /* It doesn't actually matter what mode we use here, as we're
10806 only going to use this for printing. */
10807 x = adjust_address_nv (x, DImode, 8);
10808 break;
10810 case '+':
10812 rtx x;
10814 if (!optimize
10815 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
10816 return;
10818 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
10819 if (x)
10821 int pred_val = INTVAL (XEXP (x, 0));
10823 if (pred_val < REG_BR_PROB_BASE * 45 / 100
10824 || pred_val > REG_BR_PROB_BASE * 55 / 100)
10826 int taken = pred_val > REG_BR_PROB_BASE / 2;
10827 int cputaken = final_forward_branch_p (current_output_insn) == 0;
10829 /* Emit hints only where the default branch prediction
10830 heuristics would fail. */
10831 if (taken != cputaken)
10833 /* We use 3e (DS) prefix for taken branches and
10834 2e (CS) prefix for not taken branches. */
10835 if (taken)
10836 fputs ("ds ; ", file);
10837 else
10838 fputs ("cs ; ", file);
10842 return;
10845 case 'Y':
10846 switch (GET_CODE (x))
10848 case NE:
10849 fputs ("neq", file);
10850 break;
10851 case EQ:
10852 fputs ("eq", file);
10853 break;
10854 case GE:
10855 case GEU:
10856 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
10857 break;
10858 case GT:
10859 case GTU:
10860 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
10861 break;
10862 case LE:
10863 case LEU:
10864 fputs ("le", file);
10865 break;
10866 case LT:
10867 case LTU:
10868 fputs ("lt", file);
10869 break;
10870 case UNORDERED:
10871 fputs ("unord", file);
10872 break;
10873 case ORDERED:
10874 fputs ("ord", file);
10875 break;
10876 case UNEQ:
10877 fputs ("ueq", file);
10878 break;
10879 case UNGE:
10880 fputs ("nlt", file);
10881 break;
10882 case UNGT:
10883 fputs ("nle", file);
10884 break;
10885 case UNLE:
10886 fputs ("ule", file);
10887 break;
10888 case UNLT:
10889 fputs ("ult", file);
10890 break;
10891 case LTGT:
10892 fputs ("une", file);
10893 break;
10894 default:
10895 gcc_unreachable ();
10897 return;
10899 case ';':
10900 #if TARGET_MACHO
10901 fputs (" ; ", file);
10902 #else
10903 fputc (' ', file);
10904 #endif
10905 return;
10907 default:
10908 output_operand_lossage ("invalid operand code '%c'", code);
10912 if (REG_P (x))
10913 print_reg (x, code, file);
10915 else if (MEM_P (x))
10917 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
10918 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
10919 && GET_MODE (x) != BLKmode)
10921 const char * size;
10922 switch (GET_MODE_SIZE (GET_MODE (x)))
10924 case 1: size = "BYTE"; break;
10925 case 2: size = "WORD"; break;
10926 case 4: size = "DWORD"; break;
10927 case 8: size = "QWORD"; break;
10928 case 12: size = "XWORD"; break;
10929 case 16:
10930 if (GET_MODE (x) == XFmode)
10931 size = "XWORD";
10932 else
10933 size = "XMMWORD";
10934 break;
10935 default:
10936 gcc_unreachable ();
10939 /* Check for explicit size override (codes 'b', 'w' and 'k') */
10940 if (code == 'b')
10941 size = "BYTE";
10942 else if (code == 'w')
10943 size = "WORD";
10944 else if (code == 'k')
10945 size = "DWORD";
10947 fputs (size, file);
10948 fputs (" PTR ", file);
10951 x = XEXP (x, 0);
10952 /* Avoid (%rip) for call operands. */
10953 if (CONSTANT_ADDRESS_P (x) && code == 'P'
10954 && !CONST_INT_P (x))
10955 output_addr_const (file, x);
10956 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
10957 output_operand_lossage ("invalid constraints for operand");
10958 else
10959 output_address (x);
10962 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
10964 REAL_VALUE_TYPE r;
10965 long l;
10967 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10968 REAL_VALUE_TO_TARGET_SINGLE (r, l);
10970 if (ASSEMBLER_DIALECT == ASM_ATT)
10971 putc ('$', file);
10972 fprintf (file, "0x%08lx", (long unsigned int) l);
10975 /* These float cases don't actually occur as immediate operands. */
10976 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
10978 char dstr[30];
10980 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
10981 fprintf (file, "%s", dstr);
10984 else if (GET_CODE (x) == CONST_DOUBLE
10985 && GET_MODE (x) == XFmode)
10987 char dstr[30];
10989 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
10990 fprintf (file, "%s", dstr);
10993 else
10995 /* We have patterns that allow zero sets of memory, for instance.
10996 In 64-bit mode, we should probably support all 8-byte vectors,
10997 since we can in fact encode that into an immediate. */
10998 if (GET_CODE (x) == CONST_VECTOR)
11000 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
11001 x = const0_rtx;
11004 if (code != 'P')
11006 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
11008 if (ASSEMBLER_DIALECT == ASM_ATT)
11009 putc ('$', file);
11011 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
11012 || GET_CODE (x) == LABEL_REF)
11014 if (ASSEMBLER_DIALECT == ASM_ATT)
11015 putc ('$', file);
11016 else
11017 fputs ("OFFSET FLAT:", file);
11020 if (CONST_INT_P (x))
11021 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
11022 else if (flag_pic)
11023 output_pic_addr_const (file, x, code);
11024 else
11025 output_addr_const (file, x);
11029 /* Print a memory operand whose address is ADDR. */
11031 void
11032 print_operand_address (FILE *file, rtx addr)
11034 struct ix86_address parts;
11035 rtx base, index, disp;
11036 int scale;
11037 int ok = ix86_decompose_address (addr, &parts);
11039 gcc_assert (ok);
11041 base = parts.base;
11042 index = parts.index;
11043 disp = parts.disp;
11044 scale = parts.scale;
11046 switch (parts.seg)
11048 case SEG_DEFAULT:
11049 break;
11050 case SEG_FS:
11051 case SEG_GS:
11052 if (ASSEMBLER_DIALECT == ASM_ATT)
11053 putc ('%', file);
11054 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
11055 break;
11056 default:
11057 gcc_unreachable ();
11060 /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode. */
11061 if (TARGET_64BIT && !base && !index)
11063 rtx symbol = disp;
11065 if (GET_CODE (disp) == CONST
11066 && GET_CODE (XEXP (disp, 0)) == PLUS
11067 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11068 symbol = XEXP (XEXP (disp, 0), 0);
11070 if (GET_CODE (symbol) == LABEL_REF
11071 || (GET_CODE (symbol) == SYMBOL_REF
11072 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
11073 base = pc_rtx;
11075 if (!base && !index)
11077 /* A displacement-only address requires special attention. */
11079 if (CONST_INT_P (disp))
11081 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
11082 fputs ("ds:", file);
11083 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
11085 else if (flag_pic)
11086 output_pic_addr_const (file, disp, 0);
11087 else
11088 output_addr_const (file, disp);
11090 else
11092 if (ASSEMBLER_DIALECT == ASM_ATT)
11094 if (disp)
11096 if (flag_pic)
11097 output_pic_addr_const (file, disp, 0);
11098 else if (GET_CODE (disp) == LABEL_REF)
11099 output_asm_label (disp);
11100 else
11101 output_addr_const (file, disp);
11104 putc ('(', file);
11105 if (base)
11106 print_reg (base, 0, file);
11107 if (index)
11109 putc (',', file);
11110 print_reg (index, 0, file);
11111 if (scale != 1)
11112 fprintf (file, ",%d", scale);
11114 putc (')', file);
11116 else
11118 rtx offset = NULL_RTX;
11120 if (disp)
11122 /* Pull out the offset of a symbol; print any symbol itself. */
11123 if (GET_CODE (disp) == CONST
11124 && GET_CODE (XEXP (disp, 0)) == PLUS
11125 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11127 offset = XEXP (XEXP (disp, 0), 1);
11128 disp = gen_rtx_CONST (VOIDmode,
11129 XEXP (XEXP (disp, 0), 0));
11132 if (flag_pic)
11133 output_pic_addr_const (file, disp, 0);
11134 else if (GET_CODE (disp) == LABEL_REF)
11135 output_asm_label (disp);
11136 else if (CONST_INT_P (disp))
11137 offset = disp;
11138 else
11139 output_addr_const (file, disp);
11142 putc ('[', file);
11143 if (base)
11145 print_reg (base, 0, file);
11146 if (offset)
11148 if (INTVAL (offset) >= 0)
11149 putc ('+', file);
11150 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
11153 else if (offset)
11154 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
11155 else
11156 putc ('0', file);
11158 if (index)
11160 putc ('+', file);
11161 print_reg (index, 0, file);
11162 if (scale != 1)
11163 fprintf (file, "*%d", scale);
11165 putc (']', file);
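/* E.g. base %ebx, index %esi, scale 4 and displacement 12 print as
   "12(%ebx,%esi,4)" in AT&T syntax and as "[ebx+12+esi*4]" in Intel
   syntax (an illustrative sketch).  */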
11170 bool
11171 output_addr_const_extra (FILE *file, rtx x)
11173 rtx op;
11175 if (GET_CODE (x) != UNSPEC)
11176 return false;
11178 op = XVECEXP (x, 0, 0);
11179 switch (XINT (x, 1))
11181 case UNSPEC_GOTTPOFF:
11182 output_addr_const (file, op);
11183 /* FIXME: This might be @TPOFF in Sun ld. */
11184 fputs ("@GOTTPOFF", file);
11185 break;
11186 case UNSPEC_TPOFF:
11187 output_addr_const (file, op);
11188 fputs ("@TPOFF", file);
11189 break;
11190 case UNSPEC_NTPOFF:
11191 output_addr_const (file, op);
11192 if (TARGET_64BIT)
11193 fputs ("@TPOFF", file);
11194 else
11195 fputs ("@NTPOFF", file);
11196 break;
11197 case UNSPEC_DTPOFF:
11198 output_addr_const (file, op);
11199 fputs ("@DTPOFF", file);
11200 break;
11201 case UNSPEC_GOTNTPOFF:
11202 output_addr_const (file, op);
11203 if (TARGET_64BIT)
11204 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11205 "@GOTTPOFF(%rip)" : "@GOTTPOFF[rip]", file);
11206 else
11207 fputs ("@GOTNTPOFF", file);
11208 break;
11209 case UNSPEC_INDNTPOFF:
11210 output_addr_const (file, op);
11211 fputs ("@INDNTPOFF", file);
11212 break;
11213 #if TARGET_MACHO
11214 case UNSPEC_MACHOPIC_OFFSET:
11215 output_addr_const (file, op);
11216 putc ('-', file);
11217 machopic_output_function_base_name (file);
11218 break;
11219 #endif
11221 default:
11222 return false;
11225 return true;
11228 /* Split one or more DImode RTL references into pairs of SImode
11229 references. The RTL can be REG, offsettable MEM, integer constant, or
11230 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
11231 split and "num" is its length. lo_half and hi_half are output arrays
11232 that parallel "operands". */
11234 void
11235 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
11237 while (num--)
11239 rtx op = operands[num];
11241 /* simplify_subreg refuses to split volatile memory addresses,
11242 but we still have to handle them. */
11243 if (MEM_P (op))
11245 lo_half[num] = adjust_address (op, SImode, 0);
11246 hi_half[num] = adjust_address (op, SImode, 4);
11248 else
11250 lo_half[num] = simplify_gen_subreg (SImode, op,
11251 GET_MODE (op) == VOIDmode
11252 ? DImode : GET_MODE (op), 0);
11253 hi_half[num] = simplify_gen_subreg (SImode, op,
11254 GET_MODE (op) == VOIDmode
11255 ? DImode : GET_MODE (op), 4);
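/* A typical use from a machine-description splitter (a sketch):

     rtx lo[2], hi[2];
     split_di (operands, 2, lo, hi);

   after which a DImode addition can be emitted as an addl on the low
   halves followed by an adcl on the high halves.  */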
11259 /* Split one or more TImode RTL references into pairs of DImode
11260 references. The RTL can be REG, offsettable MEM, integer constant, or
11261 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
11262 split and "num" is its length. lo_half and hi_half are output arrays
11263 that parallel "operands". */
11265 void
11266 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
11268 while (num--)
11270 rtx op = operands[num];
11272 /* simplify_subreg refuses to split volatile memory addresses, but we
11273 still have to handle them. */
11274 if (MEM_P (op))
11276 lo_half[num] = adjust_address (op, DImode, 0);
11277 hi_half[num] = adjust_address (op, DImode, 8);
11279 else
11281 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
11282 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
11287 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
11288 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
11289 is the expression of the binary operation. The output may either be
11290 emitted here, or returned to the caller, like all output_* functions.
11292 There is no guarantee that the operands are the same mode, as they
11293 might be within FLOAT or FLOAT_EXTEND expressions. */
11295 #ifndef SYSV386_COMPAT
11296 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
11297 wants to fix the assemblers because that causes incompatibility
11298 with gcc. No-one wants to fix gcc because that causes
11299 incompatibility with assemblers... You can use the option of
11300 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
11301 #define SYSV386_COMPAT 1
11302 #endif
11304 const char *
11305 output_387_binary_op (rtx insn, rtx *operands)
11307 static char buf[40];
11308 const char *p;
11309 const char *ssep;
11310 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
11312 #ifdef ENABLE_CHECKING
11313 /* Even if we do not want to check the inputs, this documents the input
11314 constraints, which helps in understanding the following code. */
11315 if (STACK_REG_P (operands[0])
11316 && ((REG_P (operands[1])
11317 && REGNO (operands[0]) == REGNO (operands[1])
11318 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
11319 || (REG_P (operands[2])
11320 && REGNO (operands[0]) == REGNO (operands[2])
11321 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
11322 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
11323 ; /* ok */
11324 else
11325 gcc_assert (is_sse);
11326 #endif
11328 switch (GET_CODE (operands[3]))
11330 case PLUS:
11331 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11332 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11333 p = "fiadd";
11334 else
11335 p = "fadd";
11336 ssep = "vadd";
11337 break;
11339 case MINUS:
11340 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11341 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11342 p = "fisub";
11343 else
11344 p = "fsub";
11345 ssep = "vsub";
11346 break;
11348 case MULT:
11349 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11350 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11351 p = "fimul";
11352 else
11353 p = "fmul";
11354 ssep = "vmul";
11355 break;
11357 case DIV:
11358 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11359 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11360 p = "fidiv";
11361 else
11362 p = "fdiv";
11363 ssep = "vdiv";
11364 break;
11366 default:
11367 gcc_unreachable ();
11370 if (is_sse)
11372 if (TARGET_AVX)
11374 strcpy (buf, ssep);
11375 if (GET_MODE (operands[0]) == SFmode)
11376 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
11377 else
11378 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
11380 else
11382 strcpy (buf, ssep + 1);
11383 if (GET_MODE (operands[0]) == SFmode)
11384 strcat (buf, "ss\t{%2, %0|%0, %2}");
11385 else
11386 strcat (buf, "sd\t{%2, %0|%0, %2}");
11388 return buf;
11390 strcpy (buf, p);
11392 switch (GET_CODE (operands[3]))
11394 case MULT:
11395 case PLUS:
11396 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
11398 rtx temp = operands[2];
11399 operands[2] = operands[1];
11400 operands[1] = temp;
11403 /* We know operands[0] == operands[1]. */
11405 if (MEM_P (operands[2]))
11407 p = "%z2\t%2";
11408 break;
11411 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
11413 if (STACK_TOP_P (operands[0]))
11414 /* How is it that we are storing to a dead operand[2]?
11415 Well, presumably operands[1] is dead too. We can't
11416 store the result to st(0) as st(0) gets popped on this
11417 instruction. Instead store to operands[2] (which I
11418 think has to be st(1)). st(1) will be popped later.
11419 gcc <= 2.8.1 didn't have this check and generated
11420 assembly code that the Unixware assembler rejected. */
11421 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
11422 else
11423 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
11424 break;
11427 if (STACK_TOP_P (operands[0]))
11428 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
11429 else
11430 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
11431 break;
11433 case MINUS:
11434 case DIV:
11435 if (MEM_P (operands[1]))
11437 p = "r%z1\t%1";
11438 break;
11441 if (MEM_P (operands[2]))
11443 p = "%z2\t%2";
11444 break;
11447 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
11449 #if SYSV386_COMPAT
11450 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
11451 derived assemblers, confusingly reverse the direction of
11452 the operation for fsub{r} and fdiv{r} when the
11453 destination register is not st(0). The Intel assembler
11454 doesn't have this brain damage. Read !SYSV386_COMPAT to
11455 figure out what the hardware really does. */
11456 if (STACK_TOP_P (operands[0]))
11457 p = "{p\t%0, %2|rp\t%2, %0}";
11458 else
11459 p = "{rp\t%2, %0|p\t%0, %2}";
11460 #else
11461 if (STACK_TOP_P (operands[0]))
11462 /* As above for fmul/fadd, we can't store to st(0). */
11463 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
11464 else
11465 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
11466 #endif
11467 break;
11470 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
11472 #if SYSV386_COMPAT
11473 if (STACK_TOP_P (operands[0]))
11474 p = "{rp\t%0, %1|p\t%1, %0}";
11475 else
11476 p = "{p\t%1, %0|rp\t%0, %1}";
11477 #else
11478 if (STACK_TOP_P (operands[0]))
11479 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
11480 else
11481 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
11482 #endif
11483 break;
11486 if (STACK_TOP_P (operands[0]))
11488 if (STACK_TOP_P (operands[1]))
11489 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
11490 else
11491 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
11492 break;
11494 else if (STACK_TOP_P (operands[1]))
11496 #if SYSV386_COMPAT
11497 p = "{\t%1, %0|r\t%0, %1}";
11498 #else
11499 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
11500 #endif
11502 else
11504 #if SYSV386_COMPAT
11505 p = "{r\t%2, %0|\t%0, %2}";
11506 #else
11507 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
11508 #endif
11510 break;
11512 default:
11513 gcc_unreachable ();
11516 strcat (buf, p);
11517 return buf;
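/* Editorial worked examples of the templates built above: for a PLUS
   with operand 2 dying and the destination not at the stack top, the
   result is "faddp\t{%2, %0|%0, %2}"; for SFmode SSE operands under
   TARGET_AVX it is "vaddss\t{%2, %1, %0|%0, %1, %2}".  */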
11520 /* Return needed mode for entity in optimize_mode_switching pass. */
11522 int
11523 ix86_mode_needed (int entity, rtx insn)
11525 enum attr_i387_cw mode;
11527 /* The mode UNINITIALIZED is used to store the control word after a
11528 function call or ASM pattern. The mode ANY specifies that the function
11529 has no requirements on the control word and makes no changes in the
11530 bits we are interested in. */
11532 if (CALL_P (insn)
11533 || (NONJUMP_INSN_P (insn)
11534 && (asm_noperands (PATTERN (insn)) >= 0
11535 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
11536 return I387_CW_UNINITIALIZED;
11538 if (recog_memoized (insn) < 0)
11539 return I387_CW_ANY;
11541 mode = get_attr_i387_cw (insn);
11543 switch (entity)
11545 case I387_TRUNC:
11546 if (mode == I387_CW_TRUNC)
11547 return mode;
11548 break;
11550 case I387_FLOOR:
11551 if (mode == I387_CW_FLOOR)
11552 return mode;
11553 break;
11555 case I387_CEIL:
11556 if (mode == I387_CW_CEIL)
11557 return mode;
11558 break;
11560 case I387_MASK_PM:
11561 if (mode == I387_CW_MASK_PM)
11562 return mode;
11563 break;
11565 default:
11566 gcc_unreachable ();
11569 return I387_CW_ANY;
11572 /* Output code to initialize control word copies used by trunc?f?i and
11573 rounding patterns. CURRENT_MODE is set to the current control word,
11574 while NEW_MODE is set to the new control word. */
11576 void
11577 emit_i387_cw_initialization (int mode)
11579 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
11580 rtx new_mode;
11582 enum ix86_stack_slot slot;
11584 rtx reg = gen_reg_rtx (HImode);
11586 emit_insn (gen_x86_fnstcw_1 (stored_mode));
11587 emit_move_insn (reg, copy_rtx (stored_mode));
11589 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
11590 || optimize_function_for_size_p (cfun))
11592 switch (mode)
11594 case I387_CW_TRUNC:
11595 /* round toward zero (truncate) */
11596 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
11597 slot = SLOT_CW_TRUNC;
11598 break;
11600 case I387_CW_FLOOR:
11601 /* round down toward -oo */
11602 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
11603 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
11604 slot = SLOT_CW_FLOOR;
11605 break;
11607 case I387_CW_CEIL:
11608 /* round up toward +oo */
11609 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
11610 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
11611 slot = SLOT_CW_CEIL;
11612 break;
11614 case I387_CW_MASK_PM:
11615 /* mask precision exception for nearbyint() */
11616 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
11617 slot = SLOT_CW_MASK_PM;
11618 break;
11620 default:
11621 gcc_unreachable ();
11624 else
11626 switch (mode)
11628 case I387_CW_TRUNC:
11629 /* round toward zero (truncate) */
11630 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
11631 slot = SLOT_CW_TRUNC;
11632 break;
11634 case I387_CW_FLOOR:
11635 /* round down toward -oo */
11636 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
11637 slot = SLOT_CW_FLOOR;
11638 break;
11640 case I387_CW_CEIL:
11641 /* round up toward +oo */
11642 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
11643 slot = SLOT_CW_CEIL;
11644 break;
11646 case I387_CW_MASK_PM:
11647 /* mask precision exception for nearbyint() */
11648 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
11649 slot = SLOT_CW_MASK_PM;
11650 break;
11652 default:
11653 gcc_unreachable ();
11657 gcc_assert (slot < MAX_386_STACK_LOCALS);
11659 new_mode = assign_386_stack_local (HImode, slot);
11660 emit_move_insn (new_mode, reg);
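/* A minimal standalone sketch (editorial; the helper name and plain C
   types are ours) of the bit twiddling done above.  Bits 10-11 of the
   x87 control word select rounding: 00 nearest, 01 down, 10 up,
   11 truncate (chop); bit 5 (0x0020) masks the precision exception.  */
static unsigned short
x87_cw_set_rounding (unsigned short cw, unsigned short rc_bits)
{
  /* Clear the rounding-control field, then install the new bits.  */
  return (unsigned short) ((cw & ~0x0c00) | (rc_bits & 0x0c00));
}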
11663 /* Output code for INSN to convert a float to a signed int. OPERANDS
11664 are the insn operands. The output may be [HSD]Imode and the input
11665 operand may be [SDX]Fmode. */
11667 const char *
11668 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
11670 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
11671 int dimode_p = GET_MODE (operands[0]) == DImode;
11672 int round_mode = get_attr_i387_cw (insn);
11674 /* Jump through a hoop or two for DImode, since the hardware has no
11675 non-popping instruction. We used to do this a different way, but
11676 that was somewhat fragile and broke with post-reload splitters. */
11677 if ((dimode_p || fisttp) && !stack_top_dies)
11678 output_asm_insn ("fld\t%y1", operands);
11680 gcc_assert (STACK_TOP_P (operands[1]));
11681 gcc_assert (MEM_P (operands[0]));
11682 gcc_assert (GET_MODE (operands[1]) != TFmode);
11684 if (fisttp)
11685 output_asm_insn ("fisttp%z0\t%0", operands);
11686 else
11688 if (round_mode != I387_CW_ANY)
11689 output_asm_insn ("fldcw\t%3", operands);
11690 if (stack_top_dies || dimode_p)
11691 output_asm_insn ("fistp%z0\t%0", operands);
11692 else
11693 output_asm_insn ("fist%z0\t%0", operands);
11694 if (round_mode != I387_CW_ANY)
11695 output_asm_insn ("fldcw\t%2", operands);
11698 return "";
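/* Editorial example: for an SImode store where the stack top survives,
   no fisttp is available, and a non-default rounding mode is needed,
   the sequence emitted above is "fldcw %3" (load the truncating control
   word), "fist%z0 %0" (store without popping), then "fldcw %2" (restore
   the original control word).  */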
11701 /* Output code for x87 ffreep insn. The OPNO argument, which may only
11702 have the values zero or one, indicates the ffreep insn's operand
11703 from the OPERANDS array. */
11705 static const char *
11706 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
11708 if (TARGET_USE_FFREEP)
11709 #if HAVE_AS_IX86_FFREEP
11710 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
11711 #else
11713 static char retval[] = ".word\t0xc_df";
11714 int regno = REGNO (operands[opno]);
11716 gcc_assert (FP_REGNO_P (regno));
11718 retval[9] = '0' + (regno - FIRST_STACK_REG);
11719 return retval;
11721 #endif
11723 return opno ? "fstp\t%y1" : "fstp\t%y0";
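/* Editorial note: "ffreep %st(i)" encodes as the two bytes 0xdf 0xc0+i,
   so the little-endian ".word 0xc0df" (the '_' above is patched with
   the register number) emits the same instruction for assemblers that
   do not know the mnemonic.  */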
11727 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
11728 should be used. UNORDERED_P is true when fucom should be used. */
11730 const char *
11731 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
11733 int stack_top_dies;
11734 rtx cmp_op0, cmp_op1;
11735 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
11737 if (eflags_p)
11739 cmp_op0 = operands[0];
11740 cmp_op1 = operands[1];
11742 else
11744 cmp_op0 = operands[1];
11745 cmp_op1 = operands[2];
11748 if (is_sse)
11750 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
11751 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
11752 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
11753 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
11755 if (GET_MODE (operands[0]) == SFmode)
11756 if (unordered_p)
11757 return &ucomiss[TARGET_AVX ? 0 : 1];
11758 else
11759 return &comiss[TARGET_AVX ? 0 : 1];
11760 else
11761 if (unordered_p)
11762 return &ucomisd[TARGET_AVX ? 0 : 1];
11763 else
11764 return &comisd[TARGET_AVX ? 0 : 1];
11767 gcc_assert (STACK_TOP_P (cmp_op0));
11769 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
11771 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
11773 if (stack_top_dies)
11775 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
11776 return output_387_ffreep (operands, 1);
11778 else
11779 return "ftst\n\tfnstsw\t%0";
11782 if (STACK_REG_P (cmp_op1)
11783 && stack_top_dies
11784 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
11785 && REGNO (cmp_op1) != FIRST_STACK_REG)
11787 /* If both the top of the 387 stack and the other operand (also a
11788 stack register) die, then this must be a
11789 `fcompp' float compare. */
11791 if (eflags_p)
11793 /* There is no double popping fcomi variant. Fortunately,
11794 eflags is immune from the fstp's cc clobbering. */
11795 if (unordered_p)
11796 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
11797 else
11798 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
11799 return output_387_ffreep (operands, 0);
11801 else
11803 if (unordered_p)
11804 return "fucompp\n\tfnstsw\t%0";
11805 else
11806 return "fcompp\n\tfnstsw\t%0";
11809 else
11811 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
11813 static const char * const alt[16] =
11815 "fcom%z2\t%y2\n\tfnstsw\t%0",
11816 "fcomp%z2\t%y2\n\tfnstsw\t%0",
11817 "fucom%z2\t%y2\n\tfnstsw\t%0",
11818 "fucomp%z2\t%y2\n\tfnstsw\t%0",
11820 "ficom%z2\t%y2\n\tfnstsw\t%0",
11821 "ficomp%z2\t%y2\n\tfnstsw\t%0",
11822 NULL,
11823 NULL,
11825 "fcomi\t{%y1, %0|%0, %y1}",
11826 "fcomip\t{%y1, %0|%0, %y1}",
11827 "fucomi\t{%y1, %0|%0, %y1}",
11828 "fucomip\t{%y1, %0|%0, %y1}",
11830 NULL,
11831 NULL,
11832 NULL,
11833 NULL
11836 int mask;
11837 const char *ret;
11839 mask = eflags_p << 3;
11840 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
11841 mask |= unordered_p << 1;
11842 mask |= stack_top_dies;
11844 gcc_assert (mask < 16);
11845 ret = alt[mask];
11846 gcc_assert (ret);
11848 return ret;
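/* Editorial worked example of the encoding above: an fcomi-style
   unordered compare with a dying stack top has
   mask = (1 << 3) | (0 << 2) | (1 << 1) | 1 = 11, selecting
   "fucomip\t{%y1, %0|%0, %y1}" from the table.  */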
11852 void
11853 ix86_output_addr_vec_elt (FILE *file, int value)
11855 const char *directive = ASM_LONG;
11857 #ifdef ASM_QUAD
11858 if (TARGET_64BIT)
11859 directive = ASM_QUAD;
11860 #else
11861 gcc_assert (!TARGET_64BIT);
11862 #endif
11864 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
11867 void
11868 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
11870 const char *directive = ASM_LONG;
11872 #ifdef ASM_QUAD
11873 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
11874 directive = ASM_QUAD;
11875 #else
11876 gcc_assert (!TARGET_64BIT);
11877 #endif
11878 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
11879 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
11880 fprintf (file, "%s%s%d-%s%d\n",
11881 directive, LPREFIX, value, LPREFIX, rel);
11882 else if (HAVE_AS_GOTOFF_IN_DATA)
11883 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
11884 #if TARGET_MACHO
11885 else if (TARGET_MACHO)
11887 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
11888 machopic_output_function_base_name (file);
11889 fprintf(file, "\n");
11891 #endif
11892 else
11893 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
11894 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
11897 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
11898 for the target. */
11900 void
11901 ix86_expand_clear (rtx dest)
11903 rtx tmp;
11905 /* We play register width games, which are only valid after reload. */
11906 gcc_assert (reload_completed);
11908 /* Avoid HImode and its attendant prefix byte. */
11909 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
11910 dest = gen_rtx_REG (SImode, REGNO (dest));
11911 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
11913 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
11914 if (reload_completed && (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ()))
11916 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
11917 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
11920 emit_insn (tmp);
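/* Editorial note: "xor reg, reg" is shorter than "mov $0, reg" but
   clobbers the flags, hence the explicit CLOBBER of FLAGS_REG attached
   above; the plain flags-preserving move is kept only when
   TARGET_USE_MOV0 asks for it and the insn is not optimized for
   speed.  */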
11923 /* X is an unchanging MEM. If it is a constant pool reference, return
11924 the constant pool rtx, else NULL. */
11926 rtx
11927 maybe_get_pool_constant (rtx x)
11929 x = ix86_delegitimize_address (XEXP (x, 0));
11931 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
11932 return get_pool_constant (x);
11934 return NULL_RTX;
11937 void
11938 ix86_expand_move (enum machine_mode mode, rtx operands[])
11940 rtx op0, op1;
11941 enum tls_model model;
11943 op0 = operands[0];
11944 op1 = operands[1];
11946 if (GET_CODE (op1) == SYMBOL_REF)
11948 model = SYMBOL_REF_TLS_MODEL (op1);
11949 if (model)
11951 op1 = legitimize_tls_address (op1, model, true);
11952 op1 = force_operand (op1, op0);
11953 if (op1 == op0)
11954 return;
11956 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11957 && SYMBOL_REF_DLLIMPORT_P (op1))
11958 op1 = legitimize_dllimport_symbol (op1, false);
11960 else if (GET_CODE (op1) == CONST
11961 && GET_CODE (XEXP (op1, 0)) == PLUS
11962 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
11964 rtx addend = XEXP (XEXP (op1, 0), 1);
11965 rtx symbol = XEXP (XEXP (op1, 0), 0);
11966 rtx tmp = NULL;
11968 model = SYMBOL_REF_TLS_MODEL (symbol);
11969 if (model)
11970 tmp = legitimize_tls_address (symbol, model, true);
11971 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11972 && SYMBOL_REF_DLLIMPORT_P (symbol))
11973 tmp = legitimize_dllimport_symbol (symbol, true);
11975 if (tmp)
11977 tmp = force_operand (tmp, NULL);
11978 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
11979 op0, 1, OPTAB_DIRECT);
11980 if (tmp == op0)
11981 return;
11985 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
11987 if (TARGET_MACHO && !TARGET_64BIT)
11989 #if TARGET_MACHO
11990 if (MACHOPIC_PURE)
11992 rtx temp = ((reload_in_progress
11993 || ((op0 && REG_P (op0))
11994 && mode == Pmode))
11995 ? op0 : gen_reg_rtx (Pmode));
11996 op1 = machopic_indirect_data_reference (op1, temp);
11997 op1 = machopic_legitimize_pic_address (op1, mode,
11998 temp == op1 ? 0 : temp);
12000 else if (MACHOPIC_INDIRECT)
12001 op1 = machopic_indirect_data_reference (op1, 0);
12002 if (op0 == op1)
12003 return;
12004 #endif
12006 else
12008 if (MEM_P (op0))
12009 op1 = force_reg (Pmode, op1);
12010 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
12012 rtx reg = !can_create_pseudo_p () ? op0 : NULL_RTX;
12013 op1 = legitimize_pic_address (op1, reg);
12014 if (op0 == op1)
12015 return;
12019 else
12021 if (MEM_P (op0)
12022 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
12023 || !push_operand (op0, mode))
12024 && MEM_P (op1))
12025 op1 = force_reg (mode, op1);
12027 if (push_operand (op0, mode)
12028 && ! general_no_elim_operand (op1, mode))
12029 op1 = copy_to_mode_reg (mode, op1);
12031 /* Force large constants in 64-bit compilation into a register
12032 to get them CSEd. */
12033 if (can_create_pseudo_p ()
12034 && (mode == DImode) && TARGET_64BIT
12035 && immediate_operand (op1, mode)
12036 && !x86_64_zext_immediate_operand (op1, VOIDmode)
12037 && !register_operand (op0, mode)
12038 && optimize)
12039 op1 = copy_to_mode_reg (mode, op1);
12041 if (can_create_pseudo_p ()
12042 && FLOAT_MODE_P (mode)
12043 && GET_CODE (op1) == CONST_DOUBLE)
12045 /* If we are loading a floating point constant to a register,
12046 force the value to memory now, since we'll get better code
12047 out the back end. */
12049 op1 = validize_mem (force_const_mem (mode, op1));
12050 if (!register_operand (op0, mode))
12052 rtx temp = gen_reg_rtx (mode);
12053 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
12054 emit_move_insn (op0, temp);
12055 return;
12060 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12063 void
12064 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
12066 rtx op0 = operands[0], op1 = operands[1];
12067 unsigned int align = GET_MODE_ALIGNMENT (mode);
12069 /* Force constants other than zero into memory. We do not know how
12070 the instructions used to build constants modify the upper 64 bits
12071 of the register; once we have that information we may be able
12072 to handle some of them more efficiently. */
12073 if (can_create_pseudo_p ()
12074 && register_operand (op0, mode)
12075 && (CONSTANT_P (op1)
12076 || (GET_CODE (op1) == SUBREG
12077 && CONSTANT_P (SUBREG_REG (op1))))
12078 && standard_sse_constant_p (op1) <= 0)
12079 op1 = validize_mem (force_const_mem (mode, op1));
12081 /* We need to check memory alignment for SSE modes since attributes
12082 can make operands unaligned. */
12083 if (can_create_pseudo_p ()
12084 && SSE_REG_MODE_P (mode)
12085 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
12086 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
12088 rtx tmp[2];
12090 /* ix86_expand_vector_move_misalign() does not like constants ... */
12091 if (CONSTANT_P (op1)
12092 || (GET_CODE (op1) == SUBREG
12093 && CONSTANT_P (SUBREG_REG (op1))))
12094 op1 = validize_mem (force_const_mem (mode, op1));
12096 /* ... nor both arguments in memory. */
12097 if (!register_operand (op0, mode)
12098 && !register_operand (op1, mode))
12099 op1 = force_reg (mode, op1);
12101 tmp[0] = op0; tmp[1] = op1;
12102 ix86_expand_vector_move_misalign (mode, tmp);
12103 return;
12106 /* Make operand1 a register if it isn't already. */
12107 if (can_create_pseudo_p ()
12108 && !register_operand (op0, mode)
12109 && !register_operand (op1, mode))
12111 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
12112 return;
12115 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12118 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
12119 straight to ix86_expand_vector_move. */
12120 /* Code generation for scalar reg-reg moves of single and double precision data:
12121 if (x86_sse_partial_reg_dependency == true || x86_sse_split_regs == true)
12122 movaps reg, reg
12123 else
12124 movss reg, reg
12125 if (x86_sse_partial_reg_dependency == true)
12126 movapd reg, reg
12127 else
12128 movsd reg, reg
12130 Code generation for scalar loads of double precision data:
12131 if (x86_sse_split_regs == true)
12132 movlpd mem, reg (gas syntax)
12133 else
12134 movsd mem, reg
12136 Code generation for unaligned packed loads of single precision data
12137 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
12138 if (x86_sse_unaligned_move_optimal)
12139 movups mem, reg
12141 if (x86_sse_partial_reg_dependency == true)
12143 xorps reg, reg
12144 movlps mem, reg
12145 movhps mem+8, reg
12147 else
12149 movlps mem, reg
12150 movhps mem+8, reg
12153 Code generation for unaligned packed loads of double precision data
12154 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
12155 if (x86_sse_unaligned_move_optimal)
12156 movupd mem, reg
12158 if (x86_sse_split_regs == true)
12160 movlpd mem, reg
12161 movhpd mem+8, reg
12163 else
12165 movsd mem, reg
12166 movhpd mem+8, reg
12170 void
12171 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
12173 rtx op0, op1, m;
12175 op0 = operands[0];
12176 op1 = operands[1];
12178 if (TARGET_AVX)
12180 switch (GET_MODE_CLASS (mode))
12182 case MODE_VECTOR_INT:
12183 case MODE_INT:
12184 switch (GET_MODE_SIZE (mode))
12186 case 16:
12187 op0 = gen_lowpart (V16QImode, op0);
12188 op1 = gen_lowpart (V16QImode, op1);
12189 emit_insn (gen_avx_movdqu (op0, op1));
12190 break;
12191 case 32:
12192 op0 = gen_lowpart (V32QImode, op0);
12193 op1 = gen_lowpart (V32QImode, op1);
12194 emit_insn (gen_avx_movdqu256 (op0, op1));
12195 break;
12196 default:
12197 gcc_unreachable ();
12199 break;
12200 case MODE_VECTOR_FLOAT:
12201 op0 = gen_lowpart (mode, op0);
12202 op1 = gen_lowpart (mode, op1);
12204 switch (mode)
12206 case V4SFmode:
12207 emit_insn (gen_avx_movups (op0, op1));
12208 break;
12209 case V8SFmode:
12210 emit_insn (gen_avx_movups256 (op0, op1));
12211 break;
12212 case V2DFmode:
12213 emit_insn (gen_avx_movupd (op0, op1));
12214 break;
12215 case V4DFmode:
12216 emit_insn (gen_avx_movupd256 (op0, op1));
12217 break;
12218 default:
12219 gcc_unreachable ();
12221 break;
12223 default:
12224 gcc_unreachable ();
12227 return;
12230 if (MEM_P (op1))
12232 /* If we're optimizing for size, movups is the smallest. */
12233 if (optimize_insn_for_size_p ())
12235 op0 = gen_lowpart (V4SFmode, op0);
12236 op1 = gen_lowpart (V4SFmode, op1);
12237 emit_insn (gen_sse_movups (op0, op1));
12238 return;
12241 /* ??? If we have typed data, then it would appear that using
12242 movdqu is the only way to get unaligned data loaded with
12243 integer type. */
12244 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12246 op0 = gen_lowpart (V16QImode, op0);
12247 op1 = gen_lowpart (V16QImode, op1);
12248 emit_insn (gen_sse2_movdqu (op0, op1));
12249 return;
12252 if (TARGET_SSE2 && mode == V2DFmode)
12254 rtx zero;
12256 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
12258 op0 = gen_lowpart (V2DFmode, op0);
12259 op1 = gen_lowpart (V2DFmode, op1);
12260 emit_insn (gen_sse2_movupd (op0, op1));
12261 return;
12264 /* When SSE registers are split into halves, we can avoid
12265 writing to the top half twice. */
12266 if (TARGET_SSE_SPLIT_REGS)
12268 emit_clobber (op0);
12269 zero = op0;
12271 else
12273 /* ??? Not sure about the best option for the Intel chips.
12274 The following would seem to satisfy; the register is
12275 entirely cleared, breaking the dependency chain. We
12276 then store to the upper half, with a dependency depth
12277 of one. A rumor has it that Intel recommends two movsd
12278 followed by an unpacklpd, but this is unconfirmed. And
12279 given that the dependency depth of the unpacklpd would
12280 still be one, I'm not sure why this would be better. */
12281 zero = CONST0_RTX (V2DFmode);
12284 m = adjust_address (op1, DFmode, 0);
12285 emit_insn (gen_sse2_loadlpd (op0, zero, m));
12286 m = adjust_address (op1, DFmode, 8);
12287 emit_insn (gen_sse2_loadhpd (op0, op0, m));
12289 else
12291 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
12293 op0 = gen_lowpart (V4SFmode, op0);
12294 op1 = gen_lowpart (V4SFmode, op1);
12295 emit_insn (gen_sse_movups (op0, op1));
12296 return;
12299 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
12300 emit_move_insn (op0, CONST0_RTX (mode));
12301 else
12302 emit_clobber (op0);
12304 if (mode != V4SFmode)
12305 op0 = gen_lowpart (V4SFmode, op0);
12306 m = adjust_address (op1, V2SFmode, 0);
12307 emit_insn (gen_sse_loadlps (op0, op0, m));
12308 m = adjust_address (op1, V2SFmode, 8);
12309 emit_insn (gen_sse_loadhps (op0, op0, m));
12312 else if (MEM_P (op0))
12314 /* If we're optimizing for size, movups is the smallest. */
12315 if (optimize_insn_for_size_p ())
12317 op0 = gen_lowpart (V4SFmode, op0);
12318 op1 = gen_lowpart (V4SFmode, op1);
12319 emit_insn (gen_sse_movups (op0, op1));
12320 return;
12323 /* ??? Similar to above, only less clear because of the "typeless
12324 stores" involved. */
12325 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
12326 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12328 op0 = gen_lowpart (V16QImode, op0);
12329 op1 = gen_lowpart (V16QImode, op1);
12330 emit_insn (gen_sse2_movdqu (op0, op1));
12331 return;
12334 if (TARGET_SSE2 && mode == V2DFmode)
12336 m = adjust_address (op0, DFmode, 0);
12337 emit_insn (gen_sse2_storelpd (m, op1));
12338 m = adjust_address (op0, DFmode, 8);
12339 emit_insn (gen_sse2_storehpd (m, op1));
12341 else
12343 if (mode != V4SFmode)
12344 op1 = gen_lowpart (V4SFmode, op1);
12345 m = adjust_address (op0, V2SFmode, 0);
12346 emit_insn (gen_sse_storelps (m, op1));
12347 m = adjust_address (op0, V2SFmode, 8);
12348 emit_insn (gen_sse_storehps (m, op1));
12351 else
12352 gcc_unreachable ();
12355 /* Expand a push in MODE. This is some mode for which we do not support
12356 proper push instructions, at least from the registers that we expect
12357 the value to live in. */
12359 void
12360 ix86_expand_push (enum machine_mode mode, rtx x)
12362 rtx tmp;
12364 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
12365 GEN_INT (-GET_MODE_SIZE (mode)),
12366 stack_pointer_rtx, 1, OPTAB_DIRECT);
12367 if (tmp != stack_pointer_rtx)
12368 emit_move_insn (stack_pointer_rtx, tmp);
12370 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
12371 emit_move_insn (tmp, x);
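/* Editorial example: for a DFmode push on a 32-bit target this expands
   to the equivalent of "sub $8, %esp" followed by a move into "(%esp)",
   since there is no direct push from the registers such values live
   in.  */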
12374 /* Helper function of ix86_fixup_binary_operands to canonicalize
12375 operand order. Returns true if the operands should be swapped. */
12377 static bool
12378 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
12379 rtx operands[])
12381 rtx dst = operands[0];
12382 rtx src1 = operands[1];
12383 rtx src2 = operands[2];
12385 /* If the operation is not commutative, we can't do anything. */
12386 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
12387 return false;
12389 /* Highest priority is that src1 should match dst. */
12390 if (rtx_equal_p (dst, src1))
12391 return false;
12392 if (rtx_equal_p (dst, src2))
12393 return true;
12395 /* Next highest priority is that immediate constants come second. */
12396 if (immediate_operand (src2, mode))
12397 return false;
12398 if (immediate_operand (src1, mode))
12399 return true;
12401 /* Lowest priority is that memory references should come second. */
12402 if (MEM_P (src2))
12403 return false;
12404 if (MEM_P (src1))
12405 return true;
12407 return false;
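/* A standalone model (editorial; the enum, helper, and operand-kind
   abstraction are ours, standing in for the rtx predicates) of the
   priority rules above, assuming a commutative operation.  */
enum opnd_kind { OPND_REG, OPND_IMM, OPND_MEM };

static int
swap_binary_operands_model (int dst_eq_src1, int dst_eq_src2,
                            enum opnd_kind src1, enum opnd_kind src2)
{
  if (dst_eq_src1)
    return 0;                 /* src1 already matches the destination.  */
  if (dst_eq_src2)
    return 1;                 /* Swap so that it does.  */
  if (src2 == OPND_IMM)
    return 0;                 /* Immediates stay second.  */
  if (src1 == OPND_IMM)
    return 1;
  if (src2 == OPND_MEM)
    return 0;                 /* Memory references stay second.  */
  return src1 == OPND_MEM;
}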
12411 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
12412 destination to use for the operation. If different from the true
12413 destination in operands[0], a copy operation will be required. */
12415 rtx
12416 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
12417 rtx operands[])
12419 rtx dst = operands[0];
12420 rtx src1 = operands[1];
12421 rtx src2 = operands[2];
12423 /* Canonicalize operand order. */
12424 if (ix86_swap_binary_operands_p (code, mode, operands))
12426 rtx temp;
12428 /* It is invalid to swap operands of different modes. */
12429 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
12431 temp = src1;
12432 src1 = src2;
12433 src2 = temp;
12436 /* Both source operands cannot be in memory. */
12437 if (MEM_P (src1) && MEM_P (src2))
12439 /* Optimization: Only read from memory once. */
12440 if (rtx_equal_p (src1, src2))
12442 src2 = force_reg (mode, src2);
12443 src1 = src2;
12445 else
12446 src2 = force_reg (mode, src2);
12449 /* If the destination is memory, and we do not have matching source
12450 operands, do things in registers. */
12451 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
12452 dst = gen_reg_rtx (mode);
12454 /* Source 1 cannot be a constant. */
12455 if (CONSTANT_P (src1))
12456 src1 = force_reg (mode, src1);
12458 /* Source 1 cannot be a non-matching memory. */
12459 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
12460 src1 = force_reg (mode, src1);
12462 operands[1] = src1;
12463 operands[2] = src2;
12464 return dst;
12467 /* Similarly, but assume that the destination has already been
12468 set up properly. */
12470 void
12471 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
12472 enum machine_mode mode, rtx operands[])
12474 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
12475 gcc_assert (dst == operands[0]);
12478 /* Attempt to expand a binary operator. Make the expansion closer to the
12479 actual machine than just general_operand, which would allow 3 separate
12480 memory references (one output, two input) in a single insn. */
12482 void
12483 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
12484 rtx operands[])
12486 rtx src1, src2, dst, op, clob;
12488 dst = ix86_fixup_binary_operands (code, mode, operands);
12489 src1 = operands[1];
12490 src2 = operands[2];
12492 /* Emit the instruction. */
12494 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
12495 if (reload_in_progress)
12497 /* Reload doesn't know about the flags register, and doesn't know that
12498 it doesn't want to clobber it. We can only do this with PLUS. */
12499 gcc_assert (code == PLUS);
12500 emit_insn (op);
12502 else
12504 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12505 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
12508 /* Fix up the destination if needed. */
12509 if (dst != operands[0])
12510 emit_move_insn (operands[0], dst);
12513 /* Return TRUE or FALSE depending on whether the binary operator meets the
12514 appropriate constraints. */
12516 int
12517 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
12518 rtx operands[3])
12520 rtx dst = operands[0];
12521 rtx src1 = operands[1];
12522 rtx src2 = operands[2];
12524 /* Both source operands cannot be in memory. */
12525 if (MEM_P (src1) && MEM_P (src2))
12526 return 0;
12528 /* Canonicalize operand order for commutative operators. */
12529 if (ix86_swap_binary_operands_p (code, mode, operands))
12531 rtx temp = src1;
12532 src1 = src2;
12533 src2 = temp;
12536 /* If the destination is memory, we must have a matching source operand. */
12537 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
12538 return 0;
12540 /* Source 1 cannot be a constant. */
12541 if (CONSTANT_P (src1))
12542 return 0;
12544 /* Source 1 cannot be a non-matching memory. */
12545 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
12546 return 0;
12548 return 1;
12551 /* Attempt to expand a unary operator. Make the expansion closer to the
12552 actual machine than just general_operand, which would allow 2 separate
12553 memory references (one output, one input) in a single insn. */
12555 void
12556 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
12557 rtx operands[])
12559 int matching_memory;
12560 rtx src, dst, op, clob;
12562 dst = operands[0];
12563 src = operands[1];
12565 /* If the destination is memory, and we do not have matching source
12566 operands, do things in registers. */
12567 matching_memory = 0;
12568 if (MEM_P (dst))
12570 if (rtx_equal_p (dst, src))
12571 matching_memory = 1;
12572 else
12573 dst = gen_reg_rtx (mode);
12576 /* When source operand is memory, destination must match. */
12577 if (MEM_P (src) && !matching_memory)
12578 src = force_reg (mode, src);
12580 /* Emit the instruction. */
12582 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
12583 if (reload_in_progress || code == NOT)
12585 /* Reload doesn't know about the flags register, and doesn't know that
12586 it doesn't want to clobber it. */
12587 gcc_assert (code == NOT);
12588 emit_insn (op);
12590 else
12592 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12593 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
12596 /* Fix up the destination if needed. */
12597 if (dst != operands[0])
12598 emit_move_insn (operands[0], dst);
12601 /* Return TRUE or FALSE depending on whether the unary operator meets the
12602 appropriate constraints. */
12604 int
12605 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
12606 enum machine_mode mode ATTRIBUTE_UNUSED,
12607 rtx operands[2] ATTRIBUTE_UNUSED)
12609 /* If one of operands is memory, source and destination must match. */
12610 if ((MEM_P (operands[0])
12611 || MEM_P (operands[1]))
12612 && ! rtx_equal_p (operands[0], operands[1]))
12613 return FALSE;
12614 return TRUE;
12617 /* Post-reload splitter for converting an SF or DFmode value in an
12618 SSE register into an unsigned SImode. */
12620 void
12621 ix86_split_convert_uns_si_sse (rtx operands[])
12623 enum machine_mode vecmode;
12624 rtx value, large, zero_or_two31, input, two31, x;
12626 large = operands[1];
12627 zero_or_two31 = operands[2];
12628 input = operands[3];
12629 two31 = operands[4];
12630 vecmode = GET_MODE (large);
12631 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
12633 /* Load up the value into the low element. We must ensure that the other
12634 elements are valid floats -- zero is the easiest such value. */
12635 if (MEM_P (input))
12637 if (vecmode == V4SFmode)
12638 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
12639 else
12640 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
12642 else
12644 input = gen_rtx_REG (vecmode, REGNO (input));
12645 emit_move_insn (value, CONST0_RTX (vecmode));
12646 if (vecmode == V4SFmode)
12647 emit_insn (gen_sse_movss (value, value, input));
12648 else
12649 emit_insn (gen_sse2_movsd (value, value, input));
12652 emit_move_insn (large, two31);
12653 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
12655 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
12656 emit_insn (gen_rtx_SET (VOIDmode, large, x));
12658 x = gen_rtx_AND (vecmode, zero_or_two31, large);
12659 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
12661 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
12662 emit_insn (gen_rtx_SET (VOIDmode, value, x));
12664 large = gen_rtx_REG (V4SImode, REGNO (large));
12665 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
12667 x = gen_rtx_REG (V4SImode, REGNO (value));
12668 if (vecmode == V4SFmode)
12669 emit_insn (gen_sse2_cvttps2dq (x, value));
12670 else
12671 emit_insn (gen_sse2_cvttpd2dq (x, value));
12672 value = x;
12674 emit_insn (gen_xorv4si3 (value, value, large));
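/* A one-lane scalar model (editorial; plain C) of the vector sequence
   above: values >= 2^31 have 2^31 subtracted before the signed
   conversion, and the sign bit is then xor-ed back into the result.  */
static unsigned int
convert_uns_si_model (double value)
{
  if (value >= 2147483648.0)
    return (unsigned int) (int) (value - 2147483648.0) ^ 0x80000000u;
  return (unsigned int) (int) value;
}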
12677 /* Convert an unsigned DImode value into a DFmode, using only SSE.
12678 Expects the 64-bit DImode to be supplied in a pair of integral
12679 registers. Requires SSE2; will use SSE3 if available. For x86_32,
12680 -mfpmath=sse, !optimize_size only. */
12682 void
12683 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
12685 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
12686 rtx int_xmm, fp_xmm;
12687 rtx biases, exponents;
12688 rtx x;
12690 int_xmm = gen_reg_rtx (V4SImode);
12691 if (TARGET_INTER_UNIT_MOVES)
12692 emit_insn (gen_movdi_to_sse (int_xmm, input));
12693 else if (TARGET_SSE_SPLIT_REGS)
12695 emit_clobber (int_xmm);
12696 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
12698 else
12700 x = gen_reg_rtx (V2DImode);
12701 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
12702 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
12705 x = gen_rtx_CONST_VECTOR (V4SImode,
12706 gen_rtvec (4, GEN_INT (0x43300000UL),
12707 GEN_INT (0x45300000UL),
12708 const0_rtx, const0_rtx));
12709 exponents = validize_mem (force_const_mem (V4SImode, x));
12711 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
12712 emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, exponents));
12714 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_lo_xmm)
12715 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
12716 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
12717 (0x1.0p84 + double(fp_value_hi_xmm)).
12718 Note these exponents differ by 32. */
12720 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
12722 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
12723 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
12724 real_ldexp (&bias_lo_rvt, &dconst1, 52);
12725 real_ldexp (&bias_hi_rvt, &dconst1, 84);
12726 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
12727 x = const_double_from_real_value (bias_hi_rvt, DFmode);
12728 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
12729 biases = validize_mem (force_const_mem (V2DFmode, biases));
12730 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
12732 /* Add the upper and lower DFmode values together. */
12733 if (TARGET_SSE3)
12734 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
12735 else
12737 x = copy_to_mode_reg (V2DFmode, fp_xmm);
12738 emit_insn (gen_sse2_unpckhpd (fp_xmm, fp_xmm, fp_xmm));
12739 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
12742 ix86_expand_vector_extract (false, target, fp_xmm, 0);
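/* A standalone scalar model (editorial; assumes little-endian IEEE
   doubles and C99 hex-float constants) of the exponent-splicing trick
   above: glue the two 32-bit halves onto the 2^52 and 2^84 exponents,
   subtract the biases, and add the parts.  */
static double
convert_uns_didf_model (unsigned long long x)
{
  unsigned long long lo_bits
    = ((unsigned long long) 0x43300000 << 32) | (x & 0xffffffffu);
  unsigned long long hi_bits
    = ((unsigned long long) 0x45300000 << 32) | (x >> 32);
  double lo, hi;

  __builtin_memcpy (&lo, &lo_bits, sizeof lo); /* 0x1.0p52 + low half */
  __builtin_memcpy (&hi, &hi_bits, sizeof hi); /* 0x1.0p84 + high half * 2^32 */
  return (hi - 0x1.0p84) + (lo - 0x1.0p52);
}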
12745 /* Not used, but eases macroization of patterns. */
12746 void
12747 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
12748 rtx input ATTRIBUTE_UNUSED)
12750 gcc_unreachable ();
12753 /* Convert an unsigned SImode value into a DFmode. Only currently used
12754 for SSE, but applicable anywhere. */
12756 void
12757 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
12759 REAL_VALUE_TYPE TWO31r;
12760 rtx x, fp;
12762 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
12763 NULL, 1, OPTAB_DIRECT);
12765 fp = gen_reg_rtx (DFmode);
12766 emit_insn (gen_floatsidf2 (fp, x));
12768 real_ldexp (&TWO31r, &dconst1, 31);
12769 x = const_double_from_real_value (TWO31r, DFmode);
12771 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
12772 if (x != target)
12773 emit_move_insn (target, x);
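/* A scalar model (editorial; assumes two's-complement wraparound on the
   unsigned-to-signed cast, as on x86) of the same idea: bias the value
   into signed range, convert, then add 2^31 back.  */
static double
convert_uns_sidf_model (unsigned int x)
{
  int biased = (int) (x + 0x80000000u);   /* x - 2^31 */
  return (double) biased + 2147483648.0;
}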
12776 /* Convert a signed DImode value into a DFmode. Only used for SSE in
12777 32-bit mode; otherwise we have a direct convert instruction. */
12779 void
12780 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
12782 REAL_VALUE_TYPE TWO32r;
12783 rtx fp_lo, fp_hi, x;
12785 fp_lo = gen_reg_rtx (DFmode);
12786 fp_hi = gen_reg_rtx (DFmode);
12788 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
12790 real_ldexp (&TWO32r, &dconst1, 32);
12791 x = const_double_from_real_value (TWO32r, DFmode);
12792 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
12794 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
12796 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
12797 0, OPTAB_DIRECT);
12798 if (x != target)
12799 emit_move_insn (target, x);
12802 /* Convert an unsigned SImode value into a SFmode, using only SSE.
12803 For x86_32, -mfpmath=sse, !optimize_size only. */
12804 void
12805 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
12807 REAL_VALUE_TYPE ONE16r;
12808 rtx fp_hi, fp_lo, int_hi, int_lo, x;
12810 real_ldexp (&ONE16r, &dconst1, 16);
12811 x = const_double_from_real_value (ONE16r, SFmode);
12812 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
12813 NULL, 0, OPTAB_DIRECT);
12814 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
12815 NULL, 0, OPTAB_DIRECT);
12816 fp_hi = gen_reg_rtx (SFmode);
12817 fp_lo = gen_reg_rtx (SFmode);
12818 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
12819 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
12820 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
12821 0, OPTAB_DIRECT);
12822 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
12823 0, OPTAB_DIRECT);
12824 if (!rtx_equal_p (target, fp_hi))
12825 emit_move_insn (target, fp_hi);
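/* A scalar model (editorial): both 16-bit halves convert to SFmode
   exactly, and the multiply-add reassembles the 32-bit value.  */
static float
convert_uns_sisf_model (unsigned int x)
{
  float fp_hi = (float) (int) (x >> 16);
  float fp_lo = (float) (int) (x & 0xffff);
  return fp_hi * 65536.0f + fp_lo;
}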
12828 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
12829 then replicate the value for all elements of the vector
12830 register. */
12832 static rtx
12833 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
12835 rtvec v;
12836 switch (mode)
12838 case SImode:
12839 gcc_assert (vect);
12840 v = gen_rtvec (4, value, value, value, value);
12841 return gen_rtx_CONST_VECTOR (V4SImode, v);
12843 case DImode:
12844 gcc_assert (vect);
12845 v = gen_rtvec (2, value, value);
12846 return gen_rtx_CONST_VECTOR (V2DImode, v);
12848 case SFmode:
12849 if (vect)
12850 v = gen_rtvec (4, value, value, value, value);
12851 else
12852 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
12853 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
12854 return gen_rtx_CONST_VECTOR (V4SFmode, v);
12856 case DFmode:
12857 if (vect)
12858 v = gen_rtvec (2, value, value);
12859 else
12860 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
12861 return gen_rtx_CONST_VECTOR (V2DFmode, v);
12863 default:
12864 gcc_unreachable ();
12868 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
12869 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
12870 for an SSE register. If VECT is true, then replicate the mask for
12871 all elements of the vector register. If INVERT is true, then create
12872 a mask excluding the sign bit. */
12874 rtx
12875 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
12877 enum machine_mode vec_mode, imode;
12878 HOST_WIDE_INT hi, lo;
12879 int shift = 63;
12880 rtx v;
12881 rtx mask;
12883 /* Find the sign bit, sign extended to 2*HWI. */
12884 switch (mode)
12886 case SImode:
12887 case SFmode:
12888 imode = SImode;
12889 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
12890 lo = 0x80000000, hi = lo < 0;
12891 break;
12893 case DImode:
12894 case DFmode:
12895 imode = DImode;
12896 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
12897 if (HOST_BITS_PER_WIDE_INT >= 64)
12898 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
12899 else
12900 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
12901 break;
12903 case TImode:
12904 case TFmode:
12905 vec_mode = VOIDmode;
12906 if (HOST_BITS_PER_WIDE_INT >= 64)
12908 imode = TImode;
12909 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
12911 else
12913 rtvec vec;
12915 imode = DImode;
12916 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
12918 if (invert)
12920 lo = ~lo, hi = ~hi;
12921 v = constm1_rtx;
12923 else
12924 v = const0_rtx;
12926 mask = immed_double_const (lo, hi, imode);
12928 vec = gen_rtvec (2, v, mask);
12929 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
12930 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
12932 return v;
12934 break;
12936 default:
12937 gcc_unreachable ();
12940 if (invert)
12941 lo = ~lo, hi = ~hi;
12943 /* Force this value into the low part of a fp vector constant. */
12944 mask = immed_double_const (lo, hi, imode);
12945 mask = gen_lowpart (mode, mask);
12947 if (vec_mode == VOIDmode)
12948 return force_reg (mode, mask);
12950 v = ix86_build_const_vector (mode, vect, mask);
12951 return force_reg (vec_mode, v);
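/* Editorial worked example: for SFmode the mask built above is
   0x80000000 (or its complement 0x7fffffff when INVERT is true), placed
   in the low V4SF element, or replicated into all four elements when
   VECT is true.  */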
12954 /* Generate code for floating point ABS or NEG. */
12956 void
12957 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
12958 rtx operands[])
12960 rtx mask, set, use, clob, dst, src;
12961 bool use_sse = false;
12962 bool vector_mode = VECTOR_MODE_P (mode);
12963 enum machine_mode elt_mode = mode;
12965 if (vector_mode)
12967 elt_mode = GET_MODE_INNER (mode);
12968 use_sse = true;
12970 else if (mode == TFmode)
12971 use_sse = true;
12972 else if (TARGET_SSE_MATH)
12973 use_sse = SSE_FLOAT_MODE_P (mode);
12975 /* NEG and ABS performed with SSE use bitwise mask operations.
12976 Create the appropriate mask now. */
12977 if (use_sse)
12978 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
12979 else
12980 mask = NULL_RTX;
12982 dst = operands[0];
12983 src = operands[1];
12985 if (vector_mode)
12987 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
12988 set = gen_rtx_SET (VOIDmode, dst, set);
12989 emit_insn (set);
12991 else
12993 set = gen_rtx_fmt_e (code, mode, src);
12994 set = gen_rtx_SET (VOIDmode, dst, set);
12995 if (mask)
12997 use = gen_rtx_USE (VOIDmode, mask);
12998 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12999 emit_insn (gen_rtx_PARALLEL (VOIDmode,
13000 gen_rtvec (3, set, use, clob)));
13002 else
13003 emit_insn (set);
13007 /* Expand a copysign operation. Special case operand 0 being a constant. */
13009 void
13010 ix86_expand_copysign (rtx operands[])
13012 enum machine_mode mode;
13013 rtx dest, op0, op1, mask, nmask;
13015 dest = operands[0];
13016 op0 = operands[1];
13017 op1 = operands[2];
13019 mode = GET_MODE (dest);
13021 if (GET_CODE (op0) == CONST_DOUBLE)
13023 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
13025 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
13026 op0 = simplify_unary_operation (ABS, mode, op0, mode);
13028 if (mode == SFmode || mode == DFmode)
13030 enum machine_mode vmode;
13032 vmode = mode == SFmode ? V4SFmode : V2DFmode;
13034 if (op0 == CONST0_RTX (mode))
13035 op0 = CONST0_RTX (vmode);
13036 else
13038 rtvec v;
13040 if (mode == SFmode)
13041 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
13042 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
13043 else
13044 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
13046 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
13049 else if (op0 != CONST0_RTX (mode))
13050 op0 = force_reg (mode, op0);
13052 mask = ix86_build_signbit_mask (mode, 0, 0);
13054 if (mode == SFmode)
13055 copysign_insn = gen_copysignsf3_const;
13056 else if (mode == DFmode)
13057 copysign_insn = gen_copysigndf3_const;
13058 else
13059 copysign_insn = gen_copysigntf3_const;
13061 emit_insn (copysign_insn (dest, op0, op1, mask));
13063 else
13065 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
13067 nmask = ix86_build_signbit_mask (mode, 0, 1);
13068 mask = ix86_build_signbit_mask (mode, 0, 0);
13070 if (mode == SFmode)
13071 copysign_insn = gen_copysignsf3_var;
13072 else if (mode == DFmode)
13073 copysign_insn = gen_copysigndf3_var;
13074 else
13075 copysign_insn = gen_copysigntf3_var;
13077 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
13081 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
13082 be a constant, and so has already been expanded into a vector constant. */
13084 void
13085 ix86_split_copysign_const (rtx operands[])
13087 enum machine_mode mode, vmode;
13088 rtx dest, op0, op1, mask, x;
13090 dest = operands[0];
13091 op0 = operands[1];
13092 op1 = operands[2];
13093 mask = operands[3];
13095 mode = GET_MODE (dest);
13096 vmode = GET_MODE (mask);
13098 dest = simplify_gen_subreg (vmode, dest, mode, 0);
13099 x = gen_rtx_AND (vmode, dest, mask);
13100 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13102 if (op0 != CONST0_RTX (vmode))
13104 x = gen_rtx_IOR (vmode, dest, op0);
13105 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13109 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
13110 so we have to do two masks. */
13112 void
13113 ix86_split_copysign_var (rtx operands[])
13115 enum machine_mode mode, vmode;
13116 rtx dest, scratch, op0, op1, mask, nmask, x;
13118 dest = operands[0];
13119 scratch = operands[1];
13120 op0 = operands[2];
13121 op1 = operands[3];
13122 nmask = operands[4];
13123 mask = operands[5];
13125 mode = GET_MODE (dest);
13126 vmode = GET_MODE (mask);
13128 if (rtx_equal_p (op0, op1))
13130 /* Shouldn't happen often (it's useless, obviously), but when it does
13131 we'd generate incorrect code if we continue below. */
13132 emit_move_insn (dest, op0);
13133 return;
13136 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
13138 gcc_assert (REGNO (op1) == REGNO (scratch));
13140 x = gen_rtx_AND (vmode, scratch, mask);
13141 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
13143 dest = mask;
13144 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
13145 x = gen_rtx_NOT (vmode, dest);
13146 x = gen_rtx_AND (vmode, x, op0);
13147 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13149 else
13151 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
13153 x = gen_rtx_AND (vmode, scratch, mask);
13155 else /* alternative 2,4 */
13157 gcc_assert (REGNO (mask) == REGNO (scratch));
13158 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
13159 x = gen_rtx_AND (vmode, scratch, op1);
13161 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
13163 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
13165 dest = simplify_gen_subreg (vmode, op0, mode, 0);
13166 x = gen_rtx_AND (vmode, dest, nmask);
13168 else /* alternative 3,4 */
13170 gcc_assert (REGNO (nmask) == REGNO (dest));
13171 dest = nmask;
13172 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
13173 x = gen_rtx_AND (vmode, dest, op0);
13175 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13178 x = gen_rtx_IOR (vmode, dest, scratch);
13179 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
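/* A scalar model (editorial; plain C on SFmode bit patterns) of the
   mask algebra both copysign splitters above implement:
   result = (magnitude & ~sign_mask) | (sign_source & sign_mask).  */
static unsigned int
copysign_bits_model (unsigned int magnitude, unsigned int sign_source)
{
  const unsigned int sign_mask = 0x80000000u;
  return (magnitude & ~sign_mask) | (sign_source & sign_mask);
}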
13182 /* Return TRUE or FALSE depending on whether the first SET in INSN
13183 has source and destination with matching CC modes, and that the
13184 CC mode is at least as constrained as REQ_MODE. */
13186 int
13187 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
13189 rtx set;
13190 enum machine_mode set_mode;
13192 set = PATTERN (insn);
13193 if (GET_CODE (set) == PARALLEL)
13194 set = XVECEXP (set, 0, 0);
13195 gcc_assert (GET_CODE (set) == SET);
13196 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
13198 set_mode = GET_MODE (SET_DEST (set));
13199 switch (set_mode)
13201 case CCNOmode:
13202 if (req_mode != CCNOmode
13203 && (req_mode != CCmode
13204 || XEXP (SET_SRC (set), 1) != const0_rtx))
13205 return 0;
13206 break;
13207 case CCmode:
13208 if (req_mode == CCGCmode)
13209 return 0;
13210 /* FALLTHRU */
13211 case CCGCmode:
13212 if (req_mode == CCGOCmode || req_mode == CCNOmode)
13213 return 0;
13214 /* FALLTHRU */
13215 case CCGOCmode:
13216 if (req_mode == CCZmode)
13217 return 0;
13218 /* FALLTHRU */
13219 case CCAmode:
13220 case CCCmode:
13221 case CCOmode:
13222 case CCSmode:
13223 case CCZmode:
13224 break;
13226 default:
13227 gcc_unreachable ();
13230 return (GET_MODE (SET_SRC (set)) == set_mode);
13233 /* Generate insn patterns to do an integer compare of OPERANDS. */
13235 static rtx
13236 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
13238 enum machine_mode cmpmode;
13239 rtx tmp, flags;
13241 cmpmode = SELECT_CC_MODE (code, op0, op1);
13242 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
13244 /* This is very simple, but making the interface the same as in the
13245 FP case makes the rest of the code easier. */
13246 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
13247 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
13249 /* Return the test that should be put into the flags user, i.e.
13250 the bcc, scc, or cmov instruction. */
13251 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
13254 /* Figure out whether to use ordered or unordered fp comparisons.
13255 Return the appropriate mode to use. */
13257 enum machine_mode
13258 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
13260 /* ??? In order to make all comparisons reversible, we do all comparisons
13261 non-trapping when compiling for IEEE. Once gcc is able to distinguish
13262 all forms of trapping and nontrapping comparisons, we can make inequality
13263 comparisons trapping again, since it results in better code when using
13264 FCOM based compares. */
13265 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
13268 enum machine_mode
13269 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
13271 enum machine_mode mode = GET_MODE (op0);
13273 if (SCALAR_FLOAT_MODE_P (mode))
13275 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
13276 return ix86_fp_compare_mode (code);
13279 switch (code)
13281 /* Only zero flag is needed. */
13282 case EQ: /* ZF=0 */
13283 case NE: /* ZF!=0 */
13284 return CCZmode;
13285 /* Codes needing carry flag. */
13286 case GEU: /* CF=0 */
13287 case LTU: /* CF=1 */
13288 /* Detect overflow checks. They need just the carry flag. */
13289 if (GET_CODE (op0) == PLUS
13290 && rtx_equal_p (op1, XEXP (op0, 0)))
13291 return CCCmode;
13292 else
13293 return CCmode;
13294 case GTU: /* CF=0 & ZF=0 */
13295 case LEU: /* CF=1 | ZF=1 */
13296 /* Detect overflow checks. They need just the carry flag. */
13297 if (GET_CODE (op0) == MINUS
13298 && rtx_equal_p (op1, XEXP (op0, 0)))
13299 return CCCmode;
13300 else
13301 return CCmode;
13302 /* Codes possibly doable only with the sign flag when
13303 comparing against zero. */
13304 case GE: /* SF=OF or SF=0 */
13305 case LT: /* SF<>OF or SF=1 */
13306 if (op1 == const0_rtx)
13307 return CCGOCmode;
13308 else
13310 /* For other cases the carry flag is not required. */
13310 return CCGCmode;
13311 /* Codes doable only with the sign flag when comparing
13312 against zero, but we miss a jump instruction for it,
13313 so we need to use relational tests against overflow,
13314 which thus needs to be zero. */
13315 case GT: /* ZF=0 & SF=OF */
13316 case LE: /* ZF=1 | SF<>OF */
13317 if (op1 == const0_rtx)
13318 return CCNOmode;
13319 else
13320 return CCGCmode;
13321 /* The strcmp pattern does (use flags), and combine may ask us for a
13322 proper mode. */
13323 case USE:
13324 return CCmode;
13325 default:
13326 gcc_unreachable ();
13330 /* Return the fixed registers used for condition codes. */
13332 static bool
13333 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13335 *p1 = FLAGS_REG;
13336 *p2 = FPSR_REG;
13337 return true;
13340 /* If two condition code modes are compatible, return a condition code
13341 mode which is compatible with both. Otherwise, return
13342 VOIDmode. */
13344 static enum machine_mode
13345 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
13347 if (m1 == m2)
13348 return m1;
13350 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
13351 return VOIDmode;
13353 if ((m1 == CCGCmode && m2 == CCGOCmode)
13354 || (m1 == CCGOCmode && m2 == CCGCmode))
13355 return CCGCmode;
13357 switch (m1)
13359 default:
13360 gcc_unreachable ();
13362 case CCmode:
13363 case CCGCmode:
13364 case CCGOCmode:
13365 case CCNOmode:
13366 case CCAmode:
13367 case CCCmode:
13368 case CCOmode:
13369 case CCSmode:
13370 case CCZmode:
13371 switch (m2)
13373 default:
13374 return VOIDmode;
13376 case CCmode:
13377 case CCGCmode:
13378 case CCGOCmode:
13379 case CCNOmode:
13380 case CCAmode:
13381 case CCCmode:
13382 case CCOmode:
13383 case CCSmode:
13384 case CCZmode:
13385 return CCmode;
13388 case CCFPmode:
13389 case CCFPUmode:
13390 /* These are only compatible with themselves, which we already
13391 checked above. */
13392 return VOIDmode;
13396 /* Split comparison code CODE into comparisons we can do using branch
13397 instructions. BYPASS_CODE is the comparison code for a branch that
13398 will branch around FIRST_CODE and SECOND_CODE. If one of the branches
13399 is not required, its value is set to UNKNOWN.
13400 We never require more than two branches. */
13402 void
13403 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
13404 enum rtx_code *first_code,
13405 enum rtx_code *second_code)
13407 *first_code = code;
13408 *bypass_code = UNKNOWN;
13409 *second_code = UNKNOWN;
13411 /* The fcomi comparison sets flags as follows:
13413 cmp ZF PF CF
13414 > 0 0 0
13415 < 0 0 1
13416 = 1 0 0
13417 un 1 1 1 */
13419 switch (code)
13421 case GT: /* GTU - CF=0 & ZF=0 */
13422 case GE: /* GEU - CF=0 */
13423 case ORDERED: /* PF=0 */
13424 case UNORDERED: /* PF=1 */
13425 case UNEQ: /* EQ - ZF=1 */
13426 case UNLT: /* LTU - CF=1 */
13427 case UNLE: /* LEU - CF=1 | ZF=1 */
13428 case LTGT: /* EQ - ZF=0 */
13429 break;
13430 case LT: /* LTU - CF=1 - fails on unordered */
13431 *first_code = UNLT;
13432 *bypass_code = UNORDERED;
13433 break;
13434 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
13435 *first_code = UNLE;
13436 *bypass_code = UNORDERED;
13437 break;
13438 case EQ: /* EQ - ZF=1 - fails on unordered */
13439 *first_code = UNEQ;
13440 *bypass_code = UNORDERED;
13441 break;
13442 case NE: /* NE - ZF=0 - fails on unordered */
13443 *first_code = LTGT;
13444 *second_code = UNORDERED;
13445 break;
13446 case UNGE: /* GEU - CF=0 - fails on unordered */
13447 *first_code = GE;
13448 *second_code = UNORDERED;
13449 break;
13450 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
13451 *first_code = GT;
13452 *second_code = UNORDERED;
13453 break;
13454 default:
13455 gcc_unreachable ();
13457 if (!TARGET_IEEE_FP)
13459 *second_code = UNKNOWN;
13460 *bypass_code = UNKNOWN;
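/* Editorial worked example: under TARGET_IEEE_FP, CODE == EQ becomes
   first_code = UNEQ with bypass_code = UNORDERED (branch around the
   equality test when the operands are unordered), while NE becomes
   first_code = LTGT plus a second UNORDERED branch.  */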
13464 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
13465 All of the following functions use the number of instructions as a cost metric.
13466 In the future this should be tweaked to compute bytes for optimize_size and
13467 take into account the performance of various instructions on various CPUs. */
13468 static int
13469 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
13471 if (!TARGET_IEEE_FP)
13472 return 4;
13473 /* The cost of code output by ix86_expand_fp_compare. */
13474 switch (code)
13476 case UNLE:
13477 case UNLT:
13478 case LTGT:
13479 case GT:
13480 case GE:
13481 case UNORDERED:
13482 case ORDERED:
13483 case UNEQ:
13484 return 4;
13485 break;
13486 case LT:
13487 case NE:
13488 case EQ:
13489 case UNGE:
13490 return 5;
13491 break;
13492 case LE:
13493 case UNGT:
13494 return 6;
13495 break;
13496 default:
13497 gcc_unreachable ();
13501 /* Return cost of comparison done using fcomi operation.
13502 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13503 static int
13504 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
13506 enum rtx_code bypass_code, first_code, second_code;
13507 /* Return an arbitrarily high cost when the instruction is not supported - this
13508 prevents gcc from using it. */
13509 if (!TARGET_CMOVE)
13510 return 1024;
13511 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13512 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
13515 /* Return cost of comparison done using sahf operation.
13516 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13517 static int
13518 ix86_fp_comparison_sahf_cost (enum rtx_code code)
13520 enum rtx_code bypass_code, first_code, second_code;
13521 /* Return an arbitrarily high cost when the instruction is not preferred - this
13522 keeps gcc from using it. */
13523 if (!(TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ())))
13524 return 1024;
13525 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13526 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
13529 /* Compute cost of the comparison done using any method.
13530 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13531 static int
13532 ix86_fp_comparison_cost (enum rtx_code code)
13534 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
13535 int min;
13537 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
13538 sahf_cost = ix86_fp_comparison_sahf_cost (code);
13540 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
13541 if (min > sahf_cost)
13542 min = sahf_cost;
13543 if (min > fcomi_cost)
13544 min = fcomi_cost;
13545 return min;
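/* Worked numbers (read off the three cost functions above, counting
   instructions) for EQ under TARGET_IEEE_FP: the fnstsw/arithmetic path
   costs 5, fcomi costs 2 plus 1 for the required UNORDERED bypass jump,
   and sahf costs 3 plus 1, so fcomi wins whenever TARGET_CMOVE makes it
   available:  */

enum
{
  EQ_COST_ARITHMETICS = 5,	/* ix86_fp_comparison_arithmetics_cost */
  EQ_COST_FCOMI = 2 + 1,	/* ix86_fp_comparison_fcomi_cost */
  EQ_COST_SAHF = 3 + 1		/* ix86_fp_comparison_sahf_cost */
};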
13548 /* Return true if we should use an FCOMI instruction for this
13549 fp comparison. */
13551 int
13552 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
13554 enum rtx_code swapped_code = swap_condition (code);
13556 return ((ix86_fp_comparison_cost (code)
13557 == ix86_fp_comparison_fcomi_cost (code))
13558 || (ix86_fp_comparison_cost (swapped_code)
13559 == ix86_fp_comparison_fcomi_cost (swapped_code)));
13562 /* Swap, force into registers, or otherwise massage the two operands
13563 to an fp comparison. The operands are updated in place; the new
13564 comparison code is returned. */
13566 static enum rtx_code
13567 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
13569 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
13570 rtx op0 = *pop0, op1 = *pop1;
13571 enum machine_mode op_mode = GET_MODE (op0);
13572 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
13574 /* All of the unordered compare instructions only work on registers.
13575 The same is true of the fcomi compare instructions. The XFmode
13576 compare instructions require registers except when comparing
13577 against zero or when converting operand 1 from fixed point to
13578 floating point. */
13580 if (!is_sse
13581 && (fpcmp_mode == CCFPUmode
13582 || (op_mode == XFmode
13583 && ! (standard_80387_constant_p (op0) == 1
13584 || standard_80387_constant_p (op1) == 1)
13585 && GET_CODE (op1) != FLOAT)
13586 || ix86_use_fcomi_compare (code)))
13588 op0 = force_reg (op_mode, op0);
13589 op1 = force_reg (op_mode, op1);
13591 else
13593 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
13594 things around if they appear profitable, otherwise force op0
13595 into a register. */
13597 if (standard_80387_constant_p (op0) == 0
13598 || (MEM_P (op0)
13599 && ! (standard_80387_constant_p (op1) == 0
13600 || MEM_P (op1))))
13602 rtx tmp;
13603 tmp = op0, op0 = op1, op1 = tmp;
13604 code = swap_condition (code);
13607 if (!REG_P (op0))
13608 op0 = force_reg (op_mode, op0);
13610 if (CONSTANT_P (op1))
13612 int tmp = standard_80387_constant_p (op1);
13613 if (tmp == 0)
13614 op1 = validize_mem (force_const_mem (op_mode, op1));
13615 else if (tmp == 1)
13617 if (TARGET_CMOVE)
13618 op1 = force_reg (op_mode, op1);
13620 else
13621 op1 = force_reg (op_mode, op1);
13625 /* Try to rearrange the comparison to make it cheaper. */
13626 if (ix86_fp_comparison_cost (code)
13627 > ix86_fp_comparison_cost (swap_condition (code))
13628 && (REG_P (op1) || can_create_pseudo_p ()))
13630 rtx tmp;
13631 tmp = op0, op0 = op1, op1 = tmp;
13632 code = swap_condition (code);
13633 if (!REG_P (op0))
13634 op0 = force_reg (op_mode, op0);
13637 *pop0 = op0;
13638 *pop1 = op1;
13639 return code;
13642 /* Convert a comparison code we use to represent an FP comparison into the integer
13643 code that will result in a proper branch. Return UNKNOWN if no such code
13644 is available. */
13646 enum rtx_code
13647 ix86_fp_compare_code_to_integer (enum rtx_code code)
13649 switch (code)
13651 case GT:
13652 return GTU;
13653 case GE:
13654 return GEU;
13655 case ORDERED:
13656 case UNORDERED:
13657 return code;
13658 break;
13659 case UNEQ:
13660 return EQ;
13661 break;
13662 case UNLT:
13663 return LTU;
13664 break;
13665 case UNLE:
13666 return LEU;
13667 break;
13668 case LTGT:
13669 return NE;
13670 break;
13671 default:
13672 return UNKNOWN;
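/* Illustration (ours, not part of the source): after fcomi, the ordered
   "a > b" is tested exactly like an unsigned integer compare -- CF=0 and
   ZF=0, i.e. the "ja" condition -- which is why GT maps to GTU above:  */

static int
fcomi_gt_condition (unsigned char cf, unsigned char zf)
{
  return cf == 0 && zf == 0;	/* the GTU / "ja" test */
}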
13676 /* Generate insn patterns to do a floating point compare of OPERANDS. */
13678 static rtx
13679 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
13680 rtx *second_test, rtx *bypass_test)
13682 enum machine_mode fpcmp_mode, intcmp_mode;
13683 rtx tmp, tmp2;
13684 int cost = ix86_fp_comparison_cost (code);
13685 enum rtx_code bypass_code, first_code, second_code;
13687 fpcmp_mode = ix86_fp_compare_mode (code);
13688 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
13690 if (second_test)
13691 *second_test = NULL_RTX;
13692 if (bypass_test)
13693 *bypass_test = NULL_RTX;
13695 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13697 /* Do fcomi/sahf based test when profitable. */
13698 if (ix86_fp_comparison_arithmetics_cost (code) > cost
13699 && (bypass_code == UNKNOWN || bypass_test)
13700 && (second_code == UNKNOWN || second_test))
13702 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
13703 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
13704 tmp);
13705 if (TARGET_CMOVE)
13706 emit_insn (tmp);
13707 else
13709 gcc_assert (TARGET_SAHF);
13711 if (!scratch)
13712 scratch = gen_reg_rtx (HImode);
13713 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
13715 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
13718 /* The FP codes work out to act like unsigned. */
13719 intcmp_mode = fpcmp_mode;
13720 code = first_code;
13721 if (bypass_code != UNKNOWN)
13722 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
13723 gen_rtx_REG (intcmp_mode, FLAGS_REG),
13724 const0_rtx);
13725 if (second_code != UNKNOWN)
13726 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
13727 gen_rtx_REG (intcmp_mode, FLAGS_REG),
13728 const0_rtx);
13730 else
13732 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
13733 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
13734 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
13735 if (!scratch)
13736 scratch = gen_reg_rtx (HImode);
13737 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
13739 /* In the unordered case, we have to check C2 for NaN's, which
13740 doesn't happen to work out to anything nice combination-wise.
13741 So do some bit twiddling on the value we've got in AH to come
13742 up with an appropriate set of condition codes. */
13744 intcmp_mode = CCNOmode;
13745 switch (code)
13747 case GT:
13748 case UNGT:
13749 if (code == GT || !TARGET_IEEE_FP)
13751 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
13752 code = EQ;
13754 else
13756 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13757 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
13758 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
13759 intcmp_mode = CCmode;
13760 code = GEU;
13762 break;
13763 case LT:
13764 case UNLT:
13765 if (code == LT && TARGET_IEEE_FP)
13767 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13768 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
13769 intcmp_mode = CCmode;
13770 code = EQ;
13772 else
13774 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
13775 code = NE;
13777 break;
13778 case GE:
13779 case UNGE:
13780 if (code == GE || !TARGET_IEEE_FP)
13782 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
13783 code = EQ;
13785 else
13787 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13788 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
13789 GEN_INT (0x01)));
13790 code = NE;
13792 break;
13793 case LE:
13794 case UNLE:
13795 if (code == LE && TARGET_IEEE_FP)
13797 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13798 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
13799 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
13800 intcmp_mode = CCmode;
13801 code = LTU;
13803 else
13805 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
13806 code = NE;
13808 break;
13809 case EQ:
13810 case UNEQ:
13811 if (code == EQ && TARGET_IEEE_FP)
13813 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13814 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
13815 intcmp_mode = CCmode;
13816 code = EQ;
13818 else
13820 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
13821 code = NE;
13822 break;
13824 break;
13825 case NE:
13826 case LTGT:
13827 if (code == NE && TARGET_IEEE_FP)
13829 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
13830 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
13831 GEN_INT (0x40)));
13832 code = NE;
13834 else
13836 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
13837 code = EQ;
13839 break;
13841 case UNORDERED:
13842 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
13843 code = NE;
13844 break;
13845 case ORDERED:
13846 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
13847 code = EQ;
13848 break;
13850 default:
13851 gcc_unreachable ();
13855 /* Return the test that should be put into the flags user, i.e.
13856 the bcc, scc, or cmov instruction. */
13857 return gen_rtx_fmt_ee (code, VOIDmode,
13858 gen_rtx_REG (intcmp_mode, FLAGS_REG),
13859 const0_rtx);
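/* Key to the magic constants above (an illustrative sketch; the names are
   ours): after "fnstsw %ax" the x87 condition bits land in AH as
   C0=0x01, C2=0x04 and C3=0x40, hence 0x45 = C0|C2|C3 and 0x44 = C2|C3.
   For example, the !TARGET_IEEE_FP GE case is "test $0x05, %ah" followed
   by an EQ test -- true iff neither C0 (below) nor C2 (unordered) is set:  */

#define X87_AH_C0 0x01	/* set when op0 < op1 */
#define X87_AH_C2 0x04	/* set when unordered (NaN) */
#define X87_AH_C3 0x40	/* set when op0 == op1 */

static int
fp_ge_from_ah (unsigned char ah)
{
  return (ah & (X87_AH_C0 | X87_AH_C2)) == 0;	/* test $0x05, %ah */
}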
13862 rtx
13863 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
13865 rtx op0, op1, ret;
13866 op0 = ix86_compare_op0;
13867 op1 = ix86_compare_op1;
13869 if (second_test)
13870 *second_test = NULL_RTX;
13871 if (bypass_test)
13872 *bypass_test = NULL_RTX;
13874 if (ix86_compare_emitted)
13876 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
13877 ix86_compare_emitted = NULL_RTX;
13879 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
13881 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
13882 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
13883 second_test, bypass_test);
13885 else
13886 ret = ix86_expand_int_compare (code, op0, op1);
13888 return ret;
13891 /* Return true if the CODE will result in nontrivial jump sequence. */
13892 bool
13893 ix86_fp_jump_nontrivial_p (enum rtx_code code)
13895 enum rtx_code bypass_code, first_code, second_code;
13896 if (!TARGET_CMOVE)
13897 return true;
13898 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13899 return bypass_code != UNKNOWN || second_code != UNKNOWN;
13902 void
13903 ix86_expand_branch (enum rtx_code code, rtx label)
13905 rtx tmp;
13907 /* If we have emitted a compare insn, go straight to simple.
13908 ix86_expand_compare won't emit anything if ix86_compare_emitted
13909 is non-NULL. */
13910 if (ix86_compare_emitted)
13911 goto simple;
13913 switch (GET_MODE (ix86_compare_op0))
13915 case QImode:
13916 case HImode:
13917 case SImode:
13918 simple:
13919 tmp = ix86_expand_compare (code, NULL, NULL);
13920 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
13921 gen_rtx_LABEL_REF (VOIDmode, label),
13922 pc_rtx);
13923 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
13924 return;
13926 case SFmode:
13927 case DFmode:
13928 case XFmode:
13930 rtvec vec;
13931 int use_fcomi;
13932 enum rtx_code bypass_code, first_code, second_code;
13934 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
13935 &ix86_compare_op1);
13937 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13939 /* Check whether we will use the natural sequence with one jump. If
13940 so, we can expand the jump early. Otherwise delay expansion by
13941 creating a compound insn so as not to confuse the optimizers. */
13942 if (bypass_code == UNKNOWN && second_code == UNKNOWN)
13944 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
13945 gen_rtx_LABEL_REF (VOIDmode, label),
13946 pc_rtx, NULL_RTX, NULL_RTX);
13948 else
13950 tmp = gen_rtx_fmt_ee (code, VOIDmode,
13951 ix86_compare_op0, ix86_compare_op1);
13952 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
13953 gen_rtx_LABEL_REF (VOIDmode, label),
13954 pc_rtx);
13955 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
13957 use_fcomi = ix86_use_fcomi_compare (code);
13958 vec = rtvec_alloc (3 + !use_fcomi);
13959 RTVEC_ELT (vec, 0) = tmp;
13960 RTVEC_ELT (vec, 1)
13961 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FPSR_REG));
13962 RTVEC_ELT (vec, 2)
13963 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FLAGS_REG));
13964 if (! use_fcomi)
13965 RTVEC_ELT (vec, 3)
13966 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
13968 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
13970 return;
13973 case DImode:
13974 if (TARGET_64BIT)
13975 goto simple;
13976 case TImode:
13977 /* Expand DImode branch into multiple compare+branch. */
13979 rtx lo[2], hi[2], label2;
13980 enum rtx_code code1, code2, code3;
13981 enum machine_mode submode;
13983 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
13985 tmp = ix86_compare_op0;
13986 ix86_compare_op0 = ix86_compare_op1;
13987 ix86_compare_op1 = tmp;
13988 code = swap_condition (code);
13990 if (GET_MODE (ix86_compare_op0) == DImode)
13992 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
13993 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
13994 submode = SImode;
13996 else
13998 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
13999 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
14000 submode = DImode;
14003 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
14004 avoid two branches. This costs one extra insn, so disable when
14005 optimizing for size. */
14007 if ((code == EQ || code == NE)
14008 && (!optimize_insn_for_size_p ()
14009 || hi[1] == const0_rtx || lo[1] == const0_rtx))
14011 rtx xor0, xor1;
14013 xor1 = hi[0];
14014 if (hi[1] != const0_rtx)
14015 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
14016 NULL_RTX, 0, OPTAB_WIDEN);
14018 xor0 = lo[0];
14019 if (lo[1] != const0_rtx)
14020 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
14021 NULL_RTX, 0, OPTAB_WIDEN);
14023 tmp = expand_binop (submode, ior_optab, xor1, xor0,
14024 NULL_RTX, 0, OPTAB_WIDEN);
14026 ix86_compare_op0 = tmp;
14027 ix86_compare_op1 = const0_rtx;
14028 ix86_expand_branch (code, label);
14029 return;
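/* In C terms, the fold just emitted is:
   a == b  <=>  ((hi0 ^ hi1) | (lo0 ^ lo1)) == 0
   -- two xors and an ior set ZF once, so a single jz/jnz replaces the
   two-branch sequence expanded below. */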
14032 /* Otherwise, if we are doing a less-than or greater-than-or-equal
14033 comparison, op1 is a constant and the low word is zero, then we can just
14034 examine the high word. Similarly for a low word of -1 and
14035 less-than-or-equal or greater-than. */
14037 if (CONST_INT_P (hi[1]))
14038 switch (code)
14040 case LT: case LTU: case GE: case GEU:
14041 if (lo[1] == const0_rtx)
14043 ix86_compare_op0 = hi[0];
14044 ix86_compare_op1 = hi[1];
14045 ix86_expand_branch (code, label);
14046 return;
14048 break;
14049 case LE: case LEU: case GT: case GTU:
14050 if (lo[1] == constm1_rtx)
14052 ix86_compare_op0 = hi[0];
14053 ix86_compare_op1 = hi[1];
14054 ix86_expand_branch (code, label);
14055 return;
14057 break;
14058 default:
14059 break;
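/* Worked example of the shortcut: for "a < 0x200000000ULL" on ia32 the
   constant splits into hi[1] = 2, lo[1] = 0. With a zero low word, LTU
   can never be decided by the low halves, so the whole test collapses
   to the single compare "hi(a) u< 2". */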
14062 /* Otherwise, we need two or three jumps. */
14064 label2 = gen_label_rtx ();
14066 code1 = code;
14067 code2 = swap_condition (code);
14068 code3 = unsigned_condition (code);
14070 switch (code)
14072 case LT: case GT: case LTU: case GTU:
14073 break;
14075 case LE: code1 = LT; code2 = GT; break;
14076 case GE: code1 = GT; code2 = LT; break;
14077 case LEU: code1 = LTU; code2 = GTU; break;
14078 case GEU: code1 = GTU; code2 = LTU; break;
14080 case EQ: code1 = UNKNOWN; code2 = NE; break;
14081 case NE: code2 = UNKNOWN; break;
14083 default:
14084 gcc_unreachable ();
14088 * a < b =>
14089 * if (hi(a) < hi(b)) goto true;
14090 * if (hi(a) > hi(b)) goto false;
14091 * if (lo(a) < lo(b)) goto true;
14092 * false:
14095 ix86_compare_op0 = hi[0];
14096 ix86_compare_op1 = hi[1];
14098 if (code1 != UNKNOWN)
14099 ix86_expand_branch (code1, label);
14100 if (code2 != UNKNOWN)
14101 ix86_expand_branch (code2, label2);
14103 ix86_compare_op0 = lo[0];
14104 ix86_compare_op1 = lo[1];
14105 ix86_expand_branch (code3, label);
14107 if (code2 != UNKNOWN)
14108 emit_label (label2);
14109 return;
14112 default:
14113 gcc_unreachable ();
14117 /* Split branch based on floating point condition. */
14118 void
14119 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
14120 rtx target1, rtx target2, rtx tmp, rtx pushed)
14122 rtx second, bypass;
14123 rtx label = NULL_RTX;
14124 rtx condition;
14125 int bypass_probability = -1, second_probability = -1, probability = -1;
14126 rtx i;
14128 if (target2 != pc_rtx)
14130 rtx tmp = target2;
14131 code = reverse_condition_maybe_unordered (code);
14132 target2 = target1;
14133 target1 = tmp;
14136 condition = ix86_expand_fp_compare (code, op1, op2,
14137 tmp, &second, &bypass);
14139 /* Remove pushed operand from stack. */
14140 if (pushed)
14141 ix86_free_from_memory (GET_MODE (pushed));
14143 if (split_branch_probability >= 0)
14145 /* Distribute the probabilities across the jumps.
14146 Assume that BYPASS and SECOND always test
14147 for UNORDERED. */
14148 probability = split_branch_probability;
14150 /* A value of 1 is low enough that there is no need for the probability
14151 to be updated. Later we may run some experiments and see
14152 if unordered values are more frequent in practice. */
14153 if (bypass)
14154 bypass_probability = 1;
14155 if (second)
14156 second_probability = 1;
14158 if (bypass != NULL_RTX)
14160 label = gen_label_rtx ();
14161 i = emit_jump_insn (gen_rtx_SET
14162 (VOIDmode, pc_rtx,
14163 gen_rtx_IF_THEN_ELSE (VOIDmode,
14164 bypass,
14165 gen_rtx_LABEL_REF (VOIDmode,
14166 label),
14167 pc_rtx)));
14168 if (bypass_probability >= 0)
14169 REG_NOTES (i)
14170 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14171 GEN_INT (bypass_probability),
14172 REG_NOTES (i));
14174 i = emit_jump_insn (gen_rtx_SET
14175 (VOIDmode, pc_rtx,
14176 gen_rtx_IF_THEN_ELSE (VOIDmode,
14177 condition, target1, target2)));
14178 if (probability >= 0)
14179 REG_NOTES (i)
14180 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14181 GEN_INT (probability),
14182 REG_NOTES (i));
14183 if (second != NULL_RTX)
14185 i = emit_jump_insn (gen_rtx_SET
14186 (VOIDmode, pc_rtx,
14187 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
14188 target2)));
14189 if (second_probability >= 0)
14190 REG_NOTES (i)
14191 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14192 GEN_INT (second_probability),
14193 REG_NOTES (i));
14195 if (label != NULL_RTX)
14196 emit_label (label);
14199 int
14200 ix86_expand_setcc (enum rtx_code code, rtx dest)
14202 rtx ret, tmp, tmpreg, equiv;
14203 rtx second_test, bypass_test;
14205 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
14206 return 0; /* FAIL */
14208 gcc_assert (GET_MODE (dest) == QImode);
14210 ret = ix86_expand_compare (code, &second_test, &bypass_test);
14211 PUT_MODE (ret, QImode);
14213 tmp = dest;
14214 tmpreg = dest;
14216 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
14217 if (bypass_test || second_test)
14219 rtx test = second_test;
14220 int bypass = 0;
14221 rtx tmp2 = gen_reg_rtx (QImode);
14222 if (bypass_test)
14224 gcc_assert (!second_test);
14225 test = bypass_test;
14226 bypass = 1;
14227 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
14229 PUT_MODE (test, QImode);
14230 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
14232 if (bypass)
14233 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
14234 else
14235 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
14238 /* Attach a REG_EQUAL note describing the comparison result. */
14239 if (ix86_compare_op0 && ix86_compare_op1)
14241 equiv = simplify_gen_relational (code, QImode,
14242 GET_MODE (ix86_compare_op0),
14243 ix86_compare_op0, ix86_compare_op1);
14244 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
14247 return 1; /* DONE */
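/* A sketch (hypothetical helper taking the flag bits as inputs) of the
   two-test combination above for IEEE "x != y": ZF=0 alone cannot
   represent NE, because NE must also hold for unordered operands, so gcc
   emits roughly "setne %al; setp %cl; orb %cl, %al":  */

static int
fp_ne_from_flags (int zf, int pf)
{
  return (zf == 0) || (pf != 0);	/* LTGT or UNORDERED */
}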
14250 /* Expand a comparison setting or clearing the carry flag. Return true when
14251 successful, and set *POP to the comparison rtx for the operation. */
14252 static bool
14253 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
14255 enum machine_mode mode =
14256 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
14258 /* Do not handle double-word compares, which go through the special path. */
14259 if (mode == (TARGET_64BIT ? TImode : DImode))
14260 return false;
14262 if (SCALAR_FLOAT_MODE_P (mode))
14264 rtx second_test = NULL, bypass_test = NULL;
14265 rtx compare_op, compare_seq;
14267 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14269 /* Shortcut: the following common codes never translate
14270 into carry flag compares. */
14271 if (code == EQ || code == NE || code == UNEQ || code == LTGT
14272 || code == ORDERED || code == UNORDERED)
14273 return false;
14275 /* These comparisons require the zero flag; swap the operands so they won't. */
14276 if ((code == GT || code == UNLE || code == LE || code == UNGT)
14277 && !TARGET_IEEE_FP)
14279 rtx tmp = op0;
14280 op0 = op1;
14281 op1 = tmp;
14282 code = swap_condition (code);
14285 /* Try to expand the comparison and verify that we end up with
14286 a carry flag based comparison. This fails only when
14287 we decide to expand the comparison using arithmetic, which is not
14288 a common scenario. */
14289 start_sequence ();
14290 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
14291 &second_test, &bypass_test);
14292 compare_seq = get_insns ();
14293 end_sequence ();
14295 if (second_test || bypass_test)
14296 return false;
14298 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
14299 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
14300 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
14301 else
14302 code = GET_CODE (compare_op);
14304 if (code != LTU && code != GEU)
14305 return false;
14307 emit_insn (compare_seq);
14308 *pop = compare_op;
14309 return true;
14312 if (!INTEGRAL_MODE_P (mode))
14313 return false;
14315 switch (code)
14317 case LTU:
14318 case GEU:
14319 break;
14321 /* Convert a==0 into (unsigned)a<1. */
14322 case EQ:
14323 case NE:
14324 if (op1 != const0_rtx)
14325 return false;
14326 op1 = const1_rtx;
14327 code = (code == EQ ? LTU : GEU);
14328 break;
14330 /* Convert a>b into b<a or a>=b+1. */
14331 case GTU:
14332 case LEU:
14333 if (CONST_INT_P (op1))
14335 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
14336 /* Bail out on overflow. We could still swap the operands, but that
14337 would force loading the constant into a register. */
14338 if (op1 == const0_rtx
14339 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
14340 return false;
14341 code = (code == GTU ? GEU : LTU);
14343 else
14345 rtx tmp = op1;
14346 op1 = op0;
14347 op0 = tmp;
14348 code = (code == GTU ? LTU : GEU);
14350 break;
14352 /* Convert a>=0 into (unsigned)a<0x80000000. */
14353 case LT:
14354 case GE:
14355 if (mode == DImode || op1 != const0_rtx)
14356 return false;
14357 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
14358 code = (code == LT ? GEU : LTU);
14359 break;
14360 case LE:
14361 case GT:
14362 if (mode == DImode || op1 != constm1_rtx)
14363 return false;
14364 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
14365 code = (code == LE ? GEU : LTU);
14366 break;
14368 default:
14369 return false;
14371 /* Swapping operands may cause a constant to appear as the first operand. */
14372 if (!nonimmediate_operand (op0, VOIDmode))
14374 if (!can_create_pseudo_p ())
14375 return false;
14376 op0 = force_reg (mode, op0);
14378 ix86_compare_op0 = op0;
14379 ix86_compare_op1 = op1;
14380 *pop = ix86_expand_compare (code, NULL, NULL);
14381 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
14382 return true;
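/* The integer rewrites above, spelled out in C (illustrative helpers, not
   part of GCC).  Each one converts the original predicate into LTU/GEU,
   whose value is exactly the carry flag after a compare, ready to feed
   sbb/adc:  */

static int eq0_as_ltu (unsigned int a)		/* a == 0  ->  a u< 1 */
{
  return a < 1;
}

static int gt5_as_geu (unsigned int a)		/* a > 5  ->  a u>= 6 */
{
  return a >= 6;
}

static int lt0_as_geu (int a)	/* a < 0  ->  (unsigned) a u>= 0x80000000 */
{
  return (unsigned int) a >= 0x80000000u;
}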
14385 int
14386 ix86_expand_int_movcc (rtx operands[])
14388 enum rtx_code code = GET_CODE (operands[1]), compare_code;
14389 rtx compare_seq, compare_op;
14390 rtx second_test, bypass_test;
14391 enum machine_mode mode = GET_MODE (operands[0]);
14392 bool sign_bit_compare_p = false;
14394 start_sequence ();
14395 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
14396 compare_seq = get_insns ();
14397 end_sequence ();
14399 compare_code = GET_CODE (compare_op);
14401 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
14402 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
14403 sign_bit_compare_p = true;
14405 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
14406 HImode insns, we'd be swallowed in word prefix ops. */
14408 if ((mode != HImode || TARGET_FAST_PREFIX)
14409 && (mode != (TARGET_64BIT ? TImode : DImode))
14410 && CONST_INT_P (operands[2])
14411 && CONST_INT_P (operands[3]))
14413 rtx out = operands[0];
14414 HOST_WIDE_INT ct = INTVAL (operands[2]);
14415 HOST_WIDE_INT cf = INTVAL (operands[3]);
14416 HOST_WIDE_INT diff;
14418 diff = ct - cf;
14419 /* Sign bit compares are better done using shifts than by using
14420 sbb. */
14421 if (sign_bit_compare_p
14422 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
14423 ix86_compare_op1, &compare_op))
14425 /* Detect overlap between destination and compare sources. */
14426 rtx tmp = out;
14428 if (!sign_bit_compare_p)
14430 bool fpcmp = false;
14432 compare_code = GET_CODE (compare_op);
14434 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
14435 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
14437 fpcmp = true;
14438 compare_code = ix86_fp_compare_code_to_integer (compare_code);
14441 /* To simplify the rest of the code, restrict to the GEU case. */
14442 if (compare_code == LTU)
14444 HOST_WIDE_INT tmp = ct;
14445 ct = cf;
14446 cf = tmp;
14447 compare_code = reverse_condition (compare_code);
14448 code = reverse_condition (code);
14450 else
14452 if (fpcmp)
14453 PUT_CODE (compare_op,
14454 reverse_condition_maybe_unordered
14455 (GET_CODE (compare_op)));
14456 else
14457 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
14459 diff = ct - cf;
14461 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
14462 || reg_overlap_mentioned_p (out, ix86_compare_op1))
14463 tmp = gen_reg_rtx (mode);
14465 if (mode == DImode)
14466 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
14467 else
14468 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
14470 else
14472 if (code == GT || code == GE)
14473 code = reverse_condition (code);
14474 else
14476 HOST_WIDE_INT tmp = ct;
14477 ct = cf;
14478 cf = tmp;
14479 diff = ct - cf;
14481 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
14482 ix86_compare_op1, VOIDmode, 0, -1);
14485 if (diff == 1)
14488 * cmpl op0,op1
14489 * sbbl dest,dest
14490 * [addl dest, ct]
14492 * Size 5 - 8.
14494 if (ct)
14495 tmp = expand_simple_binop (mode, PLUS,
14496 tmp, GEN_INT (ct),
14497 copy_rtx (tmp), 1, OPTAB_DIRECT);
14499 else if (cf == -1)
14502 * cmpl op0,op1
14503 * sbbl dest,dest
14504 * orl $ct, dest
14506 * Size 8.
14508 tmp = expand_simple_binop (mode, IOR,
14509 tmp, GEN_INT (ct),
14510 copy_rtx (tmp), 1, OPTAB_DIRECT);
14512 else if (diff == -1 && ct)
14515 * cmpl op0,op1
14516 * sbbl dest,dest
14517 * notl dest
14518 * [addl dest, cf]
14520 * Size 8 - 11.
14522 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
14523 if (cf)
14524 tmp = expand_simple_binop (mode, PLUS,
14525 copy_rtx (tmp), GEN_INT (cf),
14526 copy_rtx (tmp), 1, OPTAB_DIRECT);
14528 else
14531 * cmpl op0,op1
14532 * sbbl dest,dest
14533 * [notl dest]
14534 * andl cf - ct, dest
14535 * [addl dest, ct]
14537 * Size 8 - 11.
14540 if (cf == 0)
14542 cf = ct;
14543 ct = 0;
14544 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
14547 tmp = expand_simple_binop (mode, AND,
14548 copy_rtx (tmp),
14549 gen_int_mode (cf - ct, mode),
14550 copy_rtx (tmp), 1, OPTAB_DIRECT);
14551 if (ct)
14552 tmp = expand_simple_binop (mode, PLUS,
14553 copy_rtx (tmp), GEN_INT (ct),
14554 copy_rtx (tmp), 1, OPTAB_DIRECT);
14557 if (!rtx_equal_p (tmp, out))
14558 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
14560 return 1; /* DONE */
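/* The shape just emitted, in C terms:
   mask = -(unsigned) (a u< b);        // cmpl; sbbl dest,dest
   dest = (mask & (ct - cf)) + cf;     // [notl;] andl; [addl]
   i.e. a fully branchless select between the two constants (modulo
   which constant ends up in which role after the reversals above). */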
14563 if (diff < 0)
14565 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
14567 HOST_WIDE_INT tmp;
14568 tmp = ct, ct = cf, cf = tmp;
14569 diff = -diff;
14571 if (SCALAR_FLOAT_MODE_P (cmp_mode))
14573 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
14575 /* We may be reversing an unordered compare to a normal compare, which
14576 is not valid in general (we may convert a non-trapping condition
14577 into a trapping one); however, on i386 we currently emit all
14578 comparisons unordered. */
14579 compare_code = reverse_condition_maybe_unordered (compare_code);
14580 code = reverse_condition_maybe_unordered (code);
14582 else
14584 compare_code = reverse_condition (compare_code);
14585 code = reverse_condition (code);
14589 compare_code = UNKNOWN;
14590 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
14591 && CONST_INT_P (ix86_compare_op1))
14593 if (ix86_compare_op1 == const0_rtx
14594 && (code == LT || code == GE))
14595 compare_code = code;
14596 else if (ix86_compare_op1 == constm1_rtx)
14598 if (code == LE)
14599 compare_code = LT;
14600 else if (code == GT)
14601 compare_code = GE;
14605 /* Optimize dest = (op0 < 0) ? -1 : cf. */
14606 if (compare_code != UNKNOWN
14607 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
14608 && (cf == -1 || ct == -1))
14610 /* If the lea code below could be used, only optimize
14611 if it results in a 2-insn sequence. */
14613 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
14614 || diff == 3 || diff == 5 || diff == 9)
14615 || (compare_code == LT && ct == -1)
14616 || (compare_code == GE && cf == -1))
14619 * notl op1 (if necessary)
14620 * sarl $31, op1
14621 * orl cf, op1
14623 if (ct != -1)
14625 cf = ct;
14626 ct = -1;
14627 code = reverse_condition (code);
14630 out = emit_store_flag (out, code, ix86_compare_op0,
14631 ix86_compare_op1, VOIDmode, 0, -1);
14633 out = expand_simple_binop (mode, IOR,
14634 out, GEN_INT (cf),
14635 out, 1, OPTAB_DIRECT);
14636 if (out != operands[0])
14637 emit_move_insn (operands[0], out);
14639 return 1; /* DONE */
14644 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
14645 || diff == 3 || diff == 5 || diff == 9)
14646 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
14647 && (mode != DImode
14648 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
14651 * xorl dest,dest
14652 * cmpl op1,op2
14653 * setcc dest
14654 * lea cf(dest*(ct-cf)),dest
14656 * Size 14.
14658 * This also catches the degenerate setcc-only case.
14661 rtx tmp;
14662 int nops;
14664 out = emit_store_flag (out, code, ix86_compare_op0,
14665 ix86_compare_op1, VOIDmode, 0, 1);
14667 nops = 0;
14668 /* On x86_64 the lea instruction operates on Pmode, so we need
14669 to get the arithmetic done in the proper mode to match. */
14670 if (diff == 1)
14671 tmp = copy_rtx (out);
14672 else
14674 rtx out1;
14675 out1 = copy_rtx (out);
14676 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
14677 nops++;
14678 if (diff & 1)
14680 tmp = gen_rtx_PLUS (mode, tmp, out1);
14681 nops++;
14684 if (cf != 0)
14686 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
14687 nops++;
14689 if (!rtx_equal_p (tmp, out))
14691 if (nops == 1)
14692 out = force_operand (tmp, copy_rtx (out));
14693 else
14694 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
14696 if (!rtx_equal_p (out, operands[0]))
14697 emit_move_insn (operands[0], copy_rtx (out));
14699 return 1; /* DONE */
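/* Concrete instance of the shape above: ct = 13, cf = 10 gives diff = 3,
   so the whole select is "xorl; cmpl; sete; leal 10(%eax,%eax,2), %eax",
   i.e. dest = flag * 3 + 10 in a single lea. */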
14703 * General case: Jumpful:
14704 * xorl dest,dest cmpl op1, op2
14705 * cmpl op1, op2 movl ct, dest
14706 * setcc dest jcc 1f
14707 * decl dest movl cf, dest
14708 * andl (cf-ct),dest 1:
14709 * addl ct,dest
14711 * Size 20. Size 14.
14713 * This is reasonably steep, but branch mispredict costs are
14714 * high on modern cpus, so consider failing only if optimizing
14715 * for space.
14718 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
14719 && BRANCH_COST (optimize_insn_for_speed_p (),
14720 false) >= 2)
14722 if (cf == 0)
14724 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
14726 cf = ct;
14727 ct = 0;
14729 if (SCALAR_FLOAT_MODE_P (cmp_mode))
14731 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
14733 /* We may be reversing an unordered compare to a normal compare,
14734 which is not valid in general (we may convert a non-trapping
14735 condition into a trapping one); however, on i386 we currently
14736 emit all comparisons unordered. */
14737 code = reverse_condition_maybe_unordered (code);
14739 else
14741 code = reverse_condition (code);
14742 if (compare_code != UNKNOWN)
14743 compare_code = reverse_condition (compare_code);
14747 if (compare_code != UNKNOWN)
14749 /* notl op1 (if needed)
14750 sarl $31, op1
14751 andl (cf-ct), op1
14752 addl ct, op1
14754 For x < 0 (resp. x <= -1) there will be no notl,
14755 so if possible swap the constants to get rid of the
14756 complement.
14757 True/false will be -1/0 while the code below (store flag
14758 followed by decrement) is 0/-1, so the constants need
14759 to be exchanged once more. */
14761 if (compare_code == GE || !cf)
14763 code = reverse_condition (code);
14764 compare_code = LT;
14766 else
14768 HOST_WIDE_INT tmp = cf;
14769 cf = ct;
14770 ct = tmp;
14773 out = emit_store_flag (out, code, ix86_compare_op0,
14774 ix86_compare_op1, VOIDmode, 0, -1);
14776 else
14778 out = emit_store_flag (out, code, ix86_compare_op0,
14779 ix86_compare_op1, VOIDmode, 0, 1);
14781 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
14782 copy_rtx (out), 1, OPTAB_DIRECT);
14785 out = expand_simple_binop (mode, AND, copy_rtx (out),
14786 gen_int_mode (cf - ct, mode),
14787 copy_rtx (out), 1, OPTAB_DIRECT);
14788 if (ct)
14789 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
14790 copy_rtx (out), 1, OPTAB_DIRECT);
14791 if (!rtx_equal_p (out, operands[0]))
14792 emit_move_insn (operands[0], copy_rtx (out));
14794 return 1; /* DONE */
14798 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
14800 /* Try a few more things with specific constants and a variable. */
14802 optab op;
14803 rtx var, orig_out, out, tmp;
14805 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
14806 return 0; /* FAIL */
14808 /* If one of the two operands is an interesting constant, load a
14809 constant with the code above and mask the variable in with a logical operation. */
14811 if (CONST_INT_P (operands[2]))
14813 var = operands[3];
14814 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
14815 operands[3] = constm1_rtx, op = and_optab;
14816 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
14817 operands[3] = const0_rtx, op = ior_optab;
14818 else
14819 return 0; /* FAIL */
14821 else if (CONST_INT_P (operands[3]))
14823 var = operands[2];
14824 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
14825 operands[2] = constm1_rtx, op = and_optab;
14826 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
14827 operands[2] = const0_rtx, op = ior_optab;
14828 else
14829 return 0; /* FAIL */
14831 else
14832 return 0; /* FAIL */
14834 orig_out = operands[0];
14835 tmp = gen_reg_rtx (mode);
14836 operands[0] = tmp;
14838 /* Recurse to get the constant loaded. */
14839 if (ix86_expand_int_movcc (operands) == 0)
14840 return 0; /* FAIL */
14842 /* Mask in the interesting variable. */
14843 out = expand_binop (mode, op, var, tmp, orig_out, 0,
14844 OPTAB_WIDEN);
14845 if (!rtx_equal_p (out, orig_out))
14846 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
14848 return 1; /* DONE */
14852 * For comparison with above,
14854 * movl cf,dest
14855 * movl ct,tmp
14856 * cmpl op1,op2
14857 * cmovcc tmp,dest
14859 * Size 15.
14862 if (! nonimmediate_operand (operands[2], mode))
14863 operands[2] = force_reg (mode, operands[2]);
14864 if (! nonimmediate_operand (operands[3], mode))
14865 operands[3] = force_reg (mode, operands[3]);
14867 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
14869 rtx tmp = gen_reg_rtx (mode);
14870 emit_move_insn (tmp, operands[3]);
14871 operands[3] = tmp;
14873 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
14875 rtx tmp = gen_reg_rtx (mode);
14876 emit_move_insn (tmp, operands[2]);
14877 operands[2] = tmp;
14880 if (! register_operand (operands[2], VOIDmode)
14881 && (mode == QImode
14882 || ! register_operand (operands[3], VOIDmode)))
14883 operands[2] = force_reg (mode, operands[2]);
14885 if (mode == QImode
14886 && ! register_operand (operands[3], VOIDmode))
14887 operands[3] = force_reg (mode, operands[3]);
14889 emit_insn (compare_seq);
14890 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
14891 gen_rtx_IF_THEN_ELSE (mode,
14892 compare_op, operands[2],
14893 operands[3])));
14894 if (bypass_test)
14895 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
14896 gen_rtx_IF_THEN_ELSE (mode,
14897 bypass_test,
14898 copy_rtx (operands[3]),
14899 copy_rtx (operands[0]))));
14900 if (second_test)
14901 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
14902 gen_rtx_IF_THEN_ELSE (mode,
14903 second_test,
14904 copy_rtx (operands[2]),
14905 copy_rtx (operands[0]))));
14907 return 1; /* DONE */
14910 /* Swap, force into registers, or otherwise massage the two operands
14911 to an sse comparison with a mask result. Thus we differ a bit from
14912 ix86_prepare_fp_compare_args which expects to produce a flags result.
14914 The DEST operand exists to help determine whether to commute commutative
14915 operators. The POP0/POP1 operands are updated in place. The new
14916 comparison code is returned, or UNKNOWN if not implementable. */
14918 static enum rtx_code
14919 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
14920 rtx *pop0, rtx *pop1)
14922 rtx tmp;
14924 switch (code)
14926 case LTGT:
14927 case UNEQ:
14928 /* We have no LTGT as an operator. We could implement it with
14929 NE & ORDERED, but this requires an extra temporary. It's
14930 not clear that it's worth it. */
14931 return UNKNOWN;
14933 case LT:
14934 case LE:
14935 case UNGT:
14936 case UNGE:
14937 /* These are supported directly. */
14938 break;
14940 case EQ:
14941 case NE:
14942 case UNORDERED:
14943 case ORDERED:
14944 /* For commutative operators, try to canonicalize the destination
14945 operand to be first in the comparison - this helps reload to
14946 avoid extra moves. */
14947 if (!dest || !rtx_equal_p (dest, *pop1))
14948 break;
14949 /* FALLTHRU */
14951 case GE:
14952 case GT:
14953 case UNLE:
14954 case UNLT:
14955 /* These are not supported directly. Swap the comparison operands
14956 to transform into something that is supported. */
14957 tmp = *pop0;
14958 *pop0 = *pop1;
14959 *pop1 = tmp;
14960 code = swap_condition (code);
14961 break;
14963 default:
14964 gcc_unreachable ();
14967 return code;
14970 /* Detect conditional moves that exactly match min/max operational
14971 semantics. Note that this is IEEE safe, as long as we don't
14972 interchange the operands.
14974 Returns FALSE if this conditional move doesn't match a MIN/MAX,
14975 and TRUE if the operation is successful and instructions are emitted. */
14977 static bool
14978 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
14979 rtx cmp_op1, rtx if_true, rtx if_false)
14981 enum machine_mode mode;
14982 bool is_min;
14983 rtx tmp;
14985 if (code == LT)
14987 else if (code == UNGE)
14989 tmp = if_true;
14990 if_true = if_false;
14991 if_false = tmp;
14993 else
14994 return false;
14996 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
14997 is_min = true;
14998 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
14999 is_min = false;
15000 else
15001 return false;
15003 mode = GET_MODE (dest);
15005 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
15006 but MODE may be a vector mode and thus not appropriate. */
15007 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
15009 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
15010 rtvec v;
15012 if_true = force_reg (mode, if_true);
15013 v = gen_rtvec (2, if_true, if_false);
15014 tmp = gen_rtx_UNSPEC (mode, v, u);
15016 else
15018 code = is_min ? SMIN : SMAX;
15019 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
15022 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
15023 return true;
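/* Why operand order matters above: minsd/maxsd return the second source
   operand when the inputs are unordered (or both zeros), so "a < b ? a : b"
   maps exactly onto minsd, while the interchanged form would change NaN
   semantics.  An illustrative sketch with SSE2 intrinsics:  */

#include <emmintrin.h>

static double
ieee_min_sketch (double a, double b)
{
  /* _mm_min_sd (x, y) computes a < b ? a : b; NaN in either input
     selects b, matching the LT form accepted above.  */
  __m128d x = _mm_set_sd (a), y = _mm_set_sd (b);
  return _mm_cvtsd_f64 (_mm_min_sd (x, y));
}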
15026 /* Expand an sse vector comparison. Return the register with the result. */
15028 static rtx
15029 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
15030 rtx op_true, rtx op_false)
15032 enum machine_mode mode = GET_MODE (dest);
15033 rtx x;
15035 cmp_op0 = force_reg (mode, cmp_op0);
15036 if (!nonimmediate_operand (cmp_op1, mode))
15037 cmp_op1 = force_reg (mode, cmp_op1);
15039 if (optimize
15040 || reg_overlap_mentioned_p (dest, op_true)
15041 || reg_overlap_mentioned_p (dest, op_false))
15042 dest = gen_reg_rtx (mode);
15044 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
15045 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15047 return dest;
15050 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
15051 operations. This is used for both scalar and vector conditional moves. */
15053 static void
15054 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
15056 enum machine_mode mode = GET_MODE (dest);
15057 rtx t2, t3, x;
15059 if (op_false == CONST0_RTX (mode))
15061 op_true = force_reg (mode, op_true);
15062 x = gen_rtx_AND (mode, cmp, op_true);
15063 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15065 else if (op_true == CONST0_RTX (mode))
15067 op_false = force_reg (mode, op_false);
15068 x = gen_rtx_NOT (mode, cmp);
15069 x = gen_rtx_AND (mode, x, op_false);
15070 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15072 else if (TARGET_SSE5)
15074 rtx pcmov = gen_rtx_SET (mode, dest,
15075 gen_rtx_IF_THEN_ELSE (mode, cmp,
15076 op_true,
15077 op_false));
15078 emit_insn (pcmov);
15080 else
15082 op_true = force_reg (mode, op_true);
15083 op_false = force_reg (mode, op_false);
15085 t2 = gen_reg_rtx (mode);
15086 if (optimize)
15087 t3 = gen_reg_rtx (mode);
15088 else
15089 t3 = dest;
15091 x = gen_rtx_AND (mode, op_true, cmp);
15092 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
15094 x = gen_rtx_NOT (mode, cmp);
15095 x = gen_rtx_AND (mode, x, op_false);
15096 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
15098 x = gen_rtx_IOR (mode, t3, t2);
15099 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
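/* The and/andnot/ior select above, as a self-contained intrinsics sketch
   (reusing <emmintrin.h> from the sketch above; gcc emits the same three
   logical operations on whatever mode DEST has):  */

static __m128d
sse2_blend_sketch (__m128d cmp, __m128d op_true, __m128d op_false)
{
  /* CMP is all-ones or all-zeros per lane, as cmppd/pcmpeq* produce:
     dest = (cmp & op_true) | (~cmp & op_false).  */
  return _mm_or_pd (_mm_and_pd (cmp, op_true),
		    _mm_andnot_pd (cmp, op_false));
}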
15103 /* Expand a floating-point conditional move. Return true if successful. */
15105 int
15106 ix86_expand_fp_movcc (rtx operands[])
15108 enum machine_mode mode = GET_MODE (operands[0]);
15109 enum rtx_code code = GET_CODE (operands[1]);
15110 rtx tmp, compare_op, second_test, bypass_test;
15112 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
15114 enum machine_mode cmode;
15116 /* Since we have no cmove for sse registers, don't force bad register
15117 allocation just to gain access to it. Deny movcc when the
15118 comparison mode doesn't match the move mode. */
15119 cmode = GET_MODE (ix86_compare_op0);
15120 if (cmode == VOIDmode)
15121 cmode = GET_MODE (ix86_compare_op1);
15122 if (cmode != mode)
15123 return 0;
15125 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
15126 &ix86_compare_op0,
15127 &ix86_compare_op1);
15128 if (code == UNKNOWN)
15129 return 0;
15131 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
15132 ix86_compare_op1, operands[2],
15133 operands[3]))
15134 return 1;
15136 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
15137 ix86_compare_op1, operands[2], operands[3]);
15138 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
15139 return 1;
15145 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
15147 /* The floating point conditional move instructions don't directly
15148 support signed integer comparisons. */
15150 if (!fcmov_comparison_operator (compare_op, VOIDmode))
15152 gcc_assert (!second_test && !bypass_test);
15153 tmp = gen_reg_rtx (QImode);
15154 ix86_expand_setcc (code, tmp);
15155 code = NE;
15156 ix86_compare_op0 = tmp;
15157 ix86_compare_op1 = const0_rtx;
15158 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
15160 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
15162 tmp = gen_reg_rtx (mode);
15163 emit_move_insn (tmp, operands[3]);
15164 operands[3] = tmp;
15166 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
15168 tmp = gen_reg_rtx (mode);
15169 emit_move_insn (tmp, operands[2]);
15170 operands[2] = tmp;
15173 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15174 gen_rtx_IF_THEN_ELSE (mode, compare_op,
15175 operands[2], operands[3])));
15176 if (bypass_test)
15177 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15178 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
15179 operands[3], operands[0])));
15180 if (second_test)
15181 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15182 gen_rtx_IF_THEN_ELSE (mode, second_test,
15183 operands[2], operands[0])));
15185 return 1;
15188 /* Expand a floating-point vector conditional move; a vcond operation
15189 rather than a movcc operation. */
15191 bool
15192 ix86_expand_fp_vcond (rtx operands[])
15194 enum rtx_code code = GET_CODE (operands[3]);
15195 rtx cmp;
15197 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
15198 &operands[4], &operands[5]);
15199 if (code == UNKNOWN)
15200 return false;
15202 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
15203 operands[5], operands[1], operands[2]))
15204 return true;
15206 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
15207 operands[1], operands[2]);
15208 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
15209 return true;
15212 /* Expand a signed/unsigned integral vector conditional move. */
15214 bool
15215 ix86_expand_int_vcond (rtx operands[])
15217 enum machine_mode mode = GET_MODE (operands[0]);
15218 enum rtx_code code = GET_CODE (operands[3]);
15219 bool negate = false;
15220 rtx x, cop0, cop1;
15222 cop0 = operands[4];
15223 cop1 = operands[5];
15225 /* SSE5 supports all of the comparisons on all vector int types. */
15226 if (!TARGET_SSE5)
15228 /* Canonicalize the comparison to EQ, GT, GTU. */
15229 switch (code)
15231 case EQ:
15232 case GT:
15233 case GTU:
15234 break;
15236 case NE:
15237 case LE:
15238 case LEU:
15239 code = reverse_condition (code);
15240 negate = true;
15241 break;
15243 case GE:
15244 case GEU:
15245 code = reverse_condition (code);
15246 negate = true;
15247 /* FALLTHRU */
15249 case LT:
15250 case LTU:
15251 code = swap_condition (code);
15252 x = cop0, cop0 = cop1, cop1 = x;
15253 break;
15255 default:
15256 gcc_unreachable ();
15259 /* Only SSE4.1/SSE4.2 supports V2DImode. */
15260 if (mode == V2DImode)
15262 switch (code)
15264 case EQ:
15265 /* SSE4.1 supports EQ. */
15266 if (!TARGET_SSE4_1)
15267 return false;
15268 break;
15270 case GT:
15271 case GTU:
15272 /* SSE4.2 supports GT/GTU. */
15273 if (!TARGET_SSE4_2)
15274 return false;
15275 break;
15277 default:
15278 gcc_unreachable ();
15282 /* Unsigned parallel compare is not supported by the hardware. Play some
15283 tricks to turn this into a signed comparison. */
15284 if (code == GTU)
15286 cop0 = force_reg (mode, cop0);
15288 switch (mode)
15290 case V4SImode:
15291 case V2DImode:
15293 rtx t1, t2, mask;
15295 /* Subtract (-(INT MAX) - 1) from both operands to make
15296 them signed, mapping unsigned order onto signed order;
15297 in modulo arithmetic, subtracting the sign bit is the
15298 same as xoring it in. */
15299 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
15300 true, false);
15302 t1 = gen_reg_rtx (mode);
15303 emit_insn ((mode == V4SImode
15304 ? gen_subv4si3
15305 : gen_subv2di3) (t1, cop0, mask));
15307 t2 = gen_reg_rtx (mode);
15308 emit_insn ((mode == V4SImode
15309 ? gen_subv4si3
15310 : gen_subv2di3) (t2, cop1, mask));
15312 cop0 = t1;
15313 cop1 = t2;
15314 code = GT;
15318 break;
15320 case V16QImode:
15321 case V8HImode:
15322 /* Perform a parallel unsigned saturating subtraction. */
15323 x = gen_reg_rtx (mode);
15324 emit_insn (gen_rtx_SET (VOIDmode, x,
15325 gen_rtx_US_MINUS (mode, cop0, cop1)));
15327 cop0 = x;
15328 cop1 = CONST0_RTX (mode);
15329 code = EQ;
15330 negate = !negate;
15331 break;
15331 default:
15332 gcc_unreachable ();
15340 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
15341 operands[1+negate], operands[2-negate]);
15343 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
15344 operands[2-negate]);
15345 return true;
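/* Scalar sketches (illustrative, not part of GCC) of the two GTU
   workarounds above:  */

static int
u_gt_via_bias (unsigned int a, unsigned int b)
{
  /* Subtracting the sign bit (the same as xoring it, modulo 2^32) maps
     unsigned order onto signed order, so pcmpgtd applies.  */
  return (int) (a ^ 0x80000000u) > (int) (b ^ 0x80000000u);
}

static int
u_gt_via_ussub (unsigned char a, unsigned char b)
{
  /* psubusb computes max (a - b, 0), which is zero iff a u<= b --
     hence the EQ against zero with the NEGATE flag flipped.  */
  unsigned char sat = a > b ? (unsigned char) (a - b) : 0;
  return sat != 0;
}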
15348 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
15349 true if we should do zero extension, else sign extension. HIGH_P is
15350 true if we want the N/2 high elements, else the low elements. */
15352 void
15353 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15355 enum machine_mode imode = GET_MODE (operands[1]);
15356 rtx (*unpack)(rtx, rtx, rtx);
15357 rtx se, dest;
15359 switch (imode)
15361 case V16QImode:
15362 if (high_p)
15363 unpack = gen_vec_interleave_highv16qi;
15364 else
15365 unpack = gen_vec_interleave_lowv16qi;
15366 break;
15367 case V8HImode:
15368 if (high_p)
15369 unpack = gen_vec_interleave_highv8hi;
15370 else
15371 unpack = gen_vec_interleave_lowv8hi;
15372 break;
15373 case V4SImode:
15374 if (high_p)
15375 unpack = gen_vec_interleave_highv4si;
15376 else
15377 unpack = gen_vec_interleave_lowv4si;
15378 break;
15379 default:
15380 gcc_unreachable ();
15383 dest = gen_lowpart (imode, operands[0]);
15385 if (unsigned_p)
15386 se = force_reg (imode, CONST0_RTX (imode));
15387 else
15388 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
15389 operands[1], pc_rtx, pc_rtx);
15391 emit_insn (unpack (dest, operands[1], se));
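/* The classic SSE2 widening idiom built above, shown for the signed
   V8HI -> V4SI low-half case (a sketch, reusing <emmintrin.h>):  */

static __m128i
sext_lo_epi16_epi32_sketch (__m128i v)
{
  /* SE is an all-ones mask in every negative lane (0 > v); interleaving
     it in as the high halves performs the sign extension.  */
  __m128i se = _mm_cmpgt_epi16 (_mm_setzero_si128 (), v);
  return _mm_unpacklo_epi16 (v, se);
}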
15394 /* This function performs the same task as ix86_expand_sse_unpack,
15395 but with SSE4.1 instructions. */
15397 void
15398 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15400 enum machine_mode imode = GET_MODE (operands[1]);
15401 rtx (*unpack)(rtx, rtx);
15402 rtx src, dest;
15404 switch (imode)
15406 case V16QImode:
15407 if (unsigned_p)
15408 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
15409 else
15410 unpack = gen_sse4_1_extendv8qiv8hi2;
15411 break;
15412 case V8HImode:
15413 if (unsigned_p)
15414 unpack = gen_sse4_1_zero_extendv4hiv4si2;
15415 else
15416 unpack = gen_sse4_1_extendv4hiv4si2;
15417 break;
15418 case V4SImode:
15419 if (unsigned_p)
15420 unpack = gen_sse4_1_zero_extendv2siv2di2;
15421 else
15422 unpack = gen_sse4_1_extendv2siv2di2;
15423 break;
15424 default:
15425 gcc_unreachable ();
15428 dest = operands[0];
15429 if (high_p)
15431 /* Shift the high 8 bytes down into the low 8 bytes. */
15432 src = gen_reg_rtx (imode);
15433 emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, src),
15434 gen_lowpart (TImode, operands[1]),
15435 GEN_INT (64)));
15437 else
15438 src = operands[1];
15440 emit_insn (unpack (dest, src));
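/* SSE4.1 form of the high-half case above (a sketch; assumes
   <smmintrin.h>): shift the high 8 bytes down, then sign-extend with
   pmovsxwd.  */

#include <smmintrin.h>

static __m128i
sext_hi_epi16_epi32_sse41 (__m128i v)
{
  return _mm_cvtepi16_epi32 (_mm_srli_si128 (v, 8));
}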
15443 /* This function performs the same task as ix86_expand_sse_unpack,
15444 but with sse5 instructions. */
15446 void
15447 ix86_expand_sse5_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15449 enum machine_mode imode = GET_MODE (operands[1]);
15450 int pperm_bytes[16];
15451 int i;
15452 int h = (high_p) ? 8 : 0;
15453 int h2;
15454 int sign_extend;
15455 rtvec v = rtvec_alloc (16);
15456 rtvec vs;
15457 rtx x, p;
15458 rtx op0 = operands[0], op1 = operands[1];
15460 switch (imode)
15462 case V16QImode:
15463 vs = rtvec_alloc (8);
15464 h2 = (high_p) ? 8 : 0;
15465 for (i = 0; i < 8; i++)
15467 pperm_bytes[2*i+0] = PPERM_SRC | PPERM_SRC2 | i | h;
15468 pperm_bytes[2*i+1] = ((unsigned_p)
15469 ? PPERM_ZERO
15470 : PPERM_SIGN | PPERM_SRC2 | i | h);
15473 for (i = 0; i < 16; i++)
15474 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15476 for (i = 0; i < 8; i++)
15477 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15479 p = gen_rtx_PARALLEL (VOIDmode, vs);
15480 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15481 if (unsigned_p)
15482 emit_insn (gen_sse5_pperm_zero_v16qi_v8hi (op0, op1, p, x));
15483 else
15484 emit_insn (gen_sse5_pperm_sign_v16qi_v8hi (op0, op1, p, x));
15485 break;
15487 case V8HImode:
15488 vs = rtvec_alloc (4);
15489 h2 = (high_p) ? 4 : 0;
15490 for (i = 0; i < 4; i++)
15492 sign_extend = ((unsigned_p)
15493 ? PPERM_ZERO
15494 : PPERM_SIGN | PPERM_SRC2 | ((2*i) + 1 + h));
15495 pperm_bytes[4*i+0] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 0 + h);
15496 pperm_bytes[4*i+1] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 1 + h);
15497 pperm_bytes[4*i+2] = sign_extend;
15498 pperm_bytes[4*i+3] = sign_extend;
15501 for (i = 0; i < 16; i++)
15502 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15504 for (i = 0; i < 4; i++)
15505 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15507 p = gen_rtx_PARALLEL (VOIDmode, vs);
15508 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15509 if (unsigned_p)
15510 emit_insn (gen_sse5_pperm_zero_v8hi_v4si (op0, op1, p, x));
15511 else
15512 emit_insn (gen_sse5_pperm_sign_v8hi_v4si (op0, op1, p, x));
15513 break;
15515 case V4SImode:
15516 vs = rtvec_alloc (2);
15517 h2 = (high_p) ? 2 : 0;
15518 for (i = 0; i < 2; i++)
15520 sign_extend = ((unsigned_p)
15521 ? PPERM_ZERO
15522 : PPERM_SIGN | PPERM_SRC2 | ((4*i) + 3 + h));
15523 pperm_bytes[8*i+0] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 0 + h);
15524 pperm_bytes[8*i+1] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 1 + h);
15525 pperm_bytes[8*i+2] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 2 + h);
15526 pperm_bytes[8*i+3] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 3 + h);
15527 pperm_bytes[8*i+4] = sign_extend;
15528 pperm_bytes[8*i+5] = sign_extend;
15529 pperm_bytes[8*i+6] = sign_extend;
15530 pperm_bytes[8*i+7] = sign_extend;
15533 for (i = 0; i < 16; i++)
15534 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15536 for (i = 0; i < 2; i++)
15537 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15539 p = gen_rtx_PARALLEL (VOIDmode, vs);
15540 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15541 if (unsigned_p)
15542 emit_insn (gen_sse5_pperm_zero_v4si_v2di (op0, op1, p, x));
15543 else
15544 emit_insn (gen_sse5_pperm_sign_v4si_v2di (op0, op1, p, x));
15545 break;
15547 default:
15548 gcc_unreachable ();
15551 return;
15554 /* Pack the elements of OPERANDS[1] (low half) and OPERANDS[2] (high half)
15555 into the next narrower integer vector type, truncating each element. */
15556 void
15557 ix86_expand_sse5_pack (rtx operands[3])
15559 enum machine_mode imode = GET_MODE (operands[0]);
15560 int pperm_bytes[16];
15561 int i;
15562 rtvec v = rtvec_alloc (16);
15563 rtx x;
15564 rtx op0 = operands[0];
15565 rtx op1 = operands[1];
15566 rtx op2 = operands[2];
15568 switch (imode)
15570 case V16QImode:
15571 for (i = 0; i < 8; i++)
15573 pperm_bytes[i+0] = PPERM_SRC | PPERM_SRC1 | (i*2);
15574 pperm_bytes[i+8] = PPERM_SRC | PPERM_SRC2 | (i*2);
15577 for (i = 0; i < 16; i++)
15578 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15580 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15581 emit_insn (gen_sse5_pperm_pack_v8hi_v16qi (op0, op1, op2, x));
15582 break;
15584 case V8HImode:
15585 for (i = 0; i < 4; i++)
15587 pperm_bytes[(2*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 0);
15588 pperm_bytes[(2*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 1);
15589 pperm_bytes[(2*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 0);
15590 pperm_bytes[(2*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 1);
15593 for (i = 0; i < 16; i++)
15594 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15596 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15597 emit_insn (gen_sse5_pperm_pack_v4si_v8hi (op0, op1, op2, x));
15598 break;
15600 case V4SImode:
15601 for (i = 0; i < 2; i++)
15603 pperm_bytes[(4*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 0);
15604 pperm_bytes[(4*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 1);
15605 pperm_bytes[(4*i)+2] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 2);
15606 pperm_bytes[(4*i)+3] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 3);
15607 pperm_bytes[(4*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 0);
15608 pperm_bytes[(4*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 1);
15609 pperm_bytes[(4*i)+10] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 2);
15610 pperm_bytes[(4*i)+11] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 3);
15613 for (i = 0; i < 16; i++)
15614 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15616 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15617 emit_insn (gen_sse5_pperm_pack_v2di_v4si (op0, op1, op2, x));
15618 break;
15620 default:
15621 gcc_unreachable ();
15624 return;
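/* Illustrative scalar model (a sketch, not part of GCC): the pack
   expander above performs a truncating pack - unlike packsswb it does
   not saturate.  For the V8HImode case each result byte is the low
   byte of one source element, operand 1 filling the low half of the
   result and operand 2 the high half.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
pack_v8hi_v16qi (uint8_t dst[16], const uint16_t a[8], const uint16_t b[8])
{
  int i;
  for (i = 0; i < 8; i++)
    {
      dst[i]     = (uint8_t) a[i];   /* low byte of each A element */
      dst[i + 8] = (uint8_t) b[i];   /* low byte of each B element */
    }
}

int
main (void)
{
  uint16_t a[8] = { 0x1234, 1, 2, 3, 4, 5, 6, 7 };
  uint16_t b[8] = { 0xFFAB, 8, 9, 10, 11, 12, 13, 14 };
  uint8_t d[16];
  pack_v8hi_v16qi (d, a, b);
  assert (d[0] == 0x34 && d[8] == 0xAB);   /* truncated, not saturated */
  return 0;
}
#endif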
15627 /* Expand conditional increment or decrement using adc/sbb instructions.
15628 The default case using setcc followed by the conditional move can be
15629 done by generic code. */
15630 int
15631 ix86_expand_int_addcc (rtx operands[])
15633 enum rtx_code code = GET_CODE (operands[1]);
15634 rtx compare_op;
15635 rtx val = const0_rtx;
15636 bool fpcmp = false;
15637 enum machine_mode mode = GET_MODE (operands[0]);
15639 if (operands[3] != const1_rtx
15640 && operands[3] != constm1_rtx)
15641 return 0;
15642 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15643 ix86_compare_op1, &compare_op))
15644 return 0;
15645 code = GET_CODE (compare_op);
15647 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15648 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15650 fpcmp = true;
15651 code = ix86_fp_compare_code_to_integer (code);
15654 if (code != LTU)
15656 val = constm1_rtx;
15657 if (fpcmp)
15658 PUT_CODE (compare_op,
15659 reverse_condition_maybe_unordered
15660 (GET_CODE (compare_op)));
15661 else
15662 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
15664 PUT_MODE (compare_op, mode);
15666 /* Construct either adc or sbb insn. */
15667 if ((code == LTU) == (operands[3] == constm1_rtx))
15669 switch (GET_MODE (operands[0]))
15671 case QImode:
15672 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
15673 break;
15674 case HImode:
15675 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
15676 break;
15677 case SImode:
15678 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
15679 break;
15680 case DImode:
15681 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
15682 break;
15683 default:
15684 gcc_unreachable ();
15687 else
15689 switch (GET_MODE (operands[0]))
15691 case QImode:
15692 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
15693 break;
15694 case HImode:
15695 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
15696 break;
15697 case SImode:
15698 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
15699 break;
15700 case DImode:
15701 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
15702 break;
15703 default:
15704 gcc_unreachable ();
15707 return 1; /* DONE */
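/* Illustrative sketch (not GCC code): at the source level the adc/sbb
   expansion above corresponds to adding the carry flag produced by an
   unsigned compare, avoiding a setcc/cmov pair and any branch.  */
#if 0
#include <assert.h>

static unsigned int
cond_inc (unsigned int x, unsigned int a, unsigned int b)
{
  /* A compare leaving its result in the carry flag, then "adc $0, x".  */
  return x + (a < b);
}

int
main (void)
{
  assert (cond_inc (5, 1, 2) == 6);   /* carry set: increment */
  assert (cond_inc (5, 2, 1) == 5);   /* carry clear: unchanged */
  return 0;
}
#endif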
15711 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
15712 works for floating point parameters and non-offsettable memories.
15713 For pushes, it returns just stack offsets; the values will be saved
15714 in the right order. At most four parts are generated. */
15716 static int
15717 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
15719 int size;
15721 if (!TARGET_64BIT)
15722 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
15723 else
15724 size = (GET_MODE_SIZE (mode) + 4) / 8;
15726 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
15727 gcc_assert (size >= 2 && size <= 4);
15729 /* Optimize constant pool reference to immediates. This is used by fp
15730 moves, that force all constants to memory to allow combining. */
15731 if (MEM_P (operand) && MEM_READONLY_P (operand))
15733 rtx tmp = maybe_get_pool_constant (operand);
15734 if (tmp)
15735 operand = tmp;
15738 if (MEM_P (operand) && !offsettable_memref_p (operand))
15740 /* The only non-offsetable memories we handle are pushes. */
15741 int ok = push_operand (operand, VOIDmode);
15743 gcc_assert (ok);
15745 operand = copy_rtx (operand);
15746 PUT_MODE (operand, Pmode);
15747 parts[0] = parts[1] = parts[2] = parts[3] = operand;
15748 return size;
15751 if (GET_CODE (operand) == CONST_VECTOR)
15753 enum machine_mode imode = int_mode_for_mode (mode);
15754 /* Caution: if we looked through a constant pool memory above,
15755 the operand may actually have a different mode now. That's
15756 ok, since we want to pun this all the way back to an integer. */
15757 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
15758 gcc_assert (operand != NULL);
15759 mode = imode;
15762 if (!TARGET_64BIT)
15764 if (mode == DImode)
15765 split_di (&operand, 1, &parts[0], &parts[1]);
15766 else
15768 int i;
15770 if (REG_P (operand))
15772 gcc_assert (reload_completed);
15773 for (i = 0; i < size; i++)
15774 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
15776 else if (offsettable_memref_p (operand))
15778 operand = adjust_address (operand, SImode, 0);
15779 parts[0] = operand;
15780 for (i = 1; i < size; i++)
15781 parts[i] = adjust_address (operand, SImode, 4 * i);
15783 else if (GET_CODE (operand) == CONST_DOUBLE)
15785 REAL_VALUE_TYPE r;
15786 long l[4];
15788 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
15789 switch (mode)
15791 case TFmode:
15792 real_to_target (l, &r, mode);
15793 parts[3] = gen_int_mode (l[3], SImode);
15794 parts[2] = gen_int_mode (l[2], SImode);
15795 break;
15796 case XFmode:
15797 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
15798 parts[2] = gen_int_mode (l[2], SImode);
15799 break;
15800 case DFmode:
15801 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
15802 break;
15803 default:
15804 gcc_unreachable ();
15806 parts[1] = gen_int_mode (l[1], SImode);
15807 parts[0] = gen_int_mode (l[0], SImode);
15809 else
15810 gcc_unreachable ();
15813 else
15815 if (mode == TImode)
15816 split_ti (&operand, 1, &parts[0], &parts[1]);
15817 if (mode == XFmode || mode == TFmode)
15819 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
15820 if (REG_P (operand))
15822 gcc_assert (reload_completed);
15823 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
15824 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
15826 else if (offsettable_memref_p (operand))
15828 operand = adjust_address (operand, DImode, 0);
15829 parts[0] = operand;
15830 parts[1] = adjust_address (operand, upper_mode, 8);
15832 else if (GET_CODE (operand) == CONST_DOUBLE)
15834 REAL_VALUE_TYPE r;
15835 long l[4];
15837 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
15838 real_to_target (l, &r, mode);
15840 /* Do not use shift by 32 to avoid warning on 32bit systems. */
15841 if (HOST_BITS_PER_WIDE_INT >= 64)
15842 parts[0]
15843 = gen_int_mode
15844 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
15845 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
15846 DImode);
15847 else
15848 parts[0] = immed_double_const (l[0], l[1], DImode);
15850 if (upper_mode == SImode)
15851 parts[1] = gen_int_mode (l[2], SImode);
15852 else if (HOST_BITS_PER_WIDE_INT >= 64)
15853 parts[1]
15854 = gen_int_mode
15855 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
15856 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
15857 DImode);
15858 else
15859 parts[1] = immed_double_const (l[2], l[3], DImode);
15861 else
15862 gcc_unreachable ();
15866 return size;
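/* Illustrative sketch (assumes a little-endian host; not GCC code):
   the splitting above treats, e.g., a DFmode value on a 32-bit target
   as two SImode words, lowest-addressed word first.  */
#if 0
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void
split_df (double x, uint32_t parts[2])
{
  memcpy (parts, &x, sizeof x);   /* reinterpret the bits, word by word */
}

int
main (void)
{
  uint32_t p[2];
  split_df (1.0, p);              /* IEEE double 1.0 = 0x3FF0000000000000 */
  assert (p[0] == 0 && p[1] == 0x3FF00000);
  return 0;
}
#endif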
15869 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
15870 All required insns are emitted here; the function returns nothing.
15871 Operands 2-5 receive the output (destination) parts in the correct
15872 order; operands 6-9 receive the input (source) parts. */
15874 void
15875 ix86_split_long_move (rtx operands[])
15877 rtx part[2][4];
15878 int nparts, i, j;
15879 int push = 0;
15880 int collisions = 0;
15881 enum machine_mode mode = GET_MODE (operands[0]);
15882 bool collisionparts[4];
15884 /* The DFmode expanders may ask us to move a double.
15885 For a 64-bit target this is a single move; by hiding that fact
15886 here we simplify the i386.md splitters. */
15887 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
15889 /* Optimize constant pool reference to immediates. This is used by
15890 fp moves, that force all constants to memory to allow combining. */
15892 if (MEM_P (operands[1])
15893 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
15894 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
15895 operands[1] = get_pool_constant (XEXP (operands[1], 0));
15896 if (push_operand (operands[0], VOIDmode))
15898 operands[0] = copy_rtx (operands[0]);
15899 PUT_MODE (operands[0], Pmode);
15901 else
15902 operands[0] = gen_lowpart (DImode, operands[0]);
15903 operands[1] = gen_lowpart (DImode, operands[1]);
15904 emit_move_insn (operands[0], operands[1]);
15905 return;
15908 /* The only non-offsettable memory we handle is push. */
15909 if (push_operand (operands[0], VOIDmode))
15910 push = 1;
15911 else
15912 gcc_assert (!MEM_P (operands[0])
15913 || offsettable_memref_p (operands[0]));
15915 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
15916 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
15918 /* When emitting push, take care for source operands on the stack. */
15919 if (push && MEM_P (operands[1])
15920 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
15921 for (i = 0; i < nparts - 1; i++)
15922 part[1][i] = change_address (part[1][i],
15923 GET_MODE (part[1][i]),
15924 XEXP (part[1][i + 1], 0));
15926 /* We need to do the copy in the right order in case an address register
15927 of the source overlaps the destination. */
15928 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
15930 rtx tmp;
15932 for (i = 0; i < nparts; i++)
15934 collisionparts[i]
15935 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
15936 if (collisionparts[i])
15937 collisions++;
15940 /* Collision in the middle part can be handled by reordering. */
15941 if (collisions == 1 && nparts == 3 && collisionparts [1])
15943 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
15944 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
15946 else if (collisions == 1
15947 && nparts == 4
15948 && (collisionparts [1] || collisionparts [2]))
15950 if (collisionparts [1])
15952 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
15953 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
15955 else
15957 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
15958 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
15962 /* If there are more collisions, we can't handle it by reordering.
15963 Do an lea to the last part and use only one colliding move. */
15964 else if (collisions > 1)
15966 rtx base;
15968 collisions = 1;
15970 base = part[0][nparts - 1];
15972 /* Handle the case when the last part isn't valid for lea.
15973 Happens in 64-bit mode storing the 12-byte XFmode. */
15974 if (GET_MODE (base) != Pmode)
15975 base = gen_rtx_REG (Pmode, REGNO (base));
15977 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
15978 part[1][0] = replace_equiv_address (part[1][0], base);
15979 for (i = 1; i < nparts; i++)
15981 tmp = plus_constant (base, UNITS_PER_WORD * i);
15982 part[1][i] = replace_equiv_address (part[1][i], tmp);
15987 if (push)
15989 if (!TARGET_64BIT)
15991 if (nparts == 3)
15993 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
15994 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
15995 emit_move_insn (part[0][2], part[1][2]);
15997 else if (nparts == 4)
15999 emit_move_insn (part[0][3], part[1][3]);
16000 emit_move_insn (part[0][2], part[1][2]);
16003 else
16005 /* In 64-bit mode we don't have a 32-bit push available. If this part is
16006 a register, that is OK - we just use the larger counterpart. We also
16007 retype memories - these come from an attempt to avoid a REX prefix on
16008 the move of the second half of a TFmode value. */
16009 if (GET_MODE (part[1][1]) == SImode)
16011 switch (GET_CODE (part[1][1]))
16013 case MEM:
16014 part[1][1] = adjust_address (part[1][1], DImode, 0);
16015 break;
16017 case REG:
16018 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16019 break;
16021 default:
16022 gcc_unreachable ();
16025 if (GET_MODE (part[1][0]) == SImode)
16026 part[1][0] = part[1][1];
16029 emit_move_insn (part[0][1], part[1][1]);
16030 emit_move_insn (part[0][0], part[1][0]);
16031 return;
16034 /* Choose correct order to not overwrite the source before it is copied. */
16035 if ((REG_P (part[0][0])
16036 && REG_P (part[1][1])
16037 && (REGNO (part[0][0]) == REGNO (part[1][1])
16038 || (nparts == 3
16039 && REGNO (part[0][0]) == REGNO (part[1][2]))
16040 || (nparts == 4
16041 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16042 || (collisions > 0
16043 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16045 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16047 operands[2 + i] = part[0][j];
16048 operands[6 + i] = part[1][j];
16051 else
16053 for (i = 0; i < nparts; i++)
16055 operands[2 + i] = part[0][i];
16056 operands[6 + i] = part[1][i];
16060 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16061 if (optimize_insn_for_size_p ())
16063 for (j = 0; j < nparts - 1; j++)
16064 if (CONST_INT_P (operands[6 + j])
16065 && operands[6 + j] != const0_rtx
16066 && REG_P (operands[2 + j]))
16067 for (i = j; i < nparts - 1; i++)
16068 if (CONST_INT_P (operands[7 + i])
16069 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16070 operands[7 + i] = operands[2 + j];
16073 for (i = 0; i < nparts; i++)
16074 emit_move_insn (operands[2 + i], operands[6 + i]);
16076 return;
16079 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16080 left shift by a constant, either using a single shift or
16081 a sequence of add instructions. */
16083 static void
16084 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16086 if (count == 1)
16088 emit_insn ((mode == DImode
16089 ? gen_addsi3
16090 : gen_adddi3) (operand, operand, operand));
16092 else if (!optimize_insn_for_size_p ()
16093 && count * ix86_cost->add <= ix86_cost->shift_const)
16095 int i;
16096 for (i=0; i<count; i++)
16098 emit_insn ((mode == DImode
16099 ? gen_addsi3
16100 : gen_adddi3) (operand, operand, operand));
16103 else
16104 emit_insn ((mode == DImode
16105 ? gen_ashlsi3
16106 : gen_ashldi3) (operand, operand, GEN_INT (count)));
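/* Illustrative sketch (not GCC code): the expander above may replace a
   small constant shift by repeated self-additions, since "x += x" is
   exactly "x <<= 1" and adds can be cheaper than one shift-by-constant
   on some CPUs.  */
#if 0
#include <assert.h>

static unsigned int
shl_by_adds (unsigned int x, int count)
{
  int i;
  for (i = 0; i < count; i++)
    x += x;
  return x;
}

int
main (void)
{
  assert (shl_by_adds (3, 4) == 3u << 4);
  return 0;
}
#endif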
16109 void
16110 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
16112 rtx low[2], high[2];
16113 int count;
16114 const int single_width = mode == DImode ? 32 : 64;
16116 if (CONST_INT_P (operands[2]))
16118 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16119 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16121 if (count >= single_width)
16123 emit_move_insn (high[0], low[1]);
16124 emit_move_insn (low[0], const0_rtx);
16126 if (count > single_width)
16127 ix86_expand_ashl_const (high[0], count - single_width, mode);
16129 else
16131 if (!rtx_equal_p (operands[0], operands[1]))
16132 emit_move_insn (operands[0], operands[1]);
16133 emit_insn ((mode == DImode
16134 ? gen_x86_shld
16135 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
16136 ix86_expand_ashl_const (low[0], count, mode);
16138 return;
16141 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16143 if (operands[1] == const1_rtx)
16145 /* Assuming we've chosen QImode-capable registers, 1 << N
16146 can be done with two 32/64-bit shifts, no branches, no cmoves. */
16147 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
16149 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
16151 ix86_expand_clear (low[0]);
16152 ix86_expand_clear (high[0]);
16153 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
16155 d = gen_lowpart (QImode, low[0]);
16156 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16157 s = gen_rtx_EQ (QImode, flags, const0_rtx);
16158 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16160 d = gen_lowpart (QImode, high[0]);
16161 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16162 s = gen_rtx_NE (QImode, flags, const0_rtx);
16163 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16166 /* Otherwise, we can get the same results by manually performing
16167 a bit extract operation on bit 5/6, and then performing the two
16168 shifts. The two methods of getting 0/1 into low/high are exactly
16169 the same size. Avoiding the shift in the bit extract case helps
16170 pentium4 a bit; no one else seems to care much either way. */
16171 else
16173 rtx x;
16175 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
16176 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
16177 else
16178 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
16179 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
16181 emit_insn ((mode == DImode
16182 ? gen_lshrsi3
16183 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
16184 emit_insn ((mode == DImode
16185 ? gen_andsi3
16186 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
16187 emit_move_insn (low[0], high[0]);
16188 emit_insn ((mode == DImode
16189 ? gen_xorsi3
16190 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
16193 emit_insn ((mode == DImode
16194 ? gen_ashlsi3
16195 : gen_ashldi3) (low[0], low[0], operands[2]));
16196 emit_insn ((mode == DImode
16197 ? gen_ashlsi3
16198 : gen_ashldi3) (high[0], high[0], operands[2]));
16199 return;
16202 if (operands[1] == constm1_rtx)
16204 /* For -1 << N, we can avoid the shld instruction, because we
16205 know that we're shifting 0...31/63 ones into a -1. */
16206 emit_move_insn (low[0], constm1_rtx);
16207 if (optimize_insn_for_size_p ())
16208 emit_move_insn (high[0], low[0]);
16209 else
16210 emit_move_insn (high[0], constm1_rtx);
16212 else
16214 if (!rtx_equal_p (operands[0], operands[1]))
16215 emit_move_insn (operands[0], operands[1]);
16217 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16218 emit_insn ((mode == DImode
16219 ? gen_x86_shld
16220 : gen_x86_64_shld) (high[0], low[0], operands[2]));
16223 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
16225 if (TARGET_CMOVE && scratch)
16227 ix86_expand_clear (scratch);
16228 emit_insn ((mode == DImode
16229 ? gen_x86_shift_adj_1
16230 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
16231 scratch));
16233 else
16234 emit_insn ((mode == DImode
16235 ? gen_x86_shift_adj_2
16236 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
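/* Illustrative sketch (not GCC code): the double-word left shift above
   decomposes into shld-style semantics on the two halves, plus an
   adjustment when the count reaches or exceeds the word width.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
ashl64_via_32 (uint32_t *lo, uint32_t *hi, unsigned int count)
{
  count &= 63;
  if (count >= 32)
    {
      *hi = *lo << (count - 32);   /* low word shifted into high word */
      *lo = 0;
    }
  else if (count > 0)
    {
      *hi = (*hi << count) | (*lo >> (32 - count));   /* shld */
      *lo <<= count;
    }
}

int
main (void)
{
  uint64_t v = 0x0123456789ABCDEFull;
  uint32_t lo = (uint32_t) v, hi = (uint32_t) (v >> 32);
  ashl64_via_32 (&lo, &hi, 36);
  assert ((((uint64_t) hi << 32) | lo) == v << 36);
  return 0;
}
#endif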
16239 void
16240 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
16242 rtx low[2], high[2];
16243 int count;
16244 const int single_width = mode == DImode ? 32 : 64;
16246 if (CONST_INT_P (operands[2]))
16248 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16249 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16251 if (count == single_width * 2 - 1)
16253 emit_move_insn (high[0], high[1]);
16254 emit_insn ((mode == DImode
16255 ? gen_ashrsi3
16256 : gen_ashrdi3) (high[0], high[0],
16257 GEN_INT (single_width - 1)));
16258 emit_move_insn (low[0], high[0]);
16261 else if (count >= single_width)
16263 emit_move_insn (low[0], high[1]);
16264 emit_move_insn (high[0], low[0]);
16265 emit_insn ((mode == DImode
16266 ? gen_ashrsi3
16267 : gen_ashrdi3) (high[0], high[0],
16268 GEN_INT (single_width - 1)));
16269 if (count > single_width)
16270 emit_insn ((mode == DImode
16271 ? gen_ashrsi3
16272 : gen_ashrdi3) (low[0], low[0],
16273 GEN_INT (count - single_width)));
16275 else
16277 if (!rtx_equal_p (operands[0], operands[1]))
16278 emit_move_insn (operands[0], operands[1]);
16279 emit_insn ((mode == DImode
16280 ? gen_x86_shrd
16281 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
16282 emit_insn ((mode == DImode
16283 ? gen_ashrsi3
16284 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
16287 else
16289 if (!rtx_equal_p (operands[0], operands[1]))
16290 emit_move_insn (operands[0], operands[1]);
16292 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16294 emit_insn ((mode == DImode
16295 ? gen_x86_shrd
16296 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
16297 emit_insn ((mode == DImode
16298 ? gen_ashrsi3
16299 : gen_ashrdi3) (high[0], high[0], operands[2]));
16301 if (TARGET_CMOVE && scratch)
16303 emit_move_insn (scratch, high[0]);
16304 emit_insn ((mode == DImode
16305 ? gen_ashrsi3
16306 : gen_ashrdi3) (scratch, scratch,
16307 GEN_INT (single_width - 1)));
16308 emit_insn ((mode == DImode
16309 ? gen_x86_shift_adj_1
16310 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
16311 scratch));
16313 else
16314 emit_insn ((mode == DImode
16315 ? gen_x86_shift_adj_3
16316 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
16320 void
16321 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
16323 rtx low[2], high[2];
16324 int count;
16325 const int single_width = mode == DImode ? 32 : 64;
16327 if (CONST_INT_P (operands[2]))
16329 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16330 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16332 if (count >= single_width)
16334 emit_move_insn (low[0], high[1]);
16335 ix86_expand_clear (high[0]);
16337 if (count > single_width)
16338 emit_insn ((mode == DImode
16339 ? gen_lshrsi3
16340 : gen_lshrdi3) (low[0], low[0],
16341 GEN_INT (count - single_width)));
16343 else
16345 if (!rtx_equal_p (operands[0], operands[1]))
16346 emit_move_insn (operands[0], operands[1]);
16347 emit_insn ((mode == DImode
16348 ? gen_x86_shrd
16349 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
16350 emit_insn ((mode == DImode
16351 ? gen_lshrsi3
16352 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
16355 else
16357 if (!rtx_equal_p (operands[0], operands[1]))
16358 emit_move_insn (operands[0], operands[1]);
16360 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16362 emit_insn ((mode == DImode
16363 ? gen_x86_shrd
16364 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
16365 emit_insn ((mode == DImode
16366 ? gen_lshrsi3
16367 : gen_lshrdi3) (high[0], high[0], operands[2]));
16369 /* Heh. By reversing the arguments, we can reuse this pattern. */
16370 if (TARGET_CMOVE && scratch)
16372 ix86_expand_clear (scratch);
16373 emit_insn ((mode == DImode
16374 ? gen_x86_shift_adj_1
16375 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
16376 scratch));
16378 else
16379 emit_insn ((mode == DImode
16380 ? gen_x86_shift_adj_2
16381 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
16385 /* Predict just emitted jump instruction to be taken with probability PROB. */
16386 static void
16387 predict_jump (int prob)
16389 rtx insn = get_last_insn ();
16390 gcc_assert (JUMP_P (insn));
16391 REG_NOTES (insn)
16392 = gen_rtx_EXPR_LIST (REG_BR_PROB,
16393 GEN_INT (prob),
16394 REG_NOTES (insn));
16397 /* Helper function for the string operations below. Test VARIABLE for
16398 whether it is aligned to VALUE bytes; if it is, jump to the returned label. */
16399 static rtx
16400 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
16402 rtx label = gen_label_rtx ();
16403 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
16404 if (GET_MODE (variable) == DImode)
16405 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
16406 else
16407 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
16408 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
16409 1, label);
16410 if (epilogue)
16411 predict_jump (REG_BR_PROB_BASE * 50 / 100);
16412 else
16413 predict_jump (REG_BR_PROB_BASE * 90 / 100);
16414 return label;
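/* Illustrative sketch (not GCC code): the test above computes
   VARIABLE & VALUE and branches on a zero result; applied to a pointer
   or a count it cheaply checks one power-of-two alignment bit.  */
#if 0
#include <stdint.h>

static int
misaligned_bit (const void *p, unsigned int mask)
{
  return ((uintptr_t) p & mask) != 0;   /* nonzero: fix up one chunk */
}
#endif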
16417 /* Decrease COUNTREG by VALUE. */
16418 static void
16419 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
16421 if (GET_MODE (countreg) == DImode)
16422 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
16423 else
16424 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
16427 /* Zero-extend a possibly-SImode EXP to a Pmode register. */
16428 rtx
16429 ix86_zero_extend_to_Pmode (rtx exp)
16431 rtx r;
16432 if (GET_MODE (exp) == VOIDmode)
16433 return force_reg (Pmode, exp);
16434 if (GET_MODE (exp) == Pmode)
16435 return copy_to_mode_reg (Pmode, exp);
16436 r = gen_reg_rtx (Pmode);
16437 emit_insn (gen_zero_extendsidi2 (r, exp));
16438 return r;
16441 /* Divide COUNTREG by SCALE. */
16442 static rtx
16443 scale_counter (rtx countreg, int scale)
16445 rtx sc;
16446 rtx piece_size_mask;
16448 if (scale == 1)
16449 return countreg;
16450 if (CONST_INT_P (countreg))
16451 return GEN_INT (INTVAL (countreg) / scale);
16452 gcc_assert (REG_P (countreg));
16454 piece_size_mask = GEN_INT (scale - 1);
16455 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
16456 GEN_INT (exact_log2 (scale)),
16457 NULL, 1, OPTAB_DIRECT);
16458 return sc;
16461 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
16462 DImode for constant loop counts. */
16464 static enum machine_mode
16465 counter_mode (rtx count_exp)
16467 if (GET_MODE (count_exp) != VOIDmode)
16468 return GET_MODE (count_exp);
16469 if (GET_CODE (count_exp) != CONST_INT)
16470 return Pmode;
16471 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
16472 return DImode;
16473 return SImode;
16476 /* When SRCPTR is non-NULL, output a simple loop that moves the memory
16477 pointed to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
16478 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
16479 the equivalent loop that sets memory to VALUE (expected to be in MODE).
16481 The size is rounded down to a whole number of chunks moved at once.
16482 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
16485 static void
16486 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
16487 rtx destptr, rtx srcptr, rtx value,
16488 rtx count, enum machine_mode mode, int unroll,
16489 int expected_size)
16491 rtx out_label, top_label, iter, tmp;
16492 enum machine_mode iter_mode = counter_mode (count);
16493 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
16494 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
16495 rtx size;
16496 rtx x_addr;
16497 rtx y_addr;
16498 int i;
16500 top_label = gen_label_rtx ();
16501 out_label = gen_label_rtx ();
16502 iter = gen_reg_rtx (iter_mode);
16504 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
16505 NULL, 1, OPTAB_DIRECT);
16506 /* Those two should combine. */
16507 if (piece_size == const1_rtx)
16509 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
16510 true, out_label);
16511 predict_jump (REG_BR_PROB_BASE * 10 / 100);
16513 emit_move_insn (iter, const0_rtx);
16515 emit_label (top_label);
16517 tmp = convert_modes (Pmode, iter_mode, iter, true);
16518 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
16519 destmem = change_address (destmem, mode, x_addr);
16521 if (srcmem)
16523 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
16524 srcmem = change_address (srcmem, mode, y_addr);
16526 /* When unrolling for chips that reorder memory reads and writes,
16527 we can save registers by using a single temporary.
16528 Also, using 4 temporaries is overkill in 32-bit mode. */
16529 if (!TARGET_64BIT && 0)
16531 for (i = 0; i < unroll; i++)
16533 if (i)
16535 destmem =
16536 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16537 srcmem =
16538 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
16540 emit_move_insn (destmem, srcmem);
16543 else
16545 rtx tmpreg[4];
16546 gcc_assert (unroll <= 4);
16547 for (i = 0; i < unroll; i++)
16549 tmpreg[i] = gen_reg_rtx (mode);
16550 if (i)
16552 srcmem =
16553 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
16555 emit_move_insn (tmpreg[i], srcmem);
16557 for (i = 0; i < unroll; i++)
16559 if (i)
16561 destmem =
16562 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16564 emit_move_insn (destmem, tmpreg[i]);
16568 else
16569 for (i = 0; i < unroll; i++)
16571 if (i)
16572 destmem =
16573 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16574 emit_move_insn (destmem, value);
16577 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
16578 true, OPTAB_LIB_WIDEN);
16579 if (tmp != iter)
16580 emit_move_insn (iter, tmp);
16582 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
16583 true, top_label);
16584 if (expected_size != -1)
16586 expected_size /= GET_MODE_SIZE (mode) * unroll;
16587 if (expected_size == 0)
16588 predict_jump (0);
16589 else if (expected_size > REG_BR_PROB_BASE)
16590 predict_jump (REG_BR_PROB_BASE - 1);
16591 else
16592 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
16594 else
16595 predict_jump (REG_BR_PROB_BASE * 80 / 100);
16596 iter = ix86_zero_extend_to_Pmode (iter);
16597 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
16598 true, OPTAB_LIB_WIDEN);
16599 if (tmp != destptr)
16600 emit_move_insn (destptr, tmp);
16601 if (srcptr)
16603 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
16604 true, OPTAB_LIB_WIDEN);
16605 if (tmp != srcptr)
16606 emit_move_insn (srcptr, tmp);
16608 emit_label (out_label);
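/* Illustrative sketch (not GCC code): shape of the loop emitted above.
   COUNT is rounded down to whole chunks (chunk = mode size * unroll,
   a power of two); the loop copies chunk by chunk and both pointers
   are then advanced past the copied region, leaving the remainder to
   the epilogue.  */
#if 0
#include <stddef.h>
#include <string.h>

static void
copy_loop (unsigned char **dst, const unsigned char **src,
           size_t count, size_t chunk)
{
  size_t size = count & ~(chunk - 1);   /* piece_size_mask */
  size_t iter;
  for (iter = 0; iter < size; iter += chunk)
    memcpy (*dst + iter, *src + iter, chunk);
  *dst += size;                         /* destptr += iter */
  *src += size;                         /* srcptr += iter */
}
#endif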
16611 /* Output a "rep; mov" instruction.
16612 Arguments have the same meaning as for the previous function. */
16613 static void
16614 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
16615 rtx destptr, rtx srcptr,
16616 rtx count,
16617 enum machine_mode mode)
16619 rtx destexp;
16620 rtx srcexp;
16621 rtx countreg;
16623 /* If the size is known, it is shorter to use rep movs. */
16624 if (mode == QImode && CONST_INT_P (count)
16625 && !(INTVAL (count) & 3))
16626 mode = SImode;
16628 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
16629 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
16630 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
16631 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
16632 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
16633 if (mode != QImode)
16635 destexp = gen_rtx_ASHIFT (Pmode, countreg,
16636 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
16637 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
16638 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
16639 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
16640 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
16642 else
16644 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
16645 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
16647 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
16648 destexp, srcexp));
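/* Illustrative sketch (GNU C inline asm; not GCC code): the QImode
   variant of the "rep mov" emitted above is equivalent to the classic
   rep movsb idiom.  For the wider variants the byte count is first
   shifted right by log2 of the element size, as scale_counter does.  */
#if 0
#include <stddef.h>

static void
rep_movsb (void *dst, const void *src, size_t n)
{
  __asm__ volatile ("rep movsb"
                    : "+D" (dst), "+S" (src), "+c" (n)
                    :
                    : "memory");
}
#endif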
16651 /* Output a "rep; stos" instruction.
16652 Arguments have the same meaning as for the previous function. */
16653 static void
16654 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
16655 rtx count,
16656 enum machine_mode mode)
16658 rtx destexp;
16659 rtx countreg;
16661 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
16662 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
16663 value = force_reg (mode, gen_lowpart (mode, value));
16664 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
16665 if (mode != QImode)
16667 destexp = gen_rtx_ASHIFT (Pmode, countreg,
16668 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
16669 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
16671 else
16672 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
16673 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
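/* Illustrative sketch (GNU C inline asm; not GCC code): the QImode
   variant of the "rep stos" emitted above, with the fill byte in AL.  */
#if 0
#include <stddef.h>

static void
rep_stosb (void *dst, unsigned char value, size_t n)
{
  __asm__ volatile ("rep stosb"
                    : "+D" (dst), "+c" (n)
                    : "a" (value)
                    : "memory");
}
#endif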
16676 static void
16677 emit_strmov (rtx destmem, rtx srcmem,
16678 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
16680 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
16681 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
16682 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16685 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
16686 static void
16687 expand_movmem_epilogue (rtx destmem, rtx srcmem,
16688 rtx destptr, rtx srcptr, rtx count, int max_size)
16690 rtx src, dest;
16691 if (CONST_INT_P (count))
16693 HOST_WIDE_INT countval = INTVAL (count);
16694 int offset = 0;
16696 if ((countval & 0x10) && max_size > 16)
16698 if (TARGET_64BIT)
16700 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
16701 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
16703 else
16704 gcc_unreachable ();
16705 offset += 16;
16707 if ((countval & 0x08) && max_size > 8)
16709 if (TARGET_64BIT)
16710 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
16711 else
16713 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
16714 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
16716 offset += 8;
16718 if ((countval & 0x04) && max_size > 4)
16720 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
16721 offset += 4;
16723 if ((countval & 0x02) && max_size > 2)
16725 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
16726 offset += 2;
16728 if ((countval & 0x01) && max_size > 1)
16730 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
16731 offset += 1;
16733 return;
16735 if (max_size > 8)
16737 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
16738 count, 1, OPTAB_DIRECT);
16739 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
16740 count, QImode, 1, 4);
16741 return;
16744 /* When there are stringops, we can cheaply advance the dest and src
16745 pointers. Otherwise we save code size by maintaining an offset (zero is
16746 readily available from the preceding rep operation) and using x86
16747 addressing modes. */
16748 if (TARGET_SINGLE_STRINGOP)
16750 if (max_size > 4)
16752 rtx label = ix86_expand_aligntest (count, 4, true);
16753 src = change_address (srcmem, SImode, srcptr);
16754 dest = change_address (destmem, SImode, destptr);
16755 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16756 emit_label (label);
16757 LABEL_NUSES (label) = 1;
16759 if (max_size > 2)
16761 rtx label = ix86_expand_aligntest (count, 2, true);
16762 src = change_address (srcmem, HImode, srcptr);
16763 dest = change_address (destmem, HImode, destptr);
16764 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16765 emit_label (label);
16766 LABEL_NUSES (label) = 1;
16768 if (max_size > 1)
16770 rtx label = ix86_expand_aligntest (count, 1, true);
16771 src = change_address (srcmem, QImode, srcptr);
16772 dest = change_address (destmem, QImode, destptr);
16773 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16774 emit_label (label);
16775 LABEL_NUSES (label) = 1;
16778 else
16780 rtx offset = force_reg (Pmode, const0_rtx);
16781 rtx tmp;
16783 if (max_size > 4)
16785 rtx label = ix86_expand_aligntest (count, 4, true);
16786 src = change_address (srcmem, SImode, srcptr);
16787 dest = change_address (destmem, SImode, destptr);
16788 emit_move_insn (dest, src);
16789 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
16790 true, OPTAB_LIB_WIDEN);
16791 if (tmp != offset)
16792 emit_move_insn (offset, tmp);
16793 emit_label (label);
16794 LABEL_NUSES (label) = 1;
16796 if (max_size > 2)
16798 rtx label = ix86_expand_aligntest (count, 2, true);
16799 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
16800 src = change_address (srcmem, HImode, tmp);
16801 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
16802 dest = change_address (destmem, HImode, tmp);
16803 emit_move_insn (dest, src);
16804 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
16805 true, OPTAB_LIB_WIDEN);
16806 if (tmp != offset)
16807 emit_move_insn (offset, tmp);
16808 emit_label (label);
16809 LABEL_NUSES (label) = 1;
16811 if (max_size > 1)
16813 rtx label = ix86_expand_aligntest (count, 1, true);
16814 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
16815 src = change_address (srcmem, QImode, tmp);
16816 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
16817 dest = change_address (destmem, QImode, tmp);
16818 emit_move_insn (dest, src);
16819 emit_label (label);
16820 LABEL_NUSES (label) = 1;
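/* Illustrative sketch (not GCC code): for a known COUNT the epilogue
   above degenerates into a fixed ladder of power-of-two sized moves
   selected by the low bits of the count (shown here for residues
   below 16 on a 32-bit target).  */
#if 0
#include <string.h>

static void
copy_tail (unsigned char *d, const unsigned char *s, unsigned int n)
{
  unsigned int off = 0;
  if (n & 8) { memcpy (d + off, s + off, 8); off += 8; }
  if (n & 4) { memcpy (d + off, s + off, 4); off += 4; }
  if (n & 2) { memcpy (d + off, s + off, 2); off += 2; }
  if (n & 1) d[off] = s[off];
}
#endif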
16825 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
16826 static void
16827 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
16828 rtx count, int max_size)
16830 count =
16831 expand_simple_binop (counter_mode (count), AND, count,
16832 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
16833 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
16834 gen_lowpart (QImode, value), count, QImode,
16835 1, max_size / 2);
16838 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
16839 static void
16840 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
16842 rtx dest;
16844 if (CONST_INT_P (count))
16846 HOST_WIDE_INT countval = INTVAL (count);
16847 int offset = 0;
16849 if ((countval & 0x10) && max_size > 16)
16851 if (TARGET_64BIT)
16853 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
16854 emit_insn (gen_strset (destptr, dest, value));
16855 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
16856 emit_insn (gen_strset (destptr, dest, value));
16858 else
16859 gcc_unreachable ();
16860 offset += 16;
16862 if ((countval & 0x08) && max_size > 8)
16864 if (TARGET_64BIT)
16866 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
16867 emit_insn (gen_strset (destptr, dest, value));
16869 else
16871 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
16872 emit_insn (gen_strset (destptr, dest, value));
16873 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
16874 emit_insn (gen_strset (destptr, dest, value));
16876 offset += 8;
16878 if ((countval & 0x04) && max_size > 4)
16880 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
16881 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
16882 offset += 4;
16884 if ((countval & 0x02) && max_size > 2)
16886 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
16887 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
16888 offset += 2;
16890 if ((countval & 0x01) && max_size > 1)
16892 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
16893 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
16894 offset += 1;
16896 return;
16898 if (max_size > 32)
16900 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
16901 return;
16903 if (max_size > 16)
16905 rtx label = ix86_expand_aligntest (count, 16, true);
16906 if (TARGET_64BIT)
16908 dest = change_address (destmem, DImode, destptr);
16909 emit_insn (gen_strset (destptr, dest, value));
16910 emit_insn (gen_strset (destptr, dest, value));
16912 else
16914 dest = change_address (destmem, SImode, destptr);
16915 emit_insn (gen_strset (destptr, dest, value));
16916 emit_insn (gen_strset (destptr, dest, value));
16917 emit_insn (gen_strset (destptr, dest, value));
16918 emit_insn (gen_strset (destptr, dest, value));
16920 emit_label (label);
16921 LABEL_NUSES (label) = 1;
16923 if (max_size > 8)
16925 rtx label = ix86_expand_aligntest (count, 8, true);
16926 if (TARGET_64BIT)
16928 dest = change_address (destmem, DImode, destptr);
16929 emit_insn (gen_strset (destptr, dest, value));
16931 else
16933 dest = change_address (destmem, SImode, destptr);
16934 emit_insn (gen_strset (destptr, dest, value));
16935 emit_insn (gen_strset (destptr, dest, value));
16937 emit_label (label);
16938 LABEL_NUSES (label) = 1;
16940 if (max_size > 4)
16942 rtx label = ix86_expand_aligntest (count, 4, true);
16943 dest = change_address (destmem, SImode, destptr);
16944 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
16945 emit_label (label);
16946 LABEL_NUSES (label) = 1;
16948 if (max_size > 2)
16950 rtx label = ix86_expand_aligntest (count, 2, true);
16951 dest = change_address (destmem, HImode, destptr);
16952 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
16953 emit_label (label);
16954 LABEL_NUSES (label) = 1;
16956 if (max_size > 1)
16958 rtx label = ix86_expand_aligntest (count, 1, true);
16959 dest = change_address (destmem, QImode, destptr);
16960 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
16961 emit_label (label);
16962 LABEL_NUSES (label) = 1;
16966 /* Copy enough from SRC to DEST to raise the alignment of DEST, known to be
16967 aligned to ALIGN, up to DESIRED_ALIGNMENT. */
16968 static void
16969 expand_movmem_prologue (rtx destmem, rtx srcmem,
16970 rtx destptr, rtx srcptr, rtx count,
16971 int align, int desired_alignment)
16973 if (align <= 1 && desired_alignment > 1)
16975 rtx label = ix86_expand_aligntest (destptr, 1, false);
16976 srcmem = change_address (srcmem, QImode, srcptr);
16977 destmem = change_address (destmem, QImode, destptr);
16978 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
16979 ix86_adjust_counter (count, 1);
16980 emit_label (label);
16981 LABEL_NUSES (label) = 1;
16983 if (align <= 2 && desired_alignment > 2)
16985 rtx label = ix86_expand_aligntest (destptr, 2, false);
16986 srcmem = change_address (srcmem, HImode, srcptr);
16987 destmem = change_address (destmem, HImode, destptr);
16988 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
16989 ix86_adjust_counter (count, 2);
16990 emit_label (label);
16991 LABEL_NUSES (label) = 1;
16993 if (align <= 4 && desired_alignment > 4)
16995 rtx label = ix86_expand_aligntest (destptr, 4, false);
16996 srcmem = change_address (srcmem, SImode, srcptr);
16997 destmem = change_address (destmem, SImode, destptr);
16998 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
16999 ix86_adjust_counter (count, 4);
17000 emit_label (label);
17001 LABEL_NUSES (label) = 1;
17003 gcc_assert (desired_alignment <= 8);
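/* Illustrative sketch (not GCC code): effect of the prologue above.
   At most one 1-, 2- and 4-byte copy is issued, each conditional on an
   alignment bit of DEST, so afterwards DEST is aligned to
   DESIRED_ALIGNMENT (at most 8) and COUNT has been reduced accordingly.  */
#if 0
#include <stddef.h>
#include <stdint.h>

static size_t
align_copy_prologue (unsigned char **d, const unsigned char **s,
                     size_t count, size_t desired)
{
  size_t step;
  for (step = 1; step < desired; step <<= 1)
    if ((uintptr_t) *d & step)
      {
        size_t i;
        for (i = 0; i < step; i++)      /* one copy of STEP bytes */
          (*d)[i] = (*s)[i];
        *d += step; *s += step; count -= step;
      }
  return count;
}
#endif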
17006 /* Store enough bytes at DEST to raise its alignment, known to be ALIGN,
17007 up to DESIRED_ALIGNMENT. */
17008 static void
17009 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
17010 int align, int desired_alignment)
17012 if (align <= 1 && desired_alignment > 1)
17014 rtx label = ix86_expand_aligntest (destptr, 1, false);
17015 destmem = change_address (destmem, QImode, destptr);
17016 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
17017 ix86_adjust_counter (count, 1);
17018 emit_label (label);
17019 LABEL_NUSES (label) = 1;
17021 if (align <= 2 && desired_alignment > 2)
17023 rtx label = ix86_expand_aligntest (destptr, 2, false);
17024 destmem = change_address (destmem, HImode, destptr);
17025 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
17026 ix86_adjust_counter (count, 2);
17027 emit_label (label);
17028 LABEL_NUSES (label) = 1;
17030 if (align <= 4 && desired_alignment > 4)
17032 rtx label = ix86_expand_aligntest (destptr, 4, false);
17033 destmem = change_address (destmem, SImode, destptr);
17034 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
17035 ix86_adjust_counter (count, 4);
17036 emit_label (label);
17037 LABEL_NUSES (label) = 1;
17039 gcc_assert (desired_alignment <= 8);
17042 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
17043 static enum stringop_alg
17044 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
17045 int *dynamic_check)
17047 const struct stringop_algs * algs;
17048 bool optimize_for_speed;
17049 /* Algorithms using the rep prefix want at least edi and ecx;
17050 additionally, memset wants eax and memcpy wants esi. Don't
17051 consider such algorithms if the user has appropriated those
17052 registers for their own purposes. */
17053 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
17054 || (memset
17055 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
17057 #define ALG_USABLE_P(alg) (rep_prefix_usable \
17058 || (alg != rep_prefix_1_byte \
17059 && alg != rep_prefix_4_byte \
17060 && alg != rep_prefix_8_byte))
17061 const struct processor_costs *cost;
17063 /* Even if the string operation call is cold, we still might spend a lot
17064 of time processing large blocks. */
17065 if (optimize_function_for_size_p (cfun)
17066 || (optimize_insn_for_size_p ()
17067 && expected_size != -1 && expected_size < 256))
17068 optimize_for_speed = false;
17069 else
17070 optimize_for_speed = true;
17072 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
17074 *dynamic_check = -1;
17075 if (memset)
17076 algs = &cost->memset[TARGET_64BIT != 0];
17077 else
17078 algs = &cost->memcpy[TARGET_64BIT != 0];
17079 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
17080 return stringop_alg;
17081 /* rep; movq or rep; movl is the smallest variant. */
17082 else if (!optimize_for_speed)
17084 if (!count || (count & 3))
17085 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
17086 else
17087 return rep_prefix_usable ? rep_prefix_4_byte : loop;
17089 /* Very tiny blocks are best handled via the loop; REP is expensive to set
17090 up. */
17091 else if (expected_size != -1 && expected_size < 4)
17092 return loop_1_byte;
17093 else if (expected_size != -1)
17095 unsigned int i;
17096 enum stringop_alg alg = libcall;
17097 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
17099 /* We get here if the algorithms that were not libcall-based
17100 were rep-prefix based and we are unable to use rep prefixes
17101 based on global register usage. Break out of the loop and
17102 use the heuristic below. */
17103 if (algs->size[i].max == 0)
17104 break;
17105 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
17107 enum stringop_alg candidate = algs->size[i].alg;
17109 if (candidate != libcall && ALG_USABLE_P (candidate))
17110 alg = candidate;
17111 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
17112 last non-libcall inline algorithm. */
17113 if (TARGET_INLINE_ALL_STRINGOPS)
17115 /* When the current size is best to be copied by a libcall,
17116 but we are still forced to inline, run the heuristic below
17117 that will pick code for medium sized blocks. */
17118 if (alg != libcall)
17119 return alg;
17120 break;
17122 else if (ALG_USABLE_P (candidate))
17123 return candidate;
17126 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
17128 /* When asked to inline the call anyway, try to pick a meaningful choice.
17129 We look for the maximal size of block that is faster to copy by hand, and
17130 take blocks of at most that size, guessing that the average size will
17131 be roughly half of the maximum.
17133 If this turns out to be bad, we might simply specify the preferred
17134 choice in ix86_costs. */
17135 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17136 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
17138 int max = -1;
17139 enum stringop_alg alg;
17140 int i;
17141 bool any_alg_usable_p = true;
17143 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
17145 enum stringop_alg candidate = algs->size[i].alg;
17146 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
17148 if (candidate != libcall && candidate
17149 && ALG_USABLE_P (candidate))
17150 max = algs->size[i].max;
17152 /* If there aren't any usable algorithms, then recursing on
17153 smaller sizes isn't going to find anything. Just return the
17154 simple byte-at-a-time copy loop. */
17155 if (!any_alg_usable_p)
17157 /* Pick something reasonable. */
17158 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17159 *dynamic_check = 128;
17160 return loop_1_byte;
17162 if (max == -1)
17163 max = 4096;
17164 alg = decide_alg (count, max / 2, memset, dynamic_check);
17165 gcc_assert (*dynamic_check == -1);
17166 gcc_assert (alg != libcall);
17167 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17168 *dynamic_check = max;
17169 return alg;
17171 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
17172 #undef ALG_USABLE_P
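/* Illustrative sketch (not GCC code; names are local to the sketch):
   the size-table walk above in miniature.  Each entry gives the
   largest block size its algorithm should handle; max == -1 means no
   limit, and max == 0 terminates the table.  */
#if 0
enum alg { ALG_LOOP_1_BYTE, ALG_LOOP, ALG_REP_PREFIX, ALG_LIBCALL };
struct entry { long max; enum alg alg; };

static enum alg
pick_alg (const struct entry *t, long expected_size)
{
  int i;
  for (i = 0; t[i].max != 0; i++)
    if (t[i].max == -1 || t[i].max >= expected_size)
      return t[i].alg;
  return ALG_LIBCALL;
}
#endif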
17175 /* Decide on alignment. We know that the operand is already aligned to ALIGN
17176 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
17177 static int
17178 decide_alignment (int align,
17179 enum stringop_alg alg,
17180 int expected_size)
17182 int desired_align = 0;
17183 switch (alg)
17185 case no_stringop:
17186 gcc_unreachable ();
17187 case loop:
17188 case unrolled_loop:
17189 desired_align = GET_MODE_SIZE (Pmode);
17190 break;
17191 case rep_prefix_8_byte:
17192 desired_align = 8;
17193 break;
17194 case rep_prefix_4_byte:
17195 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
17196 copying a whole cache line at once. */
17197 if (TARGET_PENTIUMPRO)
17198 desired_align = 8;
17199 else
17200 desired_align = 4;
17201 break;
17202 case rep_prefix_1_byte:
17203 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
17204 copying a whole cache line at once. */
17205 if (TARGET_PENTIUMPRO)
17206 desired_align = 8;
17207 else
17208 desired_align = 1;
17209 break;
17210 case loop_1_byte:
17211 desired_align = 1;
17212 break;
17213 case libcall:
17214 return 0;
17217 if (optimize_size)
17218 desired_align = 1;
17219 if (desired_align < align)
17220 desired_align = align;
17221 if (expected_size != -1 && expected_size < 4)
17222 desired_align = align;
17223 return desired_align;
17226 /* Return the smallest power of 2 greater than VAL. */
17227 static int
17228 smallest_pow2_greater_than (int val)
17230 int ret = 1;
17231 while (ret <= val)
17232 ret <<= 1;
17233 return ret;
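/* Usage note (sketch): "greater than" is strict, so a power of two maps
   to the next one, and the epilogue mask
   smallest_pow2_greater_than (n) - 1 covers residues 0 .. n.  */
#if 0
#include <assert.h>

int
main (void)
{
  assert (smallest_pow2_greater_than (0) == 1);
  assert (smallest_pow2_greater_than (3) == 4);
  assert (smallest_pow2_greater_than (4) == 8);   /* strictly greater */
  return 0;
}
#endif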
17236 /* Expand string move (memcpy) operation. Use i386 string operations when
17237 profitable. expand_setmem contains similar code. The code depends upon
17238 architecture, block size and alignment, but always has the same
17239 overall structure:
17241 1) Prologue guard: Conditional that jumps up to the epilogues for small
17242 blocks that can be handled by the epilogue alone. This is faster but
17243 also needed for correctness, since the prologue assumes the block is
17244 larger than the desired alignment.
17246 Optional dynamic check for size and libcall for large
17247 blocks is emitted here too, with -minline-stringops-dynamically.
17249 2) Prologue: copy the first few bytes in order to get the destination
17250 aligned to DESIRED_ALIGN. It is emitted only when ALIGN is less than
17251 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
17252 We emit either a jump tree on power of two sized blocks, or a byte loop.
17254 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
17255 with the specified algorithm.
17257 4) Epilogue: code copying the tail of the block that is too small to be
17258 handled by the main body (or up to size guarded by the prologue guard). */
17260 int
17261 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
17262 rtx expected_align_exp, rtx expected_size_exp)
17264 rtx destreg;
17265 rtx srcreg;
17266 rtx label = NULL;
17267 rtx tmp;
17268 rtx jump_around_label = NULL;
17269 HOST_WIDE_INT align = 1;
17270 unsigned HOST_WIDE_INT count = 0;
17271 HOST_WIDE_INT expected_size = -1;
17272 int size_needed = 0, epilogue_size_needed;
17273 int desired_align = 0;
17274 enum stringop_alg alg;
17275 int dynamic_check;
17276 bool need_zero_guard = false;
17278 if (CONST_INT_P (align_exp))
17279 align = INTVAL (align_exp);
17280 /* i386 can do misaligned access at a reasonably increased cost. */
17281 if (CONST_INT_P (expected_align_exp)
17282 && INTVAL (expected_align_exp) > align)
17283 align = INTVAL (expected_align_exp);
17284 if (CONST_INT_P (count_exp))
17285 count = expected_size = INTVAL (count_exp);
17286 if (CONST_INT_P (expected_size_exp) && count == 0)
17287 expected_size = INTVAL (expected_size_exp);
17289 /* Make sure we don't need to care about overflow later on. */
17290 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
17291 return 0;
17293 /* Step 0: Decide on preferred algorithm, desired alignment and
17294 size of chunks to be copied by main loop. */
17296 alg = decide_alg (count, expected_size, false, &dynamic_check);
17297 desired_align = decide_alignment (align, alg, expected_size);
17299 if (!TARGET_ALIGN_STRINGOPS)
17300 align = desired_align;
17302 if (alg == libcall)
17303 return 0;
17304 gcc_assert (alg != no_stringop);
17305 if (!count)
17306 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
17307 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
17308 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
17309 switch (alg)
17311 case libcall:
17312 case no_stringop:
17313 gcc_unreachable ();
17314 case loop:
17315 need_zero_guard = true;
17316 size_needed = GET_MODE_SIZE (Pmode);
17317 break;
17318 case unrolled_loop:
17319 need_zero_guard = true;
17320 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
17321 break;
17322 case rep_prefix_8_byte:
17323 size_needed = 8;
17324 break;
17325 case rep_prefix_4_byte:
17326 size_needed = 4;
17327 break;
17328 case rep_prefix_1_byte:
17329 size_needed = 1;
17330 break;
17331 case loop_1_byte:
17332 need_zero_guard = true;
17333 size_needed = 1;
17334 break;
17337 epilogue_size_needed = size_needed;
17339 /* Step 1: Prologue guard. */
17341 /* Alignment code needs count to be in register. */
17342 if (CONST_INT_P (count_exp) && desired_align > align)
17343 count_exp = force_reg (counter_mode (count_exp), count_exp);
17344 gcc_assert (desired_align >= 1 && align >= 1);
17346 /* Ensure that alignment prologue won't copy past end of block. */
17347 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
17349 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
17350 /* Epilogue always copies COUNT_EXP & EPILOGUE_SIZE_NEEDED bytes.
17351 Make sure it is power of 2. */
17352 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
17354 if (CONST_INT_P (count_exp))
17356 if (UINTVAL (count_exp) < (unsigned HOST_WIDE_INT)epilogue_size_needed)
17357 goto epilogue;
17359 else
17361 label = gen_label_rtx ();
17362 emit_cmp_and_jump_insns (count_exp,
17363 GEN_INT (epilogue_size_needed),
17364 LTU, 0, counter_mode (count_exp), 1, label);
17365 if (expected_size == -1 || expected_size < epilogue_size_needed)
17366 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17367 else
17368 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17372 /* Emit code to decide at runtime whether a library call or inline code
17373 should be used. */
17374 if (dynamic_check != -1)
17376 if (CONST_INT_P (count_exp))
17378 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
17380 emit_block_move_via_libcall (dst, src, count_exp, false);
17381 count_exp = const0_rtx;
17382 goto epilogue;
17385 else
17387 rtx hot_label = gen_label_rtx ();
17388 jump_around_label = gen_label_rtx ();
17389 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
17390 LEU, 0, GET_MODE (count_exp), 1, hot_label);
17391 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17392 emit_block_move_via_libcall (dst, src, count_exp, false);
17393 emit_jump (jump_around_label);
17394 emit_label (hot_label);
17398 /* Step 2: Alignment prologue. */
17400 if (desired_align > align)
17402 /* Except for the first move in the epilogue, we no longer know
17403 the constant offset in the aliasing info. It doesn't seem worth
17404 the pain to maintain it for the first move, so throw away
17405 the info early. */
17406 src = change_address (src, BLKmode, srcreg);
17407 dst = change_address (dst, BLKmode, destreg);
17408 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
17409 desired_align);
17410 if (need_zero_guard && !count)
17412 /* It is possible that we copied enough so the main loop will not
17413 execute. */
17414 emit_cmp_and_jump_insns (count_exp,
17415 GEN_INT (size_needed),
17416 LTU, 0, counter_mode (count_exp), 1, label);
17417 if (expected_size == -1
17418 || expected_size < (desired_align - align) / 2 + size_needed)
17419 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17420 else
17421 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17424 if (label && size_needed == 1)
17426 emit_label (label);
17427 LABEL_NUSES (label) = 1;
17428 label = NULL;
17431 /* Step 3: Main loop. */
17433 switch (alg)
17435 case libcall:
17436 case no_stringop:
17437 gcc_unreachable ();
17438 case loop_1_byte:
17439 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
17440 count_exp, QImode, 1, expected_size);
17441 break;
17442 case loop:
17443 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
17444 count_exp, Pmode, 1, expected_size);
17445 break;
17446 case unrolled_loop:
17447 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
17448 registers for 4 temporaries anyway. */
17449 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
17450 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
17451 expected_size);
17452 break;
17453 case rep_prefix_8_byte:
17454 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
17455 DImode);
17456 break;
17457 case rep_prefix_4_byte:
17458 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
17459 SImode);
17460 break;
17461 case rep_prefix_1_byte:
17462 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
17463 QImode);
17464 break;
17466 /* Properly adjust the offsets of the src and dest memory for aliasing. */
17467 if (CONST_INT_P (count_exp))
17469 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
17470 (count / size_needed) * size_needed);
17471 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
17472 (count / size_needed) * size_needed);
17474 else
17476 src = change_address (src, BLKmode, srcreg);
17477 dst = change_address (dst, BLKmode, destreg);
17480 /* Step 4: Epilogue to copy the remaining bytes. */
17481 epilogue:
17482 if (label)
17484 /* When the main loop is done, COUNT_EXP might hold original count,
17485 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
17486 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
17487 bytes. Compensate if needed. */
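/* Worked example (editorial note, not part of the original source):
   with count == 100 and size_needed == 16 the main loop handles 96
   bytes and leaves 100 & 15 == 4, so count_exp is masked below with
   size_needed - 1 before falling into the epilogue.  */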
17489 if (size_needed < epilogue_size_needed)
17491 tmp =
17492 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
17493 GEN_INT (size_needed - 1), count_exp, 1,
17494 OPTAB_DIRECT);
17495 if (tmp != count_exp)
17496 emit_move_insn (count_exp, tmp);
17498 emit_label (label);
17499 LABEL_NUSES (label) = 1;
17502 if (count_exp != const0_rtx && epilogue_size_needed > 1)
17503 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
17504 epilogue_size_needed);
17505 if (jump_around_label)
17506 emit_label (jump_around_label);
17507 return 1;
17510 /* Helper function for memset. For QImode value 0xXY produce
17511 0xXYXYXYXY of the width specified by MODE. This is essentially
17512 a * 0x01010101, but we can do slightly better than
17513 synth_mult by unwinding the sequence by hand on CPUs with
17514 slow multiply. (A worked example follows the function.) */
17515 static rtx
17516 promote_duplicated_reg (enum machine_mode mode, rtx val)
17518 enum machine_mode valmode = GET_MODE (val);
17519 rtx tmp;
17520 int nops = mode == DImode ? 3 : 2;
17522 gcc_assert (mode == SImode || mode == DImode);
17523 if (val == const0_rtx)
17524 return copy_to_mode_reg (mode, const0_rtx);
17525 if (CONST_INT_P (val))
17527 HOST_WIDE_INT v = INTVAL (val) & 255;
17529 v |= v << 8;
17530 v |= v << 16;
17531 if (mode == DImode)
17532 v |= (v << 16) << 16;
17533 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
17536 if (valmode == VOIDmode)
17537 valmode = QImode;
17538 if (valmode != QImode)
17539 val = gen_lowpart (QImode, val);
17540 if (mode == QImode)
17541 return val;
17542 if (!TARGET_PARTIAL_REG_STALL)
17543 nops--;
17544 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
17545 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
17546 <= (ix86_cost->shift_const + ix86_cost->add) * nops
17547 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
17549 rtx reg = convert_modes (mode, QImode, val, true);
17550 tmp = promote_duplicated_reg (mode, const1_rtx);
17551 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
17552 OPTAB_DIRECT);
17554 else
17556 rtx reg = convert_modes (mode, QImode, val, true);
17558 if (!TARGET_PARTIAL_REG_STALL)
17559 if (mode == SImode)
17560 emit_insn (gen_movsi_insv_1 (reg, reg));
17561 else
17562 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
17563 else
17565 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
17566 NULL, 1, OPTAB_DIRECT);
17567 reg =
17568 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
17570 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
17571 NULL, 1, OPTAB_DIRECT);
17572 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
17573 if (mode == SImode)
17574 return reg;
17575 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
17576 NULL, 1, OPTAB_DIRECT);
17577 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
17578 return reg;
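/* Worked example (editorial note, not part of the original source):
   for MODE == SImode and VAL == 0x5A the shift/IOR sequence above
   computes

     x  = 0x0000005A
     x |= x << 8;    -> 0x00005A5A
     x |= x << 16;   -> 0x5A5A5A5A

   which equals 0x5A * 0x01010101, the multiply the cost test above
   compares against.  For DImode one further step, x |= x << 32,
   fills all eight bytes.  */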
17582 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
17583 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
17584 alignment from ALIGN to DESIRED_ALIGN. */
17585 static rtx
17586 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
17588 rtx promoted_val;
17590 if (TARGET_64BIT
17591 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
17592 promoted_val = promote_duplicated_reg (DImode, val);
17593 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
17594 promoted_val = promote_duplicated_reg (SImode, val);
17595 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
17596 promoted_val = promote_duplicated_reg (HImode, val);
17597 else
17598 promoted_val = val;
17600 return promoted_val;
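/* Illustrative examples (editorial note, not part of the original
   source): on a 64-bit target, size_needed == 8 promotes VAL to
   DImode; size_needed == 4 with no extra alignment work promotes
   only to SImode; and a plain byte loop with size_needed == 1 and
   desired_align == align leaves VAL unpromoted.  */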
17603 /* Expand string clear operation (bzero). Use i386 string operations
17604 when profitable. See the expand_movmem comment for an explanation of
17605 the individual steps performed. */
17606 int
17607 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
17608 rtx expected_align_exp, rtx expected_size_exp)
17610 rtx destreg;
17611 rtx label = NULL;
17612 rtx tmp;
17613 rtx jump_around_label = NULL;
17614 HOST_WIDE_INT align = 1;
17615 unsigned HOST_WIDE_INT count = 0;
17616 HOST_WIDE_INT expected_size = -1;
17617 int size_needed = 0, epilogue_size_needed;
17618 int desired_align = 0;
17619 enum stringop_alg alg;
17620 rtx promoted_val = NULL;
17621 bool force_loopy_epilogue = false;
17622 int dynamic_check;
17623 bool need_zero_guard = false;
17625 if (CONST_INT_P (align_exp))
17626 align = INTVAL (align_exp);
17627 /* i386 can do misaligned access at a reasonably small extra cost. */
17628 if (CONST_INT_P (expected_align_exp)
17629 && INTVAL (expected_align_exp) > align)
17630 align = INTVAL (expected_align_exp);
17631 if (CONST_INT_P (count_exp))
17632 count = expected_size = INTVAL (count_exp);
17633 if (CONST_INT_P (expected_size_exp) && count == 0)
17634 expected_size = INTVAL (expected_size_exp);
17636 /* Make sure we don't need to care about overflow later on. */
17637 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
17638 return 0;
17640 /* Step 0: Decide on preferred algorithm, desired alignment and
17641 size of chunks to be copied by main loop. */
17643 alg = decide_alg (count, expected_size, true, &dynamic_check);
17644 desired_align = decide_alignment (align, alg, expected_size);
17646 if (!TARGET_ALIGN_STRINGOPS)
17647 align = desired_align;
17649 if (alg == libcall)
17650 return 0;
17651 gcc_assert (alg != no_stringop);
17652 if (!count)
17653 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
17654 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
17655 switch (alg)
17657 case libcall:
17658 case no_stringop:
17659 gcc_unreachable ();
17660 case loop:
17661 need_zero_guard = true;
17662 size_needed = GET_MODE_SIZE (Pmode);
17663 break;
17664 case unrolled_loop:
17665 need_zero_guard = true;
17666 size_needed = GET_MODE_SIZE (Pmode) * 4;
17667 break;
17668 case rep_prefix_8_byte:
17669 size_needed = 8;
17670 break;
17671 case rep_prefix_4_byte:
17672 size_needed = 4;
17673 break;
17674 case rep_prefix_1_byte:
17675 size_needed = 1;
17676 break;
17677 case loop_1_byte:
17678 need_zero_guard = true;
17679 size_needed = 1;
17680 break;
17682 epilogue_size_needed = size_needed;
17684 /* Step 1: Prologue guard. */
17686 /* Alignment code needs count to be in register. */
17687 if (CONST_INT_P (count_exp) && desired_align > align)
17689 enum machine_mode mode = SImode;
17690 if (TARGET_64BIT && (count & ~0xffffffff))
17691 mode = DImode;
17692 count_exp = force_reg (mode, count_exp);
17694 /* Do the cheap promotion to allow better CSE across the
17695 main loop and epilogue (i.e., one load of the big constant in
17696 front of all the code). */
17697 if (CONST_INT_P (val_exp))
17698 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
17699 desired_align, align);
17700 /* Ensure that alignment prologue won't copy past end of block. */
17701 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
17703 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
17704 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
17705 Make sure it is a power of 2. */
17706 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
17708 /* To improve performance of small blocks, we jump around the VAL
17709 promoting code. This means that if the promoted VAL is not constant,
17710 we might not use it in the epilogue and have to use the byte
17711 loop variant. */
17712 if (epilogue_size_needed > 2 && !promoted_val)
17713 force_loopy_epilogue = true;
17714 label = gen_label_rtx ();
17715 emit_cmp_and_jump_insns (count_exp,
17716 GEN_INT (epilogue_size_needed),
17717 LTU, 0, counter_mode (count_exp), 1, label);
17718 if (GET_CODE (count_exp) == CONST_INT)
17719 ;
17720 else if (expected_size == -1 || expected_size <= epilogue_size_needed)
17721 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17722 else
17723 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17725 if (dynamic_check != -1)
17727 rtx hot_label = gen_label_rtx ();
17728 jump_around_label = gen_label_rtx ();
17729 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
17730 LEU, 0, counter_mode (count_exp), 1, hot_label);
17731 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17732 set_storage_via_libcall (dst, count_exp, val_exp, false);
17733 emit_jump (jump_around_label);
17734 emit_label (hot_label);
17737 /* Step 2: Alignment prologue. */
17739 /* Do the expensive promotion once we have branched off the small blocks. */
17740 if (!promoted_val)
17741 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
17742 desired_align, align);
17743 gcc_assert (desired_align >= 1 && align >= 1);
17745 if (desired_align > align)
17747 /* Except for the first move in epilogue, we no longer know
17748 constant offset in aliasing info. It doesn't seem worth
17749 the pain to maintain it for the first move, so throw away
17750 the info early. */
17751 dst = change_address (dst, BLKmode, destreg);
17752 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
17753 desired_align);
17754 if (need_zero_guard && !count)
17756 /* It is possible that we copied enough so the main loop will not
17757 execute. */
17758 emit_cmp_and_jump_insns (count_exp,
17759 GEN_INT (size_needed),
17760 LTU, 0, counter_mode (count_exp), 1, label);
17761 if (expected_size == -1
17762 || expected_size < (desired_align - align) / 2 + size_needed)
17763 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17764 else
17765 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17768 if (label && size_needed == 1)
17770 emit_label (label);
17771 LABEL_NUSES (label) = 1;
17772 label = NULL;
17775 /* Step 3: Main loop. */
17777 switch (alg)
17779 case libcall:
17780 case no_stringop:
17781 gcc_unreachable ();
17782 case loop_1_byte:
17783 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
17784 count_exp, QImode, 1, expected_size);
17785 break;
17786 case loop:
17787 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
17788 count_exp, Pmode, 1, expected_size);
17789 break;
17790 case unrolled_loop:
17791 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
17792 count_exp, Pmode, 4, expected_size);
17793 break;
17794 case rep_prefix_8_byte:
17795 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
17796 DImode);
17797 break;
17798 case rep_prefix_4_byte:
17799 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
17800 SImode);
17801 break;
17802 case rep_prefix_1_byte:
17803 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
17804 QImode);
17805 break;
17807 /* Properly adjust the offset of the dest memory for aliasing. */
17808 if (CONST_INT_P (count_exp))
17809 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
17810 (count / size_needed) * size_needed);
17811 else
17812 dst = change_address (dst, BLKmode, destreg);
17814 /* Step 4: Epilogue to set the remaining bytes. */
17816 if (label)
17818 /* When the main loop is done, COUNT_EXP might hold original count,
17819 while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
17820 Epilogue code will actually set COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
17821 bytes. Compensate if needed. */
17823 if (size_needed < desired_align - align)
17825 tmp =
17826 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
17827 GEN_INT (size_needed - 1), count_exp, 1,
17828 OPTAB_DIRECT);
17829 size_needed = desired_align - align + 1;
17830 if (tmp != count_exp)
17831 emit_move_insn (count_exp, tmp);
17833 emit_label (label);
17834 LABEL_NUSES (label) = 1;
17836 if (count_exp != const0_rtx && epilogue_size_needed > 1)
17838 if (force_loopy_epilogue)
17839 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
17840 size_needed);
17841 else
17842 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
17843 size_needed);
17845 if (jump_around_label)
17846 emit_label (jump_around_label);
17847 return 1;
17850 /* Expand the appropriate insns for doing strlen if not just doing
17851 repnz; scasb
17853 out = result, initialized with the start address
17854 align_rtx = alignment of the address.
17855 scratch = scratch register, initialized with the start address when
17856 not aligned, otherwise undefined
17858 This is just the body. It needs the initializations mentioned above and
17859 some address computation at the end. These things are done in i386.md. */
17861 static void
17862 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
17864 int align;
17865 rtx tmp;
17866 rtx align_2_label = NULL_RTX;
17867 rtx align_3_label = NULL_RTX;
17868 rtx align_4_label = gen_label_rtx ();
17869 rtx end_0_label = gen_label_rtx ();
17870 rtx mem;
17871 rtx tmpreg = gen_reg_rtx (SImode);
17872 rtx scratch = gen_reg_rtx (SImode);
17873 rtx cmp;
17875 align = 0;
17876 if (CONST_INT_P (align_rtx))
17877 align = INTVAL (align_rtx);
17879 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
17881 /* Is there a known alignment and is it less than 4? */
17882 if (align < 4)
17884 rtx scratch1 = gen_reg_rtx (Pmode);
17885 emit_move_insn (scratch1, out);
17886 /* Is there a known alignment and is it not 2? */
17887 if (align != 2)
17889 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
17890 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
17892 /* Leave just the 3 lower bits. */
17893 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
17894 NULL_RTX, 0, OPTAB_WIDEN);
17896 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
17897 Pmode, 1, align_4_label);
17898 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
17899 Pmode, 1, align_2_label);
17900 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
17901 Pmode, 1, align_3_label);
17903 else
17905 /* Since the alignment is 2, we have to check 2 or 0 bytes;
17906 check if it is aligned to a 4-byte boundary. */
17908 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
17909 NULL_RTX, 0, OPTAB_WIDEN);
17911 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
17912 Pmode, 1, align_4_label);
17915 mem = change_address (src, QImode, out);
17917 /* Now compare the bytes. */
17919 /* Compare the first n unaligned bytes one byte at a time. */
17920 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
17921 QImode, 1, end_0_label);
17923 /* Increment the address. */
17924 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
17926 /* Not needed with an alignment of 2 */
17927 if (align != 2)
17929 emit_label (align_2_label);
17931 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
17932 end_0_label);
17934 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
17936 emit_label (align_3_label);
17939 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
17940 end_0_label);
17942 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
17945 /* Generate loop to check 4 bytes at a time. It is not a good idea to
17946 align this loop. It only makes the program larger and does not
17947 help to speed it up. */
17948 emit_label (align_4_label);
17950 mem = change_address (src, SImode, out);
17951 emit_move_insn (scratch, mem);
17952 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
17954 /* This formula yields a nonzero result iff one of the bytes is zero.
17955 This saves three branches inside the loop and many cycles. */
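/* Worked example (editorial note, not part of the original source):
   for scratch == 0x41420043, whose second-lowest byte is zero:

     tmpreg  = 0x41420043 - 0x01010101 = 0x4040FF42
     scratch = ~0x41420043             = 0xBEBDFFBC
     tmpreg &= scratch                 = 0x0000FF00
     tmpreg &= 0x80808080              = 0x00008000  (nonzero)

   A borrow chain can only start at a zero byte, and the ~scratch
   mask filters out bytes whose high bit was already set, so the
   result is nonzero exactly when some byte of SCRATCH is zero.  */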
17957 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
17958 emit_insn (gen_one_cmplsi2 (scratch, scratch));
17959 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
17960 emit_insn (gen_andsi3 (tmpreg, tmpreg,
17961 gen_int_mode (0x80808080, SImode)));
17962 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
17963 align_4_label);
17965 if (TARGET_CMOVE)
17967 rtx reg = gen_reg_rtx (SImode);
17968 rtx reg2 = gen_reg_rtx (Pmode);
17969 emit_move_insn (reg, tmpreg);
17970 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
17972 /* If zero is not in the first two bytes, move two bytes forward. */
17973 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
17974 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17975 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
17976 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
17977 gen_rtx_IF_THEN_ELSE (SImode, tmp,
17978 reg,
17979 tmpreg)));
17980 /* Emit lea manually to avoid clobbering the flags. */
17981 emit_insn (gen_rtx_SET (SImode, reg2,
17982 gen_rtx_PLUS (Pmode, out, const2_rtx)));
17984 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17985 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
17986 emit_insn (gen_rtx_SET (VOIDmode, out,
17987 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
17988 reg2,
17989 out)));
17992 else
17994 rtx end_2_label = gen_label_rtx ();
17995 /* Is zero in the first two bytes? */
17997 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
17998 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17999 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
18000 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
18001 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
18002 pc_rtx);
18003 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
18004 JUMP_LABEL (tmp) = end_2_label;
18006 /* Not in the first two. Move two bytes forward. */
18007 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
18008 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
18010 emit_label (end_2_label);
18014 /* Avoid a branch in fixing up the byte. */
18015 tmpreg = gen_lowpart (QImode, tmpreg);
18016 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
18017 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
18018 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), cmp));
18020 emit_label (end_0_label);
18023 /* Expand strlen. */
18025 int
18026 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
18028 rtx addr, scratch1, scratch2, scratch3, scratch4;
18030 /* The generic case of the strlen expander is long. Avoid its
18031 expansion unless TARGET_INLINE_ALL_STRINGOPS. */
18033 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
18034 && !TARGET_INLINE_ALL_STRINGOPS
18035 && !optimize_insn_for_size_p ()
18036 && (!CONST_INT_P (align) || INTVAL (align) < 4))
18037 return 0;
18039 addr = force_reg (Pmode, XEXP (src, 0));
18040 scratch1 = gen_reg_rtx (Pmode);
18042 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
18043 && !optimize_insn_for_size_p ())
18045 /* Well it seems that some optimizer does not combine a call like
18046 foo(strlen(bar), strlen(bar));
18047 when the move and the subtraction are done here. It calculates
18048 the length just once when these instructions are done inside
18049 output_strlen_unroll(). But since &bar[strlen(bar)] is
18050 often used and this uses one fewer register for the lifetime of
18051 output_strlen_unroll(), this is better. */
18053 emit_move_insn (out, addr);
18055 ix86_expand_strlensi_unroll_1 (out, src, align);
18057 /* strlensi_unroll_1 returns the address of the zero at the end of
18058 the string, like memchr(), so compute the length by subtracting
18059 the start address. */
18060 emit_insn ((*ix86_gen_sub3) (out, out, addr));
18062 else
18064 rtx unspec;
18066 /* Can't use this if the user has appropriated eax, ecx, or edi. */
18067 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
18068 return false;
18070 scratch2 = gen_reg_rtx (Pmode);
18071 scratch3 = gen_reg_rtx (Pmode);
18072 scratch4 = force_reg (Pmode, constm1_rtx);
18074 emit_move_insn (scratch3, addr);
18075 eoschar = force_reg (QImode, eoschar);
18077 src = replace_equiv_address_nv (src, scratch3);
18079 /* If .md starts supporting :P, this can be done in .md. */
18080 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
18081 scratch4), UNSPEC_SCAS);
18082 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
18083 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
18084 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
18086 return 1;
18089 /* For a given symbol (function), construct code to compute the address
18090 of its PLT entry in the large x86-64 PIC model. */
18091 static rtx
18092 construct_plt_address (rtx symbol)
18094 rtx tmp = gen_reg_rtx (Pmode);
18095 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
18097 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
18098 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
18100 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
18101 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
18102 return tmp;
18105 void
18106 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
18107 rtx callarg2 ATTRIBUTE_UNUSED,
18108 rtx pop, int sibcall)
18110 rtx use = NULL, call;
18112 if (pop == const0_rtx)
18113 pop = NULL;
18114 gcc_assert (!TARGET_64BIT || !pop);
18116 if (TARGET_MACHO && !TARGET_64BIT)
18118 #if TARGET_MACHO
18119 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
18120 fnaddr = machopic_indirect_call_target (fnaddr);
18121 #endif
18123 else
18125 /* Static functions and indirect calls don't need the pic register. */
18126 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
18127 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
18128 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
18129 use_reg (&use, pic_offset_table_rtx);
18132 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
18134 rtx al = gen_rtx_REG (QImode, AX_REG);
18135 emit_move_insn (al, callarg2);
18136 use_reg (&use, al);
18139 if (ix86_cmodel == CM_LARGE_PIC
18140 && GET_CODE (fnaddr) == MEM
18141 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
18142 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
18143 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
18144 else if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
18146 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
18147 fnaddr = gen_rtx_MEM (QImode, fnaddr);
18149 if (sibcall && TARGET_64BIT
18150 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
18152 rtx addr;
18153 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
18154 fnaddr = gen_rtx_REG (Pmode, R11_REG);
18155 emit_move_insn (fnaddr, addr);
18156 fnaddr = gen_rtx_MEM (QImode, fnaddr);
18159 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
18160 if (retval)
18161 call = gen_rtx_SET (VOIDmode, retval, call);
18162 if (pop)
18164 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
18165 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
18166 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
18169 call = emit_call_insn (call);
18170 if (use)
18171 CALL_INSN_FUNCTION_USAGE (call) = use;
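/* Illustrative example (editorial note, not part of the original
   source): for a 32-bit stdcall callee that pops 8 bytes of
   arguments, POP arrives as (const_int 8) and the call above is
   wrapped in a PARALLEL with
   (set (reg sp) (plus (reg sp) (const_int 8))).  */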
18175 /* Clear stack slot assignments remembered from previous functions.
18176 This is called from INIT_EXPANDERS once before RTL is emitted for each
18177 function. */
18179 static struct machine_function *
18180 ix86_init_machine_status (void)
18182 struct machine_function *f;
18184 f = GGC_CNEW (struct machine_function);
18185 f->use_fast_prologue_epilogue_nregs = -1;
18186 f->tls_descriptor_call_expanded_p = 0;
18187 f->call_abi = DEFAULT_ABI;
18189 return f;
18192 /* Return a MEM corresponding to a stack slot with mode MODE.
18193 Allocate a new slot if necessary.
18195 The RTL for a function can have several slots available: N is
18196 which slot to use. */
18198 rtx
18199 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
18201 struct stack_local_entry *s;
18203 gcc_assert (n < MAX_386_STACK_LOCALS);
18205 /* Virtual slot is valid only before vregs are instantiated. */
18206 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
18208 for (s = ix86_stack_locals; s; s = s->next)
18209 if (s->mode == mode && s->n == n)
18210 return copy_rtx (s->rtl);
18212 s = (struct stack_local_entry *)
18213 ggc_alloc (sizeof (struct stack_local_entry));
18214 s->n = n;
18215 s->mode = mode;
18216 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
18218 s->next = ix86_stack_locals;
18219 ix86_stack_locals = s;
18220 return s->rtl;
18223 /* Construct the SYMBOL_REF for the tls_get_addr function. */
18225 static GTY(()) rtx ix86_tls_symbol;
18226 static rtx
18227 ix86_tls_get_addr (void)
18230 if (!ix86_tls_symbol)
18232 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
18233 (TARGET_ANY_GNU_TLS
18234 && !TARGET_64BIT)
18235 ? "___tls_get_addr"
18236 : "__tls_get_addr");
18239 return ix86_tls_symbol;
18242 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
18244 static GTY(()) rtx ix86_tls_module_base_symbol;
18245 static rtx
18246 ix86_tls_module_base (void)
18249 if (!ix86_tls_module_base_symbol)
18251 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
18252 "_TLS_MODULE_BASE_");
18253 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
18254 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
18257 return ix86_tls_module_base_symbol;
18260 /* Calculate the length of the memory address in the instruction
18261 encoding. Does not include the one-byte modrm, opcode, or prefix. */
18263 int
18264 memory_address_length (rtx addr)
18266 struct ix86_address parts;
18267 rtx base, index, disp;
18268 int len;
18269 int ok;
18271 if (GET_CODE (addr) == PRE_DEC
18272 || GET_CODE (addr) == POST_INC
18273 || GET_CODE (addr) == PRE_MODIFY
18274 || GET_CODE (addr) == POST_MODIFY)
18275 return 0;
18277 ok = ix86_decompose_address (addr, &parts);
18278 gcc_assert (ok);
18280 if (parts.base && GET_CODE (parts.base) == SUBREG)
18281 parts.base = SUBREG_REG (parts.base);
18282 if (parts.index && GET_CODE (parts.index) == SUBREG)
18283 parts.index = SUBREG_REG (parts.index);
18285 base = parts.base;
18286 index = parts.index;
18287 disp = parts.disp;
18288 len = 0;
18290 /* Rule of thumb:
18291 - esp as the base always wants an index,
18292 - ebp as the base always wants a displacement. */
18294 /* Register Indirect. */
18295 if (base && !index && !disp)
18297 /* esp (for its index) and ebp (for its displacement) need
18298 the two-byte modrm form. */
18299 if (addr == stack_pointer_rtx
18300 || addr == arg_pointer_rtx
18301 || addr == frame_pointer_rtx
18302 || addr == hard_frame_pointer_rtx)
18303 len = 1;
18306 /* Direct Addressing. */
18307 else if (disp && !base && !index)
18308 len = 4;
18310 else
18312 /* Find the length of the displacement constant. */
18313 if (disp)
18315 if (base && satisfies_constraint_K (disp))
18316 len = 1;
18317 else
18318 len = 4;
18320 /* ebp always wants a displacement. */
18321 else if (base == hard_frame_pointer_rtx)
18322 len = 1;
18324 /* An index requires the two-byte modrm form.... */
18325 if (index
18326 /* ...like esp, which always wants an index. */
18327 || base == stack_pointer_rtx
18328 || base == arg_pointer_rtx
18329 || base == frame_pointer_rtx)
18330 len += 1;
18333 return len;
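/* Illustrative examples (editorial note, not part of the original
   source) of the byte counts returned above, excluding modrm,
   opcode and prefixes:

     (%eax)        -> 0  plain register indirect
     (%esp)        -> 1  SIB byte required
     8(%ebp)       -> 1  disp8
     symbol        -> 4  disp32, direct addressing
     8(%eax,%ebx)  -> 2  SIB byte plus disp8  */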
18336 /* Compute default value for "length_immediate" attribute. When SHORTFORM
18337 is set, expect that the insn has an 8-bit immediate alternative. */
18338 int
18339 ix86_attr_length_immediate_default (rtx insn, int shortform)
18341 int len = 0;
18342 int i;
18343 extract_insn_cached (insn);
18344 for (i = recog_data.n_operands - 1; i >= 0; --i)
18345 if (CONSTANT_P (recog_data.operand[i]))
18347 gcc_assert (!len);
18348 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
18349 len = 1;
18350 else
18352 switch (get_attr_mode (insn))
18354 case MODE_QI:
18355 len+=1;
18356 break;
18357 case MODE_HI:
18358 len+=2;
18359 break;
18360 case MODE_SI:
18361 len+=4;
18362 break;
18363 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
18364 case MODE_DI:
18365 len+=4;
18366 break;
18367 default:
18368 fatal_insn ("unknown insn mode", insn);
18372 return len;
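/* Illustrative examples (editorial note, not part of the original
   source), with SHORTFORM set:

     addl $4, %eax     -> 1  satisfies the 'K' signed 8-bit constraint
     addl $1000, %eax  -> 4  full SImode immediate
     movw $1000, %ax   -> 2  HImode immediate  */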
18374 /* Compute default value for "length_address" attribute. */
18375 int
18376 ix86_attr_length_address_default (rtx insn)
18378 int i;
18380 if (get_attr_type (insn) == TYPE_LEA)
18382 rtx set = PATTERN (insn);
18384 if (GET_CODE (set) == PARALLEL)
18385 set = XVECEXP (set, 0, 0);
18387 gcc_assert (GET_CODE (set) == SET);
18389 return memory_address_length (SET_SRC (set));
18392 extract_insn_cached (insn);
18393 for (i = recog_data.n_operands - 1; i >= 0; --i)
18394 if (MEM_P (recog_data.operand[i]))
18396 return memory_address_length (XEXP (recog_data.operand[i], 0));
18397 break;
18399 return 0;
18402 /* Compute default value for "length_vex" attribute. It includes
18403 the 2 or 3 byte VEX prefix and 1 opcode byte. */
18405 int
18406 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
18407 int has_vex_w)
18409 int i;
18411 /* Only the 0f opcode can use the 2-byte VEX prefix, and the VEX.W
18412 bit requires the 3-byte VEX prefix. */
18413 if (!has_0f_opcode || has_vex_w)
18414 return 3 + 1;
18416 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
18417 if (!TARGET_64BIT)
18418 return 2 + 1;
18420 extract_insn_cached (insn);
18422 for (i = recog_data.n_operands - 1; i >= 0; --i)
18423 if (REG_P (recog_data.operand[i]))
18425 /* REX.W bit uses 3 byte VEX prefix. */
18426 if (GET_MODE (recog_data.operand[i]) == DImode)
18427 return 3 + 1;
18429 else
18431 /* REX.X or REX.B bits use 3 byte VEX prefix. */
18432 if (MEM_P (recog_data.operand[i])
18433 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
18434 return 3 + 1;
18437 return 2 + 1;
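/* Illustrative summary (editorial note, not part of the original
   source) of the lengths returned above (VEX prefix plus one opcode
   byte):

     non-0f opcode or VEX.W set              -> 3 + 1
     32-bit target                           -> 2 + 1
     64-bit, DImode register operand         -> 3 + 1  (needs REX.W)
     64-bit, extended reg in memory operand  -> 3 + 1  (needs REX.X/REX.B)
     otherwise                               -> 2 + 1  */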
18440 /* Return the maximum number of instructions a cpu can issue. */
18442 static int
18443 ix86_issue_rate (void)
18445 switch (ix86_tune)
18447 case PROCESSOR_PENTIUM:
18448 case PROCESSOR_K6:
18449 return 2;
18451 case PROCESSOR_PENTIUMPRO:
18452 case PROCESSOR_PENTIUM4:
18453 case PROCESSOR_ATHLON:
18454 case PROCESSOR_K8:
18455 case PROCESSOR_AMDFAM10:
18456 case PROCESSOR_NOCONA:
18457 case PROCESSOR_GENERIC32:
18458 case PROCESSOR_GENERIC64:
18459 return 3;
18461 case PROCESSOR_CORE2:
18462 return 4;
18464 default:
18465 return 1;
18469 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
18470 by DEP_INSN and nothing else set by DEP_INSN. */
18472 static int
18473 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
18475 rtx set, set2;
18477 /* Simplify the test for uninteresting insns. */
18478 if (insn_type != TYPE_SETCC
18479 && insn_type != TYPE_ICMOV
18480 && insn_type != TYPE_FCMOV
18481 && insn_type != TYPE_IBR)
18482 return 0;
18484 if ((set = single_set (dep_insn)) != 0)
18486 set = SET_DEST (set);
18487 set2 = NULL_RTX;
18489 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
18490 && XVECLEN (PATTERN (dep_insn), 0) == 2
18491 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
18492 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
18494 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
18495 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
18497 else
18498 return 0;
18500 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
18501 return 0;
18503 /* This test is true if the dependent insn reads the flags but
18504 not any other potentially set register. */
18505 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
18506 return 0;
18508 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
18509 return 0;
18511 return 1;
18514 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
18515 address with operands set by DEP_INSN. */
18517 static int
18518 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
18520 rtx addr;
18522 if (insn_type == TYPE_LEA
18523 && TARGET_PENTIUM)
18525 addr = PATTERN (insn);
18527 if (GET_CODE (addr) == PARALLEL)
18528 addr = XVECEXP (addr, 0, 0);
18530 gcc_assert (GET_CODE (addr) == SET);
18532 addr = SET_SRC (addr);
18534 else
18536 int i;
18537 extract_insn_cached (insn);
18538 for (i = recog_data.n_operands - 1; i >= 0; --i)
18539 if (MEM_P (recog_data.operand[i]))
18541 addr = XEXP (recog_data.operand[i], 0);
18542 goto found;
18544 return 0;
18545 found:;
18548 return modified_in_p (addr, dep_insn);
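/* Illustrative example (editorial note, not part of the original
   source): on the Pentium the sequence

     addl %ebx, %eax
     movl (%eax), %ecx

   is an address-generation interlock: the load's address register is
   modified by the immediately preceding insn, which ix86_adjust_cost
   below charges as one extra cycle.  */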
18551 static int
18552 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
18554 enum attr_type insn_type, dep_insn_type;
18555 enum attr_memory memory;
18556 rtx set, set2;
18557 int dep_insn_code_number;
18559 /* Anti and output dependencies have zero cost on all CPUs. */
18560 if (REG_NOTE_KIND (link) != 0)
18561 return 0;
18563 dep_insn_code_number = recog_memoized (dep_insn);
18565 /* If we can't recognize the insns, we can't really do anything. */
18566 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
18567 return cost;
18569 insn_type = get_attr_type (insn);
18570 dep_insn_type = get_attr_type (dep_insn);
18572 switch (ix86_tune)
18574 case PROCESSOR_PENTIUM:
18575 /* Address Generation Interlock adds a cycle of latency. */
18576 if (ix86_agi_dependent (insn, dep_insn, insn_type))
18577 cost += 1;
18579 /* ??? Compares pair with jump/setcc. */
18580 if (ix86_flags_dependent (insn, dep_insn, insn_type))
18581 cost = 0;
18583 /* Floating point stores require value to be ready one cycle earlier. */
18584 if (insn_type == TYPE_FMOV
18585 && get_attr_memory (insn) == MEMORY_STORE
18586 && !ix86_agi_dependent (insn, dep_insn, insn_type))
18587 cost += 1;
18588 break;
18590 case PROCESSOR_PENTIUMPRO:
18591 memory = get_attr_memory (insn);
18593 /* INT->FP conversion is expensive. */
18594 if (get_attr_fp_int_src (dep_insn))
18595 cost += 5;
18597 /* There is one cycle extra latency between an FP op and a store. */
18598 if (insn_type == TYPE_FMOV
18599 && (set = single_set (dep_insn)) != NULL_RTX
18600 && (set2 = single_set (insn)) != NULL_RTX
18601 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
18602 && MEM_P (SET_DEST (set2)))
18603 cost += 1;
18605 /* Show the ability of the reorder buffer to hide the latency of a load
18606 by executing it in parallel with the previous instruction when the
18607 previous instruction is not needed to compute the address. */
18608 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
18609 && !ix86_agi_dependent (insn, dep_insn, insn_type))
18611 /* Claim moves to take one cycle, as the core can issue one load
18612 at a time and the next load can start a cycle later. */
18613 if (dep_insn_type == TYPE_IMOV
18614 || dep_insn_type == TYPE_FMOV)
18615 cost = 1;
18616 else if (cost > 1)
18617 cost--;
18619 break;
18621 case PROCESSOR_K6:
18622 memory = get_attr_memory (insn);
18624 /* The esp dependency is resolved before the instruction is really
18625 finished. */
18626 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
18627 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
18628 return 1;
18630 /* INT->FP conversion is expensive. */
18631 if (get_attr_fp_int_src (dep_insn))
18632 cost += 5;
18634 /* Show the ability of the reorder buffer to hide the latency of a load
18635 by executing it in parallel with the previous instruction when the
18636 previous instruction is not needed to compute the address. */
18637 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
18638 && !ix86_agi_dependent (insn, dep_insn, insn_type))
18640 /* Claim moves to take one cycle, as the core can issue one load
18641 at a time and the next load can start a cycle later. */
18642 if (dep_insn_type == TYPE_IMOV
18643 || dep_insn_type == TYPE_FMOV)
18644 cost = 1;
18645 else if (cost > 2)
18646 cost -= 2;
18647 else
18648 cost = 1;
18650 break;
18652 case PROCESSOR_ATHLON:
18653 case PROCESSOR_K8:
18654 case PROCESSOR_AMDFAM10:
18655 case PROCESSOR_GENERIC32:
18656 case PROCESSOR_GENERIC64:
18657 memory = get_attr_memory (insn);
18659 /* Show the ability of the reorder buffer to hide the latency of a load
18660 by executing it in parallel with the previous instruction when the
18661 previous instruction is not needed to compute the address. */
18662 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
18663 && !ix86_agi_dependent (insn, dep_insn, insn_type))
18665 enum attr_unit unit = get_attr_unit (insn);
18666 int loadcost = 3;
18668 /* Because of the difference between the lengths of the integer and
18669 floating unit pipeline preparation stages, the memory operands
18670 for floating point are cheaper.
18672 ??? For Athlon the difference is most probably 2. */
18673 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
18674 loadcost = 3;
18675 else
18676 loadcost = TARGET_ATHLON ? 2 : 0;
18678 if (cost >= loadcost)
18679 cost -= loadcost;
18680 else
18681 cost = 0;
18684 default:
18685 break;
18688 return cost;
18691 /* How many alternative schedules to try. This should be as wide as the
18692 scheduling freedom in the DFA, but no wider. Making this value too
18693 large results in extra work for the scheduler. */
18695 static int
18696 ia32_multipass_dfa_lookahead (void)
18698 switch (ix86_tune)
18700 case PROCESSOR_PENTIUM:
18701 return 2;
18703 case PROCESSOR_PENTIUMPRO:
18704 case PROCESSOR_K6:
18705 return 1;
18707 default:
18708 return 0;
18713 /* Compute the alignment given to a constant that is being placed in memory.
18714 EXP is the constant and ALIGN is the alignment that the object would
18715 ordinarily have.
18716 The value of this function is used instead of that alignment to align
18717 the object. */
18719 int
18720 ix86_constant_alignment (tree exp, int align)
18722 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
18723 || TREE_CODE (exp) == INTEGER_CST)
18725 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
18726 return 64;
18727 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
18728 return 128;
18730 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
18731 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
18732 return BITS_PER_WORD;
18734 return align;
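/* Illustrative examples (editorial note, not part of the original
   source): a DFmode constant that would ordinarily get 32-bit
   alignment is raised to 64 so it can be loaded without penalty,
   and a string constant of 31 or more bytes is raised to
   BITS_PER_WORD unless optimizing for size.  */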
18737 /* Compute the alignment for a static variable.
18738 TYPE is the data type, and ALIGN is the alignment that
18739 the object would ordinarily have. The value of this function is used
18740 instead of that alignment to align the object. */
18742 int
18743 ix86_data_alignment (tree type, int align)
18745 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
18747 if (AGGREGATE_TYPE_P (type)
18748 && TYPE_SIZE (type)
18749 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
18750 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
18751 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
18752 && align < max_align)
18753 align = max_align;
18755 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
18756 to a 16-byte boundary. */
18757 if (TARGET_64BIT)
18759 if (AGGREGATE_TYPE_P (type)
18760 && TYPE_SIZE (type)
18761 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
18762 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
18763 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
18764 return 128;
18767 if (TREE_CODE (type) == ARRAY_TYPE)
18769 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
18770 return 64;
18771 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
18772 return 128;
18774 else if (TREE_CODE (type) == COMPLEX_TYPE)
18777 if (TYPE_MODE (type) == DCmode && align < 64)
18778 return 64;
18779 if ((TYPE_MODE (type) == XCmode
18780 || TYPE_MODE (type) == TCmode) && align < 128)
18781 return 128;
18783 else if ((TREE_CODE (type) == RECORD_TYPE
18784 || TREE_CODE (type) == UNION_TYPE
18785 || TREE_CODE (type) == QUAL_UNION_TYPE)
18786 && TYPE_FIELDS (type))
18788 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
18789 return 64;
18790 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
18791 return 128;
18793 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
18794 || TREE_CODE (type) == INTEGER_TYPE)
18796 if (TYPE_MODE (type) == DFmode && align < 64)
18797 return 64;
18798 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
18799 return 128;
18802 return align;
18805 /* Compute the alignment for a local variable or a stack slot. TYPE is
18806 the data type, MODE is the widest mode available and ALIGN is the
18807 alignment that the object would ordinarily have. The value of this
18808 macro is used instead of that alignment to align the object. */
18810 unsigned int
18811 ix86_local_alignment (tree type, enum machine_mode mode,
18812 unsigned int align)
18814 /* If TYPE is NULL, we are allocating a stack slot for a caller-save
18815 register in MODE. We will return the largest alignment of XF
18816 and DF. */
18817 if (!type)
18819 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
18820 align = GET_MODE_ALIGNMENT (DFmode);
18821 return align;
18824 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
18825 to a 16-byte boundary. */
18826 if (TARGET_64BIT)
18828 if (AGGREGATE_TYPE_P (type)
18829 && TYPE_SIZE (type)
18830 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
18831 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
18832 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
18833 return 128;
18835 if (TREE_CODE (type) == ARRAY_TYPE)
18837 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
18838 return 64;
18839 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
18840 return 128;
18842 else if (TREE_CODE (type) == COMPLEX_TYPE)
18844 if (TYPE_MODE (type) == DCmode && align < 64)
18845 return 64;
18846 if ((TYPE_MODE (type) == XCmode
18847 || TYPE_MODE (type) == TCmode) && align < 128)
18848 return 128;
18850 else if ((TREE_CODE (type) == RECORD_TYPE
18851 || TREE_CODE (type) == UNION_TYPE
18852 || TREE_CODE (type) == QUAL_UNION_TYPE)
18853 && TYPE_FIELDS (type))
18855 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
18856 return 64;
18857 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
18858 return 128;
18860 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
18861 || TREE_CODE (type) == INTEGER_TYPE)
18864 if (TYPE_MODE (type) == DFmode && align < 64)
18865 return 64;
18866 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
18867 return 128;
18869 return align;
18872 /* Emit RTL insns to initialize the variable parts of a trampoline.
18873 FNADDR is an RTX for the address of the function's pure code.
18874 CXT is an RTX for the static chain value for the function. */
18875 void
18876 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
18878 if (!TARGET_64BIT)
18880 /* Compute offset from the end of the jmp to the target function. */
18881 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
18882 plus_constant (tramp, 10),
18883 NULL_RTX, 1, OPTAB_DIRECT);
18884 emit_move_insn (gen_rtx_MEM (QImode, tramp),
18885 gen_int_mode (0xb9, QImode));
18886 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
18887 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
18888 gen_int_mode (0xe9, QImode));
18889 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
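/* Editorial note (not part of the original source): the 10 bytes
   emitted above are

     b9 <4-byte cxt>    movl $cxt, %ecx
     e9 <4-byte disp>   jmp  fnaddr   (disp relative to tramp + 10)  */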
18891 else
18893 int offset = 0;
18894 /* Try to load the address using the shorter movl instead of movabs.
18895 We may want to support movq for kernel mode, but the kernel does not
18896 use trampolines at the moment. */
18897 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
18899 fnaddr = copy_to_mode_reg (DImode, fnaddr);
18900 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18901 gen_int_mode (0xbb41, HImode));
18902 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
18903 gen_lowpart (SImode, fnaddr));
18904 offset += 6;
18906 else
18908 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18909 gen_int_mode (0xbb49, HImode));
18910 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
18911 fnaddr);
18912 offset += 10;
18914 /* Load static chain using movabs to r10. */
18915 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18916 gen_int_mode (0xba49, HImode));
18917 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
18918 cxt);
18919 offset += 10;
18920 /* Jump to r11. */
18921 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18922 gen_int_mode (0xff49, HImode));
18923 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
18924 gen_int_mode (0xe3, QImode));
18925 offset += 3;
18926 gcc_assert (offset <= TRAMPOLINE_SIZE);
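/* Editorial note (not part of the original source): in the movabs
   form the 23 bytes emitted above are

     49 bb <8-byte fnaddr>   movabs $fnaddr, %r11
     49 ba <8-byte cxt>      movabs $cxt, %r10
     49 ff e3                jmp    *%r11  */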
18929 #ifdef ENABLE_EXECUTE_STACK
18930 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
18931 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
18932 #endif
18935 /* Codes for all the SSE/MMX builtins. */
18936 enum ix86_builtins
18938 IX86_BUILTIN_ADDPS,
18939 IX86_BUILTIN_ADDSS,
18940 IX86_BUILTIN_DIVPS,
18941 IX86_BUILTIN_DIVSS,
18942 IX86_BUILTIN_MULPS,
18943 IX86_BUILTIN_MULSS,
18944 IX86_BUILTIN_SUBPS,
18945 IX86_BUILTIN_SUBSS,
18947 IX86_BUILTIN_CMPEQPS,
18948 IX86_BUILTIN_CMPLTPS,
18949 IX86_BUILTIN_CMPLEPS,
18950 IX86_BUILTIN_CMPGTPS,
18951 IX86_BUILTIN_CMPGEPS,
18952 IX86_BUILTIN_CMPNEQPS,
18953 IX86_BUILTIN_CMPNLTPS,
18954 IX86_BUILTIN_CMPNLEPS,
18955 IX86_BUILTIN_CMPNGTPS,
18956 IX86_BUILTIN_CMPNGEPS,
18957 IX86_BUILTIN_CMPORDPS,
18958 IX86_BUILTIN_CMPUNORDPS,
18959 IX86_BUILTIN_CMPEQSS,
18960 IX86_BUILTIN_CMPLTSS,
18961 IX86_BUILTIN_CMPLESS,
18962 IX86_BUILTIN_CMPNEQSS,
18963 IX86_BUILTIN_CMPNLTSS,
18964 IX86_BUILTIN_CMPNLESS,
18965 IX86_BUILTIN_CMPNGTSS,
18966 IX86_BUILTIN_CMPNGESS,
18967 IX86_BUILTIN_CMPORDSS,
18968 IX86_BUILTIN_CMPUNORDSS,
18970 IX86_BUILTIN_COMIEQSS,
18971 IX86_BUILTIN_COMILTSS,
18972 IX86_BUILTIN_COMILESS,
18973 IX86_BUILTIN_COMIGTSS,
18974 IX86_BUILTIN_COMIGESS,
18975 IX86_BUILTIN_COMINEQSS,
18976 IX86_BUILTIN_UCOMIEQSS,
18977 IX86_BUILTIN_UCOMILTSS,
18978 IX86_BUILTIN_UCOMILESS,
18979 IX86_BUILTIN_UCOMIGTSS,
18980 IX86_BUILTIN_UCOMIGESS,
18981 IX86_BUILTIN_UCOMINEQSS,
18983 IX86_BUILTIN_CVTPI2PS,
18984 IX86_BUILTIN_CVTPS2PI,
18985 IX86_BUILTIN_CVTSI2SS,
18986 IX86_BUILTIN_CVTSI642SS,
18987 IX86_BUILTIN_CVTSS2SI,
18988 IX86_BUILTIN_CVTSS2SI64,
18989 IX86_BUILTIN_CVTTPS2PI,
18990 IX86_BUILTIN_CVTTSS2SI,
18991 IX86_BUILTIN_CVTTSS2SI64,
18993 IX86_BUILTIN_MAXPS,
18994 IX86_BUILTIN_MAXSS,
18995 IX86_BUILTIN_MINPS,
18996 IX86_BUILTIN_MINSS,
18998 IX86_BUILTIN_LOADUPS,
18999 IX86_BUILTIN_STOREUPS,
19000 IX86_BUILTIN_MOVSS,
19002 IX86_BUILTIN_MOVHLPS,
19003 IX86_BUILTIN_MOVLHPS,
19004 IX86_BUILTIN_LOADHPS,
19005 IX86_BUILTIN_LOADLPS,
19006 IX86_BUILTIN_STOREHPS,
19007 IX86_BUILTIN_STORELPS,
19009 IX86_BUILTIN_MASKMOVQ,
19010 IX86_BUILTIN_MOVMSKPS,
19011 IX86_BUILTIN_PMOVMSKB,
19013 IX86_BUILTIN_MOVNTPS,
19014 IX86_BUILTIN_MOVNTQ,
19016 IX86_BUILTIN_LOADDQU,
19017 IX86_BUILTIN_STOREDQU,
19019 IX86_BUILTIN_PACKSSWB,
19020 IX86_BUILTIN_PACKSSDW,
19021 IX86_BUILTIN_PACKUSWB,
19023 IX86_BUILTIN_PADDB,
19024 IX86_BUILTIN_PADDW,
19025 IX86_BUILTIN_PADDD,
19026 IX86_BUILTIN_PADDQ,
19027 IX86_BUILTIN_PADDSB,
19028 IX86_BUILTIN_PADDSW,
19029 IX86_BUILTIN_PADDUSB,
19030 IX86_BUILTIN_PADDUSW,
19031 IX86_BUILTIN_PSUBB,
19032 IX86_BUILTIN_PSUBW,
19033 IX86_BUILTIN_PSUBD,
19034 IX86_BUILTIN_PSUBQ,
19035 IX86_BUILTIN_PSUBSB,
19036 IX86_BUILTIN_PSUBSW,
19037 IX86_BUILTIN_PSUBUSB,
19038 IX86_BUILTIN_PSUBUSW,
19040 IX86_BUILTIN_PAND,
19041 IX86_BUILTIN_PANDN,
19042 IX86_BUILTIN_POR,
19043 IX86_BUILTIN_PXOR,
19045 IX86_BUILTIN_PAVGB,
19046 IX86_BUILTIN_PAVGW,
19048 IX86_BUILTIN_PCMPEQB,
19049 IX86_BUILTIN_PCMPEQW,
19050 IX86_BUILTIN_PCMPEQD,
19051 IX86_BUILTIN_PCMPGTB,
19052 IX86_BUILTIN_PCMPGTW,
19053 IX86_BUILTIN_PCMPGTD,
19055 IX86_BUILTIN_PMADDWD,
19057 IX86_BUILTIN_PMAXSW,
19058 IX86_BUILTIN_PMAXUB,
19059 IX86_BUILTIN_PMINSW,
19060 IX86_BUILTIN_PMINUB,
19062 IX86_BUILTIN_PMULHUW,
19063 IX86_BUILTIN_PMULHW,
19064 IX86_BUILTIN_PMULLW,
19066 IX86_BUILTIN_PSADBW,
19067 IX86_BUILTIN_PSHUFW,
19069 IX86_BUILTIN_PSLLW,
19070 IX86_BUILTIN_PSLLD,
19071 IX86_BUILTIN_PSLLQ,
19072 IX86_BUILTIN_PSRAW,
19073 IX86_BUILTIN_PSRAD,
19074 IX86_BUILTIN_PSRLW,
19075 IX86_BUILTIN_PSRLD,
19076 IX86_BUILTIN_PSRLQ,
19077 IX86_BUILTIN_PSLLWI,
19078 IX86_BUILTIN_PSLLDI,
19079 IX86_BUILTIN_PSLLQI,
19080 IX86_BUILTIN_PSRAWI,
19081 IX86_BUILTIN_PSRADI,
19082 IX86_BUILTIN_PSRLWI,
19083 IX86_BUILTIN_PSRLDI,
19084 IX86_BUILTIN_PSRLQI,
19086 IX86_BUILTIN_PUNPCKHBW,
19087 IX86_BUILTIN_PUNPCKHWD,
19088 IX86_BUILTIN_PUNPCKHDQ,
19089 IX86_BUILTIN_PUNPCKLBW,
19090 IX86_BUILTIN_PUNPCKLWD,
19091 IX86_BUILTIN_PUNPCKLDQ,
19093 IX86_BUILTIN_SHUFPS,
19095 IX86_BUILTIN_RCPPS,
19096 IX86_BUILTIN_RCPSS,
19097 IX86_BUILTIN_RSQRTPS,
19098 IX86_BUILTIN_RSQRTPS_NR,
19099 IX86_BUILTIN_RSQRTSS,
19100 IX86_BUILTIN_RSQRTF,
19101 IX86_BUILTIN_SQRTPS,
19102 IX86_BUILTIN_SQRTPS_NR,
19103 IX86_BUILTIN_SQRTSS,
19105 IX86_BUILTIN_UNPCKHPS,
19106 IX86_BUILTIN_UNPCKLPS,
19108 IX86_BUILTIN_ANDPS,
19109 IX86_BUILTIN_ANDNPS,
19110 IX86_BUILTIN_ORPS,
19111 IX86_BUILTIN_XORPS,
19113 IX86_BUILTIN_EMMS,
19114 IX86_BUILTIN_LDMXCSR,
19115 IX86_BUILTIN_STMXCSR,
19116 IX86_BUILTIN_SFENCE,
19118 /* 3DNow! Original */
19119 IX86_BUILTIN_FEMMS,
19120 IX86_BUILTIN_PAVGUSB,
19121 IX86_BUILTIN_PF2ID,
19122 IX86_BUILTIN_PFACC,
19123 IX86_BUILTIN_PFADD,
19124 IX86_BUILTIN_PFCMPEQ,
19125 IX86_BUILTIN_PFCMPGE,
19126 IX86_BUILTIN_PFCMPGT,
19127 IX86_BUILTIN_PFMAX,
19128 IX86_BUILTIN_PFMIN,
19129 IX86_BUILTIN_PFMUL,
19130 IX86_BUILTIN_PFRCP,
19131 IX86_BUILTIN_PFRCPIT1,
19132 IX86_BUILTIN_PFRCPIT2,
19133 IX86_BUILTIN_PFRSQIT1,
19134 IX86_BUILTIN_PFRSQRT,
19135 IX86_BUILTIN_PFSUB,
19136 IX86_BUILTIN_PFSUBR,
19137 IX86_BUILTIN_PI2FD,
19138 IX86_BUILTIN_PMULHRW,
19140 /* 3DNow! Athlon Extensions */
19141 IX86_BUILTIN_PF2IW,
19142 IX86_BUILTIN_PFNACC,
19143 IX86_BUILTIN_PFPNACC,
19144 IX86_BUILTIN_PI2FW,
19145 IX86_BUILTIN_PSWAPDSI,
19146 IX86_BUILTIN_PSWAPDSF,
19148 /* SSE2 */
19149 IX86_BUILTIN_ADDPD,
19150 IX86_BUILTIN_ADDSD,
19151 IX86_BUILTIN_DIVPD,
19152 IX86_BUILTIN_DIVSD,
19153 IX86_BUILTIN_MULPD,
19154 IX86_BUILTIN_MULSD,
19155 IX86_BUILTIN_SUBPD,
19156 IX86_BUILTIN_SUBSD,
19158 IX86_BUILTIN_CMPEQPD,
19159 IX86_BUILTIN_CMPLTPD,
19160 IX86_BUILTIN_CMPLEPD,
19161 IX86_BUILTIN_CMPGTPD,
19162 IX86_BUILTIN_CMPGEPD,
19163 IX86_BUILTIN_CMPNEQPD,
19164 IX86_BUILTIN_CMPNLTPD,
19165 IX86_BUILTIN_CMPNLEPD,
19166 IX86_BUILTIN_CMPNGTPD,
19167 IX86_BUILTIN_CMPNGEPD,
19168 IX86_BUILTIN_CMPORDPD,
19169 IX86_BUILTIN_CMPUNORDPD,
19170 IX86_BUILTIN_CMPEQSD,
19171 IX86_BUILTIN_CMPLTSD,
19172 IX86_BUILTIN_CMPLESD,
19173 IX86_BUILTIN_CMPNEQSD,
19174 IX86_BUILTIN_CMPNLTSD,
19175 IX86_BUILTIN_CMPNLESD,
19176 IX86_BUILTIN_CMPORDSD,
19177 IX86_BUILTIN_CMPUNORDSD,
19179 IX86_BUILTIN_COMIEQSD,
19180 IX86_BUILTIN_COMILTSD,
19181 IX86_BUILTIN_COMILESD,
19182 IX86_BUILTIN_COMIGTSD,
19183 IX86_BUILTIN_COMIGESD,
19184 IX86_BUILTIN_COMINEQSD,
19185 IX86_BUILTIN_UCOMIEQSD,
19186 IX86_BUILTIN_UCOMILTSD,
19187 IX86_BUILTIN_UCOMILESD,
19188 IX86_BUILTIN_UCOMIGTSD,
19189 IX86_BUILTIN_UCOMIGESD,
19190 IX86_BUILTIN_UCOMINEQSD,
19192 IX86_BUILTIN_MAXPD,
19193 IX86_BUILTIN_MAXSD,
19194 IX86_BUILTIN_MINPD,
19195 IX86_BUILTIN_MINSD,
19197 IX86_BUILTIN_ANDPD,
19198 IX86_BUILTIN_ANDNPD,
19199 IX86_BUILTIN_ORPD,
19200 IX86_BUILTIN_XORPD,
19202 IX86_BUILTIN_SQRTPD,
19203 IX86_BUILTIN_SQRTSD,
19205 IX86_BUILTIN_UNPCKHPD,
19206 IX86_BUILTIN_UNPCKLPD,
19208 IX86_BUILTIN_SHUFPD,
19210 IX86_BUILTIN_LOADUPD,
19211 IX86_BUILTIN_STOREUPD,
19212 IX86_BUILTIN_MOVSD,
19214 IX86_BUILTIN_LOADHPD,
19215 IX86_BUILTIN_LOADLPD,
19217 IX86_BUILTIN_CVTDQ2PD,
19218 IX86_BUILTIN_CVTDQ2PS,
19220 IX86_BUILTIN_CVTPD2DQ,
19221 IX86_BUILTIN_CVTPD2PI,
19222 IX86_BUILTIN_CVTPD2PS,
19223 IX86_BUILTIN_CVTTPD2DQ,
19224 IX86_BUILTIN_CVTTPD2PI,
19226 IX86_BUILTIN_CVTPI2PD,
19227 IX86_BUILTIN_CVTSI2SD,
19228 IX86_BUILTIN_CVTSI642SD,
19230 IX86_BUILTIN_CVTSD2SI,
19231 IX86_BUILTIN_CVTSD2SI64,
19232 IX86_BUILTIN_CVTSD2SS,
19233 IX86_BUILTIN_CVTSS2SD,
19234 IX86_BUILTIN_CVTTSD2SI,
19235 IX86_BUILTIN_CVTTSD2SI64,
19237 IX86_BUILTIN_CVTPS2DQ,
19238 IX86_BUILTIN_CVTPS2PD,
19239 IX86_BUILTIN_CVTTPS2DQ,
19241 IX86_BUILTIN_MOVNTI,
19242 IX86_BUILTIN_MOVNTPD,
19243 IX86_BUILTIN_MOVNTDQ,
19245 IX86_BUILTIN_MOVQ128,
19247 /* SSE2 MMX */
19248 IX86_BUILTIN_MASKMOVDQU,
19249 IX86_BUILTIN_MOVMSKPD,
19250 IX86_BUILTIN_PMOVMSKB128,
19252 IX86_BUILTIN_PACKSSWB128,
19253 IX86_BUILTIN_PACKSSDW128,
19254 IX86_BUILTIN_PACKUSWB128,
19256 IX86_BUILTIN_PADDB128,
19257 IX86_BUILTIN_PADDW128,
19258 IX86_BUILTIN_PADDD128,
19259 IX86_BUILTIN_PADDQ128,
19260 IX86_BUILTIN_PADDSB128,
19261 IX86_BUILTIN_PADDSW128,
19262 IX86_BUILTIN_PADDUSB128,
19263 IX86_BUILTIN_PADDUSW128,
19264 IX86_BUILTIN_PSUBB128,
19265 IX86_BUILTIN_PSUBW128,
19266 IX86_BUILTIN_PSUBD128,
19267 IX86_BUILTIN_PSUBQ128,
19268 IX86_BUILTIN_PSUBSB128,
19269 IX86_BUILTIN_PSUBSW128,
19270 IX86_BUILTIN_PSUBUSB128,
19271 IX86_BUILTIN_PSUBUSW128,
19273 IX86_BUILTIN_PAND128,
19274 IX86_BUILTIN_PANDN128,
19275 IX86_BUILTIN_POR128,
19276 IX86_BUILTIN_PXOR128,
19278 IX86_BUILTIN_PAVGB128,
19279 IX86_BUILTIN_PAVGW128,
19281 IX86_BUILTIN_PCMPEQB128,
19282 IX86_BUILTIN_PCMPEQW128,
19283 IX86_BUILTIN_PCMPEQD128,
19284 IX86_BUILTIN_PCMPGTB128,
19285 IX86_BUILTIN_PCMPGTW128,
19286 IX86_BUILTIN_PCMPGTD128,
19288 IX86_BUILTIN_PMADDWD128,
19290 IX86_BUILTIN_PMAXSW128,
19291 IX86_BUILTIN_PMAXUB128,
19292 IX86_BUILTIN_PMINSW128,
19293 IX86_BUILTIN_PMINUB128,
19295 IX86_BUILTIN_PMULUDQ,
19296 IX86_BUILTIN_PMULUDQ128,
19297 IX86_BUILTIN_PMULHUW128,
19298 IX86_BUILTIN_PMULHW128,
19299 IX86_BUILTIN_PMULLW128,
19301 IX86_BUILTIN_PSADBW128,
19302 IX86_BUILTIN_PSHUFHW,
19303 IX86_BUILTIN_PSHUFLW,
19304 IX86_BUILTIN_PSHUFD,
19306 IX86_BUILTIN_PSLLDQI128,
19307 IX86_BUILTIN_PSLLWI128,
19308 IX86_BUILTIN_PSLLDI128,
19309 IX86_BUILTIN_PSLLQI128,
19310 IX86_BUILTIN_PSRAWI128,
19311 IX86_BUILTIN_PSRADI128,
19312 IX86_BUILTIN_PSRLDQI128,
19313 IX86_BUILTIN_PSRLWI128,
19314 IX86_BUILTIN_PSRLDI128,
19315 IX86_BUILTIN_PSRLQI128,
19317 IX86_BUILTIN_PSLLDQ128,
19318 IX86_BUILTIN_PSLLW128,
19319 IX86_BUILTIN_PSLLD128,
19320 IX86_BUILTIN_PSLLQ128,
19321 IX86_BUILTIN_PSRAW128,
19322 IX86_BUILTIN_PSRAD128,
19323 IX86_BUILTIN_PSRLW128,
19324 IX86_BUILTIN_PSRLD128,
19325 IX86_BUILTIN_PSRLQ128,
19327 IX86_BUILTIN_PUNPCKHBW128,
19328 IX86_BUILTIN_PUNPCKHWD128,
19329 IX86_BUILTIN_PUNPCKHDQ128,
19330 IX86_BUILTIN_PUNPCKHQDQ128,
19331 IX86_BUILTIN_PUNPCKLBW128,
19332 IX86_BUILTIN_PUNPCKLWD128,
19333 IX86_BUILTIN_PUNPCKLDQ128,
19334 IX86_BUILTIN_PUNPCKLQDQ128,
19336 IX86_BUILTIN_CLFLUSH,
19337 IX86_BUILTIN_MFENCE,
19338 IX86_BUILTIN_LFENCE,
19340 /* SSE3. */
19341 IX86_BUILTIN_ADDSUBPS,
19342 IX86_BUILTIN_HADDPS,
19343 IX86_BUILTIN_HSUBPS,
19344 IX86_BUILTIN_MOVSHDUP,
19345 IX86_BUILTIN_MOVSLDUP,
19346 IX86_BUILTIN_ADDSUBPD,
19347 IX86_BUILTIN_HADDPD,
19348 IX86_BUILTIN_HSUBPD,
19349 IX86_BUILTIN_LDDQU,
19351 IX86_BUILTIN_MONITOR,
19352 IX86_BUILTIN_MWAIT,
19354 /* SSSE3. */
19355 IX86_BUILTIN_PHADDW,
19356 IX86_BUILTIN_PHADDD,
19357 IX86_BUILTIN_PHADDSW,
19358 IX86_BUILTIN_PHSUBW,
19359 IX86_BUILTIN_PHSUBD,
19360 IX86_BUILTIN_PHSUBSW,
19361 IX86_BUILTIN_PMADDUBSW,
19362 IX86_BUILTIN_PMULHRSW,
19363 IX86_BUILTIN_PSHUFB,
19364 IX86_BUILTIN_PSIGNB,
19365 IX86_BUILTIN_PSIGNW,
19366 IX86_BUILTIN_PSIGND,
19367 IX86_BUILTIN_PALIGNR,
19368 IX86_BUILTIN_PABSB,
19369 IX86_BUILTIN_PABSW,
19370 IX86_BUILTIN_PABSD,
19372 IX86_BUILTIN_PHADDW128,
19373 IX86_BUILTIN_PHADDD128,
19374 IX86_BUILTIN_PHADDSW128,
19375 IX86_BUILTIN_PHSUBW128,
19376 IX86_BUILTIN_PHSUBD128,
19377 IX86_BUILTIN_PHSUBSW128,
19378 IX86_BUILTIN_PMADDUBSW128,
19379 IX86_BUILTIN_PMULHRSW128,
19380 IX86_BUILTIN_PSHUFB128,
19381 IX86_BUILTIN_PSIGNB128,
19382 IX86_BUILTIN_PSIGNW128,
19383 IX86_BUILTIN_PSIGND128,
19384 IX86_BUILTIN_PALIGNR128,
19385 IX86_BUILTIN_PABSB128,
19386 IX86_BUILTIN_PABSW128,
19387 IX86_BUILTIN_PABSD128,
19389 /* AMDFAM10 - SSE4A New Instructions. */
19390 IX86_BUILTIN_MOVNTSD,
19391 IX86_BUILTIN_MOVNTSS,
19392 IX86_BUILTIN_EXTRQI,
19393 IX86_BUILTIN_EXTRQ,
19394 IX86_BUILTIN_INSERTQI,
19395 IX86_BUILTIN_INSERTQ,
19397 /* SSE4.1. */
19398 IX86_BUILTIN_BLENDPD,
19399 IX86_BUILTIN_BLENDPS,
19400 IX86_BUILTIN_BLENDVPD,
19401 IX86_BUILTIN_BLENDVPS,
19402 IX86_BUILTIN_PBLENDVB128,
19403 IX86_BUILTIN_PBLENDW128,
19405 IX86_BUILTIN_DPPD,
19406 IX86_BUILTIN_DPPS,
19408 IX86_BUILTIN_INSERTPS128,
19410 IX86_BUILTIN_MOVNTDQA,
19411 IX86_BUILTIN_MPSADBW128,
19412 IX86_BUILTIN_PACKUSDW128,
19413 IX86_BUILTIN_PCMPEQQ,
19414 IX86_BUILTIN_PHMINPOSUW128,
19416 IX86_BUILTIN_PMAXSB128,
19417 IX86_BUILTIN_PMAXSD128,
19418 IX86_BUILTIN_PMAXUD128,
19419 IX86_BUILTIN_PMAXUW128,
19421 IX86_BUILTIN_PMINSB128,
19422 IX86_BUILTIN_PMINSD128,
19423 IX86_BUILTIN_PMINUD128,
19424 IX86_BUILTIN_PMINUW128,
19426 IX86_BUILTIN_PMOVSXBW128,
19427 IX86_BUILTIN_PMOVSXBD128,
19428 IX86_BUILTIN_PMOVSXBQ128,
19429 IX86_BUILTIN_PMOVSXWD128,
19430 IX86_BUILTIN_PMOVSXWQ128,
19431 IX86_BUILTIN_PMOVSXDQ128,
19433 IX86_BUILTIN_PMOVZXBW128,
19434 IX86_BUILTIN_PMOVZXBD128,
19435 IX86_BUILTIN_PMOVZXBQ128,
19436 IX86_BUILTIN_PMOVZXWD128,
19437 IX86_BUILTIN_PMOVZXWQ128,
19438 IX86_BUILTIN_PMOVZXDQ128,
19440 IX86_BUILTIN_PMULDQ128,
19441 IX86_BUILTIN_PMULLD128,
19443 IX86_BUILTIN_ROUNDPD,
19444 IX86_BUILTIN_ROUNDPS,
19445 IX86_BUILTIN_ROUNDSD,
19446 IX86_BUILTIN_ROUNDSS,
19448 IX86_BUILTIN_PTESTZ,
19449 IX86_BUILTIN_PTESTC,
19450 IX86_BUILTIN_PTESTNZC,
19452 IX86_BUILTIN_VEC_INIT_V2SI,
19453 IX86_BUILTIN_VEC_INIT_V4HI,
19454 IX86_BUILTIN_VEC_INIT_V8QI,
19455 IX86_BUILTIN_VEC_EXT_V2DF,
19456 IX86_BUILTIN_VEC_EXT_V2DI,
19457 IX86_BUILTIN_VEC_EXT_V4SF,
19458 IX86_BUILTIN_VEC_EXT_V4SI,
19459 IX86_BUILTIN_VEC_EXT_V8HI,
19460 IX86_BUILTIN_VEC_EXT_V2SI,
19461 IX86_BUILTIN_VEC_EXT_V4HI,
19462 IX86_BUILTIN_VEC_EXT_V16QI,
19463 IX86_BUILTIN_VEC_SET_V2DI,
19464 IX86_BUILTIN_VEC_SET_V4SF,
19465 IX86_BUILTIN_VEC_SET_V4SI,
19466 IX86_BUILTIN_VEC_SET_V8HI,
19467 IX86_BUILTIN_VEC_SET_V4HI,
19468 IX86_BUILTIN_VEC_SET_V16QI,
19470 IX86_BUILTIN_VEC_PACK_SFIX,
19472 /* SSE4.2. */
19473 IX86_BUILTIN_CRC32QI,
19474 IX86_BUILTIN_CRC32HI,
19475 IX86_BUILTIN_CRC32SI,
19476 IX86_BUILTIN_CRC32DI,
19478 IX86_BUILTIN_PCMPESTRI128,
19479 IX86_BUILTIN_PCMPESTRM128,
19480 IX86_BUILTIN_PCMPESTRA128,
19481 IX86_BUILTIN_PCMPESTRC128,
19482 IX86_BUILTIN_PCMPESTRO128,
19483 IX86_BUILTIN_PCMPESTRS128,
19484 IX86_BUILTIN_PCMPESTRZ128,
19485 IX86_BUILTIN_PCMPISTRI128,
19486 IX86_BUILTIN_PCMPISTRM128,
19487 IX86_BUILTIN_PCMPISTRA128,
19488 IX86_BUILTIN_PCMPISTRC128,
19489 IX86_BUILTIN_PCMPISTRO128,
19490 IX86_BUILTIN_PCMPISTRS128,
19491 IX86_BUILTIN_PCMPISTRZ128,
19493 IX86_BUILTIN_PCMPGTQ,
19495 /* AES instructions */
19496 IX86_BUILTIN_AESENC128,
19497 IX86_BUILTIN_AESENCLAST128,
19498 IX86_BUILTIN_AESDEC128,
19499 IX86_BUILTIN_AESDECLAST128,
19500 IX86_BUILTIN_AESIMC128,
19501 IX86_BUILTIN_AESKEYGENASSIST128,
19503 /* PCLMUL instruction */
19504 IX86_BUILTIN_PCLMULQDQ128,
19506 /* AVX */
19507 IX86_BUILTIN_ADDPD256,
19508 IX86_BUILTIN_ADDPS256,
19509 IX86_BUILTIN_ADDSUBPD256,
19510 IX86_BUILTIN_ADDSUBPS256,
19511 IX86_BUILTIN_ANDPD256,
19512 IX86_BUILTIN_ANDPS256,
19513 IX86_BUILTIN_ANDNPD256,
19514 IX86_BUILTIN_ANDNPS256,
19515 IX86_BUILTIN_BLENDPD256,
19516 IX86_BUILTIN_BLENDPS256,
19517 IX86_BUILTIN_BLENDVPD256,
19518 IX86_BUILTIN_BLENDVPS256,
19519 IX86_BUILTIN_DIVPD256,
19520 IX86_BUILTIN_DIVPS256,
19521 IX86_BUILTIN_DPPS256,
19522 IX86_BUILTIN_HADDPD256,
19523 IX86_BUILTIN_HADDPS256,
19524 IX86_BUILTIN_HSUBPD256,
19525 IX86_BUILTIN_HSUBPS256,
19526 IX86_BUILTIN_MAXPD256,
19527 IX86_BUILTIN_MAXPS256,
19528 IX86_BUILTIN_MINPD256,
19529 IX86_BUILTIN_MINPS256,
19530 IX86_BUILTIN_MULPD256,
19531 IX86_BUILTIN_MULPS256,
19532 IX86_BUILTIN_ORPD256,
19533 IX86_BUILTIN_ORPS256,
19534 IX86_BUILTIN_SHUFPD256,
19535 IX86_BUILTIN_SHUFPS256,
19536 IX86_BUILTIN_SUBPD256,
19537 IX86_BUILTIN_SUBPS256,
19538 IX86_BUILTIN_XORPD256,
19539 IX86_BUILTIN_XORPS256,
19540 IX86_BUILTIN_CMPSD,
19541 IX86_BUILTIN_CMPSS,
19542 IX86_BUILTIN_CMPPD,
19543 IX86_BUILTIN_CMPPS,
19544 IX86_BUILTIN_CMPPD256,
19545 IX86_BUILTIN_CMPPS256,
19546 IX86_BUILTIN_CVTDQ2PD256,
19547 IX86_BUILTIN_CVTDQ2PS256,
19548 IX86_BUILTIN_CVTPD2PS256,
19549 IX86_BUILTIN_CVTPS2DQ256,
19550 IX86_BUILTIN_CVTPS2PD256,
19551 IX86_BUILTIN_CVTTPD2DQ256,
19552 IX86_BUILTIN_CVTPD2DQ256,
19553 IX86_BUILTIN_CVTTPS2DQ256,
19554 IX86_BUILTIN_EXTRACTF128PD256,
19555 IX86_BUILTIN_EXTRACTF128PS256,
19556 IX86_BUILTIN_EXTRACTF128SI256,
19557 IX86_BUILTIN_VZEROALL,
19558 IX86_BUILTIN_VZEROUPPER,
19559 IX86_BUILTIN_VZEROUPPER_REX64,
19560 IX86_BUILTIN_VPERMILVARPD,
19561 IX86_BUILTIN_VPERMILVARPS,
19562 IX86_BUILTIN_VPERMILVARPD256,
19563 IX86_BUILTIN_VPERMILVARPS256,
19564 IX86_BUILTIN_VPERMILPD,
19565 IX86_BUILTIN_VPERMILPS,
19566 IX86_BUILTIN_VPERMILPD256,
19567 IX86_BUILTIN_VPERMILPS256,
19568 IX86_BUILTIN_VPERMIL2PD,
19569 IX86_BUILTIN_VPERMIL2PS,
19570 IX86_BUILTIN_VPERMIL2PD256,
19571 IX86_BUILTIN_VPERMIL2PS256,
19572 IX86_BUILTIN_VPERM2F128PD256,
19573 IX86_BUILTIN_VPERM2F128PS256,
19574 IX86_BUILTIN_VPERM2F128SI256,
19575 IX86_BUILTIN_VBROADCASTSS,
19576 IX86_BUILTIN_VBROADCASTSD256,
19577 IX86_BUILTIN_VBROADCASTSS256,
19578 IX86_BUILTIN_VBROADCASTPD256,
19579 IX86_BUILTIN_VBROADCASTPS256,
19580 IX86_BUILTIN_VINSERTF128PD256,
19581 IX86_BUILTIN_VINSERTF128PS256,
19582 IX86_BUILTIN_VINSERTF128SI256,
19583 IX86_BUILTIN_LOADUPD256,
19584 IX86_BUILTIN_LOADUPS256,
19585 IX86_BUILTIN_STOREUPD256,
19586 IX86_BUILTIN_STOREUPS256,
19587 IX86_BUILTIN_LDDQU256,
19588 IX86_BUILTIN_LOADDQU256,
19589 IX86_BUILTIN_STOREDQU256,
19590 IX86_BUILTIN_MASKLOADPD,
19591 IX86_BUILTIN_MASKLOADPS,
19592 IX86_BUILTIN_MASKSTOREPD,
19593 IX86_BUILTIN_MASKSTOREPS,
19594 IX86_BUILTIN_MASKLOADPD256,
19595 IX86_BUILTIN_MASKLOADPS256,
19596 IX86_BUILTIN_MASKSTOREPD256,
19597 IX86_BUILTIN_MASKSTOREPS256,
19598 IX86_BUILTIN_MOVSHDUP256,
19599 IX86_BUILTIN_MOVSLDUP256,
19600 IX86_BUILTIN_MOVDDUP256,
19602 IX86_BUILTIN_SQRTPD256,
19603 IX86_BUILTIN_SQRTPS256,
19604 IX86_BUILTIN_SQRTPS_NR256,
19605 IX86_BUILTIN_RSQRTPS256,
19606 IX86_BUILTIN_RSQRTPS_NR256,
19608 IX86_BUILTIN_RCPPS256,
19610 IX86_BUILTIN_ROUNDPD256,
19611 IX86_BUILTIN_ROUNDPS256,
19613 IX86_BUILTIN_UNPCKHPD256,
19614 IX86_BUILTIN_UNPCKLPD256,
19615 IX86_BUILTIN_UNPCKHPS256,
19616 IX86_BUILTIN_UNPCKLPS256,
19618 IX86_BUILTIN_SI256_SI,
19619 IX86_BUILTIN_PS256_PS,
19620 IX86_BUILTIN_PD256_PD,
19621 IX86_BUILTIN_SI_SI256,
19622 IX86_BUILTIN_PS_PS256,
19623 IX86_BUILTIN_PD_PD256,
19625 IX86_BUILTIN_VTESTZPD,
19626 IX86_BUILTIN_VTESTCPD,
19627 IX86_BUILTIN_VTESTNZCPD,
19628 IX86_BUILTIN_VTESTZPS,
19629 IX86_BUILTIN_VTESTCPS,
19630 IX86_BUILTIN_VTESTNZCPS,
19631 IX86_BUILTIN_VTESTZPD256,
19632 IX86_BUILTIN_VTESTCPD256,
19633 IX86_BUILTIN_VTESTNZCPD256,
19634 IX86_BUILTIN_VTESTZPS256,
19635 IX86_BUILTIN_VTESTCPS256,
19636 IX86_BUILTIN_VTESTNZCPS256,
19637 IX86_BUILTIN_PTESTZ256,
19638 IX86_BUILTIN_PTESTC256,
19639 IX86_BUILTIN_PTESTNZC256,
19641 IX86_BUILTIN_MOVMSKPD256,
19642 IX86_BUILTIN_MOVMSKPS256,
19644 /* TFmode support builtins. */
19645 IX86_BUILTIN_INFQ,
19646 IX86_BUILTIN_FABSQ,
19647 IX86_BUILTIN_COPYSIGNQ,
19649 /* SSE5 instructions */
19650 IX86_BUILTIN_FMADDSS,
19651 IX86_BUILTIN_FMADDSD,
19652 IX86_BUILTIN_FMADDPS,
19653 IX86_BUILTIN_FMADDPD,
19654 IX86_BUILTIN_FMSUBSS,
19655 IX86_BUILTIN_FMSUBSD,
19656 IX86_BUILTIN_FMSUBPS,
19657 IX86_BUILTIN_FMSUBPD,
19658 IX86_BUILTIN_FNMADDSS,
19659 IX86_BUILTIN_FNMADDSD,
19660 IX86_BUILTIN_FNMADDPS,
19661 IX86_BUILTIN_FNMADDPD,
19662 IX86_BUILTIN_FNMSUBSS,
19663 IX86_BUILTIN_FNMSUBSD,
19664 IX86_BUILTIN_FNMSUBPS,
19665 IX86_BUILTIN_FNMSUBPD,
19666 IX86_BUILTIN_PCMOV,
19667 IX86_BUILTIN_PCMOV_V2DI,
19668 IX86_BUILTIN_PCMOV_V4SI,
19669 IX86_BUILTIN_PCMOV_V8HI,
19670 IX86_BUILTIN_PCMOV_V16QI,
19671 IX86_BUILTIN_PCMOV_V4SF,
19672 IX86_BUILTIN_PCMOV_V2DF,
19673 IX86_BUILTIN_PPERM,
19674 IX86_BUILTIN_PERMPS,
19675 IX86_BUILTIN_PERMPD,
19676 IX86_BUILTIN_PMACSSWW,
19677 IX86_BUILTIN_PMACSWW,
19678 IX86_BUILTIN_PMACSSWD,
19679 IX86_BUILTIN_PMACSWD,
19680 IX86_BUILTIN_PMACSSDD,
19681 IX86_BUILTIN_PMACSDD,
19682 IX86_BUILTIN_PMACSSDQL,
19683 IX86_BUILTIN_PMACSSDQH,
19684 IX86_BUILTIN_PMACSDQL,
19685 IX86_BUILTIN_PMACSDQH,
19686 IX86_BUILTIN_PMADCSSWD,
19687 IX86_BUILTIN_PMADCSWD,
19688 IX86_BUILTIN_PHADDBW,
19689 IX86_BUILTIN_PHADDBD,
19690 IX86_BUILTIN_PHADDBQ,
19691 IX86_BUILTIN_PHADDWD,
19692 IX86_BUILTIN_PHADDWQ,
19693 IX86_BUILTIN_PHADDDQ,
19694 IX86_BUILTIN_PHADDUBW,
19695 IX86_BUILTIN_PHADDUBD,
19696 IX86_BUILTIN_PHADDUBQ,
19697 IX86_BUILTIN_PHADDUWD,
19698 IX86_BUILTIN_PHADDUWQ,
19699 IX86_BUILTIN_PHADDUDQ,
19700 IX86_BUILTIN_PHSUBBW,
19701 IX86_BUILTIN_PHSUBWD,
19702 IX86_BUILTIN_PHSUBDQ,
19703 IX86_BUILTIN_PROTB,
19704 IX86_BUILTIN_PROTW,
19705 IX86_BUILTIN_PROTD,
19706 IX86_BUILTIN_PROTQ,
19707 IX86_BUILTIN_PROTB_IMM,
19708 IX86_BUILTIN_PROTW_IMM,
19709 IX86_BUILTIN_PROTD_IMM,
19710 IX86_BUILTIN_PROTQ_IMM,
19711 IX86_BUILTIN_PSHLB,
19712 IX86_BUILTIN_PSHLW,
19713 IX86_BUILTIN_PSHLD,
19714 IX86_BUILTIN_PSHLQ,
19715 IX86_BUILTIN_PSHAB,
19716 IX86_BUILTIN_PSHAW,
19717 IX86_BUILTIN_PSHAD,
19718 IX86_BUILTIN_PSHAQ,
19719 IX86_BUILTIN_FRCZSS,
19720 IX86_BUILTIN_FRCZSD,
19721 IX86_BUILTIN_FRCZPS,
19722 IX86_BUILTIN_FRCZPD,
19723 IX86_BUILTIN_CVTPH2PS,
19724 IX86_BUILTIN_CVTPS2PH,
19726 IX86_BUILTIN_COMEQSS,
19727 IX86_BUILTIN_COMNESS,
19728 IX86_BUILTIN_COMLTSS,
19729 IX86_BUILTIN_COMLESS,
19730 IX86_BUILTIN_COMGTSS,
19731 IX86_BUILTIN_COMGESS,
19732 IX86_BUILTIN_COMUEQSS,
19733 IX86_BUILTIN_COMUNESS,
19734 IX86_BUILTIN_COMULTSS,
19735 IX86_BUILTIN_COMULESS,
19736 IX86_BUILTIN_COMUGTSS,
19737 IX86_BUILTIN_COMUGESS,
19738 IX86_BUILTIN_COMORDSS,
19739 IX86_BUILTIN_COMUNORDSS,
19740 IX86_BUILTIN_COMFALSESS,
19741 IX86_BUILTIN_COMTRUESS,
19743 IX86_BUILTIN_COMEQSD,
19744 IX86_BUILTIN_COMNESD,
19745 IX86_BUILTIN_COMLTSD,
19746 IX86_BUILTIN_COMLESD,
19747 IX86_BUILTIN_COMGTSD,
19748 IX86_BUILTIN_COMGESD,
19749 IX86_BUILTIN_COMUEQSD,
19750 IX86_BUILTIN_COMUNESD,
19751 IX86_BUILTIN_COMULTSD,
19752 IX86_BUILTIN_COMULESD,
19753 IX86_BUILTIN_COMUGTSD,
19754 IX86_BUILTIN_COMUGESD,
19755 IX86_BUILTIN_COMORDSD,
19756 IX86_BUILTIN_COMUNORDSD,
19757 IX86_BUILTIN_COMFALSESD,
19758 IX86_BUILTIN_COMTRUESD,
19760 IX86_BUILTIN_COMEQPS,
19761 IX86_BUILTIN_COMNEPS,
19762 IX86_BUILTIN_COMLTPS,
19763 IX86_BUILTIN_COMLEPS,
19764 IX86_BUILTIN_COMGTPS,
19765 IX86_BUILTIN_COMGEPS,
19766 IX86_BUILTIN_COMUEQPS,
19767 IX86_BUILTIN_COMUNEPS,
19768 IX86_BUILTIN_COMULTPS,
19769 IX86_BUILTIN_COMULEPS,
19770 IX86_BUILTIN_COMUGTPS,
19771 IX86_BUILTIN_COMUGEPS,
19772 IX86_BUILTIN_COMORDPS,
19773 IX86_BUILTIN_COMUNORDPS,
19774 IX86_BUILTIN_COMFALSEPS,
19775 IX86_BUILTIN_COMTRUEPS,
19777 IX86_BUILTIN_COMEQPD,
19778 IX86_BUILTIN_COMNEPD,
19779 IX86_BUILTIN_COMLTPD,
19780 IX86_BUILTIN_COMLEPD,
19781 IX86_BUILTIN_COMGTPD,
19782 IX86_BUILTIN_COMGEPD,
19783 IX86_BUILTIN_COMUEQPD,
19784 IX86_BUILTIN_COMUNEPD,
19785 IX86_BUILTIN_COMULTPD,
19786 IX86_BUILTIN_COMULEPD,
19787 IX86_BUILTIN_COMUGTPD,
19788 IX86_BUILTIN_COMUGEPD,
19789 IX86_BUILTIN_COMORDPD,
19790 IX86_BUILTIN_COMUNORDPD,
19791 IX86_BUILTIN_COMFALSEPD,
19792 IX86_BUILTIN_COMTRUEPD,
19794 IX86_BUILTIN_PCOMEQUB,
19795 IX86_BUILTIN_PCOMNEUB,
19796 IX86_BUILTIN_PCOMLTUB,
19797 IX86_BUILTIN_PCOMLEUB,
19798 IX86_BUILTIN_PCOMGTUB,
19799 IX86_BUILTIN_PCOMGEUB,
19800 IX86_BUILTIN_PCOMFALSEUB,
19801 IX86_BUILTIN_PCOMTRUEUB,
19802 IX86_BUILTIN_PCOMEQUW,
19803 IX86_BUILTIN_PCOMNEUW,
19804 IX86_BUILTIN_PCOMLTUW,
19805 IX86_BUILTIN_PCOMLEUW,
19806 IX86_BUILTIN_PCOMGTUW,
19807 IX86_BUILTIN_PCOMGEUW,
19808 IX86_BUILTIN_PCOMFALSEUW,
19809 IX86_BUILTIN_PCOMTRUEUW,
19810 IX86_BUILTIN_PCOMEQUD,
19811 IX86_BUILTIN_PCOMNEUD,
19812 IX86_BUILTIN_PCOMLTUD,
19813 IX86_BUILTIN_PCOMLEUD,
19814 IX86_BUILTIN_PCOMGTUD,
19815 IX86_BUILTIN_PCOMGEUD,
19816 IX86_BUILTIN_PCOMFALSEUD,
19817 IX86_BUILTIN_PCOMTRUEUD,
19818 IX86_BUILTIN_PCOMEQUQ,
19819 IX86_BUILTIN_PCOMNEUQ,
19820 IX86_BUILTIN_PCOMLTUQ,
19821 IX86_BUILTIN_PCOMLEUQ,
19822 IX86_BUILTIN_PCOMGTUQ,
19823 IX86_BUILTIN_PCOMGEUQ,
19824 IX86_BUILTIN_PCOMFALSEUQ,
19825 IX86_BUILTIN_PCOMTRUEUQ,
19827 IX86_BUILTIN_PCOMEQB,
19828 IX86_BUILTIN_PCOMNEB,
19829 IX86_BUILTIN_PCOMLTB,
19830 IX86_BUILTIN_PCOMLEB,
19831 IX86_BUILTIN_PCOMGTB,
19832 IX86_BUILTIN_PCOMGEB,
19833 IX86_BUILTIN_PCOMFALSEB,
19834 IX86_BUILTIN_PCOMTRUEB,
19835 IX86_BUILTIN_PCOMEQW,
19836 IX86_BUILTIN_PCOMNEW,
19837 IX86_BUILTIN_PCOMLTW,
19838 IX86_BUILTIN_PCOMLEW,
19839 IX86_BUILTIN_PCOMGTW,
19840 IX86_BUILTIN_PCOMGEW,
19841 IX86_BUILTIN_PCOMFALSEW,
19842 IX86_BUILTIN_PCOMTRUEW,
19843 IX86_BUILTIN_PCOMEQD,
19844 IX86_BUILTIN_PCOMNED,
19845 IX86_BUILTIN_PCOMLTD,
19846 IX86_BUILTIN_PCOMLED,
19847 IX86_BUILTIN_PCOMGTD,
19848 IX86_BUILTIN_PCOMGED,
19849 IX86_BUILTIN_PCOMFALSED,
19850 IX86_BUILTIN_PCOMTRUED,
19851 IX86_BUILTIN_PCOMEQQ,
19852 IX86_BUILTIN_PCOMNEQ,
19853 IX86_BUILTIN_PCOMLTQ,
19854 IX86_BUILTIN_PCOMLEQ,
19855 IX86_BUILTIN_PCOMGTQ,
19856 IX86_BUILTIN_PCOMGEQ,
19857 IX86_BUILTIN_PCOMFALSEQ,
19858 IX86_BUILTIN_PCOMTRUEQ,
19860 IX86_BUILTIN_MAX
19861 };
19863 /* Table for the ix86 builtin decls. */
19864 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
19866 /* Table of all of the builtin functions that are possible with different ISAs,
19867    but are waiting to be built until a function is declared to use that
19868    ISA.  */
19869 struct builtin_isa GTY(())
19870 {
19871   tree type;		/* builtin type to use in the declaration */
19872   const char *name;	/* function name */
19873   int isa;		/* isa_flags this builtin is defined for */
19874   bool const_p;		/* true if the declaration is constant */
19875 };
19877 static GTY(()) struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
19880 /* Add an ix86 target builtin function with CODE, NAME and TYPE.  Save the
19881  * MASK of which isa_flags to use in the ix86_builtins_isa array.  Store the
19882  * function decl in the ix86_builtins array.  Return the function decl, or
19883  * NULL_TREE if the builtin was not added.
19884  *
19885  * If the front end has a special hook for builtin functions, delay adding
19886  * builtin functions that aren't in the current ISA until the ISA is changed
19887  * with function specific optimization.  Doing so can save about 300K for the
19888  * default compiler.  When the builtin is expanded, check at that time whether
19889  * it is valid.
19890  *
19891  * If the front end doesn't have a special hook, record all builtins, even
19892  * ones that aren't in the current ISA, in case the user uses function
19893  * specific options for a different ISA.  That way we don't get scope errors
19894  * if a builtin is added in the middle of a function scope. */
19896 static inline tree
19897 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
19898 {
19899   tree decl = NULL_TREE;
19901   if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
19902     {
19903       ix86_builtins_isa[(int) code].isa = mask;
19905       if ((mask & ix86_isa_flags) != 0
19906           || (lang_hooks.builtin_function
19907               == lang_hooks.builtin_function_ext_scope))
19909         {
19910           decl = add_builtin_function (name, type, code, BUILT_IN_MD, NULL,
19911                                        NULL_TREE);
19912           ix86_builtins[(int) code] = decl;
19913           ix86_builtins_isa[(int) code].type = NULL_TREE;
19914         }
19915       else
19916         {
19917           ix86_builtins[(int) code] = NULL_TREE;
19918           ix86_builtins_isa[(int) code].const_p = false;
19919           ix86_builtins_isa[(int) code].type = type;
19920           ix86_builtins_isa[(int) code].name = name;
19921         }
19922     }
19924   return decl;
19925 }
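/* Editor's sketch (illustrative, not part of the original source): a builtin
   is registered by passing its ISA mask, public name, function type tree and
   enum code, along the lines of

     decl = def_builtin (OPTION_MASK_ISA_SSE4_2, "__builtin_ia32_crc32qi",
                         ftype, IX86_BUILTIN_CRC32QI);

   where FTYPE stands for a function_type tree built beforehand.  If the ISA
   is not in ix86_isa_flags and the front end supports extension-scope
   builtins, no decl is created yet; the name/type/isa triple is parked in
   ix86_builtins_isa and materialized later by ix86_add_new_builtins.  */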
19927 /* Like def_builtin, but also marks the function decl "const". */
19929 static inline tree
19930 def_builtin_const (int mask, const char *name, tree type,
19931                    enum ix86_builtins code)
19932 {
19933   tree decl = def_builtin (mask, name, type, code);
19934   if (decl)
19935     TREE_READONLY (decl) = 1;
19936   else
19937     ix86_builtins_isa[(int) code].const_p = true;
19939   return decl;
19940 }
19942 /* Add any new builtin functions for a given ISA that may not have been
19943 declared. This saves a bit of space compared to adding all of the
19944 declarations to the tree, even if we didn't use them. */
19946 static void
19947 ix86_add_new_builtins (int isa)
19948 {
19949   int i;
19950   tree decl;
19952   for (i = 0; i < (int) IX86_BUILTIN_MAX; i++)
19953     {
19954       if ((ix86_builtins_isa[i].isa & isa) != 0
19955           && ix86_builtins_isa[i].type != NULL_TREE)
19956         {
19957           decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
19958                                                  ix86_builtins_isa[i].type,
19959                                                  i, BUILT_IN_MD, NULL,
19960                                                  NULL_TREE);
19962           ix86_builtins[i] = decl;
19963           ix86_builtins_isa[i].type = NULL_TREE;
19964           if (ix86_builtins_isa[i].const_p)
19965             TREE_READONLY (decl) = 1;
19966         }
19967     }
19968 }
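/* Editor's sketch (illustrative, not part of the original source): the
   deferral above is what lets a single function opt into an ISA without
   enabling it globally, e.g.

     __attribute__((target ("sse4.2")))
     unsigned int
     crc32_byte (unsigned int crc, unsigned char b)
     {
       return __builtin_ia32_crc32qi (crc, b);
     }

   compiled without -msse4.2: when the function-specific ISA flags change,
   the parked entries for the newly enabled ISA are turned into real decls
   by ix86_add_new_builtins.  */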
19970 /* Bits for builtin_description.flag. */
19972 /* Set when we don't support the comparison natively, and should
19973    swap the comparison operands in order to support it.  */
19974 #define BUILTIN_DESC_SWAP_OPERANDS 1
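/* Editor's sketch (illustrative, not part of the original source): swapping
   lets a "greater than" builtin reuse the native "less than" pattern, since
   a > b is the same test as b < a:

     __m128 gt = __builtin_ia32_cmpgtps (a, b);   -- expanded as b LT a
     __m128 lt = __builtin_ia32_cmpltps (b, a);   -- same result

   Entries that need this use LT/LE together with a ..._SWAP function type
   in the tables below.  */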
19976 struct builtin_description
19977 {
19978   const unsigned int mask;
19979   const enum insn_code icode;
19980   const char *const name;
19981   const enum ix86_builtins code;
19982   const enum rtx_code comparison;
19983   const int flag;
19984 };
19986 static const struct builtin_description bdesc_comi[] =
19987 {
19988 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
19989 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
19990 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
19991 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
19992 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
19993 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
19994 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
19995 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
19996 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
19997 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
19998 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
19999 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
20000 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
20001 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
20002 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
20003 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
20004 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
20005 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
20006 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
20007 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
20008 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
20009 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
20010 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
20011 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
20012 };
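/* Editor's note (illustrative, not part of the original source): each row
   above backs an intrinsic wrapper; xmmintrin.h defines, approximately,

     static __inline int
     _mm_comieq_ss (__m128 __A, __m128 __B)
     {
       return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
     }

   The rtx codes (UNEQ vs. GT/GE, LTGT) record how each condition is read
   back from the flags that comiss/ucomiss set, including the unordered
   (NaN) cases.  */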
20014 static const struct builtin_description bdesc_pcmpestr[] =
20015 {
20016 /* SSE4.2 */
20017 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
20018 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
20019 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
20020 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
20021 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
20022 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
20023 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
20024 };
20026 static const struct builtin_description bdesc_pcmpistr[] =
20027 {
20028 /* SSE4.2 */
20029 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
20030 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
20031 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
20032 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
20033 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
20034 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
20035 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
20036 };
20038 /* Special builtin types */
20039 enum ix86_special_builtin_type
20040 {
20041 SPECIAL_FTYPE_UNKNOWN,
20042 VOID_FTYPE_VOID,
20043 V32QI_FTYPE_PCCHAR,
20044 V16QI_FTYPE_PCCHAR,
20045 V8SF_FTYPE_PCV4SF,
20046 V8SF_FTYPE_PCFLOAT,
20047 V4DF_FTYPE_PCV2DF,
20048 V4DF_FTYPE_PCDOUBLE,
20049 V4SF_FTYPE_PCFLOAT,
20050 V2DF_FTYPE_PCDOUBLE,
20051 V8SF_FTYPE_PCV8SF_V8SF,
20052 V4DF_FTYPE_PCV4DF_V4DF,
20053 V4SF_FTYPE_V4SF_PCV2SF,
20054 V4SF_FTYPE_PCV4SF_V4SF,
20055 V2DF_FTYPE_V2DF_PCDOUBLE,
20056 V2DF_FTYPE_PCV2DF_V2DF,
20057 V2DI_FTYPE_PV2DI,
20058 VOID_FTYPE_PV2SF_V4SF,
20059 VOID_FTYPE_PV2DI_V2DI,
20060 VOID_FTYPE_PCHAR_V32QI,
20061 VOID_FTYPE_PCHAR_V16QI,
20062 VOID_FTYPE_PFLOAT_V8SF,
20063 VOID_FTYPE_PFLOAT_V4SF,
20064 VOID_FTYPE_PDOUBLE_V4DF,
20065 VOID_FTYPE_PDOUBLE_V2DF,
20066 VOID_FTYPE_PDI_DI,
20067 VOID_FTYPE_PINT_INT,
20068 VOID_FTYPE_PV8SF_V8SF_V8SF,
20069 VOID_FTYPE_PV4DF_V4DF_V4DF,
20070 VOID_FTYPE_PV4SF_V4SF_V4SF,
20071 VOID_FTYPE_PV2DF_V2DF
20072 };
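/* Editor's note (illustrative, not part of the original source): these
   names encode RETURN_FTYPE_ARGS, with P for pointer and PC for pointer to
   const.  For example, VOID_FTYPE_PFLOAT_V4SF is the shape of a store,
   void f (float *, __v4sf), used by __builtin_ia32_storeups, while
   V4SF_FTYPE_PCFLOAT is the shape of a load, __v4sf f (const float *),
   used by __builtin_ia32_loadups.  */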
20074 /* Builtin types */
20075 enum ix86_builtin_type
20076 {
20077 FTYPE_UNKNOWN,
20078 FLOAT128_FTYPE_FLOAT128,
20079 FLOAT_FTYPE_FLOAT,
20080 FLOAT128_FTYPE_FLOAT128_FLOAT128,
20081 INT_FTYPE_V8SF_V8SF_PTEST,
20082 INT_FTYPE_V4DI_V4DI_PTEST,
20083 INT_FTYPE_V4DF_V4DF_PTEST,
20084 INT_FTYPE_V4SF_V4SF_PTEST,
20085 INT_FTYPE_V2DI_V2DI_PTEST,
20086 INT_FTYPE_V2DF_V2DF_PTEST,
20087 INT64_FTYPE_V4SF,
20088 INT64_FTYPE_V2DF,
20089 INT_FTYPE_V16QI,
20090 INT_FTYPE_V8QI,
20091 INT_FTYPE_V8SF,
20092 INT_FTYPE_V4DF,
20093 INT_FTYPE_V4SF,
20094 INT_FTYPE_V2DF,
20095 V16QI_FTYPE_V16QI,
20096 V8SI_FTYPE_V8SF,
20097 V8SI_FTYPE_V4SI,
20098 V8HI_FTYPE_V8HI,
20099 V8HI_FTYPE_V16QI,
20100 V8QI_FTYPE_V8QI,
20101 V8SF_FTYPE_V8SF,
20102 V8SF_FTYPE_V8SI,
20103 V8SF_FTYPE_V4SF,
20104 V4SI_FTYPE_V4SI,
20105 V4SI_FTYPE_V16QI,
20106 V4SI_FTYPE_V8SI,
20107 V4SI_FTYPE_V8HI,
20108 V4SI_FTYPE_V4DF,
20109 V4SI_FTYPE_V4SF,
20110 V4SI_FTYPE_V2DF,
20111 V4HI_FTYPE_V4HI,
20112 V4DF_FTYPE_V4DF,
20113 V4DF_FTYPE_V4SI,
20114 V4DF_FTYPE_V4SF,
20115 V4DF_FTYPE_V2DF,
20116 V4SF_FTYPE_V4DF,
20117 V4SF_FTYPE_V4SF,
20118 V4SF_FTYPE_V4SF_VEC_MERGE,
20119 V4SF_FTYPE_V8SF,
20120 V4SF_FTYPE_V4SI,
20121 V4SF_FTYPE_V2DF,
20122 V2DI_FTYPE_V2DI,
20123 V2DI_FTYPE_V16QI,
20124 V2DI_FTYPE_V8HI,
20125 V2DI_FTYPE_V4SI,
20126 V2DF_FTYPE_V2DF,
20127 V2DF_FTYPE_V2DF_VEC_MERGE,
20128 V2DF_FTYPE_V4SI,
20129 V2DF_FTYPE_V4DF,
20130 V2DF_FTYPE_V4SF,
20131 V2DF_FTYPE_V2SI,
20132 V2SI_FTYPE_V2SI,
20133 V2SI_FTYPE_V4SF,
20134 V2SI_FTYPE_V2SF,
20135 V2SI_FTYPE_V2DF,
20136 V2SF_FTYPE_V2SF,
20137 V2SF_FTYPE_V2SI,
20138 V16QI_FTYPE_V16QI_V16QI,
20139 V16QI_FTYPE_V8HI_V8HI,
20140 V8QI_FTYPE_V8QI_V8QI,
20141 V8QI_FTYPE_V4HI_V4HI,
20142 V8HI_FTYPE_V8HI_V8HI,
20143 V8HI_FTYPE_V8HI_V8HI_COUNT,
20144 V8HI_FTYPE_V16QI_V16QI,
20145 V8HI_FTYPE_V4SI_V4SI,
20146 V8HI_FTYPE_V8HI_SI_COUNT,
20147 V8SF_FTYPE_V8SF_V8SF,
20148 V8SF_FTYPE_V8SF_V8SI,
20149 V4SI_FTYPE_V4SI_V4SI,
20150 V4SI_FTYPE_V4SI_V4SI_COUNT,
20151 V4SI_FTYPE_V8HI_V8HI,
20152 V4SI_FTYPE_V4SF_V4SF,
20153 V4SI_FTYPE_V2DF_V2DF,
20154 V4SI_FTYPE_V4SI_SI_COUNT,
20155 V4HI_FTYPE_V4HI_V4HI,
20156 V4HI_FTYPE_V4HI_V4HI_COUNT,
20157 V4HI_FTYPE_V8QI_V8QI,
20158 V4HI_FTYPE_V2SI_V2SI,
20159 V4HI_FTYPE_V4HI_SI_COUNT,
20160 V4DF_FTYPE_V4DF_V4DF,
20161 V4DF_FTYPE_V4DF_V4DI,
20162 V4SF_FTYPE_V4SF_V4SF,
20163 V4SF_FTYPE_V4SF_V4SF_SWAP,
20164 V4SF_FTYPE_V4SF_V4SI,
20165 V4SF_FTYPE_V4SF_V2SI,
20166 V4SF_FTYPE_V4SF_V2DF,
20167 V4SF_FTYPE_V4SF_DI,
20168 V4SF_FTYPE_V4SF_SI,
20169 V2DI_FTYPE_V2DI_V2DI,
20170 V2DI_FTYPE_V2DI_V2DI_COUNT,
20171 V2DI_FTYPE_V16QI_V16QI,
20172 V2DI_FTYPE_V4SI_V4SI,
20173 V2DI_FTYPE_V2DI_V16QI,
20174 V2DI_FTYPE_V2DF_V2DF,
20175 V2DI_FTYPE_V2DI_SI_COUNT,
20176 V2SI_FTYPE_V2SI_V2SI,
20177 V2SI_FTYPE_V2SI_V2SI_COUNT,
20178 V2SI_FTYPE_V4HI_V4HI,
20179 V2SI_FTYPE_V2SF_V2SF,
20180 V2SI_FTYPE_V2SI_SI_COUNT,
20181 V2DF_FTYPE_V2DF_V2DF,
20182 V2DF_FTYPE_V2DF_V2DF_SWAP,
20183 V2DF_FTYPE_V2DF_V4SF,
20184 V2DF_FTYPE_V2DF_V2DI,
20185 V2DF_FTYPE_V2DF_DI,
20186 V2DF_FTYPE_V2DF_SI,
20187 V2SF_FTYPE_V2SF_V2SF,
20188 V1DI_FTYPE_V1DI_V1DI,
20189 V1DI_FTYPE_V1DI_V1DI_COUNT,
20190 V1DI_FTYPE_V8QI_V8QI,
20191 V1DI_FTYPE_V2SI_V2SI,
20192 V1DI_FTYPE_V1DI_SI_COUNT,
20193 UINT64_FTYPE_UINT64_UINT64,
20194 UINT_FTYPE_UINT_UINT,
20195 UINT_FTYPE_UINT_USHORT,
20196 UINT_FTYPE_UINT_UCHAR,
20197 V8HI_FTYPE_V8HI_INT,
20198 V4SI_FTYPE_V4SI_INT,
20199 V4HI_FTYPE_V4HI_INT,
20200 V8SF_FTYPE_V8SF_INT,
20201 V4SI_FTYPE_V8SI_INT,
20202 V4SF_FTYPE_V8SF_INT,
20203 V2DF_FTYPE_V4DF_INT,
20204 V4DF_FTYPE_V4DF_INT,
20205 V4SF_FTYPE_V4SF_INT,
20206 V2DI_FTYPE_V2DI_INT,
20207 V2DI2TI_FTYPE_V2DI_INT,
20208 V2DF_FTYPE_V2DF_INT,
20209 V16QI_FTYPE_V16QI_V16QI_V16QI,
20210 V8SF_FTYPE_V8SF_V8SF_V8SF,
20211 V4DF_FTYPE_V4DF_V4DF_V4DF,
20212 V4SF_FTYPE_V4SF_V4SF_V4SF,
20213 V2DF_FTYPE_V2DF_V2DF_V2DF,
20214 V16QI_FTYPE_V16QI_V16QI_INT,
20215 V8SI_FTYPE_V8SI_V8SI_INT,
20216 V8SI_FTYPE_V8SI_V4SI_INT,
20217 V8HI_FTYPE_V8HI_V8HI_INT,
20218 V8SF_FTYPE_V8SF_V8SF_INT,
20219 V8SF_FTYPE_V8SF_V4SF_INT,
20220 V4SI_FTYPE_V4SI_V4SI_INT,
20221 V4DF_FTYPE_V4DF_V4DF_INT,
20222 V4DF_FTYPE_V4DF_V2DF_INT,
20223 V4SF_FTYPE_V4SF_V4SF_INT,
20224 V2DI_FTYPE_V2DI_V2DI_INT,
20225 V2DI2TI_FTYPE_V2DI_V2DI_INT,
20226 V1DI2DI_FTYPE_V1DI_V1DI_INT,
20227 V2DF_FTYPE_V2DF_V2DF_INT,
20228 V8SF_FTYPE_V8SF_V8SF_V8SI_INT,
20229 V4DF_FTYPE_V4DF_V4DF_V4DI_INT,
20230 V4SF_FTYPE_V4SF_V4SF_V4SI_INT,
20231 V2DF_FTYPE_V2DF_V2DF_V2DI_INT,
20232 V2DI_FTYPE_V2DI_UINT_UINT,
20233 V2DI_FTYPE_V2DI_V2DI_UINT_UINT
20234 };
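/* Editor's note (illustrative, not part of the original source): the
   trailing tags describe how operands are expanded rather than their C
   types.  As used in the tables below, ..._COUNT marks a shift count that
   may be a scalar or a vector (e.g. V4HI_FTYPE_V4HI_SI_COUNT for psllwi),
   ..._SWAP marks comparisons expanded with exchanged operands (e.g.
   cmpgtps as LT), and ..._VEC_MERGE marks scalar operations whose result
   is merged with the unmodified upper elements (e.g. sqrtss).  */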
20236 /* Special builtins with variable number of arguments. */
20237 static const struct builtin_description bdesc_special_args[] =
20238 {
20239 /* MMX */
20240 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
20242 /* 3DNow! */
20243 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
20245 /* SSE */
20246 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20247 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20248 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
20250 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
20251 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
20252 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
20253 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
20255 /* SSE or 3DNow!A */
20256 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20257 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PDI_DI },
20259 /* SSE2 */
20260 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20261 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20262 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20263 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
20264 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20265 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
20266 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
20267 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
20268 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
20270 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
20271 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
20273 /* SSE3 */
20274 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
20276 /* SSE4.1 */
20277 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
20279 /* SSE4A */
20280 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20281 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20283 /* AVX */
20284 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
20285 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, 0, IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
20286 { OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_64BIT, CODE_FOR_avx_vzeroupper_rex64, 0, IX86_BUILTIN_VZEROUPPER_REX64, UNKNOWN, (int) VOID_FTYPE_VOID },
20288 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastss, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
20289 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastsd256, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
20290 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastss256, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
20291 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_pd256, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
20292 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_ps256, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
20294 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
20295 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
20296 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
20297 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
20298 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
20299 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
20300 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
20302 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
20303 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
20304 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
20305 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
20306 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
20307 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
20308 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
20309 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
20310 };
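/* Editor's sketch (illustrative, not part of the original source): each row
   ties an insn expander to a public builtin that the intrinsic headers then
   wrap; xmmintrin.h defines, approximately,

     static __inline void
     _mm_storeu_ps (float *__P, __m128 __A)
     {
       __builtin_ia32_storeups (__P, (__v4sf)__A);
     }

   which reaches CODE_FOR_sse_movups through the "__builtin_ia32_storeups"
   row above.  */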
20312 /* Builtins with variable number of arguments. */
20313 static const struct builtin_description bdesc_args[] =
20314 {
20315 /* MMX */
20316 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20317 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20318 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20319 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20320 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20321 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20323 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20324 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20325 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20326 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20327 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20328 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20329 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20330 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20332 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20333 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20335 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20336 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20337 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20338 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20340 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20341 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20342 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20343 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20344 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20345 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20347 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20348 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20349 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
20350 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20351 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI},
20352 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI},
20354 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
20355 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
20356 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
20358 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
20360 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
20361 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
20362 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
20363 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
20364 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
20365 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
20367 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
20368 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
20369 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
20370 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
20371 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
20372 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
20374 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
20375 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
20376 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
20377 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
20379 /* 3DNow! */
20380 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
20381 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
20382 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
20383 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
20385 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
20386 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20387 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20388 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
20389 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
20390 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
20391 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20392 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20393 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20394 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20395 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20396 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20397 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20398 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20399 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
20401 /* 3DNow!A */
20402 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
20403 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
20404 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
20405 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
20406 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20407 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
20409 /* SSE */
20410 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
20411 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
20412 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
20413 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
20414 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
20415 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
20416 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
20417 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
20418 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
20419 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
20420 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
20421 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
20423 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
20425 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20426 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20427 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20428 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20429 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20430 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20431 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20432 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20434 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
20435 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
20436 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
20437 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20438 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20439 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
20440 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
20441 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
20442 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
20443 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20444 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP},
20445 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
20446 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
20447 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
20448 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
20449 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
20450 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
20451 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
20452 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
20453 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20454 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
20455 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
20457 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20458 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20459 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20460 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20462 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20463 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20464 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20465 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20467 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20468 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20469 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20470 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20471 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
20473 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
20474 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
20475 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
20477 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
20479 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
20480 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
20481 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
20483 /* SSE MMX or 3DNow!A */
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },

  { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },

  /* SSE2 */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
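
  /* Comparisons.  The rtx code in each entry is handed to the maskcmp
     pattern; a _SWAP suffix on the function type makes the expander
     exchange the two operands, so e.g. cmpgtpd is emitted as LT on
     swapped inputs and cmpgepd as LE.  */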
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpckhpd_exp, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpcklpd_exp, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
  { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
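
  /* Shifts.  The _COUNT function types mark the last operand as a
     shift count, which may be an immediate (the *i forms) or a vector
     whose low part holds the count; the V2DI2TI types mark the
     whole-register byte shifts whose insn patterns work in TImode.  */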
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
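
  /* Entries with a zero name field get their user-visible names and
     types from special-case code in the builtin registration loop
     rather than from this table (the __float128 fabs/copysign builtins
     and, below, the AES and PCLMUL builtins).  */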
  { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },

  { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  /* SSE2 MMX */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },

  /* SSE3 */
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },

  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },

  /* SSSE3 */
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },

  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },

  /* SSSE3.  */
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_V2DI_INT },
  { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI2DI_FTYPE_V1DI_V1DI_INT },

  /* SSE4.1 */
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },

  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
  { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },

  /* SSE4.1 and SSE5 */
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
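
  /* For the ptest/vtest builtins the comparison field encodes which
     condition of the resulting flags the intrinsic reports: EQ for ZF
     (the testz forms), LTU for CF (testc) and GTU for the "neither
     flag set" forms (testnzc).  */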
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
  { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },

  /* SSE4.2 */
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
  { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
  { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },

  /* SSE4A */
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
  { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* AES */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },

  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
  { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },

  /* PCLMUL */
  { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },

  /* AVX */
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },

  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
  { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
};

/* SSE5 */
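/* The MULTI_ARG_<N>_<MODE>[_<VARIANT>] classifications below encode
   the operand count (1, 2 or 3) and the vector element mode of each
   builtin; a second mode (e.g. SI_DI) marks widening operations whose
   result mode differs from the source mode, and the _IMM, _CMP and
   _TF suffixes mark immediate-count, comparison and test variants
   that the expander handles specially.  */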
enum multi_arg_type {
  MULTI_ARG_UNKNOWN,
  MULTI_ARG_3_SF,
  MULTI_ARG_3_DF,
  MULTI_ARG_3_DI,
  MULTI_ARG_3_SI,
  MULTI_ARG_3_SI_DI,
  MULTI_ARG_3_HI,
  MULTI_ARG_3_HI_SI,
  MULTI_ARG_3_QI,
  MULTI_ARG_3_PERMPS,
  MULTI_ARG_3_PERMPD,
  MULTI_ARG_2_SF,
  MULTI_ARG_2_DF,
  MULTI_ARG_2_DI,
  MULTI_ARG_2_SI,
  MULTI_ARG_2_HI,
  MULTI_ARG_2_QI,
  MULTI_ARG_2_DI_IMM,
  MULTI_ARG_2_SI_IMM,
  MULTI_ARG_2_HI_IMM,
  MULTI_ARG_2_QI_IMM,
  MULTI_ARG_2_SF_CMP,
  MULTI_ARG_2_DF_CMP,
  MULTI_ARG_2_DI_CMP,
  MULTI_ARG_2_SI_CMP,
  MULTI_ARG_2_HI_CMP,
  MULTI_ARG_2_QI_CMP,
  MULTI_ARG_2_DI_TF,
  MULTI_ARG_2_SI_TF,
  MULTI_ARG_2_HI_TF,
  MULTI_ARG_2_QI_TF,
  MULTI_ARG_2_SF_TF,
  MULTI_ARG_2_DF_TF,
  MULTI_ARG_1_SF,
  MULTI_ARG_1_DF,
  MULTI_ARG_1_DI,
  MULTI_ARG_1_SI,
  MULTI_ARG_1_HI,
  MULTI_ARG_1_QI,
  MULTI_ARG_1_SI_DI,
  MULTI_ARG_1_HI_DI,
  MULTI_ARG_1_HI_SI,
  MULTI_ARG_1_QI_DI,
  MULTI_ARG_1_QI_SI,
  MULTI_ARG_1_QI_HI,
  MULTI_ARG_1_PH2PS,
  MULTI_ARG_1_PS2PH
};
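
/* SSE5 multi-argument builtins.  The fields mirror the other bdesc
   tables: ISA mask, insn code, user-visible name, IX86_BUILTIN code,
   comparison rtx code (0 when unused) and the MULTI_ARG
   classification used when expanding the builtin.  */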
static const struct builtin_description bdesc_multi_arg[] =
{
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv4sf4, "__builtin_ia32_fmaddss", IX86_BUILTIN_FMADDSS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv2df4, "__builtin_ia32_fmaddsd", IX86_BUILTIN_FMADDSD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv4sf4, "__builtin_ia32_fmaddps", IX86_BUILTIN_FMADDPS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv2df4, "__builtin_ia32_fmaddpd", IX86_BUILTIN_FMADDPD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv4sf4, "__builtin_ia32_fmsubss", IX86_BUILTIN_FMSUBSS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv2df4, "__builtin_ia32_fmsubsd", IX86_BUILTIN_FMSUBSD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv4sf4, "__builtin_ia32_fmsubps", IX86_BUILTIN_FMSUBPS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv2df4, "__builtin_ia32_fmsubpd", IX86_BUILTIN_FMSUBPD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv4sf4, "__builtin_ia32_fnmaddss", IX86_BUILTIN_FNMADDSS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv2df4, "__builtin_ia32_fnmaddsd", IX86_BUILTIN_FNMADDSD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv4sf4, "__builtin_ia32_fnmaddps", IX86_BUILTIN_FNMADDPS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv2df4, "__builtin_ia32_fnmaddpd", IX86_BUILTIN_FNMADDPD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv4sf4, "__builtin_ia32_fnmsubss", IX86_BUILTIN_FNMSUBSS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv2df4, "__builtin_ia32_fnmsubsd", IX86_BUILTIN_FNMSUBSD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv4sf4, "__builtin_ia32_fnmsubps", IX86_BUILTIN_FNMSUBPS, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv2df4, "__builtin_ia32_fnmsubpd", IX86_BUILTIN_FNMSUBPD, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov", IX86_BUILTIN_PCMOV, 0, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov_v2di", IX86_BUILTIN_PCMOV_V2DI, 0, (int)MULTI_ARG_3_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4si, "__builtin_ia32_pcmov_v4si", IX86_BUILTIN_PCMOV_V4SI, 0, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v8hi, "__builtin_ia32_pcmov_v8hi", IX86_BUILTIN_PCMOV_V8HI, 0, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v16qi, "__builtin_ia32_pcmov_v16qi", IX86_BUILTIN_PCMOV_V16QI, 0, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2df, "__builtin_ia32_pcmov_v2df", IX86_BUILTIN_PCMOV_V2DF, 0, (int)MULTI_ARG_3_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4sf, "__builtin_ia32_pcmov_v4sf", IX86_BUILTIN_PCMOV_V4SF, 0, (int)MULTI_ARG_3_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pperm, "__builtin_ia32_pperm", IX86_BUILTIN_PPERM, 0, (int)MULTI_ARG_3_QI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv4sf, "__builtin_ia32_permps", IX86_BUILTIN_PERMPS, 0, (int)MULTI_ARG_3_PERMPS },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv2df, "__builtin_ia32_permpd", IX86_BUILTIN_PERMPD, 0, (int)MULTI_ARG_3_PERMPD },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssww, "__builtin_ia32_pmacssww", IX86_BUILTIN_PMACSSWW, 0, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsww, "__builtin_ia32_pmacsww", IX86_BUILTIN_PMACSWW, 0, (int)MULTI_ARG_3_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsswd, "__builtin_ia32_pmacsswd", IX86_BUILTIN_PMACSSWD, 0, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacswd, "__builtin_ia32_pmacswd", IX86_BUILTIN_PMACSWD, 0, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdd, "__builtin_ia32_pmacssdd", IX86_BUILTIN_PMACSSDD, 0, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdd, "__builtin_ia32_pmacsdd", IX86_BUILTIN_PMACSDD, 0, (int)MULTI_ARG_3_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdql, "__builtin_ia32_pmacssdql", IX86_BUILTIN_PMACSSDQL, 0, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdqh, "__builtin_ia32_pmacssdqh", IX86_BUILTIN_PMACSSDQH, 0, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdql, "__builtin_ia32_pmacsdql", IX86_BUILTIN_PMACSDQL, 0, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdqh, "__builtin_ia32_pmacsdqh", IX86_BUILTIN_PMACSDQH, 0, (int)MULTI_ARG_3_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcsswd, "__builtin_ia32_pmadcsswd", IX86_BUILTIN_PMADCSSWD, 0, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcswd, "__builtin_ia32_pmadcswd", IX86_BUILTIN_PMADCSWD, 0, (int)MULTI_ARG_3_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv2di3, "__builtin_ia32_protq", IX86_BUILTIN_PROTQ, 0, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv4si3, "__builtin_ia32_protd", IX86_BUILTIN_PROTD, 0, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv8hi3, "__builtin_ia32_protw", IX86_BUILTIN_PROTW, 0, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv16qi3, "__builtin_ia32_protb", IX86_BUILTIN_PROTB, 0, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv2di3, "__builtin_ia32_protqi", IX86_BUILTIN_PROTQ_IMM, 0, (int)MULTI_ARG_2_DI_IMM },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv4si3, "__builtin_ia32_protdi", IX86_BUILTIN_PROTD_IMM, 0, (int)MULTI_ARG_2_SI_IMM },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv8hi3, "__builtin_ia32_protwi", IX86_BUILTIN_PROTW_IMM, 0, (int)MULTI_ARG_2_HI_IMM },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv16qi3, "__builtin_ia32_protbi", IX86_BUILTIN_PROTB_IMM, 0, (int)MULTI_ARG_2_QI_IMM },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv2di3, "__builtin_ia32_pshaq", IX86_BUILTIN_PSHAQ, 0, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv4si3, "__builtin_ia32_pshad", IX86_BUILTIN_PSHAD, 0, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv8hi3, "__builtin_ia32_pshaw", IX86_BUILTIN_PSHAW, 0, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv16qi3, "__builtin_ia32_pshab", IX86_BUILTIN_PSHAB, 0, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv2di3, "__builtin_ia32_pshlq", IX86_BUILTIN_PSHLQ, 0, (int)MULTI_ARG_2_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv4si3, "__builtin_ia32_pshld", IX86_BUILTIN_PSHLD, 0, (int)MULTI_ARG_2_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv8hi3, "__builtin_ia32_pshlw", IX86_BUILTIN_PSHLW, 0, (int)MULTI_ARG_2_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv16qi3, "__builtin_ia32_pshlb", IX86_BUILTIN_PSHLB, 0, (int)MULTI_ARG_2_QI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv4sf2, "__builtin_ia32_frczss", IX86_BUILTIN_FRCZSS, 0, (int)MULTI_ARG_2_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv2df2, "__builtin_ia32_frczsd", IX86_BUILTIN_FRCZSD, 0, (int)MULTI_ARG_2_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv4sf2, "__builtin_ia32_frczps", IX86_BUILTIN_FRCZPS, 0, (int)MULTI_ARG_1_SF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv2df2, "__builtin_ia32_frczpd", IX86_BUILTIN_FRCZPD, 0, (int)MULTI_ARG_1_DF },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtph2ps, "__builtin_ia32_cvtph2ps", IX86_BUILTIN_CVTPH2PS, 0, (int)MULTI_ARG_1_PH2PS },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtps2ph, "__builtin_ia32_cvtps2ph", IX86_BUILTIN_CVTPS2PH, 0, (int)MULTI_ARG_1_PS2PH },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbw, "__builtin_ia32_phaddbw", IX86_BUILTIN_PHADDBW, 0, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbd, "__builtin_ia32_phaddbd", IX86_BUILTIN_PHADDBD, 0, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbq, "__builtin_ia32_phaddbq", IX86_BUILTIN_PHADDBQ, 0, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwd, "__builtin_ia32_phaddwd", IX86_BUILTIN_PHADDWD, 0, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwq, "__builtin_ia32_phaddwq", IX86_BUILTIN_PHADDWQ, 0, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadddq, "__builtin_ia32_phadddq", IX86_BUILTIN_PHADDDQ, 0, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubw, "__builtin_ia32_phaddubw", IX86_BUILTIN_PHADDUBW, 0, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubd, "__builtin_ia32_phaddubd", IX86_BUILTIN_PHADDUBD, 0, (int)MULTI_ARG_1_QI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubq, "__builtin_ia32_phaddubq", IX86_BUILTIN_PHADDUBQ, 0, (int)MULTI_ARG_1_QI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwd, "__builtin_ia32_phadduwd", IX86_BUILTIN_PHADDUWD, 0, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwq, "__builtin_ia32_phadduwq", IX86_BUILTIN_PHADDUWQ, 0, (int)MULTI_ARG_1_HI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddudq, "__builtin_ia32_phaddudq", IX86_BUILTIN_PHADDUDQ, 0, (int)MULTI_ARG_1_SI_DI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubbw, "__builtin_ia32_phsubbw", IX86_BUILTIN_PHSUBBW, 0, (int)MULTI_ARG_1_QI_HI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubwd, "__builtin_ia32_phsubwd", IX86_BUILTIN_PHSUBWD, 0, (int)MULTI_ARG_1_HI_SI },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubdq, "__builtin_ia32_phsubdq", IX86_BUILTIN_PHSUBDQ, 0, (int)MULTI_ARG_1_SI_DI },

  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comeqss", IX86_BUILTIN_COMEQSS, EQ, (int)MULTI_ARG_2_SF_CMP },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comness", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comneqss", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comltss", IX86_BUILTIN_COMLTSS, LT, (int)MULTI_ARG_2_SF_CMP },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comless", IX86_BUILTIN_COMLESS, LE, (int)MULTI_ARG_2_SF_CMP },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgtss", IX86_BUILTIN_COMGTSS, GT, (int)MULTI_ARG_2_SF_CMP },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgess", IX86_BUILTIN_COMGESS, GE, (int)MULTI_ARG_2_SF_CMP },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comueqss", IX86_BUILTIN_COMUEQSS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
  { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuness", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21052 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuneqss", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21053 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunltss", IX86_BUILTIN_COMULTSS, UNLT, (int)MULTI_ARG_2_SF_CMP },
21054 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunless", IX86_BUILTIN_COMULESS, UNLE, (int)MULTI_ARG_2_SF_CMP },
21055 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungtss", IX86_BUILTIN_COMUGTSS, UNGT, (int)MULTI_ARG_2_SF_CMP },
21056 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungess", IX86_BUILTIN_COMUGESS, UNGE, (int)MULTI_ARG_2_SF_CMP },
21057 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comordss", IX86_BUILTIN_COMORDSS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
21058 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunordss", IX86_BUILTIN_COMUNORDSS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
21060 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comeqsd", IX86_BUILTIN_COMEQSD, EQ, (int)MULTI_ARG_2_DF_CMP },
21061 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comnesd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
21062 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comneqsd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
21063 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comltsd", IX86_BUILTIN_COMLTSD, LT, (int)MULTI_ARG_2_DF_CMP },
21064 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comlesd", IX86_BUILTIN_COMLESD, LE, (int)MULTI_ARG_2_DF_CMP },
21065 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgtsd", IX86_BUILTIN_COMGTSD, GT, (int)MULTI_ARG_2_DF_CMP },
21066 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgesd", IX86_BUILTIN_COMGESD, GE, (int)MULTI_ARG_2_DF_CMP },
21067 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comueqsd", IX86_BUILTIN_COMUEQSD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
21068 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunesd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21069 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comuneqsd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21070 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunltsd", IX86_BUILTIN_COMULTSD, UNLT, (int)MULTI_ARG_2_DF_CMP },
21071 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunlesd", IX86_BUILTIN_COMULESD, UNLE, (int)MULTI_ARG_2_DF_CMP },
21072 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungtsd", IX86_BUILTIN_COMUGTSD, UNGT, (int)MULTI_ARG_2_DF_CMP },
21073 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungesd", IX86_BUILTIN_COMUGESD, UNGE, (int)MULTI_ARG_2_DF_CMP },
21074 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comordsd", IX86_BUILTIN_COMORDSD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
21075 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunordsd", IX86_BUILTIN_COMUNORDSD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
21077 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comeqps", IX86_BUILTIN_COMEQPS, EQ, (int)MULTI_ARG_2_SF_CMP },
21078 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
21079 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneqps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
21080 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comltps", IX86_BUILTIN_COMLTPS, LT, (int)MULTI_ARG_2_SF_CMP },
21081 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comleps", IX86_BUILTIN_COMLEPS, LE, (int)MULTI_ARG_2_SF_CMP },
21082 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgtps", IX86_BUILTIN_COMGTPS, GT, (int)MULTI_ARG_2_SF_CMP },
21083 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgeps", IX86_BUILTIN_COMGEPS, GE, (int)MULTI_ARG_2_SF_CMP },
21084 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comueqps", IX86_BUILTIN_COMUEQPS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
21085 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21086 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneqps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21087 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunltps", IX86_BUILTIN_COMULTPS, UNLT, (int)MULTI_ARG_2_SF_CMP },
21088 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunleps", IX86_BUILTIN_COMULEPS, UNLE, (int)MULTI_ARG_2_SF_CMP },
21089 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungtps", IX86_BUILTIN_COMUGTPS, UNGT, (int)MULTI_ARG_2_SF_CMP },
21090 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungeps", IX86_BUILTIN_COMUGEPS, UNGE, (int)MULTI_ARG_2_SF_CMP },
21091 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comordps", IX86_BUILTIN_COMORDPS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
21092 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunordps", IX86_BUILTIN_COMUNORDPS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
21094 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comeqpd", IX86_BUILTIN_COMEQPD, EQ, (int)MULTI_ARG_2_DF_CMP },
21095 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comnepd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
21096 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comneqpd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
21097 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comltpd", IX86_BUILTIN_COMLTPD, LT, (int)MULTI_ARG_2_DF_CMP },
21098 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comlepd", IX86_BUILTIN_COMLEPD, LE, (int)MULTI_ARG_2_DF_CMP },
21099 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgtpd", IX86_BUILTIN_COMGTPD, GT, (int)MULTI_ARG_2_DF_CMP },
21100 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgepd", IX86_BUILTIN_COMGEPD, GE, (int)MULTI_ARG_2_DF_CMP },
21101 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comueqpd", IX86_BUILTIN_COMUEQPD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
21102 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunepd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21103 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comuneqpd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21104 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunltpd", IX86_BUILTIN_COMULTPD, UNLT, (int)MULTI_ARG_2_DF_CMP },
21105 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunlepd", IX86_BUILTIN_COMULEPD, UNLE, (int)MULTI_ARG_2_DF_CMP },
21106 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungtpd", IX86_BUILTIN_COMUGTPD, UNGT, (int)MULTI_ARG_2_DF_CMP },
21107 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungepd", IX86_BUILTIN_COMUGEPD, UNGE, (int)MULTI_ARG_2_DF_CMP },
21108 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comordpd", IX86_BUILTIN_COMORDPD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
21109 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunordpd", IX86_BUILTIN_COMUNORDPD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
21111 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomeqb", IX86_BUILTIN_PCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
21112 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
21113 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneqb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
21114 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomltb", IX86_BUILTIN_PCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
21115 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomleb", IX86_BUILTIN_PCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
21116 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgtb", IX86_BUILTIN_PCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
21117 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgeb", IX86_BUILTIN_PCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
21119 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomeqw", IX86_BUILTIN_PCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
21120 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomnew", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
21121 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomneqw", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
21122 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomltw", IX86_BUILTIN_PCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
21123 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomlew", IX86_BUILTIN_PCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
21124 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgtw", IX86_BUILTIN_PCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
21125 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgew", IX86_BUILTIN_PCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
21127 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomeqd", IX86_BUILTIN_PCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
21128 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomned", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
21129 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomneqd", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
21130 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomltd", IX86_BUILTIN_PCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
21131 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomled", IX86_BUILTIN_PCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
21132 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomgtd", IX86_BUILTIN_PCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
21133 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomged", IX86_BUILTIN_PCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
21135 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomeqq", IX86_BUILTIN_PCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
21136 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
21137 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneqq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
21138 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomltq", IX86_BUILTIN_PCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
21139 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomleq", IX86_BUILTIN_PCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
21140 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgtq", IX86_BUILTIN_PCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
21141 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgeq", IX86_BUILTIN_PCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
21143 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3, "__builtin_ia32_pcomequb", IX86_BUILTIN_PCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
21144 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3, "__builtin_ia32_pcomneub", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
21145 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3, "__builtin_ia32_pcomnequb", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
21146 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomltub", IX86_BUILTIN_PCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
21147 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomleub", IX86_BUILTIN_PCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
21148 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgtub", IX86_BUILTIN_PCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
21149 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgeub", IX86_BUILTIN_PCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
21151 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomequw", IX86_BUILTIN_PCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
21152 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomneuw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
21153 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomnequw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
21154 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomltuw", IX86_BUILTIN_PCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
21155 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomleuw", IX86_BUILTIN_PCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
21156 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgtuw", IX86_BUILTIN_PCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
21157 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgeuw", IX86_BUILTIN_PCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
21159 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomequd", IX86_BUILTIN_PCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
21160 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomneud", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
21161 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomnequd", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
21162 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomltud", IX86_BUILTIN_PCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
21163 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomleud", IX86_BUILTIN_PCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
21164 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgtud", IX86_BUILTIN_PCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
21165 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgeud", IX86_BUILTIN_PCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
21167 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomequq", IX86_BUILTIN_PCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
21168 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomneuq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
21169 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomnequq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
21170 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomltuq", IX86_BUILTIN_PCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
21171 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomleuq", IX86_BUILTIN_PCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
21172 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgtuq", IX86_BUILTIN_PCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
21173 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgeuq", IX86_BUILTIN_PCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
21175 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalsess", IX86_BUILTIN_COMFALSESS, COM_FALSE_S, (int)MULTI_ARG_2_SF_TF },
21176 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtruess", IX86_BUILTIN_COMTRUESS, COM_TRUE_S, (int)MULTI_ARG_2_SF_TF },
21177 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalseps", IX86_BUILTIN_COMFALSEPS, COM_FALSE_P, (int)MULTI_ARG_2_SF_TF },
21178 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtrueps", IX86_BUILTIN_COMTRUEPS, COM_TRUE_P, (int)MULTI_ARG_2_SF_TF },
21179 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsesd", IX86_BUILTIN_COMFALSESD, COM_FALSE_S, (int)MULTI_ARG_2_DF_TF },
21180 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruesd", IX86_BUILTIN_COMTRUESD, COM_TRUE_S, (int)MULTI_ARG_2_DF_TF },
21181 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsepd", IX86_BUILTIN_COMFALSEPD, COM_FALSE_P, (int)MULTI_ARG_2_DF_TF },
21182 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruepd", IX86_BUILTIN_COMTRUEPD, COM_TRUE_P, (int)MULTI_ARG_2_DF_TF },
21184 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseb", IX86_BUILTIN_PCOMFALSEB, PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
21185 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalsew", IX86_BUILTIN_PCOMFALSEW, PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
21186 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalsed", IX86_BUILTIN_PCOMFALSED, PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
21187 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseq", IX86_BUILTIN_PCOMFALSEQ, PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
21188 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseub", IX86_BUILTIN_PCOMFALSEUB, PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
21189 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalseuw", IX86_BUILTIN_PCOMFALSEUW, PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
21190 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalseud", IX86_BUILTIN_PCOMFALSEUD, PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
21191 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseuq", IX86_BUILTIN_PCOMFALSEUQ, PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
21193 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueb", IX86_BUILTIN_PCOMTRUEB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
21194 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtruew", IX86_BUILTIN_PCOMTRUEW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
21195 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrued", IX86_BUILTIN_PCOMTRUED, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
21196 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueq", IX86_BUILTIN_PCOMTRUEQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
21197 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueub", IX86_BUILTIN_PCOMTRUEUB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
21198 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtrueuw", IX86_BUILTIN_PCOMTRUEUW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
21199 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrueud", IX86_BUILTIN_PCOMTRUEUD, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
21200 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueuq", IX86_BUILTIN_PCOMTRUEUQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
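/* Shape of the table above: each entry supplies the ISA mask guarding the
   builtin, the insn pattern used to expand it, the builtin's name, its
   IX86_BUILTIN_* code, a comparison selector where one applies (an RTX
   code such as EQ or UNORDERED, or an SSE5 COM_*/PCOM_* value), and a
   MULTI_ARG_* class from which the init code below derives the function
   type.  Some names are deliberate aliases sharing one builtin code,
   e.g. __builtin_ia32_comness and __builtin_ia32_comneqss both map to
   IX86_BUILTIN_COMNESS.  */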
21203 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
21204 not in the current target ISA, so that the user can compile particular
21205 modules with target-specific options that differ from the command-line
21206 options. */
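/* For illustration only (hypothetical user code, not part of this file):
   registering everything up front is what lets a module say

     __attribute__((target ("sse5")))
     static __v4si mac (__v4si a, __v4si b, __v4si c)
     {
       return __builtin_ia32_pmacssdd (a, b, c);
     }

   even when the file as a whole is compiled without -msse5; per the
   comment above, availability is checked against the function's own
   target options later, not at registration time.  */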
21207 static void
21208 ix86_init_mmx_sse_builtins (void)
21209 {
21210 const struct builtin_description * d;
21211 size_t i;
21213 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
21214 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
21215 tree V1DI_type_node
21216 = build_vector_type_for_mode (long_long_integer_type_node, V1DImode);
21217 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
21218 tree V2DI_type_node
21219 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
21220 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
21221 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
21222 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
21223 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
21224 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
21225 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
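/* Presumed correspondence with the intrinsic headers (for orientation):
   V4SF_type_node matches
     typedef float __v4sf __attribute__ ((__vector_size__ (16)));
   and V2DI_type_node matches __v2di, the layout behind __m128i.  */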
21227 tree pchar_type_node = build_pointer_type (char_type_node);
21228 tree pcchar_type_node
21229 = build_pointer_type (build_type_variant (char_type_node, 1, 0));
21230 tree pfloat_type_node = build_pointer_type (float_type_node);
21231 tree pcfloat_type_node
21232 = build_pointer_type (build_type_variant (float_type_node, 1, 0));
21233 tree pv2sf_type_node = build_pointer_type (V2SF_type_node);
21234 tree pcv2sf_type_node
21235 = build_pointer_type (build_type_variant (V2SF_type_node, 1, 0));
21236 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
21237 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
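/* build_type_variant (T, 1, 0) requests the const-qualified, non-volatile
   variant of T, so pcchar_type_node is "const char *" and pcfloat_type_node
   is "const float *", as opposed to the plain pointers above them.  */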
21239 /* Comparisons. */
21240 tree int_ftype_v4sf_v4sf
21241 = build_function_type_list (integer_type_node,
21242 V4SF_type_node, V4SF_type_node, NULL_TREE);
21243 tree v4si_ftype_v4sf_v4sf
21244 = build_function_type_list (V4SI_type_node,
21245 V4SF_type_node, V4SF_type_node, NULL_TREE);
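/* Naming convention used throughout: <ret>_ftype_<arg1>_<arg2>...; e.g.
   int_ftype_v4sf_v4sf above is the type tree of "int f (__v4sf, __v4sf)".
   The build_function_type_list argument lists are NULL_TREE-terminated.  */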
21246 /* MMX/SSE/integer conversions. */
21247 tree int_ftype_v4sf
21248 = build_function_type_list (integer_type_node,
21249 V4SF_type_node, NULL_TREE);
21250 tree int64_ftype_v4sf
21251 = build_function_type_list (long_long_integer_type_node,
21252 V4SF_type_node, NULL_TREE);
21253 tree int_ftype_v8qi
21254 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
21255 tree v4sf_ftype_v4sf_int
21256 = build_function_type_list (V4SF_type_node,
21257 V4SF_type_node, integer_type_node, NULL_TREE);
21258 tree v4sf_ftype_v4sf_int64
21259 = build_function_type_list (V4SF_type_node,
21260 V4SF_type_node, long_long_integer_type_node,
21261 NULL_TREE);
21262 tree v4sf_ftype_v4sf_v2si
21263 = build_function_type_list (V4SF_type_node,
21264 V4SF_type_node, V2SI_type_node, NULL_TREE);
21266 /* Miscellaneous. */
21267 tree v8qi_ftype_v4hi_v4hi
21268 = build_function_type_list (V8QI_type_node,
21269 V4HI_type_node, V4HI_type_node, NULL_TREE);
21270 tree v4hi_ftype_v2si_v2si
21271 = build_function_type_list (V4HI_type_node,
21272 V2SI_type_node, V2SI_type_node, NULL_TREE);
21273 tree v4sf_ftype_v4sf_v4sf_int
21274 = build_function_type_list (V4SF_type_node,
21275 V4SF_type_node, V4SF_type_node,
21276 integer_type_node, NULL_TREE);
21277 tree v2si_ftype_v4hi_v4hi
21278 = build_function_type_list (V2SI_type_node,
21279 V4HI_type_node, V4HI_type_node, NULL_TREE);
21280 tree v4hi_ftype_v4hi_int
21281 = build_function_type_list (V4HI_type_node,
21282 V4HI_type_node, integer_type_node, NULL_TREE);
21283 tree v2si_ftype_v2si_int
21284 = build_function_type_list (V2SI_type_node,
21285 V2SI_type_node, integer_type_node, NULL_TREE);
21286 tree v1di_ftype_v1di_int
21287 = build_function_type_list (V1DI_type_node,
21288 V1DI_type_node, integer_type_node, NULL_TREE);
21290 tree void_ftype_void
21291 = build_function_type (void_type_node, void_list_node);
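/* void_ftype_void is built with build_function_type and the shared
   void_list_node chain, giving the prototyped "void f (void)".  */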
21292 tree void_ftype_unsigned
21293 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
21294 tree void_ftype_unsigned_unsigned
21295 = build_function_type_list (void_type_node, unsigned_type_node,
21296 unsigned_type_node, NULL_TREE);
21297 tree void_ftype_pcvoid_unsigned_unsigned
21298 = build_function_type_list (void_type_node, const_ptr_type_node,
21299 unsigned_type_node, unsigned_type_node,
21300 NULL_TREE);
21301 tree unsigned_ftype_void
21302 = build_function_type (unsigned_type_node, void_list_node);
21303 tree v2si_ftype_v4sf
21304 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
21305 /* Loads/stores. */
21306 tree void_ftype_v8qi_v8qi_pchar
21307 = build_function_type_list (void_type_node,
21308 V8QI_type_node, V8QI_type_node,
21309 pchar_type_node, NULL_TREE);
21310 tree v4sf_ftype_pcfloat
21311 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
21312 tree v4sf_ftype_v4sf_pcv2sf
21313 = build_function_type_list (V4SF_type_node,
21314 V4SF_type_node, pcv2sf_type_node, NULL_TREE);
21315 tree void_ftype_pv2sf_v4sf
21316 = build_function_type_list (void_type_node,
21317 pv2sf_type_node, V4SF_type_node, NULL_TREE);
21318 tree void_ftype_pfloat_v4sf
21319 = build_function_type_list (void_type_node,
21320 pfloat_type_node, V4SF_type_node, NULL_TREE);
21321 tree void_ftype_pdi_di
21322 = build_function_type_list (void_type_node,
21323 pdi_type_node, long_long_unsigned_type_node,
21324 NULL_TREE);
21325 tree void_ftype_pv2di_v2di
21326 = build_function_type_list (void_type_node,
21327 pv2di_type_node, V2DI_type_node, NULL_TREE);
21328 /* Normal vector unops. */
21329 tree v4sf_ftype_v4sf
21330 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
21331 tree v16qi_ftype_v16qi
21332 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
21333 tree v8hi_ftype_v8hi
21334 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
21335 tree v4si_ftype_v4si
21336 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
21337 tree v8qi_ftype_v8qi
21338 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
21339 tree v4hi_ftype_v4hi
21340 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
21342 /* Normal vector binops. */
21343 tree v4sf_ftype_v4sf_v4sf
21344 = build_function_type_list (V4SF_type_node,
21345 V4SF_type_node, V4SF_type_node, NULL_TREE);
21346 tree v8qi_ftype_v8qi_v8qi
21347 = build_function_type_list (V8QI_type_node,
21348 V8QI_type_node, V8QI_type_node, NULL_TREE);
21349 tree v4hi_ftype_v4hi_v4hi
21350 = build_function_type_list (V4HI_type_node,
21351 V4HI_type_node, V4HI_type_node, NULL_TREE);
21352 tree v2si_ftype_v2si_v2si
21353 = build_function_type_list (V2SI_type_node,
21354 V2SI_type_node, V2SI_type_node, NULL_TREE);
21355 tree v1di_ftype_v1di_v1di
21356 = build_function_type_list (V1DI_type_node,
21357 V1DI_type_node, V1DI_type_node, NULL_TREE);
21358 tree v1di_ftype_v1di_v1di_int
21359 = build_function_type_list (V1DI_type_node,
21360 V1DI_type_node, V1DI_type_node,
21361 integer_type_node, NULL_TREE);
21362 tree v2si_ftype_v2sf
21363 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
21364 tree v2sf_ftype_v2si
21365 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
21366 tree v2si_ftype_v2si
21367 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
21368 tree v2sf_ftype_v2sf
21369 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
21370 tree v2sf_ftype_v2sf_v2sf
21371 = build_function_type_list (V2SF_type_node,
21372 V2SF_type_node, V2SF_type_node, NULL_TREE);
21373 tree v2si_ftype_v2sf_v2sf
21374 = build_function_type_list (V2SI_type_node,
21375 V2SF_type_node, V2SF_type_node, NULL_TREE);
21376 tree pint_type_node = build_pointer_type (integer_type_node);
21377 tree pdouble_type_node = build_pointer_type (double_type_node);
21378 tree pcdouble_type_node = build_pointer_type (
21379 build_type_variant (double_type_node, 1, 0));
21380 tree int_ftype_v2df_v2df
21381 = build_function_type_list (integer_type_node,
21382 V2DF_type_node, V2DF_type_node, NULL_TREE);
21384 tree void_ftype_pcvoid
21385 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
21386 tree v4sf_ftype_v4si
21387 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
21388 tree v4si_ftype_v4sf
21389 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
21390 tree v2df_ftype_v4si
21391 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
21392 tree v4si_ftype_v2df
21393 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
21394 tree v4si_ftype_v2df_v2df
21395 = build_function_type_list (V4SI_type_node,
21396 V2DF_type_node, V2DF_type_node, NULL_TREE);
21397 tree v2si_ftype_v2df
21398 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
21399 tree v4sf_ftype_v2df
21400 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
21401 tree v2df_ftype_v2si
21402 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
21403 tree v2df_ftype_v4sf
21404 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
21405 tree int_ftype_v2df
21406 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
21407 tree int64_ftype_v2df
21408 = build_function_type_list (long_long_integer_type_node,
21409 V2DF_type_node, NULL_TREE);
21410 tree v2df_ftype_v2df_int
21411 = build_function_type_list (V2DF_type_node,
21412 V2DF_type_node, integer_type_node, NULL_TREE);
21413 tree v2df_ftype_v2df_int64
21414 = build_function_type_list (V2DF_type_node,
21415 V2DF_type_node, long_long_integer_type_node,
21416 NULL_TREE);
21417 tree v4sf_ftype_v4sf_v2df
21418 = build_function_type_list (V4SF_type_node,
21419 V4SF_type_node, V2DF_type_node, NULL_TREE);
21420 tree v2df_ftype_v2df_v4sf
21421 = build_function_type_list (V2DF_type_node,
21422 V2DF_type_node, V4SF_type_node, NULL_TREE);
21423 tree v2df_ftype_v2df_v2df_int
21424 = build_function_type_list (V2DF_type_node,
21425 V2DF_type_node, V2DF_type_node,
21426 integer_type_node,
21427 NULL_TREE);
21428 tree v2df_ftype_v2df_pcdouble
21429 = build_function_type_list (V2DF_type_node,
21430 V2DF_type_node, pcdouble_type_node, NULL_TREE);
21431 tree void_ftype_pdouble_v2df
21432 = build_function_type_list (void_type_node,
21433 pdouble_type_node, V2DF_type_node, NULL_TREE);
21434 tree void_ftype_pint_int
21435 = build_function_type_list (void_type_node,
21436 pint_type_node, integer_type_node, NULL_TREE);
21437 tree void_ftype_v16qi_v16qi_pchar
21438 = build_function_type_list (void_type_node,
21439 V16QI_type_node, V16QI_type_node,
21440 pchar_type_node, NULL_TREE);
21441 tree v2df_ftype_pcdouble
21442 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
21443 tree v2df_ftype_v2df_v2df
21444 = build_function_type_list (V2DF_type_node,
21445 V2DF_type_node, V2DF_type_node, NULL_TREE);
21446 tree v16qi_ftype_v16qi_v16qi
21447 = build_function_type_list (V16QI_type_node,
21448 V16QI_type_node, V16QI_type_node, NULL_TREE);
21449 tree v8hi_ftype_v8hi_v8hi
21450 = build_function_type_list (V8HI_type_node,
21451 V8HI_type_node, V8HI_type_node, NULL_TREE);
21452 tree v4si_ftype_v4si_v4si
21453 = build_function_type_list (V4SI_type_node,
21454 V4SI_type_node, V4SI_type_node, NULL_TREE);
21455 tree v2di_ftype_v2di_v2di
21456 = build_function_type_list (V2DI_type_node,
21457 V2DI_type_node, V2DI_type_node, NULL_TREE);
21458 tree v2di_ftype_v2df_v2df
21459 = build_function_type_list (V2DI_type_node,
21460 V2DF_type_node, V2DF_type_node, NULL_TREE);
21461 tree v2df_ftype_v2df
21462 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
21463 tree v2di_ftype_v2di_int
21464 = build_function_type_list (V2DI_type_node,
21465 V2DI_type_node, integer_type_node, NULL_TREE);
21466 tree v2di_ftype_v2di_v2di_int
21467 = build_function_type_list (V2DI_type_node, V2DI_type_node,
21468 V2DI_type_node, integer_type_node, NULL_TREE);
21469 tree v4si_ftype_v4si_int
21470 = build_function_type_list (V4SI_type_node,
21471 V4SI_type_node, integer_type_node, NULL_TREE);
21472 tree v8hi_ftype_v8hi_int
21473 = build_function_type_list (V8HI_type_node,
21474 V8HI_type_node, integer_type_node, NULL_TREE);
21475 tree v4si_ftype_v8hi_v8hi
21476 = build_function_type_list (V4SI_type_node,
21477 V8HI_type_node, V8HI_type_node, NULL_TREE);
21478 tree v1di_ftype_v8qi_v8qi
21479 = build_function_type_list (V1DI_type_node,
21480 V8QI_type_node, V8QI_type_node, NULL_TREE);
21481 tree v1di_ftype_v2si_v2si
21482 = build_function_type_list (V1DI_type_node,
21483 V2SI_type_node, V2SI_type_node, NULL_TREE);
21484 tree v2di_ftype_v16qi_v16qi
21485 = build_function_type_list (V2DI_type_node,
21486 V16QI_type_node, V16QI_type_node, NULL_TREE);
21487 tree v2di_ftype_v4si_v4si
21488 = build_function_type_list (V2DI_type_node,
21489 V4SI_type_node, V4SI_type_node, NULL_TREE);
21490 tree int_ftype_v16qi
21491 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
21492 tree v16qi_ftype_pcchar
21493 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
21494 tree void_ftype_pchar_v16qi
21495 = build_function_type_list (void_type_node,
21496 pchar_type_node, V16QI_type_node, NULL_TREE);
21498 tree v2di_ftype_v2di_unsigned_unsigned
21499 = build_function_type_list (V2DI_type_node, V2DI_type_node,
21500 unsigned_type_node, unsigned_type_node,
21501 NULL_TREE);
21502 tree v2di_ftype_v2di_v2di_unsigned_unsigned
21503 = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
21504 unsigned_type_node, unsigned_type_node,
21505 NULL_TREE);
21506 tree v2di_ftype_v2di_v16qi
21507 = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
21508 NULL_TREE);
21509 tree v2df_ftype_v2df_v2df_v2df
21510 = build_function_type_list (V2DF_type_node,
21511 V2DF_type_node, V2DF_type_node,
21512 V2DF_type_node, NULL_TREE);
21513 tree v4sf_ftype_v4sf_v4sf_v4sf
21514 = build_function_type_list (V4SF_type_node,
21515 V4SF_type_node, V4SF_type_node,
21516 V4SF_type_node, NULL_TREE);
21517 tree v8hi_ftype_v16qi
21518 = build_function_type_list (V8HI_type_node, V16QI_type_node,
21519 NULL_TREE);
21520 tree v4si_ftype_v16qi
21521 = build_function_type_list (V4SI_type_node, V16QI_type_node,
21522 NULL_TREE);
21523 tree v2di_ftype_v16qi
21524 = build_function_type_list (V2DI_type_node, V16QI_type_node,
21525 NULL_TREE);
21526 tree v4si_ftype_v8hi
21527 = build_function_type_list (V4SI_type_node, V8HI_type_node,
21528 NULL_TREE);
21529 tree v2di_ftype_v8hi
21530 = build_function_type_list (V2DI_type_node, V8HI_type_node,
21531 NULL_TREE);
21532 tree v2di_ftype_v4si
21533 = build_function_type_list (V2DI_type_node, V4SI_type_node,
21534 NULL_TREE);
21535 tree v2di_ftype_pv2di
21536 = build_function_type_list (V2DI_type_node, pv2di_type_node,
21537 NULL_TREE);
21538 tree v16qi_ftype_v16qi_v16qi_int
21539 = build_function_type_list (V16QI_type_node, V16QI_type_node,
21540 V16QI_type_node, integer_type_node,
21541 NULL_TREE);
21542 tree v16qi_ftype_v16qi_v16qi_v16qi
21543 = build_function_type_list (V16QI_type_node, V16QI_type_node,
21544 V16QI_type_node, V16QI_type_node,
21545 NULL_TREE);
21546 tree v8hi_ftype_v8hi_v8hi_int
21547 = build_function_type_list (V8HI_type_node, V8HI_type_node,
21548 V8HI_type_node, integer_type_node,
21549 NULL_TREE);
21550 tree v4si_ftype_v4si_v4si_int
21551 = build_function_type_list (V4SI_type_node, V4SI_type_node,
21552 V4SI_type_node, integer_type_node,
21553 NULL_TREE);
21554 tree int_ftype_v2di_v2di
21555 = build_function_type_list (integer_type_node,
21556 V2DI_type_node, V2DI_type_node,
21557 NULL_TREE);
21558 tree int_ftype_v16qi_int_v16qi_int_int
21559 = build_function_type_list (integer_type_node,
21560 V16QI_type_node,
21561 integer_type_node,
21562 V16QI_type_node,
21563 integer_type_node,
21564 integer_type_node,
21565 NULL_TREE);
21566 tree v16qi_ftype_v16qi_int_v16qi_int_int
21567 = build_function_type_list (V16QI_type_node,
21568 V16QI_type_node,
21569 integer_type_node,
21570 V16QI_type_node,
21571 integer_type_node,
21572 integer_type_node,
21573 NULL_TREE);
21574 tree int_ftype_v16qi_v16qi_int
21575 = build_function_type_list (integer_type_node,
21576 V16QI_type_node,
21577 V16QI_type_node,
21578 integer_type_node,
21579 NULL_TREE);
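/* These v16qi-plus-length-plus-immediate signatures presumably serve the
   SSE4.2 string-compare builtins (the pcmpestr*/pcmpistr* family), which
   take two vectors, optional explicit lengths, and a mode immediate.  */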
21581 /* SSE5 instructions */
21582 tree v2di_ftype_v2di_v2di_v2di
21583 = build_function_type_list (V2DI_type_node,
21584 V2DI_type_node,
21585 V2DI_type_node,
21586 V2DI_type_node,
21587 NULL_TREE);
21589 tree v4si_ftype_v4si_v4si_v4si
21590 = build_function_type_list (V4SI_type_node,
21591 V4SI_type_node,
21592 V4SI_type_node,
21593 V4SI_type_node,
21594 NULL_TREE);
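/* These three-operand SSE5 signatures back multi-arg builtins such as the
   pmacs* multiply-accumulate entries in the table above; e.g.
   __builtin_ia32_pmacssdd (MULTI_ARG_3_SI) presumably resolves to
   v4si_ftype_v4si_v4si_v4si.  */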
21596 tree v4si_ftype_v4si_v4si_v2di
21597 = build_function_type_list (V4SI_type_node,
21598 V4SI_type_node,
21599 V4SI_type_node,
21600 V2DI_type_node,
21601 NULL_TREE);
21603 tree v8hi_ftype_v8hi_v8hi_v8hi
21604 = build_function_type_list (V8HI_type_node,
21605 V8HI_type_node,
21606 V8HI_type_node,
21607 V8HI_type_node,
21608 NULL_TREE);
21610 tree v8hi_ftype_v8hi_v8hi_v4si
21611 = build_function_type_list (V8HI_type_node,
21612 V8HI_type_node,
21613 V8HI_type_node,
21614 V4SI_type_node,
21615 NULL_TREE);
21617 tree v2df_ftype_v2df_v2df_v16qi
21618 = build_function_type_list (V2DF_type_node,
21619 V2DF_type_node,
21620 V2DF_type_node,
21621 V16QI_type_node,
21622 NULL_TREE);
21624 tree v4sf_ftype_v4sf_v4sf_v16qi
21625 = build_function_type_list (V4SF_type_node,
21626 V4SF_type_node,
21627 V4SF_type_node,
21628 V16QI_type_node,
21629 NULL_TREE);
21631 tree v2di_ftype_v2di_si
21632 = build_function_type_list (V2DI_type_node,
21633 V2DI_type_node,
21634 integer_type_node,
21635 NULL_TREE);
21637 tree v4si_ftype_v4si_si
21638 = build_function_type_list (V4SI_type_node,
21639 V4SI_type_node,
21640 integer_type_node,
21641 NULL_TREE);
21643 tree v8hi_ftype_v8hi_si
21644 = build_function_type_list (V8HI_type_node,
21645 V8HI_type_node,
21646 integer_type_node,
21647 NULL_TREE);
21649 tree v16qi_ftype_v16qi_si
21650 = build_function_type_list (V16QI_type_node,
21651 V16QI_type_node,
21652 integer_type_node,
21653 NULL_TREE);
21654 tree v4sf_ftype_v4hi
21655 = build_function_type_list (V4SF_type_node,
21656 V4HI_type_node,
21657 NULL_TREE);
21659 tree v4hi_ftype_v4sf
21660 = build_function_type_list (V4HI_type_node,
21661 V4SF_type_node,
21662 NULL_TREE);
21664 tree v2di_ftype_v2di
21665 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
21667 tree v16qi_ftype_v8hi_v8hi
21668 = build_function_type_list (V16QI_type_node,
21669 V8HI_type_node, V8HI_type_node,
21670 NULL_TREE);
21671 tree v8hi_ftype_v4si_v4si
21672 = build_function_type_list (V8HI_type_node,
21673 V4SI_type_node, V4SI_type_node,
21674 NULL_TREE);
21675 tree v8hi_ftype_v16qi_v16qi
21676 = build_function_type_list (V8HI_type_node,
21677 V16QI_type_node, V16QI_type_node,
21678 NULL_TREE);
21679 tree v4hi_ftype_v8qi_v8qi
21680 = build_function_type_list (V4HI_type_node,
21681 V8QI_type_node, V8QI_type_node,
21682 NULL_TREE);
21683 tree unsigned_ftype_unsigned_uchar
21684 = build_function_type_list (unsigned_type_node,
21685 unsigned_type_node,
21686 unsigned_char_type_node,
21687 NULL_TREE);
21688 tree unsigned_ftype_unsigned_ushort
21689 = build_function_type_list (unsigned_type_node,
21690 unsigned_type_node,
21691 short_unsigned_type_node,
21692 NULL_TREE);
21693 tree unsigned_ftype_unsigned_unsigned
21694 = build_function_type_list (unsigned_type_node,
21695 unsigned_type_node,
21696 unsigned_type_node,
21697 NULL_TREE);
21698 tree uint64_ftype_uint64_uint64
21699 = build_function_type_list (long_long_unsigned_type_node,
21700 long_long_unsigned_type_node,
21701 long_long_unsigned_type_node,
21702 NULL_TREE);
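/* These scalar unsigned signatures presumably correspond to the SSE4.2
   CRC32 builtins (__builtin_ia32_crc32qi/hi/si/di), which accumulate a
   CRC over one byte, halfword, word or doubleword at a time.  */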
21703 tree float_ftype_float
21704 = build_function_type_list (float_type_node,
21705 float_type_node,
21706 NULL_TREE);
21708 /* AVX builtins */
21709 tree V32QI_type_node = build_vector_type_for_mode (char_type_node,
21710 V32QImode);
21711 tree V8SI_type_node = build_vector_type_for_mode (intSI_type_node,
21712 V8SImode);
21713 tree V8SF_type_node = build_vector_type_for_mode (float_type_node,
21714 V8SFmode);
21715 tree V4DI_type_node = build_vector_type_for_mode (long_long_integer_type_node,
21716 V4DImode);
21717 tree V4DF_type_node = build_vector_type_for_mode (double_type_node,
21718 V4DFmode);
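/* The AVX types above are the 256-bit (%ymm-sized) counterparts of the
   128-bit vectors: V8SF is eight floats, V4DF four doubles, V8SI eight
   ints, V4DI four 64-bit ints, V32QI thirty-two bytes.  */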
21719 tree v8sf_ftype_v8sf
21720 = build_function_type_list (V8SF_type_node,
21721 V8SF_type_node,
21722 NULL_TREE);
21723 tree v8si_ftype_v8sf
21724 = build_function_type_list (V8SI_type_node,
21725 V8SF_type_node,
21726 NULL_TREE);
21727 tree v8sf_ftype_v8si
21728 = build_function_type_list (V8SF_type_node,
21729 V8SI_type_node,
21730 NULL_TREE);
21731 tree v4si_ftype_v4df
21732 = build_function_type_list (V4SI_type_node,
21733 V4DF_type_node,
21734 NULL_TREE);
21735 tree v4df_ftype_v4df
21736 = build_function_type_list (V4DF_type_node,
21737 V4DF_type_node,
21738 NULL_TREE);
21739 tree v4df_ftype_v4si
21740 = build_function_type_list (V4DF_type_node,
21741 V4SI_type_node,
21742 NULL_TREE);
21743 tree v4df_ftype_v4sf
21744 = build_function_type_list (V4DF_type_node,
21745 V4SF_type_node,
21746 NULL_TREE);
21747 tree v4sf_ftype_v4df
21748 = build_function_type_list (V4SF_type_node,
21749 V4DF_type_node,
21750 NULL_TREE);
21751 tree v8sf_ftype_v8sf_v8sf
21752 = build_function_type_list (V8SF_type_node,
21753 V8SF_type_node, V8SF_type_node,
21754 NULL_TREE);
21755 tree v4df_ftype_v4df_v4df
21756 = build_function_type_list (V4DF_type_node,
21757 V4DF_type_node, V4DF_type_node,
21758 NULL_TREE);
21759 tree v8sf_ftype_v8sf_int
21760 = build_function_type_list (V8SF_type_node,
21761 V8SF_type_node, integer_type_node,
21762 NULL_TREE);
21763 tree v4si_ftype_v8si_int
21764 = build_function_type_list (V4SI_type_node,
21765 V8SI_type_node, integer_type_node,
21766 NULL_TREE);
21767 tree v4df_ftype_v4df_int
21768 = build_function_type_list (V4DF_type_node,
21769 V4DF_type_node, integer_type_node,
21770 NULL_TREE);
21771 tree v4sf_ftype_v8sf_int
21772 = build_function_type_list (V4SF_type_node,
21773 V8SF_type_node, integer_type_node,
21774 NULL_TREE);
21775 tree v2df_ftype_v4df_int
21776 = build_function_type_list (V2DF_type_node,
21777 V4DF_type_node, integer_type_node,
21778 NULL_TREE);
21779 tree v8sf_ftype_v8sf_v8sf_int
21780 = build_function_type_list (V8SF_type_node,
21781 V8SF_type_node, V8SF_type_node,
21782 integer_type_node,
21783 NULL_TREE);
21784 tree v8sf_ftype_v8sf_v8sf_v8sf
21785 = build_function_type_list (V8SF_type_node,
21786 V8SF_type_node, V8SF_type_node,
21787 V8SF_type_node,
21788 NULL_TREE);
21789 tree v4df_ftype_v4df_v4df_v4df
21790 = build_function_type_list (V4DF_type_node,
21791 V4DF_type_node, V4DF_type_node,
21792 V4DF_type_node,
21793 NULL_TREE);
21794 tree v8si_ftype_v8si_v8si_int
21795 = build_function_type_list (V8SI_type_node,
21796 V8SI_type_node, V8SI_type_node,
21797 integer_type_node,
21798 NULL_TREE);
21799 tree v4df_ftype_v4df_v4df_int
21800 = build_function_type_list (V4DF_type_node,
21801 V4DF_type_node, V4DF_type_node,
21802 integer_type_node,
21803 NULL_TREE);
21804 tree v8sf_ftype_v8sf_v8sf_v8si_int
21805 = build_function_type_list (V8SF_type_node,
21806 V8SF_type_node, V8SF_type_node,
21807 V8SI_type_node, integer_type_node,
21808 NULL_TREE);
21809 tree v4df_ftype_v4df_v4df_v4di_int
21810 = build_function_type_list (V4DF_type_node,
21811 V4DF_type_node, V4DF_type_node,
21812 V4DI_type_node, integer_type_node,
21813 NULL_TREE);
21814 tree v4sf_ftype_v4sf_v4sf_v4si_int
21815 = build_function_type_list (V4SF_type_node,
21816 V4SF_type_node, V4SF_type_node,
21817 V4SI_type_node, integer_type_node,
21818 NULL_TREE);
21819 tree v2df_ftype_v2df_v2df_v2di_int
21820 = build_function_type_list (V2DF_type_node,
21821 V2DF_type_node, V2DF_type_node,
21822 V2DI_type_node, integer_type_node,
21823 NULL_TREE);
21824 tree v8sf_ftype_pcfloat
21825 = build_function_type_list (V8SF_type_node,
21826 pcfloat_type_node,
21827 NULL_TREE);
21828 tree v4df_ftype_pcdouble
21829 = build_function_type_list (V4DF_type_node,
21830 pcdouble_type_node,
21831 NULL_TREE);
21832 tree pcv4sf_type_node
21833 = build_pointer_type (build_type_variant (V4SF_type_node, 1, 0));
21834 tree pcv2df_type_node
21835 = build_pointer_type (build_type_variant (V2DF_type_node, 1, 0));
21836 tree v8sf_ftype_pcv4sf
21837 = build_function_type_list (V8SF_type_node,
21838 pcv4sf_type_node,
21839 NULL_TREE);
21840 tree v4df_ftype_pcv2df
21841 = build_function_type_list (V4DF_type_node,
21842 pcv2df_type_node,
21843 NULL_TREE);
21844 tree v32qi_ftype_pcchar
21845 = build_function_type_list (V32QI_type_node,
21846 pcchar_type_node,
21847 NULL_TREE);
21848 tree void_ftype_pchar_v32qi
21849 = build_function_type_list (void_type_node,
21850 pchar_type_node, V32QI_type_node,
21851 NULL_TREE);
21852 tree v8si_ftype_v8si_v4si_int
21853 = build_function_type_list (V8SI_type_node,
21854 V8SI_type_node, V4SI_type_node,
21855 integer_type_node,
21856 NULL_TREE);
21857 tree v8sf_ftype_v8sf_v4sf_int
21858 = build_function_type_list (V8SF_type_node,
21859 V8SF_type_node, V4SF_type_node,
21860 integer_type_node,
21861 NULL_TREE);
21862 tree v4df_ftype_v4df_v2df_int
21863 = build_function_type_list (V4DF_type_node,
21864 V4DF_type_node, V2DF_type_node,
21865 integer_type_node,
21866 NULL_TREE);
21867 tree void_ftype_pfloat_v8sf
21868 = build_function_type_list (void_type_node,
21869 pfloat_type_node, V8SF_type_node,
21870 NULL_TREE);
21871 tree void_ftype_pdouble_v4df
21872 = build_function_type_list (void_type_node,
21873 pdouble_type_node, V4DF_type_node,
21874 NULL_TREE);
21875 tree pv8sf_type_node = build_pointer_type (V8SF_type_node);
21876 tree pv4sf_type_node = build_pointer_type (V4SF_type_node);
21877 tree pv4df_type_node = build_pointer_type (V4DF_type_node);
21878 tree pv2df_type_node = build_pointer_type (V2DF_type_node);
21879 tree pcv8sf_type_node
21880 = build_pointer_type (build_type_variant (V8SF_type_node, 1, 0));
21881 tree pcv4df_type_node
21882 = build_pointer_type (build_type_variant (V4DF_type_node, 1, 0));
21883 tree v8sf_ftype_pcv8sf_v8sf
21884 = build_function_type_list (V8SF_type_node,
21885 pcv8sf_type_node, V8SF_type_node,
21886 NULL_TREE);
21887 tree v4df_ftype_pcv4df_v4df
21888 = build_function_type_list (V4DF_type_node,
21889 pcv4df_type_node, V4DF_type_node,
21890 NULL_TREE);
21891 tree v4sf_ftype_pcv4sf_v4sf
21892 = build_function_type_list (V4SF_type_node,
21893 pcv4sf_type_node, V4SF_type_node,
21894 NULL_TREE);
21895 tree v2df_ftype_pcv2df_v2df
21896 = build_function_type_list (V2DF_type_node,
21897 pcv2df_type_node, V2DF_type_node,
21898 NULL_TREE);
21899 tree void_ftype_pv8sf_v8sf_v8sf
21900 = build_function_type_list (void_type_node,
21901 pv8sf_type_node, V8SF_type_node,
21902 V8SF_type_node,
21903 NULL_TREE);
21904 tree void_ftype_pv4df_v4df_v4df
21905 = build_function_type_list (void_type_node,
21906 pv4df_type_node, V4DF_type_node,
21907 V4DF_type_node,
21908 NULL_TREE);
21909 tree void_ftype_pv4sf_v4sf_v4sf
21910 = build_function_type_list (void_type_node,
21911 pv4sf_type_node, V4SF_type_node,
21912 V4SF_type_node,
21913 NULL_TREE);
21914 tree void_ftype_pv2df_v2df_v2df
21915 = build_function_type_list (void_type_node,
21916 pv2df_type_node, V2DF_type_node,
21917 V2DF_type_node,
21918 NULL_TREE);
21919 tree v4df_ftype_v2df
21920 = build_function_type_list (V4DF_type_node,
21921 V2DF_type_node,
21922 NULL_TREE);
21923 tree v8sf_ftype_v4sf
21924 = build_function_type_list (V8SF_type_node,
21925 V4SF_type_node,
21926 NULL_TREE);
21927 tree v8si_ftype_v4si
21928 = build_function_type_list (V8SI_type_node,
21929 V4SI_type_node,
21930 NULL_TREE);
21931 tree v2df_ftype_v4df
21932 = build_function_type_list (V2DF_type_node,
21933 V4DF_type_node,
21934 NULL_TREE);
21935 tree v4sf_ftype_v8sf
21936 = build_function_type_list (V4SF_type_node,
21937 V8SF_type_node,
21938 NULL_TREE);
21939 tree v4si_ftype_v8si
21940 = build_function_type_list (V4SI_type_node,
21941 V8SI_type_node,
21942 NULL_TREE);
21943 tree int_ftype_v4df
21944 = build_function_type_list (integer_type_node,
21945 V4DF_type_node,
21946 NULL_TREE);
21947 tree int_ftype_v8sf
21948 = build_function_type_list (integer_type_node,
21949 V8SF_type_node,
21950 NULL_TREE);
21951 tree int_ftype_v8sf_v8sf
21952 = build_function_type_list (integer_type_node,
21953 V8SF_type_node, V8SF_type_node,
21954 NULL_TREE);
21955 tree int_ftype_v4di_v4di
21956 = build_function_type_list (integer_type_node,
21957 V4DI_type_node, V4DI_type_node,
21958 NULL_TREE);
21959 tree int_ftype_v4df_v4df
21960 = build_function_type_list (integer_type_node,
21961 V4DF_type_node, V4DF_type_node,
21962 NULL_TREE);
21963 tree v8sf_ftype_v8sf_v8si
21964 = build_function_type_list (V8SF_type_node,
21965 V8SF_type_node, V8SI_type_node,
21966 NULL_TREE);
21967 tree v4df_ftype_v4df_v4di
21968 = build_function_type_list (V4DF_type_node,
21969 V4DF_type_node, V4DI_type_node,
21970 NULL_TREE);
21971 tree v4sf_ftype_v4sf_v4si
21972 = build_function_type_list (V4SF_type_node,
21973 V4SF_type_node, V4SI_type_node, NULL_TREE);
21974 tree v2df_ftype_v2df_v2di
21975 = build_function_type_list (V2DF_type_node,
21976 V2DF_type_node, V2DI_type_node, NULL_TREE);
21978 tree ftype;
21980 /* Add all special builtins with a variable number of operands. */
21981 for (i = 0, d = bdesc_special_args;
21982 i < ARRAY_SIZE (bdesc_special_args);
21983 i++, d++)
21984 {
21985 tree type;
21987 if (d->name == 0)
21988 continue;
21990 switch ((enum ix86_special_builtin_type) d->flag)
21991 {
21992 case VOID_FTYPE_VOID:
21993 type = void_ftype_void;
21994 break;
21995 case V32QI_FTYPE_PCCHAR:
21996 type = v32qi_ftype_pcchar;
21997 break;
21998 case V16QI_FTYPE_PCCHAR:
21999 type = v16qi_ftype_pcchar;
22000 break;
22001 case V8SF_FTYPE_PCV4SF:
22002 type = v8sf_ftype_pcv4sf;
22003 break;
22004 case V8SF_FTYPE_PCFLOAT:
22005 type = v8sf_ftype_pcfloat;
22006 break;
22007 case V4DF_FTYPE_PCV2DF:
22008 type = v4df_ftype_pcv2df;
22009 break;
22010 case V4DF_FTYPE_PCDOUBLE:
22011 type = v4df_ftype_pcdouble;
22012 break;
22013 case V4SF_FTYPE_PCFLOAT:
22014 type = v4sf_ftype_pcfloat;
22015 break;
22016 case V2DI_FTYPE_PV2DI:
22017 type = v2di_ftype_pv2di;
22018 break;
22019 case V2DF_FTYPE_PCDOUBLE:
22020 type = v2df_ftype_pcdouble;
22021 break;
22022 case V8SF_FTYPE_PCV8SF_V8SF:
22023 type = v8sf_ftype_pcv8sf_v8sf;
22024 break;
22025 case V4DF_FTYPE_PCV4DF_V4DF:
22026 type = v4df_ftype_pcv4df_v4df;
22027 break;
22028 case V4SF_FTYPE_V4SF_PCV2SF:
22029 type = v4sf_ftype_v4sf_pcv2sf;
22030 break;
22031 case V4SF_FTYPE_PCV4SF_V4SF:
22032 type = v4sf_ftype_pcv4sf_v4sf;
22033 break;
22034 case V2DF_FTYPE_V2DF_PCDOUBLE:
22035 type = v2df_ftype_v2df_pcdouble;
22036 break;
22037 case V2DF_FTYPE_PCV2DF_V2DF:
22038 type = v2df_ftype_pcv2df_v2df;
22039 break;
22040 case VOID_FTYPE_PV2SF_V4SF:
22041 type = void_ftype_pv2sf_v4sf;
22042 break;
22043 case VOID_FTYPE_PV2DI_V2DI:
22044 type = void_ftype_pv2di_v2di;
22045 break;
22046 case VOID_FTYPE_PCHAR_V32QI:
22047 type = void_ftype_pchar_v32qi;
22048 break;
22049 case VOID_FTYPE_PCHAR_V16QI:
22050 type = void_ftype_pchar_v16qi;
22051 break;
22052 case VOID_FTYPE_PFLOAT_V8SF:
22053 type = void_ftype_pfloat_v8sf;
22054 break;
22055 case VOID_FTYPE_PFLOAT_V4SF:
22056 type = void_ftype_pfloat_v4sf;
22057 break;
22058 case VOID_FTYPE_PDOUBLE_V4DF:
22059 type = void_ftype_pdouble_v4df;
22060 break;
22061 case VOID_FTYPE_PDOUBLE_V2DF:
22062 type = void_ftype_pdouble_v2df;
22063 break;
22064 case VOID_FTYPE_PDI_DI:
22065 type = void_ftype_pdi_di;
22066 break;
22067 case VOID_FTYPE_PINT_INT:
22068 type = void_ftype_pint_int;
22069 break;
22070 case VOID_FTYPE_PV8SF_V8SF_V8SF:
22071 type = void_ftype_pv8sf_v8sf_v8sf;
22072 break;
22073 case VOID_FTYPE_PV4DF_V4DF_V4DF:
22074 type = void_ftype_pv4df_v4df_v4df;
22075 break;
22076 case VOID_FTYPE_PV4SF_V4SF_V4SF:
22077 type = void_ftype_pv4sf_v4sf_v4sf;
22078 break;
22079 case VOID_FTYPE_PV2DF_V2DF_V2DF:
22080 type = void_ftype_pv2df_v2df_v2df;
22081 break;
22082 default:
22083 gcc_unreachable ();
22084 }
22086 def_builtin (d->mask, d->name, type, d->code);
22087 }
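/* One iteration therefore amounts to, with placeholder values:
     def_builtin (d->mask, d->name, type, d->code);
   where d->mask is e.g. OPTION_MASK_ISA_AVX and type is whichever tree
   the switch above selected; every table entry is thus registered under
   its ISA mask with the matching function type.  */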
22089 /* Add all builtins with a variable number of operands. */
22090 for (i = 0, d = bdesc_args;
22091 i < ARRAY_SIZE (bdesc_args);
22092 i++, d++)
22093 {
22094 tree type;
22096 if (d->name == 0)
22097 continue;
22099 switch ((enum ix86_builtin_type) d->flag)
22100 {
22101 case FLOAT_FTYPE_FLOAT:
22102 type = float_ftype_float;
22103 break;
22104 case INT_FTYPE_V8SF_V8SF_PTEST:
22105 type = int_ftype_v8sf_v8sf;
22106 break;
22107 case INT_FTYPE_V4DI_V4DI_PTEST:
22108 type = int_ftype_v4di_v4di;
22109 break;
22110 case INT_FTYPE_V4DF_V4DF_PTEST:
22111 type = int_ftype_v4df_v4df;
22112 break;
22113 case INT_FTYPE_V4SF_V4SF_PTEST:
22114 type = int_ftype_v4sf_v4sf;
22115 break;
22116 case INT_FTYPE_V2DI_V2DI_PTEST:
22117 type = int_ftype_v2di_v2di;
22118 break;
22119 case INT_FTYPE_V2DF_V2DF_PTEST:
22120 type = int_ftype_v2df_v2df;
22121 break;
22122 case INT64_FTYPE_V4SF:
22123 type = int64_ftype_v4sf;
22124 break;
22125 case INT64_FTYPE_V2DF:
22126 type = int64_ftype_v2df;
22127 break;
22128 case INT_FTYPE_V16QI:
22129 type = int_ftype_v16qi;
22130 break;
22131 case INT_FTYPE_V8QI:
22132 type = int_ftype_v8qi;
22133 break;
22134 case INT_FTYPE_V8SF:
22135 type = int_ftype_v8sf;
22136 break;
22137 case INT_FTYPE_V4DF:
22138 type = int_ftype_v4df;
22139 break;
22140 case INT_FTYPE_V4SF:
22141 type = int_ftype_v4sf;
22142 break;
22143 case INT_FTYPE_V2DF:
22144 type = int_ftype_v2df;
22145 break;
22146 case V16QI_FTYPE_V16QI:
22147 type = v16qi_ftype_v16qi;
22148 break;
22149 case V8SI_FTYPE_V8SF:
22150 type = v8si_ftype_v8sf;
22151 break;
22152 case V8SI_FTYPE_V4SI:
22153 type = v8si_ftype_v4si;
22154 break;
22155 case V8HI_FTYPE_V8HI:
22156 type = v8hi_ftype_v8hi;
22157 break;
22158 case V8HI_FTYPE_V16QI:
22159 type = v8hi_ftype_v16qi;
22160 break;
22161 case V8QI_FTYPE_V8QI:
22162 type = v8qi_ftype_v8qi;
22163 break;
22164 case V8SF_FTYPE_V8SF:
22165 type = v8sf_ftype_v8sf;
22166 break;
22167 case V8SF_FTYPE_V8SI:
22168 type = v8sf_ftype_v8si;
22169 break;
22170 case V8SF_FTYPE_V4SF:
22171 type = v8sf_ftype_v4sf;
22172 break;
22173 case V4SI_FTYPE_V4DF:
22174 type = v4si_ftype_v4df;
22175 break;
22176 case V4SI_FTYPE_V4SI:
22177 type = v4si_ftype_v4si;
22178 break;
22179 case V4SI_FTYPE_V16QI:
22180 type = v4si_ftype_v16qi;
22181 break;
22182 case V4SI_FTYPE_V8SI:
22183 type = v4si_ftype_v8si;
22184 break;
22185 case V4SI_FTYPE_V8HI:
22186 type = v4si_ftype_v8hi;
22187 break;
22188 case V4SI_FTYPE_V4SF:
22189 type = v4si_ftype_v4sf;
22190 break;
22191 case V4SI_FTYPE_V2DF:
22192 type = v4si_ftype_v2df;
22193 break;
22194 case V4HI_FTYPE_V4HI:
22195 type = v4hi_ftype_v4hi;
22196 break;
22197 case V4DF_FTYPE_V4DF:
22198 type = v4df_ftype_v4df;
22199 break;
22200 case V4DF_FTYPE_V4SI:
22201 type = v4df_ftype_v4si;
22202 break;
22203 case V4DF_FTYPE_V4SF:
22204 type = v4df_ftype_v4sf;
22205 break;
22206 case V4DF_FTYPE_V2DF:
22207 type = v4df_ftype_v2df;
22208 break;
22209 case V4SF_FTYPE_V4SF:
22210 case V4SF_FTYPE_V4SF_VEC_MERGE:
22211 type = v4sf_ftype_v4sf;
22212 break;
22213 case V4SF_FTYPE_V8SF:
22214 type = v4sf_ftype_v8sf;
22215 break;
22216 case V4SF_FTYPE_V4SI:
22217 type = v4sf_ftype_v4si;
22218 break;
22219 case V4SF_FTYPE_V4DF:
22220 type = v4sf_ftype_v4df;
22221 break;
22222 case V4SF_FTYPE_V2DF:
22223 type = v4sf_ftype_v2df;
22224 break;
22225 case V2DI_FTYPE_V2DI:
22226 type = v2di_ftype_v2di;
22227 break;
22228 case V2DI_FTYPE_V16QI:
22229 type = v2di_ftype_v16qi;
22230 break;
22231 case V2DI_FTYPE_V8HI:
22232 type = v2di_ftype_v8hi;
22233 break;
22234 case V2DI_FTYPE_V4SI:
22235 type = v2di_ftype_v4si;
22236 break;
22237 case V2SI_FTYPE_V2SI:
22238 type = v2si_ftype_v2si;
22239 break;
22240 case V2SI_FTYPE_V4SF:
22241 type = v2si_ftype_v4sf;
22242 break;
22243 case V2SI_FTYPE_V2DF:
22244 type = v2si_ftype_v2df;
22245 break;
22246 case V2SI_FTYPE_V2SF:
22247 type = v2si_ftype_v2sf;
22248 break;
22249 case V2DF_FTYPE_V4DF:
22250 type = v2df_ftype_v4df;
22251 break;
22252 case V2DF_FTYPE_V4SF:
22253 type = v2df_ftype_v4sf;
22254 break;
22255 case V2DF_FTYPE_V2DF:
22256 case V2DF_FTYPE_V2DF_VEC_MERGE:
22257 type = v2df_ftype_v2df;
22258 break;
22259 case V2DF_FTYPE_V2SI:
22260 type = v2df_ftype_v2si;
22261 break;
22262 case V2DF_FTYPE_V4SI:
22263 type = v2df_ftype_v4si;
22264 break;
22265 case V2SF_FTYPE_V2SF:
22266 type = v2sf_ftype_v2sf;
22267 break;
22268 case V2SF_FTYPE_V2SI:
22269 type = v2sf_ftype_v2si;
22270 break;
22271 case V16QI_FTYPE_V16QI_V16QI:
22272 type = v16qi_ftype_v16qi_v16qi;
22273 break;
22274 case V16QI_FTYPE_V8HI_V8HI:
22275 type = v16qi_ftype_v8hi_v8hi;
22276 break;
22277 case V8QI_FTYPE_V8QI_V8QI:
22278 type = v8qi_ftype_v8qi_v8qi;
22279 break;
22280 case V8QI_FTYPE_V4HI_V4HI:
22281 type = v8qi_ftype_v4hi_v4hi;
22282 break;
22283 case V8HI_FTYPE_V8HI_V8HI:
22284 case V8HI_FTYPE_V8HI_V8HI_COUNT:
22285 type = v8hi_ftype_v8hi_v8hi;
22286 break;
22287 case V8HI_FTYPE_V16QI_V16QI:
22288 type = v8hi_ftype_v16qi_v16qi;
22289 break;
22290 case V8HI_FTYPE_V4SI_V4SI:
22291 type = v8hi_ftype_v4si_v4si;
22292 break;
22293 case V8HI_FTYPE_V8HI_SI_COUNT:
22294 type = v8hi_ftype_v8hi_int;
22295 break;
22296 case V8SF_FTYPE_V8SF_V8SF:
22297 type = v8sf_ftype_v8sf_v8sf;
22298 break;
22299 case V8SF_FTYPE_V8SF_V8SI:
22300 type = v8sf_ftype_v8sf_v8si;
22301 break;
22302 case V4SI_FTYPE_V4SI_V4SI:
22303 case V4SI_FTYPE_V4SI_V4SI_COUNT:
22304 type = v4si_ftype_v4si_v4si;
22305 break;
22306 case V4SI_FTYPE_V8HI_V8HI:
22307 type = v4si_ftype_v8hi_v8hi;
22308 break;
22309 case V4SI_FTYPE_V4SF_V4SF:
22310 type = v4si_ftype_v4sf_v4sf;
22311 break;
22312 case V4SI_FTYPE_V2DF_V2DF:
22313 type = v4si_ftype_v2df_v2df;
22314 break;
22315 case V4SI_FTYPE_V4SI_SI_COUNT:
22316 type = v4si_ftype_v4si_int;
22317 break;
22318 case V4HI_FTYPE_V4HI_V4HI:
22319 case V4HI_FTYPE_V4HI_V4HI_COUNT:
22320 type = v4hi_ftype_v4hi_v4hi;
22321 break;
22322 case V4HI_FTYPE_V8QI_V8QI:
22323 type = v4hi_ftype_v8qi_v8qi;
22324 break;
22325 case V4HI_FTYPE_V2SI_V2SI:
22326 type = v4hi_ftype_v2si_v2si;
22327 break;
22328 case V4HI_FTYPE_V4HI_SI_COUNT:
22329 type = v4hi_ftype_v4hi_int;
22330 break;
22331 case V4DF_FTYPE_V4DF_V4DF:
22332 type = v4df_ftype_v4df_v4df;
22333 break;
22334 case V4DF_FTYPE_V4DF_V4DI:
22335 type = v4df_ftype_v4df_v4di;
22336 break;
22337 case V4SF_FTYPE_V4SF_V4SF:
22338 case V4SF_FTYPE_V4SF_V4SF_SWAP:
22339 type = v4sf_ftype_v4sf_v4sf;
22340 break;
22341 case V4SF_FTYPE_V4SF_V4SI:
22342 type = v4sf_ftype_v4sf_v4si;
22343 break;
22344 case V4SF_FTYPE_V4SF_V2SI:
22345 type = v4sf_ftype_v4sf_v2si;
22346 break;
22347 case V4SF_FTYPE_V4SF_V2DF:
22348 type = v4sf_ftype_v4sf_v2df;
22349 break;
22350 case V4SF_FTYPE_V4SF_DI:
22351 type = v4sf_ftype_v4sf_int64;
22352 break;
22353 case V4SF_FTYPE_V4SF_SI:
22354 type = v4sf_ftype_v4sf_int;
22355 break;
22356 case V2DI_FTYPE_V2DI_V2DI:
22357 case V2DI_FTYPE_V2DI_V2DI_COUNT:
22358 type = v2di_ftype_v2di_v2di;
22359 break;
22360 case V2DI_FTYPE_V16QI_V16QI:
22361 type = v2di_ftype_v16qi_v16qi;
22362 break;
22363 case V2DI_FTYPE_V4SI_V4SI:
22364 type = v2di_ftype_v4si_v4si;
22365 break;
22366 case V2DI_FTYPE_V2DI_V16QI:
22367 type = v2di_ftype_v2di_v16qi;
22368 break;
22369 case V2DI_FTYPE_V2DF_V2DF:
22370 type = v2di_ftype_v2df_v2df;
22371 break;
22372 case V2DI_FTYPE_V2DI_SI_COUNT:
22373 type = v2di_ftype_v2di_int;
22374 break;
22375 case V2SI_FTYPE_V2SI_V2SI:
22376 case V2SI_FTYPE_V2SI_V2SI_COUNT:
22377 type = v2si_ftype_v2si_v2si;
22378 break;
22379 case V2SI_FTYPE_V4HI_V4HI:
22380 type = v2si_ftype_v4hi_v4hi;
22381 break;
22382 case V2SI_FTYPE_V2SF_V2SF:
22383 type = v2si_ftype_v2sf_v2sf;
22384 break;
22385 case V2SI_FTYPE_V2SI_SI_COUNT:
22386 type = v2si_ftype_v2si_int;
22387 break;
22388 case V2DF_FTYPE_V2DF_V2DF:
22389 case V2DF_FTYPE_V2DF_V2DF_SWAP:
22390 type = v2df_ftype_v2df_v2df;
22391 break;
22392 case V2DF_FTYPE_V2DF_V4SF:
22393 type = v2df_ftype_v2df_v4sf;
22394 break;
22395 case V2DF_FTYPE_V2DF_V2DI:
22396 type = v2df_ftype_v2df_v2di;
22397 break;
22398 case V2DF_FTYPE_V2DF_DI:
22399 type = v2df_ftype_v2df_int64;
22400 break;
22401 case V2DF_FTYPE_V2DF_SI:
22402 type = v2df_ftype_v2df_int;
22403 break;
22404 case V2SF_FTYPE_V2SF_V2SF:
22405 type = v2sf_ftype_v2sf_v2sf;
22406 break;
22407 case V1DI_FTYPE_V1DI_V1DI:
22408 case V1DI_FTYPE_V1DI_V1DI_COUNT:
22409 type = v1di_ftype_v1di_v1di;
22410 break;
22411 case V1DI_FTYPE_V8QI_V8QI:
22412 type = v1di_ftype_v8qi_v8qi;
22413 break;
22414 case V1DI_FTYPE_V2SI_V2SI:
22415 type = v1di_ftype_v2si_v2si;
22416 break;
22417 case V1DI_FTYPE_V1DI_SI_COUNT:
22418 type = v1di_ftype_v1di_int;
22419 break;
22420 case UINT64_FTYPE_UINT64_UINT64:
22421 type = uint64_ftype_uint64_uint64;
22422 break;
22423 case UINT_FTYPE_UINT_UINT:
22424 type = unsigned_ftype_unsigned_unsigned;
22425 break;
22426 case UINT_FTYPE_UINT_USHORT:
22427 type = unsigned_ftype_unsigned_ushort;
22428 break;
22429 case UINT_FTYPE_UINT_UCHAR:
22430 type = unsigned_ftype_unsigned_uchar;
22431 break;
22432 case V8HI_FTYPE_V8HI_INT:
22433 type = v8hi_ftype_v8hi_int;
22434 break;
22435 case V8SF_FTYPE_V8SF_INT:
22436 type = v8sf_ftype_v8sf_int;
22437 break;
22438 case V4SI_FTYPE_V4SI_INT:
22439 type = v4si_ftype_v4si_int;
22440 break;
22441 case V4SI_FTYPE_V8SI_INT:
22442 type = v4si_ftype_v8si_int;
22443 break;
22444 case V4HI_FTYPE_V4HI_INT:
22445 type = v4hi_ftype_v4hi_int;
22446 break;
22447 case V4DF_FTYPE_V4DF_INT:
22448 type = v4df_ftype_v4df_int;
22449 break;
22450 case V4SF_FTYPE_V4SF_INT:
22451 type = v4sf_ftype_v4sf_int;
22452 break;
22453 case V4SF_FTYPE_V8SF_INT:
22454 type = v4sf_ftype_v8sf_int;
22455 break;
22456 case V2DI_FTYPE_V2DI_INT:
22457 case V2DI2TI_FTYPE_V2DI_INT:
22458 type = v2di_ftype_v2di_int;
22459 break;
22460 case V2DF_FTYPE_V2DF_INT:
22461 type = v2df_ftype_v2df_int;
22462 break;
22463 case V2DF_FTYPE_V4DF_INT:
22464 type = v2df_ftype_v4df_int;
22465 break;
22466 case V16QI_FTYPE_V16QI_V16QI_V16QI:
22467 type = v16qi_ftype_v16qi_v16qi_v16qi;
22468 break;
22469 case V8SF_FTYPE_V8SF_V8SF_V8SF:
22470 type = v8sf_ftype_v8sf_v8sf_v8sf;
22471 break;
22472 case V4DF_FTYPE_V4DF_V4DF_V4DF:
22473 type = v4df_ftype_v4df_v4df_v4df;
22474 break;
22475 case V4SF_FTYPE_V4SF_V4SF_V4SF:
22476 type = v4sf_ftype_v4sf_v4sf_v4sf;
22477 break;
22478 case V2DF_FTYPE_V2DF_V2DF_V2DF:
22479 type = v2df_ftype_v2df_v2df_v2df;
22480 break;
22481 case V16QI_FTYPE_V16QI_V16QI_INT:
22482 type = v16qi_ftype_v16qi_v16qi_int;
22483 break;
22484 case V8SI_FTYPE_V8SI_V8SI_INT:
22485 type = v8si_ftype_v8si_v8si_int;
22486 break;
22487 case V8SI_FTYPE_V8SI_V4SI_INT:
22488 type = v8si_ftype_v8si_v4si_int;
22489 break;
22490 case V8HI_FTYPE_V8HI_V8HI_INT:
22491 type = v8hi_ftype_v8hi_v8hi_int;
22492 break;
22493 case V8SF_FTYPE_V8SF_V8SF_INT:
22494 type = v8sf_ftype_v8sf_v8sf_int;
22495 break;
22496 case V8SF_FTYPE_V8SF_V4SF_INT:
22497 type = v8sf_ftype_v8sf_v4sf_int;
22498 break;
22499 case V4SI_FTYPE_V4SI_V4SI_INT:
22500 type = v4si_ftype_v4si_v4si_int;
22501 break;
22502 case V4DF_FTYPE_V4DF_V4DF_INT:
22503 type = v4df_ftype_v4df_v4df_int;
22504 break;
22505 case V4DF_FTYPE_V4DF_V2DF_INT:
22506 type = v4df_ftype_v4df_v2df_int;
22507 break;
22508 case V4SF_FTYPE_V4SF_V4SF_INT:
22509 type = v4sf_ftype_v4sf_v4sf_int;
22510 break;
22511 case V2DI_FTYPE_V2DI_V2DI_INT:
22512 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
22513 type = v2di_ftype_v2di_v2di_int;
22514 break;
22515 case V2DF_FTYPE_V2DF_V2DF_INT:
22516 type = v2df_ftype_v2df_v2df_int;
22517 break;
22518 case V2DI_FTYPE_V2DI_UINT_UINT:
22519 type = v2di_ftype_v2di_unsigned_unsigned;
22520 break;
22521 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
22522 type = v2di_ftype_v2di_v2di_unsigned_unsigned;
22523 break;
22524 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
22525 type = v1di_ftype_v1di_v1di_int;
22526 break;
22527 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
22528 type = v8sf_ftype_v8sf_v8sf_v8si_int;
22529 break;
22530 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
22531 type = v4df_ftype_v4df_v4df_v4di_int;
22532 break;
22533 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
22534 type = v4sf_ftype_v4sf_v4sf_v4si_int;
22535 break;
22536 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
22537 type = v2df_ftype_v2df_v2df_v2di_int;
22538 break;
22539 default:
22540 gcc_unreachable ();
22543 def_builtin_const (d->mask, d->name, type, d->code);
22546 /* pcmpestr[im] insns. */
22547 for (i = 0, d = bdesc_pcmpestr;
22548 i < ARRAY_SIZE (bdesc_pcmpestr);
22549 i++, d++)
22551 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22552 ftype = v16qi_ftype_v16qi_int_v16qi_int_int;
22553 else
22554 ftype = int_ftype_v16qi_int_v16qi_int_int;
22555 def_builtin_const (d->mask, d->name, ftype, d->code);
22558 /* pcmpistr[im] insns. */
22559 for (i = 0, d = bdesc_pcmpistr;
22560 i < ARRAY_SIZE (bdesc_pcmpistr);
22561 i++, d++)
22563 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22564 ftype = v16qi_ftype_v16qi_v16qi_int;
22565 else
22566 ftype = int_ftype_v16qi_v16qi_int;
22567 def_builtin_const (d->mask, d->name, ftype, d->code);
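/* A minimal user-level sketch of what the two loops above register
   (assuming the <nmmintrin.h> wrappers of this era, built with
   -msse4.2): _mm_cmpistri expands to __builtin_ia32_pcmpistri128 and
   _mm_cmpestrm to __builtin_ia32_pcmpestrm128.  */
#if 0 /* illustrative only, not compiled as part of i386.c */
#include <nmmintrin.h>

/* Index of the first byte of NEEDLE found anywhere in HAYSTACK
   (16 if none); both operands are implicitly NUL-terminated.  */
static int
first_match_index (__m128i needle, __m128i haystack)
{
  return _mm_cmpistri (needle, haystack,
		       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
}

/* Explicit-length variant returning a byte mask.  */
static __m128i
match_mask (__m128i a, int la, __m128i b, int lb)
{
  return _mm_cmpestrm (a, la, b, lb,
		       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH
		       | _SIDD_UNIT_MASK);
}
#endif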
22570 /* comi/ucomi insns. */
22571 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22572 if (d->mask == OPTION_MASK_ISA_SSE2)
22573 def_builtin_const (d->mask, d->name, int_ftype_v2df_v2df, d->code);
22574 else
22575 def_builtin_const (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
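/* Sketch of the user-level view of these comi/ucomi builtins
   (assuming the <xmmintrin.h> / <emmintrin.h> wrappers of this era):
   _mm_comieq_ss wraps __builtin_ia32_comieq and _mm_ucomieq_sd wraps
   __builtin_ia32_ucomisdeq, each yielding 0 or 1 from EFLAGS.  */
#if 0 /* illustrative only */
#include <emmintrin.h>

static int
scalars_equal (__m128 a, __m128 b, __m128d c, __m128d d)
{
  return _mm_comieq_ss (a, b) && _mm_ucomieq_sd (c, d);
}
#endif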
22577 /* SSE */
22578 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
22579 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
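/* Sketch (assuming <xmmintrin.h>): _mm_getcsr / _mm_setcsr wrap
   __builtin_ia32_stmxcsr / __builtin_ia32_ldmxcsr, e.g. to switch
   the SSE rounding mode.  */
#if 0 /* illustrative only */
#include <xmmintrin.h>

static void
round_toward_zero (void)
{
  unsigned int csr = _mm_getcsr ();	/* stmxcsr */
  _mm_setcsr ((csr & ~_MM_ROUND_MASK) | _MM_ROUND_TOWARD_ZERO); /* ldmxcsr */
}
#endif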
22581 /* SSE or 3DNow!A */
22582 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
22584 /* SSE2 */
22585 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
22587 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
22588 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
22590 /* SSE3. */
22591 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor", void_ftype_pcvoid_unsigned_unsigned, IX86_BUILTIN_MONITOR);
22592 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait", void_ftype_unsigned_unsigned, IX86_BUILTIN_MWAIT);
22594 /* AES */
22595 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENC128);
22596 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENCLAST128);
22597 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDEC128);
22598 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDECLAST128);
22599 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128", v2di_ftype_v2di, IX86_BUILTIN_AESIMC128);
22600 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128", v2di_ftype_v2di_int, IX86_BUILTIN_AESKEYGENASSIST128);
22602 /* PCLMUL */
22603 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PCLMULQDQ128);
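/* Sketch of the AES and PCLMUL builtins just registered (assuming
   <wmmintrin.h>, built with -maes -mpclmul): _mm_aesenc_si128 wraps
   __builtin_ia32_aesenc128 and _mm_clmulepi64_si128 wraps
   __builtin_ia32_pclmulqdq128.  */
#if 0 /* illustrative only */
#include <wmmintrin.h>

/* One AES round plus a carry-less multiply of the low quadwords.  */
static __m128i
example_round (__m128i state, __m128i round_key, __m128i a, __m128i b)
{
  __m128i t = _mm_aesenc_si128 (state, round_key);
  return _mm_xor_si128 (t, _mm_clmulepi64_si128 (a, b, 0x00));
}
#endif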
22605 /* AVX */
22606 def_builtin (OPTION_MASK_ISA_AVX, "__builtin_ia32_vzeroupper", void_ftype_void,
22607 TARGET_64BIT ? IX86_BUILTIN_VZEROUPPER_REX64 : IX86_BUILTIN_VZEROUPPER);
22609 /* Access to the vec_init patterns. */
22610 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
22611 integer_type_node, NULL_TREE);
22612 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si", ftype, IX86_BUILTIN_VEC_INIT_V2SI);
22614 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
22615 short_integer_type_node,
22616 short_integer_type_node,
22617 short_integer_type_node, NULL_TREE);
22618 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi", ftype, IX86_BUILTIN_VEC_INIT_V4HI);
22620 ftype = build_function_type_list (V8QI_type_node, char_type_node,
22621 char_type_node, char_type_node,
22622 char_type_node, char_type_node,
22623 char_type_node, char_type_node,
22624 char_type_node, NULL_TREE);
22625 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi", ftype, IX86_BUILTIN_VEC_INIT_V8QI);
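/* Sketch (assuming the <mmintrin.h> of this era): _mm_set_pi32 is
   implemented on top of __builtin_ia32_vec_init_v2si, which is the
   sanctioned way to construct MMX values (see the rationale in the
   comment before ix86_expand_vec_init_builtin below).  */
#if 0 /* illustrative only */
#include <mmintrin.h>

static __m64
two_ints (int hi, int lo)
{
  return _mm_set_pi32 (hi, lo);	/* -> __builtin_ia32_vec_init_v2si */
}
#endif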
22627 /* Access to the vec_extract patterns. */
22628 ftype = build_function_type_list (double_type_node, V2DF_type_node,
22629 integer_type_node, NULL_TREE);
22630 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df", ftype, IX86_BUILTIN_VEC_EXT_V2DF);
22632 ftype = build_function_type_list (long_long_integer_type_node,
22633 V2DI_type_node, integer_type_node,
22634 NULL_TREE);
22635 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di", ftype, IX86_BUILTIN_VEC_EXT_V2DI);
22637 ftype = build_function_type_list (float_type_node, V4SF_type_node,
22638 integer_type_node, NULL_TREE);
22639 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf", ftype, IX86_BUILTIN_VEC_EXT_V4SF);
22641 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
22642 integer_type_node, NULL_TREE);
22643 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si", ftype, IX86_BUILTIN_VEC_EXT_V4SI);
22645 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
22646 integer_type_node, NULL_TREE);
22647 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi", ftype, IX86_BUILTIN_VEC_EXT_V8HI);
22649 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
22650 integer_type_node, NULL_TREE);
22651 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_ext_v4hi", ftype, IX86_BUILTIN_VEC_EXT_V4HI);
22653 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
22654 integer_type_node, NULL_TREE);
22655 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si", ftype, IX86_BUILTIN_VEC_EXT_V2SI);
22657 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
22658 integer_type_node, NULL_TREE);
22659 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi", ftype, IX86_BUILTIN_VEC_EXT_V16QI);
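/* Sketch (assuming <emmintrin.h>): _mm_extract_epi16 wraps
   __builtin_ia32_vec_ext_v8hi; the selector must be a compile-time
   constant in 0..7, which get_element_number below enforces.  */
#if 0 /* illustrative only */
#include <emmintrin.h>

static int
third_halfword (__m128i v)
{
  return _mm_extract_epi16 (v, 3);	/* constant selector required */
}
#endif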
22661 /* Access to the vec_set patterns. */
22662 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
22663 intDI_type_node,
22664 integer_type_node, NULL_TREE);
22665 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_vec_set_v2di", ftype, IX86_BUILTIN_VEC_SET_V2DI);
22667 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
22668 float_type_node,
22669 integer_type_node, NULL_TREE);
22670 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf", ftype, IX86_BUILTIN_VEC_SET_V4SF);
22672 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
22673 intSI_type_node,
22674 integer_type_node, NULL_TREE);
22675 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si", ftype, IX86_BUILTIN_VEC_SET_V4SI);
22677 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
22678 intHI_type_node,
22679 integer_type_node, NULL_TREE);
22680 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi", ftype, IX86_BUILTIN_VEC_SET_V8HI);
22682 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
22683 intHI_type_node,
22684 integer_type_node, NULL_TREE);
22685 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_set_v4hi", ftype, IX86_BUILTIN_VEC_SET_V4HI);
22687 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
22688 intQI_type_node,
22689 integer_type_node, NULL_TREE);
22690 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi", ftype, IX86_BUILTIN_VEC_SET_V16QI);
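/* Sketch (assuming <emmintrin.h>): _mm_insert_epi16 wraps
   __builtin_ia32_vec_set_v8hi, again with a constant selector.  */
#if 0 /* illustrative only */
#include <emmintrin.h>

static __m128i
set_third_halfword (__m128i v, int x)
{
  return _mm_insert_epi16 (v, x, 3);
}
#endif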
22692 /* Add SSE5 multi-argument instructions. */
22693 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22695 tree mtype = NULL_TREE;
22697 if (d->name == 0)
22698 continue;
22700 switch ((enum multi_arg_type)d->flag)
22702 case MULTI_ARG_3_SF: mtype = v4sf_ftype_v4sf_v4sf_v4sf; break;
22703 case MULTI_ARG_3_DF: mtype = v2df_ftype_v2df_v2df_v2df; break;
22704 case MULTI_ARG_3_DI: mtype = v2di_ftype_v2di_v2di_v2di; break;
22705 case MULTI_ARG_3_SI: mtype = v4si_ftype_v4si_v4si_v4si; break;
22706 case MULTI_ARG_3_SI_DI: mtype = v4si_ftype_v4si_v4si_v2di; break;
22707 case MULTI_ARG_3_HI: mtype = v8hi_ftype_v8hi_v8hi_v8hi; break;
22708 case MULTI_ARG_3_HI_SI: mtype = v8hi_ftype_v8hi_v8hi_v4si; break;
22709 case MULTI_ARG_3_QI: mtype = v16qi_ftype_v16qi_v16qi_v16qi; break;
22710 case MULTI_ARG_3_PERMPS: mtype = v4sf_ftype_v4sf_v4sf_v16qi; break;
22711 case MULTI_ARG_3_PERMPD: mtype = v2df_ftype_v2df_v2df_v16qi; break;
22712 case MULTI_ARG_2_SF: mtype = v4sf_ftype_v4sf_v4sf; break;
22713 case MULTI_ARG_2_DF: mtype = v2df_ftype_v2df_v2df; break;
22714 case MULTI_ARG_2_DI: mtype = v2di_ftype_v2di_v2di; break;
22715 case MULTI_ARG_2_SI: mtype = v4si_ftype_v4si_v4si; break;
22716 case MULTI_ARG_2_HI: mtype = v8hi_ftype_v8hi_v8hi; break;
22717 case MULTI_ARG_2_QI: mtype = v16qi_ftype_v16qi_v16qi; break;
22718 case MULTI_ARG_2_DI_IMM: mtype = v2di_ftype_v2di_si; break;
22719 case MULTI_ARG_2_SI_IMM: mtype = v4si_ftype_v4si_si; break;
22720 case MULTI_ARG_2_HI_IMM: mtype = v8hi_ftype_v8hi_si; break;
22721 case MULTI_ARG_2_QI_IMM: mtype = v16qi_ftype_v16qi_si; break;
22722 case MULTI_ARG_2_SF_CMP: mtype = v4sf_ftype_v4sf_v4sf; break;
22723 case MULTI_ARG_2_DF_CMP: mtype = v2df_ftype_v2df_v2df; break;
22724 case MULTI_ARG_2_DI_CMP: mtype = v2di_ftype_v2di_v2di; break;
22725 case MULTI_ARG_2_SI_CMP: mtype = v4si_ftype_v4si_v4si; break;
22726 case MULTI_ARG_2_HI_CMP: mtype = v8hi_ftype_v8hi_v8hi; break;
22727 case MULTI_ARG_2_QI_CMP: mtype = v16qi_ftype_v16qi_v16qi; break;
22728 case MULTI_ARG_2_SF_TF: mtype = v4sf_ftype_v4sf_v4sf; break;
22729 case MULTI_ARG_2_DF_TF: mtype = v2df_ftype_v2df_v2df; break;
22730 case MULTI_ARG_2_DI_TF: mtype = v2di_ftype_v2di_v2di; break;
22731 case MULTI_ARG_2_SI_TF: mtype = v4si_ftype_v4si_v4si; break;
22732 case MULTI_ARG_2_HI_TF: mtype = v8hi_ftype_v8hi_v8hi; break;
22733 case MULTI_ARG_2_QI_TF: mtype = v16qi_ftype_v16qi_v16qi; break;
22734 case MULTI_ARG_1_SF: mtype = v4sf_ftype_v4sf; break;
22735 case MULTI_ARG_1_DF: mtype = v2df_ftype_v2df; break;
22736 case MULTI_ARG_1_DI: mtype = v2di_ftype_v2di; break;
22737 case MULTI_ARG_1_SI: mtype = v4si_ftype_v4si; break;
22738 case MULTI_ARG_1_HI: mtype = v8hi_ftype_v8hi; break;
22739 case MULTI_ARG_1_QI: mtype = v16qi_ftype_v16qi; break;
22740 case MULTI_ARG_1_SI_DI: mtype = v2di_ftype_v4si; break;
22741 case MULTI_ARG_1_HI_DI: mtype = v2di_ftype_v8hi; break;
22742 case MULTI_ARG_1_HI_SI: mtype = v4si_ftype_v8hi; break;
22743 case MULTI_ARG_1_QI_DI: mtype = v2di_ftype_v16qi; break;
22744 case MULTI_ARG_1_QI_SI: mtype = v4si_ftype_v16qi; break;
22745 case MULTI_ARG_1_QI_HI: mtype = v8hi_ftype_v16qi; break;
22746 case MULTI_ARG_1_PH2PS: mtype = v4sf_ftype_v4hi; break;
22747 case MULTI_ARG_1_PS2PH: mtype = v4hi_ftype_v4sf; break;
22748 case MULTI_ARG_UNKNOWN:
22749 default:
22750 gcc_unreachable ();
22753 if (mtype)
22754 def_builtin_const (d->mask, d->name, mtype, d->code);
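/* Sketch of one MULTI_ARG_3_SF entry at the user level (assuming the
   SSE5 <bmmintrin.h> of this era, built with -msse5): _mm_macc_ps
   wraps the fused multiply-add builtin __builtin_ia32_fmaddps.  */
#if 0 /* illustrative only */
#include <bmmintrin.h>

static __m128
fma_ps (__m128 a, __m128 b, __m128 c)
{
  return _mm_macc_ps (a, b, c);	/* a * b + c in one insn */
}
#endif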
22758 /* Internal helper for ix86_init_builtins. */
22760 static void
22761 ix86_init_builtins_va_builtins_abi (void)
22763 tree ms_va_ref, sysv_va_ref;
22764 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22765 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22766 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22767 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22769 if (!TARGET_64BIT)
22770 return;
22771 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22772 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22773 ms_va_ref = build_reference_type (ms_va_list_type_node);
22774 sysv_va_ref =
22775 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22777 fnvoid_va_end_ms =
22778 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22779 fnvoid_va_start_ms =
22780 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22781 fnvoid_va_end_sysv =
22782 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22783 fnvoid_va_start_sysv =
22784 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22785 NULL_TREE);
22786 fnvoid_va_copy_ms =
22787 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22788 NULL_TREE);
22789 fnvoid_va_copy_sysv =
22790 build_function_type_list (void_type_node, sysv_va_ref,
22791 sysv_va_ref, NULL_TREE);
22793 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22794 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22795 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22796 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
22797 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
22798 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
22799 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
22800 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22801 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
22802 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22803 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
22804 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
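/* Sketch of the user-level view on x86_64 (assuming the
   __builtin_ms_va_list type this port provides): an ms_abi function
   uses the __builtin_ms_va_* family registered above.  */
#if 0 /* illustrative only */
static int sum_ints (int count, ...) __attribute__ ((ms_abi));

static int
sum_ints (int count, ...)
{
  __builtin_ms_va_list ap;
  int i, sum = 0;

  __builtin_ms_va_start (ap, count);
  for (i = 0; i < count; i++)
    sum += __builtin_va_arg (ap, int);
  __builtin_ms_va_end (ap);
  return sum;
}
#endif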
22807 static void
22808 ix86_init_builtins (void)
22810 tree float128_type_node = make_node (REAL_TYPE);
22811 tree ftype, decl;
22813 /* The __float80 type. */
22814 if (TYPE_MODE (long_double_type_node) == XFmode)
22815 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
22816 "__float80");
22817 else
22819 /* long double is not XFmode; build a separate 80-bit type for __float80. */
22820 tree float80_type_node = make_node (REAL_TYPE);
22822 TYPE_PRECISION (float80_type_node) = 80;
22823 layout_type (float80_type_node);
22824 (*lang_hooks.types.register_builtin_type) (float80_type_node,
22825 "__float80");
22828 /* The __float128 type. */
22829 TYPE_PRECISION (float128_type_node) = 128;
22830 layout_type (float128_type_node);
22831 (*lang_hooks.types.register_builtin_type) (float128_type_node,
22832 "__float128");
22834 /* TFmode support builtins. */
22835 ftype = build_function_type (float128_type_node, void_list_node);
22836 decl = add_builtin_function ("__builtin_infq", ftype,
22837 IX86_BUILTIN_INFQ, BUILT_IN_MD,
22838 NULL, NULL_TREE);
22839 ix86_builtins[(int) IX86_BUILTIN_INFQ] = decl;
22841 /* We will expand them to a normal call if SSE2 isn't available,
22842 since they are used by libgcc. */
22843 ftype = build_function_type_list (float128_type_node,
22844 float128_type_node,
22845 NULL_TREE);
22846 decl = add_builtin_function ("__builtin_fabsq", ftype,
22847 IX86_BUILTIN_FABSQ, BUILT_IN_MD,
22848 "__fabstf2", NULL_TREE);
22849 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = decl;
22850 TREE_READONLY (decl) = 1;
22852 ftype = build_function_type_list (float128_type_node,
22853 float128_type_node,
22854 float128_type_node,
22855 NULL_TREE);
22856 decl = add_builtin_function ("__builtin_copysignq", ftype,
22857 IX86_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
22858 "__copysigntf3", NULL_TREE);
22859 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = decl;
22860 TREE_READONLY (decl) = 1;
22862 ix86_init_mmx_sse_builtins ();
22863 if (TARGET_64BIT)
22864 ix86_init_builtins_va_builtins_abi ();
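/* Sketch of the TFmode builtins registered above (assuming a target
   where __float128 is available): without SSE2 the last two expand
   to calls to the libgcc routines named in their registrations.  */
#if 0 /* illustrative only */
static __float128
clamp_magnitude (__float128 x)
{
  __float128 inf = __builtin_infq ();
  if (__builtin_fabsq (x) == inf)			/* |x| via __fabstf2 */
    x = __builtin_copysignq ((__float128) 1.0, x);	/* via __copysigntf3 */
  return x;
}
#endif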
22867 /* Errors in the source file can cause expand_expr to return const0_rtx
22868 where we expect a vector. To avoid crashing, use one of the vector
22869 clear instructions. */
22870 static rtx
22871 safe_vector_operand (rtx x, enum machine_mode mode)
22873 if (x == const0_rtx)
22874 x = CONST0_RTX (mode);
22875 return x;
22878 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
22880 static rtx
22881 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
22883 rtx pat;
22884 tree arg0 = CALL_EXPR_ARG (exp, 0);
22885 tree arg1 = CALL_EXPR_ARG (exp, 1);
22886 rtx op0 = expand_normal (arg0);
22887 rtx op1 = expand_normal (arg1);
22888 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22889 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
22890 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
22892 if (VECTOR_MODE_P (mode0))
22893 op0 = safe_vector_operand (op0, mode0);
22894 if (VECTOR_MODE_P (mode1))
22895 op1 = safe_vector_operand (op1, mode1);
22897 if (optimize || !target
22898 || GET_MODE (target) != tmode
22899 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22900 target = gen_reg_rtx (tmode);
22902 if (GET_MODE (op1) == SImode && mode1 == TImode)
22904 rtx x = gen_reg_rtx (V4SImode);
22905 emit_insn (gen_sse2_loadd (x, op1));
22906 op1 = gen_lowpart (TImode, x);
22909 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
22910 op0 = copy_to_mode_reg (mode0, op0);
22911 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
22912 op1 = copy_to_mode_reg (mode1, op1);
22914 pat = GEN_FCN (icode) (target, op0, op1);
22915 if (! pat)
22916 return 0;
22918 emit_insn (pat);
22920 return target;
22923 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
22925 static rtx
22926 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
22927 enum multi_arg_type m_type,
22928 enum insn_code sub_code)
22930 rtx pat;
22931 int i;
22932 int nargs;
22933 bool comparison_p = false;
22934 bool tf_p = false;
22935 bool last_arg_constant = false;
22936 int num_memory = 0;
22937 struct {
22938 rtx op;
22939 enum machine_mode mode;
22940 } args[4];
22942 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22944 switch (m_type)
22946 case MULTI_ARG_3_SF:
22947 case MULTI_ARG_3_DF:
22948 case MULTI_ARG_3_DI:
22949 case MULTI_ARG_3_SI:
22950 case MULTI_ARG_3_SI_DI:
22951 case MULTI_ARG_3_HI:
22952 case MULTI_ARG_3_HI_SI:
22953 case MULTI_ARG_3_QI:
22954 case MULTI_ARG_3_PERMPS:
22955 case MULTI_ARG_3_PERMPD:
22956 nargs = 3;
22957 break;
22959 case MULTI_ARG_2_SF:
22960 case MULTI_ARG_2_DF:
22961 case MULTI_ARG_2_DI:
22962 case MULTI_ARG_2_SI:
22963 case MULTI_ARG_2_HI:
22964 case MULTI_ARG_2_QI:
22965 nargs = 2;
22966 break;
22968 case MULTI_ARG_2_DI_IMM:
22969 case MULTI_ARG_2_SI_IMM:
22970 case MULTI_ARG_2_HI_IMM:
22971 case MULTI_ARG_2_QI_IMM:
22972 nargs = 2;
22973 last_arg_constant = true;
22974 break;
22976 case MULTI_ARG_1_SF:
22977 case MULTI_ARG_1_DF:
22978 case MULTI_ARG_1_DI:
22979 case MULTI_ARG_1_SI:
22980 case MULTI_ARG_1_HI:
22981 case MULTI_ARG_1_QI:
22982 case MULTI_ARG_1_SI_DI:
22983 case MULTI_ARG_1_HI_DI:
22984 case MULTI_ARG_1_HI_SI:
22985 case MULTI_ARG_1_QI_DI:
22986 case MULTI_ARG_1_QI_SI:
22987 case MULTI_ARG_1_QI_HI:
22988 case MULTI_ARG_1_PH2PS:
22989 case MULTI_ARG_1_PS2PH:
22990 nargs = 1;
22991 break;
22993 case MULTI_ARG_2_SF_CMP:
22994 case MULTI_ARG_2_DF_CMP:
22995 case MULTI_ARG_2_DI_CMP:
22996 case MULTI_ARG_2_SI_CMP:
22997 case MULTI_ARG_2_HI_CMP:
22998 case MULTI_ARG_2_QI_CMP:
22999 nargs = 2;
23000 comparison_p = true;
23001 break;
23003 case MULTI_ARG_2_SF_TF:
23004 case MULTI_ARG_2_DF_TF:
23005 case MULTI_ARG_2_DI_TF:
23006 case MULTI_ARG_2_SI_TF:
23007 case MULTI_ARG_2_HI_TF:
23008 case MULTI_ARG_2_QI_TF:
23009 nargs = 2;
23010 tf_p = true;
23011 break;
23013 case MULTI_ARG_UNKNOWN:
23014 default:
23015 gcc_unreachable ();
23018 if (optimize || !target
23019 || GET_MODE (target) != tmode
23020 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23021 target = gen_reg_rtx (tmode);
23023 gcc_assert (nargs <= 4);
23025 for (i = 0; i < nargs; i++)
23027 tree arg = CALL_EXPR_ARG (exp, i);
23028 rtx op = expand_normal (arg);
23029 int adjust = (comparison_p) ? 1 : 0;
23030 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
23032 if (last_arg_constant && i == nargs-1)
23034 if (GET_CODE (op) != CONST_INT)
23036 error ("last argument must be an immediate");
23037 return gen_reg_rtx (tmode);
23040 else
23042 if (VECTOR_MODE_P (mode))
23043 op = safe_vector_operand (op, mode);
23045 /* If we aren't optimizing, only allow one memory operand to be
23046 generated. */
23047 if (memory_operand (op, mode))
23048 num_memory++;
23050 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
23052 if (optimize
23053 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
23054 || num_memory > 1)
23055 op = force_reg (mode, op);
23058 args[i].op = op;
23059 args[i].mode = mode;
23062 switch (nargs)
23064 case 1:
23065 pat = GEN_FCN (icode) (target, args[0].op);
23066 break;
23068 case 2:
23069 if (tf_p)
23070 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23071 GEN_INT ((int)sub_code));
23072 else if (! comparison_p)
23073 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23074 else
23076 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23077 args[0].op,
23078 args[1].op);
23080 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23082 break;
23084 case 3:
23085 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23086 break;
23088 default:
23089 gcc_unreachable ();
23092 if (! pat)
23093 return 0;
23095 emit_insn (pat);
23096 return target;
23099 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23100 insns with vec_merge. */
23102 static rtx
23103 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23104 rtx target)
23106 rtx pat;
23107 tree arg0 = CALL_EXPR_ARG (exp, 0);
23108 rtx op1, op0 = expand_normal (arg0);
23109 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23110 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23112 if (optimize || !target
23113 || GET_MODE (target) != tmode
23114 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23115 target = gen_reg_rtx (tmode);
23117 if (VECTOR_MODE_P (mode0))
23118 op0 = safe_vector_operand (op0, mode0);
23120 if ((optimize && !register_operand (op0, mode0))
23121 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23122 op0 = copy_to_mode_reg (mode0, op0);
23124 op1 = op0;
23125 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23126 op1 = copy_to_mode_reg (mode0, op1);
23128 pat = GEN_FCN (icode) (target, op0, op1);
23129 if (! pat)
23130 return 0;
23131 emit_insn (pat);
23132 return target;
23135 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23137 static rtx
23138 ix86_expand_sse_compare (const struct builtin_description *d,
23139 tree exp, rtx target, bool swap)
23141 rtx pat;
23142 tree arg0 = CALL_EXPR_ARG (exp, 0);
23143 tree arg1 = CALL_EXPR_ARG (exp, 1);
23144 rtx op0 = expand_normal (arg0);
23145 rtx op1 = expand_normal (arg1);
23146 rtx op2;
23147 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23148 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23149 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23150 enum rtx_code comparison = d->comparison;
23152 if (VECTOR_MODE_P (mode0))
23153 op0 = safe_vector_operand (op0, mode0);
23154 if (VECTOR_MODE_P (mode1))
23155 op1 = safe_vector_operand (op1, mode1);
23157 /* Swap operands if we have a comparison that isn't available in
23158 hardware. */
23159 if (swap)
23161 rtx tmp = gen_reg_rtx (mode1);
23162 emit_move_insn (tmp, op1);
23163 op1 = op0;
23164 op0 = tmp;
23167 if (optimize || !target
23168 || GET_MODE (target) != tmode
23169 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23170 target = gen_reg_rtx (tmode);
23172 if ((optimize && !register_operand (op0, mode0))
23173 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23174 op0 = copy_to_mode_reg (mode0, op0);
23175 if ((optimize && !register_operand (op1, mode1))
23176 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23177 op1 = copy_to_mode_reg (mode1, op1);
23179 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23180 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23181 if (! pat)
23182 return 0;
23183 emit_insn (pat);
23184 return target;
23187 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23189 static rtx
23190 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23191 rtx target)
23193 rtx pat;
23194 tree arg0 = CALL_EXPR_ARG (exp, 0);
23195 tree arg1 = CALL_EXPR_ARG (exp, 1);
23196 rtx op0 = expand_normal (arg0);
23197 rtx op1 = expand_normal (arg1);
23198 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23199 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23200 enum rtx_code comparison = d->comparison;
23202 if (VECTOR_MODE_P (mode0))
23203 op0 = safe_vector_operand (op0, mode0);
23204 if (VECTOR_MODE_P (mode1))
23205 op1 = safe_vector_operand (op1, mode1);
23207 /* Swap operands if we have a comparison that isn't available in
23208 hardware. */
23209 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23211 rtx tmp = op1;
23212 op1 = op0;
23213 op0 = tmp;
23216 target = gen_reg_rtx (SImode);
23217 emit_move_insn (target, const0_rtx);
23218 target = gen_rtx_SUBREG (QImode, target, 0);
23220 if ((optimize && !register_operand (op0, mode0))
23221 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23222 op0 = copy_to_mode_reg (mode0, op0);
23223 if ((optimize && !register_operand (op1, mode1))
23224 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23225 op1 = copy_to_mode_reg (mode1, op1);
23227 pat = GEN_FCN (d->icode) (op0, op1);
23228 if (! pat)
23229 return 0;
23230 emit_insn (pat);
23231 emit_insn (gen_rtx_SET (VOIDmode,
23232 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23233 gen_rtx_fmt_ee (comparison, QImode,
23234 SET_DEST (pat),
23235 const0_rtx)));
23237 return SUBREG_REG (target);
23240 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23242 static rtx
23243 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23244 rtx target)
23246 rtx pat;
23247 tree arg0 = CALL_EXPR_ARG (exp, 0);
23248 tree arg1 = CALL_EXPR_ARG (exp, 1);
23249 rtx op0 = expand_normal (arg0);
23250 rtx op1 = expand_normal (arg1);
23251 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23252 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23253 enum rtx_code comparison = d->comparison;
23255 if (VECTOR_MODE_P (mode0))
23256 op0 = safe_vector_operand (op0, mode0);
23257 if (VECTOR_MODE_P (mode1))
23258 op1 = safe_vector_operand (op1, mode1);
23260 target = gen_reg_rtx (SImode);
23261 emit_move_insn (target, const0_rtx);
23262 target = gen_rtx_SUBREG (QImode, target, 0);
23264 if ((optimize && !register_operand (op0, mode0))
23265 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23266 op0 = copy_to_mode_reg (mode0, op0);
23267 if ((optimize && !register_operand (op1, mode1))
23268 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23269 op1 = copy_to_mode_reg (mode1, op1);
23271 pat = GEN_FCN (d->icode) (op0, op1);
23272 if (! pat)
23273 return 0;
23274 emit_insn (pat);
23275 emit_insn (gen_rtx_SET (VOIDmode,
23276 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23277 gen_rtx_fmt_ee (comparison, QImode,
23278 SET_DEST (pat),
23279 const0_rtx)));
23281 return SUBREG_REG (target);
23284 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23286 static rtx
23287 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23288 tree exp, rtx target)
23290 rtx pat;
23291 tree arg0 = CALL_EXPR_ARG (exp, 0);
23292 tree arg1 = CALL_EXPR_ARG (exp, 1);
23293 tree arg2 = CALL_EXPR_ARG (exp, 2);
23294 tree arg3 = CALL_EXPR_ARG (exp, 3);
23295 tree arg4 = CALL_EXPR_ARG (exp, 4);
23296 rtx scratch0, scratch1;
23297 rtx op0 = expand_normal (arg0);
23298 rtx op1 = expand_normal (arg1);
23299 rtx op2 = expand_normal (arg2);
23300 rtx op3 = expand_normal (arg3);
23301 rtx op4 = expand_normal (arg4);
23302 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23304 tmode0 = insn_data[d->icode].operand[0].mode;
23305 tmode1 = insn_data[d->icode].operand[1].mode;
23306 modev2 = insn_data[d->icode].operand[2].mode;
23307 modei3 = insn_data[d->icode].operand[3].mode;
23308 modev4 = insn_data[d->icode].operand[4].mode;
23309 modei5 = insn_data[d->icode].operand[5].mode;
23310 modeimm = insn_data[d->icode].operand[6].mode;
23312 if (VECTOR_MODE_P (modev2))
23313 op0 = safe_vector_operand (op0, modev2);
23314 if (VECTOR_MODE_P (modev4))
23315 op2 = safe_vector_operand (op2, modev4);
23317 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23318 op0 = copy_to_mode_reg (modev2, op0);
23319 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23320 op1 = copy_to_mode_reg (modei3, op1);
23321 if ((optimize && !register_operand (op2, modev4))
23322 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23323 op2 = copy_to_mode_reg (modev4, op2);
23324 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23325 op3 = copy_to_mode_reg (modei5, op3);
23327 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23329 error ("the fifth argument must be an 8-bit immediate");
23330 return const0_rtx;
23333 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23335 if (optimize || !target
23336 || GET_MODE (target) != tmode0
23337 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23338 target = gen_reg_rtx (tmode0);
23340 scratch1 = gen_reg_rtx (tmode1);
23342 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23344 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23346 if (optimize || !target
23347 || GET_MODE (target) != tmode1
23348 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23349 target = gen_reg_rtx (tmode1);
23351 scratch0 = gen_reg_rtx (tmode0);
23353 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23355 else
23357 gcc_assert (d->flag);
23359 scratch0 = gen_reg_rtx (tmode0);
23360 scratch1 = gen_reg_rtx (tmode1);
23362 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23365 if (! pat)
23366 return 0;
23368 emit_insn (pat);
23370 if (d->flag)
23372 target = gen_reg_rtx (SImode);
23373 emit_move_insn (target, const0_rtx);
23374 target = gen_rtx_SUBREG (QImode, target, 0);
23376 emit_insn
23377 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23378 gen_rtx_fmt_ee (EQ, QImode,
23379 gen_rtx_REG ((enum machine_mode) d->flag,
23380 FLAGS_REG),
23381 const0_rtx)));
23382 return SUBREG_REG (target);
23384 else
23385 return target;
23389 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23391 static rtx
23392 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23393 tree exp, rtx target)
23395 rtx pat;
23396 tree arg0 = CALL_EXPR_ARG (exp, 0);
23397 tree arg1 = CALL_EXPR_ARG (exp, 1);
23398 tree arg2 = CALL_EXPR_ARG (exp, 2);
23399 rtx scratch0, scratch1;
23400 rtx op0 = expand_normal (arg0);
23401 rtx op1 = expand_normal (arg1);
23402 rtx op2 = expand_normal (arg2);
23403 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23405 tmode0 = insn_data[d->icode].operand[0].mode;
23406 tmode1 = insn_data[d->icode].operand[1].mode;
23407 modev2 = insn_data[d->icode].operand[2].mode;
23408 modev3 = insn_data[d->icode].operand[3].mode;
23409 modeimm = insn_data[d->icode].operand[4].mode;
23411 if (VECTOR_MODE_P (modev2))
23412 op0 = safe_vector_operand (op0, modev2);
23413 if (VECTOR_MODE_P (modev3))
23414 op1 = safe_vector_operand (op1, modev3);
23416 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23417 op0 = copy_to_mode_reg (modev2, op0);
23418 if ((optimize && !register_operand (op1, modev3))
23419 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23420 op1 = copy_to_mode_reg (modev3, op1);
23422 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23424 error ("the third argument must be an 8-bit immediate");
23425 return const0_rtx;
23428 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23430 if (optimize || !target
23431 || GET_MODE (target) != tmode0
23432 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23433 target = gen_reg_rtx (tmode0);
23435 scratch1 = gen_reg_rtx (tmode1);
23437 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23439 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23441 if (optimize || !target
23442 || GET_MODE (target) != tmode1
23443 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23444 target = gen_reg_rtx (tmode1);
23446 scratch0 = gen_reg_rtx (tmode0);
23448 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23450 else
23452 gcc_assert (d->flag);
23454 scratch0 = gen_reg_rtx (tmode0);
23455 scratch1 = gen_reg_rtx (tmode1);
23457 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23460 if (! pat)
23461 return 0;
23463 emit_insn (pat);
23465 if (d->flag)
23467 target = gen_reg_rtx (SImode);
23468 emit_move_insn (target, const0_rtx);
23469 target = gen_rtx_SUBREG (QImode, target, 0);
23471 emit_insn
23472 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23473 gen_rtx_fmt_ee (EQ, QImode,
23474 gen_rtx_REG ((enum machine_mode) d->flag,
23475 FLAGS_REG),
23476 const0_rtx)));
23477 return SUBREG_REG (target);
23479 else
23480 return target;
23483 /* Subroutine of ix86_expand_builtin to take care of insns with a
23484 variable number of operands. */
23484 variable number of operands. */
23486 static rtx
23487 ix86_expand_args_builtin (const struct builtin_description *d,
23488 tree exp, rtx target)
23490 rtx pat, real_target;
23491 unsigned int i, nargs;
23492 unsigned int nargs_constant = 0;
23493 int num_memory = 0;
23494 struct
23496 rtx op;
23497 enum machine_mode mode;
23498 } args[4];
23499 bool last_arg_count = false;
23500 enum insn_code icode = d->icode;
23501 const struct insn_data *insn_p = &insn_data[icode];
23502 enum machine_mode tmode = insn_p->operand[0].mode;
23503 enum machine_mode rmode = VOIDmode;
23504 bool swap = false;
23505 enum rtx_code comparison = d->comparison;
23507 switch ((enum ix86_builtin_type) d->flag)
23509 case INT_FTYPE_V8SF_V8SF_PTEST:
23510 case INT_FTYPE_V4DI_V4DI_PTEST:
23511 case INT_FTYPE_V4DF_V4DF_PTEST:
23512 case INT_FTYPE_V4SF_V4SF_PTEST:
23513 case INT_FTYPE_V2DI_V2DI_PTEST:
23514 case INT_FTYPE_V2DF_V2DF_PTEST:
23515 return ix86_expand_sse_ptest (d, exp, target);
23516 case FLOAT128_FTYPE_FLOAT128:
23517 case FLOAT_FTYPE_FLOAT:
23518 case INT64_FTYPE_V4SF:
23519 case INT64_FTYPE_V2DF:
23520 case INT_FTYPE_V16QI:
23521 case INT_FTYPE_V8QI:
23522 case INT_FTYPE_V8SF:
23523 case INT_FTYPE_V4DF:
23524 case INT_FTYPE_V4SF:
23525 case INT_FTYPE_V2DF:
23526 case V16QI_FTYPE_V16QI:
23527 case V8SI_FTYPE_V8SF:
23528 case V8SI_FTYPE_V4SI:
23529 case V8HI_FTYPE_V8HI:
23530 case V8HI_FTYPE_V16QI:
23531 case V8QI_FTYPE_V8QI:
23532 case V8SF_FTYPE_V8SF:
23533 case V8SF_FTYPE_V8SI:
23534 case V8SF_FTYPE_V4SF:
23535 case V4SI_FTYPE_V4SI:
23536 case V4SI_FTYPE_V16QI:
23537 case V4SI_FTYPE_V4SF:
23538 case V4SI_FTYPE_V8SI:
23539 case V4SI_FTYPE_V8HI:
23540 case V4SI_FTYPE_V4DF:
23541 case V4SI_FTYPE_V2DF:
23542 case V4HI_FTYPE_V4HI:
23543 case V4DF_FTYPE_V4DF:
23544 case V4DF_FTYPE_V4SI:
23545 case V4DF_FTYPE_V4SF:
23546 case V4DF_FTYPE_V2DF:
23547 case V4SF_FTYPE_V4SF:
23548 case V4SF_FTYPE_V4SI:
23549 case V4SF_FTYPE_V8SF:
23550 case V4SF_FTYPE_V4DF:
23551 case V4SF_FTYPE_V2DF:
23552 case V2DI_FTYPE_V2DI:
23553 case V2DI_FTYPE_V16QI:
23554 case V2DI_FTYPE_V8HI:
23555 case V2DI_FTYPE_V4SI:
23556 case V2DF_FTYPE_V2DF:
23557 case V2DF_FTYPE_V4SI:
23558 case V2DF_FTYPE_V4DF:
23559 case V2DF_FTYPE_V4SF:
23560 case V2DF_FTYPE_V2SI:
23561 case V2SI_FTYPE_V2SI:
23562 case V2SI_FTYPE_V4SF:
23563 case V2SI_FTYPE_V2SF:
23564 case V2SI_FTYPE_V2DF:
23565 case V2SF_FTYPE_V2SF:
23566 case V2SF_FTYPE_V2SI:
23567 nargs = 1;
23568 break;
23569 case V4SF_FTYPE_V4SF_VEC_MERGE:
23570 case V2DF_FTYPE_V2DF_VEC_MERGE:
23571 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23572 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23573 case V16QI_FTYPE_V16QI_V16QI:
23574 case V16QI_FTYPE_V8HI_V8HI:
23575 case V8QI_FTYPE_V8QI_V8QI:
23576 case V8QI_FTYPE_V4HI_V4HI:
23577 case V8HI_FTYPE_V8HI_V8HI:
23578 case V8HI_FTYPE_V16QI_V16QI:
23579 case V8HI_FTYPE_V4SI_V4SI:
23580 case V8SF_FTYPE_V8SF_V8SF:
23581 case V8SF_FTYPE_V8SF_V8SI:
23582 case V4SI_FTYPE_V4SI_V4SI:
23583 case V4SI_FTYPE_V8HI_V8HI:
23584 case V4SI_FTYPE_V4SF_V4SF:
23585 case V4SI_FTYPE_V2DF_V2DF:
23586 case V4HI_FTYPE_V4HI_V4HI:
23587 case V4HI_FTYPE_V8QI_V8QI:
23588 case V4HI_FTYPE_V2SI_V2SI:
23589 case V4DF_FTYPE_V4DF_V4DF:
23590 case V4DF_FTYPE_V4DF_V4DI:
23591 case V4SF_FTYPE_V4SF_V4SF:
23592 case V4SF_FTYPE_V4SF_V4SI:
23593 case V4SF_FTYPE_V4SF_V2SI:
23594 case V4SF_FTYPE_V4SF_V2DF:
23595 case V4SF_FTYPE_V4SF_DI:
23596 case V4SF_FTYPE_V4SF_SI:
23597 case V2DI_FTYPE_V2DI_V2DI:
23598 case V2DI_FTYPE_V16QI_V16QI:
23599 case V2DI_FTYPE_V4SI_V4SI:
23600 case V2DI_FTYPE_V2DI_V16QI:
23601 case V2DI_FTYPE_V2DF_V2DF:
23602 case V2SI_FTYPE_V2SI_V2SI:
23603 case V2SI_FTYPE_V4HI_V4HI:
23604 case V2SI_FTYPE_V2SF_V2SF:
23605 case V2DF_FTYPE_V2DF_V2DF:
23606 case V2DF_FTYPE_V2DF_V4SF:
23607 case V2DF_FTYPE_V2DF_V2DI:
23608 case V2DF_FTYPE_V2DF_DI:
23609 case V2DF_FTYPE_V2DF_SI:
23610 case V2SF_FTYPE_V2SF_V2SF:
23611 case V1DI_FTYPE_V1DI_V1DI:
23612 case V1DI_FTYPE_V8QI_V8QI:
23613 case V1DI_FTYPE_V2SI_V2SI:
23614 if (comparison == UNKNOWN)
23615 return ix86_expand_binop_builtin (icode, exp, target);
23616 nargs = 2;
23617 break;
23618 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23619 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23620 gcc_assert (comparison != UNKNOWN);
23621 nargs = 2;
23622 swap = true;
23623 break;
23624 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23625 case V8HI_FTYPE_V8HI_SI_COUNT:
23626 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23627 case V4SI_FTYPE_V4SI_SI_COUNT:
23628 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23629 case V4HI_FTYPE_V4HI_SI_COUNT:
23630 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23631 case V2DI_FTYPE_V2DI_SI_COUNT:
23632 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23633 case V2SI_FTYPE_V2SI_SI_COUNT:
23634 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23635 case V1DI_FTYPE_V1DI_SI_COUNT:
23636 nargs = 2;
23637 last_arg_count = true;
23638 break;
23639 case UINT64_FTYPE_UINT64_UINT64:
23640 case UINT_FTYPE_UINT_UINT:
23641 case UINT_FTYPE_UINT_USHORT:
23642 case UINT_FTYPE_UINT_UCHAR:
23643 nargs = 2;
23644 break;
23645 case V2DI2TI_FTYPE_V2DI_INT:
23646 nargs = 2;
23647 rmode = V2DImode;
23648 nargs_constant = 1;
23649 break;
23650 case V8HI_FTYPE_V8HI_INT:
23651 case V8SF_FTYPE_V8SF_INT:
23652 case V4SI_FTYPE_V4SI_INT:
23653 case V4SI_FTYPE_V8SI_INT:
23654 case V4HI_FTYPE_V4HI_INT:
23655 case V4DF_FTYPE_V4DF_INT:
23656 case V4SF_FTYPE_V4SF_INT:
23657 case V4SF_FTYPE_V8SF_INT:
23658 case V2DI_FTYPE_V2DI_INT:
23659 case V2DF_FTYPE_V2DF_INT:
23660 case V2DF_FTYPE_V4DF_INT:
23661 nargs = 2;
23662 nargs_constant = 1;
23663 break;
23664 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23665 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23666 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23667 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23668 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23669 nargs = 3;
23670 break;
23671 case V16QI_FTYPE_V16QI_V16QI_INT:
23672 case V8HI_FTYPE_V8HI_V8HI_INT:
23673 case V8SI_FTYPE_V8SI_V8SI_INT:
23674 case V8SI_FTYPE_V8SI_V4SI_INT:
23675 case V8SF_FTYPE_V8SF_V8SF_INT:
23676 case V8SF_FTYPE_V8SF_V4SF_INT:
23677 case V4SI_FTYPE_V4SI_V4SI_INT:
23678 case V4DF_FTYPE_V4DF_V4DF_INT:
23679 case V4DF_FTYPE_V4DF_V2DF_INT:
23680 case V4SF_FTYPE_V4SF_V4SF_INT:
23681 case V2DI_FTYPE_V2DI_V2DI_INT:
23682 case V2DF_FTYPE_V2DF_V2DF_INT:
23683 nargs = 3;
23684 nargs_constant = 1;
23685 break;
23686 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
23687 nargs = 3;
23688 rmode = V2DImode;
23689 nargs_constant = 1;
23690 break;
23691 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
23692 nargs = 3;
23693 rmode = DImode;
23694 nargs_constant = 1;
23695 break;
23696 case V2DI_FTYPE_V2DI_UINT_UINT:
23697 nargs = 3;
23698 nargs_constant = 2;
23699 break;
23700 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23701 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23702 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23703 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23704 nargs = 4;
23705 nargs_constant = 1;
23706 break;
23707 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23708 nargs = 4;
23709 nargs_constant = 2;
23710 break;
23711 default:
23712 gcc_unreachable ();
23715 gcc_assert (nargs <= ARRAY_SIZE (args));
23717 if (comparison != UNKNOWN)
23719 gcc_assert (nargs == 2);
23720 return ix86_expand_sse_compare (d, exp, target, swap);
23723 if (rmode == VOIDmode || rmode == tmode)
23725 if (optimize
23726 || target == 0
23727 || GET_MODE (target) != tmode
23728 || ! (*insn_p->operand[0].predicate) (target, tmode))
23729 target = gen_reg_rtx (tmode);
23730 real_target = target;
23732 else
23734 target = gen_reg_rtx (rmode);
23735 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23738 for (i = 0; i < nargs; i++)
23740 tree arg = CALL_EXPR_ARG (exp, i);
23741 rtx op = expand_normal (arg);
23742 enum machine_mode mode = insn_p->operand[i + 1].mode;
23743 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23745 if (last_arg_count && (i + 1) == nargs)
23747 /* SIMD shift insns take either an 8-bit immediate or a
23748 register as the count. But the builtin functions take an
23749 int, so if the count doesn't match, we put it in a register. */
23750 if (!match)
23752 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23753 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23754 op = copy_to_reg (op);
23757 else if ((nargs - i) <= nargs_constant)
23759 if (!match)
23760 switch (icode)
23762 case CODE_FOR_sse4_1_roundpd:
23763 case CODE_FOR_sse4_1_roundps:
23764 case CODE_FOR_sse4_1_roundsd:
23765 case CODE_FOR_sse4_1_roundss:
23766 case CODE_FOR_sse4_1_blendps:
23767 case CODE_FOR_avx_blendpd256:
23768 case CODE_FOR_avx_vpermilv4df:
23769 case CODE_FOR_avx_roundpd256:
23770 case CODE_FOR_avx_roundps256:
23771 error ("the last argument must be a 4-bit immediate");
23772 return const0_rtx;
23774 case CODE_FOR_sse4_1_blendpd:
23775 case CODE_FOR_avx_vpermilv2df:
23776 case CODE_FOR_avx_vpermil2v2df3:
23777 case CODE_FOR_avx_vpermil2v4sf3:
23778 case CODE_FOR_avx_vpermil2v4df3:
23779 case CODE_FOR_avx_vpermil2v8sf3:
23780 error ("the last argument must be a 2-bit immediate");
23781 return const0_rtx;
23783 case CODE_FOR_avx_vextractf128v4df:
23784 case CODE_FOR_avx_vextractf128v8sf:
23785 case CODE_FOR_avx_vextractf128v8si:
23786 case CODE_FOR_avx_vinsertf128v4df:
23787 case CODE_FOR_avx_vinsertf128v8sf:
23788 case CODE_FOR_avx_vinsertf128v8si:
23789 error ("the last argument must be a 1-bit immediate");
23790 return const0_rtx;
23792 case CODE_FOR_avx_cmpsdv2df3:
23793 case CODE_FOR_avx_cmpssv4sf3:
23794 case CODE_FOR_avx_cmppdv2df3:
23795 case CODE_FOR_avx_cmppsv4sf3:
23796 case CODE_FOR_avx_cmppdv4df3:
23797 case CODE_FOR_avx_cmppsv8sf3:
23798 error ("the last argument must be a 5-bit immediate");
23799 return const0_rtx;
23801 default:
23802 switch (nargs_constant)
23804 case 2:
23805 if ((nargs - i) == nargs_constant)
23807 error ("the next to last argument must be an 8-bit immediate");
23808 break;
23810 case 1:
23811 error ("the last argument must be an 8-bit immediate");
23812 break;
23813 default:
23814 gcc_unreachable ();
23816 return const0_rtx;
23819 else
23821 if (VECTOR_MODE_P (mode))
23822 op = safe_vector_operand (op, mode);
23824 /* If we aren't optimizing, only allow one memory operand to
23825 be generated. */
23826 if (memory_operand (op, mode))
23827 num_memory++;
23829 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23831 if (optimize || !match || num_memory > 1)
23832 op = copy_to_mode_reg (mode, op);
23834 else
23836 op = copy_to_reg (op);
23837 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23841 args[i].op = op;
23842 args[i].mode = mode;
23845 switch (nargs)
23847 case 1:
23848 pat = GEN_FCN (icode) (real_target, args[0].op);
23849 break;
23850 case 2:
23851 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23852 break;
23853 case 3:
23854 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23855 args[2].op);
23856 break;
23857 case 4:
23858 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23859 args[2].op, args[3].op);
23860 break;
23861 default:
23862 gcc_unreachable ();
23865 if (! pat)
23866 return 0;
23868 emit_insn (pat);
23869 return target;
23872 /* Subroutine of ix86_expand_builtin to take care of special insns
23873 with a variable number of operands. */
23875 static rtx
23876 ix86_expand_special_args_builtin (const struct builtin_description *d,
23877 tree exp, rtx target)
23879 tree arg;
23880 rtx pat, op;
23881 unsigned int i, nargs, arg_adjust, memory;
23882 struct
23884 rtx op;
23885 enum machine_mode mode;
23886 } args[2];
23887 enum insn_code icode = d->icode;
23888 bool last_arg_constant = false;
23889 const struct insn_data *insn_p = &insn_data[icode];
23890 enum machine_mode tmode = insn_p->operand[0].mode;
23891 enum { load, store } klass;
23893 switch ((enum ix86_special_builtin_type) d->flag)
23895 case VOID_FTYPE_VOID:
23896 emit_insn (GEN_FCN (icode) (target));
23897 return 0;
23898 case V2DI_FTYPE_PV2DI:
23899 case V32QI_FTYPE_PCCHAR:
23900 case V16QI_FTYPE_PCCHAR:
23901 case V8SF_FTYPE_PCV4SF:
23902 case V8SF_FTYPE_PCFLOAT:
23903 case V4SF_FTYPE_PCFLOAT:
23904 case V4DF_FTYPE_PCV2DF:
23905 case V4DF_FTYPE_PCDOUBLE:
23906 case V2DF_FTYPE_PCDOUBLE:
23907 nargs = 1;
23908 klass = load;
23909 memory = 0;
23910 break;
23911 case VOID_FTYPE_PV2SF_V4SF:
23912 case VOID_FTYPE_PV2DI_V2DI:
23913 case VOID_FTYPE_PCHAR_V32QI:
23914 case VOID_FTYPE_PCHAR_V16QI:
23915 case VOID_FTYPE_PFLOAT_V8SF:
23916 case VOID_FTYPE_PFLOAT_V4SF:
23917 case VOID_FTYPE_PDOUBLE_V4DF:
23918 case VOID_FTYPE_PDOUBLE_V2DF:
23919 case VOID_FTYPE_PDI_DI:
23920 case VOID_FTYPE_PINT_INT:
23921 nargs = 1;
23922 klass = store;
23923 /* Reserve memory operand for target. */
23924 memory = ARRAY_SIZE (args);
23925 break;
23926 case V4SF_FTYPE_V4SF_PCV2SF:
23927 case V2DF_FTYPE_V2DF_PCDOUBLE:
23928 nargs = 2;
23929 klass = load;
23930 memory = 1;
23931 break;
23932 case V8SF_FTYPE_PCV8SF_V8SF:
23933 case V4DF_FTYPE_PCV4DF_V4DF:
23934 case V4SF_FTYPE_PCV4SF_V4SF:
23935 case V2DF_FTYPE_PCV2DF_V2DF:
23936 nargs = 2;
23937 klass = load;
23938 memory = 0;
23939 break;
23940 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23941 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23942 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23943 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23944 nargs = 2;
23945 klass = store;
23946 /* Reserve memory operand for target. */
23947 memory = ARRAY_SIZE (args);
23948 break;
23949 default:
23950 gcc_unreachable ();
23953 gcc_assert (nargs <= ARRAY_SIZE (args));
23955 if (klass == store)
23957 arg = CALL_EXPR_ARG (exp, 0);
23958 op = expand_normal (arg);
23959 gcc_assert (target == 0);
23960 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23961 arg_adjust = 1;
23963 else
23965 arg_adjust = 0;
23966 if (optimize
23967 || target == 0
23968 || GET_MODE (target) != tmode
23969 || ! (*insn_p->operand[0].predicate) (target, tmode))
23970 target = gen_reg_rtx (tmode);
23973 for (i = 0; i < nargs; i++)
23975 enum machine_mode mode = insn_p->operand[i + 1].mode;
23976 bool match;
23978 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
23979 op = expand_normal (arg);
23980 match = (*insn_p->operand[i + 1].predicate) (op, mode);
23982 if (last_arg_constant && (i + 1) == nargs)
23984 if (!match)
23985 switch (icode)
23987 default:
23988 error ("the last argument must be an 8-bit immediate");
23989 return const0_rtx;
23992 else
23994 if (i == memory)
23996 /* This must be the memory operand. */
23997 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
23998 gcc_assert (GET_MODE (op) == mode
23999 || GET_MODE (op) == VOIDmode);
24001 else
24004 /* This must be a register. */
24004 if (VECTOR_MODE_P (mode))
24005 op = safe_vector_operand (op, mode);
24007 gcc_assert (GET_MODE (op) == mode
24008 || GET_MODE (op) == VOIDmode);
24009 op = copy_to_mode_reg (mode, op);
24013 args[i].op = op;
24014 args[i].mode = mode;
24017 switch (nargs)
24019 case 1:
24020 pat = GEN_FCN (icode) (target, args[0].op);
24021 break;
24022 case 2:
24023 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24024 break;
24025 default:
24026 gcc_unreachable ();
24029 if (! pat)
24030 return 0;
24031 emit_insn (pat);
24032 return klass == store ? 0 : target;
24035 /* Return the integer constant in ARG. Constrain it to be in the range
24036 of the subparts of VEC_TYPE; issue an error if not. */
24038 static int
24039 get_element_number (tree vec_type, tree arg)
24041 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24043 if (!host_integerp (arg, 1)
24044 || (elt = tree_low_cst (arg, 1), elt > max))
24046 error ("selector must be an integer constant in the range 0..%wi", max);
24047 return 0;
24050 return elt;
24053 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24054 ix86_expand_vector_init. We DO have language-level syntax for this, in
24055 the form of (type){ init-list }. Except that since we can't place emms
24056 instructions from inside the compiler, we can't allow the use of MMX
24057 registers unless the user explicitly asks for it. So we do *not* define
24058 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24059 we have builtins invoked by mmintrin.h that give us license to emit
24060 these sorts of instructions. */
24062 static rtx
24063 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24065 enum machine_mode tmode = TYPE_MODE (type);
24066 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24067 int i, n_elt = GET_MODE_NUNITS (tmode);
24068 rtvec v = rtvec_alloc (n_elt);
24070 gcc_assert (VECTOR_MODE_P (tmode));
24071 gcc_assert (call_expr_nargs (exp) == n_elt);
24073 for (i = 0; i < n_elt; ++i)
24075 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24076 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24079 if (!target || !register_operand (target, tmode))
24080 target = gen_reg_rtx (tmode);
24082 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24083 return target;
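/* For illustration only (guarded out, not part of the compiler): a minimal
   sketch of the user-level form, via the mmintrin.h wrappers mentioned in
   the comment above, that reaches ix86_expand_vec_init_builtin.  The
   wrapper passes one call argument per vector element.  */
#if 0
#include <mmintrin.h>

__m64
example_vec_init (int hi, int lo)
{
  /* GCC's mmintrin.h implements this with __builtin_ia32_vec_init_v2si.  */
  return _mm_set_pi32 (hi, lo);
}
#endif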
24086 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24087 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24088 had a language-level syntax for referencing vector elements. */
24090 static rtx
24091 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24093 enum machine_mode tmode, mode0;
24094 tree arg0, arg1;
24095 int elt;
24096 rtx op0;
24098 arg0 = CALL_EXPR_ARG (exp, 0);
24099 arg1 = CALL_EXPR_ARG (exp, 1);
24101 op0 = expand_normal (arg0);
24102 elt = get_element_number (TREE_TYPE (arg0), arg1);
24104 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24105 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24106 gcc_assert (VECTOR_MODE_P (mode0));
24108 op0 = force_reg (mode0, op0);
24110 if (optimize || !target || !register_operand (target, tmode))
24111 target = gen_reg_rtx (tmode);
24113 ix86_expand_vector_extract (true, target, op0, elt);
24115 return target;
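/* Illustrative user-level counterpart (guarded out): _mm_cvtsd_f64 in
   emmintrin.h wraps __builtin_ia32_vec_ext_v2df, so its selector argument
   is validated by get_element_number above.  A sketch, not compiled here.  */
#if 0
#include <emmintrin.h>

double
example_vec_ext (__m128d v)
{
  return _mm_cvtsd_f64 (v);	/* extracts element 0 */
}
#endif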
24118 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24119 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24120 a language-level syntax for referencing vector elements. */
24122 static rtx
24123 ix86_expand_vec_set_builtin (tree exp)
24125 enum machine_mode tmode, mode1;
24126 tree arg0, arg1, arg2;
24127 int elt;
24128 rtx op0, op1, target;
24130 arg0 = CALL_EXPR_ARG (exp, 0);
24131 arg1 = CALL_EXPR_ARG (exp, 1);
24132 arg2 = CALL_EXPR_ARG (exp, 2);
24134 tmode = TYPE_MODE (TREE_TYPE (arg0));
24135 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24136 gcc_assert (VECTOR_MODE_P (tmode));
24138 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24139 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24140 elt = get_element_number (TREE_TYPE (arg0), arg2);
24142 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24143 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24145 op0 = force_reg (tmode, op0);
24146 op1 = force_reg (mode1, op1);
24148 /* OP0 is the source of these builtin functions and shouldn't be
24149 modified. Create a copy, use it and return it as target. */
24150 target = gen_reg_rtx (tmode);
24151 emit_move_insn (target, op0);
24152 ix86_expand_vector_set (true, target, op1, elt);
24154 return target;
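/* Illustrative user-level counterpart (guarded out): _mm_insert_epi16 in
   emmintrin.h wraps __builtin_ia32_vec_set_v8hi.  Note that, per the
   comment above, the source vector is copied and the copy is modified.  */
#if 0
#include <emmintrin.h>

__m128i
example_vec_set (__m128i v)
{
  return _mm_insert_epi16 (v, 42, 3);	/* set element 3 to 42 */
}
#endif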
24157 /* Expand an expression EXP that calls a built-in function,
24158 with result going to TARGET if that's convenient
24159 (and in mode MODE if that's convenient).
24160 SUBTARGET may be used as the target for computing one of EXP's operands.
24161 IGNORE is nonzero if the value is to be ignored. */
24163 static rtx
24164 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24165 enum machine_mode mode ATTRIBUTE_UNUSED,
24166 int ignore ATTRIBUTE_UNUSED)
24168 const struct builtin_description *d;
24169 size_t i;
24170 enum insn_code icode;
24171 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24172 tree arg0, arg1, arg2;
24173 rtx op0, op1, op2, pat;
24174 enum machine_mode mode0, mode1, mode2;
24175 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24177 /* Determine whether the builtin function is available under the current ISA.
24178 Originally the builtin was not created if it wasn't applicable to the
24179 current ISA based on the command-line switches. With function-specific
24180 options, we need to check in the context of the function making the call
24181 whether it is supported. */
24182 if (ix86_builtins_isa[fcode].isa
24183 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24185 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24186 NULL, NULL, false);
24188 if (!opts)
24189 error ("%qE needs unknown isa option", fndecl);
24190 else
24192 gcc_assert (opts != NULL);
24193 error ("%qE needs isa option %s", fndecl, opts);
24194 free (opts);
24196 return const0_rtx;
24199 switch (fcode)
24201 case IX86_BUILTIN_MASKMOVQ:
24202 case IX86_BUILTIN_MASKMOVDQU:
24203 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24204 ? CODE_FOR_mmx_maskmovq
24205 : CODE_FOR_sse2_maskmovdqu);
24206 /* Note the arg order is different from the operand order. */
24207 arg1 = CALL_EXPR_ARG (exp, 0);
24208 arg2 = CALL_EXPR_ARG (exp, 1);
24209 arg0 = CALL_EXPR_ARG (exp, 2);
24210 op0 = expand_normal (arg0);
24211 op1 = expand_normal (arg1);
24212 op2 = expand_normal (arg2);
24213 mode0 = insn_data[icode].operand[0].mode;
24214 mode1 = insn_data[icode].operand[1].mode;
24215 mode2 = insn_data[icode].operand[2].mode;
24217 op0 = force_reg (Pmode, op0);
24218 op0 = gen_rtx_MEM (mode1, op0);
24220 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24221 op0 = copy_to_mode_reg (mode0, op0);
24222 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24223 op1 = copy_to_mode_reg (mode1, op1);
24224 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24225 op2 = copy_to_mode_reg (mode2, op2);
24226 pat = GEN_FCN (icode) (op0, op1, op2);
24227 if (! pat)
24228 return 0;
24229 emit_insn (pat);
24230 return 0;
24232 case IX86_BUILTIN_LDMXCSR:
24233 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24234 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24235 emit_move_insn (target, op0);
24236 emit_insn (gen_sse_ldmxcsr (target));
24237 return 0;
24239 case IX86_BUILTIN_STMXCSR:
24240 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24241 emit_insn (gen_sse_stmxcsr (target));
24242 return copy_to_mode_reg (SImode, target);
24244 case IX86_BUILTIN_CLFLUSH:
24245 arg0 = CALL_EXPR_ARG (exp, 0);
24246 op0 = expand_normal (arg0);
24247 icode = CODE_FOR_sse2_clflush;
24248 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24249 op0 = copy_to_mode_reg (Pmode, op0);
24251 emit_insn (gen_sse2_clflush (op0));
24252 return 0;
24254 case IX86_BUILTIN_MONITOR:
24255 arg0 = CALL_EXPR_ARG (exp, 0);
24256 arg1 = CALL_EXPR_ARG (exp, 1);
24257 arg2 = CALL_EXPR_ARG (exp, 2);
24258 op0 = expand_normal (arg0);
24259 op1 = expand_normal (arg1);
24260 op2 = expand_normal (arg2);
24261 if (!REG_P (op0))
24262 op0 = copy_to_mode_reg (Pmode, op0);
24263 if (!REG_P (op1))
24264 op1 = copy_to_mode_reg (SImode, op1);
24265 if (!REG_P (op2))
24266 op2 = copy_to_mode_reg (SImode, op2);
24267 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24268 return 0;
24270 case IX86_BUILTIN_MWAIT:
24271 arg0 = CALL_EXPR_ARG (exp, 0);
24272 arg1 = CALL_EXPR_ARG (exp, 1);
24273 op0 = expand_normal (arg0);
24274 op1 = expand_normal (arg1);
24275 if (!REG_P (op0))
24276 op0 = copy_to_mode_reg (SImode, op0);
24277 if (!REG_P (op1))
24278 op1 = copy_to_mode_reg (SImode, op1);
24279 emit_insn (gen_sse3_mwait (op0, op1));
24280 return 0;
24282 case IX86_BUILTIN_VEC_INIT_V2SI:
24283 case IX86_BUILTIN_VEC_INIT_V4HI:
24284 case IX86_BUILTIN_VEC_INIT_V8QI:
24285 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24287 case IX86_BUILTIN_VEC_EXT_V2DF:
24288 case IX86_BUILTIN_VEC_EXT_V2DI:
24289 case IX86_BUILTIN_VEC_EXT_V4SF:
24290 case IX86_BUILTIN_VEC_EXT_V4SI:
24291 case IX86_BUILTIN_VEC_EXT_V8HI:
24292 case IX86_BUILTIN_VEC_EXT_V2SI:
24293 case IX86_BUILTIN_VEC_EXT_V4HI:
24294 case IX86_BUILTIN_VEC_EXT_V16QI:
24295 return ix86_expand_vec_ext_builtin (exp, target);
24297 case IX86_BUILTIN_VEC_SET_V2DI:
24298 case IX86_BUILTIN_VEC_SET_V4SF:
24299 case IX86_BUILTIN_VEC_SET_V4SI:
24300 case IX86_BUILTIN_VEC_SET_V8HI:
24301 case IX86_BUILTIN_VEC_SET_V4HI:
24302 case IX86_BUILTIN_VEC_SET_V16QI:
24303 return ix86_expand_vec_set_builtin (exp);
24305 case IX86_BUILTIN_INFQ:
24307 REAL_VALUE_TYPE inf;
24308 rtx tmp;
24310 real_inf (&inf);
24311 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24313 tmp = validize_mem (force_const_mem (mode, tmp));
24315 if (target == 0)
24316 target = gen_reg_rtx (mode);
24318 emit_move_insn (target, tmp);
24319 return target;
24322 default:
24323 break;
24326 for (i = 0, d = bdesc_special_args;
24327 i < ARRAY_SIZE (bdesc_special_args);
24328 i++, d++)
24329 if (d->code == fcode)
24330 return ix86_expand_special_args_builtin (d, exp, target);
24332 for (i = 0, d = bdesc_args;
24333 i < ARRAY_SIZE (bdesc_args);
24334 i++, d++)
24335 if (d->code == fcode)
24336 switch (fcode)
24338 case IX86_BUILTIN_FABSQ:
24339 case IX86_BUILTIN_COPYSIGNQ:
24340 if (!TARGET_SSE2)
24341 /* Emit a normal call if SSE2 isn't available. */
24342 return expand_call (exp, target, ignore);
24343 default:
24344 return ix86_expand_args_builtin (d, exp, target);
24347 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24348 if (d->code == fcode)
24349 return ix86_expand_sse_comi (d, exp, target);
24351 for (i = 0, d = bdesc_pcmpestr;
24352 i < ARRAY_SIZE (bdesc_pcmpestr);
24353 i++, d++)
24354 if (d->code == fcode)
24355 return ix86_expand_sse_pcmpestr (d, exp, target);
24357 for (i = 0, d = bdesc_pcmpistr;
24358 i < ARRAY_SIZE (bdesc_pcmpistr);
24359 i++, d++)
24360 if (d->code == fcode)
24361 return ix86_expand_sse_pcmpistr (d, exp, target);
24363 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24364 if (d->code == fcode)
24365 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24366 (enum multi_arg_type)d->flag,
24367 d->comparison);
24369 gcc_unreachable ();
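/* Illustrative user code (guarded out) for the ISA check at the top of
   ix86_expand_builtin: with function-specific options, the builtin below is
   accepted even if the file is not compiled with -msse4.1, because the ISA
   test is made in the context of the calling function.  In a caller without
   the target attribute (and without -msse4.1) the check above reports
   "needs isa option" and returns const0_rtx.  A sketch only.  */
#if 0
typedef int example_v4si __attribute__ ((vector_size (16)));

__attribute__ ((target ("sse4.1")))
example_v4si
example_isa_gate (example_v4si a, example_v4si b)
{
  return __builtin_ia32_pmaxsd128 (a, b);	/* SSE4.1 pmaxsd */
}
#endif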
24372 /* Returns a function decl for a vectorized version of the builtin function
24373 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24374 if it is not available. */
24376 static tree
24377 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
24378 tree type_in)
24380 enum machine_mode in_mode, out_mode;
24381 int in_n, out_n;
24383 if (TREE_CODE (type_out) != VECTOR_TYPE
24384 || TREE_CODE (type_in) != VECTOR_TYPE)
24385 return NULL_TREE;
24387 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24388 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24389 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24390 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24392 switch (fn)
24394 case BUILT_IN_SQRT:
24395 if (out_mode == DFmode && out_n == 2
24396 && in_mode == DFmode && in_n == 2)
24397 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24398 break;
24400 case BUILT_IN_SQRTF:
24401 if (out_mode == SFmode && out_n == 4
24402 && in_mode == SFmode && in_n == 4)
24403 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24404 break;
24406 case BUILT_IN_LRINT:
24407 if (out_mode == SImode && out_n == 4
24408 && in_mode == DFmode && in_n == 2)
24409 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24410 break;
24412 case BUILT_IN_LRINTF:
24413 if (out_mode == SImode && out_n == 4
24414 && in_mode == SFmode && in_n == 4)
24415 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24416 break;
24418 default:
24422 /* Dispatch to a handler for a vectorization library. */
24423 if (ix86_veclib_handler)
24424 return (*ix86_veclib_handler)(fn, type_out, type_in);
24426 return NULL_TREE;
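/* Illustrative source loop (guarded out): compiled with something like
   -O2 -ftree-vectorize -msse2 -fno-math-errno, the vectorizer queries the
   hook above for BUILT_IN_SQRT and substitutes IX86_BUILTIN_SQRTPD, i.e.
   one sqrtpd per two doubles.  A sketch of the triggering code only.  */
#if 0
#include <math.h>

void
example_vect_sqrt (double *a, const double *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    a[i] = sqrt (b[i]);
}
#endif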
24429 /* Handler for an SVML-style interface to
24430 a library with vectorized intrinsics. */
24432 static tree
24433 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24435 char name[20];
24436 tree fntype, new_fndecl, args;
24437 unsigned arity;
24438 const char *bname;
24439 enum machine_mode el_mode, in_mode;
24440 int n, in_n;
24442 /* SVML is suitable for unsafe math only. */
24443 if (!flag_unsafe_math_optimizations)
24444 return NULL_TREE;
24446 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24447 n = TYPE_VECTOR_SUBPARTS (type_out);
24448 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24449 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24450 if (el_mode != in_mode
24451 || n != in_n)
24452 return NULL_TREE;
24454 switch (fn)
24456 case BUILT_IN_EXP:
24457 case BUILT_IN_LOG:
24458 case BUILT_IN_LOG10:
24459 case BUILT_IN_POW:
24460 case BUILT_IN_TANH:
24461 case BUILT_IN_TAN:
24462 case BUILT_IN_ATAN:
24463 case BUILT_IN_ATAN2:
24464 case BUILT_IN_ATANH:
24465 case BUILT_IN_CBRT:
24466 case BUILT_IN_SINH:
24467 case BUILT_IN_SIN:
24468 case BUILT_IN_ASINH:
24469 case BUILT_IN_ASIN:
24470 case BUILT_IN_COSH:
24471 case BUILT_IN_COS:
24472 case BUILT_IN_ACOSH:
24473 case BUILT_IN_ACOS:
24474 if (el_mode != DFmode || n != 2)
24475 return NULL_TREE;
24476 break;
24478 case BUILT_IN_EXPF:
24479 case BUILT_IN_LOGF:
24480 case BUILT_IN_LOG10F:
24481 case BUILT_IN_POWF:
24482 case BUILT_IN_TANHF:
24483 case BUILT_IN_TANF:
24484 case BUILT_IN_ATANF:
24485 case BUILT_IN_ATAN2F:
24486 case BUILT_IN_ATANHF:
24487 case BUILT_IN_CBRTF:
24488 case BUILT_IN_SINHF:
24489 case BUILT_IN_SINF:
24490 case BUILT_IN_ASINHF:
24491 case BUILT_IN_ASINF:
24492 case BUILT_IN_COSHF:
24493 case BUILT_IN_COSF:
24494 case BUILT_IN_ACOSHF:
24495 case BUILT_IN_ACOSF:
24496 if (el_mode != SFmode || n != 4)
24497 return NULL_TREE;
24498 break;
24500 default:
24501 return NULL_TREE;
24504 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24506 if (fn == BUILT_IN_LOGF)
24507 strcpy (name, "vmlsLn4");
24508 else if (fn == BUILT_IN_LOG)
24509 strcpy (name, "vmldLn2");
24510 else if (n == 4)
24512 sprintf (name, "vmls%s", bname + 10);	/* skip the "__builtin_" prefix */
24513 name[strlen (name) - 1] = '4';	/* overwrite the trailing 'f' */
24515 else
24516 sprintf (name, "vmld%s2", bname+10);
24518 /* Convert the first letter of the function name to uppercase. */
24519 name[4] &= ~0x20;
24521 arity = 0;
24522 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24523 args = TREE_CHAIN (args))
24524 arity++;
24526 if (arity == 1)
24527 fntype = build_function_type_list (type_out, type_in, NULL);
24528 else
24529 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24531 /* Build a function declaration for the vectorized function. */
24532 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
24533 TREE_PUBLIC (new_fndecl) = 1;
24534 DECL_EXTERNAL (new_fndecl) = 1;
24535 DECL_IS_NOVOPS (new_fndecl) = 1;
24536 TREE_READONLY (new_fndecl) = 1;
24538 return new_fndecl;
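/* A standalone sketch (guarded out) of the SVML mangling above: bname
   carries the "__builtin_" prefix, hence the +10 skip, and the trailing 'f'
   of the float variant is overwritten by the vector width.  The logf/log
   special cases (vmlsLn4/vmldLn2) are omitted here.  */
#if 0
#include <stdio.h>
#include <string.h>

static void
svml_name (const char *bname, int n, char *name)
{
  if (n == 4)
    {
      sprintf (name, "vmls%s", bname + 10);
      name[strlen (name) - 1] = '4';	/* replace the trailing 'f' */
    }
  else
    sprintf (name, "vmld%s2", bname + 10);
  name[4] &= ~0x20;			/* uppercase the first letter */
}

int
main (void)
{
  char buf[20];
  svml_name ("__builtin_sinf", 4, buf);
  printf ("%s\n", buf);	/* prints vmlsSin4 */
  svml_name ("__builtin_sin", 2, buf);
  printf ("%s\n", buf);	/* prints vmldSin2 */
  return 0;
}
#endif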
24541 /* Handler for an ACML-style interface to
24542 a library with vectorized intrinsics. */
24544 static tree
24545 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24547 char name[20] = "__vr.._";
24548 tree fntype, new_fndecl, args;
24549 unsigned arity;
24550 const char *bname;
24551 enum machine_mode el_mode, in_mode;
24552 int n, in_n;
24554 /* The ACML is 64-bit only and suitable for unsafe math only, as
24555 it does not correctly support parts of IEEE arithmetic, such as
24556 denormals, with the required precision. */
24557 if (!TARGET_64BIT
24558 || !flag_unsafe_math_optimizations)
24559 return NULL_TREE;
24561 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24562 n = TYPE_VECTOR_SUBPARTS (type_out);
24563 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24564 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24565 if (el_mode != in_mode
24566 || n != in_n)
24567 return NULL_TREE;
24569 switch (fn)
24571 case BUILT_IN_SIN:
24572 case BUILT_IN_COS:
24573 case BUILT_IN_EXP:
24574 case BUILT_IN_LOG:
24575 case BUILT_IN_LOG2:
24576 case BUILT_IN_LOG10:
24577 name[4] = 'd';
24578 name[5] = '2';
24579 if (el_mode != DFmode
24580 || n != 2)
24581 return NULL_TREE;
24582 break;
24584 case BUILT_IN_SINF:
24585 case BUILT_IN_COSF:
24586 case BUILT_IN_EXPF:
24587 case BUILT_IN_POWF:
24588 case BUILT_IN_LOGF:
24589 case BUILT_IN_LOG2F:
24590 case BUILT_IN_LOG10F:
24591 name[4] = 's';
24592 name[5] = '4';
24593 if (el_mode != SFmode
24594 || n != 4)
24595 return NULL_TREE;
24596 break;
24598 default:
24599 return NULL_TREE;
24602 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24603 sprintf (name + 7, "%s", bname + 10);	/* skip the "__builtin_" prefix */
24605 arity = 0;
24606 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24607 args = TREE_CHAIN (args))
24608 arity++;
24610 if (arity == 1)
24611 fntype = build_function_type_list (type_out, type_in, NULL);
24612 else
24613 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24615 /* Build a function declaration for the vectorized function. */
24616 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
24617 TREE_PUBLIC (new_fndecl) = 1;
24618 DECL_EXTERNAL (new_fndecl) = 1;
24619 DECL_IS_NOVOPS (new_fndecl) = 1;
24620 TREE_READONLY (new_fndecl) = 1;
24622 return new_fndecl;
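/* A standalone sketch (guarded out) of the ACML name template above:
   name[4]/name[5] patch the "__vr.._" template and bname + 10 skips
   "__builtin_", so BUILT_IN_SIN yields "__vrd2_sin" and BUILT_IN_SINF
   yields "__vrs4_sinf".  */
#if 0
#include <stdio.h>

int
main (void)
{
  char name[20] = "__vr.._";
  name[4] = 'd';
  name[5] = '2';
  sprintf (name + 7, "%s", "__builtin_sin" + 10);
  printf ("%s\n", name);	/* prints __vrd2_sin */
  return 0;
}
#endif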
24626 /* Returns a decl of a function that implements conversion of an integer vector
24627 into a floating-point vector, or vice-versa. TYPE is the type of the integer
24628 side of the conversion.
24629 Return NULL_TREE if it is not available. */
24631 static tree
24632 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
24634 if (TREE_CODE (type) != VECTOR_TYPE)
24635 return NULL_TREE;
24637 switch (code)
24639 case FLOAT_EXPR:
24640 switch (TYPE_MODE (type))
24642 case V4SImode:
24643 return ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
24644 default:
24645 return NULL_TREE;
24648 case FIX_TRUNC_EXPR:
24649 switch (TYPE_MODE (type))
24651 case V4SImode:
24652 return ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
24653 default:
24654 return NULL_TREE;
24656 default:
24657 return NULL_TREE;
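/* Illustrative source loop (guarded out): with -O2 -ftree-vectorize -msse2
   the FLOAT_EXPR case above lets the vectorizer convert four ints per
   iteration using IX86_BUILTIN_CVTDQ2PS (cvtdq2ps).  A sketch only.  */
#if 0
void
example_int_to_float (float *f, const int *i, int n)
{
  int k;
  for (k = 0; k < n; k++)
    f[k] = (float) i[k];
}
#endif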
24662 /* Returns a decl for a target-specific builtin that implements the
24663 reciprocal of the function, or NULL_TREE if not available. */
24665 static tree
24666 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24667 bool sqrt ATTRIBUTE_UNUSED)
24669 if (! (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
24670 && flag_finite_math_only && !flag_trapping_math
24671 && flag_unsafe_math_optimizations))
24672 return NULL_TREE;
24674 if (md_fn)
24675 /* Machine dependent builtins. */
24676 switch (fn)
24678 /* Vectorized version of sqrt to rsqrt conversion. */
24679 case IX86_BUILTIN_SQRTPS_NR:
24680 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24682 default:
24683 return NULL_TREE;
24685 else
24686 /* Normal builtins. */
24687 switch (fn)
24689 /* Sqrt to rsqrt conversion. */
24690 case BUILT_IN_SQRTF:
24691 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24693 default:
24694 return NULL_TREE;
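/* Illustrative source (guarded out): under flags satisfying the test above,
   e.g. -O2 -mrecip -mfpmath=sse -ffast-math, BUILT_IN_SQRTF is replaced by
   IX86_BUILTIN_RSQRTF, so the expression below becomes rsqrtss plus a
   Newton-Raphson step instead of sqrtss and divss.  A sketch only.  */
#if 0
#include <math.h>

float
example_rsqrt (float x)
{
  return 1.0f / sqrtf (x);
}
#endif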
24698 /* Store OPERAND to the memory after reload is completed. This means
24699 that we can't easily use assign_stack_local. */
24700 rtx
24701 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24703 rtx result;
24705 gcc_assert (reload_completed);
24706 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24708 result = gen_rtx_MEM (mode,
24709 gen_rtx_PLUS (Pmode,
24710 stack_pointer_rtx,
24711 GEN_INT (-RED_ZONE_SIZE)));
24712 emit_move_insn (result, operand);
24714 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24716 switch (mode)
24718 case HImode:
24719 case SImode:
24720 operand = gen_lowpart (DImode, operand);
24721 /* FALLTHRU */
24722 case DImode:
24723 emit_insn (
24724 gen_rtx_SET (VOIDmode,
24725 gen_rtx_MEM (DImode,
24726 gen_rtx_PRE_DEC (DImode,
24727 stack_pointer_rtx)),
24728 operand));
24729 break;
24730 default:
24731 gcc_unreachable ();
24733 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24735 else
24737 switch (mode)
24739 case DImode:
24741 rtx operands[2];
24742 split_di (&operand, 1, operands, operands + 1);
24743 emit_insn (
24744 gen_rtx_SET (VOIDmode,
24745 gen_rtx_MEM (SImode,
24746 gen_rtx_PRE_DEC (Pmode,
24747 stack_pointer_rtx)),
24748 operands[1]));
24749 emit_insn (
24750 gen_rtx_SET (VOIDmode,
24751 gen_rtx_MEM (SImode,
24752 gen_rtx_PRE_DEC (Pmode,
24753 stack_pointer_rtx)),
24754 operands[0]));
24756 break;
24757 case HImode:
24758 /* Store HImodes as SImodes. */
24759 operand = gen_lowpart (SImode, operand);
24760 /* FALLTHRU */
24761 case SImode:
24762 emit_insn (
24763 gen_rtx_SET (VOIDmode,
24764 gen_rtx_MEM (GET_MODE (operand),
24765 gen_rtx_PRE_DEC (SImode,
24766 stack_pointer_rtx)),
24767 operand));
24768 break;
24769 default:
24770 gcc_unreachable ();
24772 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24774 return result;
24777 /* Free operand from the memory. */
24778 void
24779 ix86_free_from_memory (enum machine_mode mode)
24781 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
24783 int size;
24785 if (mode == DImode || TARGET_64BIT)
24786 size = 8;
24787 else
24788 size = 4;
24789 /* Use LEA to deallocate stack space. In peephole2 it will be converted
24790 to a pop or add instruction if registers are available. */
24791 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
24792 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24793 GEN_INT (size))));
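/* A minimal usage sketch (guarded out, hypothetical caller): the two
   helpers above pair up.  ix86_force_to_memory materializes OPERAND as a
   stack (or red zone) memory reference after reload, and
   ix86_free_from_memory releases the slot afterwards.  */
#if 0
static void
example_force_free (rtx operand)
{
  rtx mem = ix86_force_to_memory (SImode, operand);
  /* ... emit insns that need OPERAND as a memory operand, using MEM ... */
  ix86_free_from_memory (SImode);
}
#endif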
24797 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
24798 QImode must go into class Q_REGS.
24799 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
24800 movdf to do mem-to-mem moves through integer regs. */
24801 enum reg_class
24802 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
24804 enum machine_mode mode = GET_MODE (x);
24806 /* We're only allowed to return a subclass of CLASS. Many of the
24807 following checks fail for NO_REGS, so eliminate that early. */
24808 if (regclass == NO_REGS)
24809 return NO_REGS;
24811 /* All classes can load zeros. */
24812 if (x == CONST0_RTX (mode))
24813 return regclass;
24815 /* Force constants into memory if we are loading a (nonzero) constant into
24816 an MMX or SSE register. This is because there are no MMX/SSE instructions
24817 to load from a constant. */
24818 if (CONSTANT_P (x)
24819 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
24820 return NO_REGS;
24822 /* Prefer SSE regs only, if we can use them for math. */
24823 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
24824 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
24826 /* Floating-point constants need more complex checks. */
24827 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
24829 /* General regs can load everything. */
24830 if (reg_class_subset_p (regclass, GENERAL_REGS))
24831 return regclass;
24833 /* Floats can load 0 and 1 plus some others. Note that we eliminated
24834 zero above. We only want to wind up preferring 80387 registers if
24835 we plan on doing computation with them. */
24836 if (TARGET_80387
24837 && standard_80387_constant_p (x))
24839 /* Limit class to non-sse. */
24840 if (regclass == FLOAT_SSE_REGS)
24841 return FLOAT_REGS;
24842 if (regclass == FP_TOP_SSE_REGS)
24843 return FP_TOP_REG;
24844 if (regclass == FP_SECOND_SSE_REGS)
24845 return FP_SECOND_REG;
24846 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
24847 return regclass;
24850 return NO_REGS;
24853 /* Generally when we see PLUS here, it's the function invariant
24854 (plus soft-fp const_int), which can only be computed into general
24855 regs. */
24856 if (GET_CODE (x) == PLUS)
24857 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
24859 /* QImode constants are easy to load, but non-constant QImode data
24860 must go into Q_REGS. */
24861 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
24863 if (reg_class_subset_p (regclass, Q_REGS))
24864 return regclass;
24865 if (reg_class_subset_p (Q_REGS, regclass))
24866 return Q_REGS;
24867 return NO_REGS;
24870 return regclass;
24873 /* Discourage putting floating-point values in SSE registers unless
24874 SSE math is being used, and likewise for the 387 registers. */
24875 enum reg_class
24876 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
24878 enum machine_mode mode = GET_MODE (x);
24880 /* Restrict the output reload class to the register bank that we are doing
24881 math on. If we would rather not return a subset of CLASS, reject this
24882 alternative: if reload cannot do this, it will still use its choice. */
24884 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
24885 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
24887 if (X87_FLOAT_MODE_P (mode))
24889 if (regclass == FP_TOP_SSE_REGS)
24890 return FP_TOP_REG;
24891 else if (regclass == FP_SECOND_SSE_REGS)
24892 return FP_SECOND_REG;
24893 else
24894 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
24897 return regclass;
24900 static enum reg_class
24901 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
24902 enum machine_mode mode,
24903 secondary_reload_info *sri ATTRIBUTE_UNUSED)
24905 /* QImode spills from non-QI registers require an
24906 intermediate register on 32-bit targets. */
24907 if (!in_p && mode == QImode && !TARGET_64BIT
24908 && (rclass == GENERAL_REGS
24909 || rclass == LEGACY_REGS
24910 || rclass == INDEX_REGS))
24912 int regno;
24914 if (REG_P (x))
24915 regno = REGNO (x);
24916 else
24917 regno = -1;
24919 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
24920 regno = true_regnum (x);
24922 /* Return Q_REGS if the operand is in memory. */
24923 if (regno == -1)
24924 return Q_REGS;
24927 return NO_REGS;
24930 /* If we are copying between general and FP registers, we need a memory
24931 location. The same is true for SSE and MMX registers.
24933 To optimize register_move_cost performance, provide an inline variant.
24935 The macro can't work reliably when one of the CLASSES is a class containing
24936 registers from multiple units (SSE, MMX, integer). We avoid this by never
24937 combining those units in a single alternative in the machine description.
24938 Ensure that this constraint holds to avoid unexpected surprises.
24940 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
24941 enforce these sanity checks. */
24943 static inline int
24944 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
24945 enum machine_mode mode, int strict)
24947 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
24948 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
24949 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
24950 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
24951 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
24952 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
24954 gcc_assert (!strict);
24955 return true;
24958 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
24959 return true;
24961 /* ??? This is a lie. We do have moves between mmx/general and between
24962 mmx/sse2. But by saying we need secondary memory we discourage the
24963 register allocator from using the mmx registers unless needed. */
24964 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
24965 return true;
24967 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
24969 /* SSE1 doesn't have any direct moves from other classes. */
24970 if (!TARGET_SSE2)
24971 return true;
24973 /* If the target says that inter-unit moves are more expensive
24974 than moving through memory, then don't generate them. */
24975 if (!TARGET_INTER_UNIT_MOVES)
24976 return true;
24978 /* Between SSE and general, we have moves no larger than word size. */
24979 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
24980 return true;
24983 return false;
24986 int
24987 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
24988 enum machine_mode mode, int strict)
24990 return inline_secondary_memory_needed (class1, class2, mode, strict);
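/* Illustrative user code (guarded out): a bit-copy between the SSE and
   integer units.  When inline_secondary_memory_needed returns true (for
   instance when TARGET_INTER_UNIT_MOVES is off), the copy below goes
   through a stack slot instead of a movd.  */
#if 0
#include <string.h>

int
example_sse_to_int (float f)
{
  int i;
  memcpy (&i, &f, sizeof i);	/* float bits from an SSE reg to a GPR */
  return i;
}
#endif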
24993 /* Return true if the registers in CLASS cannot represent the change from
24994 modes FROM to TO. */
24996 bool
24997 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
24998 enum reg_class regclass)
25000 if (from == to)
25001 return false;
25003 /* x87 registers can't do subreg at all, as all values are reformatted
25004 to extended precision. */
25005 if (MAYBE_FLOAT_CLASS_P (regclass))
25006 return true;
25008 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25010 /* Vector registers do not support QI or HImode loads. If we don't
25011 disallow a change to these modes, reload will assume it's ok to
25012 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25013 the vec_dupv4hi pattern. */
25014 if (GET_MODE_SIZE (from) < 4)
25015 return true;
25017 /* Vector registers do not support subreg with nonzero offsets, which
25018 are otherwise valid for integer registers. Since we can't see
25019 whether we have a nonzero offset from here, prohibit all
25020 nonparadoxical subregs changing size. */
25021 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25022 return true;
25025 return false;
25028 /* Return the cost of moving data of mode M between a
25029 register and memory. A value of 2 is the default; this cost is
25030 relative to those in `REGISTER_MOVE_COST'.
25032 This function is used extensively by register_move_cost that is used to
25033 build tables at startup. Make it inline in this case.
25034 When IN is 2, return maximum of in and out move cost.
25036 If moving between registers and memory is more expensive than
25037 between two registers, you should define this macro to express the
25038 relative cost.
25040 Also model the increased cost of moving QImode registers in
25041 non-Q_REGS classes. */
25043 static inline int
25044 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25045 int in)
25047 int cost;
25048 if (FLOAT_CLASS_P (regclass))
25050 int index;
25051 switch (mode)
25053 case SFmode:
25054 index = 0;
25055 break;
25056 case DFmode:
25057 index = 1;
25058 break;
25059 case XFmode:
25060 index = 2;
25061 break;
25062 default:
25063 return 100;
25065 if (in == 2)
25066 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25067 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25069 if (SSE_CLASS_P (regclass))
25071 int index;
25072 switch (GET_MODE_SIZE (mode))
25074 case 4:
25075 index = 0;
25076 break;
25077 case 8:
25078 index = 1;
25079 break;
25080 case 16:
25081 index = 2;
25082 break;
25083 default:
25084 return 100;
25086 if (in == 2)
25087 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25088 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25090 if (MMX_CLASS_P (regclass))
25092 int index;
25093 switch (GET_MODE_SIZE (mode))
25095 case 4:
25096 index = 0;
25097 break;
25098 case 8:
25099 index = 1;
25100 break;
25101 default:
25102 return 100;
25104 if (in == 2)
25105 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25106 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25108 switch (GET_MODE_SIZE (mode))
25110 case 1:
25111 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25113 if (!in)
25114 return ix86_cost->int_store[0];
25115 if (TARGET_PARTIAL_REG_DEPENDENCY
25116 && optimize_function_for_speed_p (cfun))
25117 cost = ix86_cost->movzbl_load;
25118 else
25119 cost = ix86_cost->int_load[0];
25120 if (in == 2)
25121 return MAX (cost, ix86_cost->int_store[0]);
25122 return cost;
25124 else
25126 if (in == 2)
25127 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25128 if (in)
25129 return ix86_cost->movzbl_load;
25130 else
25131 return ix86_cost->int_store[0] + 4;
25133 break;
25134 case 2:
25135 if (in == 2)
25136 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25137 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25138 default:
25139 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
25140 if (mode == TFmode)
25141 mode = XFmode;
25142 if (in == 2)
25143 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25144 else if (in)
25145 cost = ix86_cost->int_load[2];
25146 else
25147 cost = ix86_cost->int_store[2];
25148 return (cost * (((int) GET_MODE_SIZE (mode)
25149 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25153 int
25154 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25156 return inline_memory_move_cost (mode, regclass, in);
25160 /* Return the cost of moving data from a register in class CLASS1 to
25161 one in class CLASS2.
25163 It is not required that the cost always equal 2 when FROM is the same as TO;
25164 on some machines it is expensive to move between registers if they are not
25165 general registers. */
25167 int
25168 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25169 enum reg_class class2)
25171 /* In case we require secondary memory, compute cost of the store followed
25172 by load. In order to avoid bad register allocation choices, we need
25173 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25175 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25177 int cost = 1;
25179 cost += inline_memory_move_cost (mode, class1, 2);
25180 cost += inline_memory_move_cost (mode, class2, 2);
25182 /* In case of copying from general_purpose_register we may emit multiple
25183 stores followed by single load causing memory size mismatch stall.
25184 Count this as arbitrarily high cost of 20. */
25185 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25186 cost += 20;
25188 /* In the case of FP/MMX moves, the registers actually overlap, and we
25189 have to switch modes in order to treat them differently. */
25190 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25191 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25192 cost += 20;
25194 return cost;
25197 /* Moves between SSE/MMX and integer unit are expensive. */
25198 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25199 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25201 /* ??? By keeping the returned value relatively high, we limit the number
25202 of moves between integer and MMX/SSE registers for all targets.
25203 Additionally, the high value prevents a problem with x86_modes_tieable_p(),
25204 where integer modes in MMX/SSE registers are not tieable
25205 because of missing QImode and HImode moves to, from or between
25206 MMX/SSE registers. */
25207 return MAX (8, ix86_cost->mmxsse_to_integer);
25209 if (MAYBE_FLOAT_CLASS_P (class1))
25210 return ix86_cost->fp_move;
25211 if (MAYBE_SSE_CLASS_P (class1))
25212 return ix86_cost->sse_move;
25213 if (MAYBE_MMX_CLASS_P (class1))
25214 return ix86_cost->mmx_move;
25215 return 2;
25218 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25220 bool
25221 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25223 /* Only the flags can hold CCmode values, and flags can hold only CCmode values. */
25224 if (CC_REGNO_P (regno))
25225 return GET_MODE_CLASS (mode) == MODE_CC;
25226 if (GET_MODE_CLASS (mode) == MODE_CC
25227 || GET_MODE_CLASS (mode) == MODE_RANDOM
25228 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25229 return 0;
25230 if (FP_REGNO_P (regno))
25231 return VALID_FP_MODE_P (mode);
25232 if (SSE_REGNO_P (regno))
25234 /* We implement the move patterns for all vector modes into and
25235 out of SSE registers, even when no operation instructions
25236 are available. OImode move is available only when AVX is
25237 enabled. */
25238 return ((TARGET_AVX && mode == OImode)
25239 || VALID_AVX256_REG_MODE (mode)
25240 || VALID_SSE_REG_MODE (mode)
25241 || VALID_SSE2_REG_MODE (mode)
25242 || VALID_MMX_REG_MODE (mode)
25243 || VALID_MMX_REG_MODE_3DNOW (mode));
25245 if (MMX_REGNO_P (regno))
25247 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25248 so if the register is available at all, then we can move data of
25249 the given mode into or out of it. */
25250 return (VALID_MMX_REG_MODE (mode)
25251 || VALID_MMX_REG_MODE_3DNOW (mode));
25254 if (mode == QImode)
25256 /* Take care with QImode values: they can live in non-QI regs,
25257 but they then cause partial register stalls. */
25258 if (regno < 4 || TARGET_64BIT)
25259 return 1;
25260 if (!TARGET_PARTIAL_REG_STALL)
25261 return 1;
25262 return reload_in_progress || reload_completed;
25264 /* We handle both integers and floats in the general purpose registers. */
25265 else if (VALID_INT_MODE_P (mode))
25266 return 1;
25267 else if (VALID_FP_MODE_P (mode))
25268 return 1;
25269 else if (VALID_DFP_MODE_P (mode))
25270 return 1;
25271 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25272 on to use that value in smaller contexts, this can easily force a
25273 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25274 supporting DImode, allow it. */
25275 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25276 return 1;
25278 return 0;
25281 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25282 tieable integer mode. */
25284 static bool
25285 ix86_tieable_integer_mode_p (enum machine_mode mode)
25287 switch (mode)
25289 case HImode:
25290 case SImode:
25291 return true;
25293 case QImode:
25294 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25296 case DImode:
25297 return TARGET_64BIT;
25299 default:
25300 return false;
25304 /* Return true if MODE1 is accessible in a register that can hold MODE2
25305 without copying. That is, all register classes that can hold MODE2
25306 can also hold MODE1. */
25308 bool
25309 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25311 if (mode1 == mode2)
25312 return true;
25314 if (ix86_tieable_integer_mode_p (mode1)
25315 && ix86_tieable_integer_mode_p (mode2))
25316 return true;
25318 /* MODE2 being XFmode implies fp stack or general regs, which means we
25319 can tie any smaller floating point modes to it. Note that we do not
25320 tie this with TFmode. */
25321 if (mode2 == XFmode)
25322 return mode1 == SFmode || mode1 == DFmode;
25324 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25325 that we can tie it with SFmode. */
25326 if (mode2 == DFmode)
25327 return mode1 == SFmode;
25329 /* If MODE2 is only appropriate for an SSE register, then tie with
25330 any other mode acceptable to SSE registers. */
25331 if (GET_MODE_SIZE (mode2) == 16
25332 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25333 return (GET_MODE_SIZE (mode1) == 16
25334 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25336 /* If MODE2 is appropriate for an MMX register, then tie
25337 with any other mode acceptable to MMX registers. */
25338 if (GET_MODE_SIZE (mode2) == 8
25339 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25340 return (GET_MODE_SIZE (mode1) == 8
25341 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25343 return false;
25346 /* Compute a (partial) cost for rtx X. Return true if the complete
25347 cost has been computed, and false if subexpressions should be
25348 scanned. In either case, *TOTAL contains the cost result. */
25350 static bool
25351 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25353 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25354 enum machine_mode mode = GET_MODE (x);
25355 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25357 switch (code)
25359 case CONST_INT:
25360 case CONST:
25361 case LABEL_REF:
25362 case SYMBOL_REF:
25363 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25364 *total = 3;
25365 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25366 *total = 2;
25367 else if (flag_pic && SYMBOLIC_CONST (x)
25368 && (!TARGET_64BIT
25369 || (GET_CODE (x) != LABEL_REF
25370 && (GET_CODE (x) != SYMBOL_REF
25371 || !SYMBOL_REF_LOCAL_P (x)))))
25372 *total = 1;
25373 else
25374 *total = 0;
25375 return true;
25377 case CONST_DOUBLE:
25378 if (mode == VOIDmode)
25379 *total = 0;
25380 else
25381 switch (standard_80387_constant_p (x))
25383 case 1: /* 0.0 */
25384 *total = 1;
25385 break;
25386 default: /* Other constants */
25387 *total = 2;
25388 break;
25389 case 0:
25390 case -1:
25391 /* Start with (MEM (SYMBOL_REF)), since that's where
25392 it'll probably end up. Add a penalty for size. */
25393 *total = (COSTS_N_INSNS (1)
25394 + (flag_pic != 0 && !TARGET_64BIT)
25395 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25396 break;
25398 return true;
25400 case ZERO_EXTEND:
25401 /* Zero extension is often completely free on x86_64, so make
25402 it as cheap as possible. */
25403 if (TARGET_64BIT && mode == DImode
25404 && GET_MODE (XEXP (x, 0)) == SImode)
25405 *total = 1;
25406 else if (TARGET_ZERO_EXTEND_WITH_AND)
25407 *total = cost->add;
25408 else
25409 *total = cost->movzx;
25410 return false;
25412 case SIGN_EXTEND:
25413 *total = cost->movsx;
25414 return false;
25416 case ASHIFT:
25417 if (CONST_INT_P (XEXP (x, 1))
25418 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25420 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25421 if (value == 1)
25423 *total = cost->add;
25424 return false;
25426 if ((value == 2 || value == 3)
25427 && cost->lea <= cost->shift_const)
25429 *total = cost->lea;
25430 return false;
25433 /* FALLTHRU */
25435 case ROTATE:
25436 case ASHIFTRT:
25437 case LSHIFTRT:
25438 case ROTATERT:
25439 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25441 if (CONST_INT_P (XEXP (x, 1)))
25443 if (INTVAL (XEXP (x, 1)) > 32)
25444 *total = cost->shift_const + COSTS_N_INSNS (2);
25445 else
25446 *total = cost->shift_const * 2;
25448 else
25450 if (GET_CODE (XEXP (x, 1)) == AND)
25451 *total = cost->shift_var * 2;
25452 else
25453 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25456 else
25458 if (CONST_INT_P (XEXP (x, 1)))
25459 *total = cost->shift_const;
25460 else
25461 *total = cost->shift_var;
25463 return false;
25465 case MULT:
25466 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25468 /* ??? SSE scalar cost should be used here. */
25469 *total = cost->fmul;
25470 return false;
25472 else if (X87_FLOAT_MODE_P (mode))
25474 *total = cost->fmul;
25475 return false;
25477 else if (FLOAT_MODE_P (mode))
25479 /* ??? SSE vector cost should be used here. */
25480 *total = cost->fmul;
25481 return false;
25483 else
25485 rtx op0 = XEXP (x, 0);
25486 rtx op1 = XEXP (x, 1);
25487 int nbits;
25488 if (CONST_INT_P (XEXP (x, 1)))
25490 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25491 for (nbits = 0; value != 0; value &= value - 1)
25492 nbits++;
25494 else
25495 /* This is arbitrary. */
25496 nbits = 7;
25498 /* Compute costs correctly for widening multiplication. */
25499 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25500 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25501 == GET_MODE_SIZE (mode))
25503 int is_mulwiden = 0;
25504 enum machine_mode inner_mode = GET_MODE (op0);
25506 if (GET_CODE (op0) == GET_CODE (op1))
25507 is_mulwiden = 1, op1 = XEXP (op1, 0);
25508 else if (CONST_INT_P (op1))
25510 if (GET_CODE (op0) == SIGN_EXTEND)
25511 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25512 == INTVAL (op1);
25513 else
25514 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25517 if (is_mulwiden)
25518 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25521 *total = (cost->mult_init[MODE_INDEX (mode)]
25522 + nbits * cost->mult_bit
25523 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
25525 return true;
25528 case DIV:
25529 case UDIV:
25530 case MOD:
25531 case UMOD:
25532 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25533 /* ??? SSE cost should be used here. */
25534 *total = cost->fdiv;
25535 else if (X87_FLOAT_MODE_P (mode))
25536 *total = cost->fdiv;
25537 else if (FLOAT_MODE_P (mode))
25538 /* ??? SSE vector cost should be used here. */
25539 *total = cost->fdiv;
25540 else
25541 *total = cost->divide[MODE_INDEX (mode)];
25542 return false;
25544 case PLUS:
25545 if (GET_MODE_CLASS (mode) == MODE_INT
25546 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25548 if (GET_CODE (XEXP (x, 0)) == PLUS
25549 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25550 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25551 && CONSTANT_P (XEXP (x, 1)))
25553 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25554 if (val == 2 || val == 4 || val == 8)
25556 *total = cost->lea;
25557 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25558 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25559 outer_code, speed);
25560 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25561 return true;
25564 else if (GET_CODE (XEXP (x, 0)) == MULT
25565 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25567 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25568 if (val == 2 || val == 4 || val == 8)
25570 *total = cost->lea;
25571 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25572 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25573 return true;
25576 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25578 *total = cost->lea;
25579 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25580 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25581 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25582 return true;
25585 /* FALLTHRU */
25587 case MINUS:
25588 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25590 /* ??? SSE cost should be used here. */
25591 *total = cost->fadd;
25592 return false;
25594 else if (X87_FLOAT_MODE_P (mode))
25596 *total = cost->fadd;
25597 return false;
25599 else if (FLOAT_MODE_P (mode))
25601 /* ??? SSE vector cost should be used here. */
25602 *total = cost->fadd;
25603 return false;
25605 /* FALLTHRU */
25607 case AND:
25608 case IOR:
25609 case XOR:
25610 if (!TARGET_64BIT && mode == DImode)
25612 *total = (cost->add * 2
25613 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25614 << (GET_MODE (XEXP (x, 0)) != DImode))
25615 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25616 << (GET_MODE (XEXP (x, 1)) != DImode)));
25617 return true;
25619 /* FALLTHRU */
25621 case NEG:
25622 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25624 /* ??? SSE cost should be used here. */
25625 *total = cost->fchs;
25626 return false;
25628 else if (X87_FLOAT_MODE_P (mode))
25630 *total = cost->fchs;
25631 return false;
25633 else if (FLOAT_MODE_P (mode))
25635 /* ??? SSE vector cost should be used here. */
25636 *total = cost->fchs;
25637 return false;
25639 /* FALLTHRU */
25641 case NOT:
25642 if (!TARGET_64BIT && mode == DImode)
25643 *total = cost->add * 2;
25644 else
25645 *total = cost->add;
25646 return false;
25648 case COMPARE:
25649 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25650 && XEXP (XEXP (x, 0), 1) == const1_rtx
25651 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25652 && XEXP (x, 1) == const0_rtx)
25654 /* This kind of construct is implemented using test[bwl].
25655 Treat it as if we had an AND. */
25656 *total = (cost->add
25657 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25658 + rtx_cost (const1_rtx, outer_code, speed));
25659 return true;
25661 return false;
25663 case FLOAT_EXTEND:
25664 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25665 *total = 0;
25666 return false;
25668 case ABS:
25669 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25670 /* ??? SSE cost should be used here. */
25671 *total = cost->fabs;
25672 else if (X87_FLOAT_MODE_P (mode))
25673 *total = cost->fabs;
25674 else if (FLOAT_MODE_P (mode))
25675 /* ??? SSE vector cost should be used here. */
25676 *total = cost->fabs;
25677 return false;
25679 case SQRT:
25680 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25681 /* ??? SSE cost should be used here. */
25682 *total = cost->fsqrt;
25683 else if (X87_FLOAT_MODE_P (mode))
25684 *total = cost->fsqrt;
25685 else if (FLOAT_MODE_P (mode))
25686 /* ??? SSE vector cost should be used here. */
25687 *total = cost->fsqrt;
25688 return false;
25690 case UNSPEC:
25691 if (XINT (x, 1) == UNSPEC_TP)
25692 *total = 0;
25693 return false;
25695 default:
25696 return false;
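/* Illustrative source (guarded out): the PLUS case above recognizes
   base + index*scale + displacement and costs it as a single lea, e.g.
   the address arithmetic below.  A sketch only.  */
#if 0
long
example_lea_cost (long base, long index)
{
  return base + index * 4 + 12;
}
#endif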
25700 #if TARGET_MACHO
25702 static int current_machopic_label_num;
25704 /* Given a symbol name and its associated stub, write out the
25705 definition of the stub. */
25707 void
25708 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25710 unsigned int length;
25711 char *binder_name, *symbol_name, lazy_ptr_name[32];
25712 int label = ++current_machopic_label_num;
25714 /* For 64-bit we shouldn't get here. */
25715 gcc_assert (!TARGET_64BIT);
25717 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25718 symb = (*targetm.strip_name_encoding) (symb);
25720 length = strlen (stub);
25721 binder_name = XALLOCAVEC (char, length + 32);
25722 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
25724 length = strlen (symb);
25725 symbol_name = XALLOCAVEC (char, length + 32);
25726 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25728 sprintf (lazy_ptr_name, "L%d$lz", label);
25730 if (MACHOPIC_PURE)
25731 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
25732 else
25733 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
25735 fprintf (file, "%s:\n", stub);
25736 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25738 if (MACHOPIC_PURE)
25740 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
25741 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
25742 fprintf (file, "\tjmp\t*%%edx\n");
25744 else
25745 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
25747 fprintf (file, "%s:\n", binder_name);
25749 if (MACHOPIC_PURE)
25751 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
25752 fprintf (file, "\tpushl\t%%eax\n");
25754 else
25755 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
25757 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
25759 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25760 fprintf (file, "%s:\n", lazy_ptr_name);
25761 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25762 fprintf (file, "\t.long %s\n", binder_name);
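/* For reference, the MACHOPIC_PURE path above emits a stub of this shape
   (symbol, stub and binder names illustrative, label numbers arbitrary):

	Lfoo$stub:
		.indirect_symbol _foo
		call	LPC$1
	LPC$1:	popl	%eax
		movl	L1$lz-LPC$1(%eax),%edx
		jmp	*%edx
	<binder>:
		lea	L1$lz-LPC$1(%eax),%eax
		pushl	%eax
		jmp	dyld_stub_binding_helper
	L1$lz:
		.indirect_symbol _foo
		.long	<binder>  */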
25765 void
25766 darwin_x86_file_end (void)
25768 darwin_file_end ();
25769 ix86_file_end ();
25771 #endif /* TARGET_MACHO */
25773 /* Order the registers for register allocator. */
25775 void
25776 x86_order_regs_for_local_alloc (void)
25778 int pos = 0;
25779 int i;
25781 /* First allocate the local general purpose registers. */
25782 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25783 if (GENERAL_REGNO_P (i) && call_used_regs[i])
25784 reg_alloc_order [pos++] = i;
25786 /* Global general purpose registers. */
25787 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25788 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
25789 reg_alloc_order [pos++] = i;
25791 /* x87 registers come first in case we are doing FP math
25792 using them. */
25793 if (!TARGET_SSE_MATH)
25794 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25795 reg_alloc_order [pos++] = i;
25797 /* SSE registers. */
25798 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
25799 reg_alloc_order [pos++] = i;
25800 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
25801 reg_alloc_order [pos++] = i;
25803 /* x87 registers. */
25804 if (TARGET_SSE_MATH)
25805 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25806 reg_alloc_order [pos++] = i;
25808 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
25809 reg_alloc_order [pos++] = i;
25811 /* Initialize the rest of the array, as we do not allocate some
25812 registers at all. */
25813 while (pos < FIRST_PSEUDO_REGISTER)
25814 reg_alloc_order [pos++] = 0;
25817 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
25818 struct attribute_spec.handler. */
25819 static tree
25820 ix86_handle_abi_attribute (tree *node, tree name,
25821 tree args ATTRIBUTE_UNUSED,
25822 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25824 if (TREE_CODE (*node) != FUNCTION_TYPE
25825 && TREE_CODE (*node) != METHOD_TYPE
25826 && TREE_CODE (*node) != FIELD_DECL
25827 && TREE_CODE (*node) != TYPE_DECL)
25829 warning (OPT_Wattributes, "%qs attribute only applies to functions",
25830 IDENTIFIER_POINTER (name));
25831 *no_add_attrs = true;
25832 return NULL_TREE;
25834 if (!TARGET_64BIT)
25836 warning (OPT_Wattributes, "%qs attribute only available for 64-bit",
25837 IDENTIFIER_POINTER (name));
25838 *no_add_attrs = true;
25839 return NULL_TREE;
25842 /* The ms_abi and sysv_abi attributes are mutually exclusive. */
25843 if (is_attribute_p ("ms_abi", name))
25845 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
25847 error ("ms_abi and sysv_abi attributes are not compatible");
25850 return NULL_TREE;
25852 else if (is_attribute_p ("sysv_abi", name))
25854 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
25856 error ("ms_abi and sysv_abi attributes are not compatible");
25859 return NULL_TREE;
25862 return NULL_TREE;
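/* Illustrative user-level usage of the attribute handled above (guarded
   out, 64-bit only): declare a function pointer that uses the Microsoft
   calling convention from SysV code.  A sketch.  */
#if 0
typedef int (*ms_fn) (int, int) __attribute__ ((ms_abi));

int
example_call_ms (ms_fn fn)
{
  return fn (1, 2);
}
#endif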
25865 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
25866 struct attribute_spec.handler. */
25867 static tree
25868 ix86_handle_struct_attribute (tree *node, tree name,
25869 tree args ATTRIBUTE_UNUSED,
25870 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25872 tree *type = NULL;
25873 if (DECL_P (*node))
25875 if (TREE_CODE (*node) == TYPE_DECL)
25876 type = &TREE_TYPE (*node);
25878 else
25879 type = node;
25881 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
25882 || TREE_CODE (*type) == UNION_TYPE)))
25884 warning (OPT_Wattributes, "%qs attribute ignored",
25885 IDENTIFIER_POINTER (name));
25886 *no_add_attrs = true;
25889 else if ((is_attribute_p ("ms_struct", name)
25890 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
25891 || ((is_attribute_p ("gcc_struct", name)
25892 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
25894 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
25895 IDENTIFIER_POINTER (name));
25896 *no_add_attrs = true;
25899 return NULL_TREE;
25902 static bool
25903 ix86_ms_bitfield_layout_p (const_tree record_type)
25905 return ((TARGET_MS_BITFIELD_LAYOUT
25906 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
25907 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
25910 /* Returns an expression indicating where the this parameter is
25911 located on entry to the FUNCTION. */
25913 static rtx
25914 x86_this_parameter (tree function)
25916 tree type = TREE_TYPE (function);
25917 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
25918 int nregs;
25920 if (TARGET_64BIT)
25922 const int *parm_regs;
25924 if (ix86_function_type_abi (type) == MS_ABI)
25925 parm_regs = x86_64_ms_abi_int_parameter_registers;
25926 else
25927 parm_regs = x86_64_int_parameter_registers;
25928 return gen_rtx_REG (DImode, parm_regs[aggr]);
25931 nregs = ix86_function_regparm (type, function);
25933 if (nregs > 0 && !stdarg_p (type))
25935 int regno;
25937 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
25938 regno = aggr ? DX_REG : CX_REG;
25939 else
25941 regno = AX_REG;
25942 if (aggr)
25944 regno = DX_REG;
25945 if (nregs == 1)
25946 return gen_rtx_MEM (SImode,
25947 plus_constant (stack_pointer_rtx, 4));
25950 return gen_rtx_REG (SImode, regno);
25953 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
25956 /* Determine whether x86_output_mi_thunk can succeed. */
25958 static bool
25959 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
25960 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
25961 HOST_WIDE_INT vcall_offset, const_tree function)
25963 /* 64-bit can handle anything. */
25964 if (TARGET_64BIT)
25965 return true;
25967 /* For 32-bit, everything's fine if we have one free register. */
25968 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
25969 return true;
25971 /* Need a free register for vcall_offset. */
25972 if (vcall_offset)
25973 return false;
25975 /* Need a free register for GOT references. */
25976 if (flag_pic && !(*targetm.binds_local_p) (function))
25977 return false;
25979 /* Otherwise ok. */
25980 return true;
25983 /* Output the assembler code for a thunk function. THUNK_DECL is the
25984 declaration for the thunk function itself, FUNCTION is the decl for
25985 the target function. DELTA is an immediate constant offset to be
25986 added to THIS. If VCALL_OFFSET is nonzero, the word at
25987 *(*this + vcall_offset) should be added to THIS. */
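/* Editorial sketch of what such a thunk does, in C-like pseudocode
   (not part of the original source):

     this += DELTA;
     if (VCALL_OFFSET)
       this += *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
     goto FUNCTION;

   i.e. adjust the this pointer by a fixed and/or vtable-stored delta,
   then tail-jump to the real method.  */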
25989 static void
25990 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
25991 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
25992 HOST_WIDE_INT vcall_offset, tree function)
25994 rtx xops[3];
25995 rtx this_param = x86_this_parameter (function);
25996 rtx this_reg, tmp;
25998 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
25999 pull it in now and let DELTA benefit. */
26000 if (REG_P (this_param))
26001 this_reg = this_param;
26002 else if (vcall_offset)
26004 /* Put the this parameter into %eax. */
26005 xops[0] = this_param;
26006 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26007 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26009 else
26010 this_reg = NULL_RTX;
26012 /* Adjust the this parameter by a fixed constant. */
26013 if (delta)
26015 xops[0] = GEN_INT (delta);
26016 xops[1] = this_reg ? this_reg : this_param;
26017 if (TARGET_64BIT)
26019 if (!x86_64_general_operand (xops[0], DImode))
26021 tmp = gen_rtx_REG (DImode, R10_REG);
26022 xops[1] = tmp;
26023 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26024 xops[0] = tmp;
26025 xops[1] = this_param;
26027 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26029 else
26030 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26033 /* Adjust the this parameter by a value stored in the vtable. */
26034 if (vcall_offset)
26036 if (TARGET_64BIT)
26037 tmp = gen_rtx_REG (DImode, R10_REG);
26038 else
26040 int tmp_regno = CX_REG;
26041 if (lookup_attribute ("fastcall",
26042 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26043 tmp_regno = AX_REG;
26044 tmp = gen_rtx_REG (SImode, tmp_regno);
26047 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26048 xops[1] = tmp;
26049 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26051 /* Adjust the this parameter. */
26052 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26053 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26055 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26056 xops[0] = GEN_INT (vcall_offset);
26057 xops[1] = tmp2;
26058 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26059 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26061 xops[1] = this_reg;
26062 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26065 /* If necessary, drop THIS back to its stack slot. */
26066 if (this_reg && this_reg != this_param)
26068 xops[0] = this_reg;
26069 xops[1] = this_param;
26070 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26073 xops[0] = XEXP (DECL_RTL (function), 0);
26074 if (TARGET_64BIT)
26076 if (!flag_pic || (*targetm.binds_local_p) (function))
26077 output_asm_insn ("jmp\t%P0", xops);
26078 /* All thunks should be in the same object as their target,
26079 and thus binds_local_p should be true. */
26080 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26081 gcc_unreachable ();
26082 else
26084 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26085 tmp = gen_rtx_CONST (Pmode, tmp);
26086 tmp = gen_rtx_MEM (QImode, tmp);
26087 xops[0] = tmp;
26088 output_asm_insn ("jmp\t%A0", xops);
26091 else
26093 if (!flag_pic || (*targetm.binds_local_p) (function))
26094 output_asm_insn ("jmp\t%P0", xops);
26095 else
26096 #if TARGET_MACHO
26097 if (TARGET_MACHO)
26099 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26100 tmp = (gen_rtx_SYMBOL_REF
26101 (Pmode,
26102 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26103 tmp = gen_rtx_MEM (QImode, tmp);
26104 xops[0] = tmp;
26105 output_asm_insn ("jmp\t%0", xops);
26107 else
26108 #endif /* TARGET_MACHO */
26110 tmp = gen_rtx_REG (SImode, CX_REG);
26111 output_set_got (tmp, NULL_RTX);
26113 xops[1] = tmp;
26114 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26115 output_asm_insn ("jmp\t{*}%1", xops);
26120 static void
26121 x86_file_start (void)
26123 default_file_start ();
26124 #if TARGET_MACHO
26125 darwin_file_start ();
26126 #endif
26127 if (X86_FILE_START_VERSION_DIRECTIVE)
26128 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26129 if (X86_FILE_START_FLTUSED)
26130 fputs ("\t.global\t__fltused\n", asm_out_file);
26131 if (ix86_asm_dialect == ASM_INTEL)
26132 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26135 int
26136 x86_field_alignment (tree field, int computed)
26138 enum machine_mode mode;
26139 tree type = TREE_TYPE (field);
26141 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26142 return computed;
26143 mode = TYPE_MODE (strip_array_types (type));
26144 if (mode == DFmode || mode == DCmode
26145 || GET_MODE_CLASS (mode) == MODE_INT
26146 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26147 return MIN (32, computed);
26148 return computed;
26151 /* Output assembler code to FILE to increment profiler label # LABELNO
26152 for profiling a function entry. */
26153 void
26154 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26156 if (TARGET_64BIT)
26158 #ifndef NO_PROFILE_COUNTERS
26159 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
26160 #endif
26162 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26163 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
26164 else
26165 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
26167 else if (flag_pic)
26169 #ifndef NO_PROFILE_COUNTERS
26170 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
26171 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
26172 #endif
26173 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
26175 else
26177 #ifndef NO_PROFILE_COUNTERS
26178 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
26179 PROFILE_COUNT_REGISTER);
26180 #endif
26181 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
26185 /* We don't have exact information about the insn sizes, but we may assume
26186 quite safely that we are informed about all 1-byte insns and memory
26187 address sizes. This is enough to eliminate unnecessary padding in
26188 99% of cases. */
26190 static int
26191 min_insn_size (rtx insn)
26193 int l = 0;
26195 if (!INSN_P (insn) || !active_insn_p (insn))
26196 return 0;
26198 /* Discard alignments we've emitted, and jump instructions. */
26199 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26200 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26201 return 0;
26202 if (JUMP_P (insn)
26203 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
26204 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
26205 return 0;
26207 /* Important case: calls are always 5 bytes.
26208 It is common to have many calls in a row. */
26209 if (CALL_P (insn)
26210 && symbolic_reference_mentioned_p (PATTERN (insn))
26211 && !SIBLING_CALL_P (insn))
26212 return 5;
26213 if (get_attr_length (insn) <= 1)
26214 return 1;
26216 /* For normal instructions we may rely on the sizes of addresses
26217 and the presence of a symbolic reference to require 4 bytes of encoding.
26218 This is not the case for jumps, where references are PC relative. */
26219 if (!JUMP_P (insn))
26221 l = get_attr_length_address (insn);
26222 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26223 l = 4;
26225 if (l)
26226 return 1+l;
26227 else
26228 return 2;
26231 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
26232 window. */
26234 static void
26235 ix86_avoid_jump_misspredicts (void)
26237 rtx insn, start = get_insns ();
26238 int nbytes = 0, njumps = 0;
26239 int isjump = 0;
26241 /* Look for all minimal intervals of instructions containing 4 jumps.
26242 The intervals are bounded by START and INSN. NBYTES is the total
26243 size of the instructions in the interval, including INSN and not including
26244 START. When NBYTES is smaller than 16 bytes, it is possible
26245 that the ends of START and INSN fall into the same 16-byte window.
26247 The smallest offset in the window at which INSN can start is the case where
26248 START ends on offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
26249 We add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN), the PADSIZE computed below. */
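/* Editorial example: an interval with NBYTES = 10 whose final insn is
   2 bytes long yields PADSIZE = 15 - 10 + 2 = 7, the most padding that
   can be needed to carry that insn into the next 16-byte window in the
   worst-case placement described above.  */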
26251 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
26254 nbytes += min_insn_size (insn);
26255 if (dump_file)
26256 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
26257 INSN_UID (insn), min_insn_size (insn));
26258 if ((JUMP_P (insn)
26259 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26260 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26261 || CALL_P (insn))
26262 njumps++;
26263 else
26264 continue;
26266 while (njumps > 3)
26268 start = NEXT_INSN (start);
26269 if ((JUMP_P (start)
26270 && GET_CODE (PATTERN (start)) != ADDR_VEC
26271 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26272 || CALL_P (start))
26273 njumps--, isjump = 1;
26274 else
26275 isjump = 0;
26276 nbytes -= min_insn_size (start);
26278 gcc_assert (njumps >= 0);
26279 if (dump_file)
26280 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26281 INSN_UID (start), INSN_UID (insn), nbytes);
26283 if (njumps == 3 && isjump && nbytes < 16)
26285 int padsize = 15 - nbytes + min_insn_size (insn);
26287 if (dump_file)
26288 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26289 INSN_UID (insn), padsize);
26290 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
26295 /* The AMD Athlon works faster
26296 when RET is not the destination of a conditional jump or directly preceded
26297 by another jump instruction. We avoid the penalty by inserting a NOP just
26298 before the RET instruction in such cases. */
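/* Editorial note: the replacement emitted below, gen_return_internal_long,
   is the longer return encoding (a rep-prefixed ret in i386.md), which
   avoids the mispredict penalty without a separate NOP insn.  */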
26299 static void
26300 ix86_pad_returns (void)
26302 edge e;
26303 edge_iterator ei;
26305 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26307 basic_block bb = e->src;
26308 rtx ret = BB_END (bb);
26309 rtx prev;
26310 bool replace = false;
26312 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26313 || optimize_bb_for_size_p (bb))
26314 continue;
26315 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26316 if (active_insn_p (prev) || LABEL_P (prev))
26317 break;
26318 if (prev && LABEL_P (prev))
26320 edge e;
26321 edge_iterator ei;
26323 FOR_EACH_EDGE (e, ei, bb->preds)
26324 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26325 && !(e->flags & EDGE_FALLTHRU))
26326 replace = true;
26328 if (!replace)
26330 prev = prev_active_insn (ret);
26331 if (prev
26332 && ((JUMP_P (prev) && any_condjump_p (prev))
26333 || CALL_P (prev)))
26334 replace = true;
26335 /* Empty functions get a branch mispredict even when the jump destination
26336 is not visible to us. */
26337 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
26338 replace = true;
26340 if (replace)
26342 emit_insn_before (gen_return_internal_long (), ret);
26343 delete_insn (ret);
26348 /* Implement machine specific optimizations. We implement padding of returns
26349 for K8 CPUs, and a pass to avoid 4 jumps in a single 16-byte window. */
26350 static void
26351 ix86_reorg (void)
26353 if (TARGET_PAD_RETURNS && optimize
26354 && optimize_function_for_speed_p (cfun))
26355 ix86_pad_returns ();
26356 if (TARGET_FOUR_JUMP_LIMIT && optimize
26357 && optimize_function_for_speed_p (cfun))
26358 ix86_avoid_jump_misspredicts ();
26361 /* Return nonzero when a QImode register that must be represented via a REX
26362 prefix is used. */
26363 bool
26364 x86_extended_QIreg_mentioned_p (rtx insn)
26366 int i;
26367 extract_insn_cached (insn);
26368 for (i = 0; i < recog_data.n_operands; i++)
26369 if (REG_P (recog_data.operand[i])
26370 && REGNO (recog_data.operand[i]) >= 4)
26371 return true;
26372 return false;
26375 /* Return nonzero when P points to a register encoded via a REX prefix.
26376 Called via for_each_rtx. */
26377 static int
26378 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26380 unsigned int regno;
26381 if (!REG_P (*p))
26382 return 0;
26383 regno = REGNO (*p);
26384 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26387 /* Return true when INSN mentions register that must be encoded using REX
26388 prefix. */
26389 bool
26390 x86_extended_reg_mentioned_p (rtx insn)
26392 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26393 extended_reg_mentioned_1, NULL);
26396 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26397 optabs would emit if we didn't have TFmode patterns. */
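/* Editorial sketch of the trick in C (hypothetical helper, not part of
   this file): when the value does not fit the signed range, halve it
   while keeping the low bit sticky so the final rounding stays correct,
   convert, then double:

     double u64_to_double (unsigned long long x)
     {
       if ((long long) x >= 0)
         return (double) (long long) x;               -- fits signed range
       unsigned long long half = (x >> 1) | (x & 1);  -- halve, sticky bit
       return 2.0 * (double) (long long) half;        -- double the result
     }
*/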
26399 void
26400 x86_emit_floatuns (rtx operands[2])
26402 rtx neglab, donelab, i0, i1, f0, in, out;
26403 enum machine_mode mode, inmode;
26405 inmode = GET_MODE (operands[1]);
26406 gcc_assert (inmode == SImode || inmode == DImode);
26408 out = operands[0];
26409 in = force_reg (inmode, operands[1]);
26410 mode = GET_MODE (out);
26411 neglab = gen_label_rtx ();
26412 donelab = gen_label_rtx ();
26413 f0 = gen_reg_rtx (mode);
26415 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26417 expand_float (out, in, 0);
26419 emit_jump_insn (gen_jump (donelab));
26420 emit_barrier ();
26422 emit_label (neglab);
26424 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26425 1, OPTAB_DIRECT);
26426 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26427 1, OPTAB_DIRECT);
26428 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26430 expand_float (f0, i0, 0);
26432 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26434 emit_label (donelab);
26437 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26438 with all elements equal to VAR. Return true if successful. */
26440 static bool
26441 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26442 rtx target, rtx val)
26444 enum machine_mode hmode, smode, wsmode, wvmode;
26445 rtx x;
26447 switch (mode)
26449 case V2SImode:
26450 case V2SFmode:
26451 if (!mmx_ok)
26452 return false;
26453 /* FALLTHRU */
26455 case V2DFmode:
26456 case V2DImode:
26457 case V4SFmode:
26458 case V4SImode:
26459 val = force_reg (GET_MODE_INNER (mode), val);
26460 x = gen_rtx_VEC_DUPLICATE (mode, val);
26461 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26462 return true;
26464 case V4HImode:
26465 if (!mmx_ok)
26466 return false;
26467 if (TARGET_SSE || TARGET_3DNOW_A)
26469 val = gen_lowpart (SImode, val);
26470 x = gen_rtx_TRUNCATE (HImode, val);
26471 x = gen_rtx_VEC_DUPLICATE (mode, x);
26472 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26473 return true;
26475 else
26477 smode = HImode;
26478 wsmode = SImode;
26479 wvmode = V2SImode;
26480 goto widen;
26483 case V8QImode:
26484 if (!mmx_ok)
26485 return false;
26486 smode = QImode;
26487 wsmode = HImode;
26488 wvmode = V4HImode;
26489 goto widen;
26490 case V8HImode:
26491 if (TARGET_SSE2)
26493 rtx tmp1, tmp2;
26494 /* Extend HImode to SImode using a paradoxical SUBREG. */
26495 tmp1 = gen_reg_rtx (SImode);
26496 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26497 /* Insert the SImode value as low element of V4SImode vector. */
26498 tmp2 = gen_reg_rtx (V4SImode);
26499 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
26500 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
26501 CONST0_RTX (V4SImode),
26502 const1_rtx);
26503 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
26504 /* Cast the V4SImode vector back to a V8HImode vector. */
26505 tmp1 = gen_reg_rtx (V8HImode);
26506 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
26507 /* Duplicate the low short through the whole low SImode word. */
26508 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
26509 /* Cast the V8HImode vector back to a V4SImode vector. */
26510 tmp2 = gen_reg_rtx (V4SImode);
26511 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
26512 /* Replicate the low element of the V4SImode vector. */
26513 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
26514 /* Cast the V4SImode vector back to V8HImode, and store in target. */
26515 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
26516 return true;
26518 smode = HImode;
26519 wsmode = SImode;
26520 wvmode = V4SImode;
26521 goto widen;
26522 case V16QImode:
26523 if (TARGET_SSE2)
26525 rtx tmp1, tmp2;
26526 /* Extend QImode to SImode using a paradoxical SUBREG. */
26527 tmp1 = gen_reg_rtx (SImode);
26528 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26529 /* Insert the SImode value as low element of V4SImode vector. */
26530 tmp2 = gen_reg_rtx (V4SImode);
26531 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
26532 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
26533 CONST0_RTX (V4SImode),
26534 const1_rtx);
26535 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
26536 /* Cast the V4SImode vector back to a V16QImode vector. */
26537 tmp1 = gen_reg_rtx (V16QImode);
26538 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
26539 /* Duplicate the low byte through the whole low SImode word. */
26540 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
26541 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
26542 /* Cast the V16QImode vector back to a V4SImode vector. */
26543 tmp2 = gen_reg_rtx (V4SImode);
26544 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
26545 /* Replicate the low element of the V4SImode vector. */
26546 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
26547 /* Cast the V4SImode vector back to V16QImode, and store in target. */
26548 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
26549 return true;
26551 smode = QImode;
26552 wsmode = HImode;
26553 wvmode = V8HImode;
26554 goto widen;
26555 widen:
26556 /* Replicate the value once into the next wider mode and recurse. */
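/* For example, broadcasting a QImode byte B into V8QImode first forms
   the HImode value (B << 8) | B with the shift and IOR below, then
   recurses to broadcast that HImode value into V4HImode.  */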
26557 val = convert_modes (wsmode, smode, val, true);
26558 x = expand_simple_binop (wsmode, ASHIFT, val,
26559 GEN_INT (GET_MODE_BITSIZE (smode)),
26560 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26561 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
26563 x = gen_reg_rtx (wvmode);
26564 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
26565 gcc_unreachable ();
26566 emit_move_insn (target, gen_lowpart (mode, x));
26567 return true;
26569 case V4DFmode:
26570 hmode = V2DFmode;
26571 goto half;
26572 case V4DImode:
26573 hmode = V2DImode;
26574 goto half;
26575 case V8SFmode:
26576 hmode = V4SFmode;
26577 goto half;
26578 case V8SImode:
26579 hmode = V4SImode;
26580 goto half;
26581 case V16HImode:
26582 hmode = V8HImode;
26583 goto half;
26584 case V32QImode:
26585 hmode = V16QImode;
26586 goto half;
26587 half:
26589 rtx tmp = gen_reg_rtx (hmode);
26590 ix86_expand_vector_init_duplicate (mmx_ok, hmode, tmp, val);
26591 emit_insn (gen_rtx_SET (VOIDmode, target,
26592 gen_rtx_VEC_CONCAT (mode, tmp, tmp)));
26594 return true;
26596 default:
26597 return false;
26601 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26602 whose ONE_VAR element is VAR, and other elements are zero. Return true
26603 if successful. */
26605 static bool
26606 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
26607 rtx target, rtx var, int one_var)
26609 enum machine_mode vsimode;
26610 rtx new_target;
26611 rtx x, tmp;
26612 bool use_vector_set = false;
26614 switch (mode)
26616 case V2DImode:
26617 /* For SSE4.1, we normally use vector set. But if the second
26618 element is zero and inter-unit moves are OK, we use movq
26619 instead. */
26620 use_vector_set = (TARGET_64BIT
26621 && TARGET_SSE4_1
26622 && !(TARGET_INTER_UNIT_MOVES
26623 && one_var == 0));
26624 break;
26625 case V16QImode:
26626 case V4SImode:
26627 case V4SFmode:
26628 use_vector_set = TARGET_SSE4_1;
26629 break;
26630 case V8HImode:
26631 use_vector_set = TARGET_SSE2;
26632 break;
26633 case V4HImode:
26634 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
26635 break;
26636 case V32QImode:
26637 case V16HImode:
26638 case V8SImode:
26639 case V8SFmode:
26640 case V4DImode:
26641 case V4DFmode:
26642 use_vector_set = TARGET_AVX;
26643 break;
26644 default:
26645 break;
26648 if (use_vector_set)
26650 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
26651 var = force_reg (GET_MODE_INNER (mode), var);
26652 ix86_expand_vector_set (mmx_ok, target, var, one_var);
26653 return true;
26656 switch (mode)
26658 case V2SFmode:
26659 case V2SImode:
26660 if (!mmx_ok)
26661 return false;
26662 /* FALLTHRU */
26664 case V2DFmode:
26665 case V2DImode:
26666 if (one_var != 0)
26667 return false;
26668 var = force_reg (GET_MODE_INNER (mode), var);
26669 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
26670 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26671 return true;
26673 case V4SFmode:
26674 case V4SImode:
26675 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
26676 new_target = gen_reg_rtx (mode);
26677 else
26678 new_target = target;
26679 var = force_reg (GET_MODE_INNER (mode), var);
26680 x = gen_rtx_VEC_DUPLICATE (mode, var);
26681 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
26682 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
26683 if (one_var != 0)
26685 /* We need to shuffle the value to the correct position, so
26686 create a new pseudo to store the intermediate result. */
26688 /* With SSE2, we can use the integer shuffle insns. */
26689 if (mode != V4SFmode && TARGET_SSE2)
26691 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
26692 GEN_INT (1),
26693 GEN_INT (one_var == 1 ? 0 : 1),
26694 GEN_INT (one_var == 2 ? 0 : 1),
26695 GEN_INT (one_var == 3 ? 0 : 1)));
26696 if (target != new_target)
26697 emit_move_insn (target, new_target);
26698 return true;
26701 /* Otherwise convert the intermediate result to V4SFmode and
26702 use the SSE1 shuffle instructions. */
26703 if (mode != V4SFmode)
26705 tmp = gen_reg_rtx (V4SFmode);
26706 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
26708 else
26709 tmp = new_target;
26711 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
26712 GEN_INT (1),
26713 GEN_INT (one_var == 1 ? 0 : 1),
26714 GEN_INT (one_var == 2 ? 0+4 : 1+4),
26715 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
26717 if (mode != V4SFmode)
26718 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
26719 else if (tmp != target)
26720 emit_move_insn (target, tmp);
26722 else if (target != new_target)
26723 emit_move_insn (target, new_target);
26724 return true;
26726 case V8HImode:
26727 case V16QImode:
26728 vsimode = V4SImode;
26729 goto widen;
26730 case V4HImode:
26731 case V8QImode:
26732 if (!mmx_ok)
26733 return false;
26734 vsimode = V2SImode;
26735 goto widen;
26736 widen:
26737 if (one_var != 0)
26738 return false;
26740 /* Zero extend the variable element to SImode and recurse. */
26741 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
26743 x = gen_reg_rtx (vsimode);
26744 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
26745 var, one_var))
26746 gcc_unreachable ();
26748 emit_move_insn (target, gen_lowpart (mode, x));
26749 return true;
26751 default:
26752 return false;
26756 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26757 consisting of the values in VALS. It is known that all elements
26758 except ONE_VAR are constants. Return true if successful. */
26760 static bool
26761 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
26762 rtx target, rtx vals, int one_var)
26764 rtx var = XVECEXP (vals, 0, one_var);
26765 enum machine_mode wmode;
26766 rtx const_vec, x;
26768 const_vec = copy_rtx (vals);
26769 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
26770 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
26772 switch (mode)
26774 case V2DFmode:
26775 case V2DImode:
26776 case V2SFmode:
26777 case V2SImode:
26778 /* For the two element vectors, it's just as easy to use
26779 the general case. */
26780 return false;
26782 case V4DFmode:
26783 case V4DImode:
26784 case V8SFmode:
26785 case V8SImode:
26786 case V16HImode:
26787 case V32QImode:
26788 case V4SFmode:
26789 case V4SImode:
26790 case V8HImode:
26791 case V4HImode:
26792 break;
26794 case V16QImode:
26795 if (TARGET_SSE4_1)
26796 break;
26797 wmode = V8HImode;
26798 goto widen;
26799 case V8QImode:
26800 wmode = V4HImode;
26801 goto widen;
26802 widen:
26803 /* There's no way to set one QImode entry easily. Combine
26804 the variable value with its adjacent constant value, and
26805 promote to an HImode set. */
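/* Editorial example: to set element 5 of a V16QImode vector, the code
   below pairs it with the constant in element 4, builds the HImode
   value (VAR << 8) | (adjacent constant & 0xff), and sets element
   5 >> 1 == 2 of the vector viewed as V8HImode.  */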
26806 x = XVECEXP (vals, 0, one_var ^ 1);
26807 if (one_var & 1)
26809 var = convert_modes (HImode, QImode, var, true);
26810 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
26811 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26812 x = GEN_INT (INTVAL (x) & 0xff);
26814 else
26816 var = convert_modes (HImode, QImode, var, true);
26817 x = gen_int_mode (INTVAL (x) << 8, HImode);
26819 if (x != const0_rtx)
26820 var = expand_simple_binop (HImode, IOR, var, x, var,
26821 1, OPTAB_LIB_WIDEN);
26823 x = gen_reg_rtx (wmode);
26824 emit_move_insn (x, gen_lowpart (wmode, const_vec));
26825 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
26827 emit_move_insn (target, gen_lowpart (mode, x));
26828 return true;
26830 default:
26831 return false;
26834 emit_move_insn (target, const_vec);
26835 ix86_expand_vector_set (mmx_ok, target, var, one_var);
26836 return true;
26839 /* A subroutine of ix86_expand_vector_init_general. Use vector
26840 concatenate to handle the most general case: all values variable,
26841 and none identical. */
26843 static void
26844 ix86_expand_vector_init_concat (enum machine_mode mode,
26845 rtx target, rtx *ops, int n)
26847 enum machine_mode cmode, hmode = VOIDmode;
26848 rtx first[8], second[4];
26849 rtvec v;
26850 int i, j;
26852 switch (n)
26854 case 2:
26855 switch (mode)
26857 case V8SImode:
26858 cmode = V4SImode;
26859 break;
26860 case V8SFmode:
26861 cmode = V4SFmode;
26862 break;
26863 case V4DImode:
26864 cmode = V2DImode;
26865 break;
26866 case V4DFmode:
26867 cmode = V2DFmode;
26868 break;
26869 case V4SImode:
26870 cmode = V2SImode;
26871 break;
26872 case V4SFmode:
26873 cmode = V2SFmode;
26874 break;
26875 case V2DImode:
26876 cmode = DImode;
26877 break;
26878 case V2SImode:
26879 cmode = SImode;
26880 break;
26881 case V2DFmode:
26882 cmode = DFmode;
26883 break;
26884 case V2SFmode:
26885 cmode = SFmode;
26886 break;
26887 default:
26888 gcc_unreachable ();
26891 if (!register_operand (ops[1], cmode))
26892 ops[1] = force_reg (cmode, ops[1]);
26893 if (!register_operand (ops[0], cmode))
26894 ops[0] = force_reg (cmode, ops[0]);
26895 emit_insn (gen_rtx_SET (VOIDmode, target,
26896 gen_rtx_VEC_CONCAT (mode, ops[0],
26897 ops[1])));
26898 break;
26900 case 4:
26901 switch (mode)
26903 case V4DImode:
26904 cmode = V2DImode;
26905 break;
26906 case V4DFmode:
26907 cmode = V2DFmode;
26908 break;
26909 case V4SImode:
26910 cmode = V2SImode;
26911 break;
26912 case V4SFmode:
26913 cmode = V2SFmode;
26914 break;
26915 default:
26916 gcc_unreachable ();
26918 goto half;
26920 case 8:
26921 switch (mode)
26923 case V8SImode:
26924 cmode = V2SImode;
26925 hmode = V4SImode;
26926 break;
26927 case V8SFmode:
26928 cmode = V2SFmode;
26929 hmode = V4SFmode;
26930 break;
26931 default:
26932 gcc_unreachable ();
26934 goto half;
26936 half:
26937 /* FIXME: We process inputs backward to help RA. PR 36222. */
26938 i = n - 1;
26939 j = (n >> 1) - 1;
26940 for (; i > 0; i -= 2, j--)
26942 first[j] = gen_reg_rtx (cmode);
26943 v = gen_rtvec (2, ops[i - 1], ops[i]);
26944 ix86_expand_vector_init (false, first[j],
26945 gen_rtx_PARALLEL (cmode, v));
26948 n >>= 1;
26949 if (n > 2)
26951 gcc_assert (hmode != VOIDmode);
26952 for (i = j = 0; i < n; i += 2, j++)
26954 second[j] = gen_reg_rtx (hmode);
26955 ix86_expand_vector_init_concat (hmode, second [j],
26956 &first [i], 2);
26958 n >>= 1;
26959 ix86_expand_vector_init_concat (mode, target, second, n);
26961 else
26962 ix86_expand_vector_init_concat (mode, target, first, n);
26963 break;
26965 default:
26966 gcc_unreachable ();
26970 /* A subroutine of ix86_expand_vector_init_general. Use vector
26971 interleave to handle the most general case: all values variable,
26972 and none identical. */
26974 static void
26975 ix86_expand_vector_init_interleave (enum machine_mode mode,
26976 rtx target, rtx *ops, int n)
26978 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
26979 int i, j;
26980 rtx op0, op1;
26981 rtx (*gen_load_even) (rtx, rtx, rtx);
26982 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
26983 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
26985 switch (mode)
26987 case V8HImode:
26988 gen_load_even = gen_vec_setv8hi;
26989 gen_interleave_first_low = gen_vec_interleave_lowv4si;
26990 gen_interleave_second_low = gen_vec_interleave_lowv2di;
26991 inner_mode = HImode;
26992 first_imode = V4SImode;
26993 second_imode = V2DImode;
26994 third_imode = VOIDmode;
26995 break;
26996 case V16QImode:
26997 gen_load_even = gen_vec_setv16qi;
26998 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
26999 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27000 inner_mode = QImode;
27001 first_imode = V8HImode;
27002 second_imode = V4SImode;
27003 third_imode = V2DImode;
27004 break;
27005 default:
27006 gcc_unreachable ();
27009 for (i = 0; i < n; i++)
27011 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27012 op0 = gen_reg_rtx (SImode);
27013 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27015 /* Insert the SImode value as low element of V4SImode vector. */
27016 op1 = gen_reg_rtx (V4SImode);
27017 op0 = gen_rtx_VEC_MERGE (V4SImode,
27018 gen_rtx_VEC_DUPLICATE (V4SImode,
27019 op0),
27020 CONST0_RTX (V4SImode),
27021 const1_rtx);
27022 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27024 /* Cast the V4SImode vector back to a vector in the original mode. */
27025 op0 = gen_reg_rtx (mode);
27026 emit_move_insn (op0, gen_lowpart (mode, op1));
27028 /* Load even elements into the second position. */
27029 emit_insn ((*gen_load_even) (op0,
27030 force_reg (inner_mode,
27031 ops [i + i + 1]),
27032 const1_rtx));
27034 /* Cast vector to FIRST_IMODE vector. */
27035 ops[i] = gen_reg_rtx (first_imode);
27036 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27039 /* Interleave low FIRST_IMODE vectors. */
27040 for (i = j = 0; i < n; i += 2, j++)
27042 op0 = gen_reg_rtx (first_imode);
27043 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27045 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27046 ops[j] = gen_reg_rtx (second_imode);
27047 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27050 /* Interleave low SECOND_IMODE vectors. */
27051 switch (second_imode)
27053 case V4SImode:
27054 for (i = j = 0; i < n / 2; i += 2, j++)
27056 op0 = gen_reg_rtx (second_imode);
27057 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27058 ops[i + 1]));
27060 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27061 vector. */
27062 ops[j] = gen_reg_rtx (third_imode);
27063 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27065 second_imode = V2DImode;
27066 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27067 /* FALLTHRU */
27069 case V2DImode:
27070 op0 = gen_reg_rtx (second_imode);
27071 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27072 ops[1]));
27074 /* Cast the SECOND_IMODE vector back to a vector in the original
27075 mode. */
27076 emit_insn (gen_rtx_SET (VOIDmode, target,
27077 gen_lowpart (mode, op0)));
27078 break;
27080 default:
27081 gcc_unreachable ();
27085 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27086 all values variable, and none identical. */
27088 static void
27089 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27090 rtx target, rtx vals)
27092 rtx ops[32], op0, op1;
27093 enum machine_mode half_mode = VOIDmode;
27094 int n, i;
27096 switch (mode)
27098 case V2SFmode:
27099 case V2SImode:
27100 if (!mmx_ok && !TARGET_SSE)
27101 break;
27102 /* FALLTHRU */
27104 case V8SFmode:
27105 case V8SImode:
27106 case V4DFmode:
27107 case V4DImode:
27108 case V4SFmode:
27109 case V4SImode:
27110 case V2DFmode:
27111 case V2DImode:
27112 n = GET_MODE_NUNITS (mode);
27113 for (i = 0; i < n; i++)
27114 ops[i] = XVECEXP (vals, 0, i);
27115 ix86_expand_vector_init_concat (mode, target, ops, n);
27116 return;
27118 case V32QImode:
27119 half_mode = V16QImode;
27120 goto half;
27122 case V16HImode:
27123 half_mode = V8HImode;
27124 goto half;
27126 half:
27127 n = GET_MODE_NUNITS (mode);
27128 for (i = 0; i < n; i++)
27129 ops[i] = XVECEXP (vals, 0, i);
27130 op0 = gen_reg_rtx (half_mode);
27131 op1 = gen_reg_rtx (half_mode);
27132 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27133 n >> 2);
27134 ix86_expand_vector_init_interleave (half_mode, op1,
27135 &ops [n >> 1], n >> 2);
27136 emit_insn (gen_rtx_SET (VOIDmode, target,
27137 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27138 return;
27140 case V16QImode:
27141 if (!TARGET_SSE4_1)
27142 break;
27143 /* FALLTHRU */
27145 case V8HImode:
27146 if (!TARGET_SSE2)
27147 break;
27149 /* Don't use ix86_expand_vector_init_interleave if we can't
27150 move from GPR to SSE register directly. */
27151 if (!TARGET_INTER_UNIT_MOVES)
27152 break;
27154 n = GET_MODE_NUNITS (mode);
27155 for (i = 0; i < n; i++)
27156 ops[i] = XVECEXP (vals, 0, i);
27157 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27158 return;
27160 case V4HImode:
27161 case V8QImode:
27162 break;
27164 default:
27165 gcc_unreachable ();
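/* Editorial note: the fallback below packs the vector elements into
   word_mode integers, highest-indexed element in the most significant
   bits, via shift-and-IOR, and then assembles the vector from those
   words.  E.g. a V4HImode { a, b, c, d } on a 32-bit target becomes
   the two SImode words (b << 16) | a and (d << 16) | c.  */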
27169 int i, j, n_elts, n_words, n_elt_per_word;
27170 enum machine_mode inner_mode;
27171 rtx words[4], shift;
27173 inner_mode = GET_MODE_INNER (mode);
27174 n_elts = GET_MODE_NUNITS (mode);
27175 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27176 n_elt_per_word = n_elts / n_words;
27177 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27179 for (i = 0; i < n_words; ++i)
27181 rtx word = NULL_RTX;
27183 for (j = 0; j < n_elt_per_word; ++j)
27185 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27186 elt = convert_modes (word_mode, inner_mode, elt, true);
27188 if (j == 0)
27189 word = elt;
27190 else
27192 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27193 word, 1, OPTAB_LIB_WIDEN);
27194 word = expand_simple_binop (word_mode, IOR, word, elt,
27195 word, 1, OPTAB_LIB_WIDEN);
27199 words[i] = word;
27202 if (n_words == 1)
27203 emit_move_insn (target, gen_lowpart (mode, words[0]));
27204 else if (n_words == 2)
27206 rtx tmp = gen_reg_rtx (mode);
27207 emit_clobber (tmp);
27208 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27209 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27210 emit_move_insn (target, tmp);
27212 else if (n_words == 4)
27214 rtx tmp = gen_reg_rtx (V4SImode);
27215 gcc_assert (word_mode == SImode);
27216 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27217 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27218 emit_move_insn (target, gen_lowpart (mode, tmp));
27220 else
27221 gcc_unreachable ();
27225 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27226 instructions unless MMX_OK is true. */
27228 void
27229 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27231 enum machine_mode mode = GET_MODE (target);
27232 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27233 int n_elts = GET_MODE_NUNITS (mode);
27234 int n_var = 0, one_var = -1;
27235 bool all_same = true, all_const_zero = true;
27236 int i;
27237 rtx x;
27239 for (i = 0; i < n_elts; ++i)
27241 x = XVECEXP (vals, 0, i);
27242 if (!(CONST_INT_P (x)
27243 || GET_CODE (x) == CONST_DOUBLE
27244 || GET_CODE (x) == CONST_FIXED))
27245 n_var++, one_var = i;
27246 else if (x != CONST0_RTX (inner_mode))
27247 all_const_zero = false;
27248 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27249 all_same = false;
27252 /* Constants are best loaded from the constant pool. */
27253 if (n_var == 0)
27255 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27256 return;
27259 /* If all values are identical, broadcast the value. */
27260 if (all_same
27261 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27262 XVECEXP (vals, 0, 0)))
27263 return;
27265 /* Values where only one field is non-constant are best loaded from
27266 the pool and overwritten via move later. */
27267 if (n_var == 1)
27269 if (all_const_zero
27270 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27271 XVECEXP (vals, 0, one_var),
27272 one_var))
27273 return;
27275 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27276 return;
27279 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27282 void
27283 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27285 enum machine_mode mode = GET_MODE (target);
27286 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27287 enum machine_mode half_mode;
27288 bool use_vec_merge = false;
27289 rtx tmp;
27290 static rtx (*gen_extract[6][2]) (rtx, rtx)
27292 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27293 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27294 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27295 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27296 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27297 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27299 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27301 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27302 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27303 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27304 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27305 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27306 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27308 int i, j, n;
27310 switch (mode)
27312 case V2SFmode:
27313 case V2SImode:
27314 if (mmx_ok)
27316 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27317 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27318 if (elt == 0)
27319 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27320 else
27321 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27322 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27323 return;
27325 break;
27327 case V2DImode:
27328 use_vec_merge = TARGET_SSE4_1;
27329 if (use_vec_merge)
27330 break;
27332 case V2DFmode:
27334 rtx op0, op1;
27336 /* For the two element vectors, we implement a VEC_CONCAT with
27337 the extraction of the other element. */
27339 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27340 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27342 if (elt == 0)
27343 op0 = val, op1 = tmp;
27344 else
27345 op0 = tmp, op1 = val;
27347 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27348 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27350 return;
27352 case V4SFmode:
27353 use_vec_merge = TARGET_SSE4_1;
27354 if (use_vec_merge)
27355 break;
27357 switch (elt)
27359 case 0:
27360 use_vec_merge = true;
27361 break;
27363 case 1:
27364 /* tmp = target = A B C D */
27365 tmp = copy_to_reg (target);
27366 /* target = A A B B */
27367 emit_insn (gen_sse_unpcklps (target, target, target));
27368 /* target = X A B B */
27369 ix86_expand_vector_set (false, target, val, 0);
27370 /* target = A X C D */
27371 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27372 GEN_INT (1), GEN_INT (0),
27373 GEN_INT (2+4), GEN_INT (3+4)));
27374 return;
27376 case 2:
27377 /* tmp = target = A B C D */
27378 tmp = copy_to_reg (target);
27379 /* tmp = X B C D */
27380 ix86_expand_vector_set (false, tmp, val, 0);
27381 /* target = A B X D */
27382 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27383 GEN_INT (0), GEN_INT (1),
27384 GEN_INT (0+4), GEN_INT (3+4)));
27385 return;
27387 case 3:
27388 /* tmp = target = A B C D */
27389 tmp = copy_to_reg (target);
27390 /* tmp = X B C D */
27391 ix86_expand_vector_set (false, tmp, val, 0);
27392 /* target = A B C X */
27393 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27394 GEN_INT (0), GEN_INT (1),
27395 GEN_INT (2+4), GEN_INT (0+4)));
27396 return;
27398 default:
27399 gcc_unreachable ();
27401 break;
27403 case V4SImode:
27404 use_vec_merge = TARGET_SSE4_1;
27405 if (use_vec_merge)
27406 break;
27408 /* Element 0 handled by vec_merge below. */
27409 if (elt == 0)
27411 use_vec_merge = true;
27412 break;
27415 if (TARGET_SSE2)
27417 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27418 store into element 0, then shuffle them back. */
27420 rtx order[4];
27422 order[0] = GEN_INT (elt);
27423 order[1] = const1_rtx;
27424 order[2] = const2_rtx;
27425 order[3] = GEN_INT (3);
27426 order[elt] = const0_rtx;
27428 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27429 order[1], order[2], order[3]));
27431 ix86_expand_vector_set (false, target, val, 0);
27433 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27434 order[1], order[2], order[3]));
27436 else
27438 /* For SSE1, we have to reuse the V4SF code. */
27439 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27440 gen_lowpart (SFmode, val), elt);
27442 return;
27444 case V8HImode:
27445 use_vec_merge = TARGET_SSE2;
27446 break;
27447 case V4HImode:
27448 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27449 break;
27451 case V16QImode:
27452 use_vec_merge = TARGET_SSE4_1;
27453 break;
27455 case V8QImode:
27456 break;
27458 case V32QImode:
27459 half_mode = V16QImode;
27460 j = 0;
27461 n = 16;
27462 goto half;
27464 case V16HImode:
27465 half_mode = V8HImode;
27466 j = 1;
27467 n = 8;
27468 goto half;
27470 case V8SImode:
27471 half_mode = V4SImode;
27472 j = 2;
27473 n = 4;
27474 goto half;
27476 case V4DImode:
27477 half_mode = V2DImode;
27478 j = 3;
27479 n = 2;
27480 goto half;
27482 case V8SFmode:
27483 half_mode = V4SFmode;
27484 j = 4;
27485 n = 4;
27486 goto half;
27488 case V4DFmode:
27489 half_mode = V2DFmode;
27490 j = 5;
27491 n = 2;
27492 goto half;
27494 half:
27495 /* Compute offset. */
27496 i = elt / n;
27497 elt %= n;
27499 gcc_assert (i <= 1);
27501 /* Extract the half. */
27502 tmp = gen_reg_rtx (half_mode);
27503 emit_insn ((*gen_extract[j][i]) (tmp, target));
27505 /* Put val in tmp at elt. */
27506 ix86_expand_vector_set (false, tmp, val, elt);
27508 /* Put it back. */
27509 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27510 return;
27512 default:
27513 break;
27516 if (use_vec_merge)
27518 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27519 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27520 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27522 else
27524 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27526 emit_move_insn (mem, target);
27528 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27529 emit_move_insn (tmp, val);
27531 emit_move_insn (target, mem);
27535 void
27536 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27538 enum machine_mode mode = GET_MODE (vec);
27539 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27540 bool use_vec_extr = false;
27541 rtx tmp;
27543 switch (mode)
27545 case V2SImode:
27546 case V2SFmode:
27547 if (!mmx_ok)
27548 break;
27549 /* FALLTHRU */
27551 case V2DFmode:
27552 case V2DImode:
27553 use_vec_extr = true;
27554 break;
27556 case V4SFmode:
27557 use_vec_extr = TARGET_SSE4_1;
27558 if (use_vec_extr)
27559 break;
27561 switch (elt)
27563 case 0:
27564 tmp = vec;
27565 break;
27567 case 1:
27568 case 3:
27569 tmp = gen_reg_rtx (mode);
27570 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
27571 GEN_INT (elt), GEN_INT (elt),
27572 GEN_INT (elt+4), GEN_INT (elt+4)));
27573 break;
27575 case 2:
27576 tmp = gen_reg_rtx (mode);
27577 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
27578 break;
27580 default:
27581 gcc_unreachable ();
27583 vec = tmp;
27584 use_vec_extr = true;
27585 elt = 0;
27586 break;
27588 case V4SImode:
27589 use_vec_extr = TARGET_SSE4_1;
27590 if (use_vec_extr)
27591 break;
27593 if (TARGET_SSE2)
27595 switch (elt)
27597 case 0:
27598 tmp = vec;
27599 break;
27601 case 1:
27602 case 3:
27603 tmp = gen_reg_rtx (mode);
27604 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
27605 GEN_INT (elt), GEN_INT (elt),
27606 GEN_INT (elt), GEN_INT (elt)));
27607 break;
27609 case 2:
27610 tmp = gen_reg_rtx (mode);
27611 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
27612 break;
27614 default:
27615 gcc_unreachable ();
27617 vec = tmp;
27618 use_vec_extr = true;
27619 elt = 0;
27621 else
27623 /* For SSE1, we have to reuse the V4SF code. */
27624 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
27625 gen_lowpart (V4SFmode, vec), elt);
27626 return;
27628 break;
27630 case V8HImode:
27631 use_vec_extr = TARGET_SSE2;
27632 break;
27633 case V4HImode:
27634 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27635 break;
27637 case V16QImode:
27638 use_vec_extr = TARGET_SSE4_1;
27639 break;
27641 case V8QImode:
27642 /* ??? Could extract the appropriate HImode element and shift. */
27643 default:
27644 break;
27647 if (use_vec_extr)
27649 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
27650 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
27652 /* Let the rtl optimizers know about the zero extension performed. */
27653 if (inner_mode == QImode || inner_mode == HImode)
27655 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
27656 target = gen_lowpart (SImode, target);
27659 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27661 else
27663 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27665 emit_move_insn (mem, vec);
27667 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27668 emit_move_insn (target, tmp);
27672 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
27673 pattern to reduce; DEST is the destination; IN is the input vector. */
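/* Editorial walk-through for IN = { a, b, c, d }, writing "op" for FN:
     movhlps:  tmp1 = { c, d, c, d }
     fn:       tmp2 = { a op c, b op d, ... }
     shufps:   tmp3 = broadcast of element 1 of tmp2 = { b op d, ... }
     fn:       dest = { (a op c) op (b op d), ... }
   so element 0 of DEST holds the reduction over all four elements.  */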
27675 void
27676 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
27678 rtx tmp1, tmp2, tmp3;
27680 tmp1 = gen_reg_rtx (V4SFmode);
27681 tmp2 = gen_reg_rtx (V4SFmode);
27682 tmp3 = gen_reg_rtx (V4SFmode);
27684 emit_insn (gen_sse_movhlps (tmp1, in, in));
27685 emit_insn (fn (tmp2, tmp1, in));
27687 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
27688 GEN_INT (1), GEN_INT (1),
27689 GEN_INT (1+4), GEN_INT (1+4)));
27690 emit_insn (fn (dest, tmp2, tmp3));
27693 /* Target hook for scalar_mode_supported_p. */
27694 static bool
27695 ix86_scalar_mode_supported_p (enum machine_mode mode)
27697 if (DECIMAL_FLOAT_MODE_P (mode))
27698 return true;
27699 else if (mode == TFmode)
27700 return true;
27701 else
27702 return default_scalar_mode_supported_p (mode);
27705 /* Implements target hook vector_mode_supported_p. */
27706 static bool
27707 ix86_vector_mode_supported_p (enum machine_mode mode)
27709 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
27710 return true;
27711 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
27712 return true;
27713 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
27714 return true;
27715 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
27716 return true;
27717 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
27718 return true;
27719 return false;
27722 /* Target hook for c_mode_for_suffix. */
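/* Editorial note: this is what lets constants be written with the GNU
   suffixes on this target, e.g. 1.0q for a __float128 (TFmode) literal
   and 1.0w for a __float80 (XFmode) literal.  */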
27723 static enum machine_mode
27724 ix86_c_mode_for_suffix (char suffix)
27726 if (suffix == 'q')
27727 return TFmode;
27728 if (suffix == 'w')
27729 return XFmode;
27731 return VOIDmode;
27734 /* Worker function for TARGET_MD_ASM_CLOBBERS.
27736 We do this in the new i386 backend to maintain source compatibility
27737 with the old cc0-based compiler. */
27739 static tree
27740 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
27741 tree inputs ATTRIBUTE_UNUSED,
27742 tree clobbers)
27744 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
27745 clobbers);
27746 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
27747 clobbers);
27748 return clobbers;
27751 /* Implements the target hook targetm.asm.encode_section_info. This
27752 is not used by NetWare. */
27754 static void ATTRIBUTE_UNUSED
27755 ix86_encode_section_info (tree decl, rtx rtl, int first)
27757 default_encode_section_info (decl, rtl, first);
27759 if (TREE_CODE (decl) == VAR_DECL
27760 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
27761 && ix86_in_large_data_p (decl))
27762 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
27765 /* Worker function for REVERSE_CONDITION. */
27767 enum rtx_code
27768 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
27770 return (mode != CCFPmode && mode != CCFPUmode
27771 ? reverse_condition (code)
27772 : reverse_condition_maybe_unordered (code));
27775 /* Output code to perform an x87 FP register move, from OPERANDS[1]
27776 to OPERANDS[0]. */
27778 const char *
27779 output_387_reg_move (rtx insn, rtx *operands)
27781 if (REG_P (operands[0]))
27783 if (REG_P (operands[1])
27784 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
27786 if (REGNO (operands[0]) == FIRST_STACK_REG)
27787 return output_387_ffreep (operands, 0);
27788 return "fstp\t%y0";
27790 if (STACK_TOP_P (operands[0]))
27791 return "fld%z1\t%y1";
27792 return "fst\t%y0";
27794 else if (MEM_P (operands[0]))
27796 gcc_assert (REG_P (operands[1]));
27797 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
27798 return "fstp%z0\t%y0";
27799 else
27801 /* There is no non-popping store to memory for XFmode.
27802 So if we need one, follow the store with a load. */
27803 if (GET_MODE (operands[0]) == XFmode)
27804 return "fstp%z0\t%y0\n\tfld%z0\t%y0";
27805 else
27806 return "fst%z0\t%y0";
27809 else
27810 gcc_unreachable();
27813 /* Output code to perform a conditional jump to LABEL, if C2 flag in
27814 FP status register is set. */
27816 void
27817 ix86_emit_fp_unordered_jump (rtx label)
27819 rtx reg = gen_reg_rtx (HImode);
27820 rtx temp;
27822 emit_insn (gen_x86_fnstsw_1 (reg));
27824 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
27826 emit_insn (gen_x86_sahf_1 (reg));
27828 temp = gen_rtx_REG (CCmode, FLAGS_REG);
27829 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
27831 else
27833 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
27835 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
27836 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
27839 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
27840 gen_rtx_LABEL_REF (VOIDmode, label),
27841 pc_rtx);
27842 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
27844 emit_jump_insn (temp);
27845 predict_jump (REG_BR_PROB_BASE * 10 / 100);
27848 /* Output code to perform a log1p XFmode calculation. */
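/* Editorial note on the split below: fyl2xp1 computes y * log2 (x + 1)
   but is only specified for |x| < 1 - sqrt(2)/2 ~= 0.29289, hence the
   comparison against that constant; larger inputs take the fallback
   path computing fyl2x on 1 + x.  In both arms y is ln(2) (fldln2),
   which turns the base-2 logarithm into a natural one.  */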
27850 void ix86_emit_i387_log1p (rtx op0, rtx op1)
27852 rtx label1 = gen_label_rtx ();
27853 rtx label2 = gen_label_rtx ();
27855 rtx tmp = gen_reg_rtx (XFmode);
27856 rtx tmp2 = gen_reg_rtx (XFmode);
27858 emit_insn (gen_absxf2 (tmp, op1));
27859 emit_insn (gen_cmpxf (tmp,
27860 CONST_DOUBLE_FROM_REAL_VALUE (
27861 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
27862 XFmode)));
27863 emit_jump_insn (gen_bge (label1));
27865 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
27866 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
27867 emit_jump (label2);
27869 emit_label (label1);
27870 emit_move_insn (tmp, CONST1_RTX (XFmode));
27871 emit_insn (gen_addxf3 (tmp, op1, tmp));
27872 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
27873 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
27875 emit_label (label2);
27878 /* Output code to perform a Newton-Raphson approximation of a single precision
27879 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
27881 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
27883 rtx x0, x1, e0, e1, two;
27885 x0 = gen_reg_rtx (mode);
27886 e0 = gen_reg_rtx (mode);
27887 e1 = gen_reg_rtx (mode);
27888 x1 = gen_reg_rtx (mode);
27890 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
27892 if (VECTOR_MODE_P (mode))
27893 two = ix86_build_const_vector (SFmode, true, two);
27895 two = force_reg (mode, two);
27897 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
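/* Editorial derivation: one Newton-Raphson step for f (x) = 1/x - b
   refines an estimate x0 of 1/b into x1 = x0 * (2 - b * x0); the
   quotient is then a * x1, which is exactly what the five sets
   emitted below compute.  */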
27899 /* x0 = rcp(b) estimate */
27900 emit_insn (gen_rtx_SET (VOIDmode, x0,
27901 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
27902 UNSPEC_RCP)));
27903 /* e0 = x0 * b */
27904 emit_insn (gen_rtx_SET (VOIDmode, e0,
27905 gen_rtx_MULT (mode, x0, b)));
27906 /* e1 = 2. - e0 */
27907 emit_insn (gen_rtx_SET (VOIDmode, e1,
27908 gen_rtx_MINUS (mode, two, e0)));
27909 /* x1 = x0 * e1 */
27910 emit_insn (gen_rtx_SET (VOIDmode, x1,
27911 gen_rtx_MULT (mode, x0, e1)));
27912 /* res = a * x1 */
27913 emit_insn (gen_rtx_SET (VOIDmode, res,
27914 gen_rtx_MULT (mode, a, x1)));
27917 /* Output code to perform a Newton-Raphson approximation of a
27918 single precision floating point [reciprocal] square root. */
27920 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
27921 bool recip)
27923 rtx x0, e0, e1, e2, e3, mthree, mhalf;
27924 REAL_VALUE_TYPE r;
27926 x0 = gen_reg_rtx (mode);
27927 e0 = gen_reg_rtx (mode);
27928 e1 = gen_reg_rtx (mode);
27929 e2 = gen_reg_rtx (mode);
27930 e3 = gen_reg_rtx (mode);
27932 real_from_integer (&r, VOIDmode, -3, -1, 0);
27933 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
27935 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
27936 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
27938 if (VECTOR_MODE_P (mode))
27940 mthree = ix86_build_const_vector (SFmode, true, mthree);
27941 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
27944 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
27945 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
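/* Editorial derivation: one Newton-Raphson step for f (x) = 1/x^2 - a
   refines an estimate x0 of rsqrt(a) into x1 = x0 * (3 - a * x0 * x0) / 2,
   i.e. -0.5 * x0 * (a * x0 * x0 - 3); multiplying by a up front
   (e0 = x0 * a below) turns the same expression into the sqrt form.  */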
27947 /* x0 = rsqrt(a) estimate */
27948 emit_insn (gen_rtx_SET (VOIDmode, x0,
27949 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
27950 UNSPEC_RSQRT)));
27952 /* If a == 0.0, filter out the infinite rsqrt estimate to prevent a NaN for sqrt (0.0). */
27953 if (!recip)
27955 rtx zero, mask;
27957 zero = gen_reg_rtx (mode);
27958 mask = gen_reg_rtx (mode);
27960 zero = force_reg (mode, CONST0_RTX(mode));
27961 emit_insn (gen_rtx_SET (VOIDmode, mask,
27962 gen_rtx_NE (mode, zero, a)));
27964 emit_insn (gen_rtx_SET (VOIDmode, x0,
27965 gen_rtx_AND (mode, x0, mask)));
27968 /* e0 = x0 * a */
27969 emit_insn (gen_rtx_SET (VOIDmode, e0,
27970 gen_rtx_MULT (mode, x0, a)));
27971 /* e1 = e0 * x0 */
27972 emit_insn (gen_rtx_SET (VOIDmode, e1,
27973 gen_rtx_MULT (mode, e0, x0)));
27975 /* e2 = e1 - 3. */
27976 mthree = force_reg (mode, mthree);
27977 emit_insn (gen_rtx_SET (VOIDmode, e2,
27978 gen_rtx_PLUS (mode, e1, mthree)));
27980 mhalf = force_reg (mode, mhalf);
27981 if (recip)
27982 /* e3 = -.5 * x0 */
27983 emit_insn (gen_rtx_SET (VOIDmode, e3,
27984 gen_rtx_MULT (mode, x0, mhalf)));
27985 else
27986 /* e3 = -.5 * e0 */
27987 emit_insn (gen_rtx_SET (VOIDmode, e3,
27988 gen_rtx_MULT (mode, e0, mhalf)));
27989 /* ret = e2 * e3 */
27990 emit_insn (gen_rtx_SET (VOIDmode, res,
27991 gen_rtx_MULT (mode, e2, e3)));
27994 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
27996 static void ATTRIBUTE_UNUSED
27997 i386_solaris_elf_named_section (const char *name, unsigned int flags,
27998 tree decl)
28000 /* With Binutils 2.15, the "@unwind" marker must be specified on
28001 every occurrence of the ".eh_frame" section, not just the first
28002 one. */
28003 if (TARGET_64BIT
28004 && strcmp (name, ".eh_frame") == 0)
28006 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28007 flags & SECTION_WRITE ? "aw" : "a");
28008 return;
28010 default_elf_asm_named_section (name, flags, decl);
28013 /* Return the mangling of TYPE if it is an extended fundamental type. */
28015 static const char *
28016 ix86_mangle_type (const_tree type)
28018 type = TYPE_MAIN_VARIANT (type);
28020 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28021 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28022 return NULL;
28024 switch (TYPE_MODE (type))
28026 case TFmode:
28027 /* __float128 is "g". */
28028 return "g";
28029 case XFmode:
28030 /* "long double" or __float80 is "e". */
28031 return "e";
28032 default:
28033 return NULL;
28037 /* For 32-bit code we can save PIC register setup by using
28038 the __stack_chk_fail_local hidden function instead of calling
28039 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28040 register, so it is better to call __stack_chk_fail directly. */
28042 static tree
28043 ix86_stack_protect_fail (void)
28045 return TARGET_64BIT
28046 ? default_external_stack_protect_fail ()
28047 : default_hidden_stack_protect_fail ();
28050 /* Select a format to encode pointers in exception handling data. CODE
28051 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28052 true if the symbol may be affected by dynamic relocations.
28054 ??? All x86 object file formats are capable of representing this.
28055 After all, the relocation needed is the same as for the call insn.
28056 Whether or not a particular assembler allows us to enter such, I
28057 guess we'll have to see. */
28058 int
28059 asm_preferred_eh_data_format (int code, int global)
28061 if (flag_pic)
28063 int type = DW_EH_PE_sdata8;
28064 if (!TARGET_64BIT
28065 || ix86_cmodel == CM_SMALL_PIC
28066 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28067 type = DW_EH_PE_sdata4;
28068 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28070 if (ix86_cmodel == CM_SMALL
28071 || (ix86_cmodel == CM_MEDIUM && code))
28072 return DW_EH_PE_udata4;
28073 return DW_EH_PE_absptr;
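/* Editorial sketch (not part of GCC): the DW_EH_PE_* encodings or'd
   above are small composable constants (values from dwarf2.h, listed
   here for illustration): absptr 0x00, udata4 0x03, sdata4 0x0b,
   sdata8 0x0c, pcrel 0x10, indirect 0x80.  A global data reference
   under 32-bit PIC therefore encodes as:  */

static const int eh_pic32_global_data_sketch
  = 0x80 | 0x10 | 0x0b; /* indirect | pcrel | sdata4 == 0x9b */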
28076 /* Expand copysign from SIGN to the positive value ABS_VALUE,
28077 storing the result in RESULT. If MASK is non-null, it shall be a mask
28078 that masks out the sign bit. */
28079 static void
28080 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28082 enum machine_mode mode = GET_MODE (sign);
28083 rtx sgn = gen_reg_rtx (mode);
28084 if (mask == NULL_RTX)
28086 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28087 if (!VECTOR_MODE_P (mode))
28089 /* We need to generate a scalar mode mask in this case. */
28090 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28091 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28092 mask = gen_reg_rtx (mode);
28093 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28096 else
28097 mask = gen_rtx_NOT (mode, mask);
28098 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28099 gen_rtx_AND (mode, mask, sign)));
28100 emit_insn (gen_rtx_SET (VOIDmode, result,
28101 gen_rtx_IOR (mode, abs_value, sgn)));
28104 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28105 mask for masking out the sign-bit is stored in *SMASK, if that is
28106 non-null. */
28107 static rtx
28108 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28110 enum machine_mode mode = GET_MODE (op0);
28111 rtx xa, mask;
28113 xa = gen_reg_rtx (mode);
28114 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28115 if (!VECTOR_MODE_P (mode))
28117 /* We need to generate a scalar mode mask in this case. */
28118 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28119 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28120 mask = gen_reg_rtx (mode);
28121 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28123 emit_insn (gen_rtx_SET (VOIDmode, xa,
28124 gen_rtx_AND (mode, op0, mask)));
28126 if (smask)
28127 *smask = mask;
28129 return xa;
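/* Editorial sketch (not part of GCC): the bit-level effect of
   ix86_sse_copysign_to_positive and ix86_expand_sse_fabs for a scalar
   double.  The sign-bit masks built by ix86_build_signbit_mask
   correspond to the 0x8000000000000000 constant below (or its
   complement when inversion is requested).  */

#include <stdint.h>
#include <string.h>

static double
sse_fabs_sketch (double x)
{
  uint64_t bits;
  memcpy (&bits, &x, sizeof bits);
  bits &= ~UINT64_C (0x8000000000000000); /* ANDPD with ~signbit */
  memcpy (&x, &bits, sizeof x);
  return x;
}

static double
sse_copysign_sketch (double abs_value, double sign)
{
  uint64_t a, s;
  memcpy (&a, &abs_value, sizeof a);
  memcpy (&s, &sign, sizeof s);
  s &= UINT64_C (0x8000000000000000);     /* ANDPD: isolate the sign */
  a |= s;                                 /* ORPD: merge into value  */
  memcpy (&abs_value, &a, sizeof abs_value);
  return abs_value;
}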
28132 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28133 swapping the operands if SWAP_OPERANDS is true. The expanded
28134 code is a forward jump to a newly created label in case the
28135 comparison is true. The generated label rtx is returned. */
28136 static rtx
28137 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28138 bool swap_operands)
28140 rtx label, tmp;
28142 if (swap_operands)
28144 tmp = op0;
28145 op0 = op1;
28146 op1 = tmp;
28149 label = gen_label_rtx ();
28150 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28151 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28152 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28153 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28154 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28155 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28156 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28157 JUMP_LABEL (tmp) = label;
28159 return label;
28162 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28163 using comparison code CODE. Operands are swapped for the comparison if
28164 SWAP_OPERANDS is true. Returns an rtx for the generated mask. */
28165 static rtx
28166 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28167 bool swap_operands)
28169 enum machine_mode mode = GET_MODE (op0);
28170 rtx mask = gen_reg_rtx (mode);
28172 if (swap_operands)
28174 rtx tmp = op0;
28175 op0 = op1;
28176 op1 = tmp;
28179 if (mode == DFmode)
28180 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28181 gen_rtx_fmt_ee (code, mode, op0, op1)));
28182 else
28183 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28184 gen_rtx_fmt_ee (code, mode, op0, op1)));
28186 return mask;
28189 /* Generate and return an rtx of mode MODE for 2**n where n is the number
28190 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28191 static rtx
28192 ix86_gen_TWO52 (enum machine_mode mode)
28194 REAL_VALUE_TYPE TWO52r;
28195 rtx TWO52;
28197 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28198 TWO52 = const_double_from_real_value (TWO52r, mode);
28199 TWO52 = force_reg (mode, TWO52);
28201 return TWO52;
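/* Editorial sketch (not part of GCC): why the add/subtract of 2**52
   used by the expanders below rounds a double to an integer.  For
   0 <= x < 2**52 the sum x + 2**52 leaves no fraction bits in the
   53-bit significand, so the addition itself rounds x to the nearest
   integer (ties to even under the default rounding mode); subtracting
   2**52 back then yields that integer exactly.  */

static double
two52_trick_sketch (double x) /* assumes 0.0 <= x < 0x1p52 */
{
  const double two52 = 0x1p52; /* 4503599627370496.0 */
  return (x + two52) - two52;  /* e.g. 2.5 -> 2.0, 3.5 -> 4.0 */
}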
28204 /* Expand SSE sequence for computing lround from OP1 storing
28205 into OP0. */
28206 void
28207 ix86_expand_lround (rtx op0, rtx op1)
28209 /* C code for the stuff we're doing below:
28210 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28211 return (long)tmp; */
28213 enum machine_mode mode = GET_MODE (op1);
28214 const struct real_format *fmt;
28215 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28216 rtx adj;
28218 /* load nextafter (0.5, 0.0) */
28219 fmt = REAL_MODE_FORMAT (mode);
28220 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28221 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28223 /* adj = copysign (0.5, op1) */
28224 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28225 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28227 /* adj = op1 + adj */
28228 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28230 /* op0 = (imode)adj */
28231 expand_fix (op0, adj, 0);
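/* Editorial sketch (not part of GCC): why nextafter (0.5, 0.0) rather
   than 0.5 is added above.  For the largest double below 0.5
   (0.49999999999999994 == 0.5 - 2**-54), x + 0.5 rounds up to exactly
   1.0, so truncation would give lround == 1 instead of 0; adding the
   predecessor of 0.5 keeps such sums below 1.0.  */

#include <math.h> /* copysign */

static long
lround_sketch (double x)
{
  const double pred_half = 0.5 - 0x1p-54;      /* nextafter (0.5, 0.0) */
  return (long) (x + copysign (pred_half, x)); /* truncates toward zero */
}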
28234 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1
28235 storing into OPERAND0. */
28236 void
28237 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28239 /* C code for the stuff we're doing below (for do_floor):
28240 xi = (long)op1;
28241 xi -= (double)xi > op1 ? 1 : 0;
28242 return xi; */
28244 enum machine_mode fmode = GET_MODE (op1);
28245 enum machine_mode imode = GET_MODE (op0);
28246 rtx ireg, freg, label, tmp;
28248 /* reg = (long)op1 */
28249 ireg = gen_reg_rtx (imode);
28250 expand_fix (ireg, op1, 0);
28252 /* freg = (double)reg */
28253 freg = gen_reg_rtx (fmode);
28254 expand_float (freg, ireg, 0);
28256 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28257 label = ix86_expand_sse_compare_and_jump (UNLE,
28258 freg, op1, !do_floor);
28259 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28260 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28261 emit_move_insn (ireg, tmp);
28263 emit_label (label);
28264 LABEL_NUSES (label) = 1;
28266 emit_move_insn (op0, ireg);
28269 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28270 result in OPERAND0. */
28271 void
28272 ix86_expand_rint (rtx operand0, rtx operand1)
28274 /* C code for the stuff we're doing below:
28275 xa = fabs (operand1);
28276 if (!isless (xa, 2**52))
28277 return operand1;
28278 xa = xa + 2**52 - 2**52;
28279 return copysign (xa, operand1); */
28281 enum machine_mode mode = GET_MODE (operand0);
28282 rtx res, xa, label, TWO52, mask;
28284 res = gen_reg_rtx (mode);
28285 emit_move_insn (res, operand1);
28287 /* xa = abs (operand1) */
28288 xa = ix86_expand_sse_fabs (res, &mask);
28290 /* if (!isless (xa, TWO52)) goto label; */
28291 TWO52 = ix86_gen_TWO52 (mode);
28292 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28294 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28295 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28297 ix86_sse_copysign_to_positive (res, xa, res, mask);
28299 emit_label (label);
28300 LABEL_NUSES (label) = 1;
28302 emit_move_insn (operand0, res);
28305 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28306 into OPERAND0. */
28307 void
28308 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28310 /* C code for the stuff we expand below.
28311 double xa = fabs (x), x2;
28312 if (!isless (xa, TWO52))
28313 return x;
28314 xa = xa + TWO52 - TWO52;
28315 x2 = copysign (xa, x);
28316 Compensate. Floor:
28317 if (x2 > x)
28318 x2 -= 1;
28319 Compensate. Ceil:
28320 if (x2 < x)
28321 x2 -= -1;
28322 return x2; */
28324 enum machine_mode mode = GET_MODE (operand0);
28325 rtx xa, TWO52, tmp, label, one, res, mask;
28327 TWO52 = ix86_gen_TWO52 (mode);
28329 /* Temporary for holding the result, initialized to the input
28330 operand to ease control flow. */
28331 res = gen_reg_rtx (mode);
28332 emit_move_insn (res, operand1);
28334 /* xa = abs (operand1) */
28335 xa = ix86_expand_sse_fabs (res, &mask);
28337 /* if (!isless (xa, TWO52)) goto label; */
28338 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28340 /* xa = xa + TWO52 - TWO52; */
28341 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28342 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28344 /* xa = copysign (xa, operand1) */
28345 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28347 /* generate 1.0 or -1.0 */
28348 one = force_reg (mode,
28349 const_double_from_real_value (do_floor
28350 ? dconst1 : dconstm1, mode));
28352 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28353 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28354 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28355 gen_rtx_AND (mode, one, tmp)));
28356 /* We always need to subtract here to preserve signed zero. */
28357 tmp = expand_simple_binop (mode, MINUS,
28358 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28359 emit_move_insn (res, tmp);
28361 emit_label (label);
28362 LABEL_NUSES (label) = 1;
28364 emit_move_insn (operand0, res);
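/* Editorial sketch (not part of GCC): the branchless compensation used
   above, in scalar C.  The SSE compare yields an all-ones mask where
   the condition holds; ANDing it with 1.0 (floor) or -1.0 (ceil)
   produces the correction term, and the subtraction is performed
   unconditionally so that a -0.0 result stays -0.0.  */

static double
floor_compensate_sketch (double x, double x2) /* x2 = x rounded to nearest */
{
  double corr = (x2 > x) ? 1.0 : 0.0; /* mask & 1.0 */
  return x2 - corr;                   /* always subtract */
}

static double
ceil_compensate_sketch (double x, double x2)
{
  double corr = (x2 < x) ? -1.0 : 0.0; /* mask & -1.0 */
  return x2 - corr;                    /* x2 - (-1.0) == x2 + 1.0 */
}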
28367 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28368 into OPERAND0. */
28369 void
28370 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28372 /* C code for the stuff we expand below.
28373 double xa = fabs (x), x2;
28374 if (!isless (xa, TWO52))
28375 return x;
28376 x2 = (double)(long)x;
28377 Compensate. Floor:
28378 if (x2 > x)
28379 x2 -= 1;
28380 Compensate. Ceil:
28381 if (x2 < x)
28382 x2 += 1;
28383 if (HONOR_SIGNED_ZEROS (mode))
28384 return copysign (x2, x);
28385 return x2; */
28387 enum machine_mode mode = GET_MODE (operand0);
28388 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28390 TWO52 = ix86_gen_TWO52 (mode);
28392 /* Temporary for holding the result, initialized to the input
28393 operand to ease control flow. */
28394 res = gen_reg_rtx (mode);
28395 emit_move_insn (res, operand1);
28397 /* xa = abs (operand1) */
28398 xa = ix86_expand_sse_fabs (res, &mask);
28400 /* if (!isless (xa, TWO52)) goto label; */
28401 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28403 /* xa = (double)(long)x */
28404 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28405 expand_fix (xi, res, 0);
28406 expand_float (xa, xi, 0);
28408 /* generate 1.0 */
28409 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28411 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28412 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28413 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28414 gen_rtx_AND (mode, one, tmp)));
28415 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28416 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28417 emit_move_insn (res, tmp);
28419 if (HONOR_SIGNED_ZEROS (mode))
28420 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28422 emit_label (label);
28423 LABEL_NUSES (label) = 1;
28425 emit_move_insn (operand0, res);
28428 /* Expand SSE sequence for computing round from OPERAND1 storing
28429 into OPERAND0. Sequence that works without relying on DImode truncation
28430 via cvttsd2siq, which is only available on 64-bit targets. */
28431 void
28432 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28434 /* C code for the stuff we expand below.
28435 double xa = fabs (x), xa2, x2;
28436 if (!isless (xa, TWO52))
28437 return x;
28438 Using the absolute value and copying back sign makes
28439 -0.0 -> -0.0 correct.
28440 xa2 = xa + TWO52 - TWO52;
28441 Compensate.
28442 dxa = xa2 - xa;
28443 if (dxa <= -0.5)
28444 xa2 += 1;
28445 else if (dxa > 0.5)
28446 xa2 -= 1;
28447 x2 = copysign (xa2, x);
28448 return x2; */
28450 enum machine_mode mode = GET_MODE (operand0);
28451 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28453 TWO52 = ix86_gen_TWO52 (mode);
28455 /* Temporary for holding the result, initialized to the input
28456 operand to ease control flow. */
28457 res = gen_reg_rtx (mode);
28458 emit_move_insn (res, operand1);
28460 /* xa = abs (operand1) */
28461 xa = ix86_expand_sse_fabs (res, &mask);
28463 /* if (!isless (xa, TWO52)) goto label; */
28464 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28466 /* xa2 = xa + TWO52 - TWO52; */
28467 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28468 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28470 /* dxa = xa2 - xa; */
28471 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28473 /* generate 0.5, 1.0 and -0.5 */
28474 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28475 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28476 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28477 0, OPTAB_DIRECT);
28479 /* Compensate. */
28481 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28482 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28483 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28484 gen_rtx_AND (mode, one, tmp)));
28485 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28486 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28487 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28488 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28489 gen_rtx_AND (mode, one, tmp)));
28490 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28492 /* res = copysign (xa2, operand1) */
28493 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28495 emit_label (label);
28496 LABEL_NUSES (label) = 1;
28498 emit_move_insn (operand0, res);
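/* Editorial sketch (not part of GCC): the dxa compensation above turns
   the round-to-nearest-even result of the TWO52 trick into the
   round-half-away-from-zero semantics of round().  Since the fabs'd
   input is non-negative, only the dxa == -0.5 case (a halfway value
   rounded down to even) needs fixing under the default rounding mode;
   dxa == +0.5 means the value was already rounded up, and the
   dxa > 0.5 branch is a safeguard.  Assumes |x| < 2**52, which the
   early exit above guarantees.  */

#include <math.h> /* fabs, copysign */

static double
rounddf_sketch (double x)
{
  double xa = fabs (x);
  double xa2 = (xa + 0x1p52) - 0x1p52; /* round to nearest even */
  double dxa = xa2 - xa;
  if (dxa <= -0.5)
    xa2 += 1.0;                        /* e.g. 2.5 -> 2.0 -> 3.0 */
  else if (dxa > 0.5)
    xa2 -= 1.0;
  return copysign (xa2, x);
}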
28501 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28502 into OPERAND0. */
28503 void
28504 ix86_expand_trunc (rtx operand0, rtx operand1)
28506 /* C code for SSE variant we expand below.
28507 double xa = fabs (x), x2;
28508 if (!isless (xa, TWO52))
28509 return x;
28510 x2 = (double)(long)x;
28511 if (HONOR_SIGNED_ZEROS (mode))
28512 return copysign (x2, x);
28513 return x2; */
28515 enum machine_mode mode = GET_MODE (operand0);
28516 rtx xa, xi, TWO52, label, res, mask;
28518 TWO52 = ix86_gen_TWO52 (mode);
28520 /* Temporary for holding the result, initialized to the input
28521 operand to ease control flow. */
28522 res = gen_reg_rtx (mode);
28523 emit_move_insn (res, operand1);
28525 /* xa = abs (operand1) */
28526 xa = ix86_expand_sse_fabs (res, &mask);
28528 /* if (!isless (xa, TWO52)) goto label; */
28529 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28531 /* x = (double)(long)x */
28532 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28533 expand_fix (xi, res, 0);
28534 expand_float (res, xi, 0);
28536 if (HONOR_SIGNED_ZEROS (mode))
28537 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28539 emit_label (label);
28540 LABEL_NUSES (label) = 1;
28542 emit_move_insn (operand0, res);
28545 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28546 into OPERAND0. */
28547 void
28548 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28550 enum machine_mode mode = GET_MODE (operand0);
28551 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28553 /* C code for SSE variant we expand below.
28554 double xa = fabs (x), xa2, x2;
28555 if (!isless (xa, TWO52))
28556 return x;
28557 xa2 = xa + TWO52 - TWO52;
28558 Compensate:
28559 if (xa2 > xa)
28560 xa2 -= 1.0;
28561 x2 = copysign (xa2, x);
28562 return x2; */
28565 TWO52 = ix86_gen_TWO52 (mode);
28567 /* Temporary for holding the result, initialized to the input
28568 operand to ease control flow. */
28569 res = gen_reg_rtx (mode);
28570 emit_move_insn (res, operand1);
28572 /* xa = abs (operand1) */
28573 xa = ix86_expand_sse_fabs (res, &smask);
28575 /* if (!isless (xa, TWO52)) goto label; */
28576 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28578 /* res = xa + TWO52 - TWO52; */
28579 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28580 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
28581 emit_move_insn (res, tmp);
28583 /* generate 1.0 */
28584 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28586 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
28587 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
28588 emit_insn (gen_rtx_SET (VOIDmode, mask,
28589 gen_rtx_AND (mode, mask, one)));
28590 tmp = expand_simple_binop (mode, MINUS,
28591 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
28592 emit_move_insn (res, tmp);
28594 /* res = copysign (res, operand1) */
28595 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
28597 emit_label (label);
28598 LABEL_NUSES (label) = 1;
28600 emit_move_insn (operand0, res);
28603 /* Expand SSE sequence for computing round from OPERAND1 storing
28604 into OPERAND0. */
28605 void
28606 ix86_expand_round (rtx operand0, rtx operand1)
28608 /* C code for the stuff we're doing below:
28609 double xa = fabs (x);
28610 if (!isless (xa, TWO52))
28611 return x;
28612 xa = (double)(long)(xa + nextafter (0.5, 0.0));
28613 return copysign (xa, x); */
28615 enum machine_mode mode = GET_MODE (operand0);
28616 rtx res, TWO52, xa, label, xi, half, mask;
28617 const struct real_format *fmt;
28618 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28620 /* Temporary for holding the result, initialized to the input
28621 operand to ease control flow. */
28622 res = gen_reg_rtx (mode);
28623 emit_move_insn (res, operand1);
28625 TWO52 = ix86_gen_TWO52 (mode);
28626 xa = ix86_expand_sse_fabs (res, &mask);
28627 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28629 /* load nextafter (0.5, 0.0) */
28630 fmt = REAL_MODE_FORMAT (mode);
28631 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28632 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28634 /* xa = xa + 0.5 */
28635 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
28636 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
28638 /* xa = (double)(int64_t)xa */
28639 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28640 expand_fix (xi, xa, 0);
28641 expand_float (xa, xi, 0);
28643 /* res = copysign (xa, operand1) */
28644 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
28646 emit_label (label);
28647 LABEL_NUSES (label) = 1;
28649 emit_move_insn (operand0, res);
28653 /* Check whether an SSE5 instruction's operand combination is valid.
28654 OPERANDS is the array of operands.
28655 NUM is the number of operands.
28656 USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
28657 NUM_MEMORY is the maximum number of memory operands to accept.
28658 When COMMUTATIVE is set, operands 1 and 2 can be swapped. */
28660 bool
28661 ix86_sse5_valid_op_p (rtx operands[], rtx insn ATTRIBUTE_UNUSED, int num,
28662 bool uses_oc0, int num_memory, bool commutative)
28664 int mem_mask;
28665 int mem_count;
28666 int i;
28668 /* Count the number of memory arguments */
28669 mem_mask = 0;
28670 mem_count = 0;
28671 for (i = 0; i < num; i++)
28673 enum machine_mode mode = GET_MODE (operands[i]);
28674 if (register_operand (operands[i], mode))
28677 else if (memory_operand (operands[i], mode))
28679 mem_mask |= (1 << i);
28680 mem_count++;
28683 else
28685 rtx pattern = PATTERN (insn);
28687 /* allow 0 for pcmov */
28688 if (GET_CODE (pattern) != SET
28689 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE
28690 || i < 2
28691 || operands[i] != CONST0_RTX (mode))
28692 return false;
28696 /* Special case pmacsdq{l,h} where we allow the 3rd argument to be
28697 a memory operation. */
28698 if (num_memory < 0)
28700 num_memory = -num_memory;
28701 if ((mem_mask & (1 << (num-1))) != 0)
28703 mem_mask &= ~(1 << (num-1));
28704 mem_count--;
28708 /* If there were no memory operations, allow the insn */
28709 if (mem_mask == 0)
28710 return true;
28712 /* Do not allow the destination register to be a memory operand. */
28713 else if (mem_mask & (1 << 0))
28714 return false;
28716 /* If there are too many memory operations, disallow the instruction. While
28717 the hardware only allows 1 memory reference, before register allocation
28718 we sometimes allow two memory operations for some insns, so that
28719 code like the following can be optimized:
28721 float fmadd (float *a, float *b, float *c) { return (*a * *b) + *c; }
28723 or similar cases that are vectorized into using the fmaddss
28724 instruction. */
28725 else if (mem_count > num_memory)
28726 return false;
28728 /* Don't allow more than one memory operation if not optimizing. */
28729 else if (mem_count > 1 && !optimize)
28730 return false;
28732 else if (num == 4 && mem_count == 1)
28734 /* formats (destination is the first argument), example fmaddss:
28735 xmm1, xmm1, xmm2, xmm3/mem
28736 xmm1, xmm1, xmm2/mem, xmm3
28737 xmm1, xmm2, xmm3/mem, xmm1
28738 xmm1, xmm2/mem, xmm3, xmm1 */
28739 if (uses_oc0)
28740 return ((mem_mask == (1 << 1))
28741 || (mem_mask == (1 << 2))
28742 || (mem_mask == (1 << 3)));
28744 /* format, example pmacsdd:
28745 xmm1, xmm2, xmm3/mem, xmm1 */
28746 if (commutative)
28747 return (mem_mask == (1 << 2) || mem_mask == (1 << 1));
28748 else
28749 return (mem_mask == (1 << 2));
28752 else if (num == 4 && num_memory == 2)
28754 /* If there are two memory operations, we can load one of the memory ops
28755 into the destination register. This is for optimizing the
28756 multiply/add ops, for which the combiner has given both the multiply
28757 and the add insns a memory operand. We have to be careful
28758 that the destination doesn't overlap with the inputs. */
28759 rtx op0 = operands[0];
28761 if (reg_mentioned_p (op0, operands[1])
28762 || reg_mentioned_p (op0, operands[2])
28763 || reg_mentioned_p (op0, operands[3]))
28764 return false;
28766 /* formats (destination is the first argument), example fmaddss:
28767 xmm1, xmm1, xmm2, xmm3/mem
28768 xmm1, xmm1, xmm2/mem, xmm3
28769 xmm1, xmm2, xmm3/mem, xmm1
28770 xmm1, xmm2/mem, xmm3, xmm1
28772 For the oc0 case, we will load either operands[1] or operands[3] into
28773 operands[0], so any combination of 2 memory operands is ok. */
28774 if (uses_oc0)
28775 return true;
28777 /* format, example pmacsdd:
28778 xmm1, xmm2, xmm3/mem, xmm1
28780 For the integer multiply/add instructions be more restrictive and
28781 require operands[2] and operands[3] to be the memory operands. */
28782 if (commutative)
28783 return (mem_mask == ((1 << 1) | (1 << 3)) || mem_mask == ((1 << 2) | (1 << 3)));
28784 else
28785 return (mem_mask == ((1 << 2) | (1 << 3)));
28788 else if (num == 3 && num_memory == 1)
28790 /* formats, example protb:
28791 xmm1, xmm2, xmm3/mem
28792 xmm1, xmm2/mem, xmm3 */
28793 if (uses_oc0)
28794 return ((mem_mask == (1 << 1)) || (mem_mask == (1 << 2)));
28796 /* format, example comeq:
28797 xmm1, xmm2, xmm3/mem */
28798 else
28799 return (mem_mask == (1 << 2));
28802 else
28803 gcc_unreachable ();
28805 return false;
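/* Editorial sketch (not part of GCC): how mem_mask encodes operand
   positions in the validation above.  Bit i is set iff operands[i] is
   a memory operand, so e.g. a 4-operand fmaddss whose third source is
   in memory yields mem_mask == (1 << 3).  */

static int
mem_mask_sketch (const int op_is_mem[], int num)
{
  int mask = 0, i;
  for (i = 0; i < num; i++)
    if (op_is_mem[i])
      mask |= 1 << i;
  return mask; /* e.g. {0, 0, 0, 1} -> 1 << 3 */
}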
28809 /* Fix up an SSE5 instruction that has 2 memory input references into a form
28810 the hardware will allow, by using the destination register to load one of
28811 the memory operands. Presently this is used by the multiply/add routines
28812 to allow 2 memory references. */
28814 void
28815 ix86_expand_sse5_multiple_memory (rtx operands[],
28816 int num,
28817 enum machine_mode mode)
28819 rtx op0 = operands[0];
28820 if (num != 4
28821 || memory_operand (op0, mode)
28822 || reg_mentioned_p (op0, operands[1])
28823 || reg_mentioned_p (op0, operands[2])
28824 || reg_mentioned_p (op0, operands[3]))
28825 gcc_unreachable ();
28827 /* For 2 memory operands, pick either operands[1] or operands[3] to move into
28828 the destination register. */
28829 if (memory_operand (operands[1], mode))
28831 emit_move_insn (op0, operands[1]);
28832 operands[1] = op0;
28834 else if (memory_operand (operands[3], mode))
28836 emit_move_insn (op0, operands[3]);
28837 operands[3] = op0;
28839 else
28840 gcc_unreachable ();
28842 return;
28846 /* Table of valid machine attributes. */
28847 static const struct attribute_spec ix86_attribute_table[] =
28849 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
28850 /* Stdcall attribute says callee is responsible for popping arguments
28851 if they are not variable. */
28852 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28853 /* Fastcall attribute says callee is responsible for popping arguments
28854 if they are not variable. */
28855 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28856 /* Cdecl attribute says the callee is a normal C declaration */
28857 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28858 /* Regparm attribute specifies how many integer arguments are to be
28859 passed in registers. */
28860 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
28861 /* Sseregparm attribute says we are using x86_64 calling conventions
28862 for FP arguments. */
28863 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28864 /* force_align_arg_pointer says this function realigns the stack at entry. */
28865 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
28866 false, true, true, ix86_handle_cconv_attribute },
28867 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
28868 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
28869 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
28870 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
28871 #endif
28872 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28873 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28874 #ifdef SUBTARGET_ATTRIBUTE_TABLE
28875 SUBTARGET_ATTRIBUTE_TABLE,
28876 #endif
28877 /* ms_abi and sysv_abi calling convention function attributes. */
28878 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28879 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28880 /* End element. */
28881 { NULL, 0, 0, false, false, false, NULL }
28884 /* Implement targetm.vectorize.builtin_vectorization_cost. */
28885 static int
28886 x86_builtin_vectorization_cost (bool runtime_test)
28888 /* If the branch of the runtime test is taken, i.e. the vectorized
28889 version is skipped, this incurs a misprediction cost (because the
28890 vectorized version is expected to be the fall-through). So we subtract
28891 the latency of a mispredicted branch from the costs that are incurred
28892 when the vectorized version is executed.
28894 TODO: The values in individual target tables have to be tuned or new
28895 fields may be needed. For example, on K8, the default branch path is the
28896 not-taken path. If the taken path is predicted correctly, the minimum
28897 penalty of going down the taken-path is 1 cycle. If the taken-path is
28898 not predicted correctly, then the minimum penalty is 10 cycles. */
28900 if (runtime_test)
28902 return (-(ix86_cost->cond_taken_branch_cost));
28904 else
28905 return 0;
28908 /* Return the va_list type node specific to the calling ABI
28909 of FNDECL. */
28911 tree
28912 ix86_fn_abi_va_list (tree fndecl)
28914 int abi;
28916 if (!TARGET_64BIT)
28917 return va_list_type_node;
28918 gcc_assert (fndecl != NULL_TREE);
28919 abi = ix86_function_abi ((const_tree) fndecl);
28921 if (abi == MS_ABI)
28922 return ms_va_list_type_node;
28923 else
28924 return sysv_va_list_type_node;
28927 /* Returns the canonical va_list type specified by TYPE. If there
28928 is no valid TYPE provided, it returns NULL_TREE. */
28930 tree
28931 ix86_canonical_va_list_type (tree type)
28933 tree wtype, htype;
28935 /* Resolve references and pointers to va_list type. */
28936 if (INDIRECT_REF_P (type))
28937 type = TREE_TYPE (type);
28938 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
28939 type = TREE_TYPE (type);
28941 if (TARGET_64BIT)
28943 wtype = va_list_type_node;
28944 gcc_assert (wtype != NULL_TREE);
28945 htype = type;
28946 if (TREE_CODE (wtype) == ARRAY_TYPE)
28948 /* If va_list is an array type, the argument may have decayed
28949 to a pointer type, e.g. by being passed to another function.
28950 In that case, unwrap both types so that we can compare the
28951 underlying records. */
28952 if (TREE_CODE (htype) == ARRAY_TYPE
28953 || POINTER_TYPE_P (htype))
28955 wtype = TREE_TYPE (wtype);
28956 htype = TREE_TYPE (htype);
28959 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
28960 return va_list_type_node;
28961 wtype = sysv_va_list_type_node;
28962 gcc_assert (wtype != NULL_TREE);
28963 htype = type;
28964 if (TREE_CODE (wtype) == ARRAY_TYPE)
28966 /* If va_list is an array type, the argument may have decayed
28967 to a pointer type, e.g. by being passed to another function.
28968 In that case, unwrap both types so that we can compare the
28969 underlying records. */
28970 if (TREE_CODE (htype) == ARRAY_TYPE
28971 || POINTER_TYPE_P (htype))
28973 wtype = TREE_TYPE (wtype);
28974 htype = TREE_TYPE (htype);
28977 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
28978 return sysv_va_list_type_node;
28979 wtype = ms_va_list_type_node;
28980 gcc_assert (wtype != NULL_TREE);
28981 htype = type;
28982 if (TREE_CODE (wtype) == ARRAY_TYPE)
28984 /* If va_list is an array type, the argument may have decayed
28985 to a pointer type, e.g. by being passed to another function.
28986 In that case, unwrap both types so that we can compare the
28987 underlying records. */
28988 if (TREE_CODE (htype) == ARRAY_TYPE
28989 || POINTER_TYPE_P (htype))
28991 wtype = TREE_TYPE (wtype);
28992 htype = TREE_TYPE (htype);
28995 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
28996 return ms_va_list_type_node;
28997 return NULL_TREE;
28999 return std_canonical_va_list_type (type);
29002 /* Iterate through the target-specific builtin types for va_list.
29003 IDX denotes the iterator, *PTREE is set to the result type of
29004 the va_list builtin, and *PNAME to its internal type.
29005 Returns zero if there is no element for this index, otherwise
29006 IDX should be increased upon the next call.
29007 Note, do not iterate a base builtin's name like __builtin_va_list.
29008 Used from c_common_nodes_and_builtins. */
29010 int
29011 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
29013 if (!TARGET_64BIT)
29014 return 0;
29015 switch (idx) {
29016 case 0:
29017 *ptree = ms_va_list_type_node;
29018 *pname = "__builtin_ms_va_list";
29019 break;
29020 case 1:
29021 *ptree = sysv_va_list_type_node;
29022 *pname = "__builtin_sysv_va_list";
29023 break;
29024 default:
29025 return 0;
29027 return 1;
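/* Editorial sketch (not part of GCC): how a consumer of the iterator
   above, such as c_common_nodes_and_builtins, can walk the va_list
   builtins (hypothetical caller, shown for illustration).  */

static void
enum_va_list_usage_sketch (void)
{
  const char *name;
  tree type;
  int idx;

  for (idx = 0; ix86_enum_va_list (idx, &name, &type); idx++)
    ; /* register TYPE under NAME, e.g. "__builtin_ms_va_list" */
}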
29030 /* Initialize the GCC target structure. */
29031 #undef TARGET_RETURN_IN_MEMORY
29032 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
29034 #undef TARGET_ATTRIBUTE_TABLE
29035 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
29036 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29037 # undef TARGET_MERGE_DECL_ATTRIBUTES
29038 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
29039 #endif
29041 #undef TARGET_COMP_TYPE_ATTRIBUTES
29042 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
29044 #undef TARGET_INIT_BUILTINS
29045 #define TARGET_INIT_BUILTINS ix86_init_builtins
29046 #undef TARGET_EXPAND_BUILTIN
29047 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
29049 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
29050 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
29051 ix86_builtin_vectorized_function
29053 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
29054 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
29056 #undef TARGET_BUILTIN_RECIPROCAL
29057 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
29059 #undef TARGET_ASM_FUNCTION_EPILOGUE
29060 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
29062 #undef TARGET_ENCODE_SECTION_INFO
29063 #ifndef SUBTARGET_ENCODE_SECTION_INFO
29064 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
29065 #else
29066 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
29067 #endif
29069 #undef TARGET_ASM_OPEN_PAREN
29070 #define TARGET_ASM_OPEN_PAREN ""
29071 #undef TARGET_ASM_CLOSE_PAREN
29072 #define TARGET_ASM_CLOSE_PAREN ""
29074 #undef TARGET_ASM_ALIGNED_HI_OP
29075 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
29076 #undef TARGET_ASM_ALIGNED_SI_OP
29077 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
29078 #ifdef ASM_QUAD
29079 #undef TARGET_ASM_ALIGNED_DI_OP
29080 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
29081 #endif
29083 #undef TARGET_ASM_UNALIGNED_HI_OP
29084 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
29085 #undef TARGET_ASM_UNALIGNED_SI_OP
29086 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
29087 #undef TARGET_ASM_UNALIGNED_DI_OP
29088 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
29090 #undef TARGET_SCHED_ADJUST_COST
29091 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
29092 #undef TARGET_SCHED_ISSUE_RATE
29093 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
29094 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
29095 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
29096 ia32_multipass_dfa_lookahead
29098 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
29099 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
29101 #ifdef HAVE_AS_TLS
29102 #undef TARGET_HAVE_TLS
29103 #define TARGET_HAVE_TLS true
29104 #endif
29105 #undef TARGET_CANNOT_FORCE_CONST_MEM
29106 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
29107 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
29108 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
29110 #undef TARGET_DELEGITIMIZE_ADDRESS
29111 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
29113 #undef TARGET_MS_BITFIELD_LAYOUT_P
29114 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
29116 #if TARGET_MACHO
29117 #undef TARGET_BINDS_LOCAL_P
29118 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
29119 #endif
29120 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29121 #undef TARGET_BINDS_LOCAL_P
29122 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
29123 #endif
29125 #undef TARGET_ASM_OUTPUT_MI_THUNK
29126 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
29127 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
29128 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
29130 #undef TARGET_ASM_FILE_START
29131 #define TARGET_ASM_FILE_START x86_file_start
29133 #undef TARGET_DEFAULT_TARGET_FLAGS
29134 #define TARGET_DEFAULT_TARGET_FLAGS \
29135 (TARGET_DEFAULT \
29136 | TARGET_SUBTARGET_DEFAULT \
29137 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
29139 #undef TARGET_HANDLE_OPTION
29140 #define TARGET_HANDLE_OPTION ix86_handle_option
29142 #undef TARGET_RTX_COSTS
29143 #define TARGET_RTX_COSTS ix86_rtx_costs
29144 #undef TARGET_ADDRESS_COST
29145 #define TARGET_ADDRESS_COST ix86_address_cost
29147 #undef TARGET_FIXED_CONDITION_CODE_REGS
29148 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
29149 #undef TARGET_CC_MODES_COMPATIBLE
29150 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
29152 #undef TARGET_MACHINE_DEPENDENT_REORG
29153 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
29155 #undef TARGET_BUILD_BUILTIN_VA_LIST
29156 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
29158 #undef TARGET_FN_ABI_VA_LIST
29159 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
29161 #undef TARGET_CANONICAL_VA_LIST_TYPE
29162 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
29164 #undef TARGET_EXPAND_BUILTIN_VA_START
29165 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
29167 #undef TARGET_MD_ASM_CLOBBERS
29168 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
29170 #undef TARGET_PROMOTE_PROTOTYPES
29171 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
29172 #undef TARGET_STRUCT_VALUE_RTX
29173 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
29174 #undef TARGET_SETUP_INCOMING_VARARGS
29175 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
29176 #undef TARGET_MUST_PASS_IN_STACK
29177 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
29178 #undef TARGET_PASS_BY_REFERENCE
29179 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
29180 #undef TARGET_INTERNAL_ARG_POINTER
29181 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
29182 #undef TARGET_UPDATE_STACK_BOUNDARY
29183 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
29184 #undef TARGET_GET_DRAP_RTX
29185 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
29186 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
29187 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
29188 #undef TARGET_STRICT_ARGUMENT_NAMING
29189 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
29191 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
29192 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
29194 #undef TARGET_SCALAR_MODE_SUPPORTED_P
29195 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
29197 #undef TARGET_VECTOR_MODE_SUPPORTED_P
29198 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
29200 #undef TARGET_C_MODE_FOR_SUFFIX
29201 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
29203 #ifdef HAVE_AS_TLS
29204 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
29205 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
29206 #endif
29208 #ifdef SUBTARGET_INSERT_ATTRIBUTES
29209 #undef TARGET_INSERT_ATTRIBUTES
29210 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
29211 #endif
29213 #undef TARGET_MANGLE_TYPE
29214 #define TARGET_MANGLE_TYPE ix86_mangle_type
29216 #undef TARGET_STACK_PROTECT_FAIL
29217 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
29219 #undef TARGET_FUNCTION_VALUE
29220 #define TARGET_FUNCTION_VALUE ix86_function_value
29222 #undef TARGET_SECONDARY_RELOAD
29223 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
29225 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
29226 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST x86_builtin_vectorization_cost
29228 #undef TARGET_SET_CURRENT_FUNCTION
29229 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
29231 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
29232 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
29234 #undef TARGET_OPTION_SAVE
29235 #define TARGET_OPTION_SAVE ix86_function_specific_save
29237 #undef TARGET_OPTION_RESTORE
29238 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
29240 #undef TARGET_OPTION_PRINT
29241 #define TARGET_OPTION_PRINT ix86_function_specific_print
29243 #undef TARGET_OPTION_CAN_INLINE_P
29244 #define TARGET_OPTION_CAN_INLINE_P ix86_can_inline_p
29246 struct gcc_target targetm = TARGET_INITIALIZER;
29248 #include "gt-i386.h"