/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "c-common.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
#include "params.h"
#include "cselib.h"

static int x86_builtin_vectorization_cost (bool);
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)        \
  ((mode) == QImode ? 0         \
   : (mode) == HImode ? 1       \
   : (mode) == SImode ? 2       \
   : (mode) == DImode ? 3       \
   : 4)
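
/* Editorial sketch (not part of the imported GCC source): MODE_INDEX
   selects the per-mode slot in the five-entry cost arrays of the
   processor_costs tables below, so a mode-dependent cost lookup reads
   roughly as follows.  The field names mult_init and divide come from
   struct processor_costs; the helper name is hypothetical.  */
#if 0
static int
example_multiply_start_cost (enum machine_mode mode)
{
  /* ix86_cost points at the cost table of the processor being tuned
     for; slots are QImode, HImode, SImode, DImode, then "other".  */
  return ix86_cost->mult_init[MODE_INDEX (mode)];
}
#endif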
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
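
/* Editorial note (not in the imported source): each memcpy/memset entry
   in the cost tables below is a pair of stringop_algs initializers, the
   first consulted for 32-bit code and the second for 64-bit code.
   Within an initializer such as

     {libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}

   the leading algorithm is used when the block size is unknown, and the
   {max, alg} pairs pick an algorithm for known sizes up to max bytes,
   with -1 meaning "no upper bound".  DUMMY_STRINGOP_ALGS fills the slot
   for a mode the table is never used with.  */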
const
struct processor_costs ix86_size_cost = {/* costs for tuning for size */
  COSTS_N_BYTES (2),              /* cost of an add instruction */
  COSTS_N_BYTES (3),              /* cost of a lea instruction */
  COSTS_N_BYTES (2),              /* variable shift costs */
  COSTS_N_BYTES (3),              /* constant shift costs */
  {COSTS_N_BYTES (3),             /* cost of starting multiply for QI */
   COSTS_N_BYTES (3),             /* HI */
   COSTS_N_BYTES (3),             /* SI */
   COSTS_N_BYTES (3),             /* DI */
   COSTS_N_BYTES (5)},            /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),             /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),             /* HI */
   COSTS_N_BYTES (3),             /* SI */
   COSTS_N_BYTES (3),             /* DI */
   COSTS_N_BYTES (5)},            /* other */
  COSTS_N_BYTES (3),              /* cost of movsx */
  COSTS_N_BYTES (3),              /* cost of movzx */
  0,                              /* "large" insn */
  2,                              /* MOVE_RATIO */
  2,                              /* cost for loading QImode using movzbl */
  {2, 2, 2},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {2, 2, 2},                      /* cost of storing integer registers */
  2,                              /* cost of reg,reg fld/fst */
  {2, 2, 2},                      /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {2, 2, 2},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  3,                              /* cost of moving MMX register */
  {3, 3},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {3, 3},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  3,                              /* cost of moving SSE register */
  {3, 3, 3},                      /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {3, 3, 3},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  3,                              /* MMX or SSE register to integer */
  0,                              /* size of l1 cache  */
  0,                              /* size of l2 cache  */
  0,                              /* size of prefetch block */
  0,                              /* number of parallel prefetches */
  2,                              /* Branch cost */
  COSTS_N_BYTES (2),              /* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),              /* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),              /* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),              /* cost of FABS instruction.  */
  COSTS_N_BYTES (2),              /* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),              /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  1,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  1,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {  /* 386 specific costs */
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (1),              /* cost of a lea instruction */
  COSTS_N_INSNS (3),              /* variable shift costs */
  COSTS_N_INSNS (2),              /* constant shift costs */
  {COSTS_N_INSNS (6),             /* cost of starting multiply for QI */
   COSTS_N_INSNS (6),             /* HI */
   COSTS_N_INSNS (6),             /* SI */
   COSTS_N_INSNS (6),             /* DI */
   COSTS_N_INSNS (6)},            /* other */
  COSTS_N_INSNS (1),              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),            /* HI */
   COSTS_N_INSNS (23),            /* SI */
   COSTS_N_INSNS (23),            /* DI */
   COSTS_N_INSNS (23)},           /* other */
  COSTS_N_INSNS (3),              /* cost of movsx */
  COSTS_N_INSNS (2),              /* cost of movzx */
  15,                             /* "large" insn */
  3,                              /* MOVE_RATIO */
  4,                              /* cost for loading QImode using movzbl */
  {2, 4, 2},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {2, 4, 2},                      /* cost of storing integer registers */
  2,                              /* cost of reg,reg fld/fst */
  {8, 8, 8},                      /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {8, 8, 8},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {4, 8},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {4, 8},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {4, 8, 16},                     /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {4, 8, 16},                     /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  3,                              /* MMX or SSE register to integer */
  0,                              /* size of l1 cache  */
  0,                              /* size of l2 cache  */
  0,                              /* size of prefetch block */
  0,                              /* number of parallel prefetches */
  1,                              /* Branch cost */
  COSTS_N_INSNS (23),             /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),             /* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),             /* cost of FABS instruction.  */
  COSTS_N_INSNS (24),             /* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),            /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs i486_cost = {  /* 486 specific costs */
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (1),              /* cost of a lea instruction */
  COSTS_N_INSNS (3),              /* variable shift costs */
  COSTS_N_INSNS (2),              /* constant shift costs */
  {COSTS_N_INSNS (12),            /* cost of starting multiply for QI */
   COSTS_N_INSNS (12),            /* HI */
   COSTS_N_INSNS (12),            /* SI */
   COSTS_N_INSNS (12),            /* DI */
   COSTS_N_INSNS (12)},           /* other */
  1,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),            /* HI */
   COSTS_N_INSNS (40),            /* SI */
   COSTS_N_INSNS (40),            /* DI */
   COSTS_N_INSNS (40)},           /* other */
  COSTS_N_INSNS (3),              /* cost of movsx */
  COSTS_N_INSNS (2),              /* cost of movzx */
  15,                             /* "large" insn */
  3,                              /* MOVE_RATIO */
  4,                              /* cost for loading QImode using movzbl */
  {2, 4, 2},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {2, 4, 2},                      /* cost of storing integer registers */
  2,                              /* cost of reg,reg fld/fst */
  {8, 8, 8},                      /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {8, 8, 8},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {4, 8},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {4, 8},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {4, 8, 16},                     /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {4, 8, 16},                     /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  3,                              /* MMX or SSE register to integer */
  4,                              /* size of l1 cache.  486 has an 8kB cache
                                     shared for code and data, so 4kB is
                                     not really precise.  */
  4,                              /* size of l2 cache  */
  0,                              /* size of prefetch block */
  0,                              /* number of parallel prefetches */
  1,                              /* Branch cost */
  COSTS_N_INSNS (8),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),             /* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),             /* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (1),              /* cost of a lea instruction */
  COSTS_N_INSNS (4),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (11),            /* cost of starting multiply for QI */
   COSTS_N_INSNS (11),            /* HI */
   COSTS_N_INSNS (11),            /* SI */
   COSTS_N_INSNS (11),            /* DI */
   COSTS_N_INSNS (11)},           /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),            /* HI */
   COSTS_N_INSNS (25),            /* SI */
   COSTS_N_INSNS (25),            /* DI */
   COSTS_N_INSNS (25)},           /* other */
  COSTS_N_INSNS (3),              /* cost of movsx */
  COSTS_N_INSNS (2),              /* cost of movzx */
  8,                              /* "large" insn */
  6,                              /* MOVE_RATIO */
  6,                              /* cost for loading QImode using movzbl */
  {2, 4, 2},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {2, 4, 2},                      /* cost of storing integer registers */
  2,                              /* cost of reg,reg fld/fst */
  {2, 2, 6},                      /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {4, 4, 6},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  8,                              /* cost of moving MMX register */
  {8, 8},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {8, 8},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {4, 8, 16},                     /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {4, 8, 16},                     /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  3,                              /* MMX or SSE register to integer */
  8,                              /* size of l1 cache.  */
  8,                              /* size of l2 cache  */
  0,                              /* size of prefetch block */
  0,                              /* number of parallel prefetches */
  2,                              /* Branch cost */
  COSTS_N_INSNS (3),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),             /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (1),              /* cost of a lea instruction */
  COSTS_N_INSNS (1),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (4),             /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),             /* HI */
   COSTS_N_INSNS (4),             /* SI */
   COSTS_N_INSNS (4),             /* DI */
   COSTS_N_INSNS (4)},            /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),            /* HI */
   COSTS_N_INSNS (17),            /* SI */
   COSTS_N_INSNS (17),            /* DI */
   COSTS_N_INSNS (17)},           /* other */
  COSTS_N_INSNS (1),              /* cost of movsx */
  COSTS_N_INSNS (1),              /* cost of movzx */
  8,                              /* "large" insn */
  6,                              /* MOVE_RATIO */
  2,                              /* cost for loading QImode using movzbl */
  {4, 4, 4},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {2, 2, 2},                      /* cost of storing integer registers */
  2,                              /* cost of reg,reg fld/fst */
  {2, 2, 6},                      /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {4, 4, 6},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {2, 2},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {2, 2},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {2, 2, 8},                      /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {2, 2, 8},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  3,                              /* MMX or SSE register to integer */
  8,                              /* size of l1 cache.  */
  256,                            /* size of l2 cache  */
  32,                             /* size of prefetch block */
  6,                              /* number of parallel prefetches */
  2,                              /* Branch cost */
  COSTS_N_INSNS (3),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),             /* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (1),              /* cost of a lea instruction */
  COSTS_N_INSNS (2),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (3),             /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),             /* HI */
   COSTS_N_INSNS (7),             /* SI */
   COSTS_N_INSNS (7),             /* DI */
   COSTS_N_INSNS (7)},            /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),            /* HI */
   COSTS_N_INSNS (39),            /* SI */
   COSTS_N_INSNS (39),            /* DI */
   COSTS_N_INSNS (39)},           /* other */
  COSTS_N_INSNS (1),              /* cost of movsx */
  COSTS_N_INSNS (1),              /* cost of movzx */
  8,                              /* "large" insn */
  4,                              /* MOVE_RATIO */
  1,                              /* cost for loading QImode using movzbl */
  {1, 1, 1},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {1, 1, 1},                      /* cost of storing integer registers */
  1,                              /* cost of reg,reg fld/fst */
  {1, 1, 1},                      /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {4, 6, 6},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */

  1,                              /* cost of moving MMX register */
  {1, 1},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {1, 1},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  1,                              /* cost of moving SSE register */
  {1, 1, 1},                      /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {1, 1, 1},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  1,                              /* MMX or SSE register to integer */
  64,                             /* size of l1 cache.  */
  128,                            /* size of l2 cache.  */
  32,                             /* size of prefetch block */
  1,                              /* number of parallel prefetches */
  1,                              /* Branch cost */
  COSTS_N_INSNS (6),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),             /* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),             /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (2),              /* cost of a lea instruction */
  COSTS_N_INSNS (1),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (3),             /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),             /* HI */
   COSTS_N_INSNS (3),             /* SI */
   COSTS_N_INSNS (3),             /* DI */
   COSTS_N_INSNS (3)},            /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),            /* HI */
   COSTS_N_INSNS (18),            /* SI */
   COSTS_N_INSNS (18),            /* DI */
   COSTS_N_INSNS (18)},           /* other */
  COSTS_N_INSNS (2),              /* cost of movsx */
  COSTS_N_INSNS (2),              /* cost of movzx */
  8,                              /* "large" insn */
  4,                              /* MOVE_RATIO */
  3,                              /* cost for loading QImode using movzbl */
  {4, 5, 4},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {2, 3, 2},                      /* cost of storing integer registers */
  4,                              /* cost of reg,reg fld/fst */
  {6, 6, 6},                      /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {4, 4, 4},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {2, 2},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {2, 2},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {2, 2, 8},                      /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {2, 2, 8},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  6,                              /* MMX or SSE register to integer */
  32,                             /* size of l1 cache.  */
  32,                             /* size of l2 cache.  Some models
                                     have integrated l2 cache, but
                                     optimizing for k6 is not important
                                     enough to worry about that.  */
  32,                             /* size of prefetch block */
  1,                              /* number of parallel prefetches */
  1,                              /* Branch cost */
  COSTS_N_INSNS (2),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),             /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (2),              /* cost of a lea instruction */
  COSTS_N_INSNS (1),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (5),             /* cost of starting multiply for QI */
   COSTS_N_INSNS (5),             /* HI */
   COSTS_N_INSNS (5),             /* SI */
   COSTS_N_INSNS (5),             /* DI */
   COSTS_N_INSNS (5)},            /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),            /* HI */
   COSTS_N_INSNS (42),            /* SI */
   COSTS_N_INSNS (74),            /* DI */
   COSTS_N_INSNS (74)},           /* other */
  COSTS_N_INSNS (1),              /* cost of movsx */
  COSTS_N_INSNS (1),              /* cost of movzx */
  8,                              /* "large" insn */
  9,                              /* MOVE_RATIO */
  4,                              /* cost for loading QImode using movzbl */
  {3, 4, 3},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {3, 4, 3},                      /* cost of storing integer registers */
  4,                              /* cost of reg,reg fld/fst */
  {4, 4, 12},                     /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {6, 6, 8},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {4, 4},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {4, 4},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {4, 4, 6},                      /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {4, 4, 5},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  5,                              /* MMX or SSE register to integer */
  64,                             /* size of l1 cache.  */
  256,                            /* size of l2 cache.  */
  64,                             /* size of prefetch block */
  6,                              /* number of parallel prefetches */
  5,                              /* Branch cost */
  COSTS_N_INSNS (4),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),             /* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with the REP prefix (relative to
     loops) compared to K8.  Alignment becomes important after 8 bytes for
     memcpy and 128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (2),              /* cost of a lea instruction */
  COSTS_N_INSNS (1),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (3),             /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),             /* HI */
   COSTS_N_INSNS (3),             /* SI */
   COSTS_N_INSNS (4),             /* DI */
   COSTS_N_INSNS (5)},            /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),            /* HI */
   COSTS_N_INSNS (42),            /* SI */
   COSTS_N_INSNS (74),            /* DI */
   COSTS_N_INSNS (74)},           /* other */
  COSTS_N_INSNS (1),              /* cost of movsx */
  COSTS_N_INSNS (1),              /* cost of movzx */
  8,                              /* "large" insn */
  9,                              /* MOVE_RATIO */
  4,                              /* cost for loading QImode using movzbl */
  {3, 4, 3},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {3, 4, 3},                      /* cost of storing integer registers */
  4,                              /* cost of reg,reg fld/fst */
  {4, 4, 12},                     /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {6, 6, 8},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {3, 3},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {4, 4},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {4, 3, 6},                      /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {4, 4, 5},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  5,                              /* MMX or SSE register to integer */
  64,                             /* size of l1 cache.  */
  512,                            /* size of l2 cache.  */
  64,                             /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea to leave the number of prefetches entirely unlimited, as
     their execution also takes some time).  */
  100,                            /* number of parallel prefetches */
  3,                              /* Branch cost */
  COSTS_N_INSNS (4),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),             /* cost of FSQRT instruction.  */
  /* K8 has an optimized REP instruction for medium-sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                              /* scalar_stmt_cost.  */
  2,                              /* scalar load_cost.  */
  2,                              /* scalar_store_cost.  */
  5,                              /* vec_stmt_cost.  */
  0,                              /* vec_to_scalar_cost.  */
  2,                              /* scalar_to_vec_cost.  */
  2,                              /* vec_align_load_cost.  */
  3,                              /* vec_unalign_load_cost.  */
  3,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  2,                              /* cond_not_taken_branch_cost.  */
};

struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (2),              /* cost of a lea instruction */
  COSTS_N_INSNS (1),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (3),             /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),             /* HI */
   COSTS_N_INSNS (3),             /* SI */
   COSTS_N_INSNS (4),             /* DI */
   COSTS_N_INSNS (5)},            /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),            /* HI */
   COSTS_N_INSNS (51),            /* SI */
   COSTS_N_INSNS (83),            /* DI */
   COSTS_N_INSNS (83)},           /* other */
  COSTS_N_INSNS (1),              /* cost of movsx */
  COSTS_N_INSNS (1),              /* cost of movzx */
  8,                              /* "large" insn */
  9,                              /* MOVE_RATIO */
  4,                              /* cost for loading QImode using movzbl */
  {3, 4, 3},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {3, 4, 3},                      /* cost of storing integer registers */
  4,                              /* cost of reg,reg fld/fst */
  {4, 4, 12},                     /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {6, 6, 8},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {3, 3},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {4, 4},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {4, 4, 3},                      /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {4, 4, 5},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  3,                              /* MMX or SSE register to integer */
  /* On K8:
      MOVD reg64, xmmreg   Double   FSTORE 4
      MOVD reg32, xmmreg   Double   FSTORE 4
     On AMDFAM10:
      MOVD reg64, xmmreg   Double   FADD 3
                                    1/1  1/1
      MOVD reg32, xmmreg   Double   FADD 3
                                    1/1  1/1 */
  64,                             /* size of l1 cache.  */
  512,                            /* size of l2 cache.  */
  64,                             /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea to leave the number of prefetches entirely unlimited, as
     their execution also takes some time).  */
  100,                            /* number of parallel prefetches */
  2,                              /* Branch cost */
  COSTS_N_INSNS (4),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),             /* cost of FSQRT instruction.  */

  /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but
     for very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                              /* scalar_stmt_cost.  */
  2,                              /* scalar load_cost.  */
  2,                              /* scalar_store_cost.  */
  6,                              /* vec_stmt_cost.  */
  0,                              /* vec_to_scalar_cost.  */
  2,                              /* scalar_to_vec_cost.  */
  2,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  2,                              /* vec_store_cost.  */
  2,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (3),              /* cost of a lea instruction */
  COSTS_N_INSNS (4),              /* variable shift costs */
  COSTS_N_INSNS (4),              /* constant shift costs */
  {COSTS_N_INSNS (15),            /* cost of starting multiply for QI */
   COSTS_N_INSNS (15),            /* HI */
   COSTS_N_INSNS (15),            /* SI */
   COSTS_N_INSNS (15),            /* DI */
   COSTS_N_INSNS (15)},           /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),            /* HI */
   COSTS_N_INSNS (56),            /* SI */
   COSTS_N_INSNS (56),            /* DI */
   COSTS_N_INSNS (56)},           /* other */
  COSTS_N_INSNS (1),              /* cost of movsx */
  COSTS_N_INSNS (1),              /* cost of movzx */
  16,                             /* "large" insn */
  6,                              /* MOVE_RATIO */
  2,                              /* cost for loading QImode using movzbl */
  {4, 5, 4},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {2, 3, 2},                      /* cost of storing integer registers */
  2,                              /* cost of reg,reg fld/fst */
  {2, 2, 6},                      /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {4, 4, 6},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {2, 2},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {2, 2},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  12,                             /* cost of moving SSE register */
  {12, 12, 12},                   /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {2, 2, 8},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  10,                             /* MMX or SSE register to integer */
  8,                              /* size of l1 cache.  */
  256,                            /* size of l2 cache.  */
  64,                             /* size of prefetch block */
  6,                              /* number of parallel prefetches */
  2,                              /* Branch cost */
  COSTS_N_INSNS (5),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),             /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (1),              /* cost of a lea instruction */
  COSTS_N_INSNS (1),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (10),            /* cost of starting multiply for QI */
   COSTS_N_INSNS (10),            /* HI */
   COSTS_N_INSNS (10),            /* SI */
   COSTS_N_INSNS (10),            /* DI */
   COSTS_N_INSNS (10)},           /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),            /* HI */
   COSTS_N_INSNS (66),            /* SI */
   COSTS_N_INSNS (66),            /* DI */
   COSTS_N_INSNS (66)},           /* other */
  COSTS_N_INSNS (1),              /* cost of movsx */
  COSTS_N_INSNS (1),              /* cost of movzx */
  16,                             /* "large" insn */
  17,                             /* MOVE_RATIO */
  4,                              /* cost for loading QImode using movzbl */
  {4, 4, 4},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {4, 4, 4},                      /* cost of storing integer registers */
  3,                              /* cost of reg,reg fld/fst */
  {12, 12, 12},                   /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {4, 4, 4},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  6,                              /* cost of moving MMX register */
  {12, 12},                       /* cost of loading MMX registers
                                     in SImode and DImode */
  {12, 12},                       /* cost of storing MMX registers
                                     in SImode and DImode */
  6,                              /* cost of moving SSE register */
  {12, 12, 12},                   /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {12, 12, 12},                   /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  8,                              /* MMX or SSE register to integer */
  8,                              /* size of l1 cache.  */
  1024,                           /* size of l2 cache.  */
  128,                            /* size of prefetch block */
  8,                              /* number of parallel prefetches */
  1,                              /* Branch cost */
  COSTS_N_INSNS (6),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),             /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
              {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,          /* cost of a lea instruction */
  COSTS_N_INSNS (1),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (3),             /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),             /* HI */
   COSTS_N_INSNS (3),             /* SI */
   COSTS_N_INSNS (3),             /* DI */
   COSTS_N_INSNS (3)},            /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),            /* HI */
   COSTS_N_INSNS (22),            /* SI */
   COSTS_N_INSNS (22),            /* DI */
   COSTS_N_INSNS (22)},           /* other */
  COSTS_N_INSNS (1),              /* cost of movsx */
  COSTS_N_INSNS (1),              /* cost of movzx */
  8,                              /* "large" insn */
  16,                             /* MOVE_RATIO */
  2,                              /* cost for loading QImode using movzbl */
  {6, 6, 6},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {4, 4, 4},                      /* cost of storing integer registers */
  2,                              /* cost of reg,reg fld/fst */
  {6, 6, 6},                      /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {4, 4, 4},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {6, 6},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {4, 4},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {6, 6, 6},                      /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {4, 4, 4},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  2,                              /* MMX or SSE register to integer */
  32,                             /* size of l1 cache.  */
  2048,                           /* size of l2 cache.  */
  128,                            /* size of prefetch block */
  8,                              /* number of parallel prefetches */
  3,                              /* Branch cost */
  COSTS_N_INSNS (3),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),             /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

/* Generic64 should produce code tuned for Nocona and K8.  */
static const
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  /* On all chips taken into consideration, lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results
     in the use of unnecessary temporary registers, causing regressions on
     several SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,          /* cost of a lea instruction */
  COSTS_N_INSNS (1),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (3),             /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),             /* HI */
   COSTS_N_INSNS (3),             /* SI */
   COSTS_N_INSNS (4),             /* DI */
   COSTS_N_INSNS (2)},            /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),            /* HI */
   COSTS_N_INSNS (42),            /* SI */
   COSTS_N_INSNS (74),            /* DI */
   COSTS_N_INSNS (74)},           /* other */
  COSTS_N_INSNS (1),              /* cost of movsx */
  COSTS_N_INSNS (1),              /* cost of movzx */
  8,                              /* "large" insn */
  17,                             /* MOVE_RATIO */
  4,                              /* cost for loading QImode using movzbl */
  {4, 4, 4},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {4, 4, 4},                      /* cost of storing integer registers */
  4,                              /* cost of reg,reg fld/fst */
  {12, 12, 12},                   /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {6, 6, 8},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {8, 8},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {8, 8},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {8, 8, 8},                      /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {8, 8, 8},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  5,                              /* MMX or SSE register to integer */
  32,                             /* size of l1 cache.  */
  512,                            /* size of l2 cache.  */
  64,                             /* size of prefetch block */
  6,                              /* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when
     this value is increased to the perhaps more appropriate value of 5.  */
  3,                              /* Branch cost */
  COSTS_N_INSNS (8),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),             /* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */
static const
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),              /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,          /* cost of a lea instruction */
  COSTS_N_INSNS (1),              /* variable shift costs */
  COSTS_N_INSNS (1),              /* constant shift costs */
  {COSTS_N_INSNS (3),             /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),             /* HI */
   COSTS_N_INSNS (3),             /* SI */
   COSTS_N_INSNS (4),             /* DI */
   COSTS_N_INSNS (2)},            /* other */
  0,                              /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),            /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),            /* HI */
   COSTS_N_INSNS (42),            /* SI */
   COSTS_N_INSNS (74),            /* DI */
   COSTS_N_INSNS (74)},           /* other */
  COSTS_N_INSNS (1),              /* cost of movsx */
  COSTS_N_INSNS (1),              /* cost of movzx */
  8,                              /* "large" insn */
  17,                             /* MOVE_RATIO */
  4,                              /* cost for loading QImode using movzbl */
  {4, 4, 4},                      /* cost of loading integer registers
                                     in QImode, HImode and SImode.
                                     Relative to reg-reg move (2).  */
  {4, 4, 4},                      /* cost of storing integer registers */
  4,                              /* cost of reg,reg fld/fst */
  {12, 12, 12},                   /* cost of loading fp registers
                                     in SFmode, DFmode and XFmode */
  {6, 6, 8},                      /* cost of storing fp registers
                                     in SFmode, DFmode and XFmode */
  2,                              /* cost of moving MMX register */
  {8, 8},                         /* cost of loading MMX registers
                                     in SImode and DImode */
  {8, 8},                         /* cost of storing MMX registers
                                     in SImode and DImode */
  2,                              /* cost of moving SSE register */
  {8, 8, 8},                      /* cost of loading SSE registers
                                     in SImode, DImode and TImode */
  {8, 8, 8},                      /* cost of storing SSE registers
                                     in SImode, DImode and TImode */
  5,                              /* MMX or SSE register to integer */
  32,                             /* size of l1 cache.  */
  256,                            /* size of l2 cache.  */
  64,                             /* size of prefetch block */
  6,                              /* number of parallel prefetches */
  3,                              /* Branch cost */
  COSTS_N_INSNS (8),              /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),              /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),             /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),              /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),              /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),             /* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                              /* scalar_stmt_cost.  */
  1,                              /* scalar load_cost.  */
  1,                              /* scalar_store_cost.  */
  1,                              /* vec_stmt_cost.  */
  1,                              /* vec_to_scalar_cost.  */
  1,                              /* scalar_to_vec_cost.  */
  1,                              /* vec_align_load_cost.  */
  2,                              /* vec_unalign_load_cost.  */
  1,                              /* vec_store_cost.  */
  3,                              /* cond_taken_branch_cost.  */
  1,                              /* cond_not_taken_branch_cost.  */
};

const struct processor_costs *ix86_cost = &pentium_cost;

/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)

/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];

/* Feature tests against the various tunings used to create ix86_tune_features
   based on the processor mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling it for Generic64 seems like a good code-size
     tradeoff.  We can't enable it for 32-bit generic because it does not
     work well with PPro base chips.  */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,

  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ZERO_EXTEND_WITH_AND */
  m_486 | m_PENT,

  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_PPRO | m_AMD_MULTIPLE | m_K6 | m_CORE2 | m_GENERIC,

  /* X86_TUNE_DEEP_BRANCH_PREDICTION */
  m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,

  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
     on simulation result.  But after P4 was made, no performance benefit
     was observed with branch hints.  It also increases the code size.
     As a result, icc never generates branch hints.  */
  0,

  /* X86_TUNE_DOUBLE_WITH_ADD */
  ~m_386,

  /* X86_TUNE_USE_SAHF */
  m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,

  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on the Generic32 compilation setting as well.  However
     in the current implementation the partial register stalls are not
     eliminated very well - they can be introduced via subregs synthesized
     by combine and can happen in caller/callee saving sequences.  Because
     this option pays back little on PPro based chips and is in conflict
     with the partial reg dependencies used by Athlon/P4 based chips, it
     is better to leave it off for generic32 for now.  */
  m_PPRO,

  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_MOV0 */
  m_K6,

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
  m_PENT4,

  /* X86_TUNE_SPLIT_LONG_MOVES */
  m_PPRO,

  /* X86_TUNE_READ_MODIFY_WRITE */
  ~m_PENT,

  /* X86_TUNE_READ_MODIFY */
  ~(m_PENT | m_PPRO),

  /* X86_TUNE_PROMOTE_QIMODE */
  m_K6_GEODE | m_PENT | m_386 | m_486 | m_AMD_MULTIPLE | m_CORE2
  | m_GENERIC /* | m_PENT4 ? */,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_PENT | m_486 | m_386),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_PENT4 | m_NOCONA,

  /* X86_TUNE_QIMODE_MATH */
  ~0,

  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls was more effective.  */
  ~m_PPRO,

  /* X86_TUNE_PROMOTE_QI_REGS */
  0,

  /* X86_TUNE_PROMOTE_HI_REGS */
  m_PPRO,

  /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop.  */
  m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ADD_ESP_8 */
  m_AMD_MULTIPLE | m_PPRO | m_K6_GEODE | m_386
  | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_4 */
  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_8 */
  m_AMD_MULTIPLE | m_PPRO | m_386 | m_486
  | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
     for DFmode copies */
  ~(m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC | m_GEODE),

  /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
     conflict here in between PPro/Pentium4 based chips that treat 128bit
     SSE registers as single units versus K8 based chips that divide SSE
     registers into two 64bit halves.  This knob promotes all store
     destinations to be 128bit to allow register renaming on 128bit SSE
     units, but usually results in one extra microop on 64bit SSE units.
     Experimental results show that disabling this option on P4 brings
     over 20% SPECfp regression, while enabling it on K8 brings roughly
     2.4% regression that can be partly masked by careful scheduling of
     moves.  */
  m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_AMDFAM10,

  /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
  m_AMDFAM10,

  /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and
     dependencies are resolved on SSE register parts instead of whole
     registers, so we may maintain just the lower part of scalar values
     in proper format, leaving the upper part undefined.  */
  m_ATHLON_K8,

  /* X86_TUNE_SSE_TYPELESS_STORES */
  m_AMD_MULTIPLE,

  /* X86_TUNE_SSE_LOAD0_BY_PXOR */
  m_PPRO | m_PENT4 | m_NOCONA,

  /* X86_TUNE_MEMORY_MISMATCH_STALL */
  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_PROLOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EPILOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHIFT1 */
  ~m_486,

  /* X86_TUNE_USE_FFREEP */
  m_AMD_MULTIPLE,

  /* X86_TUNE_INTER_UNIT_MOVES */
  ~(m_AMD_MULTIPLE | m_GENERIC),

  /* X86_TUNE_INTER_UNIT_CONVERSIONS */
  ~(m_AMDFAM10),

  /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
     than 4 branch instructions in the 16 byte window.  */
  m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SCHEDULE */
  m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_BT */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_INCDEC */
  ~(m_PENT4 | m_NOCONA | m_GENERIC),

  /* X86_TUNE_PAD_RETURNS */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EXT_80387_CONSTANTS */
  m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHORTEN_X87_SSE */
  ~m_K8,

  /* X86_TUNE_AVOID_VECTOR_DECODE */
  m_K8 | m_GENERIC64,

  /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
     HImode and SImode multiply, but the 386 and 486 do HImode multiply
     faster.  */
  ~(m_386 | m_486),

  /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of a 32-bit constant and memory is
     vector path on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_SLOW_IMUL_IMM8: Imul of an 8-bit constant is vector path on
     AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
     than a MOV.  */
  m_PENT,

  /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
     but is one byte longer.  */
  m_PENT,

  /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a
     memory operand that cannot be represented using a modRM byte.  The
     XOR replacement is long decoded, so this split helps here as well.  */
  m_K6,

  /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
     from FP to FP.  */
  m_AMDFAM10 | m_GENERIC,

  /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
     from integer to FP.  */
  m_AMDFAM10,

  /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
     with a subsequent conditional jump instruction into a single
     compare-and-branch uop.  */
  m_CORE2,
};
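
/* Editorial sketch (not part of the imported source): the masks above are
   folded into the boolean ix86_tune_features[] array once the tuning
   target is known; the option-handling code in this file does, in
   essence, the following (paraphrased).  */
#if 0
void
example_expand_tune_masks (void)
{
  unsigned int ix86_tune_mask = 1u << ix86_tune;
  int i;

  for (i = 0; i < X86_TUNE_LAST; ++i)
    ix86_tune_features[i]
      = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
}
#endif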
1452 /* Feature tests against the various architecture variations. */
1453 unsigned char ix86_arch_features[X86_ARCH_LAST];
1455 /* Feature tests against the various architecture variations, used to create
1456 ix86_arch_features based on the processor mask. */
1457 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1458 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1459 ~(m_386 | m_486 | m_PENT | m_K6),
1461 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1462 ~m_386,
1464 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1465 ~(m_386 | m_486),
1467 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1468 ~m_386,
1470 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1471 ~m_386,
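/* A minimal sketch (for orientation; see the corresponding loops in
   override_options below) of how the feature tables above are consumed:
   each bitmask row collapses into a 0/1 test for the selected CPU, e.g.

     ix86_arch_mask = 1u << ix86_arch;
     ix86_arch_features[X86_ARCH_CMPXCHG]
       = !!(initial_ix86_arch_features[X86_ARCH_CMPXCHG] & ix86_arch_mask);

   and likewise for initial_ix86_tune_features / ix86_tune_features.  */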
1474 static const unsigned int x86_accumulate_outgoing_args
1475 = m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1477 static const unsigned int x86_arch_always_fancy_math_387
1478 = m_PENT | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1479 | m_NOCONA | m_CORE2 | m_GENERIC;
1481 static enum stringop_alg stringop_alg = no_stringop;
1483 /* If the average insn count for a single function invocation is
1484 lower than this constant, emit fast (but longer) prologue and
1485 epilogue code. */
1486 #define FAST_PROLOGUE_INSN_COUNT 20
1488 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1489 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1490 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1491 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1493 /* Array of the smallest class containing reg number REGNO, indexed by
1494 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1496 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1498 /* ax, dx, cx, bx */
1499 AREG, DREG, CREG, BREG,
1500 /* si, di, bp, sp */
1501 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1502 /* FP registers */
1503 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1504 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1505 /* arg pointer */
1506 NON_Q_REGS,
1507 /* flags, fpsr, fpcr, frame */
1508 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1509 /* SSE registers */
1510 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1511 SSE_REGS, SSE_REGS,
1512 /* MMX registers */
1513 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1514 MMX_REGS, MMX_REGS,
1515 /* REX registers */
1516 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1517 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1518 /* SSE REX registers */
1519 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1520 SSE_REGS, SSE_REGS,
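/* A few spot checks, for illustration: REGNO_REG_CLASS (0) is AREG
   (%eax); %esi and %edi get the singleton classes SIREG and DIREG
   (wanted by the string instructions); %ebp and %esp land in NON_Q_REGS
   because they have no addressable low byte in 32-bit mode.  */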
1523 /* The "default" register map used in 32-bit mode. */
1525 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1527 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1528 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1529 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1530 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1531 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1532 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1533 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1536 /* The "default" register map used in 64-bit mode. */
1538 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1540 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1541 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1542 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1543 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1544 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1545 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
1546 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
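/* Spot check against the x86-64 psABI DWARF numbering: gcc regno 0
   (%rax) maps to 0, regno 4 (%rsi) to 4 and regno 7 (%rsp) to 7, while
   the x87 stack registers start at 33 here rather than at 12 as in the
   32-bit map above.  */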
1549 /* Define the register numbers to be used in Dwarf debugging information.
1550 The SVR4 reference port C compiler uses the following register numbers
1551 in its Dwarf output code:
1552 0 for %eax (gcc regno = 0)
1553 1 for %ecx (gcc regno = 2)
1554 2 for %edx (gcc regno = 1)
1555 3 for %ebx (gcc regno = 3)
1556 4 for %esp (gcc regno = 7)
1557 5 for %ebp (gcc regno = 6)
1558 6 for %esi (gcc regno = 4)
1559 7 for %edi (gcc regno = 5)
1560 The following three DWARF register numbers are never generated by
1561 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1562 believes these numbers have these meanings.
1563 8 for %eip (no gcc equivalent)
1564 9 for %eflags (gcc regno = 17)
1565 10 for %trapno (no gcc equivalent)
1566 It is not at all clear how we should number the FP stack registers
1567 for the x86 architecture. If the version of SDB on x86/svr4 were
1568 a bit less brain dead with respect to floating-point then we would
1569 have a precedent to follow with respect to DWARF register numbers
1570 for x86 FP registers, but the SDB on x86/svr4 is so completely
1571 broken with respect to FP registers that it is hardly worth thinking
1572 of it as something to strive for compatibility with.
1573 The version of x86/svr4 SDB I have at the moment does (partially)
1574 seem to believe that DWARF register number 11 is associated with
1575 the x86 register %st(0), but that's about all. Higher DWARF
1576 register numbers don't seem to be associated with anything in
1577 particular, and even for DWARF regno 11, SDB only seems to under-
1578 stand that it should say that a variable lives in %st(0) (when
1579 asked via an `=' command) if we said it was in DWARF regno 11,
1580 but SDB still prints garbage when asked for the value of the
1581 variable in question (via a `/' command).
1582 (Also note that the labels SDB prints for various FP stack regs
1583 when doing an `x' command are all wrong.)
1584 Note that these problems generally don't affect the native SVR4
1585 C compiler because it doesn't allow the use of -O with -g and
1586 because when it is *not* optimizing, it allocates a memory
1587 location for each floating-point variable, and the memory
1588 location is what gets described in the DWARF AT_location
1589 attribute for the variable in question.
1590 Regardless of the severe mental illness of the x86/svr4 SDB, we
1591 do something sensible here and we use the following DWARF
1592 register numbers. Note that these are all stack-top-relative
1593 numbers.
1594 11 for %st(0) (gcc regno = 8)
1595 12 for %st(1) (gcc regno = 9)
1596 13 for %st(2) (gcc regno = 10)
1597 14 for %st(3) (gcc regno = 11)
1598 15 for %st(4) (gcc regno = 12)
1599 16 for %st(5) (gcc regno = 13)
1600 17 for %st(6) (gcc regno = 14)
1601 18 for %st(7) (gcc regno = 15)
1603 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1605 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1606 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1607 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1608 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1609 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1610 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1611 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1614 /* Test and compare insns in i386.md store the information needed to
1615 generate branch and scc insns here. */
1617 rtx ix86_compare_op0 = NULL_RTX;
1618 rtx ix86_compare_op1 = NULL_RTX;
1619 rtx ix86_compare_emitted = NULL_RTX;
1621 /* Define parameter passing and return registers. */
1623 static int const x86_64_int_parameter_registers[6] =
1625 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1628 static int const x86_64_ms_abi_int_parameter_registers[4] =
1630 CX_REG, DX_REG, R8_REG, R9_REG
1633 static int const x86_64_int_return_registers[4] =
1635 AX_REG, DX_REG, DI_REG, SI_REG
1638 /* Define the structure for the machine field in struct function. */
1640 struct stack_local_entry GTY(())
1642 unsigned short mode;
1643 unsigned short n;
1644 rtx rtl;
1645 struct stack_local_entry *next;
1648 /* Structure describing stack frame layout.
1649 Stack grows downward:
1651 [arguments]
1652 <- ARG_POINTER
1653 saved pc
1655 saved frame pointer if frame_pointer_needed
1656 <- HARD_FRAME_POINTER
1657 [saved regs]
1659 [padding0]
1661 [saved SSE regs]
1663 [padding1] \
1665 [va_arg registers] (
1666 > to_allocate <- FRAME_POINTER
1667 [frame] (
1669 [padding2] /
1671 struct ix86_frame
1673 int padding0;
1674 int nsseregs;
1675 int nregs;
1676 int padding1;
1677 int va_arg_size;
1678 HOST_WIDE_INT frame;
1679 int padding2;
1680 int outgoing_arguments_size;
1681 int red_zone_size;
1683 HOST_WIDE_INT to_allocate;
1684 /* The offsets relative to ARG_POINTER. */
1685 HOST_WIDE_INT frame_pointer_offset;
1686 HOST_WIDE_INT hard_frame_pointer_offset;
1687 HOST_WIDE_INT stack_pointer_offset;
1689 /* When save_regs_using_mov is set, emit prologue using
1690 move instead of push instructions. */
1691 bool save_regs_using_mov;
1694 /* Code model option. */
1695 enum cmodel ix86_cmodel;
1696 /* Asm dialect. */
1697 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1698 /* TLS dialects. */
1699 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1701 /* Which unit we are generating floating point math for. */
1702 enum fpmath_unit ix86_fpmath;
1704 /* Which cpu are we scheduling for. */
1705 enum attr_cpu ix86_schedule;
1707 /* Which cpu are we optimizing for. */
1708 enum processor_type ix86_tune;
1710 /* Which instruction set architecture to use. */
1711 enum processor_type ix86_arch;
1713 /* True if the SSE prefetch instruction is not a NOP. */
1714 int x86_prefetch_sse;
1716 /* ix86_regparm_string as a number */
1717 static int ix86_regparm;
1719 /* -mstackrealign option */
1720 extern int ix86_force_align_arg_pointer;
1721 static const char ix86_force_align_arg_pointer_string[]
1722 = "force_align_arg_pointer";
1724 static rtx (*ix86_gen_leave) (void);
1725 static rtx (*ix86_gen_pop1) (rtx);
1726 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1727 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1728 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx);
1729 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1730 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1731 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1733 /* Preferred alignment for stack boundary in bits. */
1734 unsigned int ix86_preferred_stack_boundary;
1736 /* Alignment for incoming stack boundary in bits specified at
1737 command line. */
1738 static unsigned int ix86_user_incoming_stack_boundary;
1740 /* Default alignment for incoming stack boundary in bits. */
1741 static unsigned int ix86_default_incoming_stack_boundary;
1743 /* Alignment for incoming stack boundary in bits. */
1744 unsigned int ix86_incoming_stack_boundary;
1746 /* Values 1-5: see jump.c */
1747 int ix86_branch_cost;
1749 /* Calling abi specific va_list type nodes. */
1750 static GTY(()) tree sysv_va_list_type_node;
1751 static GTY(()) tree ms_va_list_type_node;
1753 /* Variables which are this size or smaller are put in the data/bss
1754 or ldata/lbss sections. */
1756 int ix86_section_threshold = 65536;
1758 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1759 char internal_label_prefix[16];
1760 int internal_label_prefix_len;
1762 /* Fence to use after loop using movnt. */
1763 tree x86_mfence;
1765 /* Register class used for passing a given 64-bit part of the argument.
1766 These represent classes as documented by the psABI, with the exception
1767 of the SSESF and SSEDF classes, which are basically the SSE class; gcc
1768 just uses an SFmode or DFmode move instead of a DImode one to avoid
1770 reformatting penalties. Similarly, we play games with INTEGERSI_CLASS to
1771 use cheaper SImode moves whenever possible (the upper half is then known
to be padding). */
1772 enum x86_64_reg_class
1774 X86_64_NO_CLASS,
1775 X86_64_INTEGER_CLASS,
1776 X86_64_INTEGERSI_CLASS,
1777 X86_64_SSE_CLASS,
1778 X86_64_SSESF_CLASS,
1779 X86_64_SSEDF_CLASS,
1780 X86_64_SSEUP_CLASS,
1781 X86_64_X87_CLASS,
1782 X86_64_X87UP_CLASS,
1783 X86_64_COMPLEX_X87_CLASS,
1784 X86_64_MEMORY_CLASS
1787 #define MAX_CLASSES 4
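/* A worked example of the classification, sketched after the psABI
   rules that classify_argument applies later in this file:

     struct { double d; long l; };

   occupies two eightbytes, classified as X86_64_SSEDF_CLASS and
   X86_64_INTEGER_CLASS respectively, so the aggregate is passed in one
   SSE register and one integer register.  */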
1789 /* Table of constants used by fldpi, fldln2, etc. */
1790 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1791 static bool ext_80387_constants_init = 0;
1794 static struct machine_function * ix86_init_machine_status (void);
1795 static rtx ix86_function_value (const_tree, const_tree, bool);
1796 static int ix86_function_regparm (const_tree, const_tree);
1797 static void ix86_compute_frame_layout (struct ix86_frame *);
1798 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1799 rtx, rtx, int);
1800 static void ix86_add_new_builtins (int);
1802 enum ix86_function_specific_strings
1804 IX86_FUNCTION_SPECIFIC_ARCH,
1805 IX86_FUNCTION_SPECIFIC_TUNE,
1806 IX86_FUNCTION_SPECIFIC_FPMATH,
1807 IX86_FUNCTION_SPECIFIC_MAX
1810 static char *ix86_target_string (int, int, const char *, const char *,
1811 const char *, bool);
1812 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1813 static void ix86_function_specific_save (struct cl_target_option *);
1814 static void ix86_function_specific_restore (struct cl_target_option *);
1815 static void ix86_function_specific_print (FILE *, int,
1816 struct cl_target_option *);
1817 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1818 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1819 static bool ix86_can_inline_p (tree, tree);
1820 static void ix86_set_current_function (tree);
1823 /* The svr4 ABI for the i386 says that records and unions are returned
1824 in memory. */
1825 #ifndef DEFAULT_PCC_STRUCT_RETURN
1826 #define DEFAULT_PCC_STRUCT_RETURN 1
1827 #endif
1829 /* Whether -mtune= or -march= were specified */
1830 static int ix86_tune_defaulted;
1831 static int ix86_arch_specified;
1833 /* Bit flags that specify the ISA we are compiling for. */
1834 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1836 /* A mask of ix86_isa_flags that includes bit X if X
1837 was set or cleared on the command line. */
1838 static int ix86_isa_flags_explicit;
1840 /* Define a set of ISAs which are available when a given ISA is
1841 enabled. MMX and SSE ISAs are handled separately. */
1843 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1844 #define OPTION_MASK_ISA_3DNOW_SET \
1845 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1847 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1848 #define OPTION_MASK_ISA_SSE2_SET \
1849 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1850 #define OPTION_MASK_ISA_SSE3_SET \
1851 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1852 #define OPTION_MASK_ISA_SSSE3_SET \
1853 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1854 #define OPTION_MASK_ISA_SSE4_1_SET \
1855 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1856 #define OPTION_MASK_ISA_SSE4_2_SET \
1857 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1858 #define OPTION_MASK_ISA_AVX_SET \
1859 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1860 #define OPTION_MASK_ISA_FMA_SET \
1861 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1863 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1864 as -msse4.2. */
1865 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
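/* For example, OPTION_MASK_ISA_SSE4_1_SET expands transitively to
   SSE4.1 | SSSE3 | SSE3 | SSE2 | SSE, so a single -msse4.1 turns on the
   whole chain of prerequisite ISAs.  */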
1867 #define OPTION_MASK_ISA_SSE4A_SET \
1868 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1869 #define OPTION_MASK_ISA_SSE5_SET \
1870 (OPTION_MASK_ISA_SSE5 | OPTION_MASK_ISA_SSE4A_SET)
1872 /* AES and PCLMUL need SSE2 because they use xmm registers. */
1873 #define OPTION_MASK_ISA_AES_SET \
1874 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1875 #define OPTION_MASK_ISA_PCLMUL_SET \
1876 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1878 #define OPTION_MASK_ISA_ABM_SET \
1879 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1880 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1881 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1882 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1884 /* Define a set of ISAs which aren't available when a given ISA is
1885 disabled. MMX and SSE ISAs are handled separately. */
1887 #define OPTION_MASK_ISA_MMX_UNSET \
1888 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1889 #define OPTION_MASK_ISA_3DNOW_UNSET \
1890 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1891 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1893 #define OPTION_MASK_ISA_SSE_UNSET \
1894 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1895 #define OPTION_MASK_ISA_SSE2_UNSET \
1896 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
1897 #define OPTION_MASK_ISA_SSE3_UNSET \
1898 (OPTION_MASK_ISA_SSE3 \
1899 | OPTION_MASK_ISA_SSSE3_UNSET \
1900 | OPTION_MASK_ISA_SSE4A_UNSET)
1901 #define OPTION_MASK_ISA_SSSE3_UNSET \
1902 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
1903 #define OPTION_MASK_ISA_SSE4_1_UNSET \
1904 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
1905 #define OPTION_MASK_ISA_SSE4_2_UNSET \
1906 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET)
1907 #define OPTION_MASK_ISA_AVX_UNSET \
1908 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET)
1909 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
1911 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
1912 as -mno-sse4.1. */
1913 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
1915 #define OPTION_MASK_ISA_SSE4A_UNSET \
1916 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE5_UNSET)
1917 #define OPTION_MASK_ISA_SSE5_UNSET OPTION_MASK_ISA_SSE5
1918 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
1919 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
1920 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
1921 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
1922 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
1923 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
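/* These chains mirror the _SET chains in the other direction: e.g.
   OPTION_MASK_ISA_SSE2_UNSET transitively pulls in SSE3, SSSE3,
   SSE4.1, SSE4.2, AVX, FMA, SSE4A and SSE5, so -mno-sse2 also disables
   everything that depends on SSE2.  (AES and PCLMUL keep standalone
   UNSET masks and are not cleared here.)  */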
1925 /* Vectorization library interface and handlers. */
1926 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
1927 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
1928 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
1930 /* Processor target table, indexed by processor number */
1931 struct ptt
1933 const struct processor_costs *cost; /* Processor costs */
1934 const int align_loop; /* Default alignments. */
1935 const int align_loop_max_skip;
1936 const int align_jump;
1937 const int align_jump_max_skip;
1938 const int align_func;
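/* The *_max_skip fields bound the cost of alignment: the directive is
   emitted so that at most that many padding bytes are inserted, and the
   alignment is skipped entirely when it would cost more
   (cf. ASM_OUTPUT_MAX_SKIP_ALIGN).  */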
1941 static const struct ptt processor_target_table[PROCESSOR_max] =
1943 {&i386_cost, 4, 3, 4, 3, 4},
1944 {&i486_cost, 16, 15, 16, 15, 16},
1945 {&pentium_cost, 16, 7, 16, 7, 16},
1946 {&pentiumpro_cost, 16, 15, 16, 10, 16},
1947 {&geode_cost, 0, 0, 0, 0, 0},
1948 {&k6_cost, 32, 7, 32, 7, 32},
1949 {&athlon_cost, 16, 7, 16, 7, 16},
1950 {&pentium4_cost, 0, 0, 0, 0, 0},
1951 {&k8_cost, 16, 7, 16, 7, 16},
1952 {&nocona_cost, 0, 0, 0, 0, 0},
1953 {&core2_cost, 16, 10, 16, 10, 16},
1954 {&generic32_cost, 16, 7, 16, 7, 16},
1955 {&generic64_cost, 16, 10, 16, 10, 16},
1956 {&amdfam10_cost, 32, 24, 32, 7, 32}
1959 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
1961 "generic",
1962 "i386",
1963 "i486",
1964 "pentium",
1965 "pentium-mmx",
1966 "pentiumpro",
1967 "pentium2",
1968 "pentium3",
1969 "pentium4",
1970 "pentium-m",
1971 "prescott",
1972 "nocona",
1973 "core2",
1974 "geode",
1975 "k6",
1976 "k6-2",
1977 "k6-3",
1978 "athlon",
1979 "athlon-4",
1980 "k8",
1981 "amdfam10"
1984 /* Implement TARGET_HANDLE_OPTION. */
1986 static bool
1987 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1989 switch (code)
1991 case OPT_mmmx:
1992 if (value)
1994 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
1995 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
1997 else
1999 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2000 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2002 return true;
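/* Note the idiom used for every option in this function: the _SET mask
   switches the option on together with its prerequisites, while
   ix86_isa_flags_explicit records that the user made an explicit
   choice, so that override_options later refrains from re-defaulting
   those bits from the -march table entry.  */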
2004 case OPT_m3dnow:
2005 if (value)
2007 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2008 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2010 else
2012 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2013 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2015 return true;
2017 case OPT_m3dnowa:
2018 return false;
2020 case OPT_msse:
2021 if (value)
2023 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2024 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2026 else
2028 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2029 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2031 return true;
2033 case OPT_msse2:
2034 if (value)
2036 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2037 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2039 else
2041 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2042 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2044 return true;
2046 case OPT_msse3:
2047 if (value)
2049 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2050 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2052 else
2054 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2055 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2057 return true;
2059 case OPT_mssse3:
2060 if (value)
2062 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2063 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2065 else
2067 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2068 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2070 return true;
2072 case OPT_msse4_1:
2073 if (value)
2075 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2076 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2078 else
2080 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2081 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2083 return true;
2085 case OPT_msse4_2:
2086 if (value)
2088 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2089 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2091 else
2093 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2094 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2096 return true;
2098 case OPT_mavx:
2099 if (value)
2101 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2102 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2104 else
2106 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2107 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2109 return true;
2111 case OPT_mfma:
2112 if (value)
2114 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2115 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2117 else
2119 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2120 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2122 return true;
2124 case OPT_msse4:
2125 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2126 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2127 return true;
2129 case OPT_mno_sse4:
2130 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2131 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2132 return true;
2134 case OPT_msse4a:
2135 if (value)
2137 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2138 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2140 else
2142 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2143 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2145 return true;
2147 case OPT_msse5:
2148 if (value)
2150 ix86_isa_flags |= OPTION_MASK_ISA_SSE5_SET;
2151 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_SET;
2153 else
2155 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE5_UNSET;
2156 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_UNSET;
2158 return true;
2160 case OPT_mabm:
2161 if (value)
2163 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2164 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2166 else
2168 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2169 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2171 return true;
2173 case OPT_mpopcnt:
2174 if (value)
2176 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2177 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2179 else
2181 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2182 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2184 return true;
2186 case OPT_msahf:
2187 if (value)
2189 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2190 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2192 else
2194 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2195 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2197 return true;
2199 case OPT_mcx16:
2200 if (value)
2202 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2203 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2205 else
2207 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2208 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2210 return true;
2212 case OPT_maes:
2213 if (value)
2215 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2216 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2218 else
2220 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2221 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2223 return true;
2225 case OPT_mpclmul:
2226 if (value)
2228 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2229 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2231 else
2233 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2234 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2236 return true;
2238 default:
2239 return true;
2243 /* Return a string that documents the current -m options. The caller is
2244 responsible for freeing the string. */
2246 static char *
2247 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2248 const char *fpmath, bool add_nl_p)
2250 struct ix86_target_opts
2252 const char *option; /* option string */
2253 int mask; /* isa mask options */
2256 /* This table is ordered so that options like -msse5 or -msse4.2 that
2257 imply preceding options are matched first. */
2258 static struct ix86_target_opts isa_opts[] =
2260 { "-m64", OPTION_MASK_ISA_64BIT },
2261 { "-msse5", OPTION_MASK_ISA_SSE5 },
2262 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2263 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2264 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2265 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2266 { "-msse3", OPTION_MASK_ISA_SSE3 },
2267 { "-msse2", OPTION_MASK_ISA_SSE2 },
2268 { "-msse", OPTION_MASK_ISA_SSE },
2269 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2270 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2271 { "-mmmx", OPTION_MASK_ISA_MMX },
2272 { "-mabm", OPTION_MASK_ISA_ABM },
2273 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2274 { "-maes", OPTION_MASK_ISA_AES },
2275 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2278 /* Flag options. */
2279 static struct ix86_target_opts flag_opts[] =
2281 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2282 { "-m80387", MASK_80387 },
2283 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2284 { "-malign-double", MASK_ALIGN_DOUBLE },
2285 { "-mcld", MASK_CLD },
2286 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2287 { "-mieee-fp", MASK_IEEE_FP },
2288 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2289 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2290 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2291 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2292 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2293 { "-mno-fused-madd", MASK_NO_FUSED_MADD },
2294 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2295 { "-mno-red-zone", MASK_NO_RED_ZONE },
2296 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2297 { "-mrecip", MASK_RECIP },
2298 { "-mrtd", MASK_RTD },
2299 { "-msseregparm", MASK_SSEREGPARM },
2300 { "-mstack-arg-probe", MASK_STACK_PROBE },
2301 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2304 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2306 char isa_other[40];
2307 char target_other[40];
2308 unsigned num = 0;
2309 unsigned i, j;
2310 char *ret;
2311 char *ptr;
2312 size_t len;
2313 size_t line_len;
2314 size_t sep_len;
2316 memset (opts, '\0', sizeof (opts));
2318 /* Add -march= option. */
2319 if (arch)
2321 opts[num][0] = "-march=";
2322 opts[num++][1] = arch;
2325 /* Add -mtune= option. */
2326 if (tune)
2328 opts[num][0] = "-mtune=";
2329 opts[num++][1] = tune;
2332 /* Pick out the options in isa options. */
2333 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2335 if ((isa & isa_opts[i].mask) != 0)
2337 opts[num++][0] = isa_opts[i].option;
2338 isa &= ~ isa_opts[i].mask;
2342 if (isa && add_nl_p)
2344 opts[num++][0] = isa_other;
2345 sprintf (isa_other, "(other isa: 0x%x)", isa);
2348 /* Add flag options. */
2349 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2351 if ((flags & flag_opts[i].mask) != 0)
2353 opts[num++][0] = flag_opts[i].option;
2354 flags &= ~ flag_opts[i].mask;
2358 if (flags && add_nl_p)
2360 opts[num++][0] = target_other;
2361 sprintf (target_other, "(other flags: 0x%x)", flags);
2364 /* Add -fpmath= option. */
2365 if (fpmath)
2367 opts[num][0] = "-mfpmath=";
2368 opts[num++][1] = fpmath;
2371 /* Any options? */
2372 if (num == 0)
2373 return NULL;
2375 gcc_assert (num < ARRAY_SIZE (opts));
2377 /* Size the string. */
2378 len = 0;
2379 sep_len = (add_nl_p) ? 3 : 1;
2380 for (i = 0; i < num; i++)
2382 len += sep_len;
2383 for (j = 0; j < 2; j++)
2384 if (opts[i][j])
2385 len += strlen (opts[i][j]);
2388 /* Build the string. */
2389 ret = ptr = (char *) xmalloc (len);
2390 line_len = 0;
2392 for (i = 0; i < num; i++)
2394 size_t len2[2];
2396 for (j = 0; j < 2; j++)
2397 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2399 if (i != 0)
2401 *ptr++ = ' ';
2402 line_len++;
2404 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2406 *ptr++ = '\\';
2407 *ptr++ = '\n';
2408 line_len = 0;
2412 for (j = 0; j < 2; j++)
2413 if (opts[i][j])
2415 memcpy (ptr, opts[i][j], len2[j]);
2416 ptr += len2[j];
2417 line_len += len2[j];
2421 *ptr = '\0';
2422 gcc_assert (ret + len >= ptr);
2424 return ret;
2427 /* Function that is callable from the debugger to print the current
2428 options. */
2429 void
2430 ix86_debug_options (void)
2432 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2433 ix86_arch_string, ix86_tune_string,
2434 ix86_fpmath_string, true);
2436 if (opts)
2438 fprintf (stderr, "%s\n\n", opts);
2439 free (opts);
2441 else
2442 fprintf (stderr, "<no options>\n\n");
2444 return;
2447 /* Sometimes certain combinations of command options do not make
2448 sense on a particular target machine. You can define a macro
2449 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2450 defined, is executed once just after all the command options have
2451 been parsed.
2453 Don't use this macro to turn on various extra optimizations for
2454 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2456 void
2457 override_options (bool main_args_p)
2459 int i;
2460 unsigned int ix86_arch_mask, ix86_tune_mask;
2461 const char *prefix;
2462 const char *suffix;
2463 const char *sw;
2465 /* Comes from final.c -- no real reason to change it. */
2466 #define MAX_CODE_ALIGN 16
2468 enum pta_flags
2470 PTA_SSE = 1 << 0,
2471 PTA_SSE2 = 1 << 1,
2472 PTA_SSE3 = 1 << 2,
2473 PTA_MMX = 1 << 3,
2474 PTA_PREFETCH_SSE = 1 << 4,
2475 PTA_3DNOW = 1 << 5,
2476 PTA_3DNOW_A = 1 << 6,
2477 PTA_64BIT = 1 << 7,
2478 PTA_SSSE3 = 1 << 8,
2479 PTA_CX16 = 1 << 9,
2480 PTA_POPCNT = 1 << 10,
2481 PTA_ABM = 1 << 11,
2482 PTA_SSE4A = 1 << 12,
2483 PTA_NO_SAHF = 1 << 13,
2484 PTA_SSE4_1 = 1 << 14,
2485 PTA_SSE4_2 = 1 << 15,
2486 PTA_SSE5 = 1 << 16,
2487 PTA_AES = 1 << 17,
2488 PTA_PCLMUL = 1 << 18,
2489 PTA_AVX = 1 << 19,
2490 PTA_FMA = 1 << 20
2493 static struct pta
2495 const char *const name; /* processor name or nickname. */
2496 const enum processor_type processor;
2497 const enum attr_cpu schedule;
2498 const unsigned /*enum pta_flags*/ flags;
2500 const processor_alias_table[] =
2502 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2503 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2504 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2505 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2506 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2507 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2508 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2509 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2510 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2511 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2512 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2513 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2514 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2515 PTA_MMX | PTA_SSE},
2516 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2517 PTA_MMX | PTA_SSE},
2518 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2519 PTA_MMX | PTA_SSE | PTA_SSE2},
2520 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2521 PTA_MMX | PTA_SSE | PTA_SSE2},
2522 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2523 PTA_MMX | PTA_SSE | PTA_SSE2},
2524 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2525 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2526 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2527 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2528 | PTA_CX16 | PTA_NO_SAHF},
2529 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2530 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2531 | PTA_SSSE3 | PTA_CX16},
2532 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2533 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2534 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2535 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2536 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2537 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2538 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2539 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2540 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2541 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2542 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2543 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2544 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2545 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2546 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2547 {"x86-64", PROCESSOR_K8, CPU_K8,
2548 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2549 {"k8", PROCESSOR_K8, CPU_K8,
2550 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2551 | PTA_SSE2 | PTA_NO_SAHF},
2552 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2553 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2554 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2555 {"opteron", PROCESSOR_K8, CPU_K8,
2556 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2557 | PTA_SSE2 | PTA_NO_SAHF},
2558 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2559 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2560 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2561 {"athlon64", PROCESSOR_K8, CPU_K8,
2562 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2563 | PTA_SSE2 | PTA_NO_SAHF},
2564 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2565 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2566 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2567 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2568 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2569 | PTA_SSE2 | PTA_NO_SAHF},
2570 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2571 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2572 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2573 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2574 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2575 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2576 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2577 0 /* flags are only used for -march switch. */ },
2578 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2579 PTA_64BIT /* flags are only used for -march switch. */ },
2582 int const pta_size = ARRAY_SIZE (processor_alias_table);
2584 /* Set up prefix/suffix so the error messages refer to either the command
2585 line argument, or the attribute(target). */
2586 if (main_args_p)
2588 prefix = "-m";
2589 suffix = "";
2590 sw = "switch";
2592 else
2594 prefix = "option(\"";
2595 suffix = "\")";
2596 sw = "attribute";
2599 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2600 SUBTARGET_OVERRIDE_OPTIONS;
2601 #endif
2603 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2604 SUBSUBTARGET_OVERRIDE_OPTIONS;
2605 #endif
2607 /* -fPIC is the default for x86_64 Mach-O. */
2608 if (TARGET_MACHO && TARGET_64BIT)
2609 flag_pic = 2;
2611 /* Set the default values for switches whose default depends on TARGET_64BIT
2612 in case they weren't overwritten by command line options. */
2613 if (TARGET_64BIT)
2615 /* Mach-O doesn't support omitting the frame pointer for now. */
2616 if (flag_omit_frame_pointer == 2)
2617 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2618 if (flag_asynchronous_unwind_tables == 2)
2619 flag_asynchronous_unwind_tables = 1;
2620 if (flag_pcc_struct_return == 2)
2621 flag_pcc_struct_return = 0;
2623 else
2625 if (flag_omit_frame_pointer == 2)
2626 flag_omit_frame_pointer = 0;
2627 if (flag_asynchronous_unwind_tables == 2)
2628 flag_asynchronous_unwind_tables = 0;
2629 if (flag_pcc_struct_return == 2)
2630 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2633 /* Need to check -mtune=generic first. */
2634 if (ix86_tune_string)
2636 if (!strcmp (ix86_tune_string, "generic")
2637 || !strcmp (ix86_tune_string, "i686")
2638 /* As special support for cross compilers we read -mtune=native
2639 as -mtune=generic. With native compilers we won't see -mtune=native,
2640 as it will already have been rewritten by the driver. */
2641 || !strcmp (ix86_tune_string, "native"))
2643 if (TARGET_64BIT)
2644 ix86_tune_string = "generic64";
2645 else
2646 ix86_tune_string = "generic32";
2648 /* If this call is for setting the option attribute, allow the
2649 generic32/generic64 that was previously set. */
2650 else if (!main_args_p
2651 && (!strcmp (ix86_tune_string, "generic32")
2652 || !strcmp (ix86_tune_string, "generic64")))
2654 else if (!strncmp (ix86_tune_string, "generic", 7))
2655 error ("bad value (%s) for %stune=%s %s",
2656 ix86_tune_string, prefix, suffix, sw);
2658 else
2660 if (ix86_arch_string)
2661 ix86_tune_string = ix86_arch_string;
2662 if (!ix86_tune_string)
2664 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2665 ix86_tune_defaulted = 1;
2668 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2669 need to use a sensible tune option. */
2670 if (!strcmp (ix86_tune_string, "generic")
2671 || !strcmp (ix86_tune_string, "x86-64")
2672 || !strcmp (ix86_tune_string, "i686"))
2674 if (TARGET_64BIT)
2675 ix86_tune_string = "generic64";
2676 else
2677 ix86_tune_string = "generic32";
2680 if (ix86_stringop_string)
2682 if (!strcmp (ix86_stringop_string, "rep_byte"))
2683 stringop_alg = rep_prefix_1_byte;
2684 else if (!strcmp (ix86_stringop_string, "libcall"))
2685 stringop_alg = libcall;
2686 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2687 stringop_alg = rep_prefix_4_byte;
2688 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2689 && TARGET_64BIT)
2690 /* rep; movq isn't available in 32-bit code. */
2691 stringop_alg = rep_prefix_8_byte;
2692 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2693 stringop_alg = loop_1_byte;
2694 else if (!strcmp (ix86_stringop_string, "loop"))
2695 stringop_alg = loop;
2696 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2697 stringop_alg = unrolled_loop;
2698 else
2699 error ("bad value (%s) for %sstringop-strategy=%s %s",
2700 ix86_stringop_string, prefix, suffix, sw);
2702 if (!strcmp (ix86_tune_string, "x86-64"))
2703 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2704 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2705 prefix, suffix, prefix, suffix, prefix, suffix);
2707 if (!ix86_arch_string)
2708 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
2709 else
2710 ix86_arch_specified = 1;
2712 if (!strcmp (ix86_arch_string, "generic"))
2713 error ("generic CPU can be used only for %stune=%s %s",
2714 prefix, suffix, sw);
2715 if (!strncmp (ix86_arch_string, "generic", 7))
2716 error ("bad value (%s) for %sarch=%s %s",
2717 ix86_arch_string, prefix, suffix, sw);
2719 if (ix86_cmodel_string != 0)
2721 if (!strcmp (ix86_cmodel_string, "small"))
2722 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2723 else if (!strcmp (ix86_cmodel_string, "medium"))
2724 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2725 else if (!strcmp (ix86_cmodel_string, "large"))
2726 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2727 else if (flag_pic)
2728 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2729 else if (!strcmp (ix86_cmodel_string, "32"))
2730 ix86_cmodel = CM_32;
2731 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2732 ix86_cmodel = CM_KERNEL;
2733 else
2734 error ("bad value (%s) for %scmodel=%s %s",
2735 ix86_cmodel_string, prefix, suffix, sw);
2737 else
2739 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2740 use of rip-relative addressing. This eliminates fixups that
2741 would otherwise be needed if this object is to be placed in a
2742 DLL, and is essentially just as efficient as direct addressing. */
2743 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2744 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2745 else if (TARGET_64BIT)
2746 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2747 else
2748 ix86_cmodel = CM_32;
2750 if (ix86_asm_string != 0)
2752 if (! TARGET_MACHO
2753 && !strcmp (ix86_asm_string, "intel"))
2754 ix86_asm_dialect = ASM_INTEL;
2755 else if (!strcmp (ix86_asm_string, "att"))
2756 ix86_asm_dialect = ASM_ATT;
2757 else
2758 error ("bad value (%s) for %sasm=%s %s",
2759 ix86_asm_string, prefix, suffix, sw);
2761 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2762 error ("code model %qs not supported in the %s bit mode",
2763 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2764 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2765 sorry ("%i-bit mode not compiled in",
2766 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2768 for (i = 0; i < pta_size; i++)
2769 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2771 ix86_schedule = processor_alias_table[i].schedule;
2772 ix86_arch = processor_alias_table[i].processor;
2773 /* Default cpu tuning to the architecture. */
2774 ix86_tune = ix86_arch;
2776 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2777 error ("CPU you selected does not support x86-64 "
2778 "instruction set");
2780 if (processor_alias_table[i].flags & PTA_MMX
2781 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2782 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2783 if (processor_alias_table[i].flags & PTA_3DNOW
2784 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2785 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2786 if (processor_alias_table[i].flags & PTA_3DNOW_A
2787 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2788 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2789 if (processor_alias_table[i].flags & PTA_SSE
2790 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2791 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2792 if (processor_alias_table[i].flags & PTA_SSE2
2793 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2794 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2795 if (processor_alias_table[i].flags & PTA_SSE3
2796 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2797 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2798 if (processor_alias_table[i].flags & PTA_SSSE3
2799 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2800 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2801 if (processor_alias_table[i].flags & PTA_SSE4_1
2802 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2803 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2804 if (processor_alias_table[i].flags & PTA_SSE4_2
2805 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2806 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2807 if (processor_alias_table[i].flags & PTA_AVX
2808 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2809 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2810 if (processor_alias_table[i].flags & PTA_FMA
2811 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2812 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2813 if (processor_alias_table[i].flags & PTA_SSE4A
2814 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
2815 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
2816 if (processor_alias_table[i].flags & PTA_SSE5
2817 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE5))
2818 ix86_isa_flags |= OPTION_MASK_ISA_SSE5;
2819 if (processor_alias_table[i].flags & PTA_ABM
2820 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
2821 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
2822 if (processor_alias_table[i].flags & PTA_CX16
2823 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
2824 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
2825 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
2826 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
2827 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
2828 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
2829 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
2830 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
2831 if (processor_alias_table[i].flags & PTA_AES
2832 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
2833 ix86_isa_flags |= OPTION_MASK_ISA_AES;
2834 if (processor_alias_table[i].flags & PTA_PCLMUL
2835 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
2836 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
2837 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
2838 x86_prefetch_sse = true;
2840 break;
2843 if (i == pta_size)
2844 error ("bad value (%s) for %sarch=%s %s",
2845 ix86_arch_string, prefix, suffix, sw);
2847 ix86_arch_mask = 1u << ix86_arch;
2848 for (i = 0; i < X86_ARCH_LAST; ++i)
2849 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
2851 for (i = 0; i < pta_size; i++)
2852 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
2854 ix86_schedule = processor_alias_table[i].schedule;
2855 ix86_tune = processor_alias_table[i].processor;
2856 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2858 if (ix86_tune_defaulted)
2860 ix86_tune_string = "x86-64";
2861 for (i = 0; i < pta_size; i++)
2862 if (! strcmp (ix86_tune_string,
2863 processor_alias_table[i].name))
2864 break;
2865 ix86_schedule = processor_alias_table[i].schedule;
2866 ix86_tune = processor_alias_table[i].processor;
2868 else
2869 error ("CPU you selected does not support x86-64 "
2870 "instruction set");
2872 /* Intel CPUs have always interpreted SSE prefetch instructions as
2873 NOPs; so, we can enable SSE prefetch instructions even when
2874 -mtune (rather than -march) points us to a processor that has them.
2875 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
2876 higher processors. */
2877 if (TARGET_CMOVE
2878 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
2879 x86_prefetch_sse = true;
2880 break;
2882 if (i == pta_size)
2883 error ("bad value (%s) for %stune=%s %s",
2884 ix86_tune_string, prefix, suffix, sw);
2886 ix86_tune_mask = 1u << ix86_tune;
2887 for (i = 0; i < X86_TUNE_LAST; ++i)
2888 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
2890 if (optimize_size)
2891 ix86_cost = &ix86_size_cost;
2892 else
2893 ix86_cost = processor_target_table[ix86_tune].cost;
2895 /* Arrange to set up i386_stack_locals for all functions. */
2896 init_machine_status = ix86_init_machine_status;
2898 /* Validate -mregparm= value. */
2899 if (ix86_regparm_string)
2901 if (TARGET_64BIT)
2902 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
2903 i = atoi (ix86_regparm_string);
2904 if (i < 0 || i > REGPARM_MAX)
2905 error ("%sregparm=%d%s is not between 0 and %d",
2906 prefix, i, suffix, REGPARM_MAX);
2907 else
2908 ix86_regparm = i;
2910 if (TARGET_64BIT)
2911 ix86_regparm = REGPARM_MAX;
2913 /* If the user has provided any of the -malign-* options,
2914 warn and use that value only if -falign-* is not set.
2915 Remove this code in GCC 3.2 or later. */
2916 if (ix86_align_loops_string)
2918 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
2919 prefix, suffix, suffix);
2920 if (align_loops == 0)
2922 i = atoi (ix86_align_loops_string);
2923 if (i < 0 || i > MAX_CODE_ALIGN)
2924 error ("%salign-loops=%d%s is not between 0 and %d",
2925 prefix, i, suffix, MAX_CODE_ALIGN);
2926 else
2927 align_loops = 1 << i;
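/* For example, the (deprecated) -malign-loops=4 yields
   align_loops = 1 << 4 = 16, the same effect as -falign-loops=16.  */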
2931 if (ix86_align_jumps_string)
2933 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
2934 prefix, suffix, suffix);
2935 if (align_jumps == 0)
2937 i = atoi (ix86_align_jumps_string);
2938 if (i < 0 || i > MAX_CODE_ALIGN)
2939 error ("%salign-jumps=%d%s is not between 0 and %d",
2940 prefix, i, suffix, MAX_CODE_ALIGN);
2941 else
2942 align_jumps = 1 << i;
2946 if (ix86_align_funcs_string)
2948 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
2949 prefix, suffix, suffix);
2950 if (align_functions == 0)
2952 i = atoi (ix86_align_funcs_string);
2953 if (i < 0 || i > MAX_CODE_ALIGN)
2954 error ("%salign-functions=%d%s is not between 0 and %d",
2955 prefix, i, suffix, MAX_CODE_ALIGN);
2956 else
2957 align_functions = 1 << i;
2961 /* Default align_* from the processor table. */
2962 if (align_loops == 0)
2964 align_loops = processor_target_table[ix86_tune].align_loop;
2965 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2967 if (align_jumps == 0)
2969 align_jumps = processor_target_table[ix86_tune].align_jump;
2970 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2972 if (align_functions == 0)
2974 align_functions = processor_target_table[ix86_tune].align_func;
2977 /* Validate -mbranch-cost= value, or provide default. */
2978 ix86_branch_cost = ix86_cost->branch_cost;
2979 if (ix86_branch_cost_string)
2981 i = atoi (ix86_branch_cost_string);
2982 if (i < 0 || i > 5)
2983 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
2984 else
2985 ix86_branch_cost = i;
2987 if (ix86_section_threshold_string)
2989 i = atoi (ix86_section_threshold_string);
2990 if (i < 0)
2991 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
2992 else
2993 ix86_section_threshold = i;
2996 if (ix86_tls_dialect_string)
2998 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2999 ix86_tls_dialect = TLS_DIALECT_GNU;
3000 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3001 ix86_tls_dialect = TLS_DIALECT_GNU2;
3002 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
3003 ix86_tls_dialect = TLS_DIALECT_SUN;
3004 else
3005 error ("bad value (%s) for %stls-dialect=%s %s",
3006 ix86_tls_dialect_string, prefix, suffix, sw);
3009 if (ix87_precision_string)
3011 i = atoi (ix87_precision_string);
3012 if (i != 32 && i != 64 && i != 80)
3013 error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
3016 if (TARGET_64BIT)
3018 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3020 /* Enable by default the SSE and MMX builtins. Do allow the user to
3021 explicitly disable any of these. In particular, disabling SSE and
3022 MMX for kernel code is extremely useful. */
3023 if (!ix86_arch_specified)
3024 ix86_isa_flags
3025 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3026 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3028 if (TARGET_RTD)
3029 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3031 else
3033 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3035 if (!ix86_arch_specified)
3036 ix86_isa_flags
3037 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3039 /* The i386 ABI does not specify a red zone. It still makes sense to use
3040 one when the programmer takes care to keep the stack from being clobbered. */
3041 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3042 target_flags |= MASK_NO_RED_ZONE;
3045 /* Keep nonleaf frame pointers. */
3046 if (flag_omit_frame_pointer)
3047 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3048 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3049 flag_omit_frame_pointer = 1;
3051 /* If we're doing fast math, we don't care about comparison order
3052 wrt NaNs. This lets us use a shorter comparison sequence. */
3053 if (flag_finite_math_only)
3054 target_flags &= ~MASK_IEEE_FP;
3056 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3057 since the insns won't need emulation. */
3058 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3059 target_flags &= ~MASK_NO_FANCY_MATH_387;
3061 /* Likewise, if the target doesn't have a 387, or we've specified
3062 software floating point, don't use 387 inline intrinsics. */
3063 if (!TARGET_80387)
3064 target_flags |= MASK_NO_FANCY_MATH_387;
3066 /* Turn on MMX builtins for -msse. */
3067 if (TARGET_SSE)
3069 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3070 x86_prefetch_sse = true;
3073 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3074 if (TARGET_SSE4_2 || TARGET_ABM)
3075 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3077 /* Validate -mpreferred-stack-boundary= value or default it to
3078 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3079 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3080 if (ix86_preferred_stack_boundary_string)
3082 i = atoi (ix86_preferred_stack_boundary_string);
3083 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3084 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3085 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3086 else
3087 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
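/* For example, -mpreferred-stack-boundary=4 yields
   (1 << 4) * BITS_PER_UNIT = 128 bits, i.e. 16-byte alignment, the
   64-bit ABI minimum (hence the lower bound of 4 checked above).  */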
3090 /* Set the default value for -mstackrealign. */
3091 if (ix86_force_align_arg_pointer == -1)
3092 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3094 /* Validate -mincoming-stack-boundary= value or default it to
3095 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3096 if (ix86_force_align_arg_pointer)
3097 ix86_default_incoming_stack_boundary = MIN_STACK_BOUNDARY;
3098 else
3099 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3100 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3101 if (ix86_incoming_stack_boundary_string)
3103 i = atoi (ix86_incoming_stack_boundary_string);
3104 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3105 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3106 i, TARGET_64BIT ? 4 : 2);
3107 else
3109 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3110 ix86_incoming_stack_boundary
3111 = ix86_user_incoming_stack_boundary;
3115 /* Accept -msseregparm only if at least SSE support is enabled. */
3116 if (TARGET_SSEREGPARM
3117 && ! TARGET_SSE)
3118 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3120 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3121 if (ix86_fpmath_string != 0)
3123 if (! strcmp (ix86_fpmath_string, "387"))
3124 ix86_fpmath = FPMATH_387;
3125 else if (! strcmp (ix86_fpmath_string, "sse"))
3127 if (!TARGET_SSE)
3129 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3130 ix86_fpmath = FPMATH_387;
3132 else
3133 ix86_fpmath = FPMATH_SSE;
3135 else if (! strcmp (ix86_fpmath_string, "387,sse")
3136 || ! strcmp (ix86_fpmath_string, "387+sse")
3137 || ! strcmp (ix86_fpmath_string, "sse,387")
3138 || ! strcmp (ix86_fpmath_string, "sse+387")
3139 || ! strcmp (ix86_fpmath_string, "both"))
3141 if (!TARGET_SSE)
3143 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3144 ix86_fpmath = FPMATH_387;
3146 else if (!TARGET_80387)
3148 warning (0, "387 instruction set disabled, using SSE arithmetics");
3149 ix86_fpmath = FPMATH_SSE;
3151 else
3152 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3154 else
3155 error ("bad value (%s) for %sfpmath=%s %s",
3156 ix86_fpmath_string, prefix, suffix, sw);
3159 /* If the i387 is disabled, then do not return values in it. */
3160 if (!TARGET_80387)
3161 target_flags &= ~MASK_FLOAT_RETURNS;
3163 /* Use an external vectorized library when vectorizing intrinsics. */
3164 if (ix86_veclibabi_string)
3166 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3167 ix86_veclib_handler = ix86_veclibabi_svml;
3168 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3169 ix86_veclib_handler = ix86_veclibabi_acml;
3170 else
3171 error ("unknown vectorization library ABI type (%s) for "
3172 "%sveclibabi=%s %s", ix86_veclibabi_string,
3173 prefix, suffix, sw);
3176 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3177 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3178 && !optimize_size)
3179 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3181 /* ??? Unwind info is not correct around the CFG unless either a frame
3182 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3183 unwind info generation to be aware of the CFG and propagating states
3184 around edges. */
3185 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3186 || flag_exceptions || flag_non_call_exceptions)
3187 && flag_omit_frame_pointer
3188 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3190 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3191 warning (0, "unwind tables currently require either a frame pointer "
3192 "or %saccumulate-outgoing-args%s for correctness",
3193 prefix, suffix);
3194 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3197 /* If stack probes are required, the space used for large function
3198 arguments on the stack must also be probed, so enable
3199 -maccumulate-outgoing-args so this happens in the prologue. */
3200 if (TARGET_STACK_PROBE
3201 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3203 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3204 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3205 "for correctness", prefix, suffix);
3206 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3209 /* For sane SSE instruction set generation we need fcomi instruction.
3210 It is safe to enable all CMOVE instructions. */
3211 if (TARGET_SSE)
3212 TARGET_CMOVE = 1;
3214 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3216 char *p;
3217 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3218 p = strchr (internal_label_prefix, 'X');
3219 internal_label_prefix_len = p - internal_label_prefix;
3220 *p = '\0';
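/* Example (illustrative, assuming a typical ELF definition of
   ASM_GENERATE_INTERNAL_LABEL): the generated label is "*.LX0", p then
   points at the 'X', so internal_label_prefix becomes "*.L" and
   internal_label_prefix_len is 3. */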
3223 /* When scheduling description is not available, disable scheduler pass
3224 so it won't slow down the compilation and make x87 code slower. */
3225 if (!TARGET_SCHEDULE)
3226 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3228 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3229 set_param_value ("simultaneous-prefetches",
3230 ix86_cost->simultaneous_prefetches);
3231 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3232 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3233 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3234 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3235 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3236 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3238 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3239 can be optimized to ap = __builtin_next_arg (0). */
3240 if (!TARGET_64BIT)
3241 targetm.expand_builtin_va_start = NULL;
3243 if (TARGET_64BIT)
3245 ix86_gen_leave = gen_leave_rex64;
3246 ix86_gen_pop1 = gen_popdi1;
3247 ix86_gen_add3 = gen_adddi3;
3248 ix86_gen_sub3 = gen_subdi3;
3249 ix86_gen_sub3_carry = gen_subdi3_carry_rex64;
3250 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3251 ix86_gen_monitor = gen_sse3_monitor64;
3252 ix86_gen_andsp = gen_anddi3;
3254 else
3256 ix86_gen_leave = gen_leave;
3257 ix86_gen_pop1 = gen_popsi1;
3258 ix86_gen_add3 = gen_addsi3;
3259 ix86_gen_sub3 = gen_subsi3;
3260 ix86_gen_sub3_carry = gen_subsi3_carry;
3261 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3262 ix86_gen_monitor = gen_sse3_monitor;
3263 ix86_gen_andsp = gen_andsi3;
3266 #ifdef USE_IX86_CLD
3267 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3268 if (!TARGET_64BIT)
3269 target_flags |= MASK_CLD & ~target_flags_explicit;
3270 #endif
3272 /* Save the initial options in case the user uses function specific options. */
3273 if (main_args_p)
3274 target_option_default_node = target_option_current_node
3275 = build_target_option_node ();
3278 /* Update register usage after having seen the compiler flags. */
3280 void
3281 ix86_conditional_register_usage (void)
3283 int i;
3284 unsigned int j;
3286 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3288 if (fixed_regs[i] > 1)
3289 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3290 if (call_used_regs[i] > 1)
3291 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3294 /* The PIC register, if it exists, is fixed. */
3295 j = PIC_OFFSET_TABLE_REGNUM;
3296 if (j != INVALID_REGNUM)
3297 fixed_regs[j] = call_used_regs[j] = 1;
3299 /* The MS_ABI changes the set of call-used registers. */
3300 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3302 call_used_regs[SI_REG] = 0;
3303 call_used_regs[DI_REG] = 0;
3304 call_used_regs[XMM6_REG] = 0;
3305 call_used_regs[XMM7_REG] = 0;
3306 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3307 call_used_regs[i] = 0;
3310 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3311 other call-clobbered regs for 64-bit. */
3312 if (TARGET_64BIT)
3314 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3316 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3317 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3318 && call_used_regs[i])
3319 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3322 /* If MMX is disabled, squash the registers. */
3323 if (! TARGET_MMX)
3324 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3325 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3326 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3328 /* If SSE is disabled, squash the registers. */
3329 if (! TARGET_SSE)
3330 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3331 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3332 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3334 /* If the FPU is disabled, squash the registers. */
3335 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3336 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3337 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3338 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3340 /* If 32-bit, squash the 64-bit registers. */
3341 if (! TARGET_64BIT)
3343 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3344 reg_names[i] = "";
3345 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3346 reg_names[i] = "";
3351 /* Save the current options */
3353 static void
3354 ix86_function_specific_save (struct cl_target_option *ptr)
3356 gcc_assert (IN_RANGE (ix86_arch, 0, 255));
3357 gcc_assert (IN_RANGE (ix86_schedule, 0, 255));
3358 gcc_assert (IN_RANGE (ix86_tune, 0, 255));
3359 gcc_assert (IN_RANGE (ix86_fpmath, 0, 255));
3360 gcc_assert (IN_RANGE (ix86_branch_cost, 0, 255));
3362 ptr->arch = ix86_arch;
3363 ptr->schedule = ix86_schedule;
3364 ptr->tune = ix86_tune;
3365 ptr->fpmath = ix86_fpmath;
3366 ptr->branch_cost = ix86_branch_cost;
3367 ptr->tune_defaulted = ix86_tune_defaulted;
3368 ptr->arch_specified = ix86_arch_specified;
3369 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3370 ptr->target_flags_explicit = target_flags_explicit;
3373 /* Restore the current options */
3375 static void
3376 ix86_function_specific_restore (struct cl_target_option *ptr)
3378 enum processor_type old_tune = ix86_tune;
3379 enum processor_type old_arch = ix86_arch;
3380 unsigned int ix86_arch_mask, ix86_tune_mask;
3381 int i;
3383 ix86_arch = ptr->arch;
3384 ix86_schedule = ptr->schedule;
3385 ix86_tune = ptr->tune;
3386 ix86_fpmath = ptr->fpmath;
3387 ix86_branch_cost = ptr->branch_cost;
3388 ix86_tune_defaulted = ptr->tune_defaulted;
3389 ix86_arch_specified = ptr->arch_specified;
3390 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3391 target_flags_explicit = ptr->target_flags_explicit;
3393 /* Recreate the arch feature tests if the arch changed */
3394 if (old_arch != ix86_arch)
3396 ix86_arch_mask = 1u << ix86_arch;
3397 for (i = 0; i < X86_ARCH_LAST; ++i)
3398 ix86_arch_features[i]
3399 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3402 /* Recreate the tune optimization tests */
3403 if (old_tune != ix86_tune)
3405 ix86_tune_mask = 1u << ix86_tune;
3406 for (i = 0; i < X86_TUNE_LAST; ++i)
3407 ix86_tune_features[i]
3408 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3412 /* Print the current options */
3414 static void
3415 ix86_function_specific_print (FILE *file, int indent,
3416 struct cl_target_option *ptr)
3418 char *target_string
3419 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3420 NULL, NULL, NULL, false);
3422 fprintf (file, "%*sarch = %d (%s)\n",
3423 indent, "",
3424 ptr->arch,
3425 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3426 ? cpu_names[ptr->arch]
3427 : "<unknown>"));
3429 fprintf (file, "%*stune = %d (%s)\n",
3430 indent, "",
3431 ptr->tune,
3432 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3433 ? cpu_names[ptr->tune]
3434 : "<unknown>"));
3436 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3437 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3438 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3439 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3441 if (target_string)
3443 fprintf (file, "%*s%s\n", indent, "", target_string);
3444 free (target_string);
3449 /* Inner function to process the attribute((target(...))): take an argument
3450 and set the current options from it. If we have a list, recursively go
3451 over the list. */
3453 static bool
3454 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3456 char *next_optstr;
3457 bool ret = true;
3459 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3460 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3461 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3462 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3464 enum ix86_opt_type
3466 ix86_opt_unknown,
3467 ix86_opt_yes,
3468 ix86_opt_no,
3469 ix86_opt_str,
3470 ix86_opt_isa
3473 static const struct
3475 const char *string;
3476 size_t len;
3477 enum ix86_opt_type type;
3478 int opt;
3479 int mask;
3480 } attrs[] = {
3481 /* isa options */
3482 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3483 IX86_ATTR_ISA ("abm", OPT_mabm),
3484 IX86_ATTR_ISA ("aes", OPT_maes),
3485 IX86_ATTR_ISA ("avx", OPT_mavx),
3486 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3487 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3488 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3489 IX86_ATTR_ISA ("sse", OPT_msse),
3490 IX86_ATTR_ISA ("sse2", OPT_msse2),
3491 IX86_ATTR_ISA ("sse3", OPT_msse3),
3492 IX86_ATTR_ISA ("sse4", OPT_msse4),
3493 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3494 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3495 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3496 IX86_ATTR_ISA ("sse5", OPT_msse5),
3497 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3499 /* string options */
3500 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3501 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3502 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3504 /* flag options */
3505 IX86_ATTR_YES ("cld",
3506 OPT_mcld,
3507 MASK_CLD),
3509 IX86_ATTR_NO ("fancy-math-387",
3510 OPT_mfancy_math_387,
3511 MASK_NO_FANCY_MATH_387),
3513 IX86_ATTR_NO ("fused-madd",
3514 OPT_mfused_madd,
3515 MASK_NO_FUSED_MADD),
3517 IX86_ATTR_YES ("ieee-fp",
3518 OPT_mieee_fp,
3519 MASK_IEEE_FP),
3521 IX86_ATTR_YES ("inline-all-stringops",
3522 OPT_minline_all_stringops,
3523 MASK_INLINE_ALL_STRINGOPS),
3525 IX86_ATTR_YES ("inline-stringops-dynamically",
3526 OPT_minline_stringops_dynamically,
3527 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3529 IX86_ATTR_NO ("align-stringops",
3530 OPT_mno_align_stringops,
3531 MASK_NO_ALIGN_STRINGOPS),
3533 IX86_ATTR_YES ("recip",
3534 OPT_mrecip,
3535 MASK_RECIP),
3536 };
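/* Usage sketch (illustrative, not part of the original source): the table
   above lets declarations such as

     int foo (int) __attribute__ ((target ("sse4.2,no-fused-madd")));
     int bar (int) __attribute__ ((target ("arch=core2,fpmath=sse")));

   be processed below: "sse4.2" is an isa option, "no-fused-madd" flips a
   flag mask, and "arch="/"fpmath=" are string options. */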
3539 /* If this is a list, recurse to get the options. */
3540 if (TREE_CODE (args) == TREE_LIST)
3542 bool ret = true;
3544 for (; args; args = TREE_CHAIN (args))
3545 if (TREE_VALUE (args)
3546 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3547 ret = false;
3549 return ret;
3552 else if (TREE_CODE (args) != STRING_CST)
3553 gcc_unreachable ();
3555 /* Handle multiple arguments separated by commas. */
3556 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3558 while (next_optstr && *next_optstr != '\0')
3560 char *p = next_optstr;
3561 char *orig_p = p;
3562 char *comma = strchr (next_optstr, ',');
3563 const char *opt_string;
3564 size_t len, opt_len;
3565 int opt;
3566 bool opt_set_p;
3567 char ch;
3568 unsigned i;
3569 enum ix86_opt_type type = ix86_opt_unknown;
3570 int mask = 0;
3572 if (comma)
3574 *comma = '\0';
3575 len = comma - next_optstr;
3576 next_optstr = comma + 1;
3578 else
3580 len = strlen (p);
3581 next_optstr = NULL;
3584 /* Recognize no-xxx. */
3585 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3587 opt_set_p = false;
3588 p += 3;
3589 len -= 3;
3591 else
3592 opt_set_p = true;
3594 /* Find the option. */
3595 ch = *p;
3596 opt = N_OPTS;
3597 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3599 type = attrs[i].type;
3600 opt_len = attrs[i].len;
3601 if (ch == attrs[i].string[0]
3602 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3603 && memcmp (p, attrs[i].string, opt_len) == 0)
3605 opt = attrs[i].opt;
3606 mask = attrs[i].mask;
3607 opt_string = attrs[i].string;
3608 break;
3612 /* Process the option. */
3613 if (opt == N_OPTS)
3615 error ("attribute(target(\"%s\")) is unknown", orig_p);
3616 ret = false;
3619 else if (type == ix86_opt_isa)
3620 ix86_handle_option (opt, p, opt_set_p);
3622 else if (type == ix86_opt_yes || type == ix86_opt_no)
3624 if (type == ix86_opt_no)
3625 opt_set_p = !opt_set_p;
3627 if (opt_set_p)
3628 target_flags |= mask;
3629 else
3630 target_flags &= ~mask;
3633 else if (type == ix86_opt_str)
3635 if (p_strings[opt])
3637 error ("option(\"%s\") was already specified", opt_string);
3638 ret = false;
3640 else
3641 p_strings[opt] = xstrdup (p + opt_len);
3644 else
3645 gcc_unreachable ();
3648 return ret;
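/* Parsing sketch (illustrative): given target("sse4.1,no-recip"), the loop
   above splits the string at the comma, handles "sse4.1" as an isa option
   with opt_set_p == true, then strips the "no-" prefix from "no-recip" so
   that MASK_RECIP is cleared from target_flags. */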
3651 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3653 tree
3654 ix86_valid_target_attribute_tree (tree args)
3656 const char *orig_arch_string = ix86_arch_string;
3657 const char *orig_tune_string = ix86_tune_string;
3658 const char *orig_fpmath_string = ix86_fpmath_string;
3659 int orig_tune_defaulted = ix86_tune_defaulted;
3660 int orig_arch_specified = ix86_arch_specified;
3661 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3662 tree t = NULL_TREE;
3663 int i;
3664 struct cl_target_option *def
3665 = TREE_TARGET_OPTION (target_option_default_node);
3667 /* Process each of the options on the chain. */
3668 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3669 return NULL_TREE;
3671 /* If the changed options are different from the default, rerun override_options,
3672 and then save the options away. The string options are attribute options,
3673 and will be undone when we copy the save structure. */
3674 if (ix86_isa_flags != def->ix86_isa_flags
3675 || target_flags != def->target_flags
3676 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3677 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3678 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3680 /* If we are using the default tune= or arch=, undo the string assigned,
3681 and use the default. */
3682 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3683 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3684 else if (!orig_arch_specified)
3685 ix86_arch_string = NULL;
3687 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3688 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3689 else if (orig_tune_defaulted)
3690 ix86_tune_string = NULL;
3692 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3693 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3694 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3695 else if (!TARGET_64BIT && TARGET_SSE)
3696 ix86_fpmath_string = "sse,387";
3698 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3699 override_options (false);
3701 /* Add any builtin functions for the new isa, if any. */
3702 ix86_add_new_builtins (ix86_isa_flags);
3704 /* Save the current options unless we are validating options for
3705 #pragma. */
3706 t = build_target_option_node ();
3708 ix86_arch_string = orig_arch_string;
3709 ix86_tune_string = orig_tune_string;
3710 ix86_fpmath_string = orig_fpmath_string;
3712 /* Free up memory allocated to hold the strings */
3713 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3714 if (option_strings[i])
3715 free (option_strings[i]);
3718 return t;
3721 /* Hook to validate attribute((target("string"))). */
3723 static bool
3724 ix86_valid_target_attribute_p (tree fndecl,
3725 tree ARG_UNUSED (name),
3726 tree args,
3727 int ARG_UNUSED (flags))
3729 struct cl_target_option cur_target;
3730 bool ret = true;
3731 tree old_optimize = build_optimization_node ();
3732 tree new_target, new_optimize;
3733 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3735 /* If the function changed the optimization levels as well as setting target
3736 options, start with the optimizations specified. */
3737 if (func_optimize && func_optimize != old_optimize)
3738 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3740 /* The target attributes may also change some optimization flags, so update
3741 the optimization options if necessary. */
3742 cl_target_option_save (&cur_target);
3743 new_target = ix86_valid_target_attribute_tree (args);
3744 new_optimize = build_optimization_node ();
3746 if (!new_target)
3747 ret = false;
3749 else if (fndecl)
3751 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3753 if (old_optimize != new_optimize)
3754 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3757 cl_target_option_restore (&cur_target);
3759 if (old_optimize != new_optimize)
3760 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3762 return ret;
3766 /* Hook to determine if one function can safely inline another. */
3768 static bool
3769 ix86_can_inline_p (tree caller, tree callee)
3771 bool ret = false;
3772 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3773 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3775 /* If callee has no option attributes, then it is ok to inline. */
3776 if (!callee_tree)
3777 ret = true;
3779 /* If caller has no option attributes, but callee does then it is not ok to
3780 inline. */
3781 else if (!caller_tree)
3782 ret = false;
3784 else
3786 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3787 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3789 /* Callee's isa options should be a subset of the caller's, i.e. an SSE5
3790 function can inline an SSE2 function but an SSE2 function can't inline
3791 an SSE5 function. */
3792 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3793 != callee_opts->ix86_isa_flags)
3794 ret = false;
3796 /* See if we have the same non-isa options. */
3797 else if (caller_opts->target_flags != callee_opts->target_flags)
3798 ret = false;
3800 /* See if arch, tune, etc. are the same. */
3801 else if (caller_opts->arch != callee_opts->arch)
3802 ret = false;
3804 else if (caller_opts->tune != callee_opts->tune)
3805 ret = false;
3807 else if (caller_opts->fpmath != callee_opts->fpmath)
3808 ret = false;
3810 else if (caller_opts->branch_cost != callee_opts->branch_cost)
3811 ret = false;
3813 else
3814 ret = true;
3817 return ret;
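/* Example (illustrative): a caller compiled with target("sse4.2") may
   inline a callee marked target("sse2"), since the callee's isa flags are
   a subset of the caller's; the reverse inlining is rejected above. */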
3821 /* Remember the last target of ix86_set_current_function. */
3822 static GTY(()) tree ix86_previous_fndecl;
3824 /* Establish appropriate back-end context for processing the function
3825 FNDECL. The argument might be NULL to indicate processing at top
3826 level, outside of any function scope. */
3827 static void
3828 ix86_set_current_function (tree fndecl)
3830 /* Only change the context if the function changes. This hook is called
3831 several times in the course of compiling a function, and we don't want to
3832 slow things down too much or call target_reinit when it isn't safe. */
3833 if (fndecl && fndecl != ix86_previous_fndecl)
3835 tree old_tree = (ix86_previous_fndecl
3836 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
3837 : NULL_TREE);
3839 tree new_tree = (fndecl
3840 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
3841 : NULL_TREE);
3843 ix86_previous_fndecl = fndecl;
3844 if (old_tree == new_tree)
3847 else if (new_tree)
3849 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
3850 target_reinit ();
3853 else if (old_tree)
3855 struct cl_target_option *def
3856 = TREE_TARGET_OPTION (target_option_current_node);
3858 cl_target_option_restore (def);
3859 target_reinit ();
3865 /* Return true if this goes in large data/bss. */
3867 static bool
3868 ix86_in_large_data_p (tree exp)
3870 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
3871 return false;
3873 /* Functions are never large data. */
3874 if (TREE_CODE (exp) == FUNCTION_DECL)
3875 return false;
3877 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
3879 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
3880 if (strcmp (section, ".ldata") == 0
3881 || strcmp (section, ".lbss") == 0)
3882 return true;
3883 return false;
3885 else
3887 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
3889 /* If this is an incomplete type with size 0, then we can't put it
3890 in data because it might be too big when completed. */
3891 if (!size || size > ix86_section_threshold)
3892 return true;
3895 return false;
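/* Example (illustrative, assuming the default section threshold of 65536
   bytes): with -mcmodel=medium, "static char buf[100000];" is considered
   large data and ends up in .lbss rather than .bss. */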
3898 /* Switch to the appropriate section for output of DECL.
3899 DECL is either a `VAR_DECL' node or a constant of some sort.
3900 RELOC indicates whether forming the initial value of DECL requires
3901 link-time relocations. */
3903 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
3904 ATTRIBUTE_UNUSED;
3906 static section *
3907 x86_64_elf_select_section (tree decl, int reloc,
3908 unsigned HOST_WIDE_INT align)
3910 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3911 && ix86_in_large_data_p (decl))
3913 const char *sname = NULL;
3914 unsigned int flags = SECTION_WRITE;
3915 switch (categorize_decl_for_section (decl, reloc))
3917 case SECCAT_DATA:
3918 sname = ".ldata";
3919 break;
3920 case SECCAT_DATA_REL:
3921 sname = ".ldata.rel";
3922 break;
3923 case SECCAT_DATA_REL_LOCAL:
3924 sname = ".ldata.rel.local";
3925 break;
3926 case SECCAT_DATA_REL_RO:
3927 sname = ".ldata.rel.ro";
3928 break;
3929 case SECCAT_DATA_REL_RO_LOCAL:
3930 sname = ".ldata.rel.ro.local";
3931 break;
3932 case SECCAT_BSS:
3933 sname = ".lbss";
3934 flags |= SECTION_BSS;
3935 break;
3936 case SECCAT_RODATA:
3937 case SECCAT_RODATA_MERGE_STR:
3938 case SECCAT_RODATA_MERGE_STR_INIT:
3939 case SECCAT_RODATA_MERGE_CONST:
3940 sname = ".lrodata";
3941 flags = 0;
3942 break;
3943 case SECCAT_SRODATA:
3944 case SECCAT_SDATA:
3945 case SECCAT_SBSS:
3946 gcc_unreachable ();
3947 case SECCAT_TEXT:
3948 case SECCAT_TDATA:
3949 case SECCAT_TBSS:
3950 /* We don't split these for the medium model. Place them into
3951 default sections and hope for the best. */
3952 break;
3953 case SECCAT_EMUTLS_VAR:
3954 case SECCAT_EMUTLS_TMPL:
3955 gcc_unreachable ();
3957 if (sname)
3959 /* We might get called with string constants, but get_named_section
3960 doesn't like them as they are not DECLs. Also, we need to set
3961 flags in that case. */
3962 if (!DECL_P (decl))
3963 return get_section (sname, flags, NULL);
3964 return get_named_section (decl, sname, reloc);
3967 return default_elf_select_section (decl, reloc, align);
3970 /* Build up a unique section name, expressed as a
3971 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
3972 RELOC indicates whether the initial value of EXP requires
3973 link-time relocations. */
3975 static void ATTRIBUTE_UNUSED
3976 x86_64_elf_unique_section (tree decl, int reloc)
3978 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3979 && ix86_in_large_data_p (decl))
3981 const char *prefix = NULL;
3982 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
3983 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
3985 switch (categorize_decl_for_section (decl, reloc))
3987 case SECCAT_DATA:
3988 case SECCAT_DATA_REL:
3989 case SECCAT_DATA_REL_LOCAL:
3990 case SECCAT_DATA_REL_RO:
3991 case SECCAT_DATA_REL_RO_LOCAL:
3992 prefix = one_only ? ".ld" : ".ldata";
3993 break;
3994 case SECCAT_BSS:
3995 prefix = one_only ? ".lb" : ".lbss";
3996 break;
3997 case SECCAT_RODATA:
3998 case SECCAT_RODATA_MERGE_STR:
3999 case SECCAT_RODATA_MERGE_STR_INIT:
4000 case SECCAT_RODATA_MERGE_CONST:
4001 prefix = one_only ? ".lr" : ".lrodata";
4002 break;
4003 case SECCAT_SRODATA:
4004 case SECCAT_SDATA:
4005 case SECCAT_SBSS:
4006 gcc_unreachable ();
4007 case SECCAT_TEXT:
4008 case SECCAT_TDATA:
4009 case SECCAT_TBSS:
4010 /* We don't split these for the medium model. Place them into
4011 default sections and hope for the best. */
4012 break;
4013 case SECCAT_EMUTLS_VAR:
4014 prefix = targetm.emutls.var_section;
4015 break;
4016 case SECCAT_EMUTLS_TMPL:
4017 prefix = targetm.emutls.tmpl_section;
4018 break;
4020 if (prefix)
4022 const char *name, *linkonce;
4023 char *string;
4025 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4026 name = targetm.strip_name_encoding (name);
4028 /* If we're using one_only, then there needs to be a .gnu.linkonce
4029 prefix to the section name. */
4030 linkonce = one_only ? ".gnu.linkonce" : "";
4032 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4034 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4035 return;
4038 default_unique_section (decl, reloc);
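/* Example (illustrative): a one-only data variable "foo" on a target
   without COMDAT groups gets the section name ".gnu.linkonce.ld.foo";
   with COMDAT groups available the name is ".ldata.foo" instead. */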
4041 #ifdef COMMON_ASM_OP
4042 /* This says how to output assembler code to declare an
4043 uninitialized external linkage data object.
4045 For medium model x86-64 we need to use the .largecomm directive for
4046 large objects. */
4047 void
4048 x86_elf_aligned_common (FILE *file,
4049 const char *name, unsigned HOST_WIDE_INT size,
4050 int align)
4052 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4053 && size > (unsigned int)ix86_section_threshold)
4054 fprintf (file, ".largecomm\t");
4055 else
4056 fprintf (file, "%s", COMMON_ASM_OP);
4057 assemble_name (file, name);
4058 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
4059 size, align / BITS_PER_UNIT);
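/* Example output (illustrative): a 131072-byte common symbol "big_buf"
   aligned to 256 bits under -mcmodel=medium is emitted as

     .largecomm big_buf,131072,32

   while smaller objects use the regular COMMON_ASM_OP form. */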
4061 #endif
4063 /* Utility function for targets to use in implementing
4064 ASM_OUTPUT_ALIGNED_BSS. */
4066 void
4067 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4068 const char *name, unsigned HOST_WIDE_INT size,
4069 int align)
4071 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4072 && size > (unsigned int)ix86_section_threshold)
4073 switch_to_section (get_named_section (decl, ".lbss", 0));
4074 else
4075 switch_to_section (bss_section);
4076 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4077 #ifdef ASM_DECLARE_OBJECT_NAME
4078 last_assemble_variable_decl = decl;
4079 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4080 #else
4081 /* Standard thing is just output label for the object. */
4082 ASM_OUTPUT_LABEL (file, name);
4083 #endif /* ASM_DECLARE_OBJECT_NAME */
4084 ASM_OUTPUT_SKIP (file, size ? size : 1);
4087 void
4088 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4090 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4091 make the register shortage problem even worse. */
4092 #ifdef INSN_SCHEDULING
4093 if (level > 1)
4094 flag_schedule_insns = 0;
4095 #endif
4097 if (TARGET_MACHO)
4098 /* The Darwin libraries never set errno, so we might as well
4099 avoid calling them when that's the only reason we would. */
4100 flag_errno_math = 0;
4102 /* The default values of these switches depend on TARGET_64BIT,
4103 which is not known at this moment. Mark these values with 2 and
4104 let the user override them. In case there is no command line option
4105 specifying them, we will set the defaults in override_options. */
4106 if (optimize >= 1)
4107 flag_omit_frame_pointer = 2;
4108 flag_pcc_struct_return = 2;
4109 flag_asynchronous_unwind_tables = 2;
4110 flag_vect_cost_model = 1;
4111 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4112 SUBTARGET_OPTIMIZATION_OPTIONS;
4113 #endif
4116 /* Decide whether we can make a sibling call to a function. DECL is the
4117 declaration of the function being targeted by the call and EXP is the
4118 CALL_EXPR representing the call. */
4120 static bool
4121 ix86_function_ok_for_sibcall (tree decl, tree exp)
4123 tree type, decl_or_type;
4124 rtx a, b;
4126 /* If we are generating position-independent code, we cannot sibcall
4127 optimize any indirect call, or a direct call to a global function,
4128 as the PLT requires %ebx be live. */
4129 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4130 return false;
4132 /* If we need to align the outgoing stack, then sibcalling would
4133 unalign the stack, which may break the called function. */
4134 if (ix86_incoming_stack_boundary < PREFERRED_STACK_BOUNDARY)
4135 return false;
4137 if (decl)
4139 decl_or_type = decl;
4140 type = TREE_TYPE (decl);
4142 else
4144 /* We're looking at the CALL_EXPR, we need the type of the function. */
4145 type = CALL_EXPR_FN (exp); /* pointer expression */
4146 type = TREE_TYPE (type); /* pointer type */
4147 type = TREE_TYPE (type); /* function type */
4148 decl_or_type = type;
4151 /* Check that the return value locations are the same. Like
4152 if we are returning floats on the 80387 register stack, we cannot
4153 make a sibcall from a function that doesn't return a float to a
4154 function that does or, conversely, from a function that does return
4155 a float to a function that doesn't; the necessary stack adjustment
4156 would not be executed. This is also the place we notice
4157 differences in the return value ABI. Note that it is ok for one
4158 of the functions to have void return type as long as the return
4159 value of the other is passed in a register. */
4160 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4161 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4162 cfun->decl, false);
4163 if (STACK_REG_P (a) || STACK_REG_P (b))
4165 if (!rtx_equal_p (a, b))
4166 return false;
4168 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4170 else if (!rtx_equal_p (a, b))
4171 return false;
4173 if (TARGET_64BIT)
4175 /* The SYSV ABI has more call-clobbered registers;
4176 disallow sibcalls from MS to SYSV. */
4177 if (cfun->machine->call_abi == MS_ABI
4178 && ix86_function_type_abi (type) == SYSV_ABI)
4179 return false;
4181 else
4183 /* If this call is indirect, we'll need to be able to use a
4184 call-clobbered register for the address of the target function.
4185 Make sure that all such registers are not used for passing
4186 parameters. Note that DLLIMPORT functions are indirect. */
4187 if (!decl
4188 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4190 if (ix86_function_regparm (type, NULL) >= 3)
4192 /* ??? Need to count the actual number of registers to be used,
4193 not the possible number of registers. Fix later. */
4194 return false;
4199 /* Otherwise okay. That also includes certain types of indirect calls. */
4200 return true;
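/* Example (illustrative): in 32-bit PIC code a tail call to a non-local
   function is rejected above because the call would go through the PLT,
   which requires %ebx to be live; a call to a static function that binds
   locally remains eligible for sibcall optimization. */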
4203 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
4204 calling convention attributes;
4205 arguments as in struct attribute_spec.handler. */
4207 static tree
4208 ix86_handle_cconv_attribute (tree *node, tree name,
4209 tree args,
4210 int flags ATTRIBUTE_UNUSED,
4211 bool *no_add_attrs)
4213 if (TREE_CODE (*node) != FUNCTION_TYPE
4214 && TREE_CODE (*node) != METHOD_TYPE
4215 && TREE_CODE (*node) != FIELD_DECL
4216 && TREE_CODE (*node) != TYPE_DECL)
4218 warning (OPT_Wattributes, "%qs attribute only applies to functions",
4219 IDENTIFIER_POINTER (name));
4220 *no_add_attrs = true;
4221 return NULL_TREE;
4224 /* Can combine regparm with all attributes but fastcall. */
4225 if (is_attribute_p ("regparm", name))
4227 tree cst;
4229 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4231 error ("fastcall and regparm attributes are not compatible");
4234 cst = TREE_VALUE (args);
4235 if (TREE_CODE (cst) != INTEGER_CST)
4237 warning (OPT_Wattributes,
4238 "%qs attribute requires an integer constant argument",
4239 IDENTIFIER_POINTER (name));
4240 *no_add_attrs = true;
4242 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4244 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
4245 IDENTIFIER_POINTER (name), REGPARM_MAX);
4246 *no_add_attrs = true;
4249 return NULL_TREE;
4252 if (TARGET_64BIT)
4254 /* Do not warn when emulating the MS ABI. */
4255 if (TREE_CODE (*node) != FUNCTION_TYPE || ix86_function_type_abi (*node)!=MS_ABI)
4256 warning (OPT_Wattributes, "%qs attribute ignored",
4257 IDENTIFIER_POINTER (name));
4258 *no_add_attrs = true;
4259 return NULL_TREE;
4262 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4263 if (is_attribute_p ("fastcall", name))
4265 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4267 error ("fastcall and cdecl attributes are not compatible");
4269 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4271 error ("fastcall and stdcall attributes are not compatible");
4273 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4275 error ("fastcall and regparm attributes are not compatible");
4279 /* Can combine stdcall with fastcall (redundant), regparm and
4280 sseregparm. */
4281 else if (is_attribute_p ("stdcall", name))
4283 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4285 error ("stdcall and cdecl attributes are not compatible");
4287 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4289 error ("stdcall and fastcall attributes are not compatible");
4293 /* Can combine cdecl with regparm and sseregparm. */
4294 else if (is_attribute_p ("cdecl", name))
4296 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4298 error ("stdcall and cdecl attributes are not compatible");
4300 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4302 error ("fastcall and cdecl attributes are not compatible");
4306 /* Can combine sseregparm with all attributes. */
4308 return NULL_TREE;
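/* Usage sketch (illustrative, not part of the original source):

     int __attribute__ ((fastcall)) f (int a, int b);
     int __attribute__ ((stdcall, regparm (2))) g (int c, int d);

   For f, a is passed in %ecx and b in %edx. Combining fastcall with
   regparm, by contrast, is diagnosed above as incompatible. */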
4311 /* Return 0 if the attributes for two types are incompatible, 1 if they
4312 are compatible, and 2 if they are nearly compatible (which causes a
4313 warning to be generated). */
4315 static int
4316 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4318 /* Check for mismatch of non-default calling convention. */
4319 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4321 if (TREE_CODE (type1) != FUNCTION_TYPE
4322 && TREE_CODE (type1) != METHOD_TYPE)
4323 return 1;
4325 /* Check for mismatched fastcall/regparm types. */
4326 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4327 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4328 || (ix86_function_regparm (type1, NULL)
4329 != ix86_function_regparm (type2, NULL)))
4330 return 0;
4332 /* Check for mismatched sseregparm types. */
4333 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4334 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4335 return 0;
4337 /* Check for mismatched return types (cdecl vs stdcall). */
4338 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4339 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4340 return 0;
4342 return 1;
4345 /* Return the regparm value for a function with the indicated TYPE and DECL.
4346 DECL may be NULL when calling function indirectly
4347 or considering a libcall. */
4349 static int
4350 ix86_function_regparm (const_tree type, const_tree decl)
4352 tree attr;
4353 int regparm;
4355 static bool error_issued;
4357 if (TARGET_64BIT)
4358 return (ix86_function_type_abi (type) == SYSV_ABI
4359 ? X86_64_REGPARM_MAX : X64_REGPARM_MAX);
4361 regparm = ix86_regparm;
4362 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4363 if (attr)
4365 regparm
4366 = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4368 if (decl && TREE_CODE (decl) == FUNCTION_DECL)
4370 /* We can't use regparm(3) for nested functions because
4371 these pass the static chain pointer in the %ecx register. */
4372 if (!error_issued && regparm == 3
4373 && decl_function_context (decl)
4374 && !DECL_NO_STATIC_CHAIN (decl))
4376 error ("nested functions are limited to 2 register parameters");
4377 error_issued = true;
4378 return 0;
4382 return regparm;
4385 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4386 return 2;
4388 /* Use register calling convention for local functions when possible. */
4389 if (decl
4390 && TREE_CODE (decl) == FUNCTION_DECL
4391 && optimize
4392 && !profile_flag)
4394 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4395 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4396 if (i && i->local)
4398 int local_regparm, globals = 0, regno;
4399 struct function *f;
4401 /* Make sure no regparm register is taken by a
4402 fixed register variable. */
4403 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4404 if (fixed_regs[local_regparm])
4405 break;
4407 /* We can't use regparm(3) for nested functions as these pass
4408 the static chain pointer in the third argument register. */
4409 if (local_regparm == 3
4410 && decl_function_context (decl)
4411 && !DECL_NO_STATIC_CHAIN (decl))
4412 local_regparm = 2;
4414 /* If the function realigns its stack pointer, the prologue will
4415 clobber %ecx. If we've already generated code for the callee,
4416 the callee DECL_STRUCT_FUNCTION is gone, so we fall back to
4417 scanning the attributes for the self-realigning property. */
4418 f = DECL_STRUCT_FUNCTION (decl);
4419 /* Since the current internal arg pointer won't conflict with
4420 parameter passing regs, there is no need to change stack
4421 realignment or adjust the regparm number.
4423 Each fixed register usage increases register pressure,
4424 so fewer registers should be used for argument passing.
4425 This behavior can be overridden by an explicit
4426 regparm value. */
4427 for (regno = 0; regno <= DI_REG; regno++)
4428 if (fixed_regs[regno])
4429 globals++;
4431 local_regparm
4432 = globals < local_regparm ? local_regparm - globals : 0;
4434 if (local_regparm > regparm)
4435 regparm = local_regparm;
4439 return regparm;
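/* Example (illustrative): for "int __attribute__ ((regparm (3))) f (int a,
   int b, int c);" this returns 3, so a, b and c are passed in %eax, %edx
   and %ecx instead of on the stack. */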
4442 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4443 DFmode (2) arguments in SSE registers for a function with the
4444 indicated TYPE and DECL. DECL may be NULL when calling function
4445 indirectly or considering a libcall. Otherwise return 0. */
4447 static int
4448 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4450 gcc_assert (!TARGET_64BIT);
4452 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4453 by the sseregparm attribute. */
4454 if (TARGET_SSEREGPARM
4455 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4457 if (!TARGET_SSE)
4459 if (warn)
4461 if (decl)
4462 error ("calling %qD with attribute sseregparm without
4463 "SSE/SSE2 enabled", decl);
4464 else
4465 error ("calling %qT with attribute sseregparm without
4466 "SSE/SSE2 enabled", type);
4468 return 0;
4471 return 2;
4474 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4475 (and DFmode for SSE2) arguments in SSE registers. */
4476 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4478 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4479 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4480 if (i && i->local)
4481 return TARGET_SSE2 ? 2 : 1;
4484 return 0;
4487 /* Return true if EAX is live at the start of the function. Used by
4488 ix86_expand_prologue to determine if we need special help before
4489 calling allocate_stack_worker. */
4491 static bool
4492 ix86_eax_live_at_start_p (void)
4494 /* Cheat. Don't bother working forward from ix86_function_regparm
4495 to the function type to whether an actual argument is located in
4496 eax. Instead just look at cfg info, which is still close enough
4497 to correct at this point. This gives false positives for broken
4498 functions that might use uninitialized data that happens to be
4499 allocated in eax, but who cares? */
4500 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4503 /* Value is the number of bytes of arguments automatically
4504 popped when returning from a subroutine call.
4505 FUNDECL is the declaration node of the function (as a tree),
4506 FUNTYPE is the data type of the function (as a tree),
4507 or for a library call it is an identifier node for the subroutine name.
4508 SIZE is the number of bytes of arguments passed on the stack.
4510 On the 80386, the RTD insn may be used to pop them if the number
4511 of args is fixed, but if the number is variable then the caller
4512 must pop them all. RTD can't be used for library calls now
4513 because the library is compiled with the Unix compiler.
4514 Use of RTD is a selectable option, since it is incompatible with
4515 standard Unix calling sequences. If the option is not selected,
4516 the caller must always pop the args.
4518 The attribute stdcall is equivalent to RTD on a per module basis. */
4520 int
4521 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4523 int rtd;
4525 /* None of the 64-bit ABIs pop arguments. */
4526 if (TARGET_64BIT)
4527 return 0;
4529 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4531 /* Cdecl functions override -mrtd, and never pop the stack. */
4532 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4534 /* Stdcall and fastcall functions will pop the stack if not
4535 variable args. */
4536 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4537 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
4538 rtd = 1;
4540 if (rtd && ! stdarg_p (funtype))
4541 return size;
4544 /* Lose any fake structure return argument if it is passed on the stack. */
4545 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4546 && !KEEP_AGGREGATE_RETURN_POINTER)
4548 int nregs = ix86_function_regparm (funtype, fundecl);
4549 if (nregs == 0)
4550 return GET_MODE_SIZE (Pmode);
4553 return 0;
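/* Example (illustrative): "void __attribute__ ((stdcall)) f (int, int);"
   is not variadic, so this returns 8 and the callee pops its arguments
   with "ret $8"; a plain cdecl function returns 0 and the caller adjusts
   %esp after the call. */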
4556 /* Argument support functions. */
4558 /* Return true when register may be used to pass function parameters. */
4559 bool
4560 ix86_function_arg_regno_p (int regno)
4562 int i;
4563 const int *parm_regs;
4565 if (!TARGET_64BIT)
4567 if (TARGET_MACHO)
4568 return (regno < REGPARM_MAX
4569 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4570 else
4571 return (regno < REGPARM_MAX
4572 || (TARGET_MMX && MMX_REGNO_P (regno)
4573 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4574 || (TARGET_SSE && SSE_REGNO_P (regno)
4575 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4578 if (TARGET_MACHO)
4580 if (SSE_REGNO_P (regno) && TARGET_SSE)
4581 return true;
4583 else
4585 if (TARGET_SSE && SSE_REGNO_P (regno)
4586 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4587 return true;
4590 /* TODO: The function should depend on current function ABI but
4591 builtins.c would need updating then. Therefore we use the
4592 default ABI. */
4594 /* RAX is used as hidden argument to va_arg functions. */
4595 if (DEFAULT_ABI == SYSV_ABI && regno == AX_REG)
4596 return true;
4598 if (DEFAULT_ABI == MS_ABI)
4599 parm_regs = x86_64_ms_abi_int_parameter_registers;
4600 else
4601 parm_regs = x86_64_int_parameter_registers;
4602 for (i = 0; i < (DEFAULT_ABI == MS_ABI ? X64_REGPARM_MAX
4603 : X86_64_REGPARM_MAX); i++)
4604 if (regno == parm_regs[i])
4605 return true;
4606 return false;
4609 /* Return true if we do not know how to pass TYPE solely in registers. */
4611 static bool
4612 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4614 if (must_pass_in_stack_var_size_or_pad (mode, type))
4615 return true;
4617 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4618 The layout_type routine is crafty and tries to trick us into passing
4619 currently unsupported vector types on the stack by using TImode. */
4620 return (!TARGET_64BIT && mode == TImode
4621 && type && TREE_CODE (type) != VECTOR_TYPE);
4624 /* Return the size, in bytes, of the area reserved for arguments passed
4625 in registers for the function represented by FNDECL, depending on the
4626 ABI used. */
4627 int
4628 ix86_reg_parm_stack_space (const_tree fndecl)
4630 int call_abi = SYSV_ABI;
4631 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4632 call_abi = ix86_function_abi (fndecl);
4633 else
4634 call_abi = ix86_function_type_abi (fndecl);
4635 if (call_abi == MS_ABI)
4636 return 32;
4637 return 0;
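/* Note (illustrative): the 32 bytes returned for MS_ABI correspond to the
   Win64 "shadow space" the caller reserves for the four register
   parameters (4 * 8 bytes). */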
4640 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
4641 call ABI used. */
4642 int
4643 ix86_function_type_abi (const_tree fntype)
4645 if (TARGET_64BIT && fntype != NULL)
4647 int abi;
4648 if (DEFAULT_ABI == SYSV_ABI)
4649 abi = lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)) ? MS_ABI : SYSV_ABI;
4650 else
4651 abi = lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)) ? SYSV_ABI : MS_ABI;
4653 return abi;
4655 return DEFAULT_ABI;
4658 int
4659 ix86_function_abi (const_tree fndecl)
4661 if (! fndecl)
4662 return DEFAULT_ABI;
4663 return ix86_function_type_abi (TREE_TYPE (fndecl));
4666 /* Return SYSV_ABI or MS_ABI, depending on cfun, specifying the
4667 call ABI used. */
4668 int
4669 ix86_cfun_abi (void)
4671 if (! cfun || ! TARGET_64BIT)
4672 return DEFAULT_ABI;
4673 return cfun->machine->call_abi;
4676 /* regclass.c */
4677 extern void init_regs (void);
4679 /* Implementation of the call abi switching target hook. For the given
4680 FNDECL, the appropriate call register sets are selected. See also
4681 CONDITIONAL_REGISTER_USAGE for more details. */
4682 void
4683 ix86_call_abi_override (const_tree fndecl)
4685 if (fndecl == NULL_TREE)
4686 cfun->machine->call_abi = DEFAULT_ABI;
4687 else
4688 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4691 /* MS and SYSV ABI have different sets of call-used registers. Avoid expensive
4692 re-initialization of init_regs each time we switch function context since
4693 this is needed only during RTL expansion. */
4694 static void
4695 ix86_maybe_switch_abi (void)
4697 if (TARGET_64BIT &&
4698 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4699 reinit_regs ();
4702 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4703 for a call to a function whose data type is FNTYPE.
4704 For a library call, FNTYPE is 0. */
4706 void
4707 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4708 tree fntype, /* tree ptr for function decl */
4709 rtx libname, /* SYMBOL_REF of library name or 0 */
4710 tree fndecl)
4712 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4713 memset (cum, 0, sizeof (*cum));
4715 if (fndecl)
4716 cum->call_abi = ix86_function_abi (fndecl);
4717 else
4718 cum->call_abi = ix86_function_type_abi (fntype);
4719 /* Set up the number of registers to use for passing arguments. */
4721 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4722 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4723 "or subtarget optimization implying it");
4724 cum->nregs = ix86_regparm;
4725 if (TARGET_64BIT)
4727 if (cum->call_abi != DEFAULT_ABI)
4728 cum->nregs = DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX
4729 : X64_REGPARM_MAX;
4731 if (TARGET_SSE)
4733 cum->sse_nregs = SSE_REGPARM_MAX;
4734 if (TARGET_64BIT)
4736 if (cum->call_abi != DEFAULT_ABI)
4737 cum->sse_nregs = DEFAULT_ABI != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
4738 : X64_SSE_REGPARM_MAX;
4741 if (TARGET_MMX)
4742 cum->mmx_nregs = MMX_REGPARM_MAX;
4743 cum->warn_avx = true;
4744 cum->warn_sse = true;
4745 cum->warn_mmx = true;
4747 /* Because the type might mismatch between caller and callee, we need to
4748 use the actual type of the function for local calls.
4749 FIXME: cgraph_analyze can be told to actually record if function uses
4750 va_start so for local functions maybe_vaarg can be made aggressive
4751 helping K&R code.
4752 FIXME: once the type system is fixed, we won't need this code anymore. */
4753 if (i && i->local)
4754 fntype = TREE_TYPE (fndecl);
4755 cum->maybe_vaarg = (fntype
4756 ? (!prototype_p (fntype) || stdarg_p (fntype))
4757 : !libname);
4759 if (!TARGET_64BIT)
4761 /* If there are variable arguments, then we won't pass anything
4762 in registers in 32-bit mode. */
4763 if (stdarg_p (fntype))
4765 cum->nregs = 0;
4766 cum->sse_nregs = 0;
4767 cum->mmx_nregs = 0;
4768 cum->warn_avx = 0;
4769 cum->warn_sse = 0;
4770 cum->warn_mmx = 0;
4771 return;
4774 /* Use ecx and edx registers if function has fastcall attribute,
4775 else look for regparm information. */
4776 if (fntype)
4778 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
4780 cum->nregs = 2;
4781 cum->fastcall = 1;
4783 else
4784 cum->nregs = ix86_function_regparm (fntype, fndecl);
4787 /* Set up the number of SSE registers used for passing SFmode
4788 and DFmode arguments. Warn for mismatching ABI. */
4789 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
4793 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
4794 But in the case of vector types, it is some vector mode.
4796 When we have only some of our vector isa extensions enabled, then there
4797 are some modes for which vector_mode_supported_p is false. For these
4798 modes, the generic vector support in gcc will choose some non-vector mode
4799 in order to implement the type. By computing the natural mode, we'll
4800 select the proper ABI location for the operand and not depend on whatever
4801 the middle-end decides to do with these vector types.
4803 The middle-end can't deal with vector types larger than 16 bytes. In
4804 this case, we return the original mode and warn about the ABI change if
4805 CUM isn't NULL. */
4807 static enum machine_mode
4808 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
4810 enum machine_mode mode = TYPE_MODE (type);
4812 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
4814 HOST_WIDE_INT size = int_size_in_bytes (type);
4815 if ((size == 8 || size == 16 || size == 32)
4816 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
4817 && TYPE_VECTOR_SUBPARTS (type) > 1)
4819 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
4821 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4822 mode = MIN_MODE_VECTOR_FLOAT;
4823 else
4824 mode = MIN_MODE_VECTOR_INT;
4826 /* Get the mode which has this inner mode and number of units. */
4827 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
4828 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
4829 && GET_MODE_INNER (mode) == innermode)
4831 if (size == 32 && !TARGET_AVX)
4833 static bool warnedavx;
4835 if (cum
4836 && !warnedavx
4837 && cum->warn_avx)
4839 warnedavx = true;
4840 warning (0, "AVX vector argument without AVX "
4841 "enabled changes the ABI");
4843 return TYPE_MODE (type);
4845 else
4846 return mode;
4849 gcc_unreachable ();
4853 return mode;
4856 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
4857 this may not agree with the mode that the type system has chosen for the
4858 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
4859 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
4861 static rtx
4862 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
4863 unsigned int regno)
4865 rtx tmp;
4867 if (orig_mode != BLKmode)
4868 tmp = gen_rtx_REG (orig_mode, regno);
4869 else
4871 tmp = gen_rtx_REG (mode, regno);
4872 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
4873 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
4876 return tmp;
4879 /* x86-64 register passing implementation. See the x86-64 ABI for details.
4880 The goal of this code is to classify each 8-byte chunk of the incoming
4881 argument by register class and assign registers accordingly. */
4883 /* Return the union class of CLASS1 and CLASS2.
4884 See the x86-64 PS ABI for details. */
4886 static enum x86_64_reg_class
4887 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
4889 /* Rule #1: If both classes are equal, this is the resulting class. */
4890 if (class1 == class2)
4891 return class1;
4893 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
4894 the other class. */
4895 if (class1 == X86_64_NO_CLASS)
4896 return class2;
4897 if (class2 == X86_64_NO_CLASS)
4898 return class1;
4900 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
4901 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
4902 return X86_64_MEMORY_CLASS;
4904 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
4905 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
4906 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
4907 return X86_64_INTEGERSI_CLASS;
4908 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
4909 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
4910 return X86_64_INTEGER_CLASS;
4912 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
4913 MEMORY is used. */
4914 if (class1 == X86_64_X87_CLASS
4915 || class1 == X86_64_X87UP_CLASS
4916 || class1 == X86_64_COMPLEX_X87_CLASS
4917 || class2 == X86_64_X87_CLASS
4918 || class2 == X86_64_X87UP_CLASS
4919 || class2 == X86_64_COMPLEX_X87_CLASS)
4920 return X86_64_MEMORY_CLASS;
4922 /* Rule #6: Otherwise class SSE is used. */
4923 return X86_64_SSE_CLASS;
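/* Worked example (illustrative): merging X86_64_INTEGERSI_CLASS with
   X86_64_SSESF_CLASS yields X86_64_INTEGERSI_CLASS by rule #4, while
   merging X86_64_SSE_CLASS with X86_64_SSEDF_CLASS falls through to
   rule #6 and yields X86_64_SSE_CLASS. */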
4926 /* Classify the argument of type TYPE and mode MODE.
4927 CLASSES will be filled by the register class used to pass each word
4928 of the operand. The number of words is returned. In case the parameter
4929 should be passed in memory, 0 is returned. As a special case for zero
4930 sized containers, classes[0] will be NO_CLASS and 1 is returned.
4932 BIT_OFFSET is used internally for handling records; it specifies the
4933 offset in bits, modulo 256, to avoid overflow cases.
4935 See the x86-64 PS ABI for details.
4938 static int
4939 classify_argument (enum machine_mode mode, const_tree type,
4940 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
4942 HOST_WIDE_INT bytes =
4943 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
4944 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4946 /* Variable sized entities are always passed/returned in memory. */
4947 if (bytes < 0)
4948 return 0;
4950 if (mode != VOIDmode
4951 && targetm.calls.must_pass_in_stack (mode, type))
4952 return 0;
4954 if (type && AGGREGATE_TYPE_P (type))
4956 int i;
4957 tree field;
4958 enum x86_64_reg_class subclasses[MAX_CLASSES];
4960 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
4961 if (bytes > 32)
4962 return 0;
4964 for (i = 0; i < words; i++)
4965 classes[i] = X86_64_NO_CLASS;
4967 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
4968 signal the memory class, so handle it as a special case. */
4969 if (!words)
4971 classes[0] = X86_64_NO_CLASS;
4972 return 1;
4975 /* Classify each field of record and merge classes. */
4976 switch (TREE_CODE (type))
4978 case RECORD_TYPE:
4979 /* And now merge the fields of structure. */
4980 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4982 if (TREE_CODE (field) == FIELD_DECL)
4984 int num;
4986 if (TREE_TYPE (field) == error_mark_node)
4987 continue;
4989 /* Bitfields are always classified as integer. Handle them
4990 early, since later code would consider them to be
4991 misaligned integers. */
4992 if (DECL_BIT_FIELD (field))
4994 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
4995 i < ((int_bit_position (field) + (bit_offset % 64))
4996 + tree_low_cst (DECL_SIZE (field), 0)
4997 + 63) / 8 / 8; i++)
4998 classes[i] =
4999 merge_classes (X86_64_INTEGER_CLASS,
5000 classes[i]);
5002 else
5004 type = TREE_TYPE (field);
5006 /* Flexible array member is ignored. */
5007 if (TYPE_MODE (type) == BLKmode
5008 && TREE_CODE (type) == ARRAY_TYPE
5009 && TYPE_SIZE (type) == NULL_TREE
5010 && TYPE_DOMAIN (type) != NULL_TREE
5011 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5012 == NULL_TREE))
5014 static bool warned;
5016 if (!warned && warn_psabi)
5018 warned = true;
5019 inform (input_location,
5020 "The ABI of passing struct with"
5021 " a flexible array member has"
5022 " changed in GCC 4.4");
5024 continue;
5026 num = classify_argument (TYPE_MODE (type), type,
5027 subclasses,
5028 (int_bit_position (field)
5029 + bit_offset) % 256);
5030 if (!num)
5031 return 0;
5032 for (i = 0; i < num; i++)
5034 int pos =
5035 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5036 classes[i + pos] =
5037 merge_classes (subclasses[i], classes[i + pos]);
5042 break;
5044 case ARRAY_TYPE:
5045 /* Arrays are handled as small records. */
5047 int num;
5048 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5049 TREE_TYPE (type), subclasses, bit_offset);
5050 if (!num)
5051 return 0;
5053 /* The partial classes are now full classes. */
5054 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5055 subclasses[0] = X86_64_SSE_CLASS;
5056 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5057 && !((bit_offset % 64) == 0 && bytes == 4))
5058 subclasses[0] = X86_64_INTEGER_CLASS;
5060 for (i = 0; i < words; i++)
5061 classes[i] = subclasses[i % num];
5063 break;
5065 case UNION_TYPE:
5066 case QUAL_UNION_TYPE:
5067 /* Unions are similar to RECORD_TYPE but offset is always 0. */
5069 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5071 if (TREE_CODE (field) == FIELD_DECL)
5073 int num;
5075 if (TREE_TYPE (field) == error_mark_node)
5076 continue;
5078 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5079 TREE_TYPE (field), subclasses,
5080 bit_offset);
5081 if (!num)
5082 return 0;
5083 for (i = 0; i < num; i++)
5084 classes[i] = merge_classes (subclasses[i], classes[i]);
5087 break;
5089 default:
5090 gcc_unreachable ();
5093 if (words > 2)
5095 /* When size > 16 bytes, if the first class isn't
5096 X86_64_SSE_CLASS or any of the other ones isn't
5097 X86_64_SSEUP_CLASS, everything should be passed in
5098 memory. */
5099 if (classes[0] != X86_64_SSE_CLASS)
5100 return 0;
5102 for (i = 1; i < words; i++)
5103 if (classes[i] != X86_64_SSEUP_CLASS)
5104 return 0;
5107 /* Final merger cleanup. */
5108 for (i = 0; i < words; i++)
5110 /* If one class is MEMORY, everything should be passed in
5111 memory. */
5112 if (classes[i] == X86_64_MEMORY_CLASS)
5113 return 0;
5115 /* X86_64_SSEUP_CLASS should always be preceded by
5116 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5117 if (classes[i] == X86_64_SSEUP_CLASS
5118 && classes[i - 1] != X86_64_SSE_CLASS
5119 && classes[i - 1] != X86_64_SSEUP_CLASS)
5121 /* The first one should never be X86_64_SSEUP_CLASS. */
5122 gcc_assert (i != 0);
5123 classes[i] = X86_64_SSE_CLASS;
5126 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5127 everything should be passed in memory. */
5128 if (classes[i] == X86_64_X87UP_CLASS
5129 && (classes[i - 1] != X86_64_X87_CLASS))
5131 static bool warned;
5133 /* The first one should never be X86_64_X87UP_CLASS. */
5134 gcc_assert (i != 0);
5135 if (!warned && warn_psabi)
5137 warned = true;
5138 inform (input_location,
5139 "The ABI of passing union with long double"
5140 " has changed in GCC 4.4");
5142 return 0;
5145 return words;
5148 /* Compute the alignment needed. We align all types to their natural
5149 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
5150 if (mode != VOIDmode && mode != BLKmode)
5152 int mode_alignment = GET_MODE_BITSIZE (mode);
5154 if (mode == XFmode)
5155 mode_alignment = 128;
5156 else if (mode == XCmode)
5157 mode_alignment = 256;
5158 if (COMPLEX_MODE_P (mode))
5159 mode_alignment /= 2;
5160 /* Misaligned fields are always returned in memory. */
5161 if (bit_offset % mode_alignment)
5162 return 0;
5165 /* For V1xx modes, just use the base mode. */
5166 if (VECTOR_MODE_P (mode) && mode != V1DImode
5167 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5168 mode = GET_MODE_INNER (mode);
5170 /* Classification of atomic types. */
5171 switch (mode)
5173 case SDmode:
5174 case DDmode:
5175 classes[0] = X86_64_SSE_CLASS;
5176 return 1;
5177 case TDmode:
5178 classes[0] = X86_64_SSE_CLASS;
5179 classes[1] = X86_64_SSEUP_CLASS;
5180 return 2;
5181 case DImode:
5182 case SImode:
5183 case HImode:
5184 case QImode:
5185 case CSImode:
5186 case CHImode:
5187 case CQImode:
5189 int size = (bit_offset % 64) + (int) GET_MODE_BITSIZE (mode);
5191 if (size <= 32)
5193 classes[0] = X86_64_INTEGERSI_CLASS;
5194 return 1;
5196 else if (size <= 64)
5198 classes[0] = X86_64_INTEGER_CLASS;
5199 return 1;
5201 else if (size <= 64+32)
5203 classes[0] = X86_64_INTEGER_CLASS;
5204 classes[1] = X86_64_INTEGERSI_CLASS;
5205 return 2;
5207 else if (size <= 64+64)
5209 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5210 return 2;
5212 else
5213 gcc_unreachable ();
5215 case CDImode:
5216 case TImode:
5217 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5218 return 2;
5219 case COImode:
5220 case OImode:
5221 /* OImode shouldn't be used directly. */
5222 gcc_unreachable ();
5223 case CTImode:
5224 return 0;
5225 case SFmode:
5226 if (!(bit_offset % 64))
5227 classes[0] = X86_64_SSESF_CLASS;
5228 else
5229 classes[0] = X86_64_SSE_CLASS;
5230 return 1;
5231 case DFmode:
5232 classes[0] = X86_64_SSEDF_CLASS;
5233 return 1;
5234 case XFmode:
5235 classes[0] = X86_64_X87_CLASS;
5236 classes[1] = X86_64_X87UP_CLASS;
5237 return 2;
5238 case TFmode:
5239 classes[0] = X86_64_SSE_CLASS;
5240 classes[1] = X86_64_SSEUP_CLASS;
5241 return 2;
5242 case SCmode:
5243 classes[0] = X86_64_SSE_CLASS;
5244 if (!(bit_offset % 64))
5245 return 1;
5246 else
5248 static bool warned;
5250 if (!warned && warn_psabi)
5252 warned = true;
5253 inform (input_location,
5254 "The ABI of passing structure with complex float"
5255 " member has changed in GCC 4.4");
5257 classes[1] = X86_64_SSESF_CLASS;
5258 return 2;
5260 case DCmode:
5261 classes[0] = X86_64_SSEDF_CLASS;
5262 classes[1] = X86_64_SSEDF_CLASS;
5263 return 2;
5264 case XCmode:
5265 classes[0] = X86_64_COMPLEX_X87_CLASS;
5266 return 1;
5267 case TCmode:
5268 /* This mode is larger than 16 bytes. */
5269 return 0;
5270 case V8SFmode:
5271 case V8SImode:
5272 case V32QImode:
5273 case V16HImode:
5274 case V4DFmode:
5275 case V4DImode:
5276 classes[0] = X86_64_SSE_CLASS;
5277 classes[1] = X86_64_SSEUP_CLASS;
5278 classes[2] = X86_64_SSEUP_CLASS;
5279 classes[3] = X86_64_SSEUP_CLASS;
5280 return 4;
5281 case V4SFmode:
5282 case V4SImode:
5283 case V16QImode:
5284 case V8HImode:
5285 case V2DFmode:
5286 case V2DImode:
5287 classes[0] = X86_64_SSE_CLASS;
5288 classes[1] = X86_64_SSEUP_CLASS;
5289 return 2;
5290 case V1DImode:
5291 case V2SFmode:
5292 case V2SImode:
5293 case V4HImode:
5294 case V8QImode:
5295 classes[0] = X86_64_SSE_CLASS;
5296 return 1;
5297 case BLKmode:
5298 case VOIDmode:
5299 return 0;
5300 default:
5301 gcc_assert (VECTOR_MODE_P (mode));
5303 if (bytes > 16)
5304 return 0;
5306 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5308 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5309 classes[0] = X86_64_INTEGERSI_CLASS;
5310 else
5311 classes[0] = X86_64_INTEGER_CLASS;
5312 classes[1] = X86_64_INTEGER_CLASS;
5313 return 1 + (bytes > 8);
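/* Worked example (illustrative; the type is invented for this sketch):
   for
       struct s { double d; long l; };
   the RECORD_TYPE case above classifies the DFmode field's eightbyte as
   X86_64_SSEDF_CLASS and the DImode field's eightbyte as
   X86_64_INTEGER_CLASS, so classify_argument returns 2 and the struct
   travels in one SSE register and one integer register.  */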
5317 /* Examine the argument and set the number of registers required in each
5318 class. Return 0 iff the parameter should be passed in memory. */
5319 static int
5320 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5321 int *int_nregs, int *sse_nregs)
5323 enum x86_64_reg_class regclass[MAX_CLASSES];
5324 int n = classify_argument (mode, type, regclass, 0);
5326 *int_nregs = 0;
5327 *sse_nregs = 0;
5328 if (!n)
5329 return 0;
5330 for (n--; n >= 0; n--)
5331 switch (regclass[n])
5333 case X86_64_INTEGER_CLASS:
5334 case X86_64_INTEGERSI_CLASS:
5335 (*int_nregs)++;
5336 break;
5337 case X86_64_SSE_CLASS:
5338 case X86_64_SSESF_CLASS:
5339 case X86_64_SSEDF_CLASS:
5340 (*sse_nregs)++;
5341 break;
5342 case X86_64_NO_CLASS:
5343 case X86_64_SSEUP_CLASS:
5344 break;
5345 case X86_64_X87_CLASS:
5346 case X86_64_X87UP_CLASS:
5347 if (!in_return)
5348 return 0;
5349 break;
5350 case X86_64_COMPLEX_X87_CLASS:
5351 return in_return ? 2 : 0;
5352 case X86_64_MEMORY_CLASS:
5353 gcc_unreachable ();
5355 return 1;
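/* Usage sketch (this mirrors what function_arg_advance_64 below does):

       int int_nregs, sse_nregs;
       if (examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
           && int_nregs <= cum->nregs && sse_nregs <= cum->sse_nregs)
         ... pass in registers ...
       else
         ... pass in memory ...  */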
5358 /* Construct a container for the argument as used by the GCC interface. See
5359 FUNCTION_ARG for a detailed description. */
5361 static rtx
5362 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5363 const_tree type, int in_return, int nintregs, int nsseregs,
5364 const int *intreg, int sse_regno)
5366 /* The following variables hold the static issued_error state. */
5367 static bool issued_sse_arg_error;
5368 static bool issued_sse_ret_error;
5369 static bool issued_x87_ret_error;
5371 enum machine_mode tmpmode;
5372 int bytes =
5373 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5374 enum x86_64_reg_class regclass[MAX_CLASSES];
5375 int n;
5376 int i;
5377 int nexps = 0;
5378 int needed_sseregs, needed_intregs;
5379 rtx exp[MAX_CLASSES];
5380 rtx ret;
5382 n = classify_argument (mode, type, regclass, 0);
5383 if (!n)
5384 return NULL;
5385 if (!examine_argument (mode, type, in_return, &needed_intregs,
5386 &needed_sseregs))
5387 return NULL;
5388 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5389 return NULL;
5391 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5392 some less clueful developer tries to use floating-point anyway. */
5393 if (needed_sseregs && !TARGET_SSE)
5395 if (in_return)
5397 if (!issued_sse_ret_error)
5399 error ("SSE register return with SSE disabled");
5400 issued_sse_ret_error = true;
5403 else if (!issued_sse_arg_error)
5405 error ("SSE register argument with SSE disabled");
5406 issued_sse_arg_error = true;
5408 return NULL;
5411 /* Likewise, error if the ABI requires us to return values in the
5412 x87 registers and the user specified -mno-80387. */
5413 if (!TARGET_80387 && in_return)
5414 for (i = 0; i < n; i++)
5415 if (regclass[i] == X86_64_X87_CLASS
5416 || regclass[i] == X86_64_X87UP_CLASS
5417 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5419 if (!issued_x87_ret_error)
5421 error ("x87 register return with x87 disabled");
5422 issued_x87_ret_error = true;
5424 return NULL;
5427 /* First construct the simple cases. Avoid SCmode, since we want to use
5428 a single register to pass this type. */
5429 if (n == 1 && mode != SCmode)
5430 switch (regclass[0])
5432 case X86_64_INTEGER_CLASS:
5433 case X86_64_INTEGERSI_CLASS:
5434 return gen_rtx_REG (mode, intreg[0]);
5435 case X86_64_SSE_CLASS:
5436 case X86_64_SSESF_CLASS:
5437 case X86_64_SSEDF_CLASS:
5438 if (mode != BLKmode)
5439 return gen_reg_or_parallel (mode, orig_mode,
5440 SSE_REGNO (sse_regno));
5441 break;
5442 case X86_64_X87_CLASS:
5443 case X86_64_COMPLEX_X87_CLASS:
5444 return gen_rtx_REG (mode, FIRST_STACK_REG);
5445 case X86_64_NO_CLASS:
5446 /* Zero sized array, struct or class. */
5447 return NULL;
5448 default:
5449 gcc_unreachable ();
5451 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5452 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5453 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5454 if (n == 4
5455 && regclass[0] == X86_64_SSE_CLASS
5456 && regclass[1] == X86_64_SSEUP_CLASS
5457 && regclass[2] == X86_64_SSEUP_CLASS
5458 && regclass[3] == X86_64_SSEUP_CLASS
5459 && mode != BLKmode)
5460 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5462 if (n == 2
5463 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5464 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5465 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5466 && regclass[1] == X86_64_INTEGER_CLASS
5467 && (mode == CDImode || mode == TImode || mode == TFmode)
5468 && intreg[0] + 1 == intreg[1])
5469 return gen_rtx_REG (mode, intreg[0]);
5471 /* Otherwise figure out the entries of the PARALLEL. */
5472 for (i = 0; i < n; i++)
5474 int pos;
5476 switch (regclass[i])
5478 case X86_64_NO_CLASS:
5479 break;
5480 case X86_64_INTEGER_CLASS:
5481 case X86_64_INTEGERSI_CLASS:
5482 /* Merge TImodes on aligned occasions here too. */
5483 if (i * 8 + 8 > bytes)
5484 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5485 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5486 tmpmode = SImode;
5487 else
5488 tmpmode = DImode;
5489 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
5490 if (tmpmode == BLKmode)
5491 tmpmode = DImode;
5492 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5493 gen_rtx_REG (tmpmode, *intreg),
5494 GEN_INT (i*8));
5495 intreg++;
5496 break;
5497 case X86_64_SSESF_CLASS:
5498 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5499 gen_rtx_REG (SFmode,
5500 SSE_REGNO (sse_regno)),
5501 GEN_INT (i*8));
5502 sse_regno++;
5503 break;
5504 case X86_64_SSEDF_CLASS:
5505 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5506 gen_rtx_REG (DFmode,
5507 SSE_REGNO (sse_regno)),
5508 GEN_INT (i*8));
5509 sse_regno++;
5510 break;
5511 case X86_64_SSE_CLASS:
5512 pos = i;
5513 switch (n)
5515 case 1:
5516 tmpmode = DImode;
5517 break;
5518 case 2:
5519 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5521 tmpmode = TImode;
5522 i++;
5524 else
5525 tmpmode = DImode;
5526 break;
5527 case 4:
5528 gcc_assert (i == 0
5529 && regclass[1] == X86_64_SSEUP_CLASS
5530 && regclass[2] == X86_64_SSEUP_CLASS
5531 && regclass[3] == X86_64_SSEUP_CLASS);
5532 tmpmode = OImode;
5533 i += 3;
5534 break;
5535 default:
5536 gcc_unreachable ();
5538 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5539 gen_rtx_REG (tmpmode,
5540 SSE_REGNO (sse_regno)),
5541 GEN_INT (pos*8));
5542 sse_regno++;
5543 break;
5544 default:
5545 gcc_unreachable ();
5549 /* Empty aligned struct, union or class. */
5550 if (nexps == 0)
5551 return NULL;
5553 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5554 for (i = 0; i < nexps; i++)
5555 XVECEXP (ret, 0, i) = exp [i];
5556 return ret;
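/* For illustration (a sketch; the concrete registers depend on INTREG
   and SSE_REGNO): passing struct s { double d; long l; } as the first
   argument yields a PARALLEL of the form

       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:DI rdi) (const_int 8))])

   i.e. the SSEDF eightbyte at offset 0 in an SSE register and the
   INTEGER eightbyte at offset 8 in a general register.  */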
5559 /* Update the data in CUM to advance over an argument of mode MODE
5560 and data type TYPE. (TYPE is null for libcalls where that information
5561 may not be available.) */
5563 static void
5564 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5565 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5567 switch (mode)
5569 default:
5570 break;
5572 case BLKmode:
5573 if (bytes < 0)
5574 break;
5575 /* FALLTHRU */
5577 case DImode:
5578 case SImode:
5579 case HImode:
5580 case QImode:
5581 cum->words += words;
5582 cum->nregs -= words;
5583 cum->regno += words;
5585 if (cum->nregs <= 0)
5587 cum->nregs = 0;
5588 cum->regno = 0;
5590 break;
5592 case OImode:
5593 /* OImode shouldn't be used directly. */
5594 gcc_unreachable ();
5596 case DFmode:
5597 if (cum->float_in_sse < 2)
5598 break;
5599 case SFmode:
5600 if (cum->float_in_sse < 1)
5601 break;
5602 /* FALLTHRU */
5604 case V8SFmode:
5605 case V8SImode:
5606 case V32QImode:
5607 case V16HImode:
5608 case V4DFmode:
5609 case V4DImode:
5610 case TImode:
5611 case V16QImode:
5612 case V8HImode:
5613 case V4SImode:
5614 case V2DImode:
5615 case V4SFmode:
5616 case V2DFmode:
5617 if (!type || !AGGREGATE_TYPE_P (type))
5619 cum->sse_words += words;
5620 cum->sse_nregs -= 1;
5621 cum->sse_regno += 1;
5622 if (cum->sse_nregs <= 0)
5624 cum->sse_nregs = 0;
5625 cum->sse_regno = 0;
5628 break;
5630 case V8QImode:
5631 case V4HImode:
5632 case V2SImode:
5633 case V2SFmode:
5634 case V1DImode:
5635 if (!type || !AGGREGATE_TYPE_P (type))
5637 cum->mmx_words += words;
5638 cum->mmx_nregs -= 1;
5639 cum->mmx_regno += 1;
5640 if (cum->mmx_nregs <= 0)
5642 cum->mmx_nregs = 0;
5643 cum->mmx_regno = 0;
5646 break;
5650 static void
5651 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5652 tree type, HOST_WIDE_INT words, int named)
5654 int int_nregs, sse_nregs;
5656 /* Unnamed 256bit vector mode parameters are passed on the stack. */
5657 if (!named && VALID_AVX256_REG_MODE (mode))
5658 return;
5660 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5661 cum->words += words;
5662 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5664 cum->nregs -= int_nregs;
5665 cum->sse_nregs -= sse_nregs;
5666 cum->regno += int_nregs;
5667 cum->sse_regno += sse_nregs;
5669 else
5670 cum->words += words;
5673 static void
5674 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5675 HOST_WIDE_INT words)
5677 /* Otherwise, this should be passed indirectly. */
5678 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5680 cum->words += words;
5681 if (cum->nregs > 0)
5683 cum->nregs -= 1;
5684 cum->regno += 1;
5688 void
5689 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5690 tree type, int named)
5692 HOST_WIDE_INT bytes, words;
5694 if (mode == BLKmode)
5695 bytes = int_size_in_bytes (type);
5696 else
5697 bytes = GET_MODE_SIZE (mode);
5698 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5700 if (type)
5701 mode = type_natural_mode (type, NULL);
5703 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5704 function_arg_advance_ms_64 (cum, bytes, words);
5705 else if (TARGET_64BIT)
5706 function_arg_advance_64 (cum, mode, type, words, named);
5707 else
5708 function_arg_advance_32 (cum, mode, type, bytes, words);
5711 /* Define where to put the arguments to a function.
5712 Value is zero to push the argument on the stack,
5713 or a hard register in which to store the argument.
5715 MODE is the argument's machine mode.
5716 TYPE is the data type of the argument (as a tree).
5717 This is null for libcalls where that information may
5718 not be available.
5719 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5720 the preceding args and about the function being called.
5721 NAMED is nonzero if this argument is a named parameter
5722 (otherwise it is an extra parameter matching an ellipsis). */
5724 static rtx
5725 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5726 enum machine_mode orig_mode, tree type,
5727 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5729 static bool warnedsse, warnedmmx;
5731 /* Avoid the AL settings for the Unix64 ABI. */
5732 if (mode == VOIDmode)
5733 return constm1_rtx;
5735 switch (mode)
5737 default:
5738 break;
5740 case BLKmode:
5741 if (bytes < 0)
5742 break;
5743 /* FALLTHRU */
5744 case DImode:
5745 case SImode:
5746 case HImode:
5747 case QImode:
5748 if (words <= cum->nregs)
5750 int regno = cum->regno;
5752 /* Fastcall allocates the first two DWORD (SImode) or
5753 smaller arguments to ECX and EDX if they aren't
5754 aggregate types. */
5755 if (cum->fastcall)
5757 if (mode == BLKmode
5758 || mode == DImode
5759 || (type && AGGREGATE_TYPE_P (type)))
5760 break;
5762 /* ECX, not EAX, is the first allocated register. */
5763 if (regno == AX_REG)
5764 regno = CX_REG;
5766 return gen_rtx_REG (mode, regno);
5768 break;
5770 case DFmode:
5771 if (cum->float_in_sse < 2)
5772 break;
5773 case SFmode:
5774 if (cum->float_in_sse < 1)
5775 break;
5776 /* FALLTHRU */
5777 case TImode:
5778 /* In 32bit, we pass TImode in xmm registers. */
5779 case V16QImode:
5780 case V8HImode:
5781 case V4SImode:
5782 case V2DImode:
5783 case V4SFmode:
5784 case V2DFmode:
5785 if (!type || !AGGREGATE_TYPE_P (type))
5787 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
5789 warnedsse = true;
5790 warning (0, "SSE vector argument without SSE enabled "
5791 "changes the ABI");
5793 if (cum->sse_nregs)
5794 return gen_reg_or_parallel (mode, orig_mode,
5795 cum->sse_regno + FIRST_SSE_REG);
5797 break;
5799 case OImode:
5800 /* OImode shouldn't be used directly. */
5801 gcc_unreachable ();
5803 case V8SFmode:
5804 case V8SImode:
5805 case V32QImode:
5806 case V16HImode:
5807 case V4DFmode:
5808 case V4DImode:
5809 if (!type || !AGGREGATE_TYPE_P (type))
5811 if (cum->sse_nregs)
5812 return gen_reg_or_parallel (mode, orig_mode,
5813 cum->sse_regno + FIRST_SSE_REG);
5815 break;
5817 case V8QImode:
5818 case V4HImode:
5819 case V2SImode:
5820 case V2SFmode:
5821 case V1DImode:
5822 if (!type || !AGGREGATE_TYPE_P (type))
5824 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
5826 warnedmmx = true;
5827 warning (0, "MMX vector argument without MMX enabled "
5828 "changes the ABI");
5830 if (cum->mmx_nregs)
5831 return gen_reg_or_parallel (mode, orig_mode,
5832 cum->mmx_regno + FIRST_MMX_REG);
5834 break;
5837 return NULL_RTX;
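/* Fastcall sketch (declaration invented for illustration): given

       void __attribute__((fastcall)) f (int a, int b, int c);

   the integer case above assigns A to %ecx and B to %edx, while C falls
   through to the stack; aggregates and DImode values never use the
   fastcall registers.  */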
5840 static rtx
5841 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5842 enum machine_mode orig_mode, tree type, int named)
5844 /* Handle a hidden AL argument containing the number of registers
5845 for varargs x86-64 functions. */
5846 if (mode == VOIDmode)
5847 return GEN_INT (cum->maybe_vaarg
5848 ? (cum->sse_nregs < 0
5849 ? (cum->call_abi == DEFAULT_ABI
5850 ? SSE_REGPARM_MAX
5851 : (DEFAULT_ABI != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
5852 : X64_SSE_REGPARM_MAX))
5853 : cum->sse_regno)
5854 : -1);
5856 switch (mode)
5858 default:
5859 break;
5861 case V8SFmode:
5862 case V8SImode:
5863 case V32QImode:
5864 case V16HImode:
5865 case V4DFmode:
5866 case V4DImode:
5867 /* Unnamed 256bit vector mode parameters are passed on the stack. */
5868 if (!named)
5869 return NULL;
5870 break;
5873 return construct_container (mode, orig_mode, type, 0, cum->nregs,
5874 cum->sse_nregs,
5875 &x86_64_int_parameter_registers [cum->regno],
5876 cum->sse_regno);
5879 static rtx
5880 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5881 enum machine_mode orig_mode, int named,
5882 HOST_WIDE_INT bytes)
5884 unsigned int regno;
5886 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
5887 We use the value -2 to specify that the current function call uses MS_ABI. */
5888 if (mode == VOIDmode)
5889 return GEN_INT (-2);
5891 /* If we've run out of registers, it goes on the stack. */
5892 if (cum->nregs == 0)
5893 return NULL_RTX;
5895 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
5897 /* Only floating point modes are passed in anything but integer regs. */
5898 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
5900 if (named)
5901 regno = cum->regno + FIRST_SSE_REG;
5902 else
5904 rtx t1, t2;
5906 /* Unnamed floating parameters are passed in both the
5907 SSE and integer registers. */
5908 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
5909 t2 = gen_rtx_REG (mode, regno);
5910 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
5911 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
5912 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
5915 /* Handle aggregate types passed in registers. */
5916 if (orig_mode == BLKmode)
5918 if (bytes > 0 && bytes <= 8)
5919 mode = (bytes > 4 ? DImode : SImode);
5920 if (mode == BLKmode)
5921 mode = DImode;
5924 return gen_reg_or_parallel (mode, orig_mode, regno);
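/* Resulting mapping under the Microsoft x64 convention (informal):
   argument slot 0 goes in %rcx (or %xmm0 for a named float), slot 1 in
   %rdx/%xmm1, slot 2 in %r8/%xmm2, slot 3 in %r9/%xmm3; once cum->nregs
   reaches zero, every further argument goes on the stack.  */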
5928 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
5929 tree type, int named)
5931 enum machine_mode mode = omode;
5932 HOST_WIDE_INT bytes, words;
5934 if (mode == BLKmode)
5935 bytes = int_size_in_bytes (type);
5936 else
5937 bytes = GET_MODE_SIZE (mode);
5938 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5940 /* To simplify the code below, represent vector types with a vector mode
5941 even if MMX/SSE are not active. */
5942 if (type && TREE_CODE (type) == VECTOR_TYPE)
5943 mode = type_natural_mode (type, cum);
5945 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5946 return function_arg_ms_64 (cum, mode, omode, named, bytes);
5947 else if (TARGET_64BIT)
5948 return function_arg_64 (cum, mode, omode, type, named);
5949 else
5950 return function_arg_32 (cum, mode, omode, type, bytes, words);
5953 /* A C expression that indicates when an argument must be passed by
5954 reference. If nonzero for an argument, a copy of that argument is
5955 made in memory and a pointer to the argument is passed instead of
5956 the argument itself. The pointer is passed in whatever way is
5957 appropriate for passing a pointer to that type. */
5959 static bool
5960 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5961 enum machine_mode mode ATTRIBUTE_UNUSED,
5962 const_tree type, bool named ATTRIBUTE_UNUSED)
5964 /* See Windows x64 Software Convention. */
5965 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5967 int msize = (int) GET_MODE_SIZE (mode);
5968 if (type)
5970 /* Arrays are passed by reference. */
5971 if (TREE_CODE (type) == ARRAY_TYPE)
5972 return true;
5974 if (AGGREGATE_TYPE_P (type))
5976 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
5977 are passed by reference. */
5978 msize = int_size_in_bytes (type);
5982 /* __m128 is passed by reference. */
5983 switch (msize) {
5984 case 1: case 2: case 4: case 8:
5985 break;
5986 default:
5987 return true;
5990 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
5991 return 1;
5993 return 0;
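/* Examples (informal): under the MS ABI a 16-byte __m128 or a 12-byte
   struct falls into the default case of the switch above and is passed
   by reference, while an 8-byte struct stays in the 1/2/4/8 fast path
   and is passed by value in a register.  */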
5996 /* Return true when TYPE should be 128-bit aligned for the 32-bit argument
5997 passing ABI. */
5998 static bool
5999 contains_aligned_value_p (tree type)
6001 enum machine_mode mode = TYPE_MODE (type);
6002 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6003 || mode == TDmode
6004 || mode == TFmode
6005 || mode == TCmode)
6006 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6007 return true;
6008 if (TYPE_ALIGN (type) < 128)
6009 return false;
6011 if (AGGREGATE_TYPE_P (type))
6013 /* Walk the aggregates recursively. */
6014 switch (TREE_CODE (type))
6016 case RECORD_TYPE:
6017 case UNION_TYPE:
6018 case QUAL_UNION_TYPE:
6020 tree field;
6022 /* Walk all the structure fields. */
6023 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6025 if (TREE_CODE (field) == FIELD_DECL
6026 && contains_aligned_value_p (TREE_TYPE (field)))
6027 return true;
6029 break;
6032 case ARRAY_TYPE:
6033 /* Just in case some languages pass arrays by value. */
6034 if (contains_aligned_value_p (TREE_TYPE (type)))
6035 return true;
6036 break;
6038 default:
6039 gcc_unreachable ();
6042 return false;
6045 /* Gives the alignment boundary, in bits, of an argument with the
6046 specified mode and type. */
6049 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6051 int align;
6052 if (type)
6054 /* Since the canonical type is used for the call, convert to the
6055 canonical type if needed. */
6056 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6057 type = TYPE_CANONICAL (type);
6058 align = TYPE_ALIGN (type);
6060 else
6061 align = GET_MODE_ALIGNMENT (mode);
6062 if (align < PARM_BOUNDARY)
6063 align = PARM_BOUNDARY;
6064 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6065 natural boundaries. */
6066 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6068 /* The i386 ABI defines all arguments to be 4-byte aligned. We have to
6069 make an exception for SSE modes, since these require 128-bit
6070 alignment.
6072 The handling here differs from field_alignment. ICC aligns MMX
6073 arguments to 4-byte boundaries, while structure fields are aligned
6074 to 8-byte boundaries. */
6075 if (!type)
6077 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6078 align = PARM_BOUNDARY;
6080 else
6082 if (!contains_aligned_value_p (type))
6083 align = PARM_BOUNDARY;
6086 if (align > BIGGEST_ALIGNMENT)
6087 align = BIGGEST_ALIGNMENT;
6088 return align;
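/* Examples (informal): on ia32 a plain double argument ends up with
   PARM_BOUNDARY (32 bits) despite its 64-bit mode alignment, while an
   __m128 argument keeps its 128-bit boundary through the SSE_REG_MODE_P
   exception above, assuming SSE is enabled.  */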
6091 /* Return true if N is a possible register number of a function value. */
6093 bool
6094 ix86_function_value_regno_p (int regno)
6096 switch (regno)
6098 case 0:
6099 return true;
6101 case FIRST_FLOAT_REG:
6102 /* TODO: The function should depend on current function ABI but
6103 builtins.c would need updating then. Therefore we use the
6104 default ABI. */
6105 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
6106 return false;
6107 return TARGET_FLOAT_RETURNS_IN_80387;
6109 case FIRST_SSE_REG:
6110 return TARGET_SSE;
6112 case FIRST_MMX_REG:
6113 if (TARGET_MACHO || TARGET_64BIT)
6114 return false;
6115 return TARGET_MMX;
6118 return false;
6121 /* Define how to find the value returned by a function.
6122 VALTYPE is the data type of the value (as a tree).
6123 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6124 otherwise, FUNC is 0. */
6126 static rtx
6127 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6128 const_tree fntype, const_tree fn)
6130 unsigned int regno;
6132 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6133 we normally prevent this case when mmx is not available. However
6134 some ABIs may require the result to be returned like DImode. */
6135 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6136 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6138 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6139 we prevent this case when sse is not available. However some ABIs
6140 may require the result to be returned like integer TImode. */
6141 else if (mode == TImode
6142 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6143 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6145 /* 32-byte vector modes in %ymm0. */
6146 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6147 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6149 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6150 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6151 regno = FIRST_FLOAT_REG;
6152 else
6153 /* Most things go in %eax. */
6154 regno = AX_REG;
6156 /* Override FP return register with %xmm0 for local functions when
6157 SSE math is enabled or for functions with sseregparm attribute. */
6158 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6160 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6161 if ((sse_level >= 1 && mode == SFmode)
6162 || (sse_level == 2 && mode == DFmode))
6163 regno = FIRST_SSE_REG;
6166 /* OImode shouldn't be used directly. */
6167 gcc_assert (mode != OImode);
6169 return gen_rtx_REG (orig_mode, regno);
6172 static rtx
6173 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6174 const_tree valtype)
6176 rtx ret;
6178 /* Handle libcalls, which don't provide a type node. */
6179 if (valtype == NULL)
6181 switch (mode)
6183 case SFmode:
6184 case SCmode:
6185 case DFmode:
6186 case DCmode:
6187 case TFmode:
6188 case SDmode:
6189 case DDmode:
6190 case TDmode:
6191 return gen_rtx_REG (mode, FIRST_SSE_REG);
6192 case XFmode:
6193 case XCmode:
6194 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6195 case TCmode:
6196 return NULL;
6197 default:
6198 return gen_rtx_REG (mode, AX_REG);
6202 ret = construct_container (mode, orig_mode, valtype, 1,
6203 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6204 x86_64_int_return_registers, 0);
6206 /* For zero sized structures, construct_container returns NULL, but we
6207 need to keep the rest of the compiler happy by returning a meaningful value. */
6208 if (!ret)
6209 ret = gen_rtx_REG (orig_mode, AX_REG);
6211 return ret;
6214 static rtx
6215 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6217 unsigned int regno = AX_REG;
6219 if (TARGET_SSE)
6221 switch (GET_MODE_SIZE (mode))
6223 case 16:
6224 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6225 && !COMPLEX_MODE_P (mode))
6226 regno = FIRST_SSE_REG;
6227 break;
6228 case 8:
6229 case 4:
6230 if (mode == SFmode || mode == DFmode)
6231 regno = FIRST_SSE_REG;
6232 break;
6233 default:
6234 break;
6237 return gen_rtx_REG (orig_mode, regno);
6240 static rtx
6241 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6242 enum machine_mode orig_mode, enum machine_mode mode)
6244 const_tree fn, fntype;
6246 fn = NULL_TREE;
6247 if (fntype_or_decl && DECL_P (fntype_or_decl))
6248 fn = fntype_or_decl;
6249 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6251 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6252 return function_value_ms_64 (orig_mode, mode);
6253 else if (TARGET_64BIT)
6254 return function_value_64 (orig_mode, mode, valtype);
6255 else
6256 return function_value_32 (orig_mode, mode, fntype, fn);
6259 static rtx
6260 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6261 bool outgoing ATTRIBUTE_UNUSED)
6263 enum machine_mode mode, orig_mode;
6265 orig_mode = TYPE_MODE (valtype);
6266 mode = type_natural_mode (valtype, NULL);
6267 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6271 ix86_libcall_value (enum machine_mode mode)
6273 return ix86_function_value_1 (NULL, NULL, mode, mode);
6276 /* Return true iff type is returned in memory. */
6278 static int ATTRIBUTE_UNUSED
6279 return_in_memory_32 (const_tree type, enum machine_mode mode)
6281 HOST_WIDE_INT size;
6283 if (mode == BLKmode)
6284 return 1;
6286 size = int_size_in_bytes (type);
6288 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6289 return 0;
6291 if (VECTOR_MODE_P (mode) || mode == TImode)
6293 /* User-created vectors small enough to fit in EAX. */
6294 if (size < 8)
6295 return 0;
6297 /* MMX/3dNow values are returned in MM0,
6298 except when it doesn't exist. */
6299 if (size == 8)
6300 return (TARGET_MMX ? 0 : 1);
6302 /* SSE values are returned in XMM0, except when it doesn't exist. */
6303 if (size == 16)
6304 return (TARGET_SSE ? 0 : 1);
6306 /* AVX values are returned in YMM0, except when it doesn't exist. */
6307 if (size == 32)
6308 return TARGET_AVX ? 0 : 1;
6311 if (mode == XFmode)
6312 return 0;
6314 if (size > 12)
6315 return 1;
6317 /* OImode shouldn't be used directly. */
6318 gcc_assert (mode != OImode);
6320 return 0;
6323 static int ATTRIBUTE_UNUSED
6324 return_in_memory_64 (const_tree type, enum machine_mode mode)
6326 int needed_intregs, needed_sseregs;
6327 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6330 static int ATTRIBUTE_UNUSED
6331 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6333 HOST_WIDE_INT size = int_size_in_bytes (type);
6335 /* __m128 is returned in xmm0. */
6336 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6337 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6338 return 0;
6340 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6341 return (size != 1 && size != 2 && size != 4 && size != 8);
6344 static bool
6345 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6347 #ifdef SUBTARGET_RETURN_IN_MEMORY
6348 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6349 #else
6350 const enum machine_mode mode = type_natural_mode (type, NULL);
6352 if (TARGET_64BIT)
6354 if (ix86_function_type_abi (fntype) == MS_ABI)
6355 return return_in_memory_ms_64 (type, mode);
6356 else
6357 return return_in_memory_64 (type, mode);
6359 else
6360 return return_in_memory_32 (type, mode);
6361 #endif
6364 /* Return true iff TYPE is returned in memory. This version is used
6365 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6366 but differs notably in that when MMX is available, 8-byte vectors
6367 are returned in memory, rather than in MMX registers. */
6369 bool
6370 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6372 int size;
6373 enum machine_mode mode = type_natural_mode (type, NULL);
6375 if (TARGET_64BIT)
6376 return return_in_memory_64 (type, mode);
6378 if (mode == BLKmode)
6379 return 1;
6381 size = int_size_in_bytes (type);
6383 if (VECTOR_MODE_P (mode))
6385 /* Return in memory only if MMX registers *are* available. This
6386 seems backwards, but it is consistent with the existing
6387 Solaris x86 ABI. */
6388 if (size == 8)
6389 return TARGET_MMX;
6390 if (size == 16)
6391 return !TARGET_SSE;
6393 else if (mode == TImode)
6394 return !TARGET_SSE;
6395 else if (mode == XFmode)
6396 return 0;
6398 return size > 12;
6401 /* When returning SSE vector types, we have a choice of either
6402 (1) being ABI incompatible with a -march switch, or
6403 (2) generating an error.
6404 Given no good solution, I think the safest thing is one warning.
6405 The user won't be able to use -Werror, but....
6407 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6408 called in response to actually generating a caller or callee that
6409 uses such a type, as opposed to TARGET_RETURN_IN_MEMORY, which is called
6410 via aggregate_value_p for general type probing from tree-ssa. */
6412 static rtx
6413 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6415 static bool warnedsse, warnedmmx;
6417 if (!TARGET_64BIT && type)
6419 /* Look at the return type of the function, not the function type. */
6420 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6422 if (!TARGET_SSE && !warnedsse)
6424 if (mode == TImode
6425 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6427 warnedsse = true;
6428 warning (0, "SSE vector return without SSE enabled "
6429 "changes the ABI");
6433 if (!TARGET_MMX && !warnedmmx)
6435 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6437 warnedmmx = true;
6438 warning (0, "MMX vector return without MMX enabled "
6439 "changes the ABI");
6444 return NULL;
6448 /* Create the va_list data type. */
6450 /* Return the calling convention specific va_list data type.
6451 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6453 static tree
6454 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6456 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6458 /* For i386 we use a plain pointer to the argument area. */
6459 if (!TARGET_64BIT || abi == MS_ABI)
6460 return build_pointer_type (char_type_node);
6462 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6463 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
6465 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
6466 unsigned_type_node);
6467 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
6468 unsigned_type_node);
6469 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
6470 ptr_type_node);
6471 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
6472 ptr_type_node);
6474 va_list_gpr_counter_field = f_gpr;
6475 va_list_fpr_counter_field = f_fpr;
6477 DECL_FIELD_CONTEXT (f_gpr) = record;
6478 DECL_FIELD_CONTEXT (f_fpr) = record;
6479 DECL_FIELD_CONTEXT (f_ovf) = record;
6480 DECL_FIELD_CONTEXT (f_sav) = record;
6482 TREE_CHAIN (record) = type_decl;
6483 TYPE_NAME (record) = type_decl;
6484 TYPE_FIELDS (record) = f_gpr;
6485 TREE_CHAIN (f_gpr) = f_fpr;
6486 TREE_CHAIN (f_fpr) = f_ovf;
6487 TREE_CHAIN (f_ovf) = f_sav;
6489 layout_type (record);
6491 /* The correct type is an array type of one element. */
6492 return build_array_type (record, build_index_type (size_zero_node));
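/* The record laid out above is what user code sees as

       typedef struct __va_list_tag {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } va_list[1];

   (an informal rendering of the SysV x86-64 va_list).  */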
6495 /* Set up the builtin va_list data type and, for 64-bit, the additional
6496 calling convention specific va_list data types. */
6498 static tree
6499 ix86_build_builtin_va_list (void)
6501 tree ret = ix86_build_builtin_va_list_abi (DEFAULT_ABI);
6503 /* Initialize the ABI-specific va_list builtin types. */
6504 if (TARGET_64BIT)
6506 tree t;
6507 if (DEFAULT_ABI == MS_ABI)
6509 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6510 if (TREE_CODE (t) != RECORD_TYPE)
6511 t = build_variant_type_copy (t);
6512 sysv_va_list_type_node = t;
6514 else
6516 t = ret;
6517 if (TREE_CODE (t) != RECORD_TYPE)
6518 t = build_variant_type_copy (t);
6519 sysv_va_list_type_node = t;
6521 if (DEFAULT_ABI != MS_ABI)
6523 t = ix86_build_builtin_va_list_abi (MS_ABI);
6524 if (TREE_CODE (t) != RECORD_TYPE)
6525 t = build_variant_type_copy (t);
6526 ms_va_list_type_node = t;
6528 else
6530 t = ret;
6531 if (TREE_CODE (t) != RECORD_TYPE)
6532 t = build_variant_type_copy (t);
6533 ms_va_list_type_node = t;
6537 return ret;
6540 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6542 static void
6543 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6545 rtx save_area, mem;
6546 rtx label;
6547 rtx label_ref;
6548 rtx tmp_reg;
6549 rtx nsse_reg;
6550 alias_set_type set;
6551 int i;
6552 int regparm = ix86_regparm;
6554 if (cum->call_abi != DEFAULT_ABI)
6555 regparm = DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX : X64_REGPARM_MAX;
6557 /* GPR size of varargs save area. */
6558 if (cfun->va_list_gpr_size)
6559 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6560 else
6561 ix86_varargs_gpr_size = 0;
6563 /* FPR size of varargs save area. We don't need it if we don't pass
6564 anything in SSE registers. */
6565 if (cum->sse_nregs && cfun->va_list_fpr_size)
6566 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6567 else
6568 ix86_varargs_fpr_size = 0;
6570 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6571 return;
6573 save_area = frame_pointer_rtx;
6574 set = get_varargs_alias_set ();
6576 for (i = cum->regno;
6577 i < regparm
6578 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6579 i++)
6581 mem = gen_rtx_MEM (Pmode,
6582 plus_constant (save_area, i * UNITS_PER_WORD));
6583 MEM_NOTRAP_P (mem) = 1;
6584 set_mem_alias_set (mem, set);
6585 emit_move_insn (mem, gen_rtx_REG (Pmode,
6586 x86_64_int_parameter_registers[i]));
6589 if (ix86_varargs_fpr_size)
6591 /* Now emit code to save the SSE registers. The AX parameter contains the
6592 number of SSE parameter registers used to call this function. We use the
6593 sse_prologue_save insn template, which produces a computed jump across
6594 the SSE saves. Some preparation work is needed to get this working. */
6596 label = gen_label_rtx ();
6597 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6599 /* Compute the address to jump to:
6600 label - eax*4 + nnamed_sse_arguments*4, or
6601 label - eax*5 + nnamed_sse_arguments*5 for AVX. */
6602 tmp_reg = gen_reg_rtx (Pmode);
6603 nsse_reg = gen_reg_rtx (Pmode);
6604 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6605 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6606 gen_rtx_MULT (Pmode, nsse_reg,
6607 GEN_INT (4))));
6609 /* vmovaps is one byte longer than movaps. */
6610 if (TARGET_AVX)
6611 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6612 gen_rtx_PLUS (Pmode, tmp_reg,
6613 nsse_reg)));
6615 if (cum->sse_regno)
6616 emit_move_insn
6617 (nsse_reg,
6618 gen_rtx_CONST (DImode,
6619 gen_rtx_PLUS (DImode,
6620 label_ref,
6621 GEN_INT (cum->sse_regno
6622 * (TARGET_AVX ? 5 : 4)))));
6623 else
6624 emit_move_insn (nsse_reg, label_ref);
6625 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6627 /* Compute the address of the memory block we save into. We always use a
6628 pointer pointing 127 bytes after the first byte to store - this keeps
6629 the instruction size limited to 4 bytes (5 bytes for AVX) with a one
6630 byte displacement. */
6631 tmp_reg = gen_reg_rtx (Pmode);
6632 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6633 plus_constant (save_area,
6634 ix86_varargs_gpr_size + 127)));
6635 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6636 MEM_NOTRAP_P (mem) = 1;
6637 set_mem_alias_set (mem, set);
6638 set_mem_align (mem, BITS_PER_WORD);
6640 /* And finally do the dirty job! */
6641 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6642 GEN_INT (cum->sse_regno), label));
6646 static void
6647 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6649 alias_set_type set = get_varargs_alias_set ();
6650 int i;
6652 for (i = cum->regno; i < X64_REGPARM_MAX; i++)
6654 rtx reg, mem;
6656 mem = gen_rtx_MEM (Pmode,
6657 plus_constant (virtual_incoming_args_rtx,
6658 i * UNITS_PER_WORD));
6659 MEM_NOTRAP_P (mem) = 1;
6660 set_mem_alias_set (mem, set);
6662 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6663 emit_move_insn (mem, reg);
6667 static void
6668 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6669 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6670 int no_rtl)
6672 CUMULATIVE_ARGS next_cum;
6673 tree fntype;
6675 /* This argument doesn't appear to be used anymore, which is good,
6676 because the old code here didn't suppress rtl generation. */
6677 gcc_assert (!no_rtl);
6679 if (!TARGET_64BIT)
6680 return;
6682 fntype = TREE_TYPE (current_function_decl);
6684 /* For varargs, we do not want to skip the dummy va_dcl argument.
6685 For stdargs, we do want to skip the last named argument. */
6686 next_cum = *cum;
6687 if (stdarg_p (fntype))
6688 function_arg_advance (&next_cum, mode, type, 1);
6690 if (cum->call_abi == MS_ABI)
6691 setup_incoming_varargs_ms_64 (&next_cum);
6692 else
6693 setup_incoming_varargs_64 (&next_cum);
6696 /* Check whether TYPE is a va_list of kind char *. */
6698 static bool
6699 is_va_list_char_pointer (tree type)
6701 tree canonic;
6703 /* For 32-bit it is always true. */
6704 if (!TARGET_64BIT)
6705 return true;
6706 canonic = ix86_canonical_va_list_type (type);
6707 return (canonic == ms_va_list_type_node
6708 || (DEFAULT_ABI == MS_ABI && canonic == va_list_type_node));
6711 /* Implement va_start. */
6713 static void
6714 ix86_va_start (tree valist, rtx nextarg)
6716 HOST_WIDE_INT words, n_gpr, n_fpr;
6717 tree f_gpr, f_fpr, f_ovf, f_sav;
6718 tree gpr, fpr, ovf, sav, t;
6719 tree type;
6721 /* Only the 64-bit target needs something special. */
6722 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6724 std_expand_builtin_va_start (valist, nextarg);
6725 return;
6728 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6729 f_fpr = TREE_CHAIN (f_gpr);
6730 f_ovf = TREE_CHAIN (f_fpr);
6731 f_sav = TREE_CHAIN (f_ovf);
6733 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6734 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6735 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6736 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6737 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6739 /* Count number of gp and fp argument registers used. */
6740 words = crtl->args.info.words;
6741 n_gpr = crtl->args.info.regno;
6742 n_fpr = crtl->args.info.sse_regno;
6744 if (cfun->va_list_gpr_size)
6746 type = TREE_TYPE (gpr);
6747 t = build2 (MODIFY_EXPR, type,
6748 gpr, build_int_cst (type, n_gpr * 8));
6749 TREE_SIDE_EFFECTS (t) = 1;
6750 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6753 if (TARGET_SSE && cfun->va_list_fpr_size)
6755 type = TREE_TYPE (fpr);
6756 t = build2 (MODIFY_EXPR, type, fpr,
6757 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6758 TREE_SIDE_EFFECTS (t) = 1;
6759 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6762 /* Find the overflow area. */
6763 type = TREE_TYPE (ovf);
6764 t = make_tree (type, crtl->args.internal_arg_pointer);
6765 if (words != 0)
6766 t = build2 (POINTER_PLUS_EXPR, type, t,
6767 size_int (words * UNITS_PER_WORD));
6768 t = build2 (MODIFY_EXPR, type, ovf, t);
6769 TREE_SIDE_EFFECTS (t) = 1;
6770 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6772 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6774 /* Find the register save area.
6775 The function prologue saves it right above the stack frame. */
6776 type = TREE_TYPE (sav);
6777 t = make_tree (type, frame_pointer_rtx);
6778 if (!ix86_varargs_gpr_size)
6779 t = build2 (POINTER_PLUS_EXPR, type, t,
6780 size_int (-8 * X86_64_REGPARM_MAX));
6781 t = build2 (MODIFY_EXPR, type, sav, t);
6782 TREE_SIDE_EFFECTS (t) = 1;
6783 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
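/* Concrete values (a sketch): for int f (int a, ...) with no named
   floating point arguments, the code above sets gp_offset = 8 (one GP
   register consumed by A) and fp_offset = 8 * X86_64_REGPARM_MAX = 48,
   points overflow_arg_area just past the named stack words, and leaves
   reg_save_area at the block saved by the prologue.  */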
6787 /* Implement va_arg. */
6789 static tree
6790 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6791 gimple_seq *post_p)
6793 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
6794 tree f_gpr, f_fpr, f_ovf, f_sav;
6795 tree gpr, fpr, ovf, sav, t;
6796 int size, rsize;
6797 tree lab_false, lab_over = NULL_TREE;
6798 tree addr, t2;
6799 rtx container;
6800 int indirect_p = 0;
6801 tree ptrtype;
6802 enum machine_mode nat_mode;
6803 int arg_boundary;
6805 /* Only the 64-bit target needs something special. */
6806 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6807 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6809 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6810 f_fpr = TREE_CHAIN (f_gpr);
6811 f_ovf = TREE_CHAIN (f_fpr);
6812 f_sav = TREE_CHAIN (f_ovf);
6814 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
6815 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
6816 valist = build_va_arg_indirect_ref (valist);
6817 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6818 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6819 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6821 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6822 if (indirect_p)
6823 type = build_pointer_type (type);
6824 size = int_size_in_bytes (type);
6825 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6827 nat_mode = type_natural_mode (type, NULL);
6828 switch (nat_mode)
6830 case V8SFmode:
6831 case V8SImode:
6832 case V32QImode:
6833 case V16HImode:
6834 case V4DFmode:
6835 case V4DImode:
6836 /* Unnamed 256bit vector mode parameters are passed on the stack. */
6837 if (ix86_cfun_abi () == SYSV_ABI)
6839 container = NULL;
6840 break;
6843 default:
6844 container = construct_container (nat_mode, TYPE_MODE (type),
6845 type, 0, X86_64_REGPARM_MAX,
6846 X86_64_SSE_REGPARM_MAX, intreg,
6848 break;
6851 /* Pull the value out of the saved registers. */
6853 addr = create_tmp_var (ptr_type_node, "addr");
6854 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
6856 if (container)
6858 int needed_intregs, needed_sseregs;
6859 bool need_temp;
6860 tree int_addr, sse_addr;
6862 lab_false = create_artificial_label ();
6863 lab_over = create_artificial_label ();
6865 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
6867 need_temp = (!REG_P (container)
6868 && ((needed_intregs && TYPE_ALIGN (type) > 64)
6869 || TYPE_ALIGN (type) > 128));
6871 /* If we are passing a structure, verify that it is a consecutive block
6872 in the register save area. If not, we need to do moves. */
6873 if (!need_temp && !REG_P (container))
6875 /* Verify that all registers are strictly consecutive. */
6876 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
6878 int i;
6880 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
6882 rtx slot = XVECEXP (container, 0, i);
6883 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
6884 || INTVAL (XEXP (slot, 1)) != i * 16)
6885 need_temp = 1;
6888 else
6890 int i;
6892 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
6894 rtx slot = XVECEXP (container, 0, i);
6895 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
6896 || INTVAL (XEXP (slot, 1)) != i * 8)
6897 need_temp = 1;
6901 if (!need_temp)
6903 int_addr = addr;
6904 sse_addr = addr;
6906 else
6908 int_addr = create_tmp_var (ptr_type_node, "int_addr");
6909 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
6910 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
6911 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
6914 /* First ensure that we fit completely in registers. */
6915 if (needed_intregs)
6917 t = build_int_cst (TREE_TYPE (gpr),
6918 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
6919 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
6920 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
6921 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
6922 gimplify_and_add (t, pre_p);
6924 if (needed_sseregs)
6926 t = build_int_cst (TREE_TYPE (fpr),
6927 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
6928 + X86_64_REGPARM_MAX * 8);
6929 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
6930 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
6931 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
6932 gimplify_and_add (t, pre_p);
6935 /* Compute index to start of area used for integer regs. */
6936 if (needed_intregs)
6938 /* int_addr = gpr + sav; */
6939 t = fold_convert (sizetype, gpr);
6940 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
6941 gimplify_assign (int_addr, t, pre_p);
6943 if (needed_sseregs)
6945 /* sse_addr = fpr + sav; */
6946 t = fold_convert (sizetype, fpr);
6947 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
6948 gimplify_assign (sse_addr, t, pre_p);
6950 if (need_temp)
6952 int i;
6953 tree temp = create_tmp_var (type, "va_arg_tmp");
6955 /* addr = &temp; */
6956 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
6957 gimplify_assign (addr, t, pre_p);
6959 for (i = 0; i < XVECLEN (container, 0); i++)
6961 rtx slot = XVECEXP (container, 0, i);
6962 rtx reg = XEXP (slot, 0);
6963 enum machine_mode mode = GET_MODE (reg);
6964 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
6965 tree addr_type = build_pointer_type (piece_type);
6966 tree daddr_type = build_pointer_type_for_mode (piece_type,
6967 ptr_mode, true);
6968 tree src_addr, src;
6969 int src_offset;
6970 tree dest_addr, dest;
6972 if (SSE_REGNO_P (REGNO (reg)))
6974 src_addr = sse_addr;
6975 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
6977 else
6979 src_addr = int_addr;
6980 src_offset = REGNO (reg) * 8;
6982 src_addr = fold_convert (addr_type, src_addr);
6983 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
6984 size_int (src_offset));
6985 src = build_va_arg_indirect_ref (src_addr);
6987 dest_addr = fold_convert (daddr_type, addr);
6988 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
6989 size_int (INTVAL (XEXP (slot, 1))));
6990 dest = build_va_arg_indirect_ref (dest_addr);
6992 gimplify_assign (dest, src, pre_p);
6996 if (needed_intregs)
6998 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
6999 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7000 gimplify_assign (gpr, t, pre_p);
7003 if (needed_sseregs)
7005 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7006 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7007 gimplify_assign (fpr, t, pre_p);
7010 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7012 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7015 /* ... otherwise out of the overflow area. */
7017 /* When we align a parameter on the stack for the caller, if its
7018 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
7019 aligned at MAX_SUPPORTED_STACK_ALIGNMENT. Match the callee
7020 with the caller here. */
7021 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7022 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7023 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7025 /* Care for on-stack alignment if needed. */
7026 if (arg_boundary <= 64
7027 || integer_zerop (TYPE_SIZE (type)))
7028 t = ovf;
7029 else
7031 HOST_WIDE_INT align = arg_boundary / 8;
7032 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7033 size_int (align - 1));
7034 t = fold_convert (sizetype, t);
7035 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7036 size_int (-align));
7037 t = fold_convert (TREE_TYPE (ovf), t);
7039 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7040 gimplify_assign (addr, t, pre_p);
7042 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7043 size_int (rsize * UNITS_PER_WORD));
7044 gimplify_assign (unshare_expr (ovf), t, pre_p);
7046 if (container)
7047 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7049 ptrtype = build_pointer_type (type);
7050 addr = fold_convert (ptrtype, addr);
7052 if (indirect_p)
7053 addr = build_va_arg_indirect_ref (addr);
7054 return build_va_arg_indirect_ref (addr);
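/* The sequence built above for va_arg (ap, int) is roughly (informal
   pseudo code, one integer register needed):

       if (ap->gp_offset >= 48) goto lab_false;
       addr = ap->reg_save_area + ap->gp_offset;
       ap->gp_offset += 8;
       goto lab_over;
     lab_false:
       addr = ap->overflow_arg_area;
       ap->overflow_arg_area += 8;
     lab_over:
       result = *(int *) addr;  */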
7057 /* Return nonzero if OPNUM's MEM should be matched
7058 in movabs* patterns. */
7061 ix86_check_movabs (rtx insn, int opnum)
7063 rtx set, mem;
7065 set = PATTERN (insn);
7066 if (GET_CODE (set) == PARALLEL)
7067 set = XVECEXP (set, 0, 0);
7068 gcc_assert (GET_CODE (set) == SET);
7069 mem = XEXP (set, opnum);
7070 while (GET_CODE (mem) == SUBREG)
7071 mem = SUBREG_REG (mem);
7072 gcc_assert (MEM_P (mem));
7073 return (volatile_ok || !MEM_VOLATILE_P (mem));
7076 /* Initialize the table of extra 80387 mathematical constants. */
7078 static void
7079 init_ext_80387_constants (void)
7081 static const char * cst[5] =
7083 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7084 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7085 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7086 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7087 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7089 int i;
7091 for (i = 0; i < 5; i++)
7093 real_from_string (&ext_80387_constants_table[i], cst[i]);
7094 /* Ensure each constant is rounded to XFmode precision. */
7095 real_convert (&ext_80387_constants_table[i],
7096 XFmode, &ext_80387_constants_table[i]);
7099 ext_80387_constants_init = 1;
7102 /* Return true if the constant is something that can be loaded with
7103 a special instruction. */
7106 standard_80387_constant_p (rtx x)
7108 enum machine_mode mode = GET_MODE (x);
7110 REAL_VALUE_TYPE r;
7112 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7113 return -1;
7115 if (x == CONST0_RTX (mode))
7116 return 1;
7117 if (x == CONST1_RTX (mode))
7118 return 2;
7120 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7122 /* For XFmode constants, try to find a special 80387 instruction when
7123 optimizing for size or on those CPUs that benefit from them. */
7124 if (mode == XFmode
7125 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7127 int i;
7129 if (! ext_80387_constants_init)
7130 init_ext_80387_constants ();
7132 for (i = 0; i < 5; i++)
7133 if (real_identical (&r, &ext_80387_constants_table[i]))
7134 return i + 3;
7137 /* Load of the constant -0.0 or -1.0 will be split as
7138 fldz;fchs or fld1;fchs sequence. */
7139 if (real_isnegzero (&r))
7140 return 8;
7141 if (real_identical (&r, &dconstm1))
7142 return 9;
7144 return 0;
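/* Example (informal): standard_80387_constant_p on CONST1_RTX (XFmode)
   returns 2, which standard_80387_constant_opcode below maps to "fld1";
   the pi entry of the table above returns 7 and yields "fldpi".  */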
7147 /* Return the opcode of the special instruction to be used to load
7148 the constant X. */
7150 const char *
7151 standard_80387_constant_opcode (rtx x)
7153 switch (standard_80387_constant_p (x))
7155 case 1:
7156 return "fldz";
7157 case 2:
7158 return "fld1";
7159 case 3:
7160 return "fldlg2";
7161 case 4:
7162 return "fldln2";
7163 case 5:
7164 return "fldl2e";
7165 case 6:
7166 return "fldl2t";
7167 case 7:
7168 return "fldpi";
7169 case 8:
7170 case 9:
7171 return "#";
7172 default:
7173 gcc_unreachable ();
7177 /* Return the CONST_DOUBLE representing the 80387 constant that is
7178 loaded by the specified special instruction. The argument IDX
7179 matches the return value from standard_80387_constant_p. */
7182 standard_80387_constant_rtx (int idx)
7184 int i;
7186 if (! ext_80387_constants_init)
7187 init_ext_80387_constants ();
7189 switch (idx)
7191 case 3:
7192 case 4:
7193 case 5:
7194 case 6:
7195 case 7:
7196 i = idx - 3;
7197 break;
7199 default:
7200 gcc_unreachable ();
7203 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7204 XFmode);
7207 /* Return 1 if MODE is a valid mode for SSE. */
7208 static int
7209 standard_sse_mode_p (enum machine_mode mode)
7211 switch (mode)
7213 case V16QImode:
7214 case V8HImode:
7215 case V4SImode:
7216 case V2DImode:
7217 case V4SFmode:
7218 case V2DFmode:
7219 return 1;
7221 default:
7222 return 0;
7226 /* Return 1 if X is all 0s. For all 1s, return 2 if X is in a 128bit
7227 SSE mode and SSE2 is enabled, or 3 if X is in a 256bit AVX
7228 mode and AVX is enabled. */
7231 standard_sse_constant_p (rtx x)
7233 enum machine_mode mode = GET_MODE (x);
7235 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7236 return 1;
7237 if (vector_all_ones_operand (x, mode))
7239 if (standard_sse_mode_p (mode))
7240 return TARGET_SSE2 ? 2 : -2;
7241 else if (VALID_AVX256_REG_MODE (mode))
7242 return TARGET_AVX ? 3 : -3;
7245 return 0;
7248 /* Return the opcode of the special instruction to be used to load
7249 the constant X. */
7251 const char *
7252 standard_sse_constant_opcode (rtx insn, rtx x)
7254 switch (standard_sse_constant_p (x))
7256 case 1:
7257 switch (get_attr_mode (insn))
7259 case MODE_V4SF:
7260 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7261 case MODE_V2DF:
7262 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7263 case MODE_TI:
7264 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7265 case MODE_V8SF:
7266 return "vxorps\t%x0, %x0, %x0";
7267 case MODE_V4DF:
7268 return "vxorpd\t%x0, %x0, %x0";
7269 case MODE_OI:
7270 return "vpxor\t%x0, %x0, %x0";
7271 default:
7272 gcc_unreachable ();
7274 case 2:
7275 if (TARGET_AVX)
7276 switch (get_attr_mode (insn))
7278 case MODE_V4SF:
7279 case MODE_V2DF:
7280 case MODE_TI:
7281 return "vpcmpeqd\t%0, %0, %0";
7282 break;
7283 default:
7284 gcc_unreachable ();
7286 else
7287 return "pcmpeqd\t%0, %0";
7289 gcc_unreachable ();
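/* Illustrative effect of the mapping above: an all-zeros V4SF constant
   is materialized as "xorps %xmm0, %xmm0" (or its VEX form), and an
   all-ones vector with SSE2 as "pcmpeqd %xmm0, %xmm0"; both idioms
   avoid a constant-pool load entirely.  */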
7292 /* Returns 1 if OP contains a symbol reference. */
7295 symbolic_reference_mentioned_p (rtx op)
7297 const char *fmt;
7298 int i;
7300 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7301 return 1;
7303 fmt = GET_RTX_FORMAT (GET_CODE (op));
7304 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7306 if (fmt[i] == 'E')
7308 int j;
7310 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7311 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7312 return 1;
7315 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7316 return 1;
7319 return 0;
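/* For example, (mem:SI (plus:SI (reg:SI bx) (symbol_ref:SI ("x"))))
   mentions a symbol: the walk descends through the 'e' operands of
   the MEM and the PLUS until it reaches the SYMBOL_REF.  */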
7322 /* Return 1 if it is appropriate to emit `ret' instructions in the
7323 body of a function. Do this only if the epilogue is simple, needing a
7324 couple of insns. Prior to reloading, we can't tell how many registers
7325 must be saved, so return 0 then. Return 0 if there is no frame
7326 marker to de-allocate. */
7329 ix86_can_use_return_insn_p (void)
7331 struct ix86_frame frame;
7333 if (! reload_completed || frame_pointer_needed)
7334 return 0;
7336 /* Don't pop more than 32768 bytes of arguments, since that's all
7337 we can do with one instruction. */
7338 if (crtl->args.pops_args
7339 && crtl->args.size >= 32768)
7340 return 0;
7342 ix86_compute_frame_layout (&frame);
7343 return frame.to_allocate == 0 && (frame.nregs + frame.nsseregs) == 0;
7346 /* Value should be nonzero if functions must have frame pointers.
7347 Zero means the frame pointer need not be set up (and parms may
7348 be accessed via the stack pointer) in functions that seem suitable. */
7351 ix86_frame_pointer_required (void)
7353 /* If we accessed previous frames, then the generated code expects
7354 to be able to access the saved ebp value in our frame. */
7355 if (cfun->machine->accesses_prev_frame)
7356 return 1;
7358 /* Several x86 OSes need a frame pointer for other reasons,
7359 usually pertaining to setjmp. */
7360 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7361 return 1;
7363 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7364 the frame pointer by default. Turn it back on now if we've not
7365 got a leaf function. */
7366 if (TARGET_OMIT_LEAF_FRAME_POINTER
7367 && (!current_function_is_leaf
7368 || ix86_current_function_calls_tls_descriptor))
7369 return 1;
7371 if (crtl->profile)
7372 return 1;
7374 return 0;
7377 /* Record that the current function accesses previous call frames. */
7379 void
7380 ix86_setup_frame_addresses (void)
7382 cfun->machine->accesses_prev_frame = 1;
7385 #ifndef USE_HIDDEN_LINKONCE
7386 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7387 # define USE_HIDDEN_LINKONCE 1
7388 # else
7389 # define USE_HIDDEN_LINKONCE 0
7390 # endif
7391 #endif
7393 static int pic_labels_used;
7395 /* Fills in the label name that should be used for a pc thunk for
7396 the given register. */
7398 static void
7399 get_pc_thunk_name (char name[32], unsigned int regno)
7401 gcc_assert (!TARGET_64BIT);
7403 if (USE_HIDDEN_LINKONCE)
7404 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7405 else
7406 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7410 /* This function generates the out-of-line PC thunks used for -fpic; each
7411 thunk loads its register with the return address of the caller and then returns. */
7413 void
7414 ix86_file_end (void)
7416 rtx xops[2];
7417 int regno;
7419 for (regno = 0; regno < 8; ++regno)
7421 char name[32];
7423 if (! ((pic_labels_used >> regno) & 1))
7424 continue;
7426 get_pc_thunk_name (name, regno);
7428 #if TARGET_MACHO
7429 if (TARGET_MACHO)
7431 switch_to_section (darwin_sections[text_coal_section]);
7432 fputs ("\t.weak_definition\t", asm_out_file);
7433 assemble_name (asm_out_file, name);
7434 fputs ("\n\t.private_extern\t", asm_out_file);
7435 assemble_name (asm_out_file, name);
7436 fputs ("\n", asm_out_file);
7437 ASM_OUTPUT_LABEL (asm_out_file, name);
7439 else
7440 #endif
7441 if (USE_HIDDEN_LINKONCE)
7443 tree decl;
7445 decl = build_decl (FUNCTION_DECL, get_identifier (name),
7446 error_mark_node);
7447 TREE_PUBLIC (decl) = 1;
7448 TREE_STATIC (decl) = 1;
7449 DECL_ONE_ONLY (decl) = 1;
7451 (*targetm.asm_out.unique_section) (decl, 0);
7452 switch_to_section (get_named_section (decl, NULL, 0));
7454 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7455 fputs ("\t.hidden\t", asm_out_file);
7456 assemble_name (asm_out_file, name);
7457 fputc ('\n', asm_out_file);
7458 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7460 else
7462 switch_to_section (text_section);
7463 ASM_OUTPUT_LABEL (asm_out_file, name);
7466 xops[0] = gen_rtx_REG (Pmode, regno);
7467 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7468 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7469 output_asm_insn ("ret", xops);
7472 if (NEED_INDICATE_EXEC_STACK)
7473 file_end_indicate_exec_stack ();
7476 /* Emit code for the SET_GOT patterns. */
7478 const char *
7479 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7481 rtx xops[3];
7483 xops[0] = dest;
7485 if (TARGET_VXWORKS_RTP && flag_pic)
7487 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7488 xops[2] = gen_rtx_MEM (Pmode,
7489 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7490 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7492 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7493 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7494 an unadorned address. */
7495 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7496 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7497 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7498 return "";
7501 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7503 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7505 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7507 if (!flag_pic)
7508 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7509 else
7510 output_asm_insn ("call\t%a2", xops);
7512 #if TARGET_MACHO
7513 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7514 is what will be referenced by the Mach-O PIC subsystem. */
7515 if (!label)
7516 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7517 #endif
7519 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7520 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7522 if (flag_pic)
7523 output_asm_insn ("pop%z0\t%0", xops);
7525 else
7527 char name[32];
7528 get_pc_thunk_name (name, REGNO (dest));
7529 pic_labels_used |= 1 << REGNO (dest);
7531 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7532 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7533 output_asm_insn ("call\t%X2", xops);
7534 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7535 is what will be referenced by the Mach-O PIC subsystem. */
7536 #if TARGET_MACHO
7537 if (!label)
7538 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7539 else
7540 targetm.asm_out.internal_label (asm_out_file, "L",
7541 CODE_LABEL_NUMBER (label));
7542 #endif
7545 if (TARGET_MACHO)
7546 return "";
7548 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7549 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7550 else
7551 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7553 return "";
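/* By way of example (PIC register %ebx, label names illustrative), the
   deep-branch-prediction path produces

	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   while the fallback path produces the classic

	call	.L2
   .L2:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx  */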
7556 /* Generate a "push" pattern for input ARG. */
7558 static rtx
7559 gen_push (rtx arg)
7561 return gen_rtx_SET (VOIDmode,
7562 gen_rtx_MEM (Pmode,
7563 gen_rtx_PRE_DEC (Pmode,
7564 stack_pointer_rtx)),
7565 arg);
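/* Illustrative: on ia32, gen_push (gen_rtx_REG (SImode, AX_REG)) builds

     (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI ax))

   which is emitted as a single "push %eax".  */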
7568 /* Return >= 0 if there is an unused call-clobbered register available
7569 for the entire function. */
7571 static unsigned int
7572 ix86_select_alt_pic_regnum (void)
7574 if (current_function_is_leaf && !crtl->profile
7575 && !ix86_current_function_calls_tls_descriptor)
7577 int i, drap;
7578 /* Can't use the same register for both PIC and DRAP. */
7579 if (crtl->drap_reg)
7580 drap = REGNO (crtl->drap_reg);
7581 else
7582 drap = -1;
7583 for (i = 2; i >= 0; --i)
7584 if (i != drap && !df_regs_ever_live_p (i))
7585 return i;
7588 return INVALID_REGNUM;
7591 /* Return 1 if we need to save REGNO. */
7592 static int
7593 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7595 if (pic_offset_table_rtx
7596 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7597 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7598 || crtl->profile
7599 || crtl->calls_eh_return
7600 || crtl->uses_const_pool))
7602 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7603 return 0;
7604 return 1;
7607 if (crtl->calls_eh_return && maybe_eh_return)
7609 unsigned i;
7610 for (i = 0; ; i++)
7612 unsigned test = EH_RETURN_DATA_REGNO (i);
7613 if (test == INVALID_REGNUM)
7614 break;
7615 if (test == regno)
7616 return 1;
7620 if (crtl->drap_reg
7621 && regno == REGNO (crtl->drap_reg))
7622 return 1;
7624 return (df_regs_ever_live_p (regno)
7625 && !call_used_regs[regno]
7626 && !fixed_regs[regno]
7627 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7630 /* Return number of saved general purpose registers. */
7632 static int
7633 ix86_nsaved_regs (void)
7635 int nregs = 0;
7636 int regno;
7638 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7639 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7640 nregs ++;
7641 return nregs;
7644 /* Return number of saved SSE registers. */
7646 static int
7647 ix86_nsaved_sseregs (void)
7649 int nregs = 0;
7650 int regno;
7652 if (ix86_cfun_abi () != MS_ABI)
7653 return 0;
7654 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7655 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7656 nregs ++;
7657 return nregs;
7660 /* Given FROM and TO register numbers, say whether this elimination is
7661 allowed. If stack alignment is needed, we can only replace argument
7662 pointer with hard frame pointer, or replace frame pointer with stack
7663 pointer. Otherwise, frame pointer elimination is automatically
7664 handled and all other eliminations are valid. */
7667 ix86_can_eliminate (int from, int to)
7669 if (stack_realign_fp)
7670 return ((from == ARG_POINTER_REGNUM
7671 && to == HARD_FRAME_POINTER_REGNUM)
7672 || (from == FRAME_POINTER_REGNUM
7673 && to == STACK_POINTER_REGNUM));
7674 else
7675 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : 1;
7678 /* Return the offset between two registers, one to be eliminated, and the other
7679 its replacement, at the start of a routine. */
7681 HOST_WIDE_INT
7682 ix86_initial_elimination_offset (int from, int to)
7684 struct ix86_frame frame;
7685 ix86_compute_frame_layout (&frame);
7687 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7688 return frame.hard_frame_pointer_offset;
7689 else if (from == FRAME_POINTER_REGNUM
7690 && to == HARD_FRAME_POINTER_REGNUM)
7691 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7692 else
7694 gcc_assert (to == STACK_POINTER_REGNUM);
7696 if (from == ARG_POINTER_REGNUM)
7697 return frame.stack_pointer_offset;
7699 gcc_assert (from == FRAME_POINTER_REGNUM);
7700 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7704 /* In a dynamically-aligned function, we can't know the offset from
7705 stack pointer to frame pointer, so we must ensure that setjmp
7706 eliminates fp against the hard fp (%ebp) rather than trying to
7707 index from %esp up to the top of the frame across a gap that is
7708 of unknown (at compile-time) size. */
7709 static rtx
7710 ix86_builtin_setjmp_frame_value (void)
7712 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7715 /* Fill structure ix86_frame about frame of currently computed function. */
7717 static void
7718 ix86_compute_frame_layout (struct ix86_frame *frame)
7720 HOST_WIDE_INT total_size;
7721 unsigned int stack_alignment_needed;
7722 HOST_WIDE_INT offset;
7723 unsigned int preferred_alignment;
7724 HOST_WIDE_INT size = get_frame_size ();
7726 frame->nregs = ix86_nsaved_regs ();
7727 frame->nsseregs = ix86_nsaved_sseregs ();
7728 total_size = size;
7730 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7731 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7733 /* The MS ABI seems to require stack alignment to always be 16, except
7734 within function prologues. */
7735 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
7737 preferred_alignment = 16;
7738 stack_alignment_needed = 16;
7739 crtl->preferred_stack_boundary = 128;
7740 crtl->stack_alignment_needed = 128;
7743 gcc_assert (!size || stack_alignment_needed);
7744 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
7745 gcc_assert (preferred_alignment <= stack_alignment_needed);
7747 /* During reload iterations the number of saved registers can change.
7748 Recompute the value as needed. Do not recompute when the number of
7749 registers didn't change, as reload makes multiple calls to this function
7750 and does not expect the decision to change within a single iteration. */
7751 if (!optimize_function_for_size_p (cfun)
7752 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
7754 int count = frame->nregs;
7756 cfun->machine->use_fast_prologue_epilogue_nregs = count;
7757 /* The fast prologue uses move instead of push to save registers. This
7758 is significantly longer, but also executes faster as modern hardware
7759 can execute the moves in parallel, but can't do that for push/pop.
7761 Be careful about choosing which prologue to emit: when the function
7762 takes many instructions to execute, we may use the slow version, as we
7763 may when the function is known to be outside a hot spot (this is known
7764 with feedback only). Weight the size of the function by the number of
7765 registers to save, as it is cheap to use one or two push instructions
7766 but very slow to use many of them. */
7767 if (count)
7768 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
7769 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
7770 || (flag_branch_probabilities
7771 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
7772 cfun->machine->use_fast_prologue_epilogue = false;
7773 else
7774 cfun->machine->use_fast_prologue_epilogue
7775 = !expensive_function_p (count);
7777 if (TARGET_PROLOGUE_USING_MOVE
7778 && cfun->machine->use_fast_prologue_epilogue)
7779 frame->save_regs_using_mov = true;
7780 else
7781 frame->save_regs_using_mov = false;
7784 /* Skip return address and saved base pointer. */
7785 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
7787 frame->hard_frame_pointer_offset = offset;
7789 /* Round the offset up to the alignment, because the realigned frame
7790 starts here. */
7791 if (stack_realign_fp)
7792 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
7794 /* Register save area */
7795 offset += frame->nregs * UNITS_PER_WORD;
7797 /* Align SSE reg save area. */
7798 if (frame->nsseregs)
7799 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
7800 else
7801 frame->padding0 = 0;
7803 /* SSE register save area. */
7804 offset += frame->padding0 + frame->nsseregs * 16;
7806 /* Va-arg area */
7807 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
7808 offset += frame->va_arg_size;
7810 /* Align start of frame for local function. */
7811 frame->padding1 = ((offset + stack_alignment_needed - 1)
7812 & -stack_alignment_needed) - offset;
7814 offset += frame->padding1;
7816 /* Frame pointer points here. */
7817 frame->frame_pointer_offset = offset;
7819 offset += size;
7821 /* Add the outgoing arguments area. This can be skipped if we
7822 eliminated all the function calls as dead code.
7823 Skipping is however impossible when the function calls alloca, as
7824 the alloca expander assumes that the last crtl->outgoing_args_size
7825 bytes of the stack frame are unused. */
7826 if (ACCUMULATE_OUTGOING_ARGS
7827 && (!current_function_is_leaf || cfun->calls_alloca
7828 || ix86_current_function_calls_tls_descriptor))
7830 offset += crtl->outgoing_args_size;
7831 frame->outgoing_arguments_size = crtl->outgoing_args_size;
7833 else
7834 frame->outgoing_arguments_size = 0;
7836 /* Align stack boundary. Only needed if we're calling another function
7837 or using alloca. */
7838 if (!current_function_is_leaf || cfun->calls_alloca
7839 || ix86_current_function_calls_tls_descriptor)
7840 frame->padding2 = ((offset + preferred_alignment - 1)
7841 & -preferred_alignment) - offset;
7842 else
7843 frame->padding2 = 0;
7845 offset += frame->padding2;
7847 /* We've reached end of stack frame. */
7848 frame->stack_pointer_offset = offset;
7850 /* Size prologue needs to allocate. */
7851 frame->to_allocate =
7852 (size + frame->padding1 + frame->padding2
7853 + frame->outgoing_arguments_size + frame->va_arg_size);
7855 if ((!frame->to_allocate && frame->nregs <= 1)
7856 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
7857 frame->save_regs_using_mov = false;
7859 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
7860 && current_function_sp_is_unchanging
7861 && current_function_is_leaf
7862 && !ix86_current_function_calls_tls_descriptor)
7864 frame->red_zone_size = frame->to_allocate;
7865 if (frame->save_regs_using_mov)
7866 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
7867 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
7868 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
7870 else
7871 frame->red_zone_size = 0;
7872 frame->to_allocate -= frame->red_zone_size;
7873 frame->stack_pointer_offset -= frame->red_zone_size;
7874 #if 0
7875 fprintf (stderr, "\n");
7876 fprintf (stderr, "size: %ld\n", (long)size);
7877 fprintf (stderr, "nregs: %ld\n", (long)frame->nregs);
7878 fprintf (stderr, "nsseregs: %ld\n", (long)frame->nsseregs);
7879 fprintf (stderr, "padding0: %ld\n", (long)frame->padding0);
7880 fprintf (stderr, "alignment1: %ld\n", (long)stack_alignment_needed);
7881 fprintf (stderr, "padding1: %ld\n", (long)frame->padding1);
7882 fprintf (stderr, "va_arg: %ld\n", (long)frame->va_arg_size);
7883 fprintf (stderr, "padding2: %ld\n", (long)frame->padding2);
7884 fprintf (stderr, "to_allocate: %ld\n", (long)frame->to_allocate);
7885 fprintf (stderr, "red_zone_size: %ld\n", (long)frame->red_zone_size);
7886 fprintf (stderr, "frame_pointer_offset: %ld\n", (long)frame->frame_pointer_offset);
7887 fprintf (stderr, "hard_frame_pointer_offset: %ld\n",
7888 (long)frame->hard_frame_pointer_offset);
7889 fprintf (stderr, "stack_pointer_offset: %ld\n", (long)frame->stack_pointer_offset);
7890 fprintf (stderr, "current_function_is_leaf: %ld\n", (long)current_function_is_leaf);
7891 fprintf (stderr, "cfun->calls_alloca: %ld\n", (long)cfun->calls_alloca);
7892 fprintf (stderr, "x86_current_function_calls_tls_descriptor: %ld\n", (long)ix86_current_function_calls_tls_descriptor);
7893 #endif
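/* A worked example of the layout computed above (ia32, frame pointer
   needed, two call-saved GPRs, no SSE saves, 24 bytes of locals,
   16 bytes of outgoing arguments, 16-byte alignment; all numbers
   hypothetical):

     offset = 8                          return address + saved %ebp
     hard_frame_pointer_offset = 8
     offset += 2 * 4 = 16                register save area
     padding0 = padding1 = 0             already aligned
     frame_pointer_offset = 16
     offset += 24 = 40                   locals
     offset += 16 = 56                   outgoing arguments
     padding2 = 8, offset = 64           round up to 16
     stack_pointer_offset = 64
     to_allocate = 24 + 0 + 8 + 16 = 48

   and indeed 8 + 8 + 48 == stack_pointer_offset.  */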
7896 /* Emit code to save registers in the prologue. */
7898 static void
7899 ix86_emit_save_regs (void)
7901 unsigned int regno;
7902 rtx insn;
7904 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
7905 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7907 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
7908 RTX_FRAME_RELATED_P (insn) = 1;
7912 /* Emit code to save registers using MOV insns. First register
7913 is restored from POINTER + OFFSET. */
7914 static void
7915 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
7917 unsigned int regno;
7918 rtx insn;
7920 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7921 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7923 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
7924 Pmode, offset),
7925 gen_rtx_REG (Pmode, regno));
7926 RTX_FRAME_RELATED_P (insn) = 1;
7927 offset += UNITS_PER_WORD;
7931 /* Emit code to save registers using MOV insns. First register
7932 is restored from POINTER + OFFSET. */
7933 static void
7934 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
7936 unsigned int regno;
7937 rtx insn;
7938 rtx mem;
7940 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7941 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7943 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
7944 set_mem_align (mem, 128);
7945 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
7946 RTX_FRAME_RELATED_P (insn) = 1;
7947 offset += 16;
7951 /* Expand prologue or epilogue stack adjustment.
7952 The pattern exists to put a dependency on all ebp-based memory accesses.
7953 STYLE should be negative if instructions should be marked as frame
7954 related, zero if the %r11 register is live and cannot be freely used,
7955 and positive otherwise. */
7957 static void
7958 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
7960 rtx insn;
7962 if (! TARGET_64BIT)
7963 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
7964 else if (x86_64_immediate_operand (offset, DImode))
7965 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
7966 else
7968 rtx r11;
7969 /* r11 is used by indirect sibcall return as well, set before the
7970 epilogue and used after the epilogue. ATM indirect sibcall
7971 shouldn't be used together with huge frame sizes in one
7972 function because of the frame_size check in sibcall.c. */
7973 gcc_assert (style);
7974 r11 = gen_rtx_REG (DImode, R11_REG);
7975 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
7976 if (style < 0)
7977 RTX_FRAME_RELATED_P (insn) = 1;
7978 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
7979 offset));
7981 if (style < 0)
7982 RTX_FRAME_RELATED_P (insn) = 1;
7985 /* Find an available register to be used as the dynamic realign argument
7986 pointer register. Such a register will be written in the prologue and
7987 used at the beginning of the body, so it must not be
7988 1. parameter passing register.
7989 2. GOT pointer.
7990 We reuse the static-chain register if it is available. Otherwise, we
7991 use DI for i386 and R13 for x86-64. We chose R13 since it has a
7992 shorter encoding.
7994 Return: the regno of the chosen register. */
7996 static unsigned int
7997 find_drap_reg (void)
7999 tree decl = cfun->decl;
8001 if (TARGET_64BIT)
8003 /* Use R13 for nested functions or functions that need a static chain.
8004 Since a function with tail calls may use any caller-saved
8005 register in the epilogue, DRAP must not use a caller-saved
8006 register in such a case. */
8007 if ((decl_function_context (decl)
8008 && !DECL_NO_STATIC_CHAIN (decl))
8009 || crtl->tail_call_emit)
8010 return R13_REG;
8012 return R10_REG;
8014 else
8016 /* Use DI for nested functions or functions that need a static chain.
8017 Since a function with tail calls may use any caller-saved
8018 register in the epilogue, DRAP must not use a caller-saved
8019 register in such a case. */
8020 if ((decl_function_context (decl)
8021 && !DECL_NO_STATIC_CHAIN (decl))
8022 || crtl->tail_call_emit)
8023 return DI_REG;
8025 /* Reuse static chain register if it isn't used for parameter
8026 passing. */
8027 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8028 && !lookup_attribute ("fastcall",
8029 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8030 return CX_REG;
8031 else
8032 return DI_REG;
8036 /* Update incoming stack boundary and estimated stack alignment. */
8038 static void
8039 ix86_update_stack_boundary (void)
8041 /* Prefer the one specified at command line. */
8042 ix86_incoming_stack_boundary
8043 = (ix86_user_incoming_stack_boundary
8044 ? ix86_user_incoming_stack_boundary
8045 : ix86_default_incoming_stack_boundary);
8047 /* Incoming stack alignment can be changed on individual functions
8048 via force_align_arg_pointer attribute. We use the smallest
8049 incoming stack boundary. */
8050 if (ix86_incoming_stack_boundary > MIN_STACK_BOUNDARY
8051 && lookup_attribute (ix86_force_align_arg_pointer_string,
8052 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8053 ix86_incoming_stack_boundary = MIN_STACK_BOUNDARY;
8055 /* The incoming stack frame has to be aligned at least at
8056 parm_stack_boundary. */
8057 if (ix86_incoming_stack_boundary < crtl->parm_stack_boundary)
8058 ix86_incoming_stack_boundary = crtl->parm_stack_boundary;
8060 /* The stack at the entry of main is aligned by the runtime. We use
8061 the smallest incoming stack boundary. */
8062 if (ix86_incoming_stack_boundary > MAIN_STACK_BOUNDARY
8063 && DECL_NAME (current_function_decl)
8064 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8065 && DECL_FILE_SCOPE_P (current_function_decl))
8066 ix86_incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8068 /* x86_64 varargs need 16-byte stack alignment for the register save
8069 area. */
8070 if (TARGET_64BIT
8071 && cfun->stdarg
8072 && crtl->stack_alignment_estimated < 128)
8073 crtl->stack_alignment_estimated = 128;
8076 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8077 needed or an rtx for DRAP otherwise. */
8079 static rtx
8080 ix86_get_drap_rtx (void)
8082 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8083 crtl->need_drap = true;
8085 if (stack_realign_drap)
8087 /* Assign DRAP to vDRAP and return vDRAP. */
8088 unsigned int regno = find_drap_reg ();
8089 rtx drap_vreg;
8090 rtx arg_ptr;
8091 rtx seq, insn;
8093 arg_ptr = gen_rtx_REG (Pmode, regno);
8094 crtl->drap_reg = arg_ptr;
8096 start_sequence ();
8097 drap_vreg = copy_to_reg (arg_ptr);
8098 seq = get_insns ();
8099 end_sequence ();
8101 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8102 RTX_FRAME_RELATED_P (insn) = 1;
8103 return drap_vreg;
8105 else
8106 return NULL;
8109 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8111 static rtx
8112 ix86_internal_arg_pointer (void)
8114 return virtual_incoming_args_rtx;
8117 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8118 This is called from dwarf2out.c to emit call frame instructions
8119 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8120 static void
8121 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
8123 rtx unspec = SET_SRC (pattern);
8124 gcc_assert (GET_CODE (unspec) == UNSPEC);
8126 switch (index)
8128 case UNSPEC_REG_SAVE:
8129 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
8130 SET_DEST (pattern));
8131 break;
8132 case UNSPEC_DEF_CFA:
8133 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
8134 INTVAL (XVECEXP (unspec, 0, 0)));
8135 break;
8136 default:
8137 gcc_unreachable ();
8141 /* Finalize the stack_realign_needed flag, which guides prologue/epilogue
8142 generation into the correct form. */
8143 static void
8144 ix86_finalize_stack_realign_flags (void)
8146 /* Check whether stack realignment is really needed after reload, and
8147 store the result in cfun. */
8148 unsigned int incoming_stack_boundary
8149 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8150 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8151 unsigned int stack_realign = (incoming_stack_boundary
8152 < (current_function_is_leaf
8153 ? crtl->max_used_stack_slot_alignment
8154 : crtl->stack_alignment_needed));
8156 if (crtl->stack_realign_finalized)
8158 /* After stack_realign_needed is finalized, we can no longer
8159 change it. */
8160 gcc_assert (crtl->stack_realign_needed == stack_realign);
8162 else
8164 crtl->stack_realign_needed = stack_realign;
8165 crtl->stack_realign_finalized = true;
8169 /* Expand the prologue into a bunch of separate insns. */
8171 void
8172 ix86_expand_prologue (void)
8174 rtx insn;
8175 bool pic_reg_used;
8176 struct ix86_frame frame;
8177 HOST_WIDE_INT allocate;
8179 ix86_finalize_stack_realign_flags ();
8181 /* DRAP should not coexist with stack_realign_fp */
8182 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8184 ix86_compute_frame_layout (&frame);
8186 /* Emit prologue code to adjust stack alignment and set up DRAP, in
8187 case DRAP is needed and stack realignment is really needed after reload. */
8188 if (crtl->drap_reg && crtl->stack_realign_needed)
8190 rtx x, y;
8191 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8192 int param_ptr_offset = (call_used_regs[REGNO (crtl->drap_reg)]
8193 ? 0 : UNITS_PER_WORD);
8195 gcc_assert (stack_realign_drap);
8197 /* Grab the argument pointer. */
8198 x = plus_constant (stack_pointer_rtx,
8199 (UNITS_PER_WORD + param_ptr_offset));
8200 y = crtl->drap_reg;
8202 /* We only need to push the parameter pointer reg if it is a
8203 caller-saved reg. */
8204 if (!call_used_regs[REGNO (crtl->drap_reg)])
8206 /* Push arg pointer reg */
8207 insn = emit_insn (gen_push (y));
8208 RTX_FRAME_RELATED_P (insn) = 1;
8211 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8212 RTX_FRAME_RELATED_P (insn) = 1;
8214 /* Align the stack. */
8215 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8216 stack_pointer_rtx,
8217 GEN_INT (-align_bytes)));
8218 RTX_FRAME_RELATED_P (insn) = 1;
8220 /* Replicate the return address on the stack so that the return
8221 address can be reached via the (argp - 1) slot. This is needed
8222 to implement the macro RETURN_ADDR_RTX and the intrinsic function
8223 expand_builtin_return_addr, etc. */
8224 x = crtl->drap_reg;
8225 x = gen_frame_mem (Pmode,
8226 plus_constant (x, -UNITS_PER_WORD));
8227 insn = emit_insn (gen_push (x));
8228 RTX_FRAME_RELATED_P (insn) = 1;
8231 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8232 slower on all targets. Also sdb doesn't like it. */
8234 if (frame_pointer_needed)
8236 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8237 RTX_FRAME_RELATED_P (insn) = 1;
8239 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8240 RTX_FRAME_RELATED_P (insn) = 1;
8243 if (stack_realign_fp)
8245 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8246 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8248 /* Align the stack. */
8249 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8250 stack_pointer_rtx,
8251 GEN_INT (-align_bytes)));
8252 RTX_FRAME_RELATED_P (insn) = 1;
8255 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8257 if (!frame.save_regs_using_mov)
8258 ix86_emit_save_regs ();
8259 else
8260 allocate += frame.nregs * UNITS_PER_WORD;
8262 /* When using the red zone we may start register saving before allocating
8263 the stack frame, saving one cycle of the prologue. However, we avoid
8264 doing this if we are going to have to probe the stack, since at least
8265 on x86_64 the stack probe can turn into a call that clobbers a red
8266 zone location. */
8267 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8268 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8269 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8270 && !crtl->stack_realign_needed)
8271 ? hard_frame_pointer_rtx
8272 : stack_pointer_rtx,
8273 -frame.nregs * UNITS_PER_WORD);
8275 if (allocate == 0)
8277 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8278 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8279 GEN_INT (-allocate), -1);
8280 else
8282 /* Only valid for Win32. */
8283 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8284 bool eax_live;
8285 rtx t;
8287 gcc_assert (!TARGET_64BIT || cfun->machine->call_abi == MS_ABI);
8289 if (cfun->machine->call_abi == MS_ABI)
8290 eax_live = false;
8291 else
8292 eax_live = ix86_eax_live_at_start_p ();
8294 if (eax_live)
8296 emit_insn (gen_push (eax));
8297 allocate -= UNITS_PER_WORD;
8300 emit_move_insn (eax, GEN_INT (allocate));
8302 if (TARGET_64BIT)
8303 insn = gen_allocate_stack_worker_64 (eax, eax);
8304 else
8305 insn = gen_allocate_stack_worker_32 (eax, eax);
8306 insn = emit_insn (insn);
8307 RTX_FRAME_RELATED_P (insn) = 1;
8308 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8309 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8310 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8311 t, REG_NOTES (insn));
8313 if (eax_live)
8315 if (frame_pointer_needed)
8316 t = plus_constant (hard_frame_pointer_rtx,
8317 allocate
8318 - frame.to_allocate
8319 - frame.nregs * UNITS_PER_WORD);
8320 else
8321 t = plus_constant (stack_pointer_rtx, allocate);
8322 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8326 if (frame.save_regs_using_mov
8327 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8328 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8330 if (!frame_pointer_needed
8331 || !frame.to_allocate
8332 || crtl->stack_realign_needed)
8333 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8334 frame.to_allocate
8335 + frame.nsseregs * 16 + frame.padding0);
8336 else
8337 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8338 -frame.nregs * UNITS_PER_WORD);
8340 if (!frame_pointer_needed
8341 || !frame.to_allocate
8342 || crtl->stack_realign_needed)
8343 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8344 frame.to_allocate);
8345 else
8346 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8347 - frame.nregs * UNITS_PER_WORD
8348 - frame.nsseregs * 16
8349 - frame.padding0);
8351 pic_reg_used = false;
8352 if (pic_offset_table_rtx
8353 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8354 || crtl->profile))
8356 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8358 if (alt_pic_reg_used != INVALID_REGNUM)
8359 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8361 pic_reg_used = true;
8364 if (pic_reg_used)
8366 if (TARGET_64BIT)
8368 if (ix86_cmodel == CM_LARGE_PIC)
8370 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8371 rtx label = gen_label_rtx ();
8372 emit_label (label);
8373 LABEL_PRESERVE_P (label) = 1;
8374 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8375 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8376 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8377 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8378 pic_offset_table_rtx, tmp_reg));
8380 else
8381 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8383 else
8384 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8387 /* In the pic_reg_used case, make sure that the got load isn't deleted
8388 when mcount needs it. Blockage to avoid call movement across mcount
8389 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8390 note. */
8391 if (crtl->profile && pic_reg_used)
8392 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8394 if (crtl->drap_reg && !crtl->stack_realign_needed)
8396 /* vDRAP is set up, but after reload it turns out that stack realignment
8397 isn't necessary; here we emit prologue code to set up DRAP
8398 without the stack realignment adjustment. */
8399 int drap_bp_offset = UNITS_PER_WORD * 2;
8400 rtx x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8401 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8404 /* Prevent instructions from being scheduled into register save push
8405 sequence when access to the red zone area is done through the frame pointer.
8406 The offset between the frame pointer and the stack pointer is calculated
8407 relative to the value of the stack pointer at the end of the function
8408 prologue, and moving instructions that access redzone area via frame
8409 pointer inside push sequence violates this assumption. */
8410 if (frame_pointer_needed && frame.red_zone_size)
8411 emit_insn (gen_memory_blockage ());
8413 /* Emit cld instruction if stringops are used in the function. */
8414 if (TARGET_CLD && ix86_current_function_needs_cld)
8415 emit_insn (gen_cld ());
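/* For the common ia32 frame-pointer case the expansion above boils down
   to the familiar sequence (registers and constants illustrative)

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	subl	$48, %esp

   with the pushes replaced by moves into the allocated frame when
   save_regs_using_mov is set, and an "andl $-N, %esp" (or the DRAP
   setup) added when the stack must be realigned.  */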
8418 /* Emit code to restore saved registers using MOV insns. First register
8419 is restored from POINTER + OFFSET. */
8420 static void
8421 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8422 int maybe_eh_return)
8424 int regno;
8425 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8427 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8428 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8430 /* Ensure that adjust_address won't be forced to produce a pointer
8431 out of the range allowed by the x86-64 instruction set. */
8432 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8434 rtx r11;
8436 r11 = gen_rtx_REG (DImode, R11_REG);
8437 emit_move_insn (r11, GEN_INT (offset));
8438 emit_insn (gen_adddi3 (r11, r11, pointer));
8439 base_address = gen_rtx_MEM (Pmode, r11);
8440 offset = 0;
8442 emit_move_insn (gen_rtx_REG (Pmode, regno),
8443 adjust_address (base_address, Pmode, offset));
8444 offset += UNITS_PER_WORD;
8448 /* Emit code to restore saved registers using MOV insns. First register
8449 is restored from POINTER + OFFSET. */
8450 static void
8451 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8452 int maybe_eh_return)
8454 int regno;
8455 rtx base_address = gen_rtx_MEM (TImode, pointer);
8456 rtx mem;
8458 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8459 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8461 /* Ensure that adjust_address won't be forced to produce a pointer
8462 out of the range allowed by the x86-64 instruction set. */
8463 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8465 rtx r11;
8467 r11 = gen_rtx_REG (DImode, R11_REG);
8468 emit_move_insn (r11, GEN_INT (offset));
8469 emit_insn (gen_adddi3 (r11, r11, pointer));
8470 base_address = gen_rtx_MEM (TImode, r11);
8471 offset = 0;
8473 mem = adjust_address (base_address, TImode, offset);
8474 set_mem_align (mem, 128);
8475 emit_move_insn (gen_rtx_REG (TImode, regno), mem);
8476 offset += 16;
8480 /* Restore function stack, frame, and registers. */
8482 void
8483 ix86_expand_epilogue (int style)
8485 int regno;
8486 int sp_valid;
8487 struct ix86_frame frame;
8488 HOST_WIDE_INT offset;
8490 ix86_finalize_stack_realign_flags ();
8492 /* When stack is realigned, SP must be valid. */
8493 sp_valid = (!frame_pointer_needed
8494 || current_function_sp_is_unchanging
8495 || stack_realign_fp);
8497 ix86_compute_frame_layout (&frame);
8499 /* See the comment about red zone and frame
8500 pointer usage in ix86_expand_prologue. */
8501 if (frame_pointer_needed && frame.red_zone_size)
8502 emit_insn (gen_memory_blockage ());
8504 /* Calculate start of saved registers relative to ebp. Special care
8505 must be taken for the normal return case of a function using
8506 eh_return: the eax and edx registers are marked as saved, but not
8507 restored along this path. */
8508 offset = frame.nregs;
8509 if (crtl->calls_eh_return && style != 2)
8510 offset -= 2;
8511 offset *= -UNITS_PER_WORD;
8512 offset -= frame.nsseregs * 16 + frame.padding0;
8514 /* If we're only restoring one register and sp is not valid, then
8515 use a move instruction to restore the register, since it's
8516 less work than reloading sp and popping the register.
8518 The default code results in a stack adjustment using an add/lea
8519 instruction, while this code results in a LEAVE instruction (or discrete
8520 equivalent), so it is profitable in some other cases as well, especially
8521 when there are no registers to restore. We also use this code when
8522 TARGET_USE_LEAVE and there is exactly one register to pop. This
8523 heuristic may need some tuning in the future. */
8524 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
8525 || (TARGET_EPILOGUE_USING_MOVE
8526 && cfun->machine->use_fast_prologue_epilogue
8527 && ((frame.nregs + frame.nsseregs) > 1 || frame.to_allocate))
8528 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs) && frame.to_allocate)
8529 || (frame_pointer_needed && TARGET_USE_LEAVE
8530 && cfun->machine->use_fast_prologue_epilogue
8531 && (frame.nregs + frame.nsseregs) == 1)
8532 || crtl->calls_eh_return)
8534 /* Restore registers. We can use ebp or esp to address the memory
8535 locations. If both are available, default to ebp, since offsets
8536 are known to be small. The only exception is esp pointing directly
8537 to the end of the block of saved registers, where we may simplify
8538 the addressing mode.
8540 If we are realigning the stack with bp and sp, the register restores
8541 can't be addressed by bp; sp must be used instead. */
8543 if (!frame_pointer_needed
8544 || (sp_valid && !frame.to_allocate)
8545 || stack_realign_fp)
8547 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8548 frame.to_allocate, style == 2);
8549 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
8550 frame.to_allocate
8551 + frame.nsseregs * 16
8552 + frame.padding0, style == 2);
8554 else
8556 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
8557 offset, style == 2);
8558 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
8559 offset
8560 + frame.nsseregs * 16
8561 + frame.padding0, style == 2);
8564 /* eh_return epilogues need %ecx added to the stack pointer. */
8565 if (style == 2)
8567 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
8569 /* Stack align doesn't work with eh_return. */
8570 gcc_assert (!crtl->stack_realign_needed);
8572 if (frame_pointer_needed)
8574 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
8575 tmp = plus_constant (tmp, UNITS_PER_WORD);
8576 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
8578 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
8579 emit_move_insn (hard_frame_pointer_rtx, tmp);
8581 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
8582 const0_rtx, style);
8584 else
8586 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
8587 tmp = plus_constant (tmp, (frame.to_allocate
8588 + frame.nregs * UNITS_PER_WORD
8589 + frame.nsseregs * 16
8590 + frame.padding0));
8591 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
8594 else if (!frame_pointer_needed)
8595 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8596 GEN_INT (frame.to_allocate
8597 + frame.nregs * UNITS_PER_WORD
8598 + frame.nsseregs * 16
8599 + frame.padding0),
8600 style);
8601 /* If not an i386, mov & pop is faster than "leave". */
8602 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
8603 || !cfun->machine->use_fast_prologue_epilogue)
8604 emit_insn ((*ix86_gen_leave) ());
8605 else
8607 pro_epilogue_adjust_stack (stack_pointer_rtx,
8608 hard_frame_pointer_rtx,
8609 const0_rtx, style);
8611 emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8614 else
8616 /* The first step is to deallocate the stack frame so that we can
8617 pop the registers.
8619 If we realign the stack with the frame pointer, then the stack pointer
8620 can't be recovered via lea $offset(%bp), %sp, because
8621 there is a padding area between bp and sp for the realignment.
8622 "add $to_allocate, %sp" must be used instead. */
8623 if (!sp_valid)
8625 gcc_assert (frame_pointer_needed);
8626 gcc_assert (!stack_realign_fp);
8627 pro_epilogue_adjust_stack (stack_pointer_rtx,
8628 hard_frame_pointer_rtx,
8629 GEN_INT (offset), style);
8630 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8631 frame.to_allocate, style == 2);
8632 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8633 GEN_INT (frame.nsseregs * 16), style);
8635 else if (frame.to_allocate || frame.nsseregs)
8637 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8638 frame.to_allocate,
8639 style == 2);
8640 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8641 GEN_INT (frame.to_allocate
8642 + frame.nsseregs * 16
8643 + frame.padding0), style);
8646 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8647 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8648 emit_insn ((*ix86_gen_pop1) (gen_rtx_REG (Pmode, regno)));
8649 if (frame_pointer_needed)
8651 /* Leave results in shorter dependency chains on CPUs that are
8652 able to grok it fast. */
8653 if (TARGET_USE_LEAVE)
8654 emit_insn ((*ix86_gen_leave) ());
8655 else
8657 /* If stack realignment really happened, recovering the stack
8658 pointer from the hard frame pointer is a must, if not using
8659 leave. */
8660 if (stack_realign_fp)
8661 pro_epilogue_adjust_stack (stack_pointer_rtx,
8662 hard_frame_pointer_rtx,
8663 const0_rtx, style);
8664 emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8669 if (crtl->drap_reg && crtl->stack_realign_needed)
8671 int param_ptr_offset = (call_used_regs[REGNO (crtl->drap_reg)]
8672 ? 0 : UNITS_PER_WORD);
8673 gcc_assert (stack_realign_drap);
8674 emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
8675 crtl->drap_reg,
8676 GEN_INT (-(UNITS_PER_WORD
8677 + param_ptr_offset))));
8678 if (!call_used_regs[REGNO (crtl->drap_reg)])
8679 emit_insn ((*ix86_gen_pop1) (crtl->drap_reg));
8683 /* Sibcall epilogues don't want a return instruction. */
8684 if (style == 0)
8685 return;
8687 if (crtl->args.pops_args && crtl->args.size)
8689 rtx popc = GEN_INT (crtl->args.pops_args);
8691 /* i386 can only pop 64K bytes. If asked to pop more, pop
8692 return address, do explicit add, and jump indirectly to the
8693 caller. */
8695 if (crtl->args.pops_args >= 65536)
8697 rtx ecx = gen_rtx_REG (SImode, CX_REG);
8699 /* There is no "pascal" calling convention in any 64bit ABI. */
8700 gcc_assert (!TARGET_64BIT);
8702 emit_insn (gen_popsi1 (ecx));
8703 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
8704 emit_jump_insn (gen_return_indirect_internal (ecx));
8706 else
8707 emit_jump_insn (gen_return_pop_internal (popc));
8709 else
8710 emit_jump_insn (gen_return_internal ());
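/* The emitted epilogues therefore take one of a few shapes on ia32
   (illustrative): "leave; ret" with a frame pointer and TARGET_USE_LEAVE;
   "movl %ebp, %esp; popl %ebp; ret" with a frame pointer but no leave;
   "addl $N, %esp" plus register pops and "ret" without one; and
   "ret $n" replacing "ret" when the callee pops its own arguments.  */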
8713 /* Reset from the function's potential modifications. */
8715 static void
8716 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8717 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8719 if (pic_offset_table_rtx)
8720 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
8721 #if TARGET_MACHO
8722 /* Mach-O doesn't support labels at the end of objects, so if
8723 it looks like we might want one, insert a NOP. */
8725 rtx insn = get_last_insn ();
8726 while (insn
8727 && NOTE_P (insn)
8728 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
8729 insn = PREV_INSN (insn);
8730 if (insn
8731 && (LABEL_P (insn)
8732 || (NOTE_P (insn)
8733 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
8734 fputs ("\tnop\n", file);
8736 #endif
8740 /* Extract the parts of an RTL expression that is a valid memory address
8741 for an instruction. Return 0 if the structure of the address is
8742 grossly off. Return -1 if the address contains ASHIFT, so it is not
8743 strictly valid but is still used for computing the length of a lea insn. */
8746 ix86_decompose_address (rtx addr, struct ix86_address *out)
8748 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
8749 rtx base_reg, index_reg;
8750 HOST_WIDE_INT scale = 1;
8751 rtx scale_rtx = NULL_RTX;
8752 int retval = 1;
8753 enum ix86_address_seg seg = SEG_DEFAULT;
8755 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
8756 base = addr;
8757 else if (GET_CODE (addr) == PLUS)
8759 rtx addends[4], op;
8760 int n = 0, i;
8762 op = addr;
8765 if (n >= 4)
8766 return 0;
8767 addends[n++] = XEXP (op, 1);
8768 op = XEXP (op, 0);
8770 while (GET_CODE (op) == PLUS);
8771 if (n >= 4)
8772 return 0;
8773 addends[n] = op;
8775 for (i = n; i >= 0; --i)
8777 op = addends[i];
8778 switch (GET_CODE (op))
8780 case MULT:
8781 if (index)
8782 return 0;
8783 index = XEXP (op, 0);
8784 scale_rtx = XEXP (op, 1);
8785 break;
8787 case UNSPEC:
8788 if (XINT (op, 1) == UNSPEC_TP
8789 && TARGET_TLS_DIRECT_SEG_REFS
8790 && seg == SEG_DEFAULT)
8791 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
8792 else
8793 return 0;
8794 break;
8796 case REG:
8797 case SUBREG:
8798 if (!base)
8799 base = op;
8800 else if (!index)
8801 index = op;
8802 else
8803 return 0;
8804 break;
8806 case CONST:
8807 case CONST_INT:
8808 case SYMBOL_REF:
8809 case LABEL_REF:
8810 if (disp)
8811 return 0;
8812 disp = op;
8813 break;
8815 default:
8816 return 0;
8820 else if (GET_CODE (addr) == MULT)
8822 index = XEXP (addr, 0); /* index*scale */
8823 scale_rtx = XEXP (addr, 1);
8825 else if (GET_CODE (addr) == ASHIFT)
8827 rtx tmp;
8829 /* We're called for lea too, which implements ashift on occasion. */
8830 index = XEXP (addr, 0);
8831 tmp = XEXP (addr, 1);
8832 if (!CONST_INT_P (tmp))
8833 return 0;
8834 scale = INTVAL (tmp);
8835 if ((unsigned HOST_WIDE_INT) scale > 3)
8836 return 0;
8837 scale = 1 << scale;
8838 retval = -1;
8840 else
8841 disp = addr; /* displacement */
8843 /* Extract the integral value of scale. */
8844 if (scale_rtx)
8846 if (!CONST_INT_P (scale_rtx))
8847 return 0;
8848 scale = INTVAL (scale_rtx);
8851 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
8852 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
8854 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
8855 if (base_reg && index_reg && scale == 1
8856 && (index_reg == arg_pointer_rtx
8857 || index_reg == frame_pointer_rtx
8858 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
8860 rtx tmp;
8861 tmp = base, base = index, index = tmp;
8862 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
8865 /* Special case: %ebp cannot be encoded as a base without a displacement. */
8866 if ((base_reg == hard_frame_pointer_rtx
8867 || base_reg == frame_pointer_rtx
8868 || base_reg == arg_pointer_rtx) && !disp)
8869 disp = const0_rtx;
8871 /* Special case: on K6, [%esi] forces the instruction to be vector
8872 decoded. Avoid this by transforming it to [%esi+0].
8873 Reload calls address legitimization without cfun defined, so we need
8874 to test cfun for being non-NULL. */
8875 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
8876 && base_reg && !index_reg && !disp
8877 && REG_P (base_reg)
8878 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
8879 disp = const0_rtx;
8881 /* Special case: encode reg+reg instead of reg*2. */
8882 if (!base && index && scale && scale == 2)
8883 base = index, base_reg = index_reg, scale = 1;
8885 /* Special case: scaling cannot be encoded without base or displacement. */
8886 if (!base && !disp && index && scale != 1)
8887 disp = const0_rtx;
8889 out->base = base;
8890 out->index = index;
8891 out->disp = disp;
8892 out->scale = scale;
8893 out->seg = seg;
8895 return retval;
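/* Example decomposition (illustrative): the AT&T operand
   "8(%ebx,%eax,4)", i.e.

     (plus (plus (mult (reg eax) (const_int 4)) (reg ebx)) (const_int 8))

   yields base = %ebx, index = %eax, scale = 4, disp = 8,
   seg = SEG_DEFAULT, and a return value of 1.  */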
8898 /* Return the cost of the memory address X.
8899 For i386, it is better to use a complex address than to let gcc copy
8900 the address into a reg and make a new pseudo. But not if the address
8901 requires two regs - that would mean more pseudos with longer
8902 lifetimes. */
8903 static int
8904 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
8906 struct ix86_address parts;
8907 int cost = 1;
8908 int ok = ix86_decompose_address (x, &parts);
8910 gcc_assert (ok);
8912 if (parts.base && GET_CODE (parts.base) == SUBREG)
8913 parts.base = SUBREG_REG (parts.base);
8914 if (parts.index && GET_CODE (parts.index) == SUBREG)
8915 parts.index = SUBREG_REG (parts.index);
8917 /* Attempt to minimize number of registers in the address. */
8918 if ((parts.base
8919 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
8920 || (parts.index
8921 && (!REG_P (parts.index)
8922 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
8923 cost++;
8925 if (parts.base
8926 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
8927 && parts.index
8928 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
8929 && parts.base != parts.index)
8930 cost++;
8932 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
8933 since its predecode logic can't detect the length of such instructions
8934 and decoding degenerates to the vector decoder. Increase the cost of
8935 such addresses here. The penalty is minimally 2 cycles. It may be
8936 worthwhile to split such addresses or even refuse them altogether.
8938 The following addressing modes are affected:
8939 [base+scale*index]
8940 [scale*index+disp]
8941 [base+index]
8943 The first and last cases may be avoidable by explicitly coding the zero
8944 displacement in the memory address, but I don't have an AMD-K6 machine
8945 handy to check this theory. */
8947 if (TARGET_K6
8948 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
8949 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
8950 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
8951 cost += 10;
8953 return cost;
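/* E.g. on K6 an address like [%eax+%ebx] (base plus index, scale 1, no
   displacement) costs 1 + 10 = 11 here, versus 1 for [%eax+8], which
   steers the RTL optimizers away from the slow forms.  */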
8956 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
8957 this is used to form addresses to local data when -fPIC is in
8958 use. */
8960 static bool
8961 darwin_local_data_pic (rtx disp)
8963 return (GET_CODE (disp) == UNSPEC
8964 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
8967 /* Determine if a given RTX is a valid constant. We already know this
8968 satisfies CONSTANT_P. */
8970 bool
8971 legitimate_constant_p (rtx x)
8973 switch (GET_CODE (x))
8975 case CONST:
8976 x = XEXP (x, 0);
8978 if (GET_CODE (x) == PLUS)
8980 if (!CONST_INT_P (XEXP (x, 1)))
8981 return false;
8982 x = XEXP (x, 0);
8985 if (TARGET_MACHO && darwin_local_data_pic (x))
8986 return true;
8988 /* Only some unspecs are valid as "constants". */
8989 if (GET_CODE (x) == UNSPEC)
8990 switch (XINT (x, 1))
8992 case UNSPEC_GOT:
8993 case UNSPEC_GOTOFF:
8994 case UNSPEC_PLTOFF:
8995 return TARGET_64BIT;
8996 case UNSPEC_TPOFF:
8997 case UNSPEC_NTPOFF:
8998 x = XVECEXP (x, 0, 0);
8999 return (GET_CODE (x) == SYMBOL_REF
9000 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9001 case UNSPEC_DTPOFF:
9002 x = XVECEXP (x, 0, 0);
9003 return (GET_CODE (x) == SYMBOL_REF
9004 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9005 default:
9006 return false;
9009 /* We must have drilled down to a symbol. */
9010 if (GET_CODE (x) == LABEL_REF)
9011 return true;
9012 if (GET_CODE (x) != SYMBOL_REF)
9013 return false;
9014 /* FALLTHRU */
9016 case SYMBOL_REF:
9017 /* TLS symbols are never valid. */
9018 if (SYMBOL_REF_TLS_MODEL (x))
9019 return false;
9021 /* DLLIMPORT symbols are never valid. */
9022 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9023 && SYMBOL_REF_DLLIMPORT_P (x))
9024 return false;
9025 break;
9027 case CONST_DOUBLE:
9028 if (GET_MODE (x) == TImode
9029 && x != CONST0_RTX (TImode)
9030 && !TARGET_64BIT)
9031 return false;
9032 break;
9034 case CONST_VECTOR:
9035 if (x == CONST0_RTX (GET_MODE (x)))
9036 return true;
9037 return false;
9039 default:
9040 break;
9043 /* Otherwise we handle everything else in the move patterns. */
9044 return true;
9047 /* Determine if it's legal to put X into the constant pool. This
9048 is not possible for the address of thread-local symbols, which
9049 is checked above. */
9051 static bool
9052 ix86_cannot_force_const_mem (rtx x)
9054 /* We can always put integral constants and vectors in memory. */
9055 switch (GET_CODE (x))
9057 case CONST_INT:
9058 case CONST_DOUBLE:
9059 case CONST_VECTOR:
9060 return false;
9062 default:
9063 break;
9065 return !legitimate_constant_p (x);
9068 /* Determine if a given RTX is a valid constant address. */
9070 bool
9071 constant_address_p (rtx x)
9073 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
9076 /* Nonzero if the constant value X is a legitimate general operand
9077 when generating PIC code. It is given that flag_pic is on and
9078 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9080 bool
9081 legitimate_pic_operand_p (rtx x)
9083 rtx inner;
9085 switch (GET_CODE (x))
9087 case CONST:
9088 inner = XEXP (x, 0);
9089 if (GET_CODE (inner) == PLUS
9090 && CONST_INT_P (XEXP (inner, 1)))
9091 inner = XEXP (inner, 0);
9093 /* Only some unspecs are valid as "constants". */
9094 if (GET_CODE (inner) == UNSPEC)
9095 switch (XINT (inner, 1))
9097 case UNSPEC_GOT:
9098 case UNSPEC_GOTOFF:
9099 case UNSPEC_PLTOFF:
9100 return TARGET_64BIT;
9101 case UNSPEC_TPOFF:
9102 x = XVECEXP (inner, 0, 0);
9103 return (GET_CODE (x) == SYMBOL_REF
9104 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9105 case UNSPEC_MACHOPIC_OFFSET:
9106 return legitimate_pic_address_disp_p (x);
9107 default:
9108 return false;
9110 /* FALLTHRU */
9112 case SYMBOL_REF:
9113 case LABEL_REF:
9114 return legitimate_pic_address_disp_p (x);
9116 default:
9117 return true;
9121 /* Determine if a given CONST RTX is a valid memory displacement
9122 in PIC mode. */
9124 bool
9125 legitimate_pic_address_disp_p (rtx disp)
9127 bool saw_plus;
9129 /* In 64bit mode we can allow direct addresses of symbols and labels
9130 when they are not dynamic symbols. */
9131 if (TARGET_64BIT)
9133 rtx op0 = disp, op1;
9135 switch (GET_CODE (disp))
9137 case LABEL_REF:
9138 return true;
9140 case CONST:
9141 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9142 break;
9143 op0 = XEXP (XEXP (disp, 0), 0);
9144 op1 = XEXP (XEXP (disp, 0), 1);
9145 if (!CONST_INT_P (op1)
9146 || INTVAL (op1) >= 16*1024*1024
9147 || INTVAL (op1) < -16*1024*1024)
9148 break;
9149 if (GET_CODE (op0) == LABEL_REF)
9150 return true;
9151 if (GET_CODE (op0) != SYMBOL_REF)
9152 break;
9153 /* FALLTHRU */
9155 case SYMBOL_REF:
9156 /* TLS references should always be enclosed in UNSPEC. */
9157 if (SYMBOL_REF_TLS_MODEL (op0))
9158 return false;
9159 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9160 && ix86_cmodel != CM_LARGE_PIC)
9161 return true;
9162 break;
9164 default:
9165 break;
9168 if (GET_CODE (disp) != CONST)
9169 return 0;
9170 disp = XEXP (disp, 0);
9172 if (TARGET_64BIT)
9174 /* It is not safe to allow PLUS expressions here; that would limit the
9175 allowed distance of GOT table references. We should not need these anyway. */
9176 if (GET_CODE (disp) != UNSPEC
9177 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9178 && XINT (disp, 1) != UNSPEC_GOTOFF
9179 && XINT (disp, 1) != UNSPEC_PLTOFF))
9180 return 0;
9182 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9183 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9184 return 0;
9185 return 1;
9188 saw_plus = false;
9189 if (GET_CODE (disp) == PLUS)
9191 if (!CONST_INT_P (XEXP (disp, 1)))
9192 return 0;
9193 disp = XEXP (disp, 0);
9194 saw_plus = true;
9197 if (TARGET_MACHO && darwin_local_data_pic (disp))
9198 return 1;
9200 if (GET_CODE (disp) != UNSPEC)
9201 return 0;
9203 switch (XINT (disp, 1))
9205 case UNSPEC_GOT:
9206 if (saw_plus)
9207 return false;
9208 /* We need to check for both symbols and labels because VxWorks loads
9209 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
9210 details. */
9211 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9212 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9213 case UNSPEC_GOTOFF:
9214 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
9215 While the ABI specifies a 32bit relocation as well, we don't
9216 produce it in the small PIC model at all. */
9217 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9218 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9219 && !TARGET_64BIT)
9220 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9221 return false;
9222 case UNSPEC_GOTTPOFF:
9223 case UNSPEC_GOTNTPOFF:
9224 case UNSPEC_INDNTPOFF:
9225 if (saw_plus)
9226 return false;
9227 disp = XVECEXP (disp, 0, 0);
9228 return (GET_CODE (disp) == SYMBOL_REF
9229 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9230 case UNSPEC_NTPOFF:
9231 disp = XVECEXP (disp, 0, 0);
9232 return (GET_CODE (disp) == SYMBOL_REF
9233 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9234 case UNSPEC_DTPOFF:
9235 disp = XVECEXP (disp, 0, 0);
9236 return (GET_CODE (disp) == SYMBOL_REF
9237 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9240 return 0;
9243 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
9244 memory address for an instruction. The MODE argument is the machine mode
9245 for the MEM expression that wants to use this address.
9247 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
9248 convert common non-canonical forms to canonical form so that they will
9249 be recognized. */
9251 int
9252 legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9253 rtx addr, int strict)
9255 struct ix86_address parts;
9256 rtx base, index, disp;
9257 HOST_WIDE_INT scale;
9258 const char *reason = NULL;
9259 rtx reason_rtx = NULL_RTX;
9261 if (ix86_decompose_address (addr, &parts) <= 0)
9263 reason = "decomposition failed";
9264 goto report_error;
9267 base = parts.base;
9268 index = parts.index;
9269 disp = parts.disp;
9270 scale = parts.scale;
9272 /* Validate base register.
9274 Don't allow SUBREG's that span more than a word here. It can lead to spill
9275 failures when the base is one word out of a two word structure, which is
9276 represented internally as a DImode int. */
9278 if (base)
9280 rtx reg;
9281 reason_rtx = base;
9283 if (REG_P (base))
9284 reg = base;
9285 else if (GET_CODE (base) == SUBREG
9286 && REG_P (SUBREG_REG (base))
9287 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9288 <= UNITS_PER_WORD)
9289 reg = SUBREG_REG (base);
9290 else
9292 reason = "base is not a register";
9293 goto report_error;
9296 if (GET_MODE (base) != Pmode)
9298 reason = "base is not in Pmode";
9299 goto report_error;
9302 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9303 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9305 reason = "base is not valid";
9306 goto report_error;
9310 /* Validate index register.
9312 Don't allow SUBREG's that span more than a word here -- same as above. */
9314 if (index)
9316 rtx reg;
9317 reason_rtx = index;
9319 if (REG_P (index))
9320 reg = index;
9321 else if (GET_CODE (index) == SUBREG
9322 && REG_P (SUBREG_REG (index))
9323 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9324 <= UNITS_PER_WORD)
9325 reg = SUBREG_REG (index);
9326 else
9328 reason = "index is not a register";
9329 goto report_error;
9332 if (GET_MODE (index) != Pmode)
9334 reason = "index is not in Pmode";
9335 goto report_error;
9338 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9339 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9341 reason = "index is not valid";
9342 goto report_error;
9346 /* Validate scale factor. */
9347 if (scale != 1)
9349 reason_rtx = GEN_INT (scale);
9350 if (!index)
9352 reason = "scale without index";
9353 goto report_error;
9356 if (scale != 2 && scale != 4 && scale != 8)
9358 reason = "scale is not a valid multiplier";
9359 goto report_error;
9363 /* Validate displacement. */
9364 if (disp)
9366 reason_rtx = disp;
9368 if (GET_CODE (disp) == CONST
9369 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9370 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9371 switch (XINT (XEXP (disp, 0), 1))
9373 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit
9374 when used. While the ABI also specifies 32bit relocations, we don't
9375 produce them at all and use IP-relative addressing instead. */
9376 case UNSPEC_GOT:
9377 case UNSPEC_GOTOFF:
9378 gcc_assert (flag_pic);
9379 if (!TARGET_64BIT)
9380 goto is_legitimate_pic;
9381 reason = "64bit address unspec";
9382 goto report_error;
9384 case UNSPEC_GOTPCREL:
9385 gcc_assert (flag_pic);
9386 goto is_legitimate_pic;
9388 case UNSPEC_GOTTPOFF:
9389 case UNSPEC_GOTNTPOFF:
9390 case UNSPEC_INDNTPOFF:
9391 case UNSPEC_NTPOFF:
9392 case UNSPEC_DTPOFF:
9393 break;
9395 default:
9396 reason = "invalid address unspec";
9397 goto report_error;
9400 else if (SYMBOLIC_CONST (disp)
9401 && (flag_pic
9402 || (TARGET_MACHO
9403 #if TARGET_MACHO
9404 && MACHOPIC_INDIRECT
9405 && !machopic_operand_p (disp)
9406 #endif
9410 is_legitimate_pic:
9411 if (TARGET_64BIT && (index || base))
9413 /* foo@dtpoff(%rX) is ok. */
9414 if (GET_CODE (disp) != CONST
9415 || GET_CODE (XEXP (disp, 0)) != PLUS
9416 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
9417 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
9418 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
9419 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
9421 reason = "non-constant pic memory reference";
9422 goto report_error;
9425 else if (! legitimate_pic_address_disp_p (disp))
9427 reason = "displacement is an invalid pic construct";
9428 goto report_error;
9431 /* This code used to verify that a symbolic pic displacement
9432 includes the pic_offset_table_rtx register.
9434 While this is a good idea, unfortunately these constructs may
9435 be created by the "adds using lea" optimization for incorrect
9436 code like:
9438 int a;
9439 int foo(int i)
9441 return *(&a+i);
9444 This code is nonsensical, but results in addressing the
9445 GOT table with a pic_offset_table_rtx base. We can't
9446 just refuse it easily, since it gets matched by the
9447 "addsi3" pattern, which later gets split to lea in
9448 case the output register differs from the input. While this
9449 could be handled by a separate addsi pattern for this case
9450 that never results in lea, disabling this test seems to be
9451 the easier and correct fix for the crash. */
9453 else if (GET_CODE (disp) != LABEL_REF
9454 && !CONST_INT_P (disp)
9455 && (GET_CODE (disp) != CONST
9456 || !legitimate_constant_p (disp))
9457 && (GET_CODE (disp) != SYMBOL_REF
9458 || !legitimate_constant_p (disp)))
9460 reason = "displacement is not constant";
9461 goto report_error;
9463 else if (TARGET_64BIT
9464 && !x86_64_immediate_operand (disp, VOIDmode))
9466 reason = "displacement is out of range";
9467 goto report_error;
9471 /* Everything looks valid. */
9472 return TRUE;
9474 report_error:
9475 return FALSE;
9478 /* Return a unique alias set for the GOT. */
9480 static alias_set_type
9481 ix86_GOT_alias_set (void)
9483 static alias_set_type set = -1;
9484 if (set == -1)
9485 set = new_alias_set ();
9486 return set;
9489 /* Return a legitimate reference for ORIG (an address) using the
9490 register REG. If REG is 0, a new pseudo is generated.
9492 There are two types of references that must be handled:
9494 1. Global data references must load the address from the GOT, via
9495 the PIC reg. An insn is emitted to do this load, and the reg is
9496 returned.
9498 2. Static data references, constant pool addresses, and code labels
9499 compute the address as an offset from the GOT, whose base is in
9500 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
9501 differentiate them from global data objects. The returned
9502 address is the PIC reg + an unspec constant.
9504 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
9505 reg also appears in the address. */
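/* Sketch of the two shapes produced below (added commentary; "foo" and
   "bar" are hypothetical symbols).  A global symbol in 32-bit PIC code
   becomes a load from the GOT:

     (mem:SI (plus:SI (reg:SI ebx)
                      (const:SI (unspec:SI [(symbol_ref "foo")] UNSPEC_GOT))))

   i.e. "movl foo@GOT(%ebx), %reg", while a local symbol is a plain
   displacement from the PIC base:

     (plus:SI (reg:SI ebx)
              (const:SI (unspec:SI [(symbol_ref "bar")] UNSPEC_GOTOFF)))

   i.e. "leal bar@GOTOFF(%ebx), %reg".  */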
9507 static rtx
9508 legitimize_pic_address (rtx orig, rtx reg)
9510 rtx addr = orig;
9511 rtx new_rtx = orig;
9512 rtx base;
9514 #if TARGET_MACHO
9515 if (TARGET_MACHO && !TARGET_64BIT)
9517 if (reg == 0)
9518 reg = gen_reg_rtx (Pmode);
9519 /* Use the generic Mach-O PIC machinery. */
9520 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
9522 #endif
9524 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
9525 new_rtx = addr;
9526 else if (TARGET_64BIT
9527 && ix86_cmodel != CM_SMALL_PIC
9528 && gotoff_operand (addr, Pmode))
9530 rtx tmpreg;
9531 /* This symbol may be referenced via a displacement from the PIC
9532 base address (@GOTOFF). */
9534 if (reload_in_progress)
9535 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9536 if (GET_CODE (addr) == CONST)
9537 addr = XEXP (addr, 0);
9538 if (GET_CODE (addr) == PLUS)
9540 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
9541 UNSPEC_GOTOFF);
9542 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
9544 else
9545 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
9546 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9547 if (!reg)
9548 tmpreg = gen_reg_rtx (Pmode);
9549 else
9550 tmpreg = reg;
9551 emit_move_insn (tmpreg, new_rtx);
9553 if (reg != 0)
9555 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
9556 tmpreg, 1, OPTAB_DIRECT);
9557 new_rtx = reg;
9559 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
9561 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
9563 /* This symbol may be referenced via a displacement from the PIC
9564 base address (@GOTOFF). */
9566 if (reload_in_progress)
9567 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9568 if (GET_CODE (addr) == CONST)
9569 addr = XEXP (addr, 0);
9570 if (GET_CODE (addr) == PLUS)
9572 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
9573 UNSPEC_GOTOFF);
9574 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
9576 else
9577 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
9578 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9579 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9581 if (reg != 0)
9583 emit_move_insn (reg, new_rtx);
9584 new_rtx = reg;
9587 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
9588 /* We can't use @GOTOFF for text labels on VxWorks;
9589 see gotoff_operand. */
9590 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
9592 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
9594 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
9595 return legitimize_dllimport_symbol (addr, true);
9596 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
9597 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
9598 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
9600 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
9601 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
9605 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
9607 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
9608 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9609 new_rtx = gen_const_mem (Pmode, new_rtx);
9610 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
9612 if (reg == 0)
9613 reg = gen_reg_rtx (Pmode);
9614 /* Use gen_movsi directly; otherwise the address is loaded
9615 into a register for CSE. We don't want to CSE these addresses;
9616 instead we CSE addresses from the GOT table, so skip this. */
9617 emit_insn (gen_movsi (reg, new_rtx));
9618 new_rtx = reg;
9620 else
9622 /* This symbol must be referenced via a load from the
9623 Global Offset Table (@GOT). */
9625 if (reload_in_progress)
9626 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9627 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
9628 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9629 if (TARGET_64BIT)
9630 new_rtx = force_reg (Pmode, new_rtx);
9631 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9632 new_rtx = gen_const_mem (Pmode, new_rtx);
9633 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
9635 if (reg == 0)
9636 reg = gen_reg_rtx (Pmode);
9637 emit_move_insn (reg, new_rtx);
9638 new_rtx = reg;
9641 else
9643 if (CONST_INT_P (addr)
9644 && !x86_64_immediate_operand (addr, VOIDmode))
9646 if (reg)
9648 emit_move_insn (reg, addr);
9649 new_rtx = reg;
9651 else
9652 new_rtx = force_reg (Pmode, addr);
9654 else if (GET_CODE (addr) == CONST)
9656 addr = XEXP (addr, 0);
9658 /* We must match stuff we generated before. Assume the only
9659 unspecs that can get here are ours. Not that we could do
9660 anything with them anyway... */
9661 if (GET_CODE (addr) == UNSPEC
9662 || (GET_CODE (addr) == PLUS
9663 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
9664 return orig;
9665 gcc_assert (GET_CODE (addr) == PLUS);
9667 if (GET_CODE (addr) == PLUS)
9669 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
9671 /* Check first to see if this is a constant offset from a @GOTOFF
9672 symbol reference. */
9673 if (gotoff_operand (op0, Pmode)
9674 && CONST_INT_P (op1))
9676 if (!TARGET_64BIT)
9678 if (reload_in_progress)
9679 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9680 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
9681 UNSPEC_GOTOFF);
9682 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
9683 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9684 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9686 if (reg != 0)
9688 emit_move_insn (reg, new_rtx);
9689 new_rtx = reg;
9692 else
9694 if (INTVAL (op1) < -16*1024*1024
9695 || INTVAL (op1) >= 16*1024*1024)
9697 if (!x86_64_immediate_operand (op1, Pmode))
9698 op1 = force_reg (Pmode, op1);
9699 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
9703 else
9705 base = legitimize_pic_address (XEXP (addr, 0), reg);
9706 new_rtx = legitimize_pic_address (XEXP (addr, 1),
9707 base == reg ? NULL_RTX : reg);
9709 if (CONST_INT_P (new_rtx))
9710 new_rtx = plus_constant (base, INTVAL (new_rtx));
9711 else
9713 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
9715 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
9716 new_rtx = XEXP (new_rtx, 1);
9718 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
9723 return new_rtx;
9726 /* Load the thread pointer. If TO_REG is true, force it into a register. */
9728 static rtx
9729 get_thread_pointer (int to_reg)
9731 rtx tp, reg, insn;
9733 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
9734 if (!to_reg)
9735 return tp;
9737 reg = gen_reg_rtx (Pmode);
9738 insn = gen_rtx_SET (VOIDmode, reg, tp);
9739 insn = emit_insn (insn);
9741 return reg;
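/* Added note (hedged): on GNU/Linux targets the UNSPEC_TP expression
   built above ends up as a segment-register access, %gs:0 on ia32 and
   %fs:0 on x86-64.  */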
9744 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
9745 false if we expect this to be used for a memory address and true if
9746 we expect to load the address into a register. */
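/* Added overview (a summary of the cases below, not from the original
   source); on GNU systems the four models roughly correspond to:

     global-dynamic: call __tls_get_addr for the symbol itself
     local-dynamic:  one __tls_get_addr call for the module base, plus an
                     x@DTPOFF offset per variable
     initial-exec:   load the offset from the GOT (x@GOTTPOFF and
                     friends), then add the thread pointer
     local-exec:     add a link-time constant (x@NTPOFF / x@TPOFF) to the
                     thread pointer.  */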
9748 static rtx
9749 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
9751 rtx dest, base, off, pic, tp;
9752 int type;
9754 switch (model)
9756 case TLS_MODEL_GLOBAL_DYNAMIC:
9757 dest = gen_reg_rtx (Pmode);
9758 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
9760 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
9762 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
9764 start_sequence ();
9765 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
9766 insns = get_insns ();
9767 end_sequence ();
9769 RTL_CONST_CALL_P (insns) = 1;
9770 emit_libcall_block (insns, dest, rax, x);
9772 else if (TARGET_64BIT && TARGET_GNU2_TLS)
9773 emit_insn (gen_tls_global_dynamic_64 (dest, x));
9774 else
9775 emit_insn (gen_tls_global_dynamic_32 (dest, x));
9777 if (TARGET_GNU2_TLS)
9779 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
9781 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
9783 break;
9785 case TLS_MODEL_LOCAL_DYNAMIC:
9786 base = gen_reg_rtx (Pmode);
9787 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
9789 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
9791 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
9793 start_sequence ();
9794 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
9795 insns = get_insns ();
9796 end_sequence ();
9798 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
9799 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
9800 RTL_CONST_CALL_P (insns) = 1;
9801 emit_libcall_block (insns, base, rax, note);
9803 else if (TARGET_64BIT && TARGET_GNU2_TLS)
9804 emit_insn (gen_tls_local_dynamic_base_64 (base));
9805 else
9806 emit_insn (gen_tls_local_dynamic_base_32 (base));
9808 if (TARGET_GNU2_TLS)
9810 rtx x = ix86_tls_module_base ();
9812 set_unique_reg_note (get_last_insn (), REG_EQUIV,
9813 gen_rtx_MINUS (Pmode, x, tp));
9816 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
9817 off = gen_rtx_CONST (Pmode, off);
9819 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
9821 if (TARGET_GNU2_TLS)
9823 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
9825 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
9828 break;
9830 case TLS_MODEL_INITIAL_EXEC:
9831 if (TARGET_64BIT)
9833 pic = NULL;
9834 type = UNSPEC_GOTNTPOFF;
9836 else if (flag_pic)
9838 if (reload_in_progress)
9839 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9840 pic = pic_offset_table_rtx;
9841 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
9843 else if (!TARGET_ANY_GNU_TLS)
9845 pic = gen_reg_rtx (Pmode);
9846 emit_insn (gen_set_got (pic));
9847 type = UNSPEC_GOTTPOFF;
9849 else
9851 pic = NULL;
9852 type = UNSPEC_INDNTPOFF;
9855 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
9856 off = gen_rtx_CONST (Pmode, off);
9857 if (pic)
9858 off = gen_rtx_PLUS (Pmode, pic, off);
9859 off = gen_const_mem (Pmode, off);
9860 set_mem_alias_set (off, ix86_GOT_alias_set ());
9862 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9864 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
9865 off = force_reg (Pmode, off);
9866 return gen_rtx_PLUS (Pmode, base, off);
9868 else
9870 base = get_thread_pointer (true);
9871 dest = gen_reg_rtx (Pmode);
9872 emit_insn (gen_subsi3 (dest, base, off));
9874 break;
9876 case TLS_MODEL_LOCAL_EXEC:
9877 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
9878 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9879 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
9880 off = gen_rtx_CONST (Pmode, off);
9882 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9884 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
9885 return gen_rtx_PLUS (Pmode, base, off);
9887 else
9889 base = get_thread_pointer (true);
9890 dest = gen_reg_rtx (Pmode);
9891 emit_insn (gen_subsi3 (dest, base, off));
9893 break;
9895 default:
9896 gcc_unreachable ();
9899 return dest;
9902 /* Create or return the unique __imp_DECL dllimport symbol corresponding
9903 to symbol DECL. */
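/* For example (illustrative): a dllimport variable "foo" is referenced
   through the pointer "__imp__foo" on targets whose user_label_prefix is
   "_", or "__imp_foo" for fastcall symbols and targets with an empty
   user_label_prefix, matching the prefix choice below.  */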
9905 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
9906 htab_t dllimport_map;
9908 static tree
9909 get_dllimport_decl (tree decl)
9911 struct tree_map *h, in;
9912 void **loc;
9913 const char *name;
9914 const char *prefix;
9915 size_t namelen, prefixlen;
9916 char *imp_name;
9917 tree to;
9918 rtx rtl;
9920 if (!dllimport_map)
9921 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
9923 in.hash = htab_hash_pointer (decl);
9924 in.base.from = decl;
9925 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
9926 h = (struct tree_map *) *loc;
9927 if (h)
9928 return h->to;
9930 *loc = h = GGC_NEW (struct tree_map);
9931 h->hash = in.hash;
9932 h->base.from = decl;
9933 h->to = to = build_decl (VAR_DECL, NULL, ptr_type_node);
9934 DECL_ARTIFICIAL (to) = 1;
9935 DECL_IGNORED_P (to) = 1;
9936 DECL_EXTERNAL (to) = 1;
9937 TREE_READONLY (to) = 1;
9939 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9940 name = targetm.strip_name_encoding (name);
9941 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
9942 ? "*__imp_" : "*__imp__";
9943 namelen = strlen (name);
9944 prefixlen = strlen (prefix);
9945 imp_name = (char *) alloca (namelen + prefixlen + 1);
9946 memcpy (imp_name, prefix, prefixlen);
9947 memcpy (imp_name + prefixlen, name, namelen + 1);
9949 name = ggc_alloc_string (imp_name, namelen + prefixlen);
9950 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
9951 SET_SYMBOL_REF_DECL (rtl, to);
9952 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
9954 rtl = gen_const_mem (Pmode, rtl);
9955 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
9957 SET_DECL_RTL (to, rtl);
9958 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
9960 return to;
9963 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
9964 true if we require the result be a register. */
9966 static rtx
9967 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
9969 tree imp_decl;
9970 rtx x;
9972 gcc_assert (SYMBOL_REF_DECL (symbol));
9973 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
9975 x = DECL_RTL (imp_decl);
9976 if (want_reg)
9977 x = force_reg (Pmode, x);
9978 return x;
9981 /* Try machine-dependent ways of modifying an illegitimate address
9982 to be legitimate. If we find one, return the new, valid address.
9983 This macro is used in only one place: `memory_address' in explow.c.
9985 OLDX is the address as it was before break_out_memory_refs was called.
9986 In some cases it is useful to look at this to decide what needs to be done.
9988 MODE and WIN are passed so that this macro can use
9989 GO_IF_LEGITIMATE_ADDRESS.
9991 It is always safe for this macro to do nothing. It exists to recognize
9992 opportunities to optimize the output.
9994 For the 80386, we handle X+REG by loading X into a register R and
9995 using R+REG. R will go in a general reg and indexing will be used.
9996 However, if REG is a broken-out memory address or multiplication,
9997 nothing needs to be done because REG can certainly go in a general reg.
9999 When -fpic is used, special handling is needed for symbolic references.
10000 See comments by legitimize_pic_address in i386.c for details. */
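/* A sketch of the canonicalizations performed below (added commentary):

     (plus (ashift (reg) (const_int 2)) (reg))
       --> (plus (mult (reg) (const_int 4)) (reg))

     (plus (mult (reg) (const_int 4)) (plus (reg) (const_int 8)))
       --> (plus (plus (mult (reg) (const_int 4)) (reg)) (const_int 8))

   so that ix86_decompose_address can recognize base + index*scale + disp.  */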
10002 rtx
10003 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
10005 int changed = 0;
10006 unsigned log;
10008 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10009 if (log)
10010 return legitimize_tls_address (x, (enum tls_model) log, false);
10011 if (GET_CODE (x) == CONST
10012 && GET_CODE (XEXP (x, 0)) == PLUS
10013 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10014 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10016 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10017 (enum tls_model) log, false);
10018 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10021 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10023 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10024 return legitimize_dllimport_symbol (x, true);
10025 if (GET_CODE (x) == CONST
10026 && GET_CODE (XEXP (x, 0)) == PLUS
10027 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10028 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10030 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10031 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10035 if (flag_pic && SYMBOLIC_CONST (x))
10036 return legitimize_pic_address (x, 0);
10038 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10039 if (GET_CODE (x) == ASHIFT
10040 && CONST_INT_P (XEXP (x, 1))
10041 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10043 changed = 1;
10044 log = INTVAL (XEXP (x, 1));
10045 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10046 GEN_INT (1 << log));
10049 if (GET_CODE (x) == PLUS)
10051 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10053 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10054 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10055 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10057 changed = 1;
10058 log = INTVAL (XEXP (XEXP (x, 0), 1));
10059 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10060 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10061 GEN_INT (1 << log));
10064 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10065 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10066 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10068 changed = 1;
10069 log = INTVAL (XEXP (XEXP (x, 1), 1));
10070 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10071 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10072 GEN_INT (1 << log));
10075 /* Put multiply first if it isn't already. */
10076 if (GET_CODE (XEXP (x, 1)) == MULT)
10078 rtx tmp = XEXP (x, 0);
10079 XEXP (x, 0) = XEXP (x, 1);
10080 XEXP (x, 1) = tmp;
10081 changed = 1;
10084 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10085 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10086 created by virtual register instantiation, register elimination, and
10087 similar optimizations. */
10088 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10090 changed = 1;
10091 x = gen_rtx_PLUS (Pmode,
10092 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10093 XEXP (XEXP (x, 1), 0)),
10094 XEXP (XEXP (x, 1), 1));
10097 /* Canonicalize
10098 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10099 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10100 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10101 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10102 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10103 && CONSTANT_P (XEXP (x, 1)))
10105 rtx constant;
10106 rtx other = NULL_RTX;
10108 if (CONST_INT_P (XEXP (x, 1)))
10110 constant = XEXP (x, 1);
10111 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10113 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10115 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10116 other = XEXP (x, 1);
10118 else
10119 constant = 0;
10121 if (constant)
10123 changed = 1;
10124 x = gen_rtx_PLUS (Pmode,
10125 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10126 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10127 plus_constant (other, INTVAL (constant)));
10131 if (changed && legitimate_address_p (mode, x, FALSE))
10132 return x;
10134 if (GET_CODE (XEXP (x, 0)) == MULT)
10136 changed = 1;
10137 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10140 if (GET_CODE (XEXP (x, 1)) == MULT)
10142 changed = 1;
10143 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10146 if (changed
10147 && REG_P (XEXP (x, 1))
10148 && REG_P (XEXP (x, 0)))
10149 return x;
10151 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10153 changed = 1;
10154 x = legitimize_pic_address (x, 0);
10157 if (changed && legitimate_address_p (mode, x, FALSE))
10158 return x;
10160 if (REG_P (XEXP (x, 0)))
10162 rtx temp = gen_reg_rtx (Pmode);
10163 rtx val = force_operand (XEXP (x, 1), temp);
10164 if (val != temp)
10165 emit_move_insn (temp, val);
10167 XEXP (x, 1) = temp;
10168 return x;
10171 else if (REG_P (XEXP (x, 1)))
10173 rtx temp = gen_reg_rtx (Pmode);
10174 rtx val = force_operand (XEXP (x, 0), temp);
10175 if (val != temp)
10176 emit_move_insn (temp, val);
10178 XEXP (x, 0) = temp;
10179 return x;
10183 return x;
10186 /* Print an integer constant expression in assembler syntax. Addition
10187 and subtraction are the only arithmetic that may appear in these
10188 expressions. FILE is the stdio stream to write to, X is the rtx, and
10189 CODE is the operand print code from the output string. */
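/* For instance (illustrative), the UNSPEC wrappers built by
   legitimize_pic_address print here as relocation suffixes:
   (unspec [sym] UNSPEC_GOTOFF) becomes "sym@GOTOFF", and in AT&T syntax
   (unspec [sym] UNSPEC_GOTPCREL) becomes "sym@GOTPCREL(%rip)".  */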
10191 static void
10192 output_pic_addr_const (FILE *file, rtx x, int code)
10194 char buf[256];
10196 switch (GET_CODE (x))
10198 case PC:
10199 gcc_assert (flag_pic);
10200 putc ('.', file);
10201 break;
10203 case SYMBOL_REF:
10204 if (! TARGET_MACHO || TARGET_64BIT)
10205 output_addr_const (file, x);
10206 else
10208 const char *name = XSTR (x, 0);
10210 /* Mark the decl as referenced so that cgraph will
10211 output the function. */
10212 if (SYMBOL_REF_DECL (x))
10213 mark_decl_referenced (SYMBOL_REF_DECL (x));
10215 #if TARGET_MACHO
10216 if (MACHOPIC_INDIRECT
10217 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10218 name = machopic_indirection_name (x, /*stub_p=*/true);
10219 #endif
10220 assemble_name (file, name);
10222 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10223 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10224 fputs ("@PLT", file);
10225 break;
10227 case LABEL_REF:
10228 x = XEXP (x, 0);
10229 /* FALLTHRU */
10230 case CODE_LABEL:
10231 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10232 assemble_name (asm_out_file, buf);
10233 break;
10235 case CONST_INT:
10236 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10237 break;
10239 case CONST:
10240 /* This used to output parentheses around the expression,
10241 but that does not work on the 386 (either ATT or BSD assembler). */
10242 output_pic_addr_const (file, XEXP (x, 0), code);
10243 break;
10245 case CONST_DOUBLE:
10246 if (GET_MODE (x) == VOIDmode)
10248 /* We can use %d if the number is <32 bits and positive. */
10249 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10250 fprintf (file, "0x%lx%08lx",
10251 (unsigned long) CONST_DOUBLE_HIGH (x),
10252 (unsigned long) CONST_DOUBLE_LOW (x));
10253 else
10254 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10256 else
10257 /* We can't handle floating point constants;
10258 PRINT_OPERAND must handle them. */
10259 output_operand_lossage ("floating constant misused");
10260 break;
10262 case PLUS:
10263 /* Some assemblers need integer constants to appear first. */
10264 if (CONST_INT_P (XEXP (x, 0)))
10266 output_pic_addr_const (file, XEXP (x, 0), code);
10267 putc ('+', file);
10268 output_pic_addr_const (file, XEXP (x, 1), code);
10270 else
10272 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10273 output_pic_addr_const (file, XEXP (x, 1), code);
10274 putc ('+', file);
10275 output_pic_addr_const (file, XEXP (x, 0), code);
10277 break;
10279 case MINUS:
10280 if (!TARGET_MACHO)
10281 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10282 output_pic_addr_const (file, XEXP (x, 0), code);
10283 putc ('-', file);
10284 output_pic_addr_const (file, XEXP (x, 1), code);
10285 if (!TARGET_MACHO)
10286 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10287 break;
10289 case UNSPEC:
10290 gcc_assert (XVECLEN (x, 0) == 1);
10291 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10292 switch (XINT (x, 1))
10294 case UNSPEC_GOT:
10295 fputs ("@GOT", file);
10296 break;
10297 case UNSPEC_GOTOFF:
10298 fputs ("@GOTOFF", file);
10299 break;
10300 case UNSPEC_PLTOFF:
10301 fputs ("@PLTOFF", file);
10302 break;
10303 case UNSPEC_GOTPCREL:
10304 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10305 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10306 break;
10307 case UNSPEC_GOTTPOFF:
10308 /* FIXME: This might be @TPOFF in Sun ld too. */
10309 fputs ("@GOTTPOFF", file);
10310 break;
10311 case UNSPEC_TPOFF:
10312 fputs ("@TPOFF", file);
10313 break;
10314 case UNSPEC_NTPOFF:
10315 if (TARGET_64BIT)
10316 fputs ("@TPOFF", file);
10317 else
10318 fputs ("@NTPOFF", file);
10319 break;
10320 case UNSPEC_DTPOFF:
10321 fputs ("@DTPOFF", file);
10322 break;
10323 case UNSPEC_GOTNTPOFF:
10324 if (TARGET_64BIT)
10325 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10326 "@GOTTPOFF(%rip)": "@GOTTPOFF[rip]", file);
10327 else
10328 fputs ("@GOTNTPOFF", file);
10329 break;
10330 case UNSPEC_INDNTPOFF:
10331 fputs ("@INDNTPOFF", file);
10332 break;
10333 #if TARGET_MACHO
10334 case UNSPEC_MACHOPIC_OFFSET:
10335 putc ('-', file);
10336 machopic_output_function_base_name (file);
10337 break;
10338 #endif
10339 default:
10340 output_operand_lossage ("invalid UNSPEC as operand");
10341 break;
10343 break;
10345 default:
10346 output_operand_lossage ("invalid expression as operand");
10350 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10351 We need to emit DTP-relative relocations. */
10353 static void ATTRIBUTE_UNUSED
10354 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10356 fputs (ASM_LONG, file);
10357 output_addr_const (file, x);
10358 fputs ("@DTPOFF", file);
10359 switch (size)
10361 case 4:
10362 break;
10363 case 8:
10364 fputs (", 0", file);
10365 break;
10366 default:
10367 gcc_unreachable ();
10371 /* Return true if X is a representation of the PIC register. This copes
10372 with calls from ix86_find_base_term, where the register might have
10373 been replaced by a cselib value. */
10375 static bool
10376 ix86_pic_register_p (rtx x)
10378 if (GET_CODE (x) == VALUE)
10379 return (pic_offset_table_rtx
10380 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10381 else
10382 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10385 /* In the name of slightly smaller debug output, and to cater to
10386 general assembler lossage, recognize PIC+GOTOFF and turn it back
10387 into a direct symbol reference.
10389 On Darwin, this is necessary to avoid a crash, because Darwin
10390 has a different PIC label for each routine but the DWARF debugging
10391 information is not associated with any particular routine, so it's
10392 necessary to remove references to the PIC label from RTL stored by
10393 the DWARF output code. */
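/* Sketch of the transformation (added commentary): a 32-bit reference

     (plus (reg:SI ebx)
           (const (unspec [(symbol_ref "sym")] UNSPEC_GOTOFF)))

   delegitimizes back to plain (symbol_ref "sym"), with any register and
   constant addends re-applied around the result.  */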
10395 static rtx
10396 ix86_delegitimize_address (rtx orig_x)
10398 rtx x = orig_x;
10399 /* reg_addend is NULL or a multiple of some register. */
10400 rtx reg_addend = NULL_RTX;
10401 /* const_addend is NULL or a const_int. */
10402 rtx const_addend = NULL_RTX;
10403 /* This is the result, or NULL. */
10404 rtx result = NULL_RTX;
10406 if (MEM_P (x))
10407 x = XEXP (x, 0);
10409 if (TARGET_64BIT)
10411 if (GET_CODE (x) != CONST
10412 || GET_CODE (XEXP (x, 0)) != UNSPEC
10413 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
10414 || !MEM_P (orig_x))
10415 return orig_x;
10416 return XVECEXP (XEXP (x, 0), 0, 0);
10419 if (GET_CODE (x) != PLUS
10420 || GET_CODE (XEXP (x, 1)) != CONST)
10421 return orig_x;
10423 if (ix86_pic_register_p (XEXP (x, 0)))
10424 /* %ebx + GOT/GOTOFF */
10426 else if (GET_CODE (XEXP (x, 0)) == PLUS)
10428 /* %ebx + %reg * scale + GOT/GOTOFF */
10429 reg_addend = XEXP (x, 0);
10430 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
10431 reg_addend = XEXP (reg_addend, 1);
10432 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
10433 reg_addend = XEXP (reg_addend, 0);
10434 else
10435 return orig_x;
10436 if (!REG_P (reg_addend)
10437 && GET_CODE (reg_addend) != MULT
10438 && GET_CODE (reg_addend) != ASHIFT)
10439 return orig_x;
10441 else
10442 return orig_x;
10444 x = XEXP (XEXP (x, 1), 0);
10445 if (GET_CODE (x) == PLUS
10446 && CONST_INT_P (XEXP (x, 1)))
10448 const_addend = XEXP (x, 1);
10449 x = XEXP (x, 0);
10452 if (GET_CODE (x) == UNSPEC
10453 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
10454 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
10455 result = XVECEXP (x, 0, 0);
10457 if (TARGET_MACHO && darwin_local_data_pic (x)
10458 && !MEM_P (orig_x))
10459 result = XVECEXP (x, 0, 0);
10461 if (! result)
10462 return orig_x;
10464 if (const_addend)
10465 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
10466 if (reg_addend)
10467 result = gen_rtx_PLUS (Pmode, reg_addend, result);
10468 return result;
10471 /* If X is a machine specific address (i.e. a symbol or label being
10472 referenced as a displacement from the GOT implemented using an
10473 UNSPEC), then return the base term. Otherwise return X. */
10475 rtx
10476 ix86_find_base_term (rtx x)
10478 rtx term;
10480 if (TARGET_64BIT)
10482 if (GET_CODE (x) != CONST)
10483 return x;
10484 term = XEXP (x, 0);
10485 if (GET_CODE (term) == PLUS
10486 && (CONST_INT_P (XEXP (term, 1))
10487 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
10488 term = XEXP (term, 0);
10489 if (GET_CODE (term) != UNSPEC
10490 || XINT (term, 1) != UNSPEC_GOTPCREL)
10491 return x;
10493 return XVECEXP (term, 0, 0);
10496 return ix86_delegitimize_address (x);
10499 static void
10500 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
10501 int fp, FILE *file)
10503 const char *suffix;
10505 if (mode == CCFPmode || mode == CCFPUmode)
10507 enum rtx_code second_code, bypass_code;
10508 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
10509 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
10510 code = ix86_fp_compare_code_to_integer (code);
10511 mode = CCmode;
10513 if (reverse)
10514 code = reverse_condition (code);
10516 switch (code)
10518 case EQ:
10519 switch (mode)
10521 case CCAmode:
10522 suffix = "a";
10523 break;
10525 case CCCmode:
10526 suffix = "c";
10527 break;
10529 case CCOmode:
10530 suffix = "o";
10531 break;
10533 case CCSmode:
10534 suffix = "s";
10535 break;
10537 default:
10538 suffix = "e";
10540 break;
10541 case NE:
10542 switch (mode)
10544 case CCAmode:
10545 suffix = "na";
10546 break;
10548 case CCCmode:
10549 suffix = "nc";
10550 break;
10552 case CCOmode:
10553 suffix = "no";
10554 break;
10556 case CCSmode:
10557 suffix = "ns";
10558 break;
10560 default:
10561 suffix = "ne";
10563 break;
10564 case GT:
10565 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
10566 suffix = "g";
10567 break;
10568 case GTU:
10569 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
10570 Those same assemblers have the same but opposite lossage on cmov. */
10571 if (mode == CCmode)
10572 suffix = fp ? "nbe" : "a";
10573 else if (mode == CCCmode)
10574 suffix = "b";
10575 else
10576 gcc_unreachable ();
10577 break;
10578 case LT:
10579 switch (mode)
10581 case CCNOmode:
10582 case CCGOCmode:
10583 suffix = "s";
10584 break;
10586 case CCmode:
10587 case CCGCmode:
10588 suffix = "l";
10589 break;
10591 default:
10592 gcc_unreachable ();
10594 break;
10595 case LTU:
10596 gcc_assert (mode == CCmode || mode == CCCmode);
10597 suffix = "b";
10598 break;
10599 case GE:
10600 switch (mode)
10602 case CCNOmode:
10603 case CCGOCmode:
10604 suffix = "ns";
10605 break;
10607 case CCmode:
10608 case CCGCmode:
10609 suffix = "ge";
10610 break;
10612 default:
10613 gcc_unreachable ();
10615 break;
10616 case GEU:
10617 /* ??? As above. */
10618 gcc_assert (mode == CCmode || mode == CCCmode);
10619 suffix = fp ? "nb" : "ae";
10620 break;
10621 case LE:
10622 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
10623 suffix = "le";
10624 break;
10625 case LEU:
10626 /* ??? As above. */
10627 if (mode == CCmode)
10628 suffix = "be";
10629 else if (mode == CCCmode)
10630 suffix = fp ? "nb" : "ae";
10631 else
10632 gcc_unreachable ();
10633 break;
10634 case UNORDERED:
10635 suffix = fp ? "u" : "p";
10636 break;
10637 case ORDERED:
10638 suffix = fp ? "nu" : "np";
10639 break;
10640 default:
10641 gcc_unreachable ();
10643 fputs (suffix, file);
10646 /* Print the name of register X to FILE based on its machine mode and number.
10647 If CODE is 'w', pretend the mode is HImode.
10648 If CODE is 'b', pretend the mode is QImode.
10649 If CODE is 'k', pretend the mode is SImode.
10650 If CODE is 'q', pretend the mode is DImode.
10651 If CODE is 'x', pretend the mode is V4SFmode.
10652 If CODE is 't', pretend the mode is V8SFmode.
10653 If CODE is 'h', pretend the reg is the 'high' byte register.
10654 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.
10655 If CODE is 'd', duplicate the operand for an AVX instruction. */
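/* Example (illustrative): for hard register ax, CODE 'b' prints "al",
   'w' prints "ax", 'k' prints "eax", 'q' prints "rax" and 'h' prints
   "ah"; the extended registers r8-r15 instead use the "r8b"/"r8w"/
   "r8d"/"r8" forms handled below.  */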
10658 void
10659 print_reg (rtx x, int code, FILE *file)
10661 const char *reg;
10662 bool duplicated = code == 'd' && TARGET_AVX;
10664 gcc_assert (x == pc_rtx
10665 || (REGNO (x) != ARG_POINTER_REGNUM
10666 && REGNO (x) != FRAME_POINTER_REGNUM
10667 && REGNO (x) != FLAGS_REG
10668 && REGNO (x) != FPSR_REG
10669 && REGNO (x) != FPCR_REG));
10671 if (ASSEMBLER_DIALECT == ASM_ATT)
10672 putc ('%', file);
10674 if (x == pc_rtx)
10676 gcc_assert (TARGET_64BIT);
10677 fputs ("rip", file);
10678 return;
10681 if (code == 'w' || MMX_REG_P (x))
10682 code = 2;
10683 else if (code == 'b')
10684 code = 1;
10685 else if (code == 'k')
10686 code = 4;
10687 else if (code == 'q')
10688 code = 8;
10689 else if (code == 'y')
10690 code = 3;
10691 else if (code == 'h')
10692 code = 0;
10693 else if (code == 'x')
10694 code = 16;
10695 else if (code == 't')
10696 code = 32;
10697 else
10698 code = GET_MODE_SIZE (GET_MODE (x));
10700 /* Irritatingly, AMD extended registers use a different naming convention
10701 from the normal registers. */
10702 if (REX_INT_REG_P (x))
10704 gcc_assert (TARGET_64BIT);
10705 switch (code)
10707 case 0:
10708 error ("extended registers have no high halves");
10709 break;
10710 case 1:
10711 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
10712 break;
10713 case 2:
10714 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
10715 break;
10716 case 4:
10717 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
10718 break;
10719 case 8:
10720 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
10721 break;
10722 default:
10723 error ("unsupported operand size for extended register");
10724 break;
10726 return;
10729 reg = NULL;
10730 switch (code)
10732 case 3:
10733 if (STACK_TOP_P (x))
10735 reg = "st(0)";
10736 break;
10738 /* FALLTHRU */
10739 case 8:
10740 case 4:
10741 case 12:
10742 if (! ANY_FP_REG_P (x))
10743 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
10744 /* FALLTHRU */
10745 case 16:
10746 case 2:
10747 normal:
10748 reg = hi_reg_name[REGNO (x)];
10749 break;
10750 case 1:
10751 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
10752 goto normal;
10753 reg = qi_reg_name[REGNO (x)];
10754 break;
10755 case 0:
10756 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
10757 goto normal;
10758 reg = qi_high_reg_name[REGNO (x)];
10759 break;
10760 case 32:
10761 if (SSE_REG_P (x))
10763 gcc_assert (!duplicated);
10764 putc ('y', file);
10765 fputs (hi_reg_name[REGNO (x)] + 1, file);
10766 return;
10768 break;
10769 default:
10770 gcc_unreachable ();
10773 fputs (reg, file);
10774 if (duplicated)
10776 if (ASSEMBLER_DIALECT == ASM_ATT)
10777 fprintf (file, ", %%%s", reg);
10778 else
10779 fprintf (file, ", %s", reg);
10783 /* Locate some local-dynamic symbol still in use by this function
10784 so that we can print its name in some tls_local_dynamic_base
10785 pattern. */
10787 static int
10788 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
10790 rtx x = *px;
10792 if (GET_CODE (x) == SYMBOL_REF
10793 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
10795 cfun->machine->some_ld_name = XSTR (x, 0);
10796 return 1;
10799 return 0;
10802 static const char *
10803 get_some_local_dynamic_name (void)
10805 rtx insn;
10807 if (cfun->machine->some_ld_name)
10808 return cfun->machine->some_ld_name;
10810 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
10811 if (INSN_P (insn)
10812 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
10813 return cfun->machine->some_ld_name;
10815 gcc_unreachable ();
10818 /* Meaning of CODE:
10819 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
10820 C -- print opcode suffix for set/cmov insn.
10821 c -- like C, but print reversed condition
10822 E,e -- likewise, but for compare-and-branch fused insn.
10823 F,f -- likewise, but for floating-point.
10824 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
10825 otherwise nothing
10826 R -- print the prefix for register names.
10827 z -- print the opcode suffix for the size of the current operand.
10828 * -- print a star (in certain assembler syntax)
10829 A -- print an absolute memory reference.
10830 w -- print the operand as if it's a "word" (HImode) even if it isn't.
10831 s -- print a shift double count, followed by the assembler's argument
10832 delimiter.
10833 b -- print the QImode name of the register for the indicated operand.
10834 %b0 would print %al if operands[0] is reg 0.
10835 w -- likewise, print the HImode name of the register.
10836 k -- likewise, print the SImode name of the register.
10837 q -- likewise, print the DImode name of the register.
10838 x -- likewise, print the V4SFmode name of the register.
10839 t -- likewise, print the V8SFmode name of the register.
10840 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
10841 y -- print "st(0)" instead of "st" as a register.
10842 d -- print duplicated register operand for AVX instruction.
10843 D -- print condition for SSE cmp instruction.
10844 P -- if PIC, print an @PLT suffix.
10845 X -- don't print any sort of PIC '@' suffix for a symbol.
10846 & -- print some in-use local-dynamic symbol name.
10847 H -- print a memory address offset by 8; used for sse high-parts
10848 Y -- print condition for SSE5 com* instruction.
10849 + -- print a branch hint as 'cs' or 'ds' prefix
10850 ; -- print a semicolon (after prefixes due to a bug in older gas). */
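/* Illustrative use from machine-description templates (generic examples,
   hedged, rather than quotes from i386.md): a template such as
   "set%C1\t%0" prints the condition suffix of operands[1] followed by
   operands[0], e.g. "setg %al"; "%z0" would add the size suffix derived
   from the mode of operands[0].  */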
10853 void
10854 print_operand (FILE *file, rtx x, int code)
10856 if (code)
10858 switch (code)
10860 case '*':
10861 if (ASSEMBLER_DIALECT == ASM_ATT)
10862 putc ('*', file);
10863 return;
10865 case '&':
10866 assemble_name (file, get_some_local_dynamic_name ());
10867 return;
10869 case 'A':
10870 switch (ASSEMBLER_DIALECT)
10872 case ASM_ATT:
10873 putc ('*', file);
10874 break;
10876 case ASM_INTEL:
10877 /* Intel syntax. For absolute addresses, registers should not
10878 be surrounded by brackets. */
10879 if (!REG_P (x))
10881 putc ('[', file);
10882 PRINT_OPERAND (file, x, 0);
10883 putc (']', file);
10884 return;
10886 break;
10888 default:
10889 gcc_unreachable ();
10892 PRINT_OPERAND (file, x, 0);
10893 return;
10896 case 'L':
10897 if (ASSEMBLER_DIALECT == ASM_ATT)
10898 putc ('l', file);
10899 return;
10901 case 'W':
10902 if (ASSEMBLER_DIALECT == ASM_ATT)
10903 putc ('w', file);
10904 return;
10906 case 'B':
10907 if (ASSEMBLER_DIALECT == ASM_ATT)
10908 putc ('b', file);
10909 return;
10911 case 'Q':
10912 if (ASSEMBLER_DIALECT == ASM_ATT)
10913 putc ('l', file);
10914 return;
10916 case 'S':
10917 if (ASSEMBLER_DIALECT == ASM_ATT)
10918 putc ('s', file);
10919 return;
10921 case 'T':
10922 if (ASSEMBLER_DIALECT == ASM_ATT)
10923 putc ('t', file);
10924 return;
10926 case 'z':
10927 /* 387 opcodes don't get size suffixes if the operands are
10928 registers. */
10929 if (STACK_REG_P (x))
10930 return;
10932 /* Likewise if using Intel opcodes. */
10933 if (ASSEMBLER_DIALECT == ASM_INTEL)
10934 return;
10936 /* Derive the opcode suffix from the size of the operand. */
10937 switch (GET_MODE_SIZE (GET_MODE (x)))
10939 case 1:
10940 putc ('b', file);
10941 return;
10943 case 2:
10944 if (MEM_P (x))
10946 #ifdef HAVE_GAS_FILDS_FISTS
10947 putc ('s', file);
10948 #endif
10949 return;
10951 else
10952 putc ('w', file);
10953 return;
10955 case 4:
10956 if (GET_MODE (x) == SFmode)
10958 putc ('s', file);
10959 return;
10961 else
10962 putc ('l', file);
10963 return;
10965 case 12:
10966 case 16:
10967 putc ('t', file);
10968 return;
10970 case 8:
10971 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
10973 if (MEM_P (x))
10975 #ifdef GAS_MNEMONICS
10976 putc ('q', file);
10977 #else
10978 putc ('l', file);
10979 putc ('l', file);
10980 #endif
10982 else
10983 putc ('q', file);
10985 else
10986 putc ('l', file);
10987 return;
10989 default:
10990 gcc_unreachable ();
10993 case 'd':
10994 case 'b':
10995 case 'w':
10996 case 'k':
10997 case 'q':
10998 case 'h':
10999 case 't':
11000 case 'y':
11001 case 'x':
11002 case 'X':
11003 case 'P':
11004 break;
11006 case 's':
11007 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11009 PRINT_OPERAND (file, x, 0);
11010 fputs (", ", file);
11012 return;
11014 case 'D':
11015 /* Little bit of braindamage here. The SSE compare instructions
11016 use completely different names for the comparisons than the
11017 fp conditional moves do. */
11018 if (TARGET_AVX)
11020 switch (GET_CODE (x))
11022 case EQ:
11023 fputs ("eq", file);
11024 break;
11025 case UNEQ:
11026 fputs ("eq_us", file);
11027 break;
11028 case LT:
11029 fputs ("lt", file);
11030 break;
11031 case UNLT:
11032 fputs ("nge", file);
11033 break;
11034 case LE:
11035 fputs ("le", file);
11036 break;
11037 case UNLE:
11038 fputs ("ngt", file);
11039 break;
11040 case UNORDERED:
11041 fputs ("unord", file);
11042 break;
11043 case NE:
11044 fputs ("neq", file);
11045 break;
11046 case LTGT:
11047 fputs ("neq_oq", file);
11048 break;
11049 case GE:
11050 fputs ("ge", file);
11051 break;
11052 case UNGE:
11053 fputs ("nlt", file);
11054 break;
11055 case GT:
11056 fputs ("gt", file);
11057 break;
11058 case UNGT:
11059 fputs ("nle", file);
11060 break;
11061 case ORDERED:
11062 fputs ("ord", file);
11063 break;
11064 default:
11065 output_operand_lossage ("operand is not a condition code, invalid operand code 'D'");
11066 return;
11069 else
11071 switch (GET_CODE (x))
11073 case EQ:
11074 case UNEQ:
11075 fputs ("eq", file);
11076 break;
11077 case LT:
11078 case UNLT:
11079 fputs ("lt", file);
11080 break;
11081 case LE:
11082 case UNLE:
11083 fputs ("le", file);
11084 break;
11085 case UNORDERED:
11086 fputs ("unord", file);
11087 break;
11088 case NE:
11089 case LTGT:
11090 fputs ("neq", file);
11091 break;
11092 case UNGE:
11093 case GE:
11094 fputs ("nlt", file);
11095 break;
11096 case UNGT:
11097 case GT:
11098 fputs ("nle", file);
11099 break;
11100 case ORDERED:
11101 fputs ("ord", file);
11102 break;
11103 default:
11104 output_operand_lossage ("operand is not a condition code, invalid operand code 'D'");
11105 return;
11108 return;
11109 case 'O':
11110 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11111 if (ASSEMBLER_DIALECT == ASM_ATT)
11113 switch (GET_MODE (x))
11115 case HImode: putc ('w', file); break;
11116 case SImode:
11117 case SFmode: putc ('l', file); break;
11118 case DImode:
11119 case DFmode: putc ('q', file); break;
11120 default: gcc_unreachable ();
11122 putc ('.', file);
11124 #endif
11125 return;
11126 case 'C':
11127 if (!COMPARISON_P (x))
11129 output_operand_lossage ("operand is neither a constant nor a "
11130 "condition code, invalid operand code "
11131 "'C'");
11132 return;
11134 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11135 return;
11136 case 'F':
11137 if (!COMPARISON_P (x))
11139 output_operand_lossage ("operand is neither a constant nor a "
11140 "condition code, invalid operand code "
11141 "'F'");
11142 return;
11144 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11145 if (ASSEMBLER_DIALECT == ASM_ATT)
11146 putc ('.', file);
11147 #endif
11148 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11149 return;
11151 /* Like above, but reverse condition */
11152 case 'c':
11153 /* Check to see if argument to %c is really a constant
11154 and not a condition code which needs to be reversed. */
11155 if (!COMPARISON_P (x))
11157 output_operand_lossage ("operand is neither a constant nor a "
11158 "condition code, invalid operand "
11159 "code 'c'");
11160 return;
11162 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11163 return;
11164 case 'f':
11165 if (!COMPARISON_P (x))
11167 output_operand_lossage ("operand is neither a constant nor a "
11168 "condition code, invalid operand "
11169 "code 'f'");
11170 return;
11172 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11173 if (ASSEMBLER_DIALECT == ASM_ATT)
11174 putc ('.', file);
11175 #endif
11176 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11177 return;
11179 case 'E':
11180 put_condition_code (GET_CODE (x), CCmode, 0, 0, file);
11181 return;
11183 case 'e':
11184 put_condition_code (GET_CODE (x), CCmode, 1, 0, file);
11185 return;
11187 case 'H':
11188 /* It doesn't actually matter what mode we use here, as we're
11189 only going to use this for printing. */
11190 x = adjust_address_nv (x, DImode, 8);
11191 break;
11193 case '+':
11195 rtx x;
11197 if (!optimize
11198 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11199 return;
11201 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11202 if (x)
11204 int pred_val = INTVAL (XEXP (x, 0));
11206 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11207 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11209 int taken = pred_val > REG_BR_PROB_BASE / 2;
11210 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11212 /* Emit hints only in case the default branch prediction
11213 heuristics would fail. */
11214 if (taken != cputaken)
11216 /* We use 3e (DS) prefix for taken branches and
11217 2e (CS) prefix for not taken branches. */
11218 if (taken)
11219 fputs ("ds ; ", file);
11220 else
11221 fputs ("cs ; ", file);
11225 return;
11228 case 'Y':
11229 switch (GET_CODE (x))
11231 case NE:
11232 fputs ("neq", file);
11233 break;
11234 case EQ:
11235 fputs ("eq", file);
11236 break;
11237 case GE:
11238 case GEU:
11239 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11240 break;
11241 case GT:
11242 case GTU:
11243 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11244 break;
11245 case LE:
11246 case LEU:
11247 fputs ("le", file);
11248 break;
11249 case LT:
11250 case LTU:
11251 fputs ("lt", file);
11252 break;
11253 case UNORDERED:
11254 fputs ("unord", file);
11255 break;
11256 case ORDERED:
11257 fputs ("ord", file);
11258 break;
11259 case UNEQ:
11260 fputs ("ueq", file);
11261 break;
11262 case UNGE:
11263 fputs ("nlt", file);
11264 break;
11265 case UNGT:
11266 fputs ("nle", file);
11267 break;
11268 case UNLE:
11269 fputs ("ule", file);
11270 break;
11271 case UNLT:
11272 fputs ("ult", file);
11273 break;
11274 case LTGT:
11275 fputs ("une", file);
11276 break;
11277 default:
11278 output_operand_lossage ("operand is not a condition code, invalid operand code 'Y'");
11279 return;
11281 return;
11283 case ';':
11284 #if TARGET_MACHO
11285 fputs (" ; ", file);
11286 #else
11287 fputc (' ', file);
11288 #endif
11289 return;
11291 default:
11292 output_operand_lossage ("invalid operand code '%c'", code);
11296 if (REG_P (x))
11297 print_reg (x, code, file);
11299 else if (MEM_P (x))
11301 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11302 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11303 && GET_MODE (x) != BLKmode)
11305 const char * size;
11306 switch (GET_MODE_SIZE (GET_MODE (x)))
11308 case 1: size = "BYTE"; break;
11309 case 2: size = "WORD"; break;
11310 case 4: size = "DWORD"; break;
11311 case 8: size = "QWORD"; break;
11312 case 12: size = "XWORD"; break;
11313 case 16:
11314 if (GET_MODE (x) == XFmode)
11315 size = "XWORD";
11316 else
11317 size = "XMMWORD";
11318 break;
11319 default:
11320 gcc_unreachable ();
11323 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11324 if (code == 'b')
11325 size = "BYTE";
11326 else if (code == 'w')
11327 size = "WORD";
11328 else if (code == 'k')
11329 size = "DWORD";
11331 fputs (size, file);
11332 fputs (" PTR ", file);
11335 x = XEXP (x, 0);
11336 /* Avoid (%rip) for call operands. */
11337 if (CONSTANT_ADDRESS_P (x) && code == 'P'
11338 && !CONST_INT_P (x))
11339 output_addr_const (file, x);
11340 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
11341 output_operand_lossage ("invalid constraints for operand");
11342 else
11343 output_address (x);
11346 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
11348 REAL_VALUE_TYPE r;
11349 long l;
11351 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11352 REAL_VALUE_TO_TARGET_SINGLE (r, l);
11354 if (ASSEMBLER_DIALECT == ASM_ATT)
11355 putc ('$', file);
11356 fprintf (file, "0x%08lx", (long unsigned int) l);
11359 /* These float cases don't actually occur as immediate operands. */
11360 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
11362 char dstr[30];
11364 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11365 fprintf (file, "%s", dstr);
11368 else if (GET_CODE (x) == CONST_DOUBLE
11369 && GET_MODE (x) == XFmode)
11371 char dstr[30];
11373 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11374 fprintf (file, "%s", dstr);
11377 else
11379 /* We have patterns that allow zero sets of memory, for instance.
11380 In 64-bit mode, we should probably support all 8-byte vectors,
11381 since we can in fact encode that into an immediate. */
11382 if (GET_CODE (x) == CONST_VECTOR)
11384 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
11385 x = const0_rtx;
11388 if (code != 'P')
11390 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
11392 if (ASSEMBLER_DIALECT == ASM_ATT)
11393 putc ('$', file);
11395 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
11396 || GET_CODE (x) == LABEL_REF)
11398 if (ASSEMBLER_DIALECT == ASM_ATT)
11399 putc ('$', file);
11400 else
11401 fputs ("OFFSET FLAT:", file);
11404 if (CONST_INT_P (x))
11405 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
11406 else if (flag_pic)
11407 output_pic_addr_const (file, x, code);
11408 else
11409 output_addr_const (file, x);
11413 /* Print a memory operand whose address is ADDR. */
11415 void
11416 print_operand_address (FILE *file, rtx addr)
11418 struct ix86_address parts;
11419 rtx base, index, disp;
11420 int scale;
11421 int ok = ix86_decompose_address (addr, &parts);
11423 gcc_assert (ok);
11425 base = parts.base;
11426 index = parts.index;
11427 disp = parts.disp;
11428 scale = parts.scale;
11430 switch (parts.seg)
11432 case SEG_DEFAULT:
11433 break;
11434 case SEG_FS:
11435 case SEG_GS:
11436 if (ASSEMBLER_DIALECT == ASM_ATT)
11437 putc ('%', file);
11438 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
11439 break;
11440 default:
11441 gcc_unreachable ();
11444 /* Use the one byte shorter RIP-relative addressing for 64-bit mode. */
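/* A RIP-relative operand needs no SIB byte, while an absolute disp32
   address in 64-bit mode must be encoded through one.  */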
11445 if (TARGET_64BIT && !base && !index)
11447 rtx symbol = disp;
11449 if (GET_CODE (disp) == CONST
11450 && GET_CODE (XEXP (disp, 0)) == PLUS
11451 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11452 symbol = XEXP (XEXP (disp, 0), 0);
11454 if (GET_CODE (symbol) == LABEL_REF
11455 || (GET_CODE (symbol) == SYMBOL_REF
11456 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
11457 base = pc_rtx;
11459 if (!base && !index)
11461 /* A displacement-only address requires special attention. */
11463 if (CONST_INT_P (disp))
11465 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
11466 fputs ("ds:", file);
11467 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
11469 else if (flag_pic)
11470 output_pic_addr_const (file, disp, 0);
11471 else
11472 output_addr_const (file, disp);
11474 else
11476 if (ASSEMBLER_DIALECT == ASM_ATT)
11478 if (disp)
11480 if (flag_pic)
11481 output_pic_addr_const (file, disp, 0);
11482 else if (GET_CODE (disp) == LABEL_REF)
11483 output_asm_label (disp);
11484 else
11485 output_addr_const (file, disp);
11488 putc ('(', file);
11489 if (base)
11490 print_reg (base, 0, file);
11491 if (index)
11493 putc (',', file);
11494 print_reg (index, 0, file);
11495 if (scale != 1)
11496 fprintf (file, ",%d", scale);
11498 putc (')', file);
11500 else
11502 rtx offset = NULL_RTX;
11504 if (disp)
11506 /* Pull out the offset of a symbol; print any symbol itself. */
11507 if (GET_CODE (disp) == CONST
11508 && GET_CODE (XEXP (disp, 0)) == PLUS
11509 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11511 offset = XEXP (XEXP (disp, 0), 1);
11512 disp = gen_rtx_CONST (VOIDmode,
11513 XEXP (XEXP (disp, 0), 0));
11516 if (flag_pic)
11517 output_pic_addr_const (file, disp, 0);
11518 else if (GET_CODE (disp) == LABEL_REF)
11519 output_asm_label (disp);
11520 else if (CONST_INT_P (disp))
11521 offset = disp;
11522 else
11523 output_addr_const (file, disp);
11526 putc ('[', file);
11527 if (base)
11529 print_reg (base, 0, file);
11530 if (offset)
11532 if (INTVAL (offset) >= 0)
11533 putc ('+', file);
11534 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
11537 else if (offset)
11538 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
11539 else
11540 putc ('0', file);
11542 if (index)
11544 putc ('+', file);
11545 print_reg (index, 0, file);
11546 if (scale != 1)
11547 fprintf (file, "*%d", scale);
11549 putc (']', file);
11554 bool
11555 output_addr_const_extra (FILE *file, rtx x)
11557 rtx op;
11559 if (GET_CODE (x) != UNSPEC)
11560 return false;
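/* The suffixes emitted below select ELF TLS relocations: @GOTTPOFF
   names a GOT slot holding an offset from the thread pointer
   (initial exec), @TPOFF and @NTPOFF are direct positive/negative
   offsets from the thread pointer (local exec), and @DTPOFF is an
   offset into the module's dynamic TLS block (local dynamic).  */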
11562 op = XVECEXP (x, 0, 0);
11563 switch (XINT (x, 1))
11565 case UNSPEC_GOTTPOFF:
11566 output_addr_const (file, op);
11567 /* FIXME: This might be @TPOFF in Sun ld. */
11568 fputs ("@GOTTPOFF", file);
11569 break;
11570 case UNSPEC_TPOFF:
11571 output_addr_const (file, op);
11572 fputs ("@TPOFF", file);
11573 break;
11574 case UNSPEC_NTPOFF:
11575 output_addr_const (file, op);
11576 if (TARGET_64BIT)
11577 fputs ("@TPOFF", file);
11578 else
11579 fputs ("@NTPOFF", file);
11580 break;
11581 case UNSPEC_DTPOFF:
11582 output_addr_const (file, op);
11583 fputs ("@DTPOFF", file);
11584 break;
11585 case UNSPEC_GOTNTPOFF:
11586 output_addr_const (file, op);
11587 if (TARGET_64BIT)
11588 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11589 "@GOTTPOFF(%rip)" : "@GOTTPOFF[rip]", file);
11590 else
11591 fputs ("@GOTNTPOFF", file);
11592 break;
11593 case UNSPEC_INDNTPOFF:
11594 output_addr_const (file, op);
11595 fputs ("@INDNTPOFF", file);
11596 break;
11597 #if TARGET_MACHO
11598 case UNSPEC_MACHOPIC_OFFSET:
11599 output_addr_const (file, op);
11600 putc ('-', file);
11601 machopic_output_function_base_name (file);
11602 break;
11603 #endif
11605 default:
11606 return false;
11609 return true;
11612 /* Split one or more DImode RTL references into pairs of SImode
11613 references. The RTL can be REG, offsettable MEM, integer constant, or
11614 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
11615 split and "num" is its length. lo_half and hi_half are output arrays
11616 that parallel "operands". */
11618 void
11619 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
11621 while (num--)
11623 rtx op = operands[num];
11625 /* simplify_subreg refuses to split volatile memory addresses,
11626 but we still have to handle them. */
11627 if (MEM_P (op))
11629 lo_half[num] = adjust_address (op, SImode, 0);
11630 hi_half[num] = adjust_address (op, SImode, 4);
11632 else
11634 lo_half[num] = simplify_gen_subreg (SImode, op,
11635 GET_MODE (op) == VOIDmode
11636 ? DImode : GET_MODE (op), 0);
11637 hi_half[num] = simplify_gen_subreg (SImode, op,
11638 GET_MODE (op) == VOIDmode
11639 ? DImode : GET_MODE (op), 4);
11643 /* Split one or more TImode RTL references into pairs of DImode
11644 references. The RTL can be REG, offsettable MEM, integer constant, or
11645 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
11646 split and "num" is its length. lo_half and hi_half are output arrays
11647 that parallel "operands". */
11649 void
11650 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
11652 while (num--)
11654 rtx op = operands[num];
11656 /* simplify_subreg refuses to split volatile memory addresses, but we
11657 still have to handle them. */
11658 if (MEM_P (op))
11660 lo_half[num] = adjust_address (op, DImode, 0);
11661 hi_half[num] = adjust_address (op, DImode, 8);
11663 else
11665 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
11666 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
11671 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
11672 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
11673 is the expression of the binary operation. The output may either be
11674 emitted here, or returned to the caller, like all output_* functions.
11676 There is no guarantee that the operands are the same mode, as they
11677 might be within FLOAT or FLOAT_EXTEND expressions. */
11679 #ifndef SYSV386_COMPAT
11680 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
11681 wants to fix the assemblers because that causes incompatibility
11682 with gcc. No-one wants to fix gcc because that causes
11683 incompatibility with assemblers... You can use the option of
11684 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
11685 #define SYSV386_COMPAT 1
11686 #endif
11688 const char *
11689 output_387_binary_op (rtx insn, rtx *operands)
11691 static char buf[40];
11692 const char *p;
11693 const char *ssep;
11694 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
11696 #ifdef ENABLE_CHECKING
11697 /* Even if we do not want to check the inputs, this documents the input
11698 constraints, which helps in understanding the following code. */
11699 if (STACK_REG_P (operands[0])
11700 && ((REG_P (operands[1])
11701 && REGNO (operands[0]) == REGNO (operands[1])
11702 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
11703 || (REG_P (operands[2])
11704 && REGNO (operands[0]) == REGNO (operands[2])
11705 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
11706 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
11707 ; /* ok */
11708 else
11709 gcc_assert (is_sse);
11710 #endif
11712 switch (GET_CODE (operands[3]))
11714 case PLUS:
11715 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11716 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11717 p = "fiadd";
11718 else
11719 p = "fadd";
11720 ssep = "vadd";
11721 break;
11723 case MINUS:
11724 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11725 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11726 p = "fisub";
11727 else
11728 p = "fsub";
11729 ssep = "vsub";
11730 break;
11732 case MULT:
11733 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11734 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11735 p = "fimul";
11736 else
11737 p = "fmul";
11738 ssep = "vmul";
11739 break;
11741 case DIV:
11742 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11743 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11744 p = "fidiv";
11745 else
11746 p = "fdiv";
11747 ssep = "vdiv";
11748 break;
11750 default:
11751 gcc_unreachable ();
11754 if (is_sse)
11756 if (TARGET_AVX)
11758 strcpy (buf, ssep);
11759 if (GET_MODE (operands[0]) == SFmode)
11760 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
11761 else
11762 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
11764 else
11766 strcpy (buf, ssep + 1);
11767 if (GET_MODE (operands[0]) == SFmode)
11768 strcat (buf, "ss\t{%2, %0|%0, %2}");
11769 else
11770 strcat (buf, "sd\t{%2, %0|%0, %2}");
11772 return buf;
11774 strcpy (buf, p);
11776 switch (GET_CODE (operands[3]))
11778 case MULT:
11779 case PLUS:
11780 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
11782 rtx temp = operands[2];
11783 operands[2] = operands[1];
11784 operands[1] = temp;
11787 /* know operands[0] == operands[1]. */
11789 if (MEM_P (operands[2]))
11791 p = "%z2\t%2";
11792 break;
11795 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
11797 if (STACK_TOP_P (operands[0]))
11798 /* How is it that we are storing to a dead operand[2]?
11799 Well, presumably operands[1] is dead too. We can't
11800 store the result to st(0) as st(0) gets popped on this
11801 instruction. Instead store to operands[2] (which I
11802 think has to be st(1)). st(1) will be popped later.
11803 gcc <= 2.8.1 didn't have this check and generated
11804 assembly code that the Unixware assembler rejected. */
11805 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
11806 else
11807 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
11808 break;
11811 if (STACK_TOP_P (operands[0]))
11812 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
11813 else
11814 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
11815 break;
11817 case MINUS:
11818 case DIV:
11819 if (MEM_P (operands[1]))
11821 p = "r%z1\t%1";
11822 break;
11825 if (MEM_P (operands[2]))
11827 p = "%z2\t%2";
11828 break;
11831 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
11833 #if SYSV386_COMPAT
11834 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
11835 derived assemblers, confusingly reverse the direction of
11836 the operation for fsub{r} and fdiv{r} when the
11837 destination register is not st(0). The Intel assembler
11838 doesn't have this brain damage. Read !SYSV386_COMPAT to
11839 figure out what the hardware really does. */
11840 if (STACK_TOP_P (operands[0]))
11841 p = "{p\t%0, %2|rp\t%2, %0}";
11842 else
11843 p = "{rp\t%2, %0|p\t%0, %2}";
11844 #else
11845 if (STACK_TOP_P (operands[0]))
11846 /* As above for fmul/fadd, we can't store to st(0). */
11847 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
11848 else
11849 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
11850 #endif
11851 break;
11854 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
11856 #if SYSV386_COMPAT
11857 if (STACK_TOP_P (operands[0]))
11858 p = "{rp\t%0, %1|p\t%1, %0}";
11859 else
11860 p = "{p\t%1, %0|rp\t%0, %1}";
11861 #else
11862 if (STACK_TOP_P (operands[0]))
11863 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
11864 else
11865 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
11866 #endif
11867 break;
11870 if (STACK_TOP_P (operands[0]))
11872 if (STACK_TOP_P (operands[1]))
11873 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
11874 else
11875 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
11876 break;
11878 else if (STACK_TOP_P (operands[1]))
11880 #if SYSV386_COMPAT
11881 p = "{\t%1, %0|r\t%0, %1}";
11882 #else
11883 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
11884 #endif
11886 else
11888 #if SYSV386_COMPAT
11889 p = "{r\t%2, %0|\t%0, %2}";
11890 #else
11891 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
11892 #endif
11894 break;
11896 default:
11897 gcc_unreachable ();
11900 strcat (buf, p);
11901 return buf;
11904 /* Return needed mode for entity in optimize_mode_switching pass. */
11906 int
11907 ix86_mode_needed (int entity, rtx insn)
11909 enum attr_i387_cw mode;
11911 /* The mode UNINITIALIZED is used to store the control word after a
11912 function call or ASM pattern. The mode ANY specifies that the
11913 function has no requirements on the control word and makes no
11914 changes to the bits we are interested in. */
11916 if (CALL_P (insn)
11917 || (NONJUMP_INSN_P (insn)
11918 && (asm_noperands (PATTERN (insn)) >= 0
11919 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
11920 return I387_CW_UNINITIALIZED;
11922 if (recog_memoized (insn) < 0)
11923 return I387_CW_ANY;
11925 mode = get_attr_i387_cw (insn);
11927 switch (entity)
11929 case I387_TRUNC:
11930 if (mode == I387_CW_TRUNC)
11931 return mode;
11932 break;
11934 case I387_FLOOR:
11935 if (mode == I387_CW_FLOOR)
11936 return mode;
11937 break;
11939 case I387_CEIL:
11940 if (mode == I387_CW_CEIL)
11941 return mode;
11942 break;
11944 case I387_MASK_PM:
11945 if (mode == I387_CW_MASK_PM)
11946 return mode;
11947 break;
11949 default:
11950 gcc_unreachable ();
11953 return I387_CW_ANY;
11956 /* Output code to initialize control word copies used by trunc?f?i and
11957 rounding patterns. MODE selects the control word contents
11958 to set up. */
11960 void
11961 emit_i387_cw_initialization (int mode)
11963 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
11964 rtx new_mode;
11966 enum ix86_stack_slot slot;
11968 rtx reg = gen_reg_rtx (HImode);
11970 emit_insn (gen_x86_fnstcw_1 (stored_mode));
11971 emit_move_insn (reg, copy_rtx (stored_mode));
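/* Bits 10-11 of the x87 control word form the rounding-control field
   (00 = to nearest, 01 = down, 10 = up, 11 = truncate) and bit 5
   (0x0020) masks the precision exception; the second code path below
   instead writes the rounding bits straight into the control word's
   high byte, which is shorter when partial-register writes are
   cheap.  */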
11973 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
11974 || optimize_function_for_size_p (cfun))
11976 switch (mode)
11978 case I387_CW_TRUNC:
11979 /* round toward zero (truncate) */
11980 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
11981 slot = SLOT_CW_TRUNC;
11982 break;
11984 case I387_CW_FLOOR:
11985 /* round down toward -oo */
11986 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
11987 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
11988 slot = SLOT_CW_FLOOR;
11989 break;
11991 case I387_CW_CEIL:
11992 /* round up toward +oo */
11993 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
11994 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
11995 slot = SLOT_CW_CEIL;
11996 break;
11998 case I387_CW_MASK_PM:
11999 /* mask precision exception for nearbyint() */
12000 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12001 slot = SLOT_CW_MASK_PM;
12002 break;
12004 default:
12005 gcc_unreachable ();
12008 else
12010 switch (mode)
12012 case I387_CW_TRUNC:
12013 /* round toward zero (truncate) */
12014 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12015 slot = SLOT_CW_TRUNC;
12016 break;
12018 case I387_CW_FLOOR:
12019 /* round down toward -oo */
12020 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12021 slot = SLOT_CW_FLOOR;
12022 break;
12024 case I387_CW_CEIL:
12025 /* round up toward +oo */
12026 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12027 slot = SLOT_CW_CEIL;
12028 break;
12030 case I387_CW_MASK_PM:
12031 /* mask precision exception for nearbyint() */
12032 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12033 slot = SLOT_CW_MASK_PM;
12034 break;
12036 default:
12037 gcc_unreachable ();
12041 gcc_assert (slot < MAX_386_STACK_LOCALS);
12043 new_mode = assign_386_stack_local (HImode, slot);
12044 emit_move_insn (new_mode, reg);
12047 /* Output code for INSN to convert a float to a signed int. OPERANDS
12048 are the insn operands. The output may be [HSD]Imode and the input
12049 operand may be [SDX]Fmode. */
12051 const char *
12052 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12054 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12055 int dimode_p = GET_MODE (operands[0]) == DImode;
12056 int round_mode = get_attr_i387_cw (insn);
12058 /* Jump through a hoop or two for DImode, since the hardware has no
12059 non-popping instruction. We used to do this a different way, but
12060 that was somewhat fragile and broke with post-reload splitters. */
12061 if ((dimode_p || fisttp) && !stack_top_dies)
12062 output_asm_insn ("fld\t%y1", operands);
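/* When the value is still live, the fld above duplicated st(0), so
   the popping fistp/fisttp below consumes only the copy.  */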
12064 gcc_assert (STACK_TOP_P (operands[1]));
12065 gcc_assert (MEM_P (operands[0]));
12066 gcc_assert (GET_MODE (operands[1]) != TFmode);
12068 if (fisttp)
12069 output_asm_insn ("fisttp%z0\t%0", operands);
12070 else
12072 if (round_mode != I387_CW_ANY)
12073 output_asm_insn ("fldcw\t%3", operands);
12074 if (stack_top_dies || dimode_p)
12075 output_asm_insn ("fistp%z0\t%0", operands);
12076 else
12077 output_asm_insn ("fist%z0\t%0", operands);
12078 if (round_mode != I387_CW_ANY)
12079 output_asm_insn ("fldcw\t%2", operands);
12082 return "";
12085 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12086 have the values zero or one, indicates the ffreep insn's operand
12087 from the OPERANDS array. */
12089 static const char *
12090 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12092 if (TARGET_USE_FFREEP)
12093 #if HAVE_AS_IX86_FFREEP
12094 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12095 #else
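/* ffreep %st(i) encodes as the bytes 0xdf 0xc0+i; emit them with a
   little-endian .word and patch the register digit into the template
   below.  */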
12097 static char retval[] = ".word\t0xc_df";
12098 int regno = REGNO (operands[opno]);
12100 gcc_assert (FP_REGNO_P (regno));
12102 retval[9] = '0' + (regno - FIRST_STACK_REG);
12103 return retval;
12105 #endif
12107 return opno ? "fstp\t%y1" : "fstp\t%y0";
12111 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12112 should be used. UNORDERED_P is true when fucom should be used. */
12114 const char *
12115 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12117 int stack_top_dies;
12118 rtx cmp_op0, cmp_op1;
12119 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12121 if (eflags_p)
12123 cmp_op0 = operands[0];
12124 cmp_op1 = operands[1];
12126 else
12128 cmp_op0 = operands[1];
12129 cmp_op1 = operands[2];
12132 if (is_sse)
12134 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12135 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12136 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12137 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12139 if (GET_MODE (operands[0]) == SFmode)
12140 if (unordered_p)
12141 return &ucomiss[TARGET_AVX ? 0 : 1];
12142 else
12143 return &comiss[TARGET_AVX ? 0 : 1];
12144 else
12145 if (unordered_p)
12146 return &ucomisd[TARGET_AVX ? 0 : 1];
12147 else
12148 return &comisd[TARGET_AVX ? 0 : 1];
12151 gcc_assert (STACK_TOP_P (cmp_op0));
12153 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12155 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12157 if (stack_top_dies)
12159 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12160 return output_387_ffreep (operands, 1);
12162 else
12163 return "ftst\n\tfnstsw\t%0";
12166 if (STACK_REG_P (cmp_op1)
12167 && stack_top_dies
12168 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12169 && REGNO (cmp_op1) != FIRST_STACK_REG)
12171 /* If the top of the 387 stack dies, and the other operand
12172 is also a stack register that dies, then this must be an
12173 `fcompp' float compare */
12175 if (eflags_p)
12177 /* There is no double popping fcomi variant. Fortunately,
12178 eflags is immune from the fstp's cc clobbering. */
12179 if (unordered_p)
12180 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12181 else
12182 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12183 return output_387_ffreep (operands, 0);
12185 else
12187 if (unordered_p)
12188 return "fucompp\n\tfnstsw\t%0";
12189 else
12190 return "fcompp\n\tfnstsw\t%0";
12193 else
12195 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
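/* That is: bit 3 = eflags_p, bit 2 = integer operand mode, bit 1 =
   unordered_p, bit 0 = stack_top_dies, matching the mask computation
   below.  */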
12197 static const char * const alt[16] =
12199 "fcom%z2\t%y2\n\tfnstsw\t%0",
12200 "fcomp%z2\t%y2\n\tfnstsw\t%0",
12201 "fucom%z2\t%y2\n\tfnstsw\t%0",
12202 "fucomp%z2\t%y2\n\tfnstsw\t%0",
12204 "ficom%z2\t%y2\n\tfnstsw\t%0",
12205 "ficomp%z2\t%y2\n\tfnstsw\t%0",
12206 NULL,
12207 NULL,
12209 "fcomi\t{%y1, %0|%0, %y1}",
12210 "fcomip\t{%y1, %0|%0, %y1}",
12211 "fucomi\t{%y1, %0|%0, %y1}",
12212 "fucomip\t{%y1, %0|%0, %y1}",
12214 NULL,
12215 NULL,
12216 NULL,
12217 NULL
12220 int mask;
12221 const char *ret;
12223 mask = eflags_p << 3;
12224 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12225 mask |= unordered_p << 1;
12226 mask |= stack_top_dies;
12228 gcc_assert (mask < 16);
12229 ret = alt[mask];
12230 gcc_assert (ret);
12232 return ret;
12236 void
12237 ix86_output_addr_vec_elt (FILE *file, int value)
12239 const char *directive = ASM_LONG;
12241 #ifdef ASM_QUAD
12242 if (TARGET_64BIT)
12243 directive = ASM_QUAD;
12244 #else
12245 gcc_assert (!TARGET_64BIT);
12246 #endif
12248 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
12251 void
12252 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12254 const char *directive = ASM_LONG;
12256 #ifdef ASM_QUAD
12257 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12258 directive = ASM_QUAD;
12259 #else
12260 gcc_assert (!TARGET_64BIT);
12261 #endif
12262 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12263 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12264 fprintf (file, "%s%s%d-%s%d\n",
12265 directive, LPREFIX, value, LPREFIX, rel);
12266 else if (HAVE_AS_GOTOFF_IN_DATA)
12267 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
12268 #if TARGET_MACHO
12269 else if (TARGET_MACHO)
12271 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
12272 machopic_output_function_base_name (file);
12273 fprintf(file, "\n");
12275 #endif
12276 else
12277 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
12278 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
12281 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
12282 for the target. */
12284 void
12285 ix86_expand_clear (rtx dest)
12287 rtx tmp;
12289 /* We play register width games, which are only valid after reload. */
12290 gcc_assert (reload_completed);
12292 /* Avoid HImode and its attendant prefix byte. */
12293 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12294 dest = gen_rtx_REG (SImode, REGNO (dest));
12295 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12297 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12298 if (reload_completed && (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ()))
12300 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12301 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
12304 emit_insn (tmp);
12307 /* X is an unchanging MEM. If it is a constant pool reference, return
12308 the constant pool rtx, else NULL. */
12310 rtx
12311 maybe_get_pool_constant (rtx x)
12313 x = ix86_delegitimize_address (XEXP (x, 0));
12315 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12316 return get_pool_constant (x);
12318 return NULL_RTX;
12321 void
12322 ix86_expand_move (enum machine_mode mode, rtx operands[])
12324 rtx op0, op1;
12325 enum tls_model model;
12327 op0 = operands[0];
12328 op1 = operands[1];
12330 if (GET_CODE (op1) == SYMBOL_REF)
12332 model = SYMBOL_REF_TLS_MODEL (op1);
12333 if (model)
12335 op1 = legitimize_tls_address (op1, model, true);
12336 op1 = force_operand (op1, op0);
12337 if (op1 == op0)
12338 return;
12340 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12341 && SYMBOL_REF_DLLIMPORT_P (op1))
12342 op1 = legitimize_dllimport_symbol (op1, false);
12344 else if (GET_CODE (op1) == CONST
12345 && GET_CODE (XEXP (op1, 0)) == PLUS
12346 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
12348 rtx addend = XEXP (XEXP (op1, 0), 1);
12349 rtx symbol = XEXP (XEXP (op1, 0), 0);
12350 rtx tmp = NULL;
12352 model = SYMBOL_REF_TLS_MODEL (symbol);
12353 if (model)
12354 tmp = legitimize_tls_address (symbol, model, true);
12355 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12356 && SYMBOL_REF_DLLIMPORT_P (symbol))
12357 tmp = legitimize_dllimport_symbol (symbol, true);
12359 if (tmp)
12361 tmp = force_operand (tmp, NULL);
12362 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
12363 op0, 1, OPTAB_DIRECT);
12364 if (tmp == op0)
12365 return;
12369 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
12371 if (TARGET_MACHO && !TARGET_64BIT)
12373 #if TARGET_MACHO
12374 if (MACHOPIC_PURE)
12376 rtx temp = ((reload_in_progress
12377 || ((op0 && REG_P (op0))
12378 && mode == Pmode))
12379 ? op0 : gen_reg_rtx (Pmode));
12380 op1 = machopic_indirect_data_reference (op1, temp);
12381 op1 = machopic_legitimize_pic_address (op1, mode,
12382 temp == op1 ? 0 : temp);
12384 else if (MACHOPIC_INDIRECT)
12385 op1 = machopic_indirect_data_reference (op1, 0);
12386 if (op0 == op1)
12387 return;
12388 #endif
12390 else
12392 if (MEM_P (op0))
12393 op1 = force_reg (Pmode, op1);
12394 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
12396 rtx reg = !can_create_pseudo_p () ? op0 : NULL_RTX;
12397 op1 = legitimize_pic_address (op1, reg);
12398 if (op0 == op1)
12399 return;
12403 else
12405 if (MEM_P (op0)
12406 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
12407 || !push_operand (op0, mode))
12408 && MEM_P (op1))
12409 op1 = force_reg (mode, op1);
12411 if (push_operand (op0, mode)
12412 && ! general_no_elim_operand (op1, mode))
12413 op1 = copy_to_mode_reg (mode, op1);
12415 /* Force large constants in 64-bit compilation into registers
12416 to get them CSEed. */
12417 if (can_create_pseudo_p ()
12418 && (mode == DImode) && TARGET_64BIT
12419 && immediate_operand (op1, mode)
12420 && !x86_64_zext_immediate_operand (op1, VOIDmode)
12421 && !register_operand (op0, mode)
12422 && optimize)
12423 op1 = copy_to_mode_reg (mode, op1);
12425 if (can_create_pseudo_p ()
12426 && FLOAT_MODE_P (mode)
12427 && GET_CODE (op1) == CONST_DOUBLE)
12429 /* If we are loading a floating point constant to a register,
12430 force the value to memory now, since we'll get better code
12431 out of the back end. */
12433 op1 = validize_mem (force_const_mem (mode, op1));
12434 if (!register_operand (op0, mode))
12436 rtx temp = gen_reg_rtx (mode);
12437 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
12438 emit_move_insn (op0, temp);
12439 return;
12444 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12447 void
12448 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
12450 rtx op0 = operands[0], op1 = operands[1];
12451 unsigned int align = GET_MODE_ALIGNMENT (mode);
12453 /* Force constants other than zero into memory. We do not know how
12454 the instructions used to build constants modify the upper 64 bits
12455 of the register; once we have that information we may be able
12456 to handle some of them more efficiently. */
12457 if (can_create_pseudo_p ()
12458 && register_operand (op0, mode)
12459 && (CONSTANT_P (op1)
12460 || (GET_CODE (op1) == SUBREG
12461 && CONSTANT_P (SUBREG_REG (op1))))
12462 && standard_sse_constant_p (op1) <= 0)
12463 op1 = validize_mem (force_const_mem (mode, op1));
12465 /* We need to check memory alignment for SSE modes since attributes
12466 can make operands unaligned. */
12467 if (can_create_pseudo_p ()
12468 && SSE_REG_MODE_P (mode)
12469 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
12470 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
12472 rtx tmp[2];
12474 /* ix86_expand_vector_move_misalign() does not like constants ... */
12475 if (CONSTANT_P (op1)
12476 || (GET_CODE (op1) == SUBREG
12477 && CONSTANT_P (SUBREG_REG (op1))))
12478 op1 = validize_mem (force_const_mem (mode, op1));
12480 /* ... nor both arguments in memory. */
12481 if (!register_operand (op0, mode)
12482 && !register_operand (op1, mode))
12483 op1 = force_reg (mode, op1);
12485 tmp[0] = op0; tmp[1] = op1;
12486 ix86_expand_vector_move_misalign (mode, tmp);
12487 return;
12490 /* Make operand1 a register if it isn't already. */
12491 if (can_create_pseudo_p ()
12492 && !register_operand (op0, mode)
12493 && !register_operand (op1, mode))
12495 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
12496 return;
12499 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12502 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
12503 straight to ix86_expand_vector_move. */
12504 /* Code generation for scalar reg-reg moves of single and double precision data:
12505 if (x86_sse_partial_reg_dependency == true || x86_sse_split_regs == true)
12506 movaps reg, reg
12507 else
12508 movss reg, reg
12509 if (x86_sse_partial_reg_dependency == true)
12510 movapd reg, reg
12511 else
12512 movsd reg, reg
12514 Code generation for scalar loads of double precision data:
12515 if (x86_sse_split_regs == true)
12516 movlpd mem, reg (gas syntax)
12517 else
12518 movsd mem, reg
12520 Code generation for unaligned packed loads of single precision data
12521 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
12522 if (x86_sse_unaligned_move_optimal)
12523 movups mem, reg
12525 if (x86_sse_partial_reg_dependency == true)
12527 xorps reg, reg
12528 movlps mem, reg
12529 movhps mem+8, reg
12531 else
12533 movlps mem, reg
12534 movhps mem+8, reg
12537 Code generation for unaligned packed loads of double precision data
12538 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
12539 if (x86_sse_unaligned_move_optimal)
12540 movupd mem, reg
12542 if (x86_sse_split_regs == true)
12544 movlpd mem, reg
12545 movhpd mem+8, reg
12547 else
12549 movsd mem, reg
12550 movhpd mem+8, reg
12554 void
12555 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
12557 rtx op0, op1, m;
12559 op0 = operands[0];
12560 op1 = operands[1];
12562 if (TARGET_AVX)
12564 switch (GET_MODE_CLASS (mode))
12566 case MODE_VECTOR_INT:
12567 case MODE_INT:
12568 switch (GET_MODE_SIZE (mode))
12570 case 16:
12571 op0 = gen_lowpart (V16QImode, op0);
12572 op1 = gen_lowpart (V16QImode, op1);
12573 emit_insn (gen_avx_movdqu (op0, op1));
12574 break;
12575 case 32:
12576 op0 = gen_lowpart (V32QImode, op0);
12577 op1 = gen_lowpart (V32QImode, op1);
12578 emit_insn (gen_avx_movdqu256 (op0, op1));
12579 break;
12580 default:
12581 gcc_unreachable ();
12583 break;
12584 case MODE_VECTOR_FLOAT:
12585 op0 = gen_lowpart (mode, op0);
12586 op1 = gen_lowpart (mode, op1);
12588 switch (mode)
12590 case V4SFmode:
12591 emit_insn (gen_avx_movups (op0, op1));
12592 break;
12593 case V8SFmode:
12594 emit_insn (gen_avx_movups256 (op0, op1));
12595 break;
12596 case V2DFmode:
12597 emit_insn (gen_avx_movupd (op0, op1));
12598 break;
12599 case V4DFmode:
12600 emit_insn (gen_avx_movupd256 (op0, op1));
12601 break;
12602 default:
12603 gcc_unreachable ();
12605 break;
12607 default:
12608 gcc_unreachable ();
12611 return;
12614 if (MEM_P (op1))
12616 /* If we're optimizing for size, movups is the smallest. */
12617 if (optimize_insn_for_size_p ())
12619 op0 = gen_lowpart (V4SFmode, op0);
12620 op1 = gen_lowpart (V4SFmode, op1);
12621 emit_insn (gen_sse_movups (op0, op1));
12622 return;
12625 /* ??? If we have typed data, then it would appear that using
12626 movdqu is the only way to get unaligned data loaded with
12627 integer type. */
12628 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12630 op0 = gen_lowpart (V16QImode, op0);
12631 op1 = gen_lowpart (V16QImode, op1);
12632 emit_insn (gen_sse2_movdqu (op0, op1));
12633 return;
12636 if (TARGET_SSE2 && mode == V2DFmode)
12638 rtx zero;
12640 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
12642 op0 = gen_lowpart (V2DFmode, op0);
12643 op1 = gen_lowpart (V2DFmode, op1);
12644 emit_insn (gen_sse2_movupd (op0, op1));
12645 return;
12648 /* When SSE registers are split into halves, we can avoid
12649 writing to the top half twice. */
12650 if (TARGET_SSE_SPLIT_REGS)
12652 emit_clobber (op0);
12653 zero = op0;
12655 else
12657 /* ??? Not sure about the best option for the Intel chips.
12658 The following would seem to satisfy; the register is
12659 entirely cleared, breaking the dependency chain. We
12660 then store to the upper half, with a dependency depth
12661 of one. A rumor has it that Intel recommends two movsd
12662 followed by an unpacklpd, but this is unconfirmed. And
12663 given that the dependency depth of the unpacklpd would
12664 still be one, I'm not sure why this would be better. */
12665 zero = CONST0_RTX (V2DFmode);
12668 m = adjust_address (op1, DFmode, 0);
12669 emit_insn (gen_sse2_loadlpd (op0, zero, m));
12670 m = adjust_address (op1, DFmode, 8);
12671 emit_insn (gen_sse2_loadhpd (op0, op0, m));
12673 else
12675 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
12677 op0 = gen_lowpart (V4SFmode, op0);
12678 op1 = gen_lowpart (V4SFmode, op1);
12679 emit_insn (gen_sse_movups (op0, op1));
12680 return;
12683 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
12684 emit_move_insn (op0, CONST0_RTX (mode));
12685 else
12686 emit_clobber (op0);
12688 if (mode != V4SFmode)
12689 op0 = gen_lowpart (V4SFmode, op0);
12690 m = adjust_address (op1, V2SFmode, 0);
12691 emit_insn (gen_sse_loadlps (op0, op0, m));
12692 m = adjust_address (op1, V2SFmode, 8);
12693 emit_insn (gen_sse_loadhps (op0, op0, m));
12696 else if (MEM_P (op0))
12698 /* If we're optimizing for size, movups is the smallest. */
12699 if (optimize_insn_for_size_p ())
12701 op0 = gen_lowpart (V4SFmode, op0);
12702 op1 = gen_lowpart (V4SFmode, op1);
12703 emit_insn (gen_sse_movups (op0, op1));
12704 return;
12707 /* ??? Similar to above, only less clear because of
12708 "typeless" stores. */
12709 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
12710 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12712 op0 = gen_lowpart (V16QImode, op0);
12713 op1 = gen_lowpart (V16QImode, op1);
12714 emit_insn (gen_sse2_movdqu (op0, op1));
12715 return;
12718 if (TARGET_SSE2 && mode == V2DFmode)
12720 m = adjust_address (op0, DFmode, 0);
12721 emit_insn (gen_sse2_storelpd (m, op1));
12722 m = adjust_address (op0, DFmode, 8);
12723 emit_insn (gen_sse2_storehpd (m, op1));
12725 else
12727 if (mode != V4SFmode)
12728 op1 = gen_lowpart (V4SFmode, op1);
12729 m = adjust_address (op0, V2SFmode, 0);
12730 emit_insn (gen_sse_storelps (m, op1));
12731 m = adjust_address (op0, V2SFmode, 8);
12732 emit_insn (gen_sse_storehps (m, op1));
12735 else
12736 gcc_unreachable ();
12739 /* Expand a push in MODE. This is some mode for which we do not support
12740 proper push instructions, at least from the registers that we expect
12741 the value to live in. */
12743 void
12744 ix86_expand_push (enum machine_mode mode, rtx x)
12746 rtx tmp;
12748 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
12749 GEN_INT (-GET_MODE_SIZE (mode)),
12750 stack_pointer_rtx, 1, OPTAB_DIRECT);
12751 if (tmp != stack_pointer_rtx)
12752 emit_move_insn (stack_pointer_rtx, tmp);
12754 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
12756 /* When we push an operand onto the stack, it has to be aligned at
12757 least at the function argument boundary. However, since we don't
12758 have the argument type, we can't determine the actual argument
12759 boundary. */
12760 emit_move_insn (tmp, x);
12763 /* Helper function of ix86_fixup_binary_operands to canonicalize
12764 operand order. Returns true if the operands should be swapped. */
12766 static bool
12767 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
12768 rtx operands[])
12770 rtx dst = operands[0];
12771 rtx src1 = operands[1];
12772 rtx src2 = operands[2];
12774 /* If the operation is not commutative, we can't do anything. */
12775 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
12776 return false;
12778 /* Highest priority is that src1 should match dst. */
12779 if (rtx_equal_p (dst, src1))
12780 return false;
12781 if (rtx_equal_p (dst, src2))
12782 return true;
12784 /* Next highest priority is that immediate constants come second. */
12785 if (immediate_operand (src2, mode))
12786 return false;
12787 if (immediate_operand (src1, mode))
12788 return true;
12790 /* Lowest priority is that memory references should come second. */
12791 if (MEM_P (src2))
12792 return false;
12793 if (MEM_P (src1))
12794 return true;
12796 return false;
12800 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
12801 destination to use for the operation. If different from the true
12802 destination in operands[0], a copy operation will be required. */
12804 rtx
12805 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
12806 rtx operands[])
12808 rtx dst = operands[0];
12809 rtx src1 = operands[1];
12810 rtx src2 = operands[2];
12812 /* Canonicalize operand order. */
12813 if (ix86_swap_binary_operands_p (code, mode, operands))
12815 rtx temp;
12817 /* It is invalid to swap operands of different modes. */
12818 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
12820 temp = src1;
12821 src1 = src2;
12822 src2 = temp;
12825 /* Both source operands cannot be in memory. */
12826 if (MEM_P (src1) && MEM_P (src2))
12828 /* Optimization: Only read from memory once. */
12829 if (rtx_equal_p (src1, src2))
12831 src2 = force_reg (mode, src2);
12832 src1 = src2;
12834 else
12835 src2 = force_reg (mode, src2);
12838 /* If the destination is memory, and we do not have matching source
12839 operands, do things in registers. */
12840 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
12841 dst = gen_reg_rtx (mode);
12843 /* Source 1 cannot be a constant. */
12844 if (CONSTANT_P (src1))
12845 src1 = force_reg (mode, src1);
12847 /* Source 1 cannot be a non-matching memory. */
12848 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
12849 src1 = force_reg (mode, src1);
12851 operands[1] = src1;
12852 operands[2] = src2;
12853 return dst;
12856 /* Similarly, but assume that the destination has already been
12857 set up properly. */
12859 void
12860 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
12861 enum machine_mode mode, rtx operands[])
12863 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
12864 gcc_assert (dst == operands[0]);
12867 /* Attempt to expand a binary operator. Make the expansion closer to the
12868 actual machine than just general_operand, which would allow 3 separate
12869 memory references (one output, two input) in a single insn. */
12871 void
12872 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
12873 rtx operands[])
12875 rtx src1, src2, dst, op, clob;
12877 dst = ix86_fixup_binary_operands (code, mode, operands);
12878 src1 = operands[1];
12879 src2 = operands[2];
12881 /* Emit the instruction. */
12883 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
12884 if (reload_in_progress)
12886 /* Reload doesn't know about the flags register, and doesn't know that
12887 it doesn't want to clobber it. We can only do this with PLUS. */
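/* A PLUS without the clobber can still be matched, e.g. as an lea,
   which does not touch the flags.  */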
12888 gcc_assert (code == PLUS);
12889 emit_insn (op);
12891 else
12893 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12894 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
12897 /* Fix up the destination if needed. */
12898 if (dst != operands[0])
12899 emit_move_insn (operands[0], dst);
12902 /* Return TRUE or FALSE depending on whether the binary operator meets the
12903 appropriate constraints. */
12905 int
12906 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
12907 rtx operands[3])
12909 rtx dst = operands[0];
12910 rtx src1 = operands[1];
12911 rtx src2 = operands[2];
12913 /* Both source operands cannot be in memory. */
12914 if (MEM_P (src1) && MEM_P (src2))
12915 return 0;
12917 /* Canonicalize operand order for commutative operators. */
12918 if (ix86_swap_binary_operands_p (code, mode, operands))
12920 rtx temp = src1;
12921 src1 = src2;
12922 src2 = temp;
12925 /* If the destination is memory, we must have a matching source operand. */
12926 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
12927 return 0;
12929 /* Source 1 cannot be a constant. */
12930 if (CONSTANT_P (src1))
12931 return 0;
12933 /* Source 1 cannot be a non-matching memory. */
12934 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
12935 return 0;
12937 return 1;
12940 /* Attempt to expand a unary operator. Make the expansion closer to the
12941 actual machine than just general_operand, which would allow 2 separate
12942 memory references (one output, one input) in a single insn. */
12944 void
12945 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
12946 rtx operands[])
12948 int matching_memory;
12949 rtx src, dst, op, clob;
12951 dst = operands[0];
12952 src = operands[1];
12954 /* If the destination is memory, and we do not have matching source
12955 operands, do things in registers. */
12956 matching_memory = 0;
12957 if (MEM_P (dst))
12959 if (rtx_equal_p (dst, src))
12960 matching_memory = 1;
12961 else
12962 dst = gen_reg_rtx (mode);
12965 /* When source operand is memory, destination must match. */
12966 if (MEM_P (src) && !matching_memory)
12967 src = force_reg (mode, src);
12969 /* Emit the instruction. */
12971 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
12972 if (reload_in_progress || code == NOT)
12974 /* Reload doesn't know about the flags register, and doesn't know that
12975 it doesn't want to clobber it. */
12976 gcc_assert (code == NOT);
12977 emit_insn (op);
12979 else
12981 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12982 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
12985 /* Fix up the destination if needed. */
12986 if (dst != operands[0])
12987 emit_move_insn (operands[0], dst);
12990 /* Return TRUE or FALSE depending on whether the unary operator meets the
12991 appropriate constraints. */
12993 int
12994 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
12995 enum machine_mode mode ATTRIBUTE_UNUSED,
12996 rtx operands[2] ATTRIBUTE_UNUSED)
12998 /* If one of the operands is memory, source and destination must match. */
12999 if ((MEM_P (operands[0])
13000 || MEM_P (operands[1]))
13001 && ! rtx_equal_p (operands[0], operands[1]))
13002 return FALSE;
13003 return TRUE;
13006 /* Post-reload splitter for converting an SF or DFmode value in an
13007 SSE register into an unsigned SImode. */
13009 void
13010 ix86_split_convert_uns_si_sse (rtx operands[])
13012 enum machine_mode vecmode;
13013 rtx value, large, zero_or_two31, input, two31, x;
13015 large = operands[1];
13016 zero_or_two31 = operands[2];
13017 input = operands[3];
13018 two31 = operands[4];
13019 vecmode = GET_MODE (large);
13020 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
13022 /* Load up the value into the low element. We must ensure that the other
13023 elements are valid floats -- zero is the easiest such value. */
13024 if (MEM_P (input))
13026 if (vecmode == V4SFmode)
13027 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
13028 else
13029 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
13031 else
13033 input = gen_rtx_REG (vecmode, REGNO (input));
13034 emit_move_insn (value, CONST0_RTX (vecmode));
13035 if (vecmode == V4SFmode)
13036 emit_insn (gen_sse_movss (value, value, input));
13037 else
13038 emit_insn (gen_sse2_movsd (value, value, input));
13041 emit_move_insn (large, two31);
13042 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
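/* LARGE becomes a mask of elements where value >= 2^31; those
   elements have 2^31 subtracted so the signed cvtt conversion is
   exact, and the mask, shifted into the sign-bit position, is
   XORed back into the integer result below.  */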
13044 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
13045 emit_insn (gen_rtx_SET (VOIDmode, large, x));
13047 x = gen_rtx_AND (vecmode, zero_or_two31, large);
13048 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
13050 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
13051 emit_insn (gen_rtx_SET (VOIDmode, value, x));
13053 large = gen_rtx_REG (V4SImode, REGNO (large));
13054 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
13056 x = gen_rtx_REG (V4SImode, REGNO (value));
13057 if (vecmode == V4SFmode)
13058 emit_insn (gen_sse2_cvttps2dq (x, value));
13059 else
13060 emit_insn (gen_sse2_cvttpd2dq (x, value));
13061 value = x;
13063 emit_insn (gen_xorv4si3 (value, value, large));
13066 /* Convert an unsigned DImode value into a DFmode, using only SSE.
13067 Expects the 64-bit DImode to be supplied in a pair of integral
13068 registers. Requires SSE2; will use SSE3 if available. For x86_32,
13069 -mfpmath=sse, !optimize_size only. */
13071 void
13072 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
13074 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
13075 rtx int_xmm, fp_xmm;
13076 rtx biases, exponents;
13077 rtx x;
13079 int_xmm = gen_reg_rtx (V4SImode);
13080 if (TARGET_INTER_UNIT_MOVES)
13081 emit_insn (gen_movdi_to_sse (int_xmm, input));
13082 else if (TARGET_SSE_SPLIT_REGS)
13084 emit_clobber (int_xmm);
13085 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
13087 else
13089 x = gen_reg_rtx (V2DImode);
13090 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
13091 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
13094 x = gen_rtx_CONST_VECTOR (V4SImode,
13095 gen_rtvec (4, GEN_INT (0x43300000UL),
13096 GEN_INT (0x45300000UL),
13097 const0_rtx, const0_rtx));
13098 exponents = validize_mem (force_const_mem (V4SImode, x));
13100 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
13101 emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, exponents));
13103 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
13104 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
13105 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
13106 (0x1.0p84 + double(fp_value_hi_xmm)).
13107 Note these exponents differ by 32. */
13109 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
13111 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
13112 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
13113 real_ldexp (&bias_lo_rvt, &dconst1, 52);
13114 real_ldexp (&bias_hi_rvt, &dconst1, 84);
13115 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
13116 x = const_double_from_real_value (bias_hi_rvt, DFmode);
13117 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
13118 biases = validize_mem (force_const_mem (V2DFmode, biases));
13119 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
13121 /* Add the upper and lower DFmode values together. */
13122 if (TARGET_SSE3)
13123 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
13124 else
13126 x = copy_to_mode_reg (V2DFmode, fp_xmm);
13127 emit_insn (gen_sse2_unpckhpd (fp_xmm, fp_xmm, fp_xmm));
13128 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
13131 ix86_expand_vector_extract (false, target, fp_xmm, 0);
13134 /* Not used, but eases macroization of patterns. */
13135 void
13136 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
13137 rtx input ATTRIBUTE_UNUSED)
13139 gcc_unreachable ();
13142 /* Convert an unsigned SImode value into a DFmode. Only currently used
13143 for SSE, but applicable anywhere. */
13145 void
13146 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
13148 REAL_VALUE_TYPE TWO31r;
13149 rtx x, fp;
13151 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
13152 NULL, 1, OPTAB_DIRECT);
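/* Adding -2^31 maps the unsigned input into the signed SImode range,
   so the floatsidf conversion below is exact; adding the 0x1.0p31
   constant afterwards undoes the shift.  */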
13154 fp = gen_reg_rtx (DFmode);
13155 emit_insn (gen_floatsidf2 (fp, x));
13157 real_ldexp (&TWO31r, &dconst1, 31);
13158 x = const_double_from_real_value (TWO31r, DFmode);
13160 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
13161 if (x != target)
13162 emit_move_insn (target, x);
13165 /* Convert a signed DImode value into a DFmode. Only used for SSE in
13166 32-bit mode; otherwise we have a direct convert instruction. */
13168 void
13169 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
13171 REAL_VALUE_TYPE TWO32r;
13172 rtx fp_lo, fp_hi, x;
13174 fp_lo = gen_reg_rtx (DFmode);
13175 fp_hi = gen_reg_rtx (DFmode);
13177 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
13179 real_ldexp (&TWO32r, &dconst1, 32);
13180 x = const_double_from_real_value (TWO32r, DFmode);
13181 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
13183 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
13185 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
13186 0, OPTAB_DIRECT);
13187 if (x != target)
13188 emit_move_insn (target, x);
13191 /* Convert an unsigned SImode value into a SFmode, using only SSE.
13192 For x86_32, -mfpmath=sse, !optimize_size only. */
13193 void
13194 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
13196 REAL_VALUE_TYPE ONE16r;
13197 rtx fp_hi, fp_lo, int_hi, int_lo, x;
13199 real_ldexp (&ONE16r, &dconst1, 16);
13200 x = const_double_from_real_value (ONE16r, SFmode);
13201 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
13202 NULL, 0, OPTAB_DIRECT);
13203 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
13204 NULL, 0, OPTAB_DIRECT);
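/* Each 16-bit half converts to SFmode exactly (the mantissa has 24
   bits) and the 2^16 scaling is exact as well, so only the final
   addition can round.  */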
13205 fp_hi = gen_reg_rtx (SFmode);
13206 fp_lo = gen_reg_rtx (SFmode);
13207 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
13208 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
13209 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
13210 0, OPTAB_DIRECT);
13211 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
13212 0, OPTAB_DIRECT);
13213 if (!rtx_equal_p (target, fp_hi))
13214 emit_move_insn (target, fp_hi);
13217 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
13218 then replicate the value for all elements of the vector
13219 register. */
13221 rtx
13222 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
13224 rtvec v;
13225 switch (mode)
13227 case SImode:
13228 gcc_assert (vect);
13229 v = gen_rtvec (4, value, value, value, value);
13230 return gen_rtx_CONST_VECTOR (V4SImode, v);
13232 case DImode:
13233 gcc_assert (vect);
13234 v = gen_rtvec (2, value, value);
13235 return gen_rtx_CONST_VECTOR (V2DImode, v);
13237 case SFmode:
13238 if (vect)
13239 v = gen_rtvec (4, value, value, value, value);
13240 else
13241 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
13242 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
13243 return gen_rtx_CONST_VECTOR (V4SFmode, v);
13245 case DFmode:
13246 if (vect)
13247 v = gen_rtvec (2, value, value);
13248 else
13249 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
13250 return gen_rtx_CONST_VECTOR (V2DFmode, v);
13252 default:
13253 gcc_unreachable ();
13257 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
13258 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
13259 for an SSE register. If VECT is true, then replicate the mask for
13260 all elements of the vector register. If INVERT is true, then create
13261 a mask excluding the sign bit. */
13263 rtx
13264 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
13266 enum machine_mode vec_mode, imode;
13267 HOST_WIDE_INT hi, lo;
13268 int shift = 63;
13269 rtx v;
13270 rtx mask;
13272 /* Find the sign bit, sign extended to 2*HWI. */
13273 switch (mode)
13275 case SImode:
13276 case SFmode:
13277 imode = SImode;
13278 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
13279 lo = 0x80000000, hi = lo < 0;
13280 break;
13282 case DImode:
13283 case DFmode:
13284 imode = DImode;
13285 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
13286 if (HOST_BITS_PER_WIDE_INT >= 64)
13287 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
13288 else
13289 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
13290 break;
13292 case TImode:
13293 case TFmode:
13294 vec_mode = VOIDmode;
13295 if (HOST_BITS_PER_WIDE_INT >= 64)
13297 imode = TImode;
13298 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
13300 else
13302 rtvec vec;
13304 imode = DImode;
13305 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
13307 if (invert)
13309 lo = ~lo, hi = ~hi;
13310 v = constm1_rtx;
13312 else
13313 v = const0_rtx;
13315 mask = immed_double_const (lo, hi, imode);
13317 vec = gen_rtvec (2, v, mask);
13318 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
13319 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
13321 return v;
13323 break;
13325 default:
13326 gcc_unreachable ();
13329 if (invert)
13330 lo = ~lo, hi = ~hi;
13332 /* Force this value into the low part of a fp vector constant. */
13333 mask = immed_double_const (lo, hi, imode);
13334 mask = gen_lowpart (mode, mask);
13336 if (vec_mode == VOIDmode)
13337 return force_reg (mode, mask);
13339 v = ix86_build_const_vector (mode, vect, mask);
13340 return force_reg (vec_mode, v);
13343 /* Generate code for floating point ABS or NEG. */
13345 void
13346 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
13347 rtx operands[])
13349 rtx mask, set, use, clob, dst, src;
13350 bool use_sse = false;
13351 bool vector_mode = VECTOR_MODE_P (mode);
13352 enum machine_mode elt_mode = mode;
13354 if (vector_mode)
13356 elt_mode = GET_MODE_INNER (mode);
13357 use_sse = true;
13359 else if (mode == TFmode)
13360 use_sse = true;
13361 else if (TARGET_SSE_MATH)
13362 use_sse = SSE_FLOAT_MODE_P (mode);
13364 /* NEG and ABS performed with SSE use bitwise mask operations.
13365 Create the appropriate mask now. */
13366 if (use_sse)
13367 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
13368 else
13369 mask = NULL_RTX;
13371 dst = operands[0];
13372 src = operands[1];
13374 if (vector_mode)
13376 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
13377 set = gen_rtx_SET (VOIDmode, dst, set);
13378 emit_insn (set);
13380 else
13382 set = gen_rtx_fmt_e (code, mode, src);
13383 set = gen_rtx_SET (VOIDmode, dst, set);
13384 if (mask)
13386 use = gen_rtx_USE (VOIDmode, mask);
13387 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13388 emit_insn (gen_rtx_PARALLEL (VOIDmode,
13389 gen_rtvec (3, set, use, clob)));
13391 else
13392 emit_insn (set);
13396 /* Expand a copysign operation. Special case operand 0 being a constant. */
13398 void
13399 ix86_expand_copysign (rtx operands[])
13401 enum machine_mode mode;
13402 rtx dest, op0, op1, mask, nmask;
13404 dest = operands[0];
13405 op0 = operands[1];
13406 op1 = operands[2];
13408 mode = GET_MODE (dest);
13410 if (GET_CODE (op0) == CONST_DOUBLE)
13412 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
13414 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
13415 op0 = simplify_unary_operation (ABS, mode, op0, mode);
13417 if (mode == SFmode || mode == DFmode)
13419 enum machine_mode vmode;
13421 vmode = mode == SFmode ? V4SFmode : V2DFmode;
13423 if (op0 == CONST0_RTX (mode))
13424 op0 = CONST0_RTX (vmode);
13425 else
13427 rtvec v;
13429 if (mode == SFmode)
13430 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
13431 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
13432 else
13433 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
13435 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
13438 else if (op0 != CONST0_RTX (mode))
13439 op0 = force_reg (mode, op0);
13441 mask = ix86_build_signbit_mask (mode, 0, 0);
13443 if (mode == SFmode)
13444 copysign_insn = gen_copysignsf3_const;
13445 else if (mode == DFmode)
13446 copysign_insn = gen_copysigndf3_const;
13447 else
13448 copysign_insn = gen_copysigntf3_const;
13450 emit_insn (copysign_insn (dest, op0, op1, mask));
13452 else
13454 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
13456 nmask = ix86_build_signbit_mask (mode, 0, 1);
13457 mask = ix86_build_signbit_mask (mode, 0, 0);
13459 if (mode == SFmode)
13460 copysign_insn = gen_copysignsf3_var;
13461 else if (mode == DFmode)
13462 copysign_insn = gen_copysigndf3_var;
13463 else
13464 copysign_insn = gen_copysigntf3_var;
13466 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
13470 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
13471 be a constant, and so has already been expanded into a vector constant. */
13473 void
13474 ix86_split_copysign_const (rtx operands[])
13476 enum machine_mode mode, vmode;
13477 rtx dest, op0, op1, mask, x;
13479 dest = operands[0];
13480 op0 = operands[1];
13481 op1 = operands[2];
13482 mask = operands[3];
13484 mode = GET_MODE (dest);
13485 vmode = GET_MODE (mask);
13487 dest = simplify_gen_subreg (vmode, dest, mode, 0);
13488 x = gen_rtx_AND (vmode, dest, mask);
13489 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13491 if (op0 != CONST0_RTX (vmode))
13493 x = gen_rtx_IOR (vmode, dest, op0);
13494 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
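/* Illustrative sketch: dest is assumed to already hold op1 (the insn
   pattern ties them together), so for copysign (2.0, y) this emits
   roughly
     dest = y & sign_mask;  dest |= {|2.0|, 0, ...};
   keeping only y's sign bit and merging in the constant magnitude.  */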
13498 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
13499 so we have to do two masks. */
13501 void
13502 ix86_split_copysign_var (rtx operands[])
13504 enum machine_mode mode, vmode;
13505 rtx dest, scratch, op0, op1, mask, nmask, x;
13507 dest = operands[0];
13508 scratch = operands[1];
13509 op0 = operands[2];
13510 op1 = operands[3];
13511 nmask = operands[4];
13512 mask = operands[5];
13514 mode = GET_MODE (dest);
13515 vmode = GET_MODE (mask);
13517 if (rtx_equal_p (op0, op1))
13519 /* Shouldn't happen often (it's useless, obviously), but when it does
13520 we'd generate incorrect code if we continue below. */
13521 emit_move_insn (dest, op0);
13522 return;
13525 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
13527 gcc_assert (REGNO (op1) == REGNO (scratch));
13529 x = gen_rtx_AND (vmode, scratch, mask);
13530 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
13532 dest = mask;
13533 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
13534 x = gen_rtx_NOT (vmode, dest);
13535 x = gen_rtx_AND (vmode, x, op0);
13536 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13538 else
13540 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
13542 x = gen_rtx_AND (vmode, scratch, mask);
13544 else /* alternative 2,4 */
13546 gcc_assert (REGNO (mask) == REGNO (scratch));
13547 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
13548 x = gen_rtx_AND (vmode, scratch, op1);
13550 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
13552 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
13554 dest = simplify_gen_subreg (vmode, op0, mode, 0);
13555 x = gen_rtx_AND (vmode, dest, nmask);
13557 else /* alternative 3,4 */
13559 gcc_assert (REGNO (nmask) == REGNO (dest));
13560 dest = nmask;
13561 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
13562 x = gen_rtx_AND (vmode, dest, op0);
13564 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13567 x = gen_rtx_IOR (vmode, dest, scratch);
13568 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13571 /* Return TRUE or FALSE depending on whether the first SET in INSN
13572 has source and destination with matching CC modes, and whether the
13573 CC mode is at least as constrained as REQ_MODE.  */
13575 int
13576 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
13578 rtx set;
13579 enum machine_mode set_mode;
13581 set = PATTERN (insn);
13582 if (GET_CODE (set) == PARALLEL)
13583 set = XVECEXP (set, 0, 0);
13584 gcc_assert (GET_CODE (set) == SET);
13585 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
13587 set_mode = GET_MODE (SET_DEST (set));
13588 switch (set_mode)
13590 case CCNOmode:
13591 if (req_mode != CCNOmode
13592 && (req_mode != CCmode
13593 || XEXP (SET_SRC (set), 1) != const0_rtx))
13594 return 0;
13595 break;
13596 case CCmode:
13597 if (req_mode == CCGCmode)
13598 return 0;
13599 /* FALLTHRU */
13600 case CCGCmode:
13601 if (req_mode == CCGOCmode || req_mode == CCNOmode)
13602 return 0;
13603 /* FALLTHRU */
13604 case CCGOCmode:
13605 if (req_mode == CCZmode)
13606 return 0;
13607 /* FALLTHRU */
13608 case CCAmode:
13609 case CCCmode:
13610 case CCOmode:
13611 case CCSmode:
13612 case CCZmode:
13613 break;
13615 default:
13616 gcc_unreachable ();
13619 return (GET_MODE (SET_SRC (set)) == set_mode);
13622 /* Generate insn patterns to do an integer compare of OPERANDS. */
13624 static rtx
13625 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
13627 enum machine_mode cmpmode;
13628 rtx tmp, flags;
13630 cmpmode = SELECT_CC_MODE (code, op0, op1);
13631 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
13633 /* This is very simple, but making the interface the same as in the
13634 FP case makes the rest of the code easier. */
13635 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
13636 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
13638 /* Return the test that should be put into the flags user, i.e.
13639 the bcc, scc, or cmov instruction. */
13640 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
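/* E.g. for (EQ a 0) this emits
     (set (reg:CCZ flags) (compare:CCZ a (const_int 0)))
   and returns (eq (reg:CCZ flags) (const_int 0)) for the flags user.  */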
13643 /* Figure out whether to use ordered or unordered fp comparisons.
13644 Return the appropriate mode to use. */
13646 enum machine_mode
13647 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
13649 /* ??? In order to make all comparisons reversible, we do all comparisons
13650 non-trapping when compiling for IEEE.  Once gcc is able to distinguish
13651 between all forms of trapping and nontrapping comparisons, we can make
13652 inequality comparisons trapping again, since that results in better code
13653 when using FCOM based compares.  */
13654 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
13657 enum machine_mode
13658 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
13660 enum machine_mode mode = GET_MODE (op0);
13662 if (SCALAR_FLOAT_MODE_P (mode))
13664 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
13665 return ix86_fp_compare_mode (code);
13668 switch (code)
13670 /* Only zero flag is needed. */
13671 case EQ: /* ZF=0 */
13672 case NE: /* ZF!=0 */
13673 return CCZmode;
13674 /* Codes needing carry flag. */
13675 case GEU: /* CF=0 */
13676 case LTU: /* CF=1 */
13677 /* Detect overflow checks. They need just the carry flag. */
13678 if (GET_CODE (op0) == PLUS
13679 && rtx_equal_p (op1, XEXP (op0, 0)))
13680 return CCCmode;
13681 else
13682 return CCmode;
13683 case GTU: /* CF=0 & ZF=0 */
13684 case LEU: /* CF=1 | ZF=1 */
13685 /* Detect overflow checks. They need just the carry flag. */
13686 if (GET_CODE (op0) == MINUS
13687 && rtx_equal_p (op1, XEXP (op0, 0)))
13688 return CCCmode;
13689 else
13690 return CCmode;
13691 /* Codes possibly doable only with sign flag when
13692 comparing against zero. */
13693 case GE: /* SF=OF or SF=0 */
13694 case LT: /* SF<>OF or SF=1 */
13695 if (op1 == const0_rtx)
13696 return CCGOCmode;
13697 else
13698 /* For other cases Carry flag is not required. */
13699 return CCGCmode;
13700 /* Codes doable only with the sign flag when comparing
13701 against zero, but for which we lack a jump instruction,
13702 so we must use relational tests against overflow, which
13703 therefore needs to be zero.  */
13704 case GT: /* ZF=0 & SF=OF */
13705 case LE: /* ZF=1 | SF<>OF */
13706 if (op1 == const0_rtx)
13707 return CCNOmode;
13708 else
13709 return CCGCmode;
13710 /* The strcmp pattern does a (use flags), and combine may ask us
13711 for the proper mode.  */
13712 case USE:
13713 return CCmode;
13714 default:
13715 gcc_unreachable ();
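/* E.g. the overflow check "a + b < a" reaches us as
   (ltu (plus a b) a) and gets CCCmode, since the carry
   flag alone decides it.  */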
13719 /* Return the fixed registers used for condition codes. */
13721 static bool
13722 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13724 *p1 = FLAGS_REG;
13725 *p2 = FPSR_REG;
13726 return true;
13729 /* If two condition code modes are compatible, return a condition code
13730 mode which is compatible with both. Otherwise, return
13731 VOIDmode. */
13733 static enum machine_mode
13734 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
13736 if (m1 == m2)
13737 return m1;
13739 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
13740 return VOIDmode;
13742 if ((m1 == CCGCmode && m2 == CCGOCmode)
13743 || (m1 == CCGOCmode && m2 == CCGCmode))
13744 return CCGCmode;
13746 switch (m1)
13748 default:
13749 gcc_unreachable ();
13751 case CCmode:
13752 case CCGCmode:
13753 case CCGOCmode:
13754 case CCNOmode:
13755 case CCAmode:
13756 case CCCmode:
13757 case CCOmode:
13758 case CCSmode:
13759 case CCZmode:
13760 switch (m2)
13762 default:
13763 return VOIDmode;
13765 case CCmode:
13766 case CCGCmode:
13767 case CCGOCmode:
13768 case CCNOmode:
13769 case CCAmode:
13770 case CCCmode:
13771 case CCOmode:
13772 case CCSmode:
13773 case CCZmode:
13774 return CCmode;
13777 case CCFPmode:
13778 case CCFPUmode:
13779 /* These are only compatible with themselves, which we already
13780 checked above. */
13781 return VOIDmode;
13785 /* Split comparison code CODE into comparisons we can do using branch
13786 instructions. BYPASS_CODE is comparison code for branch that will
13787 branch around FIRST_CODE and SECOND_CODE.  If one of the branches
13788 is not required, set its value to UNKNOWN.
13789 We never require more than two branches. */
13791 void
13792 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
13793 enum rtx_code *first_code,
13794 enum rtx_code *second_code)
13796 *first_code = code;
13797 *bypass_code = UNKNOWN;
13798 *second_code = UNKNOWN;
13800 /* The fcomi comparison sets flags as follows:
13802 cmp ZF PF CF
13803 > 0 0 0
13804 < 0 0 1
13805 = 1 0 0
13806 un 1 1 1 */
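/* Reading the table: GT corresponds to the unsigned "ja" test
   (CF=0 && ZF=0), GE to "jae" (CF=0) and UNORDERED to "jp" (PF=1),
   which is why the FP codes below behave like their unsigned
   integer counterparts.  */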
13808 switch (code)
13810 case GT: /* GTU - CF=0 & ZF=0 */
13811 case GE: /* GEU - CF=0 */
13812 case ORDERED: /* PF=0 */
13813 case UNORDERED: /* PF=1 */
13814 case UNEQ: /* EQ - ZF=1 */
13815 case UNLT: /* LTU - CF=1 */
13816 case UNLE: /* LEU - CF=1 | ZF=1 */
13817 case LTGT: /* EQ - ZF=0 */
13818 break;
13819 case LT: /* LTU - CF=1 - fails on unordered */
13820 *first_code = UNLT;
13821 *bypass_code = UNORDERED;
13822 break;
13823 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
13824 *first_code = UNLE;
13825 *bypass_code = UNORDERED;
13826 break;
13827 case EQ: /* EQ - ZF=1 - fails on unordered */
13828 *first_code = UNEQ;
13829 *bypass_code = UNORDERED;
13830 break;
13831 case NE: /* NE - ZF=0 - fails on unordered */
13832 *first_code = LTGT;
13833 *second_code = UNORDERED;
13834 break;
13835 case UNGE: /* GEU - CF=0 - fails on unordered */
13836 *first_code = GE;
13837 *second_code = UNORDERED;
13838 break;
13839 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
13840 *first_code = GT;
13841 *second_code = UNORDERED;
13842 break;
13843 default:
13844 gcc_unreachable ();
13846 if (!TARGET_IEEE_FP)
13848 *second_code = UNKNOWN;
13849 *bypass_code = UNKNOWN;
13853 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
13854 All the following functions use the number of instructions as their cost metric.
13855 In the future this should be tweaked to compute bytes for optimize_size and
13856 to take into account the performance of various instructions on various CPUs.  */
13857 static int
13858 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
13860 if (!TARGET_IEEE_FP)
13861 return 4;
13862 /* The cost of code output by ix86_expand_fp_compare. */
13863 switch (code)
13865 case UNLE:
13866 case UNLT:
13867 case LTGT:
13868 case GT:
13869 case GE:
13870 case UNORDERED:
13871 case ORDERED:
13872 case UNEQ:
13873 return 4;
13874 break;
13875 case LT:
13876 case NE:
13877 case EQ:
13878 case UNGE:
13879 return 5;
13880 break;
13881 case LE:
13882 case UNGT:
13883 return 6;
13884 break;
13885 default:
13886 gcc_unreachable ();
13890 /* Return cost of comparison done using fcomi operation.
13891 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13892 static int
13893 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
13895 enum rtx_code bypass_code, first_code, second_code;
13896 /* Return an arbitrarily high cost when the instruction is not supported -
13897 this prevents gcc from using it.  */
13898 if (!TARGET_CMOVE)
13899 return 1024;
13900 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13901 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
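/* E.g. EQ with TARGET_IEEE_FP needs the UNORDERED bypass branch,
   so its fcomi cost works out to 2 + 1 = 3.  */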
13904 /* Return cost of comparison done using sahf operation.
13905 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13906 static int
13907 ix86_fp_comparison_sahf_cost (enum rtx_code code)
13909 enum rtx_code bypass_code, first_code, second_code;
13910 /* Return an arbitrarily high cost when the instruction is not preferred -
13911 this keeps gcc from using it.  */
13912 if (!(TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ())))
13913 return 1024;
13914 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
13915 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
13918 /* Compute cost of the comparison done using any method.
13919 See ix86_fp_comparison_arithmetics_cost for the metrics. */
13920 static int
13921 ix86_fp_comparison_cost (enum rtx_code code)
13923 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
13924 int min;
13926 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
13927 sahf_cost = ix86_fp_comparison_sahf_cost (code);
13929 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
13930 if (min > sahf_cost)
13931 min = sahf_cost;
13932 if (min > fcomi_cost)
13933 min = fcomi_cost;
13934 return min;
13937 /* Return true if we should use an FCOMI instruction for this
13938 fp comparison. */
13940 int
13941 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
13943 enum rtx_code swapped_code = swap_condition (code);
13945 return ((ix86_fp_comparison_cost (code)
13946 == ix86_fp_comparison_fcomi_cost (code))
13947 || (ix86_fp_comparison_cost (swapped_code)
13948 == ix86_fp_comparison_fcomi_cost (swapped_code)));
13951 /* Swap, force into registers, or otherwise massage the two operands
13952 to a fp comparison. The operands are updated in place; the new
13953 comparison code is returned. */
13955 static enum rtx_code
13956 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
13958 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
13959 rtx op0 = *pop0, op1 = *pop1;
13960 enum machine_mode op_mode = GET_MODE (op0);
13961 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
13963 /* All of the unordered compare instructions only work on registers.
13964 The same is true of the fcomi compare instructions. The XFmode
13965 compare instructions require registers except when comparing
13966 against zero or when converting operand 1 from fixed point to
13967 floating point. */
13969 if (!is_sse
13970 && (fpcmp_mode == CCFPUmode
13971 || (op_mode == XFmode
13972 && ! (standard_80387_constant_p (op0) == 1
13973 || standard_80387_constant_p (op1) == 1)
13974 && GET_CODE (op1) != FLOAT)
13975 || ix86_use_fcomi_compare (code)))
13977 op0 = force_reg (op_mode, op0);
13978 op1 = force_reg (op_mode, op1);
13980 else
13982 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
13983 things around if they appear profitable, otherwise force op0
13984 into a register. */
13986 if (standard_80387_constant_p (op0) == 0
13987 || (MEM_P (op0)
13988 && ! (standard_80387_constant_p (op1) == 0
13989 || MEM_P (op1))))
13991 rtx tmp;
13992 tmp = op0, op0 = op1, op1 = tmp;
13993 code = swap_condition (code);
13996 if (!REG_P (op0))
13997 op0 = force_reg (op_mode, op0);
13999 if (CONSTANT_P (op1))
14001 int tmp = standard_80387_constant_p (op1);
14002 if (tmp == 0)
14003 op1 = validize_mem (force_const_mem (op_mode, op1));
14004 else if (tmp == 1)
14006 if (TARGET_CMOVE)
14007 op1 = force_reg (op_mode, op1);
14009 else
14010 op1 = force_reg (op_mode, op1);
14014 /* Try to rearrange the comparison to make it cheaper. */
14015 if (ix86_fp_comparison_cost (code)
14016 > ix86_fp_comparison_cost (swap_condition (code))
14017 && (REG_P (op1) || can_create_pseudo_p ()))
14019 rtx tmp;
14020 tmp = op0, op0 = op1, op1 = tmp;
14021 code = swap_condition (code);
14022 if (!REG_P (op0))
14023 op0 = force_reg (op_mode, op0);
14026 *pop0 = op0;
14027 *pop1 = op1;
14028 return code;
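/* Note that swap_condition flips the operand order, e.g. LT becomes
   GT and LEU becomes GEU, so the meaning of the comparison is
   preserved across the operand exchanges above.  */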
14031 /* Convert the comparison codes we use to represent FP comparisons to the
14032 integer code that will result in a proper branch.  Return UNKNOWN if no
14033 such code is available.  */
14035 enum rtx_code
14036 ix86_fp_compare_code_to_integer (enum rtx_code code)
14038 switch (code)
14040 case GT:
14041 return GTU;
14042 case GE:
14043 return GEU;
14044 case ORDERED:
14045 case UNORDERED:
14046 return code;
14047 break;
14048 case UNEQ:
14049 return EQ;
14050 break;
14051 case UNLT:
14052 return LTU;
14053 break;
14054 case UNLE:
14055 return LEU;
14056 break;
14057 case LTGT:
14058 return NE;
14059 break;
14060 default:
14061 return UNKNOWN;
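/* E.g. UNLT maps to LTU: after an FP compare, CF=1 covers exactly
   "less or unordered", which is the UNLT condition.  */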
14065 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14067 static rtx
14068 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
14069 rtx *second_test, rtx *bypass_test)
14071 enum machine_mode fpcmp_mode, intcmp_mode;
14072 rtx tmp, tmp2;
14073 int cost = ix86_fp_comparison_cost (code);
14074 enum rtx_code bypass_code, first_code, second_code;
14076 fpcmp_mode = ix86_fp_compare_mode (code);
14077 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14079 if (second_test)
14080 *second_test = NULL_RTX;
14081 if (bypass_test)
14082 *bypass_test = NULL_RTX;
14084 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
14086 /* Do fcomi/sahf based test when profitable. */
14087 if (ix86_fp_comparison_arithmetics_cost (code) > cost
14088 && (bypass_code == UNKNOWN || bypass_test)
14089 && (second_code == UNKNOWN || second_test))
14091 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14092 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14093 tmp);
14094 if (TARGET_CMOVE)
14095 emit_insn (tmp);
14096 else
14098 gcc_assert (TARGET_SAHF);
14100 if (!scratch)
14101 scratch = gen_reg_rtx (HImode);
14102 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
14104 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
14107 /* The FP codes work out to act like unsigned. */
14108 intcmp_mode = fpcmp_mode;
14109 code = first_code;
14110 if (bypass_code != UNKNOWN)
14111 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
14112 gen_rtx_REG (intcmp_mode, FLAGS_REG),
14113 const0_rtx);
14114 if (second_code != UNKNOWN)
14115 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
14116 gen_rtx_REG (intcmp_mode, FLAGS_REG),
14117 const0_rtx);
14119 else
14121 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
14122 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14123 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
14124 if (!scratch)
14125 scratch = gen_reg_rtx (HImode);
14126 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
14128 /* In the unordered case, we have to check C2 for NaN's, which
14129 doesn't happen to work out to anything nice combination-wise.
14130 So do some bit twiddling on the value we've got in AH to come
14131 up with an appropriate set of condition codes. */
14133 intcmp_mode = CCNOmode;
14134 switch (code)
14136 case GT:
14137 case UNGT:
14138 if (code == GT || !TARGET_IEEE_FP)
14140 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14141 code = EQ;
14143 else
14145 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14146 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14147 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
14148 intcmp_mode = CCmode;
14149 code = GEU;
14151 break;
14152 case LT:
14153 case UNLT:
14154 if (code == LT && TARGET_IEEE_FP)
14156 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14157 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
14158 intcmp_mode = CCmode;
14159 code = EQ;
14161 else
14163 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
14164 code = NE;
14166 break;
14167 case GE:
14168 case UNGE:
14169 if (code == GE || !TARGET_IEEE_FP)
14171 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
14172 code = EQ;
14174 else
14176 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14177 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
14178 GEN_INT (0x01)));
14179 code = NE;
14181 break;
14182 case LE:
14183 case UNLE:
14184 if (code == LE && TARGET_IEEE_FP)
14186 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14187 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14188 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
14189 intcmp_mode = CCmode;
14190 code = LTU;
14192 else
14194 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14195 code = NE;
14197 break;
14198 case EQ:
14199 case UNEQ:
14200 if (code == EQ && TARGET_IEEE_FP)
14202 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14203 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
14204 intcmp_mode = CCmode;
14205 code = EQ;
14207 else
14209 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
14210 code = NE;
14211 break;
14213 break;
14214 case NE:
14215 case LTGT:
14216 if (code == NE && TARGET_IEEE_FP)
14218 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14219 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
14220 GEN_INT (0x40)));
14221 code = NE;
14223 else
14225 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
14226 code = EQ;
14228 break;
14230 case UNORDERED:
14231 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
14232 code = NE;
14233 break;
14234 case ORDERED:
14235 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
14236 code = EQ;
14237 break;
14239 default:
14240 gcc_unreachable ();
14244 /* Return the test that should be put into the flags user, i.e.
14245 the bcc, scc, or cmov instruction. */
14246 return gen_rtx_fmt_ee (code, VOIDmode,
14247 gen_rtx_REG (intcmp_mode, FLAGS_REG),
14248 const0_rtx);
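/* E.g. for GT without IEEE math the fnstsw path boils down to roughly
     f*com ; fnstsw %ax ; testb $0x45, %ah ; je <true>
   where 0x45 selects the C0, C2 and C3 bits of the status word.  */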
14251 rtx
14252 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
14254 rtx op0, op1, ret;
14255 op0 = ix86_compare_op0;
14256 op1 = ix86_compare_op1;
14258 if (second_test)
14259 *second_test = NULL_RTX;
14260 if (bypass_test)
14261 *bypass_test = NULL_RTX;
14263 if (ix86_compare_emitted)
14265 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
14266 ix86_compare_emitted = NULL_RTX;
14268 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
14270 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
14271 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
14272 second_test, bypass_test);
14274 else
14275 ret = ix86_expand_int_compare (code, op0, op1);
14277 return ret;
14280 /* Return true if the CODE will result in nontrivial jump sequence. */
14281 bool
14282 ix86_fp_jump_nontrivial_p (enum rtx_code code)
14284 enum rtx_code bypass_code, first_code, second_code;
14285 if (!TARGET_CMOVE)
14286 return true;
14287 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
14288 return bypass_code != UNKNOWN || second_code != UNKNOWN;
14291 void
14292 ix86_expand_branch (enum rtx_code code, rtx label)
14294 rtx tmp;
14296 /* If we have emitted a compare insn, go straight to simple.
14297 ix86_expand_compare won't emit anything if ix86_compare_emitted
14298 is non-NULL.  */
14299 if (ix86_compare_emitted)
14300 goto simple;
14302 switch (GET_MODE (ix86_compare_op0))
14304 case QImode:
14305 case HImode:
14306 case SImode:
14307 simple:
14308 tmp = ix86_expand_compare (code, NULL, NULL);
14309 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14310 gen_rtx_LABEL_REF (VOIDmode, label),
14311 pc_rtx);
14312 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
14313 return;
14315 case SFmode:
14316 case DFmode:
14317 case XFmode:
14319 rtvec vec;
14320 int use_fcomi;
14321 enum rtx_code bypass_code, first_code, second_code;
14323 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
14324 &ix86_compare_op1);
14326 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
14328 /* Check whether we will use the natural sequence with one jump.  If
14329 so, we can expand the jump early.  Otherwise delay expansion by
14330 creating a compound insn so as not to confuse the optimizers.  */
14331 if (bypass_code == UNKNOWN && second_code == UNKNOWN)
14333 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
14334 gen_rtx_LABEL_REF (VOIDmode, label),
14335 pc_rtx, NULL_RTX, NULL_RTX);
14337 else
14339 tmp = gen_rtx_fmt_ee (code, VOIDmode,
14340 ix86_compare_op0, ix86_compare_op1);
14341 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14342 gen_rtx_LABEL_REF (VOIDmode, label),
14343 pc_rtx);
14344 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
14346 use_fcomi = ix86_use_fcomi_compare (code);
14347 vec = rtvec_alloc (3 + !use_fcomi);
14348 RTVEC_ELT (vec, 0) = tmp;
14349 RTVEC_ELT (vec, 1)
14350 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FPSR_REG));
14351 RTVEC_ELT (vec, 2)
14352 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FLAGS_REG));
14353 if (! use_fcomi)
14354 RTVEC_ELT (vec, 3)
14355 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
14357 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
14359 return;
14362 case DImode:
14363 if (TARGET_64BIT)
14364 goto simple;
14365 case TImode:
14366 /* Expand DImode branch into multiple compare+branch. */
14368 rtx lo[2], hi[2], label2;
14369 enum rtx_code code1, code2, code3;
14370 enum machine_mode submode;
14372 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
14374 tmp = ix86_compare_op0;
14375 ix86_compare_op0 = ix86_compare_op1;
14376 ix86_compare_op1 = tmp;
14377 code = swap_condition (code);
14379 if (GET_MODE (ix86_compare_op0) == DImode)
14381 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
14382 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
14383 submode = SImode;
14385 else
14387 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
14388 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
14389 submode = DImode;
14392 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
14393 avoid two branches. This costs one extra insn, so disable when
14394 optimizing for size. */
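/* E.g. a 64-bit "a == b" on ia32 becomes roughly
     xorl bhi, ahi ; xorl blo, alo ; orl ahi, alo ; jz <label>  */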
14396 if ((code == EQ || code == NE)
14397 && (!optimize_insn_for_size_p ()
14398 || hi[1] == const0_rtx || lo[1] == const0_rtx))
14400 rtx xor0, xor1;
14402 xor1 = hi[0];
14403 if (hi[1] != const0_rtx)
14404 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
14405 NULL_RTX, 0, OPTAB_WIDEN);
14407 xor0 = lo[0];
14408 if (lo[1] != const0_rtx)
14409 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
14410 NULL_RTX, 0, OPTAB_WIDEN);
14412 tmp = expand_binop (submode, ior_optab, xor1, xor0,
14413 NULL_RTX, 0, OPTAB_WIDEN);
14415 ix86_compare_op0 = tmp;
14416 ix86_compare_op1 = const0_rtx;
14417 ix86_expand_branch (code, label);
14418 return;
14421 /* Otherwise, if we are doing a less-than or greater-or-equal-than
14422 comparison, op1 is a constant, and the low word is zero, then we
14423 can just examine the high word.  Similarly for a low word of -1
14424 with less-or-equal-than or greater-than.  */
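/* E.g. on ia32 "x < 0x100000000ULL" splits into hi[1] == 1 and
   lo[1] == 0, so only "hi(x) < 1" needs to be tested.  */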
14426 if (CONST_INT_P (hi[1]))
14427 switch (code)
14429 case LT: case LTU: case GE: case GEU:
14430 if (lo[1] == const0_rtx)
14432 ix86_compare_op0 = hi[0];
14433 ix86_compare_op1 = hi[1];
14434 ix86_expand_branch (code, label);
14435 return;
14437 break;
14438 case LE: case LEU: case GT: case GTU:
14439 if (lo[1] == constm1_rtx)
14441 ix86_compare_op0 = hi[0];
14442 ix86_compare_op1 = hi[1];
14443 ix86_expand_branch (code, label);
14444 return;
14446 break;
14447 default:
14448 break;
14451 /* Otherwise, we need two or three jumps. */
14453 label2 = gen_label_rtx ();
14455 code1 = code;
14456 code2 = swap_condition (code);
14457 code3 = unsigned_condition (code);
14459 switch (code)
14461 case LT: case GT: case LTU: case GTU:
14462 break;
14464 case LE: code1 = LT; code2 = GT; break;
14465 case GE: code1 = GT; code2 = LT; break;
14466 case LEU: code1 = LTU; code2 = GTU; break;
14467 case GEU: code1 = GTU; code2 = LTU; break;
14469 case EQ: code1 = UNKNOWN; code2 = NE; break;
14470 case NE: code2 = UNKNOWN; break;
14472 default:
14473 gcc_unreachable ();
14477 * a < b =>
14478 * if (hi(a) < hi(b)) goto true;
14479 * if (hi(a) > hi(b)) goto false;
14480 * if (lo(a) < lo(b)) goto true;
14481 * false:
14484 ix86_compare_op0 = hi[0];
14485 ix86_compare_op1 = hi[1];
14487 if (code1 != UNKNOWN)
14488 ix86_expand_branch (code1, label);
14489 if (code2 != UNKNOWN)
14490 ix86_expand_branch (code2, label2);
14492 ix86_compare_op0 = lo[0];
14493 ix86_compare_op1 = lo[1];
14494 ix86_expand_branch (code3, label);
14496 if (code2 != UNKNOWN)
14497 emit_label (label2);
14498 return;
14501 default:
14502 gcc_unreachable ();
14506 /* Split branch based on floating point condition. */
14507 void
14508 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
14509 rtx target1, rtx target2, rtx tmp, rtx pushed)
14511 rtx second, bypass;
14512 rtx label = NULL_RTX;
14513 rtx condition;
14514 int bypass_probability = -1, second_probability = -1, probability = -1;
14515 rtx i;
14517 if (target2 != pc_rtx)
14519 rtx tmp = target2;
14520 code = reverse_condition_maybe_unordered (code);
14521 target2 = target1;
14522 target1 = tmp;
14525 condition = ix86_expand_fp_compare (code, op1, op2,
14526 tmp, &second, &bypass);
14528 /* Remove pushed operand from stack. */
14529 if (pushed)
14530 ix86_free_from_memory (GET_MODE (pushed));
14532 if (split_branch_probability >= 0)
14534 /* Distribute the probabilities across the jumps.
14535 Assume that BYPASS and SECOND always test
14536 for UNORDERED.  */
14537 probability = split_branch_probability;
14539 /* A value of 1 is low enough that the probability need not be
14540 updated.  Later we may run some experiments and see whether
14541 unordered values are more frequent in practice.  */
14542 if (bypass)
14543 bypass_probability = 1;
14544 if (second)
14545 second_probability = 1;
14547 if (bypass != NULL_RTX)
14549 label = gen_label_rtx ();
14550 i = emit_jump_insn (gen_rtx_SET
14551 (VOIDmode, pc_rtx,
14552 gen_rtx_IF_THEN_ELSE (VOIDmode,
14553 bypass,
14554 gen_rtx_LABEL_REF (VOIDmode,
14555 label),
14556 pc_rtx)));
14557 if (bypass_probability >= 0)
14558 REG_NOTES (i)
14559 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14560 GEN_INT (bypass_probability),
14561 REG_NOTES (i));
14563 i = emit_jump_insn (gen_rtx_SET
14564 (VOIDmode, pc_rtx,
14565 gen_rtx_IF_THEN_ELSE (VOIDmode,
14566 condition, target1, target2)));
14567 if (probability >= 0)
14568 REG_NOTES (i)
14569 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14570 GEN_INT (probability),
14571 REG_NOTES (i));
14572 if (second != NULL_RTX)
14574 i = emit_jump_insn (gen_rtx_SET
14575 (VOIDmode, pc_rtx,
14576 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
14577 target2)));
14578 if (second_probability >= 0)
14579 REG_NOTES (i)
14580 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14581 GEN_INT (second_probability),
14582 REG_NOTES (i));
14584 if (label != NULL_RTX)
14585 emit_label (label);
14588 int
14589 ix86_expand_setcc (enum rtx_code code, rtx dest)
14591 rtx ret, tmp, tmpreg, equiv;
14592 rtx second_test, bypass_test;
14594 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
14595 return 0; /* FAIL */
14597 gcc_assert (GET_MODE (dest) == QImode);
14599 ret = ix86_expand_compare (code, &second_test, &bypass_test);
14600 PUT_MODE (ret, QImode);
14602 tmp = dest;
14603 tmpreg = dest;
14605 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
14606 if (bypass_test || second_test)
14608 rtx test = second_test;
14609 int bypass = 0;
14610 rtx tmp2 = gen_reg_rtx (QImode);
14611 if (bypass_test)
14613 gcc_assert (!second_test);
14614 test = bypass_test;
14615 bypass = 1;
14616 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
14618 PUT_MODE (test, QImode);
14619 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
14621 if (bypass)
14622 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
14623 else
14624 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
14627 /* Attach a REG_EQUAL note describing the comparison result. */
14628 if (ix86_compare_op0 && ix86_compare_op1)
14630 equiv = simplify_gen_relational (code, QImode,
14631 GET_MODE (ix86_compare_op0),
14632 ix86_compare_op0, ix86_compare_op1);
14633 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
14636 return 1; /* DONE */
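/* E.g. the IEEE UNGE case comes back as GE plus an UNORDERED second
   test, which is combined here roughly as
     setae %al ; setp %cl ; orb %cl, %al  */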
14639 /* Expand comparison setting or clearing carry flag. Return true when
14640 successful and set pop for the operation. */
14641 static bool
14642 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
14644 enum machine_mode mode =
14645 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
14647 /* Do not handle DImode compares that go through special path. */
14648 if (mode == (TARGET_64BIT ? TImode : DImode))
14649 return false;
14651 if (SCALAR_FLOAT_MODE_P (mode))
14653 rtx second_test = NULL, bypass_test = NULL;
14654 rtx compare_op, compare_seq;
14656 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14658 /* Shortcut: the following common codes never translate
14659 into carry flag compares.  */
14660 if (code == EQ || code == NE || code == UNEQ || code == LTGT
14661 || code == ORDERED || code == UNORDERED)
14662 return false;
14664 /* These comparisons require the zero flag; swap the operands so they no longer do.  */
14665 if ((code == GT || code == UNLE || code == LE || code == UNGT)
14666 && !TARGET_IEEE_FP)
14668 rtx tmp = op0;
14669 op0 = op1;
14670 op1 = tmp;
14671 code = swap_condition (code);
14674 /* Try to expand the comparison and verify that we end up with
14675 a carry flag based comparison.  This fails only when we decide
14676 to expand the comparison using arithmetic, which is not a
14677 common scenario.  */
14678 start_sequence ();
14679 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
14680 &second_test, &bypass_test);
14681 compare_seq = get_insns ();
14682 end_sequence ();
14684 if (second_test || bypass_test)
14685 return false;
14687 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
14688 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
14689 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
14690 else
14691 code = GET_CODE (compare_op);
14693 if (code != LTU && code != GEU)
14694 return false;
14696 emit_insn (compare_seq);
14697 *pop = compare_op;
14698 return true;
14701 if (!INTEGRAL_MODE_P (mode))
14702 return false;
14704 switch (code)
14706 case LTU:
14707 case GEU:
14708 break;
14710 /* Convert a==0 into (unsigned)a<1. */
14711 case EQ:
14712 case NE:
14713 if (op1 != const0_rtx)
14714 return false;
14715 op1 = const1_rtx;
14716 code = (code == EQ ? LTU : GEU);
14717 break;
14719 /* Convert a>b into b<a or a>=b-1. */
14720 case GTU:
14721 case LEU:
14722 if (CONST_INT_P (op1))
14724 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
14725 /* Bail out on overflow.  We could still swap the operands, but
14726 that would force the constant to be loaded into a register.  */
14727 if (op1 == const0_rtx
14728 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
14729 return false;
14730 code = (code == GTU ? GEU : LTU);
14732 else
14734 rtx tmp = op1;
14735 op1 = op0;
14736 op0 = tmp;
14737 code = (code == GTU ? LTU : GEU);
14739 break;
14741 /* Convert a>=0 into (unsigned)a<0x80000000. */
14742 case LT:
14743 case GE:
14744 if (mode == DImode || op1 != const0_rtx)
14745 return false;
14746 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
14747 code = (code == LT ? GEU : LTU);
14748 break;
14749 case LE:
14750 case GT:
14751 if (mode == DImode || op1 != constm1_rtx)
14752 return false;
14753 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
14754 code = (code == LE ? GEU : LTU);
14755 break;
14757 default:
14758 return false;
14760 /* Swapping operands may cause constant to appear as first operand. */
14761 if (!nonimmediate_operand (op0, VOIDmode))
14763 if (!can_create_pseudo_p ())
14764 return false;
14765 op0 = force_reg (mode, op0);
14767 ix86_compare_op0 = op0;
14768 ix86_compare_op1 = op1;
14769 *pop = ix86_expand_compare (code, NULL, NULL);
14770 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
14771 return true;
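/* E.g. "a == 0" has been rewritten above as (unsigned) a < 1, i.e.
     cmpl $1, a
   which sets the carry flag exactly when a is zero.  */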
14774 int
14775 ix86_expand_int_movcc (rtx operands[])
14777 enum rtx_code code = GET_CODE (operands[1]), compare_code;
14778 rtx compare_seq, compare_op;
14779 rtx second_test, bypass_test;
14780 enum machine_mode mode = GET_MODE (operands[0]);
14781 bool sign_bit_compare_p = false;
14783 start_sequence ();
14784 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
14785 compare_seq = get_insns ();
14786 end_sequence ();
14788 compare_code = GET_CODE (compare_op);
14790 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
14791 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
14792 sign_bit_compare_p = true;
14794 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
14795 HImode insns, we'd be swallowed in word prefix ops. */
14797 if ((mode != HImode || TARGET_FAST_PREFIX)
14798 && (mode != (TARGET_64BIT ? TImode : DImode))
14799 && CONST_INT_P (operands[2])
14800 && CONST_INT_P (operands[3]))
14802 rtx out = operands[0];
14803 HOST_WIDE_INT ct = INTVAL (operands[2]);
14804 HOST_WIDE_INT cf = INTVAL (operands[3]);
14805 HOST_WIDE_INT diff;
14807 diff = ct - cf;
14808 /* Sign bit compares are better done using shifts than we do by using
14809 sbb. */
14810 if (sign_bit_compare_p
14811 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
14812 ix86_compare_op1, &compare_op))
14814 /* Detect overlap between destination and compare sources. */
14815 rtx tmp = out;
14817 if (!sign_bit_compare_p)
14819 bool fpcmp = false;
14821 compare_code = GET_CODE (compare_op);
14823 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
14824 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
14826 fpcmp = true;
14827 compare_code = ix86_fp_compare_code_to_integer (compare_code);
14830 /* To simplify the rest of the code, restrict to the GEU case.  */
14831 if (compare_code == LTU)
14833 HOST_WIDE_INT tmp = ct;
14834 ct = cf;
14835 cf = tmp;
14836 compare_code = reverse_condition (compare_code);
14837 code = reverse_condition (code);
14839 else
14841 if (fpcmp)
14842 PUT_CODE (compare_op,
14843 reverse_condition_maybe_unordered
14844 (GET_CODE (compare_op)));
14845 else
14846 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
14848 diff = ct - cf;
14850 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
14851 || reg_overlap_mentioned_p (out, ix86_compare_op1))
14852 tmp = gen_reg_rtx (mode);
14854 if (mode == DImode)
14855 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
14856 else
14857 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
14859 else
14861 if (code == GT || code == GE)
14862 code = reverse_condition (code);
14863 else
14865 HOST_WIDE_INT tmp = ct;
14866 ct = cf;
14867 cf = tmp;
14868 diff = ct - cf;
14870 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
14871 ix86_compare_op1, VOIDmode, 0, -1);
14874 if (diff == 1)
14877 * cmpl op0,op1
14878 * sbbl dest,dest
14879 * [addl dest, ct]
14881 * Size 5 - 8.
14883 if (ct)
14884 tmp = expand_simple_binop (mode, PLUS,
14885 tmp, GEN_INT (ct),
14886 copy_rtx (tmp), 1, OPTAB_DIRECT);
14888 else if (cf == -1)
14891 * cmpl op0,op1
14892 * sbbl dest,dest
14893 * orl $ct, dest
14895 * Size 8.
14897 tmp = expand_simple_binop (mode, IOR,
14898 tmp, GEN_INT (ct),
14899 copy_rtx (tmp), 1, OPTAB_DIRECT);
14901 else if (diff == -1 && ct)
14904 * cmpl op0,op1
14905 * sbbl dest,dest
14906 * notl dest
14907 * [addl dest, cf]
14909 * Size 8 - 11.
14911 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
14912 if (cf)
14913 tmp = expand_simple_binop (mode, PLUS,
14914 copy_rtx (tmp), GEN_INT (cf),
14915 copy_rtx (tmp), 1, OPTAB_DIRECT);
14917 else
14920 * cmpl op0,op1
14921 * sbbl dest,dest
14922 * [notl dest]
14923 * andl cf - ct, dest
14924 * [addl dest, ct]
14926 * Size 8 - 11.
14929 if (cf == 0)
14931 cf = ct;
14932 ct = 0;
14933 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
14936 tmp = expand_simple_binop (mode, AND,
14937 copy_rtx (tmp),
14938 gen_int_mode (cf - ct, mode),
14939 copy_rtx (tmp), 1, OPTAB_DIRECT);
14940 if (ct)
14941 tmp = expand_simple_binop (mode, PLUS,
14942 copy_rtx (tmp), GEN_INT (ct),
14943 copy_rtx (tmp), 1, OPTAB_DIRECT);
14946 if (!rtx_equal_p (tmp, out))
14947 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
14949 return 1; /* DONE */
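/* The sbb above materializes the flag as an all-ones/zero mask, e.g.
     cmpl %esi, %edi ; sbbl %eax, %eax
   leaves %eax == -1 if the carry was set and 0 otherwise; the add,
   or, not and and steps then fold in CT and CF as sketched above.  */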
14952 if (diff < 0)
14954 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
14956 HOST_WIDE_INT tmp;
14957 tmp = ct, ct = cf, cf = tmp;
14958 diff = -diff;
14960 if (SCALAR_FLOAT_MODE_P (cmp_mode))
14962 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
14964 /* We may be reversing an unordered compare to a normal compare;
14965 that is not valid in general (we may convert a non-trapping
14966 condition into a trapping one), but on i386 we currently emit
14967 all comparisons unordered.  */
14968 compare_code = reverse_condition_maybe_unordered (compare_code);
14969 code = reverse_condition_maybe_unordered (code);
14971 else
14973 compare_code = reverse_condition (compare_code);
14974 code = reverse_condition (code);
14978 compare_code = UNKNOWN;
14979 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
14980 && CONST_INT_P (ix86_compare_op1))
14982 if (ix86_compare_op1 == const0_rtx
14983 && (code == LT || code == GE))
14984 compare_code = code;
14985 else if (ix86_compare_op1 == constm1_rtx)
14987 if (code == LE)
14988 compare_code = LT;
14989 else if (code == GT)
14990 compare_code = GE;
14994 /* Optimize dest = (op0 < 0) ? -1 : cf. */
14995 if (compare_code != UNKNOWN
14996 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
14997 && (cf == -1 || ct == -1))
14999 /* If lea code below could be used, only optimize
15000 if it results in a 2 insn sequence. */
15002 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15003 || diff == 3 || diff == 5 || diff == 9)
15004 || (compare_code == LT && ct == -1)
15005 || (compare_code == GE && cf == -1))
15008 * notl op1 (if necessary)
15009 * sarl $31, op1
15010 * orl cf, op1
15012 if (ct != -1)
15014 cf = ct;
15015 ct = -1;
15016 code = reverse_condition (code);
15019 out = emit_store_flag (out, code, ix86_compare_op0,
15020 ix86_compare_op1, VOIDmode, 0, -1);
15022 out = expand_simple_binop (mode, IOR,
15023 out, GEN_INT (cf),
15024 out, 1, OPTAB_DIRECT);
15025 if (out != operands[0])
15026 emit_move_insn (operands[0], out);
15028 return 1; /* DONE */
15033 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15034 || diff == 3 || diff == 5 || diff == 9)
15035 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15036 && (mode != DImode
15037 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15040 * xorl dest,dest
15041 * cmpl op1,op2
15042 * setcc dest
15043 * lea cf(dest*(ct-cf)),dest
15045 * Size 14.
15047 * This also catches the degenerate setcc-only case.
15050 rtx tmp;
15051 int nops;
15053 out = emit_store_flag (out, code, ix86_compare_op0,
15054 ix86_compare_op1, VOIDmode, 0, 1);
15056 nops = 0;
15057 /* On x86_64 the lea instruction operates on Pmode, so we need
15058 the arithmetic done in the proper mode to match.  */
15059 if (diff == 1)
15060 tmp = copy_rtx (out);
15061 else
15063 rtx out1;
15064 out1 = copy_rtx (out);
15065 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15066 nops++;
15067 if (diff & 1)
15069 tmp = gen_rtx_PLUS (mode, tmp, out1);
15070 nops++;
15073 if (cf != 0)
15075 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15076 nops++;
15078 if (!rtx_equal_p (tmp, out))
15080 if (nops == 1)
15081 out = force_operand (tmp, copy_rtx (out));
15082 else
15083 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15085 if (!rtx_equal_p (out, operands[0]))
15086 emit_move_insn (operands[0], copy_rtx (out));
15088 return 1; /* DONE */
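/* E.g. "(x == y) ? 5 : 4" takes the diff == 1 path above:
     xorl %eax, %eax ; cmpl y, x ; sete %al ; addl $4, %eax  */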
15092 * General case: Jumpful:
15093 * xorl dest,dest cmpl op1, op2
15094 * cmpl op1, op2 movl ct, dest
15095 * setcc dest jcc 1f
15096 * decl dest movl cf, dest
15097 * andl (cf-ct),dest 1:
15098 * addl ct,dest
15100 * Size 20. Size 14.
15102 * This is reasonably steep, but branch mispredict costs are
15103 * high on modern cpus, so consider failing only if optimizing
15104 * for space.
15107 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15108 && BRANCH_COST (optimize_insn_for_speed_p (),
15109 false) >= 2)
15111 if (cf == 0)
15113 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15115 cf = ct;
15116 ct = 0;
15118 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15120 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15122 /* We may be reversing an unordered compare to a normal compare;
15123 that is not valid in general (we may convert a non-trapping
15124 condition into a trapping one), but on i386 we currently
15125 emit all comparisons unordered.  */
15126 code = reverse_condition_maybe_unordered (code);
15128 else
15130 code = reverse_condition (code);
15131 if (compare_code != UNKNOWN)
15132 compare_code = reverse_condition (compare_code);
15136 if (compare_code != UNKNOWN)
15138 /* notl op1 (if needed)
15139 sarl $31, op1
15140 andl (cf-ct), op1
15141 addl ct, op1
15143 For x < 0 (resp. x <= -1) there will be no notl,
15144 so if possible swap the constants to get rid of the
15145 complement.
15146 True/false will be -1/0 while code below (store flag
15147 followed by decrement) is 0/-1, so the constants need
15148 to be exchanged once more. */
15150 if (compare_code == GE || !cf)
15152 code = reverse_condition (code);
15153 compare_code = LT;
15155 else
15157 HOST_WIDE_INT tmp = cf;
15158 cf = ct;
15159 ct = tmp;
15162 out = emit_store_flag (out, code, ix86_compare_op0,
15163 ix86_compare_op1, VOIDmode, 0, -1);
15165 else
15167 out = emit_store_flag (out, code, ix86_compare_op0,
15168 ix86_compare_op1, VOIDmode, 0, 1);
15170 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15171 copy_rtx (out), 1, OPTAB_DIRECT);
15174 out = expand_simple_binop (mode, AND, copy_rtx (out),
15175 gen_int_mode (cf - ct, mode),
15176 copy_rtx (out), 1, OPTAB_DIRECT);
15177 if (ct)
15178 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15179 copy_rtx (out), 1, OPTAB_DIRECT);
15180 if (!rtx_equal_p (out, operands[0]))
15181 emit_move_insn (operands[0], copy_rtx (out));
15183 return 1; /* DONE */
15187 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15189 /* Try a few things more with specific constants and a variable. */
15191 optab op;
15192 rtx var, orig_out, out, tmp;
15194 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15195 return 0; /* FAIL */
15197 /* If one of the two operands is an interesting constant, load a
15198 constant with the above and mask it in with a logical operation. */
15200 if (CONST_INT_P (operands[2]))
15202 var = operands[3];
15203 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15204 operands[3] = constm1_rtx, op = and_optab;
15205 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15206 operands[3] = const0_rtx, op = ior_optab;
15207 else
15208 return 0; /* FAIL */
15210 else if (CONST_INT_P (operands[3]))
15212 var = operands[2];
15213 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15214 operands[2] = constm1_rtx, op = and_optab;
15215 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15216 operands[2] = const0_rtx, op = ior_optab;
15217 else
15218 return 0; /* FAIL */
15220 else
15221 return 0; /* FAIL */
15223 orig_out = operands[0];
15224 tmp = gen_reg_rtx (mode);
15225 operands[0] = tmp;
15227 /* Recurse to get the constant loaded. */
15228 if (ix86_expand_int_movcc (operands) == 0)
15229 return 0; /* FAIL */
15231 /* Mask in the interesting variable. */
15232 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15233 OPTAB_WIDEN);
15234 if (!rtx_equal_p (out, orig_out))
15235 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15237 return 1; /* DONE */
15241 * For comparison with above,
15243 * movl cf,dest
15244 * movl ct,tmp
15245 * cmpl op1,op2
15246 * cmovcc tmp,dest
15248 * Size 15.
15251 if (! nonimmediate_operand (operands[2], mode))
15252 operands[2] = force_reg (mode, operands[2]);
15253 if (! nonimmediate_operand (operands[3], mode))
15254 operands[3] = force_reg (mode, operands[3]);
15256 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
15258 rtx tmp = gen_reg_rtx (mode);
15259 emit_move_insn (tmp, operands[3]);
15260 operands[3] = tmp;
15262 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
15264 rtx tmp = gen_reg_rtx (mode);
15265 emit_move_insn (tmp, operands[2]);
15266 operands[2] = tmp;
15269 if (! register_operand (operands[2], VOIDmode)
15270 && (mode == QImode
15271 || ! register_operand (operands[3], VOIDmode)))
15272 operands[2] = force_reg (mode, operands[2]);
15274 if (mode == QImode
15275 && ! register_operand (operands[3], VOIDmode))
15276 operands[3] = force_reg (mode, operands[3]);
15278 emit_insn (compare_seq);
15279 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15280 gen_rtx_IF_THEN_ELSE (mode,
15281 compare_op, operands[2],
15282 operands[3])));
15283 if (bypass_test)
15284 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
15285 gen_rtx_IF_THEN_ELSE (mode,
15286 bypass_test,
15287 copy_rtx (operands[3]),
15288 copy_rtx (operands[0]))));
15289 if (second_test)
15290 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
15291 gen_rtx_IF_THEN_ELSE (mode,
15292 second_test,
15293 copy_rtx (operands[2]),
15294 copy_rtx (operands[0]))));
15296 return 1; /* DONE */
15299 /* Swap, force into registers, or otherwise massage the two operands
15300 to an sse comparison with a mask result. Thus we differ a bit from
15301 ix86_prepare_fp_compare_args which expects to produce a flags result.
15303 The DEST operand exists to help determine whether to commute commutative
15304 operators. The POP0/POP1 operands are updated in place. The new
15305 comparison code is returned, or UNKNOWN if not implementable. */
15307 static enum rtx_code
15308 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
15309 rtx *pop0, rtx *pop1)
15311 rtx tmp;
15313 switch (code)
15315 case LTGT:
15316 case UNEQ:
15317 /* We have no LTGT as an operator. We could implement it with
15318 NE & ORDERED, but this requires an extra temporary. It's
15319 not clear that it's worth it. */
15320 return UNKNOWN;
15322 case LT:
15323 case LE:
15324 case UNGT:
15325 case UNGE:
15326 /* These are supported directly. */
15327 break;
15329 case EQ:
15330 case NE:
15331 case UNORDERED:
15332 case ORDERED:
15333 /* For commutative operators, try to canonicalize the destination
15334 operand to be first in the comparison - this helps reload to
15335 avoid extra moves. */
15336 if (!dest || !rtx_equal_p (dest, *pop1))
15337 break;
15338 /* FALLTHRU */
15340 case GE:
15341 case GT:
15342 case UNLE:
15343 case UNLT:
15344 /* These are not supported directly. Swap the comparison operands
15345 to transform into something that is supported. */
15346 tmp = *pop0;
15347 *pop0 = *pop1;
15348 *pop1 = tmp;
15349 code = swap_condition (code);
15350 break;
15352 default:
15353 gcc_unreachable ();
15356 return code;
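/* E.g. GE is rewritten as the swapped LE, since SSE provides
   cmple/cmplt (and their unordered variants) but no direct
   cmpge/cmpgt encodings.  */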
15359 /* Detect conditional moves that exactly match min/max operational
15360 semantics. Note that this is IEEE safe, as long as we don't
15361 interchange the operands.
15363 Returns FALSE if this conditional move doesn't match a MIN/MAX,
15364 and TRUE if the operation is successful and instructions are emitted. */
15366 static bool
15367 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
15368 rtx cmp_op1, rtx if_true, rtx if_false)
15370 enum machine_mode mode;
15371 bool is_min;
15372 rtx tmp;
15374 if (code == LT)
15376 else if (code == UNGE)
15378 tmp = if_true;
15379 if_true = if_false;
15380 if_false = tmp;
15382 else
15383 return false;
15385 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
15386 is_min = true;
15387 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
15388 is_min = false;
15389 else
15390 return false;
15392 mode = GET_MODE (dest);
15394 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
15395 but MODE may be a vector mode and thus not appropriate. */
15396 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
15398 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
15399 rtvec v;
15401 if_true = force_reg (mode, if_true);
15402 v = gen_rtvec (2, if_true, if_false);
15403 tmp = gen_rtx_UNSPEC (mode, v, u);
15405 else
15407 code = is_min ? SMIN : SMAX;
15408 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
15411 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
15412 return true;
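/* E.g. "x < y ? x : y" maps to a single minss/minpd.  These are not
   commutative: if the operands compare unordered the hardware returns
   the second source operand, hence the IEEE caveat above about not
   interchanging the operands.  */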
15415 /* Expand an sse vector comparison. Return the register with the result. */
15417 static rtx
15418 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
15419 rtx op_true, rtx op_false)
15421 enum machine_mode mode = GET_MODE (dest);
15422 rtx x;
15424 cmp_op0 = force_reg (mode, cmp_op0);
15425 if (!nonimmediate_operand (cmp_op1, mode))
15426 cmp_op1 = force_reg (mode, cmp_op1);
15428 if (optimize
15429 || reg_overlap_mentioned_p (dest, op_true)
15430 || reg_overlap_mentioned_p (dest, op_false))
15431 dest = gen_reg_rtx (mode);
15433 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
15434 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15436 return dest;
15439 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
15440 operations. This is used for both scalar and vector conditional moves. */
15442 static void
15443 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
15445 enum machine_mode mode = GET_MODE (dest);
15446 rtx t2, t3, x;
15448 if (op_false == CONST0_RTX (mode))
15450 op_true = force_reg (mode, op_true);
15451 x = gen_rtx_AND (mode, cmp, op_true);
15452 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15454 else if (op_true == CONST0_RTX (mode))
15456 op_false = force_reg (mode, op_false);
15457 x = gen_rtx_NOT (mode, cmp);
15458 x = gen_rtx_AND (mode, x, op_false);
15459 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15461 else if (TARGET_SSE5)
15463 rtx pcmov = gen_rtx_SET (mode, dest,
15464 gen_rtx_IF_THEN_ELSE (mode, cmp,
15465 op_true,
15466 op_false));
15467 emit_insn (pcmov);
15469 else
15471 op_true = force_reg (mode, op_true);
15472 op_false = force_reg (mode, op_false);
15474 t2 = gen_reg_rtx (mode);
15475 if (optimize)
15476 t3 = gen_reg_rtx (mode);
15477 else
15478 t3 = dest;
15480 x = gen_rtx_AND (mode, op_true, cmp);
15481 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
15483 x = gen_rtx_NOT (mode, cmp);
15484 x = gen_rtx_AND (mode, x, op_false);
15485 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
15487 x = gen_rtx_IOR (mode, t3, t2);
15488 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
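/* Without SSE5's pcmov the blend is the classic three-op mask dance,
   e.g. for V4SFmode:
     t2 = cmp & op_true ; t3 = ~cmp & op_false ; dest = t3 | t2
   i.e. andps, andnps and orps on an all-ones/all-zeros compare mask.  */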
15492 /* Expand a floating-point conditional move. Return true if successful. */
15494 int
15495 ix86_expand_fp_movcc (rtx operands[])
15497 enum machine_mode mode = GET_MODE (operands[0]);
15498 enum rtx_code code = GET_CODE (operands[1]);
15499 rtx tmp, compare_op, second_test, bypass_test;
15501 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
15503 enum machine_mode cmode;
15505 /* Since we've no cmove for sse registers, don't force bad register
15506 allocation just to gain access to it. Deny movcc when the
15507 comparison mode doesn't match the move mode. */
15508 cmode = GET_MODE (ix86_compare_op0);
15509 if (cmode == VOIDmode)
15510 cmode = GET_MODE (ix86_compare_op1);
15511 if (cmode != mode)
15512 return 0;
15514 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
15515 &ix86_compare_op0,
15516 &ix86_compare_op1);
15517 if (code == UNKNOWN)
15518 return 0;
15520 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
15521 ix86_compare_op1, operands[2],
15522 operands[3]))
15523 return 1;
15525 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
15526 ix86_compare_op1, operands[2], operands[3]);
15527 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
15528 return 1;
15531 /* The floating point conditional move instructions don't directly
15532 support conditions resulting from a signed integer comparison. */
15534 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
15539 if (!fcmov_comparison_operator (compare_op, VOIDmode))
15541 gcc_assert (!second_test && !bypass_test);
15542 tmp = gen_reg_rtx (QImode);
15543 ix86_expand_setcc (code, tmp);
15544 code = NE;
15545 ix86_compare_op0 = tmp;
15546 ix86_compare_op1 = const0_rtx;
15547 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
15549 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
15551 tmp = gen_reg_rtx (mode);
15552 emit_move_insn (tmp, operands[3]);
15553 operands[3] = tmp;
15555 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
15557 tmp = gen_reg_rtx (mode);
15558 emit_move_insn (tmp, operands[2]);
15559 operands[2] = tmp;
15562 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15563 gen_rtx_IF_THEN_ELSE (mode, compare_op,
15564 operands[2], operands[3])));
15565 if (bypass_test)
15566 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15567 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
15568 operands[3], operands[0])));
15569 if (second_test)
15570 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15571 gen_rtx_IF_THEN_ELSE (mode, second_test,
15572 operands[2], operands[0])));
15574 return 1;
15577 /* Expand a floating-point vector conditional move; a vcond operation
15578 rather than a movcc operation. */
15580 bool
15581 ix86_expand_fp_vcond (rtx operands[])
15583 enum rtx_code code = GET_CODE (operands[3]);
15584 rtx cmp;
15586 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
15587 &operands[4], &operands[5]);
15588 if (code == UNKNOWN)
15589 return false;
15591 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
15592 operands[5], operands[1], operands[2]))
15593 return true;
15595 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
15596 operands[1], operands[2]);
15597 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
15598 return true;
15601 /* Expand a signed/unsigned integral vector conditional move. */
15603 bool
15604 ix86_expand_int_vcond (rtx operands[])
15606 enum machine_mode mode = GET_MODE (operands[0]);
15607 enum rtx_code code = GET_CODE (operands[3]);
15608 bool negate = false;
15609 rtx x, cop0, cop1;
15611 cop0 = operands[4];
15612 cop1 = operands[5];
15614 /* SSE5 supports all of the comparisons on all vector int types. */
15615 if (!TARGET_SSE5)
15617 /* Canonicalize the comparison to EQ, GT, GTU. */
15618 switch (code)
15620 case EQ:
15621 case GT:
15622 case GTU:
15623 break;
15625 case NE:
15626 case LE:
15627 case LEU:
15628 code = reverse_condition (code);
15629 negate = true;
15630 break;
15632 case GE:
15633 case GEU:
15634 code = reverse_condition (code);
15635 negate = true;
15636 /* FALLTHRU */
15638 case LT:
15639 case LTU:
15640 code = swap_condition (code);
15641 x = cop0, cop0 = cop1, cop1 = x;
15642 break;
15644 default:
15645 gcc_unreachable ();
15648 /* Only SSE4.1/SSE4.2 supports V2DImode. */
15649 if (mode == V2DImode)
15651 switch (code)
15653 case EQ:
15654 /* SSE4.1 supports EQ. */
15655 if (!TARGET_SSE4_1)
15656 return false;
15657 break;
15659 case GT:
15660 case GTU:
15661 /* SSE4.2 supports GT/GTU. */
15662 if (!TARGET_SSE4_2)
15663 return false;
15664 break;
15666 default:
15667 gcc_unreachable ();
15671 /* Unsigned parallel compare is not supported by the hardware. Play some
15672 tricks to turn this into a signed comparison against 0. */
15673 if (code == GTU)
15675 cop0 = force_reg (mode, cop0);
15677 switch (mode)
15679 case V4SImode:
15680 case V2DImode:
15682 rtx t1, t2, mask;
15684 /* Perform a parallel modulo subtraction. */
15685 t1 = gen_reg_rtx (mode);
15686 emit_insn ((mode == V4SImode
15687 ? gen_subv4si3
15688 : gen_subv2di3) (t1, cop0, cop1));
15690 /* Extract the original sign bit of op0. */
15691 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
15692 true, false);
15693 t2 = gen_reg_rtx (mode);
15694 emit_insn ((mode == V4SImode
15695 ? gen_andv4si3
15696 : gen_andv2di3) (t2, cop0, mask));
15698 /* XOR it back into the result of the subtraction. This results
15699 in the sign bit set iff we saw unsigned underflow. */
15700 x = gen_reg_rtx (mode);
15701 emit_insn ((mode == V4SImode
15702 ? gen_xorv4si3
15703 : gen_xorv2di3) (x, t1, t2));
15705 code = GT;
15707 break;
15709 case V16QImode:
15710 case V8HImode:
15711 /* Perform a parallel unsigned saturating subtraction. */
15712 x = gen_reg_rtx (mode);
15713 emit_insn (gen_rtx_SET (VOIDmode, x,
15714 gen_rtx_US_MINUS (mode, cop0, cop1)));
15716 code = EQ;
15717 negate = !negate;
15718 break;
15720 default:
15721 gcc_unreachable ();
15724 cop0 = x;
15725 cop1 = CONST0_RTX (mode);
15729 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
15730 operands[1+negate], operands[2-negate]);
15732 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
15733 operands[2-negate]);
15734 return true;
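/* Editor's sketch (not part of GCC): for the V16QImode/V8HImode case
   above, A >u B is rewritten as !(SAT_SUB (A, B) == 0), since an
   unsigned saturating subtraction is nonzero exactly when A > B.
   A minimal scalar C model: */
#include <stdint.h>

static uint8_t
sat_sub_u8 (uint8_t a, uint8_t b)
{
  int d = (int) a - (int) b;
  return (uint8_t) (d < 0 ? 0 : d);   /* PSUBUSB semantics */
}

static int
gtu_u8 (uint8_t a, uint8_t b)
{
  return sat_sub_u8 (a, b) != 0;      /* matches code = EQ, negated */
}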
15737 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
15738 true if we should do zero extension, else sign extension. HIGH_P is
15739 true if we want the N/2 high elements, else the low elements. */
15741 void
15742 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15744 enum machine_mode imode = GET_MODE (operands[1]);
15745 rtx (*unpack)(rtx, rtx, rtx);
15746 rtx se, dest;
15748 switch (imode)
15750 case V16QImode:
15751 if (high_p)
15752 unpack = gen_vec_interleave_highv16qi;
15753 else
15754 unpack = gen_vec_interleave_lowv16qi;
15755 break;
15756 case V8HImode:
15757 if (high_p)
15758 unpack = gen_vec_interleave_highv8hi;
15759 else
15760 unpack = gen_vec_interleave_lowv8hi;
15761 break;
15762 case V4SImode:
15763 if (high_p)
15764 unpack = gen_vec_interleave_highv4si;
15765 else
15766 unpack = gen_vec_interleave_lowv4si;
15767 break;
15768 default:
15769 gcc_unreachable ();
15772 dest = gen_lowpart (imode, operands[0]);
15774 if (unsigned_p)
15775 se = force_reg (imode, CONST0_RTX (imode));
15776 else
15777 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
15778 operands[1], pc_rtx, pc_rtx);
15780 emit_insn (unpack (dest, operands[1], se));
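/* Editor's sketch (not part of GCC): the unpack above widens each
   element by interleaving it with zero (unsigned) or with a mask of
   its sign bits, computed as 0 > x per element (signed). A scalar
   little-endian model for widening int8_t to int16_t: */
#include <stdint.h>

static int16_t
widen_element (int8_t x, int unsigned_p)
{
  uint8_t se = unsigned_p ? 0 : (0 > x ? 0xff : 0x00); /* sign mask */
  return (int16_t) (((uint16_t) se << 8) | (uint8_t) x);
}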
15783 /* This function performs the same task as ix86_expand_sse_unpack,
15784 but with SSE4.1 instructions. */
15786 void
15787 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15789 enum machine_mode imode = GET_MODE (operands[1]);
15790 rtx (*unpack)(rtx, rtx);
15791 rtx src, dest;
15793 switch (imode)
15795 case V16QImode:
15796 if (unsigned_p)
15797 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
15798 else
15799 unpack = gen_sse4_1_extendv8qiv8hi2;
15800 break;
15801 case V8HImode:
15802 if (unsigned_p)
15803 unpack = gen_sse4_1_zero_extendv4hiv4si2;
15804 else
15805 unpack = gen_sse4_1_extendv4hiv4si2;
15806 break;
15807 case V4SImode:
15808 if (unsigned_p)
15809 unpack = gen_sse4_1_zero_extendv2siv2di2;
15810 else
15811 unpack = gen_sse4_1_extendv2siv2di2;
15812 break;
15813 default:
15814 gcc_unreachable ();
15817 dest = operands[0];
15818 if (high_p)
15820 /* Shift higher 8 bytes to lower 8 bytes. */
15821 src = gen_reg_rtx (imode);
15822 emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, src),
15823 gen_lowpart (TImode, operands[1]),
15824 GEN_INT (64)));
15826 else
15827 src = operands[1];
15829 emit_insn (unpack (dest, src));
15832 /* This function performs the same task as ix86_expand_sse_unpack,
15833 but with SSE5 instructions. */
15835 void
15836 ix86_expand_sse5_unpack (rtx operands[2], bool unsigned_p, bool high_p)
15838 enum machine_mode imode = GET_MODE (operands[1]);
15839 int pperm_bytes[16];
15840 int i;
15841 int h = (high_p) ? 8 : 0;
15842 int h2;
15843 int sign_extend;
15844 rtvec v = rtvec_alloc (16);
15845 rtvec vs;
15846 rtx x, p;
15847 rtx op0 = operands[0], op1 = operands[1];
15849 switch (imode)
15851 case V16QImode:
15852 vs = rtvec_alloc (8);
15853 h2 = (high_p) ? 8 : 0;
15854 for (i = 0; i < 8; i++)
15856 pperm_bytes[2*i+0] = PPERM_SRC | PPERM_SRC2 | i | h;
15857 pperm_bytes[2*i+1] = ((unsigned_p)
15858 ? PPERM_ZERO
15859 : PPERM_SIGN | PPERM_SRC2 | i | h);
15862 for (i = 0; i < 16; i++)
15863 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15865 for (i = 0; i < 8; i++)
15866 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15868 p = gen_rtx_PARALLEL (VOIDmode, vs);
15869 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15870 if (unsigned_p)
15871 emit_insn (gen_sse5_pperm_zero_v16qi_v8hi (op0, op1, p, x));
15872 else
15873 emit_insn (gen_sse5_pperm_sign_v16qi_v8hi (op0, op1, p, x));
15874 break;
15876 case V8HImode:
15877 vs = rtvec_alloc (4);
15878 h2 = (high_p) ? 4 : 0;
15879 for (i = 0; i < 4; i++)
15881 sign_extend = ((unsigned_p)
15882 ? PPERM_ZERO
15883 : PPERM_SIGN | PPERM_SRC2 | ((2*i) + 1 + h));
15884 pperm_bytes[4*i+0] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 0 + h);
15885 pperm_bytes[4*i+1] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 1 + h);
15886 pperm_bytes[4*i+2] = sign_extend;
15887 pperm_bytes[4*i+3] = sign_extend;
15890 for (i = 0; i < 16; i++)
15891 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15893 for (i = 0; i < 4; i++)
15894 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15896 p = gen_rtx_PARALLEL (VOIDmode, vs);
15897 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15898 if (unsigned_p)
15899 emit_insn (gen_sse5_pperm_zero_v8hi_v4si (op0, op1, p, x));
15900 else
15901 emit_insn (gen_sse5_pperm_sign_v8hi_v4si (op0, op1, p, x));
15902 break;
15904 case V4SImode:
15905 vs = rtvec_alloc (2);
15906 h2 = (high_p) ? 2 : 0;
15907 for (i = 0; i < 2; i++)
15909 sign_extend = ((unsigned_p)
15910 ? PPERM_ZERO
15911 : PPERM_SIGN | PPERM_SRC2 | ((4*i) + 3 + h));
15912 pperm_bytes[8*i+0] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 0 + h);
15913 pperm_bytes[8*i+1] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 1 + h);
15914 pperm_bytes[8*i+2] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 2 + h);
15915 pperm_bytes[8*i+3] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 3 + h);
15916 pperm_bytes[8*i+4] = sign_extend;
15917 pperm_bytes[8*i+5] = sign_extend;
15918 pperm_bytes[8*i+6] = sign_extend;
15919 pperm_bytes[8*i+7] = sign_extend;
15922 for (i = 0; i < 16; i++)
15923 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15925 for (i = 0; i < 2; i++)
15926 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
15928 p = gen_rtx_PARALLEL (VOIDmode, vs);
15929 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15930 if (unsigned_p)
15931 emit_insn (gen_sse5_pperm_zero_v4si_v2di (op0, op1, p, x));
15932 else
15933 emit_insn (gen_sse5_pperm_sign_v4si_v2di (op0, op1, p, x));
15934 break;
15936 default:
15937 gcc_unreachable ();
15940 return;
15943 /* Pack the high bits from OPERANDS[1] and low bits from OPERANDS[2] into the
15944 next narrower integer vector type. */
15945 void
15946 ix86_expand_sse5_pack (rtx operands[3])
15948 enum machine_mode imode = GET_MODE (operands[0]);
15949 int pperm_bytes[16];
15950 int i;
15951 rtvec v = rtvec_alloc (16);
15952 rtx x;
15953 rtx op0 = operands[0];
15954 rtx op1 = operands[1];
15955 rtx op2 = operands[2];
15957 switch (imode)
15959 case V16QImode:
15960 for (i = 0; i < 8; i++)
15962 pperm_bytes[i+0] = PPERM_SRC | PPERM_SRC1 | (i*2);
15963 pperm_bytes[i+8] = PPERM_SRC | PPERM_SRC2 | (i*2);
15966 for (i = 0; i < 16; i++)
15967 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15969 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15970 emit_insn (gen_sse5_pperm_pack_v8hi_v16qi (op0, op1, op2, x));
15971 break;
15973 case V8HImode:
15974 for (i = 0; i < 4; i++)
15976 pperm_bytes[(2*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 0);
15977 pperm_bytes[(2*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 1);
15978 pperm_bytes[(2*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 0);
15979 pperm_bytes[(2*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 1);
15982 for (i = 0; i < 16; i++)
15983 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
15985 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
15986 emit_insn (gen_sse5_pperm_pack_v4si_v8hi (op0, op1, op2, x));
15987 break;
15989 case V4SImode:
15990 for (i = 0; i < 2; i++)
15992 pperm_bytes[(4*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 0);
15993 pperm_bytes[(4*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 1);
15994 pperm_bytes[(4*i)+2] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 2);
15995 pperm_bytes[(4*i)+3] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 3);
15996 pperm_bytes[(4*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 0);
15997 pperm_bytes[(4*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 1);
15998 pperm_bytes[(4*i)+10] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 2);
15999 pperm_bytes[(4*i)+11] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 3);
16002 for (i = 0; i < 16; i++)
16003 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
16005 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
16006 emit_insn (gen_sse5_pperm_pack_v2di_v4si (op0, op1, op2, x));
16007 break;
16009 default:
16010 gcc_unreachable ();
16013 return;
16016 /* Expand conditional increment or decrement using adc/sbb instructions.
16017 The default case using setcc followed by a conditional move can be
16018 done by generic code. */
16019 int
16020 ix86_expand_int_addcc (rtx operands[])
16022 enum rtx_code code = GET_CODE (operands[1]);
16023 rtx compare_op;
16024 rtx val = const0_rtx;
16025 bool fpcmp = false;
16026 enum machine_mode mode = GET_MODE (operands[0]);
16028 if (operands[3] != const1_rtx
16029 && operands[3] != constm1_rtx)
16030 return 0;
16031 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16032 ix86_compare_op1, &compare_op))
16033 return 0;
16034 code = GET_CODE (compare_op);
16036 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
16037 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
16039 fpcmp = true;
16040 code = ix86_fp_compare_code_to_integer (code);
16043 if (code != LTU)
16045 val = constm1_rtx;
16046 if (fpcmp)
16047 PUT_CODE (compare_op,
16048 reverse_condition_maybe_unordered
16049 (GET_CODE (compare_op)));
16050 else
16051 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16053 PUT_MODE (compare_op, mode);
16055 /* Construct either adc or sbb insn. */
16056 if ((code == LTU) == (operands[3] == constm1_rtx))
16058 switch (GET_MODE (operands[0]))
16060 case QImode:
16061 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
16062 break;
16063 case HImode:
16064 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
16065 break;
16066 case SImode:
16067 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
16068 break;
16069 case DImode:
16070 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
16071 break;
16072 default:
16073 gcc_unreachable ();
16076 else
16078 switch (GET_MODE (operands[0]))
16080 case QImode:
16081 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
16082 break;
16083 case HImode:
16084 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
16085 break;
16086 case SImode:
16087 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
16088 break;
16089 case DImode:
16090 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
16091 break;
16092 default:
16093 gcc_unreachable ();
16096 return 1; /* DONE */
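/* Editor's sketch (not part of GCC): the expander above targets
   sources such as the hypothetical loop below, where the unsigned
   compare leaves its result in the carry flag and the conditional
   increment becomes a single "adc count, 0" with no branch: */
static unsigned int
count_below (const unsigned int *p, int n, unsigned int limit)
{
  unsigned int count = 0;
  int i;
  for (i = 0; i < n; i++)
    if (p[i] < limit)   /* cmp sets CF for unsigned less-than */
      count++;          /* adc count, 0 adds the carry */
  return count;
}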
16100 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16101 works for floating point parameters and non-offsettable memories.
16102 For pushes, it returns just stack offsets; the values will be saved
16103 in the right order. At most four parts are generated. */
16105 static int
16106 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16108 int size;
16110 if (!TARGET_64BIT)
16111 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16112 else
16113 size = (GET_MODE_SIZE (mode) + 4) / 8;
16115 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16116 gcc_assert (size >= 2 && size <= 4);
16118 /* Optimize constant pool references to immediates. This is used by fp
16119 moves, which force all constants to memory to allow combining. */
16120 if (MEM_P (operand) && MEM_READONLY_P (operand))
16122 rtx tmp = maybe_get_pool_constant (operand);
16123 if (tmp)
16124 operand = tmp;
16127 if (MEM_P (operand) && !offsettable_memref_p (operand))
16129 /* The only non-offsettable memories we handle are pushes. */
16130 int ok = push_operand (operand, VOIDmode);
16132 gcc_assert (ok);
16134 operand = copy_rtx (operand);
16135 PUT_MODE (operand, Pmode);
16136 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16137 return size;
16140 if (GET_CODE (operand) == CONST_VECTOR)
16142 enum machine_mode imode = int_mode_for_mode (mode);
16143 /* Caution: if we looked through a constant pool memory above,
16144 the operand may actually have a different mode now. That's
16145 ok, since we want to pun this all the way back to an integer. */
16146 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16147 gcc_assert (operand != NULL);
16148 mode = imode;
16151 if (!TARGET_64BIT)
16153 if (mode == DImode)
16154 split_di (&operand, 1, &parts[0], &parts[1]);
16155 else
16157 int i;
16159 if (REG_P (operand))
16161 gcc_assert (reload_completed);
16162 for (i = 0; i < size; i++)
16163 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16165 else if (offsettable_memref_p (operand))
16167 operand = adjust_address (operand, SImode, 0);
16168 parts[0] = operand;
16169 for (i = 1; i < size; i++)
16170 parts[i] = adjust_address (operand, SImode, 4 * i);
16172 else if (GET_CODE (operand) == CONST_DOUBLE)
16174 REAL_VALUE_TYPE r;
16175 long l[4];
16177 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16178 switch (mode)
16180 case TFmode:
16181 real_to_target (l, &r, mode);
16182 parts[3] = gen_int_mode (l[3], SImode);
16183 parts[2] = gen_int_mode (l[2], SImode);
16184 break;
16185 case XFmode:
16186 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16187 parts[2] = gen_int_mode (l[2], SImode);
16188 break;
16189 case DFmode:
16190 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16191 break;
16192 default:
16193 gcc_unreachable ();
16195 parts[1] = gen_int_mode (l[1], SImode);
16196 parts[0] = gen_int_mode (l[0], SImode);
16198 else
16199 gcc_unreachable ();
16202 else
16204 if (mode == TImode)
16205 split_ti (&operand, 1, &parts[0], &parts[1]);
16206 if (mode == XFmode || mode == TFmode)
16208 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
16209 if (REG_P (operand))
16211 gcc_assert (reload_completed);
16212 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16213 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16215 else if (offsettable_memref_p (operand))
16217 operand = adjust_address (operand, DImode, 0);
16218 parts[0] = operand;
16219 parts[1] = adjust_address (operand, upper_mode, 8);
16221 else if (GET_CODE (operand) == CONST_DOUBLE)
16223 REAL_VALUE_TYPE r;
16224 long l[4];
16226 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16227 real_to_target (l, &r, mode);
16229 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16230 if (HOST_BITS_PER_WIDE_INT >= 64)
16231 parts[0]
16232 = gen_int_mode
16233 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16234 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16235 DImode);
16236 else
16237 parts[0] = immed_double_const (l[0], l[1], DImode);
16239 if (upper_mode == SImode)
16240 parts[1] = gen_int_mode (l[2], SImode);
16241 else if (HOST_BITS_PER_WIDE_INT >= 64)
16242 parts[1]
16243 = gen_int_mode
16244 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16245 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16246 DImode);
16247 else
16248 parts[1] = immed_double_const (l[2], l[3], DImode);
16250 else
16251 gcc_unreachable ();
16255 return size;
16258 /* Emit insns to perform a move or push of DI, DF, XF, and TF values;
16259 all required insns are emitted directly. Operands 2 and up are used
16260 as temporaries holding the destination and source parts in the
16261 correct order. */
16263 void
16264 ix86_split_long_move (rtx operands[])
16266 rtx part[2][4];
16267 int nparts, i, j;
16268 int push = 0;
16269 int collisions = 0;
16270 enum machine_mode mode = GET_MODE (operands[0]);
16271 bool collisionparts[4];
16273 /* The DFmode expanders may ask us to move a double.
16274 For a 64-bit target this is a single move. By hiding that fact
16275 here we simplify the i386.md splitters. */
16276 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16278 /* Optimize constant pool references to immediates. This is used by
16279 fp moves, which force all constants to memory to allow combining. */
16281 if (MEM_P (operands[1])
16282 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16283 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16284 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16285 if (push_operand (operands[0], VOIDmode))
16287 operands[0] = copy_rtx (operands[0]);
16288 PUT_MODE (operands[0], Pmode);
16290 else
16291 operands[0] = gen_lowpart (DImode, operands[0]);
16292 operands[1] = gen_lowpart (DImode, operands[1]);
16293 emit_move_insn (operands[0], operands[1]);
16294 return;
16297 /* The only non-offsettable memory we handle is a push. */
16298 if (push_operand (operands[0], VOIDmode))
16299 push = 1;
16300 else
16301 gcc_assert (!MEM_P (operands[0])
16302 || offsettable_memref_p (operands[0]));
16304 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16305 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16307 /* When emitting a push, take care with source operands on the stack. */
16308 if (push && MEM_P (operands[1])
16309 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16310 for (i = 0; i < nparts - 1; i++)
16311 part[1][i] = change_address (part[1][i],
16312 GET_MODE (part[1][i]),
16313 XEXP (part[1][i + 1], 0));
16315 /* We need to do copy in the right order in case an address register
16316 of the source overlaps the destination. */
16317 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16319 rtx tmp;
16321 for (i = 0; i < nparts; i++)
16323 collisionparts[i]
16324 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16325 if (collisionparts[i])
16326 collisions++;
16329 /* Collision in the middle part can be handled by reordering. */
16330 if (collisions == 1 && nparts == 3 && collisionparts [1])
16332 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16333 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16335 else if (collisions == 1
16336 && nparts == 4
16337 && (collisionparts [1] || collisionparts [2]))
16339 if (collisionparts [1])
16341 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16342 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16344 else
16346 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16347 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16351 /* If there are more collisions, we can't handle it by reordering.
16352 Do an lea to the last part and use only one colliding move. */
16353 else if (collisions > 1)
16355 rtx base;
16357 collisions = 1;
16359 base = part[0][nparts - 1];
16361 /* Handle the case when the last part isn't valid for lea.
16362 Happens in 64-bit mode storing the 12-byte XFmode. */
16363 if (GET_MODE (base) != Pmode)
16364 base = gen_rtx_REG (Pmode, REGNO (base));
16366 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16367 part[1][0] = replace_equiv_address (part[1][0], base);
16368 for (i = 1; i < nparts; i++)
16370 tmp = plus_constant (base, UNITS_PER_WORD * i);
16371 part[1][i] = replace_equiv_address (part[1][i], tmp);
16376 if (push)
16378 if (!TARGET_64BIT)
16380 if (nparts == 3)
16382 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16383 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
16384 emit_move_insn (part[0][2], part[1][2]);
16386 else if (nparts == 4)
16388 emit_move_insn (part[0][3], part[1][3]);
16389 emit_move_insn (part[0][2], part[1][2]);
16392 else
16394 /* In 64-bit mode we don't have a 32-bit push available. If this is a
16395 register, that is OK - we will just use the larger counterpart. We
16396 also retype memory - these retypings come from an attempt to avoid a
16397 REX prefix when moving the second half of a TFmode value. */
16398 if (GET_MODE (part[1][1]) == SImode)
16400 switch (GET_CODE (part[1][1]))
16402 case MEM:
16403 part[1][1] = adjust_address (part[1][1], DImode, 0);
16404 break;
16406 case REG:
16407 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16408 break;
16410 default:
16411 gcc_unreachable ();
16414 if (GET_MODE (part[1][0]) == SImode)
16415 part[1][0] = part[1][1];
16418 emit_move_insn (part[0][1], part[1][1]);
16419 emit_move_insn (part[0][0], part[1][0]);
16420 return;
16423 /* Choose correct order to not overwrite the source before it is copied. */
16424 if ((REG_P (part[0][0])
16425 && REG_P (part[1][1])
16426 && (REGNO (part[0][0]) == REGNO (part[1][1])
16427 || (nparts == 3
16428 && REGNO (part[0][0]) == REGNO (part[1][2]))
16429 || (nparts == 4
16430 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16431 || (collisions > 0
16432 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16434 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16436 operands[2 + i] = part[0][j];
16437 operands[6 + i] = part[1][j];
16440 else
16442 for (i = 0; i < nparts; i++)
16444 operands[2 + i] = part[0][i];
16445 operands[6 + i] = part[1][i];
16449 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16450 if (optimize_insn_for_size_p ())
16452 for (j = 0; j < nparts - 1; j++)
16453 if (CONST_INT_P (operands[6 + j])
16454 && operands[6 + j] != const0_rtx
16455 && REG_P (operands[2 + j]))
16456 for (i = j; i < nparts - 1; i++)
16457 if (CONST_INT_P (operands[7 + i])
16458 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16459 operands[7 + i] = operands[2 + j];
16462 for (i = 0; i < nparts; i++)
16463 emit_move_insn (operands[2 + i], operands[6 + i]);
16465 return;
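/* Editor's note (not part of GCC): the collision handling above
   matters for cases such as a DImode load on ia32 whose destination
   register pair overlaps the source address register; e.g. with base
   %eax and destination %edx:%eax, the parts must be moved as
   "movl 4(%eax), %edx" before "movl (%eax), %eax", since the second
   move clobbers the base. */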
16468 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16469 left shift by a constant, either using a single shift or
16470 a sequence of add instructions. */
16472 static void
16473 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16475 if (count == 1)
16477 emit_insn ((mode == DImode
16478 ? gen_addsi3
16479 : gen_adddi3) (operand, operand, operand));
16481 else if (!optimize_insn_for_size_p ()
16482 && count * ix86_cost->add <= ix86_cost->shift_const)
16484 int i;
16485 for (i=0; i<count; i++)
16487 emit_insn ((mode == DImode
16488 ? gen_addsi3
16489 : gen_adddi3) (operand, operand, operand));
16492 else
16493 emit_insn ((mode == DImode
16494 ? gen_ashlsi3
16495 : gen_ashldi3) (operand, operand, GEN_INT (count)));
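/* Editor's sketch (not part of GCC): the helper above lowers a small
   constant left shift to repeated adds when COUNT additions cost no
   more than one constant shift, as each add doubles the value: */
static unsigned int
shl_by_adds (unsigned int x, int count)
{
  while (count-- > 0)
    x += x;   /* one doubling per add == shift left by one */
  return x;
}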
16498 void
16499 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
16501 rtx low[2], high[2];
16502 int count;
16503 const int single_width = mode == DImode ? 32 : 64;
16505 if (CONST_INT_P (operands[2]))
16507 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16508 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16510 if (count >= single_width)
16512 emit_move_insn (high[0], low[1]);
16513 emit_move_insn (low[0], const0_rtx);
16515 if (count > single_width)
16516 ix86_expand_ashl_const (high[0], count - single_width, mode);
16518 else
16520 if (!rtx_equal_p (operands[0], operands[1]))
16521 emit_move_insn (operands[0], operands[1]);
16522 emit_insn ((mode == DImode
16523 ? gen_x86_shld
16524 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
16525 ix86_expand_ashl_const (low[0], count, mode);
16527 return;
16530 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16532 if (operands[1] == const1_rtx)
16534 /* Assuming we've chosen QImode-capable registers, 1 << N can be
16535 done with two 32/64-bit shifts, no branches and no cmoves. */
16536 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
16538 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
16540 ix86_expand_clear (low[0]);
16541 ix86_expand_clear (high[0]);
16542 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
16544 d = gen_lowpart (QImode, low[0]);
16545 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16546 s = gen_rtx_EQ (QImode, flags, const0_rtx);
16547 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16549 d = gen_lowpart (QImode, high[0]);
16550 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16551 s = gen_rtx_NE (QImode, flags, const0_rtx);
16552 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16555 /* Otherwise, we can get the same results by manually performing
16556 a bit extract operation on bit 5/6, and then performing the two
16557 shifts. The two methods of getting 0/1 into low/high are exactly
16558 the same size. Avoiding the shift in the bit extract case helps
16559 pentium4 a bit; no one else seems to care much either way. */
16560 else
16562 rtx x;
16564 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
16565 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
16566 else
16567 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
16568 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
16570 emit_insn ((mode == DImode
16571 ? gen_lshrsi3
16572 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
16573 emit_insn ((mode == DImode
16574 ? gen_andsi3
16575 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
16576 emit_move_insn (low[0], high[0]);
16577 emit_insn ((mode == DImode
16578 ? gen_xorsi3
16579 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
16582 emit_insn ((mode == DImode
16583 ? gen_ashlsi3
16584 : gen_ashldi3) (low[0], low[0], operands[2]));
16585 emit_insn ((mode == DImode
16586 ? gen_ashlsi3
16587 : gen_ashldi3) (high[0], high[0], operands[2]));
16588 return;
16591 if (operands[1] == constm1_rtx)
16593 /* For -1 << N, we can avoid the shld instruction, because we
16594 know that we're shifting 0...31/63 ones into a -1. */
16595 emit_move_insn (low[0], constm1_rtx);
16596 if (optimize_insn_for_size_p ())
16597 emit_move_insn (high[0], low[0]);
16598 else
16599 emit_move_insn (high[0], constm1_rtx);
16601 else
16603 if (!rtx_equal_p (operands[0], operands[1]))
16604 emit_move_insn (operands[0], operands[1]);
16606 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16607 emit_insn ((mode == DImode
16608 ? gen_x86_shld
16609 : gen_x86_64_shld) (high[0], low[0], operands[2]));
16612 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
16614 if (TARGET_CMOVE && scratch)
16616 ix86_expand_clear (scratch);
16617 emit_insn ((mode == DImode
16618 ? gen_x86_shift_adj_1
16619 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
16620 scratch));
16622 else
16623 emit_insn ((mode == DImode
16624 ? gen_x86_shift_adj_2
16625 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
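/* Editor's sketch (not part of GCC): a C model of the double-word
   left shift synthesized above for the 32-bit DImode case - an
   shld/shl pair for counts below 32 plus an adjustment (cmove or
   branch) when bit 5 of the count is set: */
#include <stdint.h>

static void
dw_shl (uint32_t *lo, uint32_t *hi, unsigned int count)
{
  count &= 63;
  if (count >= 32)
    {
      *hi = *lo << (count - 32);   /* low word moves into high */
      *lo = 0;
    }
  else if (count > 0)
    {
      *hi = (*hi << count) | (*lo >> (32 - count));   /* shld */
      *lo <<= count;                                  /* shl */
    }
}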
16628 void
16629 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
16631 rtx low[2], high[2];
16632 int count;
16633 const int single_width = mode == DImode ? 32 : 64;
16635 if (CONST_INT_P (operands[2]))
16637 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16638 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16640 if (count == single_width * 2 - 1)
16642 emit_move_insn (high[0], high[1]);
16643 emit_insn ((mode == DImode
16644 ? gen_ashrsi3
16645 : gen_ashrdi3) (high[0], high[0],
16646 GEN_INT (single_width - 1)));
16647 emit_move_insn (low[0], high[0]);
16650 else if (count >= single_width)
16652 emit_move_insn (low[0], high[1]);
16653 emit_move_insn (high[0], low[0]);
16654 emit_insn ((mode == DImode
16655 ? gen_ashrsi3
16656 : gen_ashrdi3) (high[0], high[0],
16657 GEN_INT (single_width - 1)));
16658 if (count > single_width)
16659 emit_insn ((mode == DImode
16660 ? gen_ashrsi3
16661 : gen_ashrdi3) (low[0], low[0],
16662 GEN_INT (count - single_width)));
16664 else
16666 if (!rtx_equal_p (operands[0], operands[1]))
16667 emit_move_insn (operands[0], operands[1]);
16668 emit_insn ((mode == DImode
16669 ? gen_x86_shrd
16670 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
16671 emit_insn ((mode == DImode
16672 ? gen_ashrsi3
16673 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
16676 else
16678 if (!rtx_equal_p (operands[0], operands[1]))
16679 emit_move_insn (operands[0], operands[1]);
16681 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16683 emit_insn ((mode == DImode
16684 ? gen_x86_shrd
16685 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
16686 emit_insn ((mode == DImode
16687 ? gen_ashrsi3
16688 : gen_ashrdi3) (high[0], high[0], operands[2]));
16690 if (TARGET_CMOVE && scratch)
16692 emit_move_insn (scratch, high[0]);
16693 emit_insn ((mode == DImode
16694 ? gen_ashrsi3
16695 : gen_ashrdi3) (scratch, scratch,
16696 GEN_INT (single_width - 1)));
16697 emit_insn ((mode == DImode
16698 ? gen_x86_shift_adj_1
16699 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
16700 scratch));
16702 else
16703 emit_insn ((mode == DImode
16704 ? gen_x86_shift_adj_3
16705 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
16709 void
16710 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
16712 rtx low[2], high[2];
16713 int count;
16714 const int single_width = mode == DImode ? 32 : 64;
16716 if (CONST_INT_P (operands[2]))
16718 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16719 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16721 if (count >= single_width)
16723 emit_move_insn (low[0], high[1]);
16724 ix86_expand_clear (high[0]);
16726 if (count > single_width)
16727 emit_insn ((mode == DImode
16728 ? gen_lshrsi3
16729 : gen_lshrdi3) (low[0], low[0],
16730 GEN_INT (count - single_width)));
16732 else
16734 if (!rtx_equal_p (operands[0], operands[1]))
16735 emit_move_insn (operands[0], operands[1]);
16736 emit_insn ((mode == DImode
16737 ? gen_x86_shrd
16738 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
16739 emit_insn ((mode == DImode
16740 ? gen_lshrsi3
16741 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
16744 else
16746 if (!rtx_equal_p (operands[0], operands[1]))
16747 emit_move_insn (operands[0], operands[1]);
16749 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16751 emit_insn ((mode == DImode
16752 ? gen_x86_shrd
16753 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
16754 emit_insn ((mode == DImode
16755 ? gen_lshrsi3
16756 : gen_lshrdi3) (high[0], high[0], operands[2]));
16758 /* Heh. By reversing the arguments, we can reuse this pattern. */
16759 if (TARGET_CMOVE && scratch)
16761 ix86_expand_clear (scratch);
16762 emit_insn ((mode == DImode
16763 ? gen_x86_shift_adj_1
16764 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
16765 scratch));
16767 else
16768 emit_insn ((mode == DImode
16769 ? gen_x86_shift_adj_2
16770 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
16774 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
16775 static void
16776 predict_jump (int prob)
16778 rtx insn = get_last_insn ();
16779 gcc_assert (JUMP_P (insn));
16780 REG_NOTES (insn)
16781 = gen_rtx_EXPR_LIST (REG_BR_PROB,
16782 GEN_INT (prob),
16783 REG_NOTES (insn));
16786 /* Helper function for the string operations below. Test whether VARIABLE
16787 is aligned to VALUE bytes; if so, jump to the returned label. */
16788 static rtx
16789 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
16791 rtx label = gen_label_rtx ();
16792 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
16793 if (GET_MODE (variable) == DImode)
16794 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
16795 else
16796 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
16797 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
16798 1, label);
16799 if (epilogue)
16800 predict_jump (REG_BR_PROB_BASE * 50 / 100);
16801 else
16802 predict_jump (REG_BR_PROB_BASE * 90 / 100);
16803 return label;
16806 /* Decrease COUNTREG by VALUE. */
16807 static void
16808 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
16810 if (GET_MODE (countreg) == DImode)
16811 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
16812 else
16813 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
16816 /* Zero extend a possibly SImode EXP to a Pmode register. */
16817 rtx
16818 ix86_zero_extend_to_Pmode (rtx exp)
16820 rtx r;
16821 if (GET_MODE (exp) == VOIDmode)
16822 return force_reg (Pmode, exp);
16823 if (GET_MODE (exp) == Pmode)
16824 return copy_to_mode_reg (Pmode, exp);
16825 r = gen_reg_rtx (Pmode);
16826 emit_insn (gen_zero_extendsidi2 (r, exp));
16827 return r;
16830 /* Divide COUNTREG by SCALE. */
16831 static rtx
16832 scale_counter (rtx countreg, int scale)
16834 rtx sc;
16835 rtx piece_size_mask;
16837 if (scale == 1)
16838 return countreg;
16839 if (CONST_INT_P (countreg))
16840 return GEN_INT (INTVAL (countreg) / scale);
16841 gcc_assert (REG_P (countreg));
16843 piece_size_mask = GEN_INT (scale - 1);
16844 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
16845 GEN_INT (exact_log2 (scale)),
16846 NULL, 1, OPTAB_DIRECT);
16847 return sc;
16850 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
16851 DImode for constant loop counts. */
16853 static enum machine_mode
16854 counter_mode (rtx count_exp)
16856 if (GET_MODE (count_exp) != VOIDmode)
16857 return GET_MODE (count_exp);
16858 if (GET_CODE (count_exp) != CONST_INT)
16859 return Pmode;
16860 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
16861 return DImode;
16862 return SImode;
16865 /* When SRCPTR is non-NULL, output a simple loop to move memory pointed
16866 to by SRCPTR to DESTPTR via chunks of MODE unrolled UNROLL times; the
16867 overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
16868 the equivalent loop to set memory to VALUE (expected to be in MODE).
16870 The size is rounded down to a whole number of chunks moved at once.
16871 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
16874 static void
16875 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
16876 rtx destptr, rtx srcptr, rtx value,
16877 rtx count, enum machine_mode mode, int unroll,
16878 int expected_size)
16880 rtx out_label, top_label, iter, tmp;
16881 enum machine_mode iter_mode = counter_mode (count);
16882 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
16883 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
16884 rtx size;
16885 rtx x_addr;
16886 rtx y_addr;
16887 int i;
16889 top_label = gen_label_rtx ();
16890 out_label = gen_label_rtx ();
16891 iter = gen_reg_rtx (iter_mode);
16893 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
16894 NULL, 1, OPTAB_DIRECT);
16895 /* Those two should combine. */
16896 if (piece_size == const1_rtx)
16898 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
16899 true, out_label);
16900 predict_jump (REG_BR_PROB_BASE * 10 / 100);
16902 emit_move_insn (iter, const0_rtx);
16904 emit_label (top_label);
16906 tmp = convert_modes (Pmode, iter_mode, iter, true);
16907 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
16908 destmem = change_address (destmem, mode, x_addr);
16910 if (srcmem)
16912 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
16913 srcmem = change_address (srcmem, mode, y_addr);
16915 /* When unrolling for chips that reorder memory reads and writes,
16916 we can save registers by using a single temporary.
16917 Also, using 4 temporaries is overkill in 32-bit mode. */
16918 if (!TARGET_64BIT && 0)
16920 for (i = 0; i < unroll; i++)
16922 if (i)
16924 destmem =
16925 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16926 srcmem =
16927 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
16929 emit_move_insn (destmem, srcmem);
16932 else
16934 rtx tmpreg[4];
16935 gcc_assert (unroll <= 4);
16936 for (i = 0; i < unroll; i++)
16938 tmpreg[i] = gen_reg_rtx (mode);
16939 if (i)
16941 srcmem =
16942 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
16944 emit_move_insn (tmpreg[i], srcmem);
16946 for (i = 0; i < unroll; i++)
16948 if (i)
16950 destmem =
16951 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16953 emit_move_insn (destmem, tmpreg[i]);
16957 else
16958 for (i = 0; i < unroll; i++)
16960 if (i)
16961 destmem =
16962 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
16963 emit_move_insn (destmem, value);
16966 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
16967 true, OPTAB_LIB_WIDEN);
16968 if (tmp != iter)
16969 emit_move_insn (iter, tmp);
16971 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
16972 true, top_label);
16973 if (expected_size != -1)
16975 expected_size /= GET_MODE_SIZE (mode) * unroll;
16976 if (expected_size == 0)
16977 predict_jump (0);
16978 else if (expected_size > REG_BR_PROB_BASE)
16979 predict_jump (REG_BR_PROB_BASE - 1);
16980 else
16981 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
16983 else
16984 predict_jump (REG_BR_PROB_BASE * 80 / 100);
16985 iter = ix86_zero_extend_to_Pmode (iter);
16986 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
16987 true, OPTAB_LIB_WIDEN);
16988 if (tmp != destptr)
16989 emit_move_insn (destptr, tmp);
16990 if (srcptr)
16992 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
16993 true, OPTAB_LIB_WIDEN);
16994 if (tmp != srcptr)
16995 emit_move_insn (srcptr, tmp);
16997 emit_label (out_label);
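/* Editor's sketch (not part of GCC): the RTL emitted above behaves
   roughly like this C loop for the copy case, where CHUNK is
   GET_MODE_SIZE (mode) * unroll and is a power of two: */
#include <stddef.h>
#include <string.h>

static void
copy_via_chunked_loop (char *dest, const char *src, size_t count,
                       size_t chunk)
{
  size_t size = count & ~(chunk - 1);   /* round down to whole chunks */
  size_t iter;
  for (iter = 0; iter < size; iter += chunk)
    memcpy (dest + iter, src + iter, chunk);
  /* The expander then advances both pointers by ITER, as above.  */
}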
17000 /* Output a "rep; mov" instruction.
17001 Arguments have the same meaning as for the previous function. */
17002 static void
17003 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17004 rtx destptr, rtx srcptr,
17005 rtx count,
17006 enum machine_mode mode)
17008 rtx destexp;
17009 rtx srcexp;
17010 rtx countreg;
17012 /* If the size is known, it is shorter to use rep movs. */
17013 if (mode == QImode && CONST_INT_P (count)
17014 && !(INTVAL (count) & 3))
17015 mode = SImode;
17017 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17018 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17019 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17020 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17021 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17022 if (mode != QImode)
17024 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17025 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17026 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17027 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17028 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17029 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17031 else
17033 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17034 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17036 if (CONST_INT_P (count))
17038 count = GEN_INT (INTVAL (count)
17039 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17040 destmem = shallow_copy_rtx (destmem);
17041 srcmem = shallow_copy_rtx (srcmem);
17042 set_mem_size (destmem, count);
17043 set_mem_size (srcmem, count);
17045 else
17047 if (MEM_SIZE (destmem))
17048 set_mem_size (destmem, NULL_RTX);
17049 if (MEM_SIZE (srcmem))
17050 set_mem_size (srcmem, NULL_RTX);
17052 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17053 destexp, srcexp));
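/* Editor's sketch (not part of GCC): with the count scaled down to
   MODE-sized pieces, "rep; mov" behaves like this C model (SCALE is
   GET_MODE_SIZE (mode), e.g. 4 for SImode), advancing both pointers
   the way the string instruction advances esi/edi: */
#include <stddef.h>
#include <string.h>

static void
rep_movs_model (char **dest, const char **src, size_t count,
                size_t scale)
{
  size_t n = count / scale;     /* scale_counter */
  while (n-- > 0)
    {
      memcpy (*dest, *src, scale);
      *dest += scale;
      *src += scale;
    }
}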
17056 /* Output a "rep; stos" instruction.
17057 Arguments have the same meaning as for the previous function. */
17058 static void
17059 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17060 rtx count, enum machine_mode mode,
17061 rtx orig_value)
17063 rtx destexp;
17064 rtx countreg;
17066 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17067 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17068 value = force_reg (mode, gen_lowpart (mode, value));
17069 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17070 if (mode != QImode)
17072 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17073 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17074 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17076 else
17077 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17078 if (orig_value == const0_rtx && CONST_INT_P (count))
17080 count = GEN_INT (INTVAL (count)
17081 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17082 destmem = shallow_copy_rtx (destmem);
17083 set_mem_size (destmem, count);
17085 else if (MEM_SIZE (destmem))
17086 set_mem_size (destmem, NULL_RTX);
17087 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17090 static void
17091 emit_strmov (rtx destmem, rtx srcmem,
17092 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17094 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17095 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17096 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17099 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17100 static void
17101 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17102 rtx destptr, rtx srcptr, rtx count, int max_size)
17104 rtx src, dest;
17105 if (CONST_INT_P (count))
17107 HOST_WIDE_INT countval = INTVAL (count);
17108 int offset = 0;
17110 if ((countval & 0x10) && max_size > 16)
17112 if (TARGET_64BIT)
17114 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17115 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17117 else
17118 gcc_unreachable ();
17119 offset += 16;
17121 if ((countval & 0x08) && max_size > 8)
17123 if (TARGET_64BIT)
17124 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17125 else
17127 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17128 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17130 offset += 8;
17132 if ((countval & 0x04) && max_size > 4)
17134 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17135 offset += 4;
17137 if ((countval & 0x02) && max_size > 2)
17139 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17140 offset += 2;
17142 if ((countval & 0x01) && max_size > 1)
17144 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17145 offset += 1;
17147 return;
17149 if (max_size > 8)
17151 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17152 count, 1, OPTAB_DIRECT);
17153 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17154 count, QImode, 1, 4);
17155 return;
17158 /* When there are stringops, we can cheaply increase dest and src pointers.
17159 Otherwise we save code size by maintaining offset (zero is readily
17160 available from the preceding rep operation) and using x86 addressing
17161 modes. */
17162 if (TARGET_SINGLE_STRINGOP)
17164 if (max_size > 4)
17166 rtx label = ix86_expand_aligntest (count, 4, true);
17167 src = change_address (srcmem, SImode, srcptr);
17168 dest = change_address (destmem, SImode, destptr);
17169 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17170 emit_label (label);
17171 LABEL_NUSES (label) = 1;
17173 if (max_size > 2)
17175 rtx label = ix86_expand_aligntest (count, 2, true);
17176 src = change_address (srcmem, HImode, srcptr);
17177 dest = change_address (destmem, HImode, destptr);
17178 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17179 emit_label (label);
17180 LABEL_NUSES (label) = 1;
17182 if (max_size > 1)
17184 rtx label = ix86_expand_aligntest (count, 1, true);
17185 src = change_address (srcmem, QImode, srcptr);
17186 dest = change_address (destmem, QImode, destptr);
17187 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17188 emit_label (label);
17189 LABEL_NUSES (label) = 1;
17192 else
17194 rtx offset = force_reg (Pmode, const0_rtx);
17195 rtx tmp;
17197 if (max_size > 4)
17199 rtx label = ix86_expand_aligntest (count, 4, true);
17200 src = change_address (srcmem, SImode, srcptr);
17201 dest = change_address (destmem, SImode, destptr);
17202 emit_move_insn (dest, src);
17203 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17204 true, OPTAB_LIB_WIDEN);
17205 if (tmp != offset)
17206 emit_move_insn (offset, tmp);
17207 emit_label (label);
17208 LABEL_NUSES (label) = 1;
17210 if (max_size > 2)
17212 rtx label = ix86_expand_aligntest (count, 2, true);
17213 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17214 src = change_address (srcmem, HImode, tmp);
17215 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17216 dest = change_address (destmem, HImode, tmp);
17217 emit_move_insn (dest, src);
17218 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17219 true, OPTAB_LIB_WIDEN);
17220 if (tmp != offset)
17221 emit_move_insn (offset, tmp);
17222 emit_label (label);
17223 LABEL_NUSES (label) = 1;
17225 if (max_size > 1)
17227 rtx label = ix86_expand_aligntest (count, 1, true);
17228 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17229 src = change_address (srcmem, QImode, tmp);
17230 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17231 dest = change_address (destmem, QImode, tmp);
17232 emit_move_insn (dest, src);
17233 emit_label (label);
17234 LABEL_NUSES (label) = 1;
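/* Editor's sketch (not part of GCC): for a compile-time COUNT the
   epilogue above copies the remainder by testing COUNT's low bits,
   as in this C model of the 64-bit path: */
#include <string.h>

static void
copy_tail_model (char *dest, const char *src, unsigned int count)
{
  unsigned int off = 0;
  if (count & 8) { memcpy (dest + off, src + off, 8); off += 8; }
  if (count & 4) { memcpy (dest + off, src + off, 4); off += 4; }
  if (count & 2) { memcpy (dest + off, src + off, 2); off += 2; }
  if (count & 1) dest[off] = src[off];
}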
17239 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17240 static void
17241 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17242 rtx count, int max_size)
17244 count =
17245 expand_simple_binop (counter_mode (count), AND, count,
17246 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17247 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17248 gen_lowpart (QImode, value), count, QImode,
17249 1, max_size / 2);
17252 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17253 static void
17254 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17256 rtx dest;
17258 if (CONST_INT_P (count))
17260 HOST_WIDE_INT countval = INTVAL (count);
17261 int offset = 0;
17263 if ((countval & 0x10) && max_size > 16)
17265 if (TARGET_64BIT)
17267 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17268 emit_insn (gen_strset (destptr, dest, value));
17269 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17270 emit_insn (gen_strset (destptr, dest, value));
17272 else
17273 gcc_unreachable ();
17274 offset += 16;
17276 if ((countval & 0x08) && max_size > 8)
17278 if (TARGET_64BIT)
17280 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17281 emit_insn (gen_strset (destptr, dest, value));
17283 else
17285 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17286 emit_insn (gen_strset (destptr, dest, value));
17287 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17288 emit_insn (gen_strset (destptr, dest, value));
17290 offset += 8;
17292 if ((countval & 0x04) && max_size > 4)
17294 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17295 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17296 offset += 4;
17298 if ((countval & 0x02) && max_size > 2)
17300 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17301 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17302 offset += 2;
17304 if ((countval & 0x01) && max_size > 1)
17306 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17307 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17308 offset += 1;
17310 return;
17312 if (max_size > 32)
17314 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17315 return;
17317 if (max_size > 16)
17319 rtx label = ix86_expand_aligntest (count, 16, true);
17320 if (TARGET_64BIT)
17322 dest = change_address (destmem, DImode, destptr);
17323 emit_insn (gen_strset (destptr, dest, value));
17324 emit_insn (gen_strset (destptr, dest, value));
17326 else
17328 dest = change_address (destmem, SImode, destptr);
17329 emit_insn (gen_strset (destptr, dest, value));
17330 emit_insn (gen_strset (destptr, dest, value));
17331 emit_insn (gen_strset (destptr, dest, value));
17332 emit_insn (gen_strset (destptr, dest, value));
17334 emit_label (label);
17335 LABEL_NUSES (label) = 1;
17337 if (max_size > 8)
17339 rtx label = ix86_expand_aligntest (count, 8, true);
17340 if (TARGET_64BIT)
17342 dest = change_address (destmem, DImode, destptr);
17343 emit_insn (gen_strset (destptr, dest, value));
17345 else
17347 dest = change_address (destmem, SImode, destptr);
17348 emit_insn (gen_strset (destptr, dest, value));
17349 emit_insn (gen_strset (destptr, dest, value));
17351 emit_label (label);
17352 LABEL_NUSES (label) = 1;
17354 if (max_size > 4)
17356 rtx label = ix86_expand_aligntest (count, 4, true);
17357 dest = change_address (destmem, SImode, destptr);
17358 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17359 emit_label (label);
17360 LABEL_NUSES (label) = 1;
17362 if (max_size > 2)
17364 rtx label = ix86_expand_aligntest (count, 2, true);
17365 dest = change_address (destmem, HImode, destptr);
17366 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17367 emit_label (label);
17368 LABEL_NUSES (label) = 1;
17370 if (max_size > 1)
17372 rtx label = ix86_expand_aligntest (count, 1, true);
17373 dest = change_address (destmem, QImode, destptr);
17374 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17375 emit_label (label);
17376 LABEL_NUSES (label) = 1;
17380 /* Copy enough from SRC to DEST to align DEST, known to be aligned by
17381 ALIGN, to DESIRED_ALIGNMENT. */
17382 static void
17383 expand_movmem_prologue (rtx destmem, rtx srcmem,
17384 rtx destptr, rtx srcptr, rtx count,
17385 int align, int desired_alignment)
17387 if (align <= 1 && desired_alignment > 1)
17389 rtx label = ix86_expand_aligntest (destptr, 1, false);
17390 srcmem = change_address (srcmem, QImode, srcptr);
17391 destmem = change_address (destmem, QImode, destptr);
17392 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17393 ix86_adjust_counter (count, 1);
17394 emit_label (label);
17395 LABEL_NUSES (label) = 1;
17397 if (align <= 2 && desired_alignment > 2)
17399 rtx label = ix86_expand_aligntest (destptr, 2, false);
17400 srcmem = change_address (srcmem, HImode, srcptr);
17401 destmem = change_address (destmem, HImode, destptr);
17402 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17403 ix86_adjust_counter (count, 2);
17404 emit_label (label);
17405 LABEL_NUSES (label) = 1;
17407 if (align <= 4 && desired_alignment > 4)
17409 rtx label = ix86_expand_aligntest (destptr, 4, false);
17410 srcmem = change_address (srcmem, SImode, srcptr);
17411 destmem = change_address (destmem, SImode, destptr);
17412 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17413 ix86_adjust_counter (count, 4);
17414 emit_label (label);
17415 LABEL_NUSES (label) = 1;
17417 gcc_assert (desired_alignment <= 8);
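/* Editor's sketch (not part of GCC): the prologue above copies 1, 2
   and then 4 bytes whenever the corresponding destination address bit
   is set, so that DEST ends up 8-byte aligned; a C model: */
#include <stdint.h>
#include <string.h>

static void
align_dest_model (char **dest, const char **src, unsigned int *count)
{
  if ((uintptr_t) *dest & 1)
    { **dest = **src; *dest += 1; *src += 1; *count -= 1; }
  if ((uintptr_t) *dest & 2)
    { memcpy (*dest, *src, 2); *dest += 2; *src += 2; *count -= 2; }
  if ((uintptr_t) *dest & 4)
    { memcpy (*dest, *src, 4); *dest += 4; *src += 4; *count -= 4; }
}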
17420 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17421 ALIGN_BYTES is how many bytes need to be copied. */
17422 static rtx
17423 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17424 int desired_align, int align_bytes)
17426 rtx src = *srcp;
17427 rtx src_size, dst_size;
17428 int off = 0;
17429 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17430 if (src_align_bytes >= 0)
17431 src_align_bytes = desired_align - src_align_bytes;
17432 src_size = MEM_SIZE (src);
17433 dst_size = MEM_SIZE (dst);
17434 if (align_bytes & 1)
17436 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17437 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17438 off = 1;
17439 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17441 if (align_bytes & 2)
17443 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17444 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17445 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17446 set_mem_align (dst, 2 * BITS_PER_UNIT);
17447 if (src_align_bytes >= 0
17448 && (src_align_bytes & 1) == (align_bytes & 1)
17449 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17450 set_mem_align (src, 2 * BITS_PER_UNIT);
17451 off = 2;
17452 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17454 if (align_bytes & 4)
17456 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17457 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17458 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17459 set_mem_align (dst, 4 * BITS_PER_UNIT);
17460 if (src_align_bytes >= 0)
17462 unsigned int src_align = 0;
17463 if ((src_align_bytes & 3) == (align_bytes & 3))
17464 src_align = 4;
17465 else if ((src_align_bytes & 1) == (align_bytes & 1))
17466 src_align = 2;
17467 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17468 set_mem_align (src, src_align * BITS_PER_UNIT);
17470 off = 4;
17471 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17473 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17474 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17475 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17476 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17477 if (src_align_bytes >= 0)
17479 unsigned int src_align = 0;
17480 if ((src_align_bytes & 7) == (align_bytes & 7))
17481 src_align = 8;
17482 else if ((src_align_bytes & 3) == (align_bytes & 3))
17483 src_align = 4;
17484 else if ((src_align_bytes & 1) == (align_bytes & 1))
17485 src_align = 2;
17486 if (src_align > (unsigned int) desired_align)
17487 src_align = desired_align;
17488 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17489 set_mem_align (src, src_align * BITS_PER_UNIT);
17491 if (dst_size)
17492 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17493 if (src_size)
17494 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
17495 *srcp = src;
17496 return dst;
17499 /* Store enough bytes of VALUE into DEST to align DEST, known to be aligned
17500 by ALIGN, to DESIRED_ALIGNMENT. */
17501 static void
17502 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
17503 int align, int desired_alignment)
17505 if (align <= 1 && desired_alignment > 1)
17507 rtx label = ix86_expand_aligntest (destptr, 1, false);
17508 destmem = change_address (destmem, QImode, destptr);
17509 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
17510 ix86_adjust_counter (count, 1);
17511 emit_label (label);
17512 LABEL_NUSES (label) = 1;
17514 if (align <= 2 && desired_alignment > 2)
17516 rtx label = ix86_expand_aligntest (destptr, 2, false);
17517 destmem = change_address (destmem, HImode, destptr);
17518 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
17519 ix86_adjust_counter (count, 2);
17520 emit_label (label);
17521 LABEL_NUSES (label) = 1;
17523 if (align <= 4 && desired_alignment > 4)
17525 rtx label = ix86_expand_aligntest (destptr, 4, false);
17526 destmem = change_address (destmem, SImode, destptr);
17527 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
17528 ix86_adjust_counter (count, 4);
17529 emit_label (label);
17530 LABEL_NUSES (label) = 1;
17532 gcc_assert (desired_alignment <= 8);
17535 /* Store enough bytes of VALUE into DST to align DST, known to be aligned by
17536 ALIGN, to DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
17537 static rtx
17538 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
17539 int desired_align, int align_bytes)
17541 int off = 0;
17542 rtx dst_size = MEM_SIZE (dst);
17543 if (align_bytes & 1)
17545 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17546 off = 1;
17547 emit_insn (gen_strset (destreg, dst,
17548 gen_lowpart (QImode, value)));
17550 if (align_bytes & 2)
17552 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17553 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17554 set_mem_align (dst, 2 * BITS_PER_UNIT);
17555 off = 2;
17556 emit_insn (gen_strset (destreg, dst,
17557 gen_lowpart (HImode, value)));
17559 if (align_bytes & 4)
17561 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17562 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17563 set_mem_align (dst, 4 * BITS_PER_UNIT);
17564 off = 4;
17565 emit_insn (gen_strset (destreg, dst,
17566 gen_lowpart (SImode, value)));
17568 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17569 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17570 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17571 if (dst_size)
17572 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17573 return dst;
17576 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
17577 static enum stringop_alg
17578 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
17579 int *dynamic_check)
17581 const struct stringop_algs * algs;
17582 bool optimize_for_speed;
17583 /* Algorithms using the rep prefix want at least edi and ecx;
17584 additionally, memset wants eax and memcpy wants esi. Don't
17585 consider such algorithms if the user has appropriated those
17586 registers for their own purposes. */
17587 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
17588 || (memset
17589 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
17591 #define ALG_USABLE_P(alg) (rep_prefix_usable \
17592 || (alg != rep_prefix_1_byte \
17593 && alg != rep_prefix_4_byte \
17594 && alg != rep_prefix_8_byte))
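/* Editor's note -- an illustrative example, not part of the original
   source: if the user builds with e.g. -ffixed-ecx, fixed_regs[CX_REG]
   is set, rep_prefix_usable evaluates to false, and ALG_USABLE_P then
   rejects rep_prefix_1_byte, rep_prefix_4_byte and rep_prefix_8_byte
   while still accepting loop, loop_1_byte, unrolled_loop and libcall.  */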
17595 const struct processor_costs *cost;
17597 /* Even if the string operation call is cold, we still might spend a lot
17598 of time processing large blocks. */
17599 if (optimize_function_for_size_p (cfun)
17600 || (optimize_insn_for_size_p ()
17601 && expected_size != -1 && expected_size < 256))
17602 optimize_for_speed = false;
17603 else
17604 optimize_for_speed = true;
17606 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
17608 *dynamic_check = -1;
17609 if (memset)
17610 algs = &cost->memset[TARGET_64BIT != 0];
17611 else
17612 algs = &cost->memcpy[TARGET_64BIT != 0];
17613 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
17614 return stringop_alg;
17615 /* rep; movq or rep; movl is the smallest variant. */
17616 else if (!optimize_for_speed)
17618 if (!count || (count & 3))
17619 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
17620 else
17621 return rep_prefix_usable ? rep_prefix_4_byte : loop;
17623 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
17625 else if (expected_size != -1 && expected_size < 4)
17626 return loop_1_byte;
17627 else if (expected_size != -1)
17629 unsigned int i;
17630 enum stringop_alg alg = libcall;
17631 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
17633 /* We get here if the algorithms that were not libcall-based
17634 were rep-prefix based and we are unable to use rep prefixes
17635 based on global register usage. Break out of the loop and
17636 use the heuristic below. */
17637 if (algs->size[i].max == 0)
17638 break;
17639 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
17641 enum stringop_alg candidate = algs->size[i].alg;
17643 if (candidate != libcall && ALG_USABLE_P (candidate))
17644 alg = candidate;
17645 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
17646 the last non-libcall inline algorithm. */
17647 if (TARGET_INLINE_ALL_STRINGOPS)
17649 /* When the current size is best copied by a libcall,
17650 but we are still forced to inline, run the heuristic below
17651 that will pick code for medium-sized blocks. */
17652 if (alg != libcall)
17653 return alg;
17654 break;
17656 else if (ALG_USABLE_P (candidate))
17657 return candidate;
17660 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
17662 /* When asked to inline the call anyway, try to pick a meaningful choice.
17663 We look for the maximal size of a block that is faster to copy by hand and
17664 take blocks of at most that size, guessing that the average size will
17665 be roughly half of the block.
17667 If this turns out to be bad, we might simply specify the preferred
17668 choice in ix86_costs. */
17669 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17670 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
17672 int max = -1;
17673 enum stringop_alg alg;
17674 int i;
17675 bool any_alg_usable_p = true;
17677 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
17679 enum stringop_alg candidate = algs->size[i].alg;
17680 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
17682 if (candidate != libcall && candidate
17683 && ALG_USABLE_P (candidate))
17684 max = algs->size[i].max;
17686 /* If there aren't any usable algorithms, then recursing on
17687 smaller sizes isn't going to find anything. Just return the
17688 simple byte-at-a-time copy loop. */
17689 if (!any_alg_usable_p)
17691 /* Pick something reasonable. */
17692 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17693 *dynamic_check = 128;
17694 return loop_1_byte;
17696 if (max == -1)
17697 max = 4096;
17698 alg = decide_alg (count, max / 2, memset, dynamic_check);
17699 gcc_assert (*dynamic_check == -1);
17700 gcc_assert (alg != libcall);
17701 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17702 *dynamic_check = max;
17703 return alg;
17705 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
17706 #undef ALG_USABLE_P
17709 /* Decide on alignment. We know that the operand is already aligned to ALIGN
17710 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
17711 static int
17712 decide_alignment (int align,
17713 enum stringop_alg alg,
17714 int expected_size)
17716 int desired_align = 0;
17717 switch (alg)
17719 case no_stringop:
17720 gcc_unreachable ();
17721 case loop:
17722 case unrolled_loop:
17723 desired_align = GET_MODE_SIZE (Pmode);
17724 break;
17725 case rep_prefix_8_byte:
17726 desired_align = 8;
17727 break;
17728 case rep_prefix_4_byte:
17729 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
17730 copying a whole cache line at once. */
17731 if (TARGET_PENTIUMPRO)
17732 desired_align = 8;
17733 else
17734 desired_align = 4;
17735 break;
17736 case rep_prefix_1_byte:
17737 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
17738 copying a whole cache line at once. */
17739 if (TARGET_PENTIUMPRO)
17740 desired_align = 8;
17741 else
17742 desired_align = 1;
17743 break;
17744 case loop_1_byte:
17745 desired_align = 1;
17746 break;
17747 case libcall:
17748 return 0;
17751 if (optimize_size)
17752 desired_align = 1;
17753 if (desired_align < align)
17754 desired_align = align;
17755 if (expected_size != -1 && expected_size < 4)
17756 desired_align = align;
17757 return desired_align;
17760 /* Return the smallest power of 2 greater than VAL. */
17761 static int
17762 smallest_pow2_greater_than (int val)
17764 int ret = 1;
17765 while (ret <= val)
17766 ret <<= 1;
17767 return ret;
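/* Editor's note (illustrative usage): the result is strictly greater
   than VAL, so smallest_pow2_greater_than (4) == 8, and for the typical
   epilogue computation smallest_pow2_greater_than (7) == 8.  */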
17770 /* Expand string move (memcpy) operation. Use i386 string operations when
17771 profitable. expand_setmem contains similar code. The code depends upon
17772 architecture, block size and alignment, but always has the same
17773 overall structure:
17775 1) Prologue guard: Conditional that jumps up to epilogues for small
17776 blocks that can be handled by epilogue alone. This is faster but
17777 also needed for correctness, since the prologue assumes the block is larger
17778 than the desired alignment.
17780 Optional dynamic check for size and libcall for large
17781 blocks is emitted here too, with -minline-stringops-dynamically.
17783 2) Prologue: copy first few bytes in order to get destination aligned
17784 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
17785 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
17786 We emit either a jump tree on power of two sized blocks, or a byte loop.
17788 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
17789 with specified algorithm.
17791 4) Epilogue: code copying the tail of the block that is too small to be
17792 handled by the main body (or up to the size guarded by the prologue guard). */
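/* Editor's sketch -- not part of the original source: for a call
   memcpy (dst, src, n) with unknown N, alg == rep_prefix_4_byte and
   desired_align == 4, the four steps above emit RTL shaped roughly like
   this C pseudo-code:

       if (n < 4) goto epilogue;                    // 1) prologue guard
       while ((uintptr_t) dst & 3)                  // 2) alignment prologue
         { *dst++ = *src++; n--; }                  //    (emitted as a jump
                                                    //    tree, not a loop)
       rep movsd for n / 4 dwords;                  // 3) main body
     epilogue:
       copy the remaining n & 3 bytes;              // 4) epilogue

   The actual prologue/epilogue shapes depend on ALIGN, COUNT and the
   chosen algorithm, as implemented below.  */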
17795 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
17796 rtx expected_align_exp, rtx expected_size_exp)
17798 rtx destreg;
17799 rtx srcreg;
17800 rtx label = NULL;
17801 rtx tmp;
17802 rtx jump_around_label = NULL;
17803 HOST_WIDE_INT align = 1;
17804 unsigned HOST_WIDE_INT count = 0;
17805 HOST_WIDE_INT expected_size = -1;
17806 int size_needed = 0, epilogue_size_needed;
17807 int desired_align = 0, align_bytes = 0;
17808 enum stringop_alg alg;
17809 int dynamic_check;
17810 bool need_zero_guard = false;
17812 if (CONST_INT_P (align_exp))
17813 align = INTVAL (align_exp);
17814 /* i386 can do misaligned access at a reasonably increased cost. */
17815 if (CONST_INT_P (expected_align_exp)
17816 && INTVAL (expected_align_exp) > align)
17817 align = INTVAL (expected_align_exp);
17818 /* ALIGN is the minimum of destination and source alignment, but we care here
17819 just about destination alignment. */
17820 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
17821 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
17823 if (CONST_INT_P (count_exp))
17824 count = expected_size = INTVAL (count_exp);
17825 if (CONST_INT_P (expected_size_exp) && count == 0)
17826 expected_size = INTVAL (expected_size_exp);
17828 /* Make sure we don't need to care about overflow later on. */
17829 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
17830 return 0;
17832 /* Step 0: Decide on preferred algorithm, desired alignment and
17833 size of chunks to be copied by main loop. */
17835 alg = decide_alg (count, expected_size, false, &dynamic_check);
17836 desired_align = decide_alignment (align, alg, expected_size);
17838 if (!TARGET_ALIGN_STRINGOPS)
17839 align = desired_align;
17841 if (alg == libcall)
17842 return 0;
17843 gcc_assert (alg != no_stringop);
17844 if (!count)
17845 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
17846 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
17847 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
17848 switch (alg)
17850 case libcall:
17851 case no_stringop:
17852 gcc_unreachable ();
17853 case loop:
17854 need_zero_guard = true;
17855 size_needed = GET_MODE_SIZE (Pmode);
17856 break;
17857 case unrolled_loop:
17858 need_zero_guard = true;
17859 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
17860 break;
17861 case rep_prefix_8_byte:
17862 size_needed = 8;
17863 break;
17864 case rep_prefix_4_byte:
17865 size_needed = 4;
17866 break;
17867 case rep_prefix_1_byte:
17868 size_needed = 1;
17869 break;
17870 case loop_1_byte:
17871 need_zero_guard = true;
17872 size_needed = 1;
17873 break;
17876 epilogue_size_needed = size_needed;
17878 /* Step 1: Prologue guard. */
17880 /* Alignment code needs count to be in register. */
17881 if (CONST_INT_P (count_exp) && desired_align > align)
17883 if (INTVAL (count_exp) > desired_align
17884 && INTVAL (count_exp) > size_needed)
17886 align_bytes
17887 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
17888 if (align_bytes <= 0)
17889 align_bytes = 0;
17890 else
17891 align_bytes = desired_align - align_bytes;
17893 if (align_bytes == 0)
17894 count_exp = force_reg (counter_mode (count_exp), count_exp);
17896 gcc_assert (desired_align >= 1 && align >= 1);
17898 /* Ensure that alignment prologue won't copy past end of block. */
17899 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
17901 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
17902 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
17903 Make sure it is a power of 2. */
17904 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
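/* Editor's note (illustrative): for alg == rep_prefix_8_byte with
   align == 1, decide_alignment gives desired_align == 8 and the switch
   above gives size_needed == 8, so epilogue_size_needed becomes
   MAX (8 - 1, 8 - 1) == 7, rounded up to the power of two 8; the
   epilogue then handles COUNT & 7 trailing bytes.  */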
17906 if (count)
17908 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
17910 /* If main algorithm works on QImode, no epilogue is needed.
17911 For small sizes just don't align anything. */
17912 if (size_needed == 1)
17913 desired_align = align;
17914 else
17915 goto epilogue;
17918 else
17920 label = gen_label_rtx ();
17921 emit_cmp_and_jump_insns (count_exp,
17922 GEN_INT (epilogue_size_needed),
17923 LTU, 0, counter_mode (count_exp), 1, label);
17924 if (expected_size == -1 || expected_size < epilogue_size_needed)
17925 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17926 else
17927 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17931 /* Emit code to decide at runtime whether a library call or inline code
17932 should be used. */
17933 if (dynamic_check != -1)
17935 if (CONST_INT_P (count_exp))
17937 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
17939 emit_block_move_via_libcall (dst, src, count_exp, false);
17940 count_exp = const0_rtx;
17941 goto epilogue;
17944 else
17946 rtx hot_label = gen_label_rtx ();
17947 jump_around_label = gen_label_rtx ();
17948 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
17949 LEU, 0, GET_MODE (count_exp), 1, hot_label);
17950 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17951 emit_block_move_via_libcall (dst, src, count_exp, false);
17952 emit_jump (jump_around_label);
17953 emit_label (hot_label);
17957 /* Step 2: Alignment prologue. */
17959 if (desired_align > align)
17961 if (align_bytes == 0)
17963 /* Except for the first move in the epilogue, we no longer know
17964 the constant offset in aliasing info. It doesn't seem worth
17965 the pain to maintain it for the first move, so throw away
17966 the info early. */
17967 src = change_address (src, BLKmode, srcreg);
17968 dst = change_address (dst, BLKmode, destreg);
17969 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
17970 desired_align);
17972 else
17974 /* If we know how many bytes need to be copied before dst is
17975 sufficiently aligned, maintain aliasing info accurately. */
17976 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
17977 desired_align, align_bytes);
17978 count_exp = plus_constant (count_exp, -align_bytes);
17979 count -= align_bytes;
17981 if (need_zero_guard
17982 && (count < (unsigned HOST_WIDE_INT) size_needed
17983 || (align_bytes == 0
17984 && count < ((unsigned HOST_WIDE_INT) size_needed
17985 + desired_align - align))))
17987 /* It is possible that we copied enough so the main loop will not
17988 execute. */
17989 gcc_assert (size_needed > 1);
17990 if (label == NULL_RTX)
17991 label = gen_label_rtx ();
17992 emit_cmp_and_jump_insns (count_exp,
17993 GEN_INT (size_needed),
17994 LTU, 0, counter_mode (count_exp), 1, label);
17995 if (expected_size == -1
17996 || expected_size < (desired_align - align) / 2 + size_needed)
17997 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17998 else
17999 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18002 if (label && size_needed == 1)
18004 emit_label (label);
18005 LABEL_NUSES (label) = 1;
18006 label = NULL;
18007 epilogue_size_needed = 1;
18009 else if (label == NULL_RTX)
18010 epilogue_size_needed = size_needed;
18012 /* Step 3: Main loop. */
18014 switch (alg)
18016 case libcall:
18017 case no_stringop:
18018 gcc_unreachable ();
18019 case loop_1_byte:
18020 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18021 count_exp, QImode, 1, expected_size);
18022 break;
18023 case loop:
18024 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18025 count_exp, Pmode, 1, expected_size);
18026 break;
18027 case unrolled_loop:
18028 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
18029 registers for 4 temporaries anyway. */
18030 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18031 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18032 expected_size);
18033 break;
18034 case rep_prefix_8_byte:
18035 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18036 DImode);
18037 break;
18038 case rep_prefix_4_byte:
18039 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18040 SImode);
18041 break;
18042 case rep_prefix_1_byte:
18043 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18044 QImode);
18045 break;
18047 /* Properly adjust the offsets of the src and dest memory for aliasing. */
18048 if (CONST_INT_P (count_exp))
18050 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18051 (count / size_needed) * size_needed);
18052 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18053 (count / size_needed) * size_needed);
18055 else
18057 src = change_address (src, BLKmode, srcreg);
18058 dst = change_address (dst, BLKmode, destreg);
18061 /* Step 4: Epilogue to copy the remaining bytes. */
18062 epilogue:
18063 if (label)
18065 /* When the main loop is done, COUNT_EXP might hold the original count,
18066 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18067 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18068 bytes. Compensate if needed. */
18070 if (size_needed < epilogue_size_needed)
18072 tmp =
18073 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18074 GEN_INT (size_needed - 1), count_exp, 1,
18075 OPTAB_DIRECT);
18076 if (tmp != count_exp)
18077 emit_move_insn (count_exp, tmp);
18079 emit_label (label);
18080 LABEL_NUSES (label) = 1;
18083 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18084 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18085 epilogue_size_needed);
18086 if (jump_around_label)
18087 emit_label (jump_around_label);
18088 return 1;
18091 /* Helper function for memset. For QImode value 0xXY produce
18092 0xXYXYXYXY of the width specified by MODE. This is essentially
18093 VAL * 0x01010101, but we can do slightly better than
18094 synth_mult by unwinding the sequence by hand on CPUs with
18095 slow multiply. */
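/* Editor's sketch -- not part of the original source: for VAL == 0x5A
   and MODE == SImode the constant path below computes

     v = 0x5A;
     v |= v << 8;        // v == 0x5A5A
     v |= v << 16;       // v == 0x5A5A5A5A

   which equals 0x5A * 0x01010101; for DImode it additionally ORs in
   (v << 16) << 16 to fill the upper 32 bits.  */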
18096 static rtx
18097 promote_duplicated_reg (enum machine_mode mode, rtx val)
18099 enum machine_mode valmode = GET_MODE (val);
18100 rtx tmp;
18101 int nops = mode == DImode ? 3 : 2;
18103 gcc_assert (mode == SImode || mode == DImode);
18104 if (val == const0_rtx)
18105 return copy_to_mode_reg (mode, const0_rtx);
18106 if (CONST_INT_P (val))
18108 HOST_WIDE_INT v = INTVAL (val) & 255;
18110 v |= v << 8;
18111 v |= v << 16;
18112 if (mode == DImode)
18113 v |= (v << 16) << 16;
18114 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18117 if (valmode == VOIDmode)
18118 valmode = QImode;
18119 if (valmode != QImode)
18120 val = gen_lowpart (QImode, val);
18121 if (mode == QImode)
18122 return val;
18123 if (!TARGET_PARTIAL_REG_STALL)
18124 nops--;
18125 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18126 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18127 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18128 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18130 rtx reg = convert_modes (mode, QImode, val, true);
18131 tmp = promote_duplicated_reg (mode, const1_rtx);
18132 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18133 OPTAB_DIRECT);
18135 else
18137 rtx reg = convert_modes (mode, QImode, val, true);
18139 if (!TARGET_PARTIAL_REG_STALL)
18140 if (mode == SImode)
18141 emit_insn (gen_movsi_insv_1 (reg, reg));
18142 else
18143 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18144 else
18146 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18147 NULL, 1, OPTAB_DIRECT);
18148 reg =
18149 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18151 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18152 NULL, 1, OPTAB_DIRECT);
18153 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18154 if (mode == SImode)
18155 return reg;
18156 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18157 NULL, 1, OPTAB_DIRECT);
18158 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18159 return reg;
18163 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
18164 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
18165 alignment from ALIGN to DESIRED_ALIGN. */
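/* Editor's note (illustrative): with TARGET_64BIT and size_needed == 8
   (e.g. rep_prefix_8_byte), the first test below matches and VAL is
   promoted to DImode; for a pure 1-byte loop with no alignment prologue
   (size_needed == 1, desired_align == align), VAL is returned
   unpromoted.  */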
18166 static rtx
18167 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18169 rtx promoted_val;
18171 if (TARGET_64BIT
18172 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18173 promoted_val = promote_duplicated_reg (DImode, val);
18174 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18175 promoted_val = promote_duplicated_reg (SImode, val);
18176 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18177 promoted_val = promote_duplicated_reg (HImode, val);
18178 else
18179 promoted_val = val;
18181 return promoted_val;
18184 /* Expand string set operation (memset). Use i386 string operations when
18185 profitable. See the expand_movmem comment for an explanation of the
18186 individual steps performed. */
18188 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18189 rtx expected_align_exp, rtx expected_size_exp)
18191 rtx destreg;
18192 rtx label = NULL;
18193 rtx tmp;
18194 rtx jump_around_label = NULL;
18195 HOST_WIDE_INT align = 1;
18196 unsigned HOST_WIDE_INT count = 0;
18197 HOST_WIDE_INT expected_size = -1;
18198 int size_needed = 0, epilogue_size_needed;
18199 int desired_align = 0, align_bytes = 0;
18200 enum stringop_alg alg;
18201 rtx promoted_val = NULL;
18202 bool force_loopy_epilogue = false;
18203 int dynamic_check;
18204 bool need_zero_guard = false;
18206 if (CONST_INT_P (align_exp))
18207 align = INTVAL (align_exp);
18208 /* i386 can do misaligned access at a reasonably increased cost. */
18209 if (CONST_INT_P (expected_align_exp)
18210 && INTVAL (expected_align_exp) > align)
18211 align = INTVAL (expected_align_exp);
18212 if (CONST_INT_P (count_exp))
18213 count = expected_size = INTVAL (count_exp);
18214 if (CONST_INT_P (expected_size_exp) && count == 0)
18215 expected_size = INTVAL (expected_size_exp);
18217 /* Make sure we don't need to care about overflow later on. */
18218 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18219 return 0;
18221 /* Step 0: Decide on preferred algorithm, desired alignment and
18222 size of chunks to be copied by main loop. */
18224 alg = decide_alg (count, expected_size, true, &dynamic_check);
18225 desired_align = decide_alignment (align, alg, expected_size);
18227 if (!TARGET_ALIGN_STRINGOPS)
18228 align = desired_align;
18230 if (alg == libcall)
18231 return 0;
18232 gcc_assert (alg != no_stringop);
18233 if (!count)
18234 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18235 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18236 switch (alg)
18238 case libcall:
18239 case no_stringop:
18240 gcc_unreachable ();
18241 case loop:
18242 need_zero_guard = true;
18243 size_needed = GET_MODE_SIZE (Pmode);
18244 break;
18245 case unrolled_loop:
18246 need_zero_guard = true;
18247 size_needed = GET_MODE_SIZE (Pmode) * 4;
18248 break;
18249 case rep_prefix_8_byte:
18250 size_needed = 8;
18251 break;
18252 case rep_prefix_4_byte:
18253 size_needed = 4;
18254 break;
18255 case rep_prefix_1_byte:
18256 size_needed = 1;
18257 break;
18258 case loop_1_byte:
18259 need_zero_guard = true;
18260 size_needed = 1;
18261 break;
18263 epilogue_size_needed = size_needed;
18265 /* Step 1: Prologue guard. */
18267 /* Alignment code needs count to be in register. */
18268 if (CONST_INT_P (count_exp) && desired_align > align)
18270 if (INTVAL (count_exp) > desired_align
18271 && INTVAL (count_exp) > size_needed)
18273 align_bytes
18274 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18275 if (align_bytes <= 0)
18276 align_bytes = 0;
18277 else
18278 align_bytes = desired_align - align_bytes;
18280 if (align_bytes == 0)
18282 enum machine_mode mode = SImode;
18283 if (TARGET_64BIT && (count & ~0xffffffff))
18284 mode = DImode;
18285 count_exp = force_reg (mode, count_exp);
18288 /* Do the cheap promotion to allow better CSE across the
18289 main loop and epilogue (i.e. one load of the big constant in
18290 front of all the code). */
18291 if (CONST_INT_P (val_exp))
18292 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18293 desired_align, align);
18294 /* Ensure that alignment prologue won't copy past end of block. */
18295 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18297 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18298 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18299 Make sure it is a power of 2. */
18300 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18302 /* To improve performance of small blocks, we jump around the VAL-promoting
18303 code. This means that if the promoted VAL is not constant,
18304 we might not use it in the epilogue and have to use the byte
18305 loop variant. */
18306 if (epilogue_size_needed > 2 && !promoted_val)
18307 force_loopy_epilogue = true;
18308 if (count)
18310 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18312 /* If main algorithm works on QImode, no epilogue is needed.
18313 For small sizes just don't align anything. */
18314 if (size_needed == 1)
18315 desired_align = align;
18316 else
18317 goto epilogue;
18320 else
18322 label = gen_label_rtx ();
18323 emit_cmp_and_jump_insns (count_exp,
18324 GEN_INT (epilogue_size_needed),
18325 LTU, 0, counter_mode (count_exp), 1, label);
18326 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18327 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18328 else
18329 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18332 if (dynamic_check != -1)
18334 rtx hot_label = gen_label_rtx ();
18335 jump_around_label = gen_label_rtx ();
18336 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18337 LEU, 0, counter_mode (count_exp), 1, hot_label);
18338 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18339 set_storage_via_libcall (dst, count_exp, val_exp, false);
18340 emit_jump (jump_around_label);
18341 emit_label (hot_label);
18344 /* Step 2: Alignment prologue. */
18346 /* Do the expensive promotion once we have branched off the small blocks. */
18347 if (!promoted_val)
18348 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18349 desired_align, align);
18350 gcc_assert (desired_align >= 1 && align >= 1);
18352 if (desired_align > align)
18354 if (align_bytes == 0)
18356 /* Except for the first move in the epilogue, we no longer know
18357 the constant offset in aliasing info. It doesn't seem worth
18358 the pain to maintain it for the first move, so throw away
18359 the info early. */
18360 dst = change_address (dst, BLKmode, destreg);
18361 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18362 desired_align);
18364 else
18366 /* If we know how many bytes need to be stored before dst is
18367 sufficiently aligned, maintain aliasing info accurately. */
18368 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18369 desired_align, align_bytes);
18370 count_exp = plus_constant (count_exp, -align_bytes);
18371 count -= align_bytes;
18373 if (need_zero_guard
18374 && (count < (unsigned HOST_WIDE_INT) size_needed
18375 || (align_bytes == 0
18376 && count < ((unsigned HOST_WIDE_INT) size_needed
18377 + desired_align - align))))
18379 /* It is possible that we copied enough so the main loop will not
18380 execute. */
18381 gcc_assert (size_needed > 1);
18382 if (label == NULL_RTX)
18383 label = gen_label_rtx ();
18384 emit_cmp_and_jump_insns (count_exp,
18385 GEN_INT (size_needed),
18386 LTU, 0, counter_mode (count_exp), 1, label);
18387 if (expected_size == -1
18388 || expected_size < (desired_align - align) / 2 + size_needed)
18389 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18390 else
18391 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18394 if (label && size_needed == 1)
18396 emit_label (label);
18397 LABEL_NUSES (label) = 1;
18398 label = NULL;
18399 promoted_val = val_exp;
18400 epilogue_size_needed = 1;
18402 else if (label == NULL_RTX)
18403 epilogue_size_needed = size_needed;
18405 /* Step 3: Main loop. */
18407 switch (alg)
18409 case libcall:
18410 case no_stringop:
18411 gcc_unreachable ();
18412 case loop_1_byte:
18413 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18414 count_exp, QImode, 1, expected_size);
18415 break;
18416 case loop:
18417 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18418 count_exp, Pmode, 1, expected_size);
18419 break;
18420 case unrolled_loop:
18421 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18422 count_exp, Pmode, 4, expected_size);
18423 break;
18424 case rep_prefix_8_byte:
18425 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18426 DImode, val_exp);
18427 break;
18428 case rep_prefix_4_byte:
18429 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18430 SImode, val_exp);
18431 break;
18432 case rep_prefix_1_byte:
18433 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18434 QImode, val_exp);
18435 break;
18437 /* Properly adjust the offset of the dest memory for aliasing. */
18438 if (CONST_INT_P (count_exp))
18439 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18440 (count / size_needed) * size_needed);
18441 else
18442 dst = change_address (dst, BLKmode, destreg);
18444 /* Step 4: Epilogue to copy the remaining bytes. */
18446 if (label)
18448 /* When the main loop is done, COUNT_EXP might hold the original count,
18449 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18450 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18451 bytes. Compensate if needed. */
18453 if (size_needed < epilogue_size_needed)
18455 tmp =
18456 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18457 GEN_INT (size_needed - 1), count_exp, 1,
18458 OPTAB_DIRECT);
18459 if (tmp != count_exp)
18460 emit_move_insn (count_exp, tmp);
18462 emit_label (label);
18463 LABEL_NUSES (label) = 1;
18465 epilogue:
18466 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18468 if (force_loopy_epilogue)
18469 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18470 epilogue_size_needed);
18471 else
18472 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18473 epilogue_size_needed);
18475 if (jump_around_label)
18476 emit_label (jump_around_label);
18477 return 1;
18480 /* Expand the appropriate insns for doing strlen if not just doing
18481 repnz; scasb
18483 out = result, initialized with the start address
18484 align_rtx = alignment of the address.
18485 scratch = scratch register, initialized with the start address when
18486 not aligned, otherwise undefined
18488 This is just the body. It needs the initializations mentioned above and
18489 some address computing at the end. These things are done in i386.md. */
18491 static void
18492 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
18494 int align;
18495 rtx tmp;
18496 rtx align_2_label = NULL_RTX;
18497 rtx align_3_label = NULL_RTX;
18498 rtx align_4_label = gen_label_rtx ();
18499 rtx end_0_label = gen_label_rtx ();
18500 rtx mem;
18501 rtx tmpreg = gen_reg_rtx (SImode);
18502 rtx scratch = gen_reg_rtx (SImode);
18503 rtx cmp;
18505 align = 0;
18506 if (CONST_INT_P (align_rtx))
18507 align = INTVAL (align_rtx);
18509 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
18511 /* Is there a known alignment and is it less than 4? */
18512 if (align < 4)
18514 rtx scratch1 = gen_reg_rtx (Pmode);
18515 emit_move_insn (scratch1, out);
18516 /* Is there a known alignment and is it not 2? */
18517 if (align != 2)
18519 align_3_label = gen_label_rtx (); /* Label when addr & 3 == 3 */
18520 align_2_label = gen_label_rtx (); /* Label when addr & 3 == 2 */
18522 /* Leave just the 2 lower bits. */
18523 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
18524 NULL_RTX, 0, OPTAB_WIDEN);
18526 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18527 Pmode, 1, align_4_label);
18528 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
18529 Pmode, 1, align_2_label);
18530 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
18531 Pmode, 1, align_3_label);
18533 else
18535 /* Since the alignment is 2, we have to check 2 or 0 bytes;
18536 check whether the address is 4-byte aligned. */
18538 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
18539 NULL_RTX, 0, OPTAB_WIDEN);
18541 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18542 Pmode, 1, align_4_label);
18545 mem = change_address (src, QImode, out);
18547 /* Now compare the bytes. */
18549 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
18550 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
18551 QImode, 1, end_0_label);
18553 /* Increment the address. */
18554 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18556 /* Not needed with an alignment of 2 */
18557 if (align != 2)
18559 emit_label (align_2_label);
18561 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
18562 end_0_label);
18564 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18566 emit_label (align_3_label);
18569 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
18570 end_0_label);
18572 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18575 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
18576 align this loop; it only makes programs bigger and does not
18577 speed them up. */
18578 emit_label (align_4_label);
18580 mem = change_address (src, SImode, out);
18581 emit_move_insn (scratch, mem);
18582 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
18584 /* This formula yields a nonzero result iff one of the bytes is zero.
18585 This saves three branches inside the loop and many cycles. */
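/* Editor's sketch -- not part of the original source: the four insns
   emitted below compute the classic zero-byte test

     tmp = (x - 0x01010101) & ~x & 0x80808080;

   which is nonzero iff some byte of x is zero: subtracting 1 from a
   zero byte borrows into its bit 7, and the ~x factor rejects bytes
   that merely had bit 7 set already.  For example, x == 0x41410041
   yields tmp == 0x00008000, flagging the zero byte.  */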
18587 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
18588 emit_insn (gen_one_cmplsi2 (scratch, scratch));
18589 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
18590 emit_insn (gen_andsi3 (tmpreg, tmpreg,
18591 gen_int_mode (0x80808080, SImode)));
18592 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
18593 align_4_label);
18595 if (TARGET_CMOVE)
18597 rtx reg = gen_reg_rtx (SImode);
18598 rtx reg2 = gen_reg_rtx (Pmode);
18599 emit_move_insn (reg, tmpreg);
18600 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
18602 /* If zero is not in the first two bytes, move two bytes forward. */
18603 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
18604 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18605 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
18606 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
18607 gen_rtx_IF_THEN_ELSE (SImode, tmp,
18608 reg,
18609 tmpreg)));
18610 /* Emit lea manually to avoid clobbering of flags. */
18611 emit_insn (gen_rtx_SET (SImode, reg2,
18612 gen_rtx_PLUS (Pmode, out, const2_rtx)));
18614 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18615 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
18616 emit_insn (gen_rtx_SET (VOIDmode, out,
18617 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
18618 reg2,
18619 out)));
18622 else
18624 rtx end_2_label = gen_label_rtx ();
18625 /* Is zero in the first two bytes? */
18627 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
18628 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18629 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
18630 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
18631 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
18632 pc_rtx);
18633 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
18634 JUMP_LABEL (tmp) = end_2_label;
18636 /* Not in the first two. Move two bytes forward. */
18637 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
18638 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
18640 emit_label (end_2_label);
18644 /* Avoid branch in fixing the byte. */
18645 tmpreg = gen_lowpart (QImode, tmpreg);
18646 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
18647 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
18648 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), cmp));
18650 emit_label (end_0_label);
18653 /* Expand strlen. */
18656 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
18658 rtx addr, scratch1, scratch2, scratch3, scratch4;
18660 /* The generic case of the strlen expander is long. Avoid expanding it
18661 unless TARGET_INLINE_ALL_STRINGOPS. */
18663 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
18664 && !TARGET_INLINE_ALL_STRINGOPS
18665 && !optimize_insn_for_size_p ()
18666 && (!CONST_INT_P (align) || INTVAL (align) < 4))
18667 return 0;
18669 addr = force_reg (Pmode, XEXP (src, 0));
18670 scratch1 = gen_reg_rtx (Pmode);
18672 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
18673 && !optimize_insn_for_size_p ())
18675 /* Well, it seems that some optimizer does not combine a call like
18676 foo (strlen (bar), strlen (bar));
18677 when the move and the subtraction are done here. It does calculate
18678 the length just once when these instructions are done inside
18679 output_strlen_unroll(). But I think that since &bar[strlen (bar)] is
18680 often used and I use one fewer register for the lifetime of
18681 output_strlen_unroll(), this is better. */
18683 emit_move_insn (out, addr);
18685 ix86_expand_strlensi_unroll_1 (out, src, align);
18687 /* strlensi_unroll_1 returns the address of the zero at the end of
18688 the string, like memchr(), so compute the length by subtracting
18689 the start address. */
18690 emit_insn ((*ix86_gen_sub3) (out, out, addr));
18692 else
18694 rtx unspec;
18696 /* Can't use this if the user has appropriated eax, ecx, or edi. */
18697 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
18698 return false;
18700 scratch2 = gen_reg_rtx (Pmode);
18701 scratch3 = gen_reg_rtx (Pmode);
18702 scratch4 = force_reg (Pmode, constm1_rtx);
18704 emit_move_insn (scratch3, addr);
18705 eoschar = force_reg (QImode, eoschar);
18707 src = replace_equiv_address_nv (src, scratch3);
18709 /* If .md starts supporting :P, this can be done in .md. */
18710 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
18711 scratch4), UNSPEC_SCAS);
18712 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
18713 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
18714 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
18716 return 1;
18719 /* For a given symbol (function), construct code to compute the address of its
18720 PLT entry in the large x86-64 PIC model. */
18722 construct_plt_address (rtx symbol)
18724 rtx tmp = gen_reg_rtx (Pmode);
18725 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
18727 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
18728 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
18730 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
18731 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
18732 return tmp;
18735 void
18736 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
18737 rtx callarg2,
18738 rtx pop, int sibcall)
18740 rtx use = NULL, call;
18742 if (pop == const0_rtx)
18743 pop = NULL;
18744 gcc_assert (!TARGET_64BIT || !pop);
18746 if (TARGET_MACHO && !TARGET_64BIT)
18748 #if TARGET_MACHO
18749 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
18750 fnaddr = machopic_indirect_call_target (fnaddr);
18751 #endif
18753 else
18755 /* Static functions and indirect calls don't need the pic register. */
18756 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
18757 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
18758 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
18759 use_reg (&use, pic_offset_table_rtx);
18762 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
18764 rtx al = gen_rtx_REG (QImode, AX_REG);
18765 emit_move_insn (al, callarg2);
18766 use_reg (&use, al);
18769 if (ix86_cmodel == CM_LARGE_PIC
18770 && GET_CODE (fnaddr) == MEM
18771 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
18772 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
18773 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
18774 else if (sibcall
18775 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
18776 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
18778 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
18779 fnaddr = gen_rtx_MEM (QImode, fnaddr);
18782 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
18783 if (retval)
18784 call = gen_rtx_SET (VOIDmode, retval, call);
18785 if (pop)
18787 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
18788 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
18789 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
18791 if (TARGET_64BIT
18792 && ix86_cfun_abi () == MS_ABI
18793 && (!callarg2 || INTVAL (callarg2) != -2))
18795 /* We need to represent that SI and DI registers are clobbered
18796 by SYSV calls. */
18797 static int clobbered_registers[] = {
18798 XMM6_REG, XMM7_REG, XMM8_REG,
18799 XMM9_REG, XMM10_REG, XMM11_REG,
18800 XMM12_REG, XMM13_REG, XMM14_REG,
18801 XMM15_REG, SI_REG, DI_REG
18803 unsigned int i;
18804 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
18805 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
18806 UNSPEC_MS_TO_SYSV_CALL);
18808 vec[0] = call;
18809 vec[1] = unspec;
18810 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
18811 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
18812 ? TImode : DImode,
18813 gen_rtx_REG
18814 (SSE_REGNO_P (clobbered_registers[i])
18815 ? TImode : DImode,
18816 clobbered_registers[i]));
18818 call = gen_rtx_PARALLEL (VOIDmode,
18819 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
18820 + 2, vec));
18823 call = emit_call_insn (call);
18824 if (use)
18825 CALL_INSN_FUNCTION_USAGE (call) = use;
18829 /* Clear stack slot assignments remembered from previous functions.
18830 This is called from INIT_EXPANDERS once before RTL is emitted for each
18831 function. */
18833 static struct machine_function *
18834 ix86_init_machine_status (void)
18836 struct machine_function *f;
18838 f = GGC_CNEW (struct machine_function);
18839 f->use_fast_prologue_epilogue_nregs = -1;
18840 f->tls_descriptor_call_expanded_p = 0;
18841 f->call_abi = DEFAULT_ABI;
18843 return f;
18846 /* Return a MEM corresponding to a stack slot with mode MODE.
18847 Allocate a new slot if necessary.
18849 The RTL for a function can have several slots available: N is
18850 which slot to use. */
18853 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
18855 struct stack_local_entry *s;
18857 gcc_assert (n < MAX_386_STACK_LOCALS);
18859 /* Virtual slot is valid only before vregs are instantiated. */
18860 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
18862 for (s = ix86_stack_locals; s; s = s->next)
18863 if (s->mode == mode && s->n == n)
18864 return copy_rtx (s->rtl);
18866 s = (struct stack_local_entry *)
18867 ggc_alloc (sizeof (struct stack_local_entry));
18868 s->n = n;
18869 s->mode = mode;
18870 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
18872 s->next = ix86_stack_locals;
18873 ix86_stack_locals = s;
18874 return s->rtl;
18877 /* Construct the SYMBOL_REF for the tls_get_addr function. */
18879 static GTY(()) rtx ix86_tls_symbol;
18881 ix86_tls_get_addr (void)
18884 if (!ix86_tls_symbol)
18886 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
18887 (TARGET_ANY_GNU_TLS
18888 && !TARGET_64BIT)
18889 ? "___tls_get_addr"
18890 : "__tls_get_addr");
18893 return ix86_tls_symbol;
18896 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
18898 static GTY(()) rtx ix86_tls_module_base_symbol;
18900 ix86_tls_module_base (void)
18903 if (!ix86_tls_module_base_symbol)
18905 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
18906 "_TLS_MODULE_BASE_");
18907 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
18908 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
18911 return ix86_tls_module_base_symbol;
18914 /* Calculate the length of the memory address in the instruction
18915 encoding. Does not include the one-byte modrm, opcode, or prefix. */
18918 memory_address_length (rtx addr)
18920 struct ix86_address parts;
18921 rtx base, index, disp;
18922 int len;
18923 int ok;
18925 if (GET_CODE (addr) == PRE_DEC
18926 || GET_CODE (addr) == POST_INC
18927 || GET_CODE (addr) == PRE_MODIFY
18928 || GET_CODE (addr) == POST_MODIFY)
18929 return 0;
18931 ok = ix86_decompose_address (addr, &parts);
18932 gcc_assert (ok);
18934 if (parts.base && GET_CODE (parts.base) == SUBREG)
18935 parts.base = SUBREG_REG (parts.base);
18936 if (parts.index && GET_CODE (parts.index) == SUBREG)
18937 parts.index = SUBREG_REG (parts.index);
18939 base = parts.base;
18940 index = parts.index;
18941 disp = parts.disp;
18942 len = 0;
18944 /* Rule of thumb:
18945 - esp as the base always wants an index,
18946 - ebp as the base always wants a displacement. */
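/* Editor's note (illustrative): the lengths computed below are on top
   of the mandatory modrm byte, e.g.

     mov (%ebx), %eax          -> 0 (modrm only)
     mov (%esp), %eax          -> 1 (SIB byte)
     mov (%ebp), %eax          -> 1 (disp8)
     mov 16(%ebx,%esi), %eax   -> 2 (SIB + disp8)
     mov 0x12345678, %eax      -> 4 (disp32, no base or index)  */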
18948 /* Register Indirect. */
18949 if (base && !index && !disp)
18951 /* esp (for its index) and ebp (for its displacement) need
18952 the two-byte modrm form. */
18953 if (addr == stack_pointer_rtx
18954 || addr == arg_pointer_rtx
18955 || addr == frame_pointer_rtx
18956 || addr == hard_frame_pointer_rtx)
18957 len = 1;
18960 /* Direct Addressing. */
18961 else if (disp && !base && !index)
18962 len = 4;
18964 else
18966 /* Find the length of the displacement constant. */
18967 if (disp)
18969 if (base && satisfies_constraint_K (disp))
18970 len = 1;
18971 else
18972 len = 4;
18974 /* ebp always wants a displacement. */
18975 else if (base == hard_frame_pointer_rtx)
18976 len = 1;
18978 /* An index requires the two-byte modrm form.... */
18979 if (index
18980 /* ...like esp, which always wants an index. */
18981 || base == stack_pointer_rtx
18982 || base == arg_pointer_rtx
18983 || base == frame_pointer_rtx)
18984 len += 1;
18987 return len;
18990 /* Compute the default value for the "length_immediate" attribute. When SHORTFORM
18991 is set, expect that the insn has an 8-bit immediate alternative. */
18993 ix86_attr_length_immediate_default (rtx insn, int shortform)
18995 int len = 0;
18996 int i;
18997 extract_insn_cached (insn);
18998 for (i = recog_data.n_operands - 1; i >= 0; --i)
18999 if (CONSTANT_P (recog_data.operand[i]))
19001 gcc_assert (!len);
19002 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
19003 len = 1;
19004 else
19006 switch (get_attr_mode (insn))
19008 case MODE_QI:
19009 len+=1;
19010 break;
19011 case MODE_HI:
19012 len+=2;
19013 break;
19014 case MODE_SI:
19015 len+=4;
19016 break;
19017 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19018 case MODE_DI:
19019 len+=4;
19020 break;
19021 default:
19022 fatal_insn ("unknown insn mode", insn);
19026 return len;
19028 /* Compute the default value for the "length_address" attribute. */
19030 ix86_attr_length_address_default (rtx insn)
19032 int i;
19034 if (get_attr_type (insn) == TYPE_LEA)
19036 rtx set = PATTERN (insn);
19038 if (GET_CODE (set) == PARALLEL)
19039 set = XVECEXP (set, 0, 0);
19041 gcc_assert (GET_CODE (set) == SET);
19043 return memory_address_length (SET_SRC (set));
19046 extract_insn_cached (insn);
19047 for (i = recog_data.n_operands - 1; i >= 0; --i)
19048 if (MEM_P (recog_data.operand[i]))
19050 return memory_address_length (XEXP (recog_data.operand[i], 0));
19051 break;
19053 return 0;
19056 /* Compute the default value for the "length_vex" attribute. It includes
19057 the 2- or 3-byte VEX prefix and 1 opcode byte. */
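/* Editor's note (illustrative): "vaddps %xmm1, %xmm2, %xmm0" can use
   the two-byte VEX prefix (C5 ..), giving 2 + 1 == 3; a DImode operand
   (VEX.W) or an extended register mentioned in a memory operand
   (REX.X/REX.B) forces the three-byte form (C4 .. ..), giving
   3 + 1 == 4, as computed below.  */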
19060 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19061 int has_vex_w)
19063 int i;
19065 /* Only the 0f opcode can use the 2-byte VEX prefix; the VEX W bit requires
19066 the 3-byte VEX prefix. */
19067 if (!has_0f_opcode || has_vex_w)
19068 return 3 + 1;
19070 /* We can always use 2 byte VEX prefix in 32bit. */
19071 if (!TARGET_64BIT)
19072 return 2 + 1;
19074 extract_insn_cached (insn);
19076 for (i = recog_data.n_operands - 1; i >= 0; --i)
19077 if (REG_P (recog_data.operand[i]))
19079 /* REX.W bit uses 3 byte VEX prefix. */
19080 if (GET_MODE (recog_data.operand[i]) == DImode)
19081 return 3 + 1;
19083 else
19085 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19086 if (MEM_P (recog_data.operand[i])
19087 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19088 return 3 + 1;
19091 return 2 + 1;
19094 /* Return the maximum number of instructions a cpu can issue. */
19096 static int
19097 ix86_issue_rate (void)
19099 switch (ix86_tune)
19101 case PROCESSOR_PENTIUM:
19102 case PROCESSOR_K6:
19103 return 2;
19105 case PROCESSOR_PENTIUMPRO:
19106 case PROCESSOR_PENTIUM4:
19107 case PROCESSOR_ATHLON:
19108 case PROCESSOR_K8:
19109 case PROCESSOR_AMDFAM10:
19110 case PROCESSOR_NOCONA:
19111 case PROCESSOR_GENERIC32:
19112 case PROCESSOR_GENERIC64:
19113 return 3;
19115 case PROCESSOR_CORE2:
19116 return 4;
19118 default:
19119 return 1;
19123 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
19124 by DEP_INSN and nothing else set by DEP_INSN. */
19126 static int
19127 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19129 rtx set, set2;
19131 /* Simplify the test for uninteresting insns. */
19132 if (insn_type != TYPE_SETCC
19133 && insn_type != TYPE_ICMOV
19134 && insn_type != TYPE_FCMOV
19135 && insn_type != TYPE_IBR)
19136 return 0;
19138 if ((set = single_set (dep_insn)) != 0)
19140 set = SET_DEST (set);
19141 set2 = NULL_RTX;
19143 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19144 && XVECLEN (PATTERN (dep_insn), 0) == 2
19145 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19146 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19148 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19149 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19151 else
19152 return 0;
19154 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19155 return 0;
19157 /* This test is true if the dependent insn reads the flags but
19158 not any other potentially set register. */
19159 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19160 return 0;
19162 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19163 return 0;
19165 return 1;
19168 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
19169 address with operands set by DEP_INSN. */
19171 static int
19172 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19174 rtx addr;
19176 if (insn_type == TYPE_LEA
19177 && TARGET_PENTIUM)
19179 addr = PATTERN (insn);
19181 if (GET_CODE (addr) == PARALLEL)
19182 addr = XVECEXP (addr, 0, 0);
19184 gcc_assert (GET_CODE (addr) == SET);
19186 addr = SET_SRC (addr);
19188 else
19190 int i;
19191 extract_insn_cached (insn);
19192 for (i = recog_data.n_operands - 1; i >= 0; --i)
19193 if (MEM_P (recog_data.operand[i]))
19195 addr = XEXP (recog_data.operand[i], 0);
19196 goto found;
19198 return 0;
19199 found:;
19202 return modified_in_p (addr, dep_insn);
19205 static int
19206 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19208 enum attr_type insn_type, dep_insn_type;
19209 enum attr_memory memory;
19210 rtx set, set2;
19211 int dep_insn_code_number;
19213 /* Anti and output dependencies have zero cost on all CPUs. */
19214 if (REG_NOTE_KIND (link) != 0)
19215 return 0;
19217 dep_insn_code_number = recog_memoized (dep_insn);
19219 /* If we can't recognize the insns, we can't really do anything. */
19220 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19221 return cost;
19223 insn_type = get_attr_type (insn);
19224 dep_insn_type = get_attr_type (dep_insn);
19226 switch (ix86_tune)
19228 case PROCESSOR_PENTIUM:
19229 /* Address Generation Interlock adds a cycle of latency. */
19230 if (ix86_agi_dependent (insn, dep_insn, insn_type))
19231 cost += 1;
19233 /* ??? Compares pair with jump/setcc. */
19234 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19235 cost = 0;
19237 /* Floating point stores require value to be ready one cycle earlier. */
19238 if (insn_type == TYPE_FMOV
19239 && get_attr_memory (insn) == MEMORY_STORE
19240 && !ix86_agi_dependent (insn, dep_insn, insn_type))
19241 cost += 1;
19242 break;
19244 case PROCESSOR_PENTIUMPRO:
19245 memory = get_attr_memory (insn);
19247 /* INT->FP conversion is expensive. */
19248 if (get_attr_fp_int_src (dep_insn))
19249 cost += 5;
19251 /* There is one cycle extra latency between an FP op and a store. */
19252 if (insn_type == TYPE_FMOV
19253 && (set = single_set (dep_insn)) != NULL_RTX
19254 && (set2 = single_set (insn)) != NULL_RTX
19255 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19256 && MEM_P (SET_DEST (set2)))
19257 cost += 1;
19259 /* Show the ability of the reorder buffer to hide the latency of a load by
19260 executing it in parallel with the previous instruction, in case the
19261 previous instruction is not needed to compute the address. */
19262 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19263 && !ix86_agi_dependent (insn, dep_insn, insn_type))
19265 /* Claim moves take one cycle, as the core can issue one load
19266 at a time and the next load can start a cycle later. */
19267 if (dep_insn_type == TYPE_IMOV
19268 || dep_insn_type == TYPE_FMOV)
19269 cost = 1;
19270 else if (cost > 1)
19271 cost--;
19273 break;
19275 case PROCESSOR_K6:
19276 memory = get_attr_memory (insn);
19278 /* The esp dependency is resolved before the instruction is really
19279 finished. */
19280 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19281 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19282 return 1;
19284 /* INT->FP conversion is expensive. */
19285 if (get_attr_fp_int_src (dep_insn))
19286 cost += 5;
19288 /* Show the ability of the reorder buffer to hide the latency of a load by
19289 executing it in parallel with the previous instruction, in case the
19290 previous instruction is not needed to compute the address. */
19291 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19292 && !ix86_agi_dependent (insn, dep_insn, insn_type))
19294 /* Claim moves take one cycle, as the core can issue one load
19295 at a time and the next load can start a cycle later. */
19296 if (dep_insn_type == TYPE_IMOV
19297 || dep_insn_type == TYPE_FMOV)
19298 cost = 1;
19299 else if (cost > 2)
19300 cost -= 2;
19301 else
19302 cost = 1;
19304 break;
19306 case PROCESSOR_ATHLON:
19307 case PROCESSOR_K8:
19308 case PROCESSOR_AMDFAM10:
19309 case PROCESSOR_GENERIC32:
19310 case PROCESSOR_GENERIC64:
19311 memory = get_attr_memory (insn);
19313 /* Show the ability of the reorder buffer to hide the latency of a load by
19314 executing it in parallel with the previous instruction, in case the
19315 previous instruction is not needed to compute the address. */
19316 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19317 && !ix86_agi_dependent (insn, dep_insn, insn_type))
19319 enum attr_unit unit = get_attr_unit (insn);
19320 int loadcost = 3;
19322 /* Because of the difference between the length of integer and
19323 floating unit pipeline preparation stages, the memory operands
19324 for floating point are cheaper.
19326 ??? For Athlon the difference is most probably 2. */
19327 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19328 loadcost = 3;
19329 else
19330 loadcost = TARGET_ATHLON ? 2 : 0;
19332 if (cost >= loadcost)
19333 cost -= loadcost;
19334 else
19335 cost = 0;
19338 default:
19339 break;
19342 return cost;
19345 /* How many alternative schedules to try. This should be as wide as the
19346 scheduling freedom in the DFA, but no wider. Making this value too
19347 large results in extra work for the scheduler. */
19349 static int
19350 ia32_multipass_dfa_lookahead (void)
19352 switch (ix86_tune)
19354 case PROCESSOR_PENTIUM:
19355 return 2;
19357 case PROCESSOR_PENTIUMPRO:
19358 case PROCESSOR_K6:
19359 return 1;
19361 default:
19362 return 0;
19367 /* Compute the alignment given to a constant that is being placed in memory.
19368 EXP is the constant and ALIGN is the alignment that the object would
19369 ordinarily have.
19370 The value of this function is used instead of that alignment to align
19371 the object. */
19374 ix86_constant_alignment (tree exp, int align)
19376 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19377 || TREE_CODE (exp) == INTEGER_CST)
19379 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19380 return 64;
19381 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19382 return 128;
19384 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19385 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19386 return BITS_PER_WORD;
19388 return align;
19391 /* Compute the alignment for a static variable.
19392 TYPE is the data type, and ALIGN is the alignment that
19393 the object would ordinarily have. The value of this function is used
19394 instead of that alignment to align the object. */
19397 ix86_data_alignment (tree type, int align)
19399 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
19401 if (AGGREGATE_TYPE_P (type)
19402 && TYPE_SIZE (type)
19403 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19404 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
19405 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
19406 && align < max_align)
19407 align = max_align;
19409 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
19410 to a 16-byte boundary. */
19411 if (TARGET_64BIT)
19413 if (AGGREGATE_TYPE_P (type)
19414 && TYPE_SIZE (type)
19415 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19416 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
19417 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19418 return 128;
19421 if (TREE_CODE (type) == ARRAY_TYPE)
19423 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19424 return 64;
19425 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19426 return 128;
19428 else if (TREE_CODE (type) == COMPLEX_TYPE)
19431 if (TYPE_MODE (type) == DCmode && align < 64)
19432 return 64;
19433 if ((TYPE_MODE (type) == XCmode
19434 || TYPE_MODE (type) == TCmode) && align < 128)
19435 return 128;
19437 else if ((TREE_CODE (type) == RECORD_TYPE
19438 || TREE_CODE (type) == UNION_TYPE
19439 || TREE_CODE (type) == QUAL_UNION_TYPE)
19440 && TYPE_FIELDS (type))
19442 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
19443 return 64;
19444 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
19445 return 128;
19447 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
19448 || TREE_CODE (type) == INTEGER_TYPE)
19450 if (TYPE_MODE (type) == DFmode && align < 64)
19451 return 64;
19452 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
19453 return 128;
19456 return align;
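/* Illustrative note (annotation, not in the upstream source): under the
   64-bit rule above, a global such as "char buf[32]" (256 bits, an
   aggregate of at least 128 bits) is bumped to 128-bit (16-byte)
   alignment unless it already has stricter alignment.  */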
19459 /* Compute the alignment for a local variable or a stack slot. EXP is
19460 the data type or decl itself, MODE is the widest mode available and
19461 ALIGN is the alignment that the object would ordinarily have. The
19462 value of this macro is used instead of that alignment to align the
19463 object. */
19465 unsigned int
19466 ix86_local_alignment (tree exp, enum machine_mode mode,
19467 unsigned int align)
19469 tree type, decl;
19471 if (exp && DECL_P (exp))
19473 type = TREE_TYPE (exp);
19474 decl = exp;
19476 else
19478 type = exp;
19479 decl = NULL;
19482 /* Don't do dynamic stack realignment for long long objects with
19483 -mpreferred-stack-boundary=2. */
19484 if (!TARGET_64BIT
19485 && align == 64
19486 && ix86_preferred_stack_boundary < 64
19487 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
19488 && (!type || !TYPE_USER_ALIGN (type))
19489 && (!decl || !DECL_USER_ALIGN (decl)))
19490 align = 32;
19492 /* If TYPE is NULL, we are allocating a stack slot for a caller-save
19493 register in MODE. We will return the largest alignment of XF
19494 and DF. */
19495 if (!type)
19497 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
19498 align = GET_MODE_ALIGNMENT (DFmode);
19499 return align;
19502 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
19503 to a 16-byte boundary. */
19504 if (TARGET_64BIT)
19506 if (AGGREGATE_TYPE_P (type)
19507 && TYPE_SIZE (type)
19508 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19509 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
19510 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19511 return 128;
19513 if (TREE_CODE (type) == ARRAY_TYPE)
19515 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19516 return 64;
19517 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19518 return 128;
19520 else if (TREE_CODE (type) == COMPLEX_TYPE)
19522 if (TYPE_MODE (type) == DCmode && align < 64)
19523 return 64;
19524 if ((TYPE_MODE (type) == XCmode
19525 || TYPE_MODE (type) == TCmode) && align < 128)
19526 return 128;
19528 else if ((TREE_CODE (type) == RECORD_TYPE
19529 || TREE_CODE (type) == UNION_TYPE
19530 || TREE_CODE (type) == QUAL_UNION_TYPE)
19531 && TYPE_FIELDS (type))
19533 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
19534 return 64;
19535 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
19536 return 128;
19538 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
19539 || TREE_CODE (type) == INTEGER_TYPE)
19542 if (TYPE_MODE (type) == DFmode && align < 64)
19543 return 64;
19544 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
19545 return 128;
19547 return align;
19550 /* Compute the minimum required alignment for dynamic stack realignment
19551 purposes for a local variable, parameter or a stack slot. EXP is
19552 the data type or decl itself, MODE is its mode and ALIGN is the
19553 alignment that the object would ordinarily have. */
19555 unsigned int
19556 ix86_minimum_alignment (tree exp, enum machine_mode mode,
19557 unsigned int align)
19559 tree type, decl;
19561 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
19562 return align;
19564 if (exp && DECL_P (exp))
19566 type = TREE_TYPE (exp);
19567 decl = exp;
19569 else
19571 type = exp;
19572 decl = NULL;
19575 /* Don't do dynamic stack realignment for long long objects with
19576 -mpreferred-stack-boundary=2. */
19577 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
19578 && (!type || !TYPE_USER_ALIGN (type))
19579 && (!decl || !DECL_USER_ALIGN (decl)))
19580 return 32;
19582 return align;
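/* Illustrative note (annotation, not in the upstream source): with
   -m32 -mpreferred-stack-boundary=2, a local "long long" (DImode)
   without a user-specified alignment reports a 32-bit minimum here,
   so such a variable alone never forces dynamic stack realignment.  */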
19585 /* Emit RTL insns to initialize the variable parts of a trampoline.
19586 FNADDR is an RTX for the address of the function's pure code.
19587 CXT is an RTX for the static chain value for the function. */
19588 void
19589 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
19591 if (!TARGET_64BIT)
19593 /* Compute offset from the end of the jmp to the target function. */
19594 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
19595 plus_constant (tramp, 10),
19596 NULL_RTX, 1, OPTAB_DIRECT);
19597 emit_move_insn (gen_rtx_MEM (QImode, tramp),
19598 gen_int_mode (0xb9, QImode));
19599 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
19600 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
19601 gen_int_mode (0xe9, QImode));
19602 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
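/* Illustrative sketch (annotation, not in the upstream source): the ten
   bytes emitted above form

       tramp + 0:  b9 <cxt:4>    movl  $<cxt>, %ecx
       tramp + 5:  e9 <disp:4>   jmp   <fnaddr>

   where <disp> = <fnaddr> - (tramp + 10), i.e. relative to the end of
   the jmp as computed for DISP above.  */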
19604 else
19606 int offset = 0;
19607 /* Try to load the address using the shorter movl instead of movabs.
19608 We may want to support movq for kernel mode, but the kernel does not
19609 use trampolines at the moment. */
19610 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
19612 fnaddr = copy_to_mode_reg (DImode, fnaddr);
19613 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19614 gen_int_mode (0xbb41, HImode));
19615 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
19616 gen_lowpart (SImode, fnaddr));
19617 offset += 6;
19619 else
19621 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19622 gen_int_mode (0xbb49, HImode));
19623 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
19624 fnaddr);
19625 offset += 10;
19627 /* Load the static chain into r10 using movabs. */
19628 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19629 gen_int_mode (0xba49, HImode));
19630 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
19631 cxt);
19632 offset += 10;
19633 /* Jump to r11. */
19634 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19635 gen_int_mode (0xff49, HImode));
19636 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
19637 gen_int_mode (0xe3, QImode));
19638 offset += 3;
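/* Illustrative sketch (annotation, not in the upstream source): decoded,
   the 64-bit trampoline built above is at most 23 bytes:

       41 bb <fnaddr:4>   movl   $<fnaddr>, %r11d   (zero-extended form)
    or 49 bb <fnaddr:8>   movabs $<fnaddr>, %r11
       49 ba <cxt:8>      movabs $<cxt>, %r10
       49 ff e3           jmp    *%r11

   matching the 0xbb41/0xbb49, 0xba49 and 0xff49/0xe3 constants stored
   little-endian above.  */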
19639 gcc_assert (offset <= TRAMPOLINE_SIZE);
19642 #ifdef ENABLE_EXECUTE_STACK
19643 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
19644 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
19645 #endif
19648 /* Codes for all the SSE/MMX builtins. */
19649 enum ix86_builtins
19651 IX86_BUILTIN_ADDPS,
19652 IX86_BUILTIN_ADDSS,
19653 IX86_BUILTIN_DIVPS,
19654 IX86_BUILTIN_DIVSS,
19655 IX86_BUILTIN_MULPS,
19656 IX86_BUILTIN_MULSS,
19657 IX86_BUILTIN_SUBPS,
19658 IX86_BUILTIN_SUBSS,
19660 IX86_BUILTIN_CMPEQPS,
19661 IX86_BUILTIN_CMPLTPS,
19662 IX86_BUILTIN_CMPLEPS,
19663 IX86_BUILTIN_CMPGTPS,
19664 IX86_BUILTIN_CMPGEPS,
19665 IX86_BUILTIN_CMPNEQPS,
19666 IX86_BUILTIN_CMPNLTPS,
19667 IX86_BUILTIN_CMPNLEPS,
19668 IX86_BUILTIN_CMPNGTPS,
19669 IX86_BUILTIN_CMPNGEPS,
19670 IX86_BUILTIN_CMPORDPS,
19671 IX86_BUILTIN_CMPUNORDPS,
19672 IX86_BUILTIN_CMPEQSS,
19673 IX86_BUILTIN_CMPLTSS,
19674 IX86_BUILTIN_CMPLESS,
19675 IX86_BUILTIN_CMPNEQSS,
19676 IX86_BUILTIN_CMPNLTSS,
19677 IX86_BUILTIN_CMPNLESS,
19678 IX86_BUILTIN_CMPNGTSS,
19679 IX86_BUILTIN_CMPNGESS,
19680 IX86_BUILTIN_CMPORDSS,
19681 IX86_BUILTIN_CMPUNORDSS,
19683 IX86_BUILTIN_COMIEQSS,
19684 IX86_BUILTIN_COMILTSS,
19685 IX86_BUILTIN_COMILESS,
19686 IX86_BUILTIN_COMIGTSS,
19687 IX86_BUILTIN_COMIGESS,
19688 IX86_BUILTIN_COMINEQSS,
19689 IX86_BUILTIN_UCOMIEQSS,
19690 IX86_BUILTIN_UCOMILTSS,
19691 IX86_BUILTIN_UCOMILESS,
19692 IX86_BUILTIN_UCOMIGTSS,
19693 IX86_BUILTIN_UCOMIGESS,
19694 IX86_BUILTIN_UCOMINEQSS,
19696 IX86_BUILTIN_CVTPI2PS,
19697 IX86_BUILTIN_CVTPS2PI,
19698 IX86_BUILTIN_CVTSI2SS,
19699 IX86_BUILTIN_CVTSI642SS,
19700 IX86_BUILTIN_CVTSS2SI,
19701 IX86_BUILTIN_CVTSS2SI64,
19702 IX86_BUILTIN_CVTTPS2PI,
19703 IX86_BUILTIN_CVTTSS2SI,
19704 IX86_BUILTIN_CVTTSS2SI64,
19706 IX86_BUILTIN_MAXPS,
19707 IX86_BUILTIN_MAXSS,
19708 IX86_BUILTIN_MINPS,
19709 IX86_BUILTIN_MINSS,
19711 IX86_BUILTIN_LOADUPS,
19712 IX86_BUILTIN_STOREUPS,
19713 IX86_BUILTIN_MOVSS,
19715 IX86_BUILTIN_MOVHLPS,
19716 IX86_BUILTIN_MOVLHPS,
19717 IX86_BUILTIN_LOADHPS,
19718 IX86_BUILTIN_LOADLPS,
19719 IX86_BUILTIN_STOREHPS,
19720 IX86_BUILTIN_STORELPS,
19722 IX86_BUILTIN_MASKMOVQ,
19723 IX86_BUILTIN_MOVMSKPS,
19724 IX86_BUILTIN_PMOVMSKB,
19726 IX86_BUILTIN_MOVNTPS,
19727 IX86_BUILTIN_MOVNTQ,
19729 IX86_BUILTIN_LOADDQU,
19730 IX86_BUILTIN_STOREDQU,
19732 IX86_BUILTIN_PACKSSWB,
19733 IX86_BUILTIN_PACKSSDW,
19734 IX86_BUILTIN_PACKUSWB,
19736 IX86_BUILTIN_PADDB,
19737 IX86_BUILTIN_PADDW,
19738 IX86_BUILTIN_PADDD,
19739 IX86_BUILTIN_PADDQ,
19740 IX86_BUILTIN_PADDSB,
19741 IX86_BUILTIN_PADDSW,
19742 IX86_BUILTIN_PADDUSB,
19743 IX86_BUILTIN_PADDUSW,
19744 IX86_BUILTIN_PSUBB,
19745 IX86_BUILTIN_PSUBW,
19746 IX86_BUILTIN_PSUBD,
19747 IX86_BUILTIN_PSUBQ,
19748 IX86_BUILTIN_PSUBSB,
19749 IX86_BUILTIN_PSUBSW,
19750 IX86_BUILTIN_PSUBUSB,
19751 IX86_BUILTIN_PSUBUSW,
19753 IX86_BUILTIN_PAND,
19754 IX86_BUILTIN_PANDN,
19755 IX86_BUILTIN_POR,
19756 IX86_BUILTIN_PXOR,
19758 IX86_BUILTIN_PAVGB,
19759 IX86_BUILTIN_PAVGW,
19761 IX86_BUILTIN_PCMPEQB,
19762 IX86_BUILTIN_PCMPEQW,
19763 IX86_BUILTIN_PCMPEQD,
19764 IX86_BUILTIN_PCMPGTB,
19765 IX86_BUILTIN_PCMPGTW,
19766 IX86_BUILTIN_PCMPGTD,
19768 IX86_BUILTIN_PMADDWD,
19770 IX86_BUILTIN_PMAXSW,
19771 IX86_BUILTIN_PMAXUB,
19772 IX86_BUILTIN_PMINSW,
19773 IX86_BUILTIN_PMINUB,
19775 IX86_BUILTIN_PMULHUW,
19776 IX86_BUILTIN_PMULHW,
19777 IX86_BUILTIN_PMULLW,
19779 IX86_BUILTIN_PSADBW,
19780 IX86_BUILTIN_PSHUFW,
19782 IX86_BUILTIN_PSLLW,
19783 IX86_BUILTIN_PSLLD,
19784 IX86_BUILTIN_PSLLQ,
19785 IX86_BUILTIN_PSRAW,
19786 IX86_BUILTIN_PSRAD,
19787 IX86_BUILTIN_PSRLW,
19788 IX86_BUILTIN_PSRLD,
19789 IX86_BUILTIN_PSRLQ,
19790 IX86_BUILTIN_PSLLWI,
19791 IX86_BUILTIN_PSLLDI,
19792 IX86_BUILTIN_PSLLQI,
19793 IX86_BUILTIN_PSRAWI,
19794 IX86_BUILTIN_PSRADI,
19795 IX86_BUILTIN_PSRLWI,
19796 IX86_BUILTIN_PSRLDI,
19797 IX86_BUILTIN_PSRLQI,
19799 IX86_BUILTIN_PUNPCKHBW,
19800 IX86_BUILTIN_PUNPCKHWD,
19801 IX86_BUILTIN_PUNPCKHDQ,
19802 IX86_BUILTIN_PUNPCKLBW,
19803 IX86_BUILTIN_PUNPCKLWD,
19804 IX86_BUILTIN_PUNPCKLDQ,
19806 IX86_BUILTIN_SHUFPS,
19808 IX86_BUILTIN_RCPPS,
19809 IX86_BUILTIN_RCPSS,
19810 IX86_BUILTIN_RSQRTPS,
19811 IX86_BUILTIN_RSQRTPS_NR,
19812 IX86_BUILTIN_RSQRTSS,
19813 IX86_BUILTIN_RSQRTF,
19814 IX86_BUILTIN_SQRTPS,
19815 IX86_BUILTIN_SQRTPS_NR,
19816 IX86_BUILTIN_SQRTSS,
19818 IX86_BUILTIN_UNPCKHPS,
19819 IX86_BUILTIN_UNPCKLPS,
19821 IX86_BUILTIN_ANDPS,
19822 IX86_BUILTIN_ANDNPS,
19823 IX86_BUILTIN_ORPS,
19824 IX86_BUILTIN_XORPS,
19826 IX86_BUILTIN_EMMS,
19827 IX86_BUILTIN_LDMXCSR,
19828 IX86_BUILTIN_STMXCSR,
19829 IX86_BUILTIN_SFENCE,
19831 /* 3DNow! Original */
19832 IX86_BUILTIN_FEMMS,
19833 IX86_BUILTIN_PAVGUSB,
19834 IX86_BUILTIN_PF2ID,
19835 IX86_BUILTIN_PFACC,
19836 IX86_BUILTIN_PFADD,
19837 IX86_BUILTIN_PFCMPEQ,
19838 IX86_BUILTIN_PFCMPGE,
19839 IX86_BUILTIN_PFCMPGT,
19840 IX86_BUILTIN_PFMAX,
19841 IX86_BUILTIN_PFMIN,
19842 IX86_BUILTIN_PFMUL,
19843 IX86_BUILTIN_PFRCP,
19844 IX86_BUILTIN_PFRCPIT1,
19845 IX86_BUILTIN_PFRCPIT2,
19846 IX86_BUILTIN_PFRSQIT1,
19847 IX86_BUILTIN_PFRSQRT,
19848 IX86_BUILTIN_PFSUB,
19849 IX86_BUILTIN_PFSUBR,
19850 IX86_BUILTIN_PI2FD,
19851 IX86_BUILTIN_PMULHRW,
19853 /* 3DNow! Athlon Extensions */
19854 IX86_BUILTIN_PF2IW,
19855 IX86_BUILTIN_PFNACC,
19856 IX86_BUILTIN_PFPNACC,
19857 IX86_BUILTIN_PI2FW,
19858 IX86_BUILTIN_PSWAPDSI,
19859 IX86_BUILTIN_PSWAPDSF,
19861 /* SSE2 */
19862 IX86_BUILTIN_ADDPD,
19863 IX86_BUILTIN_ADDSD,
19864 IX86_BUILTIN_DIVPD,
19865 IX86_BUILTIN_DIVSD,
19866 IX86_BUILTIN_MULPD,
19867 IX86_BUILTIN_MULSD,
19868 IX86_BUILTIN_SUBPD,
19869 IX86_BUILTIN_SUBSD,
19871 IX86_BUILTIN_CMPEQPD,
19872 IX86_BUILTIN_CMPLTPD,
19873 IX86_BUILTIN_CMPLEPD,
19874 IX86_BUILTIN_CMPGTPD,
19875 IX86_BUILTIN_CMPGEPD,
19876 IX86_BUILTIN_CMPNEQPD,
19877 IX86_BUILTIN_CMPNLTPD,
19878 IX86_BUILTIN_CMPNLEPD,
19879 IX86_BUILTIN_CMPNGTPD,
19880 IX86_BUILTIN_CMPNGEPD,
19881 IX86_BUILTIN_CMPORDPD,
19882 IX86_BUILTIN_CMPUNORDPD,
19883 IX86_BUILTIN_CMPEQSD,
19884 IX86_BUILTIN_CMPLTSD,
19885 IX86_BUILTIN_CMPLESD,
19886 IX86_BUILTIN_CMPNEQSD,
19887 IX86_BUILTIN_CMPNLTSD,
19888 IX86_BUILTIN_CMPNLESD,
19889 IX86_BUILTIN_CMPORDSD,
19890 IX86_BUILTIN_CMPUNORDSD,
19892 IX86_BUILTIN_COMIEQSD,
19893 IX86_BUILTIN_COMILTSD,
19894 IX86_BUILTIN_COMILESD,
19895 IX86_BUILTIN_COMIGTSD,
19896 IX86_BUILTIN_COMIGESD,
19897 IX86_BUILTIN_COMINEQSD,
19898 IX86_BUILTIN_UCOMIEQSD,
19899 IX86_BUILTIN_UCOMILTSD,
19900 IX86_BUILTIN_UCOMILESD,
19901 IX86_BUILTIN_UCOMIGTSD,
19902 IX86_BUILTIN_UCOMIGESD,
19903 IX86_BUILTIN_UCOMINEQSD,
19905 IX86_BUILTIN_MAXPD,
19906 IX86_BUILTIN_MAXSD,
19907 IX86_BUILTIN_MINPD,
19908 IX86_BUILTIN_MINSD,
19910 IX86_BUILTIN_ANDPD,
19911 IX86_BUILTIN_ANDNPD,
19912 IX86_BUILTIN_ORPD,
19913 IX86_BUILTIN_XORPD,
19915 IX86_BUILTIN_SQRTPD,
19916 IX86_BUILTIN_SQRTSD,
19918 IX86_BUILTIN_UNPCKHPD,
19919 IX86_BUILTIN_UNPCKLPD,
19921 IX86_BUILTIN_SHUFPD,
19923 IX86_BUILTIN_LOADUPD,
19924 IX86_BUILTIN_STOREUPD,
19925 IX86_BUILTIN_MOVSD,
19927 IX86_BUILTIN_LOADHPD,
19928 IX86_BUILTIN_LOADLPD,
19930 IX86_BUILTIN_CVTDQ2PD,
19931 IX86_BUILTIN_CVTDQ2PS,
19933 IX86_BUILTIN_CVTPD2DQ,
19934 IX86_BUILTIN_CVTPD2PI,
19935 IX86_BUILTIN_CVTPD2PS,
19936 IX86_BUILTIN_CVTTPD2DQ,
19937 IX86_BUILTIN_CVTTPD2PI,
19939 IX86_BUILTIN_CVTPI2PD,
19940 IX86_BUILTIN_CVTSI2SD,
19941 IX86_BUILTIN_CVTSI642SD,
19943 IX86_BUILTIN_CVTSD2SI,
19944 IX86_BUILTIN_CVTSD2SI64,
19945 IX86_BUILTIN_CVTSD2SS,
19946 IX86_BUILTIN_CVTSS2SD,
19947 IX86_BUILTIN_CVTTSD2SI,
19948 IX86_BUILTIN_CVTTSD2SI64,
19950 IX86_BUILTIN_CVTPS2DQ,
19951 IX86_BUILTIN_CVTPS2PD,
19952 IX86_BUILTIN_CVTTPS2DQ,
19954 IX86_BUILTIN_MOVNTI,
19955 IX86_BUILTIN_MOVNTPD,
19956 IX86_BUILTIN_MOVNTDQ,
19958 IX86_BUILTIN_MOVQ128,
19960 /* SSE2 MMX */
19961 IX86_BUILTIN_MASKMOVDQU,
19962 IX86_BUILTIN_MOVMSKPD,
19963 IX86_BUILTIN_PMOVMSKB128,
19965 IX86_BUILTIN_PACKSSWB128,
19966 IX86_BUILTIN_PACKSSDW128,
19967 IX86_BUILTIN_PACKUSWB128,
19969 IX86_BUILTIN_PADDB128,
19970 IX86_BUILTIN_PADDW128,
19971 IX86_BUILTIN_PADDD128,
19972 IX86_BUILTIN_PADDQ128,
19973 IX86_BUILTIN_PADDSB128,
19974 IX86_BUILTIN_PADDSW128,
19975 IX86_BUILTIN_PADDUSB128,
19976 IX86_BUILTIN_PADDUSW128,
19977 IX86_BUILTIN_PSUBB128,
19978 IX86_BUILTIN_PSUBW128,
19979 IX86_BUILTIN_PSUBD128,
19980 IX86_BUILTIN_PSUBQ128,
19981 IX86_BUILTIN_PSUBSB128,
19982 IX86_BUILTIN_PSUBSW128,
19983 IX86_BUILTIN_PSUBUSB128,
19984 IX86_BUILTIN_PSUBUSW128,
19986 IX86_BUILTIN_PAND128,
19987 IX86_BUILTIN_PANDN128,
19988 IX86_BUILTIN_POR128,
19989 IX86_BUILTIN_PXOR128,
19991 IX86_BUILTIN_PAVGB128,
19992 IX86_BUILTIN_PAVGW128,
19994 IX86_BUILTIN_PCMPEQB128,
19995 IX86_BUILTIN_PCMPEQW128,
19996 IX86_BUILTIN_PCMPEQD128,
19997 IX86_BUILTIN_PCMPGTB128,
19998 IX86_BUILTIN_PCMPGTW128,
19999 IX86_BUILTIN_PCMPGTD128,
20001 IX86_BUILTIN_PMADDWD128,
20003 IX86_BUILTIN_PMAXSW128,
20004 IX86_BUILTIN_PMAXUB128,
20005 IX86_BUILTIN_PMINSW128,
20006 IX86_BUILTIN_PMINUB128,
20008 IX86_BUILTIN_PMULUDQ,
20009 IX86_BUILTIN_PMULUDQ128,
20010 IX86_BUILTIN_PMULHUW128,
20011 IX86_BUILTIN_PMULHW128,
20012 IX86_BUILTIN_PMULLW128,
20014 IX86_BUILTIN_PSADBW128,
20015 IX86_BUILTIN_PSHUFHW,
20016 IX86_BUILTIN_PSHUFLW,
20017 IX86_BUILTIN_PSHUFD,
20019 IX86_BUILTIN_PSLLDQI128,
20020 IX86_BUILTIN_PSLLWI128,
20021 IX86_BUILTIN_PSLLDI128,
20022 IX86_BUILTIN_PSLLQI128,
20023 IX86_BUILTIN_PSRAWI128,
20024 IX86_BUILTIN_PSRADI128,
20025 IX86_BUILTIN_PSRLDQI128,
20026 IX86_BUILTIN_PSRLWI128,
20027 IX86_BUILTIN_PSRLDI128,
20028 IX86_BUILTIN_PSRLQI128,
20030 IX86_BUILTIN_PSLLDQ128,
20031 IX86_BUILTIN_PSLLW128,
20032 IX86_BUILTIN_PSLLD128,
20033 IX86_BUILTIN_PSLLQ128,
20034 IX86_BUILTIN_PSRAW128,
20035 IX86_BUILTIN_PSRAD128,
20036 IX86_BUILTIN_PSRLW128,
20037 IX86_BUILTIN_PSRLD128,
20038 IX86_BUILTIN_PSRLQ128,
20040 IX86_BUILTIN_PUNPCKHBW128,
20041 IX86_BUILTIN_PUNPCKHWD128,
20042 IX86_BUILTIN_PUNPCKHDQ128,
20043 IX86_BUILTIN_PUNPCKHQDQ128,
20044 IX86_BUILTIN_PUNPCKLBW128,
20045 IX86_BUILTIN_PUNPCKLWD128,
20046 IX86_BUILTIN_PUNPCKLDQ128,
20047 IX86_BUILTIN_PUNPCKLQDQ128,
20049 IX86_BUILTIN_CLFLUSH,
20050 IX86_BUILTIN_MFENCE,
20051 IX86_BUILTIN_LFENCE,
20053 /* SSE3. */
20054 IX86_BUILTIN_ADDSUBPS,
20055 IX86_BUILTIN_HADDPS,
20056 IX86_BUILTIN_HSUBPS,
20057 IX86_BUILTIN_MOVSHDUP,
20058 IX86_BUILTIN_MOVSLDUP,
20059 IX86_BUILTIN_ADDSUBPD,
20060 IX86_BUILTIN_HADDPD,
20061 IX86_BUILTIN_HSUBPD,
20062 IX86_BUILTIN_LDDQU,
20064 IX86_BUILTIN_MONITOR,
20065 IX86_BUILTIN_MWAIT,
20067 /* SSSE3. */
20068 IX86_BUILTIN_PHADDW,
20069 IX86_BUILTIN_PHADDD,
20070 IX86_BUILTIN_PHADDSW,
20071 IX86_BUILTIN_PHSUBW,
20072 IX86_BUILTIN_PHSUBD,
20073 IX86_BUILTIN_PHSUBSW,
20074 IX86_BUILTIN_PMADDUBSW,
20075 IX86_BUILTIN_PMULHRSW,
20076 IX86_BUILTIN_PSHUFB,
20077 IX86_BUILTIN_PSIGNB,
20078 IX86_BUILTIN_PSIGNW,
20079 IX86_BUILTIN_PSIGND,
20080 IX86_BUILTIN_PALIGNR,
20081 IX86_BUILTIN_PABSB,
20082 IX86_BUILTIN_PABSW,
20083 IX86_BUILTIN_PABSD,
20085 IX86_BUILTIN_PHADDW128,
20086 IX86_BUILTIN_PHADDD128,
20087 IX86_BUILTIN_PHADDSW128,
20088 IX86_BUILTIN_PHSUBW128,
20089 IX86_BUILTIN_PHSUBD128,
20090 IX86_BUILTIN_PHSUBSW128,
20091 IX86_BUILTIN_PMADDUBSW128,
20092 IX86_BUILTIN_PMULHRSW128,
20093 IX86_BUILTIN_PSHUFB128,
20094 IX86_BUILTIN_PSIGNB128,
20095 IX86_BUILTIN_PSIGNW128,
20096 IX86_BUILTIN_PSIGND128,
20097 IX86_BUILTIN_PALIGNR128,
20098 IX86_BUILTIN_PABSB128,
20099 IX86_BUILTIN_PABSW128,
20100 IX86_BUILTIN_PABSD128,
20102 /* AMDFAM10 - SSE4A New Instructions. */
20103 IX86_BUILTIN_MOVNTSD,
20104 IX86_BUILTIN_MOVNTSS,
20105 IX86_BUILTIN_EXTRQI,
20106 IX86_BUILTIN_EXTRQ,
20107 IX86_BUILTIN_INSERTQI,
20108 IX86_BUILTIN_INSERTQ,
20110 /* SSE4.1. */
20111 IX86_BUILTIN_BLENDPD,
20112 IX86_BUILTIN_BLENDPS,
20113 IX86_BUILTIN_BLENDVPD,
20114 IX86_BUILTIN_BLENDVPS,
20115 IX86_BUILTIN_PBLENDVB128,
20116 IX86_BUILTIN_PBLENDW128,
20118 IX86_BUILTIN_DPPD,
20119 IX86_BUILTIN_DPPS,
20121 IX86_BUILTIN_INSERTPS128,
20123 IX86_BUILTIN_MOVNTDQA,
20124 IX86_BUILTIN_MPSADBW128,
20125 IX86_BUILTIN_PACKUSDW128,
20126 IX86_BUILTIN_PCMPEQQ,
20127 IX86_BUILTIN_PHMINPOSUW128,
20129 IX86_BUILTIN_PMAXSB128,
20130 IX86_BUILTIN_PMAXSD128,
20131 IX86_BUILTIN_PMAXUD128,
20132 IX86_BUILTIN_PMAXUW128,
20134 IX86_BUILTIN_PMINSB128,
20135 IX86_BUILTIN_PMINSD128,
20136 IX86_BUILTIN_PMINUD128,
20137 IX86_BUILTIN_PMINUW128,
20139 IX86_BUILTIN_PMOVSXBW128,
20140 IX86_BUILTIN_PMOVSXBD128,
20141 IX86_BUILTIN_PMOVSXBQ128,
20142 IX86_BUILTIN_PMOVSXWD128,
20143 IX86_BUILTIN_PMOVSXWQ128,
20144 IX86_BUILTIN_PMOVSXDQ128,
20146 IX86_BUILTIN_PMOVZXBW128,
20147 IX86_BUILTIN_PMOVZXBD128,
20148 IX86_BUILTIN_PMOVZXBQ128,
20149 IX86_BUILTIN_PMOVZXWD128,
20150 IX86_BUILTIN_PMOVZXWQ128,
20151 IX86_BUILTIN_PMOVZXDQ128,
20153 IX86_BUILTIN_PMULDQ128,
20154 IX86_BUILTIN_PMULLD128,
20156 IX86_BUILTIN_ROUNDPD,
20157 IX86_BUILTIN_ROUNDPS,
20158 IX86_BUILTIN_ROUNDSD,
20159 IX86_BUILTIN_ROUNDSS,
20161 IX86_BUILTIN_PTESTZ,
20162 IX86_BUILTIN_PTESTC,
20163 IX86_BUILTIN_PTESTNZC,
20165 IX86_BUILTIN_VEC_INIT_V2SI,
20166 IX86_BUILTIN_VEC_INIT_V4HI,
20167 IX86_BUILTIN_VEC_INIT_V8QI,
20168 IX86_BUILTIN_VEC_EXT_V2DF,
20169 IX86_BUILTIN_VEC_EXT_V2DI,
20170 IX86_BUILTIN_VEC_EXT_V4SF,
20171 IX86_BUILTIN_VEC_EXT_V4SI,
20172 IX86_BUILTIN_VEC_EXT_V8HI,
20173 IX86_BUILTIN_VEC_EXT_V2SI,
20174 IX86_BUILTIN_VEC_EXT_V4HI,
20175 IX86_BUILTIN_VEC_EXT_V16QI,
20176 IX86_BUILTIN_VEC_SET_V2DI,
20177 IX86_BUILTIN_VEC_SET_V4SF,
20178 IX86_BUILTIN_VEC_SET_V4SI,
20179 IX86_BUILTIN_VEC_SET_V8HI,
20180 IX86_BUILTIN_VEC_SET_V4HI,
20181 IX86_BUILTIN_VEC_SET_V16QI,
20183 IX86_BUILTIN_VEC_PACK_SFIX,
20185 /* SSE4.2. */
20186 IX86_BUILTIN_CRC32QI,
20187 IX86_BUILTIN_CRC32HI,
20188 IX86_BUILTIN_CRC32SI,
20189 IX86_BUILTIN_CRC32DI,
20191 IX86_BUILTIN_PCMPESTRI128,
20192 IX86_BUILTIN_PCMPESTRM128,
20193 IX86_BUILTIN_PCMPESTRA128,
20194 IX86_BUILTIN_PCMPESTRC128,
20195 IX86_BUILTIN_PCMPESTRO128,
20196 IX86_BUILTIN_PCMPESTRS128,
20197 IX86_BUILTIN_PCMPESTRZ128,
20198 IX86_BUILTIN_PCMPISTRI128,
20199 IX86_BUILTIN_PCMPISTRM128,
20200 IX86_BUILTIN_PCMPISTRA128,
20201 IX86_BUILTIN_PCMPISTRC128,
20202 IX86_BUILTIN_PCMPISTRO128,
20203 IX86_BUILTIN_PCMPISTRS128,
20204 IX86_BUILTIN_PCMPISTRZ128,
20206 IX86_BUILTIN_PCMPGTQ,
20208 /* AES instructions */
20209 IX86_BUILTIN_AESENC128,
20210 IX86_BUILTIN_AESENCLAST128,
20211 IX86_BUILTIN_AESDEC128,
20212 IX86_BUILTIN_AESDECLAST128,
20213 IX86_BUILTIN_AESIMC128,
20214 IX86_BUILTIN_AESKEYGENASSIST128,
20216 /* PCLMUL instruction */
20217 IX86_BUILTIN_PCLMULQDQ128,
20219 /* AVX */
20220 IX86_BUILTIN_ADDPD256,
20221 IX86_BUILTIN_ADDPS256,
20222 IX86_BUILTIN_ADDSUBPD256,
20223 IX86_BUILTIN_ADDSUBPS256,
20224 IX86_BUILTIN_ANDPD256,
20225 IX86_BUILTIN_ANDPS256,
20226 IX86_BUILTIN_ANDNPD256,
20227 IX86_BUILTIN_ANDNPS256,
20228 IX86_BUILTIN_BLENDPD256,
20229 IX86_BUILTIN_BLENDPS256,
20230 IX86_BUILTIN_BLENDVPD256,
20231 IX86_BUILTIN_BLENDVPS256,
20232 IX86_BUILTIN_DIVPD256,
20233 IX86_BUILTIN_DIVPS256,
20234 IX86_BUILTIN_DPPS256,
20235 IX86_BUILTIN_HADDPD256,
20236 IX86_BUILTIN_HADDPS256,
20237 IX86_BUILTIN_HSUBPD256,
20238 IX86_BUILTIN_HSUBPS256,
20239 IX86_BUILTIN_MAXPD256,
20240 IX86_BUILTIN_MAXPS256,
20241 IX86_BUILTIN_MINPD256,
20242 IX86_BUILTIN_MINPS256,
20243 IX86_BUILTIN_MULPD256,
20244 IX86_BUILTIN_MULPS256,
20245 IX86_BUILTIN_ORPD256,
20246 IX86_BUILTIN_ORPS256,
20247 IX86_BUILTIN_SHUFPD256,
20248 IX86_BUILTIN_SHUFPS256,
20249 IX86_BUILTIN_SUBPD256,
20250 IX86_BUILTIN_SUBPS256,
20251 IX86_BUILTIN_XORPD256,
20252 IX86_BUILTIN_XORPS256,
20253 IX86_BUILTIN_CMPSD,
20254 IX86_BUILTIN_CMPSS,
20255 IX86_BUILTIN_CMPPD,
20256 IX86_BUILTIN_CMPPS,
20257 IX86_BUILTIN_CMPPD256,
20258 IX86_BUILTIN_CMPPS256,
20259 IX86_BUILTIN_CVTDQ2PD256,
20260 IX86_BUILTIN_CVTDQ2PS256,
20261 IX86_BUILTIN_CVTPD2PS256,
20262 IX86_BUILTIN_CVTPS2DQ256,
20263 IX86_BUILTIN_CVTPS2PD256,
20264 IX86_BUILTIN_CVTTPD2DQ256,
20265 IX86_BUILTIN_CVTPD2DQ256,
20266 IX86_BUILTIN_CVTTPS2DQ256,
20267 IX86_BUILTIN_EXTRACTF128PD256,
20268 IX86_BUILTIN_EXTRACTF128PS256,
20269 IX86_BUILTIN_EXTRACTF128SI256,
20270 IX86_BUILTIN_VZEROALL,
20271 IX86_BUILTIN_VZEROUPPER,
20272 IX86_BUILTIN_VZEROUPPER_REX64,
20273 IX86_BUILTIN_VPERMILVARPD,
20274 IX86_BUILTIN_VPERMILVARPS,
20275 IX86_BUILTIN_VPERMILVARPD256,
20276 IX86_BUILTIN_VPERMILVARPS256,
20277 IX86_BUILTIN_VPERMILPD,
20278 IX86_BUILTIN_VPERMILPS,
20279 IX86_BUILTIN_VPERMILPD256,
20280 IX86_BUILTIN_VPERMILPS256,
20281 IX86_BUILTIN_VPERM2F128PD256,
20282 IX86_BUILTIN_VPERM2F128PS256,
20283 IX86_BUILTIN_VPERM2F128SI256,
20284 IX86_BUILTIN_VBROADCASTSS,
20285 IX86_BUILTIN_VBROADCASTSD256,
20286 IX86_BUILTIN_VBROADCASTSS256,
20287 IX86_BUILTIN_VBROADCASTPD256,
20288 IX86_BUILTIN_VBROADCASTPS256,
20289 IX86_BUILTIN_VINSERTF128PD256,
20290 IX86_BUILTIN_VINSERTF128PS256,
20291 IX86_BUILTIN_VINSERTF128SI256,
20292 IX86_BUILTIN_LOADUPD256,
20293 IX86_BUILTIN_LOADUPS256,
20294 IX86_BUILTIN_STOREUPD256,
20295 IX86_BUILTIN_STOREUPS256,
20296 IX86_BUILTIN_LDDQU256,
20297 IX86_BUILTIN_MOVNTDQ256,
20298 IX86_BUILTIN_MOVNTPD256,
20299 IX86_BUILTIN_MOVNTPS256,
20300 IX86_BUILTIN_LOADDQU256,
20301 IX86_BUILTIN_STOREDQU256,
20302 IX86_BUILTIN_MASKLOADPD,
20303 IX86_BUILTIN_MASKLOADPS,
20304 IX86_BUILTIN_MASKSTOREPD,
20305 IX86_BUILTIN_MASKSTOREPS,
20306 IX86_BUILTIN_MASKLOADPD256,
20307 IX86_BUILTIN_MASKLOADPS256,
20308 IX86_BUILTIN_MASKSTOREPD256,
20309 IX86_BUILTIN_MASKSTOREPS256,
20310 IX86_BUILTIN_MOVSHDUP256,
20311 IX86_BUILTIN_MOVSLDUP256,
20312 IX86_BUILTIN_MOVDDUP256,
20314 IX86_BUILTIN_SQRTPD256,
20315 IX86_BUILTIN_SQRTPS256,
20316 IX86_BUILTIN_SQRTPS_NR256,
20317 IX86_BUILTIN_RSQRTPS256,
20318 IX86_BUILTIN_RSQRTPS_NR256,
20320 IX86_BUILTIN_RCPPS256,
20322 IX86_BUILTIN_ROUNDPD256,
20323 IX86_BUILTIN_ROUNDPS256,
20325 IX86_BUILTIN_UNPCKHPD256,
20326 IX86_BUILTIN_UNPCKLPD256,
20327 IX86_BUILTIN_UNPCKHPS256,
20328 IX86_BUILTIN_UNPCKLPS256,
20330 IX86_BUILTIN_SI256_SI,
20331 IX86_BUILTIN_PS256_PS,
20332 IX86_BUILTIN_PD256_PD,
20333 IX86_BUILTIN_SI_SI256,
20334 IX86_BUILTIN_PS_PS256,
20335 IX86_BUILTIN_PD_PD256,
20337 IX86_BUILTIN_VTESTZPD,
20338 IX86_BUILTIN_VTESTCPD,
20339 IX86_BUILTIN_VTESTNZCPD,
20340 IX86_BUILTIN_VTESTZPS,
20341 IX86_BUILTIN_VTESTCPS,
20342 IX86_BUILTIN_VTESTNZCPS,
20343 IX86_BUILTIN_VTESTZPD256,
20344 IX86_BUILTIN_VTESTCPD256,
20345 IX86_BUILTIN_VTESTNZCPD256,
20346 IX86_BUILTIN_VTESTZPS256,
20347 IX86_BUILTIN_VTESTCPS256,
20348 IX86_BUILTIN_VTESTNZCPS256,
20349 IX86_BUILTIN_PTESTZ256,
20350 IX86_BUILTIN_PTESTC256,
20351 IX86_BUILTIN_PTESTNZC256,
20353 IX86_BUILTIN_MOVMSKPD256,
20354 IX86_BUILTIN_MOVMSKPS256,
20356 /* TFmode support builtins. */
20357 IX86_BUILTIN_INFQ,
20358 IX86_BUILTIN_FABSQ,
20359 IX86_BUILTIN_COPYSIGNQ,
20361 /* SSE5 instructions */
20362 IX86_BUILTIN_FMADDSS,
20363 IX86_BUILTIN_FMADDSD,
20364 IX86_BUILTIN_FMADDPS,
20365 IX86_BUILTIN_FMADDPD,
20366 IX86_BUILTIN_FMSUBSS,
20367 IX86_BUILTIN_FMSUBSD,
20368 IX86_BUILTIN_FMSUBPS,
20369 IX86_BUILTIN_FMSUBPD,
20370 IX86_BUILTIN_FNMADDSS,
20371 IX86_BUILTIN_FNMADDSD,
20372 IX86_BUILTIN_FNMADDPS,
20373 IX86_BUILTIN_FNMADDPD,
20374 IX86_BUILTIN_FNMSUBSS,
20375 IX86_BUILTIN_FNMSUBSD,
20376 IX86_BUILTIN_FNMSUBPS,
20377 IX86_BUILTIN_FNMSUBPD,
20378 IX86_BUILTIN_PCMOV,
20379 IX86_BUILTIN_PCMOV_V2DI,
20380 IX86_BUILTIN_PCMOV_V4SI,
20381 IX86_BUILTIN_PCMOV_V8HI,
20382 IX86_BUILTIN_PCMOV_V16QI,
20383 IX86_BUILTIN_PCMOV_V4SF,
20384 IX86_BUILTIN_PCMOV_V2DF,
20385 IX86_BUILTIN_PPERM,
20386 IX86_BUILTIN_PERMPS,
20387 IX86_BUILTIN_PERMPD,
20388 IX86_BUILTIN_PMACSSWW,
20389 IX86_BUILTIN_PMACSWW,
20390 IX86_BUILTIN_PMACSSWD,
20391 IX86_BUILTIN_PMACSWD,
20392 IX86_BUILTIN_PMACSSDD,
20393 IX86_BUILTIN_PMACSDD,
20394 IX86_BUILTIN_PMACSSDQL,
20395 IX86_BUILTIN_PMACSSDQH,
20396 IX86_BUILTIN_PMACSDQL,
20397 IX86_BUILTIN_PMACSDQH,
20398 IX86_BUILTIN_PMADCSSWD,
20399 IX86_BUILTIN_PMADCSWD,
20400 IX86_BUILTIN_PHADDBW,
20401 IX86_BUILTIN_PHADDBD,
20402 IX86_BUILTIN_PHADDBQ,
20403 IX86_BUILTIN_PHADDWD,
20404 IX86_BUILTIN_PHADDWQ,
20405 IX86_BUILTIN_PHADDDQ,
20406 IX86_BUILTIN_PHADDUBW,
20407 IX86_BUILTIN_PHADDUBD,
20408 IX86_BUILTIN_PHADDUBQ,
20409 IX86_BUILTIN_PHADDUWD,
20410 IX86_BUILTIN_PHADDUWQ,
20411 IX86_BUILTIN_PHADDUDQ,
20412 IX86_BUILTIN_PHSUBBW,
20413 IX86_BUILTIN_PHSUBWD,
20414 IX86_BUILTIN_PHSUBDQ,
20415 IX86_BUILTIN_PROTB,
20416 IX86_BUILTIN_PROTW,
20417 IX86_BUILTIN_PROTD,
20418 IX86_BUILTIN_PROTQ,
20419 IX86_BUILTIN_PROTB_IMM,
20420 IX86_BUILTIN_PROTW_IMM,
20421 IX86_BUILTIN_PROTD_IMM,
20422 IX86_BUILTIN_PROTQ_IMM,
20423 IX86_BUILTIN_PSHLB,
20424 IX86_BUILTIN_PSHLW,
20425 IX86_BUILTIN_PSHLD,
20426 IX86_BUILTIN_PSHLQ,
20427 IX86_BUILTIN_PSHAB,
20428 IX86_BUILTIN_PSHAW,
20429 IX86_BUILTIN_PSHAD,
20430 IX86_BUILTIN_PSHAQ,
20431 IX86_BUILTIN_FRCZSS,
20432 IX86_BUILTIN_FRCZSD,
20433 IX86_BUILTIN_FRCZPS,
20434 IX86_BUILTIN_FRCZPD,
20435 IX86_BUILTIN_CVTPH2PS,
20436 IX86_BUILTIN_CVTPS2PH,
20438 IX86_BUILTIN_COMEQSS,
20439 IX86_BUILTIN_COMNESS,
20440 IX86_BUILTIN_COMLTSS,
20441 IX86_BUILTIN_COMLESS,
20442 IX86_BUILTIN_COMGTSS,
20443 IX86_BUILTIN_COMGESS,
20444 IX86_BUILTIN_COMUEQSS,
20445 IX86_BUILTIN_COMUNESS,
20446 IX86_BUILTIN_COMULTSS,
20447 IX86_BUILTIN_COMULESS,
20448 IX86_BUILTIN_COMUGTSS,
20449 IX86_BUILTIN_COMUGESS,
20450 IX86_BUILTIN_COMORDSS,
20451 IX86_BUILTIN_COMUNORDSS,
20452 IX86_BUILTIN_COMFALSESS,
20453 IX86_BUILTIN_COMTRUESS,
20455 IX86_BUILTIN_COMEQSD,
20456 IX86_BUILTIN_COMNESD,
20457 IX86_BUILTIN_COMLTSD,
20458 IX86_BUILTIN_COMLESD,
20459 IX86_BUILTIN_COMGTSD,
20460 IX86_BUILTIN_COMGESD,
20461 IX86_BUILTIN_COMUEQSD,
20462 IX86_BUILTIN_COMUNESD,
20463 IX86_BUILTIN_COMULTSD,
20464 IX86_BUILTIN_COMULESD,
20465 IX86_BUILTIN_COMUGTSD,
20466 IX86_BUILTIN_COMUGESD,
20467 IX86_BUILTIN_COMORDSD,
20468 IX86_BUILTIN_COMUNORDSD,
20469 IX86_BUILTIN_COMFALSESD,
20470 IX86_BUILTIN_COMTRUESD,
20472 IX86_BUILTIN_COMEQPS,
20473 IX86_BUILTIN_COMNEPS,
20474 IX86_BUILTIN_COMLTPS,
20475 IX86_BUILTIN_COMLEPS,
20476 IX86_BUILTIN_COMGTPS,
20477 IX86_BUILTIN_COMGEPS,
20478 IX86_BUILTIN_COMUEQPS,
20479 IX86_BUILTIN_COMUNEPS,
20480 IX86_BUILTIN_COMULTPS,
20481 IX86_BUILTIN_COMULEPS,
20482 IX86_BUILTIN_COMUGTPS,
20483 IX86_BUILTIN_COMUGEPS,
20484 IX86_BUILTIN_COMORDPS,
20485 IX86_BUILTIN_COMUNORDPS,
20486 IX86_BUILTIN_COMFALSEPS,
20487 IX86_BUILTIN_COMTRUEPS,
20489 IX86_BUILTIN_COMEQPD,
20490 IX86_BUILTIN_COMNEPD,
20491 IX86_BUILTIN_COMLTPD,
20492 IX86_BUILTIN_COMLEPD,
20493 IX86_BUILTIN_COMGTPD,
20494 IX86_BUILTIN_COMGEPD,
20495 IX86_BUILTIN_COMUEQPD,
20496 IX86_BUILTIN_COMUNEPD,
20497 IX86_BUILTIN_COMULTPD,
20498 IX86_BUILTIN_COMULEPD,
20499 IX86_BUILTIN_COMUGTPD,
20500 IX86_BUILTIN_COMUGEPD,
20501 IX86_BUILTIN_COMORDPD,
20502 IX86_BUILTIN_COMUNORDPD,
20503 IX86_BUILTIN_COMFALSEPD,
20504 IX86_BUILTIN_COMTRUEPD,
20506 IX86_BUILTIN_PCOMEQUB,
20507 IX86_BUILTIN_PCOMNEUB,
20508 IX86_BUILTIN_PCOMLTUB,
20509 IX86_BUILTIN_PCOMLEUB,
20510 IX86_BUILTIN_PCOMGTUB,
20511 IX86_BUILTIN_PCOMGEUB,
20512 IX86_BUILTIN_PCOMFALSEUB,
20513 IX86_BUILTIN_PCOMTRUEUB,
20514 IX86_BUILTIN_PCOMEQUW,
20515 IX86_BUILTIN_PCOMNEUW,
20516 IX86_BUILTIN_PCOMLTUW,
20517 IX86_BUILTIN_PCOMLEUW,
20518 IX86_BUILTIN_PCOMGTUW,
20519 IX86_BUILTIN_PCOMGEUW,
20520 IX86_BUILTIN_PCOMFALSEUW,
20521 IX86_BUILTIN_PCOMTRUEUW,
20522 IX86_BUILTIN_PCOMEQUD,
20523 IX86_BUILTIN_PCOMNEUD,
20524 IX86_BUILTIN_PCOMLTUD,
20525 IX86_BUILTIN_PCOMLEUD,
20526 IX86_BUILTIN_PCOMGTUD,
20527 IX86_BUILTIN_PCOMGEUD,
20528 IX86_BUILTIN_PCOMFALSEUD,
20529 IX86_BUILTIN_PCOMTRUEUD,
20530 IX86_BUILTIN_PCOMEQUQ,
20531 IX86_BUILTIN_PCOMNEUQ,
20532 IX86_BUILTIN_PCOMLTUQ,
20533 IX86_BUILTIN_PCOMLEUQ,
20534 IX86_BUILTIN_PCOMGTUQ,
20535 IX86_BUILTIN_PCOMGEUQ,
20536 IX86_BUILTIN_PCOMFALSEUQ,
20537 IX86_BUILTIN_PCOMTRUEUQ,
20539 IX86_BUILTIN_PCOMEQB,
20540 IX86_BUILTIN_PCOMNEB,
20541 IX86_BUILTIN_PCOMLTB,
20542 IX86_BUILTIN_PCOMLEB,
20543 IX86_BUILTIN_PCOMGTB,
20544 IX86_BUILTIN_PCOMGEB,
20545 IX86_BUILTIN_PCOMFALSEB,
20546 IX86_BUILTIN_PCOMTRUEB,
20547 IX86_BUILTIN_PCOMEQW,
20548 IX86_BUILTIN_PCOMNEW,
20549 IX86_BUILTIN_PCOMLTW,
20550 IX86_BUILTIN_PCOMLEW,
20551 IX86_BUILTIN_PCOMGTW,
20552 IX86_BUILTIN_PCOMGEW,
20553 IX86_BUILTIN_PCOMFALSEW,
20554 IX86_BUILTIN_PCOMTRUEW,
20555 IX86_BUILTIN_PCOMEQD,
20556 IX86_BUILTIN_PCOMNED,
20557 IX86_BUILTIN_PCOMLTD,
20558 IX86_BUILTIN_PCOMLED,
20559 IX86_BUILTIN_PCOMGTD,
20560 IX86_BUILTIN_PCOMGED,
20561 IX86_BUILTIN_PCOMFALSED,
20562 IX86_BUILTIN_PCOMTRUED,
20563 IX86_BUILTIN_PCOMEQQ,
20564 IX86_BUILTIN_PCOMNEQ,
20565 IX86_BUILTIN_PCOMLTQ,
20566 IX86_BUILTIN_PCOMLEQ,
20567 IX86_BUILTIN_PCOMGTQ,
20568 IX86_BUILTIN_PCOMGEQ,
20569 IX86_BUILTIN_PCOMFALSEQ,
20570 IX86_BUILTIN_PCOMTRUEQ,
20572 IX86_BUILTIN_MAX
20575 /* Table for the ix86 builtin decls. */
20576 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
20578 /* Table of all the builtin functions that are possible with different ISAs
20579 but are not built until a function is declared to use that
20580 ISA. */
20581 struct builtin_isa GTY(())
20583 tree type; /* builtin type to use in the declaration */
20584 const char *name; /* function name */
20585 int isa; /* isa_flags this builtin is defined for */
20586 bool const_p; /* true if the declaration is constant */
20589 static GTY(()) struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
20592 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
20593 * of which isa_flags to use in the ix86_builtins_isa array. Stores the
20594 * function decl in the ix86_builtins array. Returns the function decl or
20595 * NULL_TREE if the builtin was not added.
20597 * If the front end has a special hook for builtin functions, delay adding
20598 * builtin functions that aren't in the current ISA until the ISA is changed
20599 * with function-specific optimization. Doing so can save about 300K for the
20600 * default compiler. When the builtin is expanded, check at that time whether
20601 * it is valid.
20603 * If the front end doesn't have a special hook, record all builtins, even
20604 * ones that aren't in the current ISA, in case the user uses function-specific
20605 * options for a different ISA, so that we don't get scope errors if a builtin
20606 * is added in the middle of a function scope. */
20608 static inline tree
20609 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
20611 tree decl = NULL_TREE;
20613 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
20615 ix86_builtins_isa[(int) code].isa = mask;
20617 if ((mask & ix86_isa_flags) != 0
20618 || (lang_hooks.builtin_function
20619 == lang_hooks.builtin_function_ext_scope))
20622 decl = add_builtin_function (name, type, code, BUILT_IN_MD, NULL,
20623 NULL_TREE);
20624 ix86_builtins[(int) code] = decl;
20625 ix86_builtins_isa[(int) code].type = NULL_TREE;
20627 else
20629 ix86_builtins[(int) code] = NULL_TREE;
20630 ix86_builtins_isa[(int) code].const_p = false;
20631 ix86_builtins_isa[(int) code].type = type;
20632 ix86_builtins_isa[(int) code].name = name;
20636 return decl;
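/* Illustrative usage sketch (annotation, not in the upstream source; the
   type variable name is hypothetical):

       tree v4sf_ftype_v4sf_v4sf = ...;   built function type V4SF (V4SF, V4SF)
       def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_addps",
                    v4sf_ftype_v4sf_v4sf, IX86_BUILTIN_ADDPS);

   If SSE is enabled (or the front end lacks a special builtin hook), the
   decl is registered immediately; otherwise it is parked in
   ix86_builtins_isa and materialized later by ix86_add_new_builtins once
   the ISA is enabled via function-specific options.  */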
20639 /* Like def_builtin, but also marks the function decl "const". */
20641 static inline tree
20642 def_builtin_const (int mask, const char *name, tree type,
20643 enum ix86_builtins code)
20645 tree decl = def_builtin (mask, name, type, code);
20646 if (decl)
20647 TREE_READONLY (decl) = 1;
20648 else
20649 ix86_builtins_isa[(int) code].const_p = true;
20651 return decl;
20654 /* Add any new builtin functions for a given ISA that may not have been
20655 declared. This saves a bit of space compared to adding all of the
20656 declarations to the tree, even the ones we never use. */
20658 static void
20659 ix86_add_new_builtins (int isa)
20661 int i;
20662 tree decl;
20664 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
20666 if ((ix86_builtins_isa[i].isa & isa) != 0
20667 && ix86_builtins_isa[i].type != NULL_TREE)
20669 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
20670 ix86_builtins_isa[i].type,
20671 i, BUILT_IN_MD, NULL,
20672 NULL_TREE);
20674 ix86_builtins[i] = decl;
20675 ix86_builtins_isa[i].type = NULL_TREE;
20676 if (ix86_builtins_isa[i].const_p)
20677 TREE_READONLY (decl) = 1;
20682 /* Bits for builtin_description.flag. */
20684 /* Set when we don't support the comparison natively, and should
20685 swap the comparison operands in order to support it. */
20686 #define BUILTIN_DESC_SWAP_OPERANDS 1
20688 struct builtin_description
20690 const unsigned int mask;
20691 const enum insn_code icode;
20692 const char *const name;
20693 const enum ix86_builtins code;
20694 const enum rtx_code comparison;
20695 const int flag;
20698 static const struct builtin_description bdesc_comi[] =
20700 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
20701 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
20702 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
20703 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
20704 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
20705 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
20706 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
20707 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
20708 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
20709 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
20710 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
20711 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
20712 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
20713 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
20714 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
20715 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
20716 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
20717 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
20718 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
20719 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
20720 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
20721 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
20722 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
20723 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
20726 static const struct builtin_description bdesc_pcmpestr[] =
20728 /* SSE4.2 */
20729 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
20730 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
20731 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
20732 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
20733 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
20734 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
20735 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
20738 static const struct builtin_description bdesc_pcmpistr[] =
20740 /* SSE4.2 */
20741 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
20742 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
20743 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
20744 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
20745 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
20746 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
20747 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
20750 /* Special builtin types */
20751 enum ix86_special_builtin_type
20753 SPECIAL_FTYPE_UNKNOWN,
20754 VOID_FTYPE_VOID,
20755 V32QI_FTYPE_PCCHAR,
20756 V16QI_FTYPE_PCCHAR,
20757 V8SF_FTYPE_PCV4SF,
20758 V8SF_FTYPE_PCFLOAT,
20759 V4DF_FTYPE_PCV2DF,
20760 V4DF_FTYPE_PCDOUBLE,
20761 V4SF_FTYPE_PCFLOAT,
20762 V2DF_FTYPE_PCDOUBLE,
20763 V8SF_FTYPE_PCV8SF_V8SF,
20764 V4DF_FTYPE_PCV4DF_V4DF,
20765 V4SF_FTYPE_V4SF_PCV2SF,
20766 V4SF_FTYPE_PCV4SF_V4SF,
20767 V2DF_FTYPE_V2DF_PCDOUBLE,
20768 V2DF_FTYPE_PCV2DF_V2DF,
20769 V2DI_FTYPE_PV2DI,
20770 VOID_FTYPE_PV2SF_V4SF,
20771 VOID_FTYPE_PV4DI_V4DI,
20772 VOID_FTYPE_PV2DI_V2DI,
20773 VOID_FTYPE_PCHAR_V32QI,
20774 VOID_FTYPE_PCHAR_V16QI,
20775 VOID_FTYPE_PFLOAT_V8SF,
20776 VOID_FTYPE_PFLOAT_V4SF,
20777 VOID_FTYPE_PDOUBLE_V4DF,
20778 VOID_FTYPE_PDOUBLE_V2DF,
20779 VOID_FTYPE_PDI_DI,
20780 VOID_FTYPE_PINT_INT,
20781 VOID_FTYPE_PV8SF_V8SF_V8SF,
20782 VOID_FTYPE_PV4DF_V4DF_V4DF,
20783 VOID_FTYPE_PV4SF_V4SF_V4SF,
20784 VOID_FTYPE_PV2DF_V2DF_V2DF
20787 /* Builtin types */
20788 enum ix86_builtin_type
20790 FTYPE_UNKNOWN,
20791 FLOAT128_FTYPE_FLOAT128,
20792 FLOAT_FTYPE_FLOAT,
20793 FLOAT128_FTYPE_FLOAT128_FLOAT128,
20794 INT_FTYPE_V8SF_V8SF_PTEST,
20795 INT_FTYPE_V4DI_V4DI_PTEST,
20796 INT_FTYPE_V4DF_V4DF_PTEST,
20797 INT_FTYPE_V4SF_V4SF_PTEST,
20798 INT_FTYPE_V2DI_V2DI_PTEST,
20799 INT_FTYPE_V2DF_V2DF_PTEST,
20800 INT64_FTYPE_V4SF,
20801 INT64_FTYPE_V2DF,
20802 INT_FTYPE_V16QI,
20803 INT_FTYPE_V8QI,
20804 INT_FTYPE_V8SF,
20805 INT_FTYPE_V4DF,
20806 INT_FTYPE_V4SF,
20807 INT_FTYPE_V2DF,
20808 V16QI_FTYPE_V16QI,
20809 V8SI_FTYPE_V8SF,
20810 V8SI_FTYPE_V4SI,
20811 V8HI_FTYPE_V8HI,
20812 V8HI_FTYPE_V16QI,
20813 V8QI_FTYPE_V8QI,
20814 V8SF_FTYPE_V8SF,
20815 V8SF_FTYPE_V8SI,
20816 V8SF_FTYPE_V4SF,
20817 V4SI_FTYPE_V4SI,
20818 V4SI_FTYPE_V16QI,
20819 V4SI_FTYPE_V8SI,
20820 V4SI_FTYPE_V8HI,
20821 V4SI_FTYPE_V4DF,
20822 V4SI_FTYPE_V4SF,
20823 V4SI_FTYPE_V2DF,
20824 V4HI_FTYPE_V4HI,
20825 V4DF_FTYPE_V4DF,
20826 V4DF_FTYPE_V4SI,
20827 V4DF_FTYPE_V4SF,
20828 V4DF_FTYPE_V2DF,
20829 V4SF_FTYPE_V4DF,
20830 V4SF_FTYPE_V4SF,
20831 V4SF_FTYPE_V4SF_VEC_MERGE,
20832 V4SF_FTYPE_V8SF,
20833 V4SF_FTYPE_V4SI,
20834 V4SF_FTYPE_V2DF,
20835 V2DI_FTYPE_V2DI,
20836 V2DI_FTYPE_V16QI,
20837 V2DI_FTYPE_V8HI,
20838 V2DI_FTYPE_V4SI,
20839 V2DF_FTYPE_V2DF,
20840 V2DF_FTYPE_V2DF_VEC_MERGE,
20841 V2DF_FTYPE_V4SI,
20842 V2DF_FTYPE_V4DF,
20843 V2DF_FTYPE_V4SF,
20844 V2DF_FTYPE_V2SI,
20845 V2SI_FTYPE_V2SI,
20846 V2SI_FTYPE_V4SF,
20847 V2SI_FTYPE_V2SF,
20848 V2SI_FTYPE_V2DF,
20849 V2SF_FTYPE_V2SF,
20850 V2SF_FTYPE_V2SI,
20851 V16QI_FTYPE_V16QI_V16QI,
20852 V16QI_FTYPE_V8HI_V8HI,
20853 V8QI_FTYPE_V8QI_V8QI,
20854 V8QI_FTYPE_V4HI_V4HI,
20855 V8HI_FTYPE_V8HI_V8HI,
20856 V8HI_FTYPE_V8HI_V8HI_COUNT,
20857 V8HI_FTYPE_V16QI_V16QI,
20858 V8HI_FTYPE_V4SI_V4SI,
20859 V8HI_FTYPE_V8HI_SI_COUNT,
20860 V8SF_FTYPE_V8SF_V8SF,
20861 V8SF_FTYPE_V8SF_V8SI,
20862 V4SI_FTYPE_V4SI_V4SI,
20863 V4SI_FTYPE_V4SI_V4SI_COUNT,
20864 V4SI_FTYPE_V8HI_V8HI,
20865 V4SI_FTYPE_V4SF_V4SF,
20866 V4SI_FTYPE_V2DF_V2DF,
20867 V4SI_FTYPE_V4SI_SI_COUNT,
20868 V4HI_FTYPE_V4HI_V4HI,
20869 V4HI_FTYPE_V4HI_V4HI_COUNT,
20870 V4HI_FTYPE_V8QI_V8QI,
20871 V4HI_FTYPE_V2SI_V2SI,
20872 V4HI_FTYPE_V4HI_SI_COUNT,
20873 V4DF_FTYPE_V4DF_V4DF,
20874 V4DF_FTYPE_V4DF_V4DI,
20875 V4SF_FTYPE_V4SF_V4SF,
20876 V4SF_FTYPE_V4SF_V4SF_SWAP,
20877 V4SF_FTYPE_V4SF_V4SI,
20878 V4SF_FTYPE_V4SF_V2SI,
20879 V4SF_FTYPE_V4SF_V2DF,
20880 V4SF_FTYPE_V4SF_DI,
20881 V4SF_FTYPE_V4SF_SI,
20882 V2DI_FTYPE_V2DI_V2DI,
20883 V2DI_FTYPE_V2DI_V2DI_COUNT,
20884 V2DI_FTYPE_V16QI_V16QI,
20885 V2DI_FTYPE_V4SI_V4SI,
20886 V2DI_FTYPE_V2DI_V16QI,
20887 V2DI_FTYPE_V2DF_V2DF,
20888 V2DI_FTYPE_V2DI_SI_COUNT,
20889 V2SI_FTYPE_V2SI_V2SI,
20890 V2SI_FTYPE_V2SI_V2SI_COUNT,
20891 V2SI_FTYPE_V4HI_V4HI,
20892 V2SI_FTYPE_V2SF_V2SF,
20893 V2SI_FTYPE_V2SI_SI_COUNT,
20894 V2DF_FTYPE_V2DF_V2DF,
20895 V2DF_FTYPE_V2DF_V2DF_SWAP,
20896 V2DF_FTYPE_V2DF_V4SF,
20897 V2DF_FTYPE_V2DF_V2DI,
20898 V2DF_FTYPE_V2DF_DI,
20899 V2DF_FTYPE_V2DF_SI,
20900 V2SF_FTYPE_V2SF_V2SF,
20901 V1DI_FTYPE_V1DI_V1DI,
20902 V1DI_FTYPE_V1DI_V1DI_COUNT,
20903 V1DI_FTYPE_V8QI_V8QI,
20904 V1DI_FTYPE_V2SI_V2SI,
20905 V1DI_FTYPE_V1DI_SI_COUNT,
20906 UINT64_FTYPE_UINT64_UINT64,
20907 UINT_FTYPE_UINT_UINT,
20908 UINT_FTYPE_UINT_USHORT,
20909 UINT_FTYPE_UINT_UCHAR,
20910 V8HI_FTYPE_V8HI_INT,
20911 V4SI_FTYPE_V4SI_INT,
20912 V4HI_FTYPE_V4HI_INT,
20913 V8SF_FTYPE_V8SF_INT,
20914 V4SI_FTYPE_V8SI_INT,
20915 V4SF_FTYPE_V8SF_INT,
20916 V2DF_FTYPE_V4DF_INT,
20917 V4DF_FTYPE_V4DF_INT,
20918 V4SF_FTYPE_V4SF_INT,
20919 V2DI_FTYPE_V2DI_INT,
20920 V2DI2TI_FTYPE_V2DI_INT,
20921 V2DF_FTYPE_V2DF_INT,
20922 V16QI_FTYPE_V16QI_V16QI_V16QI,
20923 V8SF_FTYPE_V8SF_V8SF_V8SF,
20924 V4DF_FTYPE_V4DF_V4DF_V4DF,
20925 V4SF_FTYPE_V4SF_V4SF_V4SF,
20926 V2DF_FTYPE_V2DF_V2DF_V2DF,
20927 V16QI_FTYPE_V16QI_V16QI_INT,
20928 V8SI_FTYPE_V8SI_V8SI_INT,
20929 V8SI_FTYPE_V8SI_V4SI_INT,
20930 V8HI_FTYPE_V8HI_V8HI_INT,
20931 V8SF_FTYPE_V8SF_V8SF_INT,
20932 V8SF_FTYPE_V8SF_V4SF_INT,
20933 V4SI_FTYPE_V4SI_V4SI_INT,
20934 V4DF_FTYPE_V4DF_V4DF_INT,
20935 V4DF_FTYPE_V4DF_V2DF_INT,
20936 V4SF_FTYPE_V4SF_V4SF_INT,
20937 V2DI_FTYPE_V2DI_V2DI_INT,
20938 V2DI2TI_FTYPE_V2DI_V2DI_INT,
20939 V1DI2DI_FTYPE_V1DI_V1DI_INT,
20940 V2DF_FTYPE_V2DF_V2DF_INT,
20941 V2DI_FTYPE_V2DI_UINT_UINT,
20942 V2DI_FTYPE_V2DI_V2DI_UINT_UINT
20945 /* Special builtins with variable number of arguments. */
20946 static const struct builtin_description bdesc_special_args[] =
20948 /* MMX */
20949 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
20951 /* 3DNow! */
20952 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
20954 /* SSE */
20955 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20956 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20957 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
20959 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
20960 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
20961 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
20962 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
20964 /* SSE or 3DNow!A */
20965 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20966 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PDI_DI },
20968 /* SSE2 */
20969 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20970 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20971 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20972 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
20973 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20974 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
20975 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
20976 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
20977 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
20979 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
20980 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
20982 /* SSE3 */
20983 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
20985 /* SSE4.1 */
20986 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
20988 /* SSE4A */
20989 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20990 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20992 /* AVX */
20993 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
20994 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, 0, IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
20995 { OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_64BIT, CODE_FOR_avx_vzeroupper_rex64, 0, IX86_BUILTIN_VZEROUPPER_REX64, UNKNOWN, (int) VOID_FTYPE_VOID },
20997 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastss, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
20998 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastsd256, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
20999 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastss256, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21000 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_pd256, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21001 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_ps256, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21003 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21004 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21005 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21006 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21007 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21008 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21009 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21011 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21012 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21013 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21015 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21016 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21017 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21018 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21019 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21020 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21021 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21022 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21025 /* Builtins with variable number of arguments. */
21026 static const struct builtin_description bdesc_args[] =
21028 /* MMX */
21029 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21030 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21031 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21032 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21033 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21034 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21036 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21037 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21038 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21039 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21040 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21041 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21042 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21043 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21045 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21046 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21048 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21049 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21050 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21051 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21053 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21054 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21055 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21056 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21057 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21058 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21060 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21061 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21062 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21063 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21064 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21065 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21067 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21068 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
21069 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21071 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
21073 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21074 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21075 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21076 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21077 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21078 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21080 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21081 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21082 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21083 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21084 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21085 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21087 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21088 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21089 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21090 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
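/* Note the two shift forms above: the ..._SI_COUNT prototypes (psllwi,
   pslldi, ...) take the count as a plain integer, while the vector
   ..._COUNT prototypes (psllw, pslld, ...) read it from another MMX
   register, so a single CODE_FOR_mmx_ashl/lshr/ashr pattern backs both
   builtins.  A rough usage sketch (types abbreviated):

     v4hi b = __builtin_ia32_psllwi (a, 3);   /* count is an int */
     v4hi c = __builtin_ia32_psllw (a, cnt);  /* count held in a vector */
*/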
21092 /* 3DNow! */
21093 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21094 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21095 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21096 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21098 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21099 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21100 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21101 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21102 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21103 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21104 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21105 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21106 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21107 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21108 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21109 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21110 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21111 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21112 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21114 /* 3DNow!A */
21115 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21116 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21117 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21118 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21119 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21120 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21122 /* SSE */
21123 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
21124 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21125 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21126 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21127 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21128 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21129 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21130 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21131 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21132 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21133 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21134 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21136 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21138 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21139 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21140 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21141 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21142 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21143 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21144 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21145 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21147 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21148 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21149 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21150 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21151 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21152 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21153 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21154 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21155 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21156 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21157 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP},
21158 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21159 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21160 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21161 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21162 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21163 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21164 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21165 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21166 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21167 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21168 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21170 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21171 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21172 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21173 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21175 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21176 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21177 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21178 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21180 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21181 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21182 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21183 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21184 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21186 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
21187 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
21188 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
21190 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
21192 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21193 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21194 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
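/* The ..._VEC_MERGE prototypes mark scalar operations (sqrtss, rsqrtss,
   rcpss) that compute only element 0 and merge the remaining elements
   unchanged from the input operand, matching the vec_merge RTL generated
   by the sse_vm* patterns.  */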
21196 /* SSE MMX or 3DNow!A */
21197 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21198 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21199 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21201 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21202 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21203 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21204 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21206 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
21207 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
21209 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
21211 /* SSE2 */
21212 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21214 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
21215 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
21216 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
21217 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
21218 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21220 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21221 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21222 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
21223 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21224 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21226 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
21228 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21229 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21230 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21231 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21233 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21234 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
21235 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21237 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21238 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21239 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21240 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21241 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21242 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21243 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21244 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21246 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21247 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21248 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21249 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21250 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP},
21251 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21252 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21253 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21254 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21255 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21256 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21257 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21258 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21259 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21260 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21261 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21262 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21263 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21264 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21265 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21267 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21268 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21269 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21270 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21272 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21273 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21274 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21275 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21277 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21278 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpckhpd_exp, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21279 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpcklpd_exp, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21281 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
21283 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21284 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21285 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21286 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21287 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21288 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21289 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21290 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21292 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21293 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21294 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21295 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21296 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21297 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21298 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21299 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21301 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21302 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21304 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21305 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21306 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21307 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21309 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21310 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21312 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21313 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21314 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21315 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21316 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21317 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21319 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21320 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21321 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21322 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21324 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21325 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21326 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21327 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21328 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21329 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21330 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21331 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21333 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21334 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
21335 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21337 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21338 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
21340 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
21341 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
21343 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
21345 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
21346 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
21347 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
21348 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
21350 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
21351 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21352 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21353 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
21354 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21355 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21356 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
21358 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
21359 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21360 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21361 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
21362 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21363 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21364 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
21366 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21367 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21368 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21369 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21371 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
21372 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
21373 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
21375 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
21377 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
21378 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
21380 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
21382 /* SSE2 MMX */
21383 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
21384 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
21386 /* SSE3 */
21387 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21388 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21390 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21391 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21392 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21393 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21394 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21395 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21397 /* SSSE3 */
21398 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
21399 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
21400 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
21401 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
21402 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
21403 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21405 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21406 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21407 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21408 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21409 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21410 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21411 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21412 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21413 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21414 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21415 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21416 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21417 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
21418 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
21419 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21420 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21421 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21422 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21423 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21424 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21425 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21426 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21427 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21428 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21430 /* SSSE3. */
21431 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_V2DI_INT },
21432 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI2DI_FTYPE_V1DI_V1DI_INT },
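/* palignr uses the same re-interpretation trick: the 128-bit form
   concatenates its operands in TImode (V2DI2TI) and the MMX form in
   DImode (V1DI2DI) before shifting out the aligned result.  */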
21434 /* SSE4.1 */
21435 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21436 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21437 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
21438 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
21439 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21440 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21441 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21442 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
21443 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
21444 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
21446 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
21447 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
21448 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
21449 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
21450 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
21451 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
21452 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
21453 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
21454 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
21455 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
21456 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
21457 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
21458 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
21460 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
21461 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21462 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21463 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21464 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21465 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21466 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21467 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21468 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21469 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21470 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
21471 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21473 /* SSE4.1 and SSE5 */
21474 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
21475 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
21476 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21477 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21479 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
21480 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
21481 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
21483 /* SSE4.2 */
21484 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21485 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
21486 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
21487 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
21488 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
21490 /* SSE4A */
21491 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
21492 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
21493 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
21494 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21496 /* AES */
21497 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
21498 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
21500 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21501 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21502 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21503 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21505 /* PCLMUL */
21506 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
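/* The AES and PCLMUL entries above pass 0 for the name: the table still
   drives expansion, but the user-visible builtins are presumably declared
   separately so their availability can be keyed to the AES/PCLMUL ISA
   flags rather than to OPTION_MASK_ISA_SSE2 alone.  */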
21508 /* AVX */
21509 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21510 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21511 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21512 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21513 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21514 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21515 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21516 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21517 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21518 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21519 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21520 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21521 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21522 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21523 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21524 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21525 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21526 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21527 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21528 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21529 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21530 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21531 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21532 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21533 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21534 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21536 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
21537 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
21538 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
21539 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
21541 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21542 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21543 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
21544 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
21545 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21546 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21547 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21548 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21549 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21550 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21551 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21552 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21553 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21554 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
21555 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
21556 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
21557 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
21558 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
21559 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
21560 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
21561 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
21562 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
21563 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
21564 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
21565 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21566 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21567 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
21568 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
21569 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
21570 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
21571 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
21572 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
21573 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
21574 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
21576 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21577 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21578 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
21580 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
21581 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21582 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21583 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21584 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21586 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21588 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
21589 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
21591 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21592 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21593 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21594 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21596 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
21597 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
21598 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
21599 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
21600 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
21601 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
21603 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
21604 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
21605 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
21606 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
21607 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
21608 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
21609 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
21610 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
21611 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
21612 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
21613 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
21614 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
21615 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
21616 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
21617 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
21619 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
21620   { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
21621 };
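/* Editorial sketch, not part of the original i386.c: each row of the table
   above binds one __builtin_ia32_* name to an insn pattern (CODE_FOR_*) and
   a prototype code (the *_FTYPE_* value); the intrinsic headers are thin
   wrappers over those builtins.  Assuming gcc 4.4's avxintrin.h, which
   implements _mm256_add_ps in terms of __builtin_ia32_addps256, a
   user-level example looks like this (compile with -mavx):  */
#if 0
#include <immintrin.h>

void
vadd8 (const float *a, const float *b, float *out)
{
  __m256 va = _mm256_loadu_ps (a);     /* eight floats per register */
  __m256 vb = _mm256_loadu_ps (b);
  /* Expands through the addps256 row registered above.  */
  _mm256_storeu_ps (out, _mm256_add_ps (va, vb));
}
#endif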
21623 /* SSE5 */
21624 enum multi_arg_type {
21625 MULTI_ARG_UNKNOWN,
21626 MULTI_ARG_3_SF,
21627 MULTI_ARG_3_DF,
21628 MULTI_ARG_3_DI,
21629 MULTI_ARG_3_SI,
21630 MULTI_ARG_3_SI_DI,
21631 MULTI_ARG_3_HI,
21632 MULTI_ARG_3_HI_SI,
21633 MULTI_ARG_3_QI,
21634 MULTI_ARG_3_PERMPS,
21635 MULTI_ARG_3_PERMPD,
21636 MULTI_ARG_2_SF,
21637 MULTI_ARG_2_DF,
21638 MULTI_ARG_2_DI,
21639 MULTI_ARG_2_SI,
21640 MULTI_ARG_2_HI,
21641 MULTI_ARG_2_QI,
21642 MULTI_ARG_2_DI_IMM,
21643 MULTI_ARG_2_SI_IMM,
21644 MULTI_ARG_2_HI_IMM,
21645 MULTI_ARG_2_QI_IMM,
21646 MULTI_ARG_2_SF_CMP,
21647 MULTI_ARG_2_DF_CMP,
21648 MULTI_ARG_2_DI_CMP,
21649 MULTI_ARG_2_SI_CMP,
21650 MULTI_ARG_2_HI_CMP,
21651 MULTI_ARG_2_QI_CMP,
21652 MULTI_ARG_2_DI_TF,
21653 MULTI_ARG_2_SI_TF,
21654 MULTI_ARG_2_HI_TF,
21655 MULTI_ARG_2_QI_TF,
21656 MULTI_ARG_2_SF_TF,
21657 MULTI_ARG_2_DF_TF,
21658 MULTI_ARG_1_SF,
21659 MULTI_ARG_1_DF,
21660 MULTI_ARG_1_DI,
21661 MULTI_ARG_1_SI,
21662 MULTI_ARG_1_HI,
21663 MULTI_ARG_1_QI,
21664 MULTI_ARG_1_SI_DI,
21665 MULTI_ARG_1_HI_DI,
21666 MULTI_ARG_1_HI_SI,
21667 MULTI_ARG_1_QI_DI,
21668 MULTI_ARG_1_QI_SI,
21669 MULTI_ARG_1_QI_HI,
21670 MULTI_ARG_1_PH2PS,
21671   MULTI_ARG_1_PS2PH
21672 };
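/* Editorial sketch, paraphrased rather than quoted from gcc: the
   MULTI_ARG_* tag stored in each bdesc_multi_arg row below tells the
   expander how many operands a builtin takes and which element modes are
   involved (the _IMM variants additionally require the last argument to be
   a constant).  The dispatch is roughly of this shape:  */
#if 0
switch (m_type)
  {
  case MULTI_ARG_3_SF:          /* (v4sf, v4sf, v4sf) -> v4sf */
  case MULTI_ARG_3_DF:          /* (v2df, v2df, v2df) -> v2df */
    nargs = 3;
    break;
  case MULTI_ARG_2_DI_IMM:      /* (v2di, const int) -> v2di */
    nargs = 2;
    last_arg_constant = true;
    break;
  /* ... one case group per enumerator ... */
  }
#endif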
21674 static const struct builtin_description bdesc_multi_arg[] =
21675 {
21676   { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv4sf4,     "__builtin_ia32_fmaddss",    IX86_BUILTIN_FMADDSS,    0,            (int)MULTI_ARG_3_SF },
21677 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv2df4, "__builtin_ia32_fmaddsd", IX86_BUILTIN_FMADDSD, 0, (int)MULTI_ARG_3_DF },
21678 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv4sf4, "__builtin_ia32_fmaddps", IX86_BUILTIN_FMADDPS, 0, (int)MULTI_ARG_3_SF },
21679 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv2df4, "__builtin_ia32_fmaddpd", IX86_BUILTIN_FMADDPD, 0, (int)MULTI_ARG_3_DF },
21680 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv4sf4, "__builtin_ia32_fmsubss", IX86_BUILTIN_FMSUBSS, 0, (int)MULTI_ARG_3_SF },
21681 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv2df4, "__builtin_ia32_fmsubsd", IX86_BUILTIN_FMSUBSD, 0, (int)MULTI_ARG_3_DF },
21682 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv4sf4, "__builtin_ia32_fmsubps", IX86_BUILTIN_FMSUBPS, 0, (int)MULTI_ARG_3_SF },
21683 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv2df4, "__builtin_ia32_fmsubpd", IX86_BUILTIN_FMSUBPD, 0, (int)MULTI_ARG_3_DF },
21684 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv4sf4, "__builtin_ia32_fnmaddss", IX86_BUILTIN_FNMADDSS, 0, (int)MULTI_ARG_3_SF },
21685 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv2df4, "__builtin_ia32_fnmaddsd", IX86_BUILTIN_FNMADDSD, 0, (int)MULTI_ARG_3_DF },
21686 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv4sf4, "__builtin_ia32_fnmaddps", IX86_BUILTIN_FNMADDPS, 0, (int)MULTI_ARG_3_SF },
21687 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv2df4, "__builtin_ia32_fnmaddpd", IX86_BUILTIN_FNMADDPD, 0, (int)MULTI_ARG_3_DF },
21688 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv4sf4, "__builtin_ia32_fnmsubss", IX86_BUILTIN_FNMSUBSS, 0, (int)MULTI_ARG_3_SF },
21689 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv2df4, "__builtin_ia32_fnmsubsd", IX86_BUILTIN_FNMSUBSD, 0, (int)MULTI_ARG_3_DF },
21690 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv4sf4, "__builtin_ia32_fnmsubps", IX86_BUILTIN_FNMSUBPS, 0, (int)MULTI_ARG_3_SF },
21691 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv2df4, "__builtin_ia32_fnmsubpd", IX86_BUILTIN_FNMSUBPD, 0, (int)MULTI_ARG_3_DF },
21692 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov", IX86_BUILTIN_PCMOV, 0, (int)MULTI_ARG_3_DI },
21693 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov_v2di", IX86_BUILTIN_PCMOV_V2DI, 0, (int)MULTI_ARG_3_DI },
21694 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4si, "__builtin_ia32_pcmov_v4si", IX86_BUILTIN_PCMOV_V4SI, 0, (int)MULTI_ARG_3_SI },
21695 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v8hi, "__builtin_ia32_pcmov_v8hi", IX86_BUILTIN_PCMOV_V8HI, 0, (int)MULTI_ARG_3_HI },
21696 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v16qi, "__builtin_ia32_pcmov_v16qi",IX86_BUILTIN_PCMOV_V16QI,0, (int)MULTI_ARG_3_QI },
21697 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2df, "__builtin_ia32_pcmov_v2df", IX86_BUILTIN_PCMOV_V2DF, 0, (int)MULTI_ARG_3_DF },
21698 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4sf, "__builtin_ia32_pcmov_v4sf", IX86_BUILTIN_PCMOV_V4SF, 0, (int)MULTI_ARG_3_SF },
21699 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pperm, "__builtin_ia32_pperm", IX86_BUILTIN_PPERM, 0, (int)MULTI_ARG_3_QI },
21700 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv4sf, "__builtin_ia32_permps", IX86_BUILTIN_PERMPS, 0, (int)MULTI_ARG_3_PERMPS },
21701 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv2df, "__builtin_ia32_permpd", IX86_BUILTIN_PERMPD, 0, (int)MULTI_ARG_3_PERMPD },
21702 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssww, "__builtin_ia32_pmacssww", IX86_BUILTIN_PMACSSWW, 0, (int)MULTI_ARG_3_HI },
21703 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsww, "__builtin_ia32_pmacsww", IX86_BUILTIN_PMACSWW, 0, (int)MULTI_ARG_3_HI },
21704 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsswd, "__builtin_ia32_pmacsswd", IX86_BUILTIN_PMACSSWD, 0, (int)MULTI_ARG_3_HI_SI },
21705 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacswd, "__builtin_ia32_pmacswd", IX86_BUILTIN_PMACSWD, 0, (int)MULTI_ARG_3_HI_SI },
21706 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdd, "__builtin_ia32_pmacssdd", IX86_BUILTIN_PMACSSDD, 0, (int)MULTI_ARG_3_SI },
21707 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdd, "__builtin_ia32_pmacsdd", IX86_BUILTIN_PMACSDD, 0, (int)MULTI_ARG_3_SI },
21708 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdql, "__builtin_ia32_pmacssdql", IX86_BUILTIN_PMACSSDQL, 0, (int)MULTI_ARG_3_SI_DI },
21709 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdqh, "__builtin_ia32_pmacssdqh", IX86_BUILTIN_PMACSSDQH, 0, (int)MULTI_ARG_3_SI_DI },
21710 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdql, "__builtin_ia32_pmacsdql", IX86_BUILTIN_PMACSDQL, 0, (int)MULTI_ARG_3_SI_DI },
21711 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdqh, "__builtin_ia32_pmacsdqh", IX86_BUILTIN_PMACSDQH, 0, (int)MULTI_ARG_3_SI_DI },
21712 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcsswd, "__builtin_ia32_pmadcsswd", IX86_BUILTIN_PMADCSSWD, 0, (int)MULTI_ARG_3_HI_SI },
21713 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcswd, "__builtin_ia32_pmadcswd", IX86_BUILTIN_PMADCSWD, 0, (int)MULTI_ARG_3_HI_SI },
21714 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv2di3, "__builtin_ia32_protq", IX86_BUILTIN_PROTQ, 0, (int)MULTI_ARG_2_DI },
21715 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv4si3, "__builtin_ia32_protd", IX86_BUILTIN_PROTD, 0, (int)MULTI_ARG_2_SI },
21716 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv8hi3, "__builtin_ia32_protw", IX86_BUILTIN_PROTW, 0, (int)MULTI_ARG_2_HI },
21717 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv16qi3, "__builtin_ia32_protb", IX86_BUILTIN_PROTB, 0, (int)MULTI_ARG_2_QI },
21718 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv2di3, "__builtin_ia32_protqi", IX86_BUILTIN_PROTQ_IMM, 0, (int)MULTI_ARG_2_DI_IMM },
21719 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv4si3, "__builtin_ia32_protdi", IX86_BUILTIN_PROTD_IMM, 0, (int)MULTI_ARG_2_SI_IMM },
21720 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv8hi3, "__builtin_ia32_protwi", IX86_BUILTIN_PROTW_IMM, 0, (int)MULTI_ARG_2_HI_IMM },
21721 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv16qi3, "__builtin_ia32_protbi", IX86_BUILTIN_PROTB_IMM, 0, (int)MULTI_ARG_2_QI_IMM },
21722 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv2di3, "__builtin_ia32_pshaq", IX86_BUILTIN_PSHAQ, 0, (int)MULTI_ARG_2_DI },
21723 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv4si3, "__builtin_ia32_pshad", IX86_BUILTIN_PSHAD, 0, (int)MULTI_ARG_2_SI },
21724 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv8hi3, "__builtin_ia32_pshaw", IX86_BUILTIN_PSHAW, 0, (int)MULTI_ARG_2_HI },
21725 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv16qi3, "__builtin_ia32_pshab", IX86_BUILTIN_PSHAB, 0, (int)MULTI_ARG_2_QI },
21726 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv2di3, "__builtin_ia32_pshlq", IX86_BUILTIN_PSHLQ, 0, (int)MULTI_ARG_2_DI },
21727 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv4si3, "__builtin_ia32_pshld", IX86_BUILTIN_PSHLD, 0, (int)MULTI_ARG_2_SI },
21728 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv8hi3, "__builtin_ia32_pshlw", IX86_BUILTIN_PSHLW, 0, (int)MULTI_ARG_2_HI },
21729 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv16qi3, "__builtin_ia32_pshlb", IX86_BUILTIN_PSHLB, 0, (int)MULTI_ARG_2_QI },
21730 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv4sf2, "__builtin_ia32_frczss", IX86_BUILTIN_FRCZSS, 0, (int)MULTI_ARG_2_SF },
21731 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv2df2, "__builtin_ia32_frczsd", IX86_BUILTIN_FRCZSD, 0, (int)MULTI_ARG_2_DF },
21732 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv4sf2, "__builtin_ia32_frczps", IX86_BUILTIN_FRCZPS, 0, (int)MULTI_ARG_1_SF },
21733 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv2df2, "__builtin_ia32_frczpd", IX86_BUILTIN_FRCZPD, 0, (int)MULTI_ARG_1_DF },
21734 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtph2ps, "__builtin_ia32_cvtph2ps", IX86_BUILTIN_CVTPH2PS, 0, (int)MULTI_ARG_1_PH2PS },
21735 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtps2ph, "__builtin_ia32_cvtps2ph", IX86_BUILTIN_CVTPS2PH, 0, (int)MULTI_ARG_1_PS2PH },
21736 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbw, "__builtin_ia32_phaddbw", IX86_BUILTIN_PHADDBW, 0, (int)MULTI_ARG_1_QI_HI },
21737 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbd, "__builtin_ia32_phaddbd", IX86_BUILTIN_PHADDBD, 0, (int)MULTI_ARG_1_QI_SI },
21738 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbq, "__builtin_ia32_phaddbq", IX86_BUILTIN_PHADDBQ, 0, (int)MULTI_ARG_1_QI_DI },
21739 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwd, "__builtin_ia32_phaddwd", IX86_BUILTIN_PHADDWD, 0, (int)MULTI_ARG_1_HI_SI },
21740 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwq, "__builtin_ia32_phaddwq", IX86_BUILTIN_PHADDWQ, 0, (int)MULTI_ARG_1_HI_DI },
21741 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadddq, "__builtin_ia32_phadddq", IX86_BUILTIN_PHADDDQ, 0, (int)MULTI_ARG_1_SI_DI },
21742 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubw, "__builtin_ia32_phaddubw", IX86_BUILTIN_PHADDUBW, 0, (int)MULTI_ARG_1_QI_HI },
21743 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubd, "__builtin_ia32_phaddubd", IX86_BUILTIN_PHADDUBD, 0, (int)MULTI_ARG_1_QI_SI },
21744 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubq, "__builtin_ia32_phaddubq", IX86_BUILTIN_PHADDUBQ, 0, (int)MULTI_ARG_1_QI_DI },
21745 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwd, "__builtin_ia32_phadduwd", IX86_BUILTIN_PHADDUWD, 0, (int)MULTI_ARG_1_HI_SI },
21746 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwq, "__builtin_ia32_phadduwq", IX86_BUILTIN_PHADDUWQ, 0, (int)MULTI_ARG_1_HI_DI },
21747 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddudq, "__builtin_ia32_phaddudq", IX86_BUILTIN_PHADDUDQ, 0, (int)MULTI_ARG_1_SI_DI },
21748 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubbw, "__builtin_ia32_phsubbw", IX86_BUILTIN_PHSUBBW, 0, (int)MULTI_ARG_1_QI_HI },
21749 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubwd, "__builtin_ia32_phsubwd", IX86_BUILTIN_PHSUBWD, 0, (int)MULTI_ARG_1_HI_SI },
21750 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubdq, "__builtin_ia32_phsubdq", IX86_BUILTIN_PHSUBDQ, 0, (int)MULTI_ARG_1_SI_DI },
21752 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comeqss", IX86_BUILTIN_COMEQSS, EQ, (int)MULTI_ARG_2_SF_CMP },
21753 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comness", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
21754 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comneqss", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
21755 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comltss", IX86_BUILTIN_COMLTSS, LT, (int)MULTI_ARG_2_SF_CMP },
21756 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comless", IX86_BUILTIN_COMLESS, LE, (int)MULTI_ARG_2_SF_CMP },
21757 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgtss", IX86_BUILTIN_COMGTSS, GT, (int)MULTI_ARG_2_SF_CMP },
21758 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgess", IX86_BUILTIN_COMGESS, GE, (int)MULTI_ARG_2_SF_CMP },
21759 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comueqss", IX86_BUILTIN_COMUEQSS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
21760 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuness", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21761 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuneqss", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21762 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunltss", IX86_BUILTIN_COMULTSS, UNLT, (int)MULTI_ARG_2_SF_CMP },
21763 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunless", IX86_BUILTIN_COMULESS, UNLE, (int)MULTI_ARG_2_SF_CMP },
21764 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungtss", IX86_BUILTIN_COMUGTSS, UNGT, (int)MULTI_ARG_2_SF_CMP },
21765 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungess", IX86_BUILTIN_COMUGESS, UNGE, (int)MULTI_ARG_2_SF_CMP },
21766 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comordss", IX86_BUILTIN_COMORDSS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
21767 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunordss", IX86_BUILTIN_COMUNORDSS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
21769 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comeqsd", IX86_BUILTIN_COMEQSD, EQ, (int)MULTI_ARG_2_DF_CMP },
21770 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comnesd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
21771 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comneqsd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
21772 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comltsd", IX86_BUILTIN_COMLTSD, LT, (int)MULTI_ARG_2_DF_CMP },
21773 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comlesd", IX86_BUILTIN_COMLESD, LE, (int)MULTI_ARG_2_DF_CMP },
21774 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgtsd", IX86_BUILTIN_COMGTSD, GT, (int)MULTI_ARG_2_DF_CMP },
21775 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgesd", IX86_BUILTIN_COMGESD, GE, (int)MULTI_ARG_2_DF_CMP },
21776 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comueqsd", IX86_BUILTIN_COMUEQSD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
21777 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunesd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21778 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comuneqsd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21779 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunltsd", IX86_BUILTIN_COMULTSD, UNLT, (int)MULTI_ARG_2_DF_CMP },
21780 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunlesd", IX86_BUILTIN_COMULESD, UNLE, (int)MULTI_ARG_2_DF_CMP },
21781 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungtsd", IX86_BUILTIN_COMUGTSD, UNGT, (int)MULTI_ARG_2_DF_CMP },
21782 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungesd", IX86_BUILTIN_COMUGESD, UNGE, (int)MULTI_ARG_2_DF_CMP },
21783 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comordsd", IX86_BUILTIN_COMORDSD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
21784 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunordsd", IX86_BUILTIN_COMUNORDSD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
21786 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comeqps", IX86_BUILTIN_COMEQPS, EQ, (int)MULTI_ARG_2_SF_CMP },
21787 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
21788 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneqps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
21789 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comltps", IX86_BUILTIN_COMLTPS, LT, (int)MULTI_ARG_2_SF_CMP },
21790 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comleps", IX86_BUILTIN_COMLEPS, LE, (int)MULTI_ARG_2_SF_CMP },
21791 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgtps", IX86_BUILTIN_COMGTPS, GT, (int)MULTI_ARG_2_SF_CMP },
21792 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgeps", IX86_BUILTIN_COMGEPS, GE, (int)MULTI_ARG_2_SF_CMP },
21793 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comueqps", IX86_BUILTIN_COMUEQPS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
21794 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21795 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneqps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
21796 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunltps", IX86_BUILTIN_COMULTPS, UNLT, (int)MULTI_ARG_2_SF_CMP },
21797 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunleps", IX86_BUILTIN_COMULEPS, UNLE, (int)MULTI_ARG_2_SF_CMP },
21798 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungtps", IX86_BUILTIN_COMUGTPS, UNGT, (int)MULTI_ARG_2_SF_CMP },
21799 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungeps", IX86_BUILTIN_COMUGEPS, UNGE, (int)MULTI_ARG_2_SF_CMP },
21800 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comordps", IX86_BUILTIN_COMORDPS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
21801 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunordps", IX86_BUILTIN_COMUNORDPS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
21803 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comeqpd", IX86_BUILTIN_COMEQPD, EQ, (int)MULTI_ARG_2_DF_CMP },
21804 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comnepd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
21805 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comneqpd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
21806 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comltpd", IX86_BUILTIN_COMLTPD, LT, (int)MULTI_ARG_2_DF_CMP },
21807 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comlepd", IX86_BUILTIN_COMLEPD, LE, (int)MULTI_ARG_2_DF_CMP },
21808 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgtpd", IX86_BUILTIN_COMGTPD, GT, (int)MULTI_ARG_2_DF_CMP },
21809 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgepd", IX86_BUILTIN_COMGEPD, GE, (int)MULTI_ARG_2_DF_CMP },
21810 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comueqpd", IX86_BUILTIN_COMUEQPD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
21811 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunepd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21812 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comuneqpd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
21813 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunltpd", IX86_BUILTIN_COMULTPD, UNLT, (int)MULTI_ARG_2_DF_CMP },
21814 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunlepd", IX86_BUILTIN_COMULEPD, UNLE, (int)MULTI_ARG_2_DF_CMP },
21815 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungtpd", IX86_BUILTIN_COMUGTPD, UNGT, (int)MULTI_ARG_2_DF_CMP },
21816 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungepd", IX86_BUILTIN_COMUGEPD, UNGE, (int)MULTI_ARG_2_DF_CMP },
21817 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comordpd", IX86_BUILTIN_COMORDPD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
21818 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunordpd", IX86_BUILTIN_COMUNORDPD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
21820 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomeqb", IX86_BUILTIN_PCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
21821 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
21822 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneqb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
21823 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomltb", IX86_BUILTIN_PCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
21824 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomleb", IX86_BUILTIN_PCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
21825 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgtb", IX86_BUILTIN_PCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
21826 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgeb", IX86_BUILTIN_PCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
21828 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomeqw", IX86_BUILTIN_PCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
21829 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomnew", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
21830 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomneqw", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
21831 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomltw", IX86_BUILTIN_PCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
21832 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomlew", IX86_BUILTIN_PCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
21833 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgtw", IX86_BUILTIN_PCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
21834 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgew", IX86_BUILTIN_PCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
21836 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomeqd", IX86_BUILTIN_PCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
21837 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomned", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
21838 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomneqd", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
21839 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomltd", IX86_BUILTIN_PCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
21840 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomled", IX86_BUILTIN_PCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
21841 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomgtd", IX86_BUILTIN_PCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
21842 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomged", IX86_BUILTIN_PCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
21844 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomeqq", IX86_BUILTIN_PCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
21845 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
21846 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneqq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
21847 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomltq", IX86_BUILTIN_PCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
21848 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomleq", IX86_BUILTIN_PCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
21849 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgtq", IX86_BUILTIN_PCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
21850 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgeq", IX86_BUILTIN_PCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
21852 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomequb", IX86_BUILTIN_PCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
21853 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomneub", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
21854 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomnequb", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
21855 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomltub", IX86_BUILTIN_PCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
21856 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomleub", IX86_BUILTIN_PCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
21857 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgtub", IX86_BUILTIN_PCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
21858 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgeub", IX86_BUILTIN_PCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
21860 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomequw", IX86_BUILTIN_PCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
21861 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomneuw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
21862 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomnequw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
21863 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomltuw", IX86_BUILTIN_PCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
21864 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomleuw", IX86_BUILTIN_PCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
21865 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgtuw", IX86_BUILTIN_PCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
21866 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgeuw", IX86_BUILTIN_PCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
21868 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomequd", IX86_BUILTIN_PCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
21869 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomneud", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
21870 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomnequd", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
21871 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomltud", IX86_BUILTIN_PCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
21872 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomleud", IX86_BUILTIN_PCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
21873 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgtud", IX86_BUILTIN_PCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
21874 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgeud", IX86_BUILTIN_PCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
21876 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomequq", IX86_BUILTIN_PCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
21877 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomneuq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
21878 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomnequq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
21879 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomltuq", IX86_BUILTIN_PCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
21880 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomleuq", IX86_BUILTIN_PCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
21881 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgtuq", IX86_BUILTIN_PCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
21882 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgeuq", IX86_BUILTIN_PCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
21884 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalsess", IX86_BUILTIN_COMFALSESS, COM_FALSE_S, (int)MULTI_ARG_2_SF_TF },
21885 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtruess", IX86_BUILTIN_COMTRUESS, COM_TRUE_S, (int)MULTI_ARG_2_SF_TF },
21886 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalseps", IX86_BUILTIN_COMFALSEPS, COM_FALSE_P, (int)MULTI_ARG_2_SF_TF },
21887 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtrueps", IX86_BUILTIN_COMTRUEPS, COM_TRUE_P, (int)MULTI_ARG_2_SF_TF },
21888 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsesd", IX86_BUILTIN_COMFALSESD, COM_FALSE_S, (int)MULTI_ARG_2_DF_TF },
21889 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruesd", IX86_BUILTIN_COMTRUESD, COM_TRUE_S, (int)MULTI_ARG_2_DF_TF },
21890 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsepd", IX86_BUILTIN_COMFALSEPD, COM_FALSE_P, (int)MULTI_ARG_2_DF_TF },
21891 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruepd", IX86_BUILTIN_COMTRUEPD, COM_TRUE_P, (int)MULTI_ARG_2_DF_TF },
21893 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseb", IX86_BUILTIN_PCOMFALSEB, PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
21894 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalsew", IX86_BUILTIN_PCOMFALSEW, PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
21895 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalsed", IX86_BUILTIN_PCOMFALSED, PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
21896 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseq", IX86_BUILTIN_PCOMFALSEQ, PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
21897 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseub",IX86_BUILTIN_PCOMFALSEUB,PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
21898 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalseuw",IX86_BUILTIN_PCOMFALSEUW,PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
21899 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalseud",IX86_BUILTIN_PCOMFALSEUD,PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
21900 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseuq",IX86_BUILTIN_PCOMFALSEUQ,PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
21902 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueb", IX86_BUILTIN_PCOMTRUEB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
21903 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtruew", IX86_BUILTIN_PCOMTRUEW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
21904 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrued", IX86_BUILTIN_PCOMTRUED, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
21905 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueq", IX86_BUILTIN_PCOMTRUEQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
21906 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueub", IX86_BUILTIN_PCOMTRUEUB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
21907 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtrueuw", IX86_BUILTIN_PCOMTRUEUW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
21908 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrueud", IX86_BUILTIN_PCOMTRUEUD, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
21909   { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3,      "__builtin_ia32_pcomtrueuq", IX86_BUILTIN_PCOMTRUEUQ, PCOM_TRUE,    (int)MULTI_ARG_2_DI_TF },
21910 };
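/* Editorial sketch, not part of the original i386.c: the SSE5 rows above
   are reachable directly by their __builtin_ia32_* names (and through the
   bmmintrin.h wrappers).  For instance, the MULTI_ARG_3_SF row for fmaddps
   is a fused multiply-add over v4sf; assuming a file compiled with
   -msse5:  */
#if 0
typedef float v4sf __attribute__ ((vector_size (16)));

v4sf
madd (v4sf a, v4sf b, v4sf c)
{
  return __builtin_ia32_fmaddps (a, b, c);   /* a * b + c, fused */
}
#endif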
21912 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
21913    not in the current target ISA, so that the user can compile particular
21914    modules with target-specific options that differ from the command-line
21915    options.  */
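/* Editorial sketch illustrating the comment above: because the tables
   register every builtin up front, with the required ISA recorded in the
   per-row mask, one function (or one module) can enable an extra ISA and
   use its builtins even when the command line does not.  Assuming GCC
   4.4's per-function target attribute:  */
#if 0
__attribute__ ((target ("sse4.2")))
unsigned int
step_crc (unsigned int crc, unsigned int data)
{
  /* Usable here even if the file is compiled without -msse4.2; the
     per-row ISA mask is what gates the expansion.  */
  return __builtin_ia32_crc32si (crc, data);
}
#endif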
21916 static void
21917 ix86_init_mmx_sse_builtins (void)
21918 {
21919 const struct builtin_description * d;
21920 size_t i;
21922 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
21923 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
21924 tree V1DI_type_node
21925 = build_vector_type_for_mode (long_long_integer_type_node, V1DImode);
21926 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
21927 tree V2DI_type_node
21928 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
21929 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
21930 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
21931 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
21932 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
21933 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
21934 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
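/* Editorial note: each of these vector type nodes corresponds roughly to
   the type the C front end builds for a vector_size attribute of the
   matching machine mode.  The typedef names in this sketch are purely
   illustrative:  */
#if 0
typedef long long __x2di __attribute__ ((vector_size (16)));  /* V2DImode */
typedef float     __x4sf __attribute__ ((vector_size (16)));  /* V4SFmode */
typedef short     __x4hi __attribute__ ((vector_size (8)));   /* V4HImode */
#endif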
21936 tree pchar_type_node = build_pointer_type (char_type_node);
21937 tree pcchar_type_node
21938 = build_pointer_type (build_type_variant (char_type_node, 1, 0));
21939 tree pfloat_type_node = build_pointer_type (float_type_node);
21940 tree pcfloat_type_node
21941 = build_pointer_type (build_type_variant (float_type_node, 1, 0));
21942 tree pv2sf_type_node = build_pointer_type (V2SF_type_node);
21943 tree pcv2sf_type_node
21944 = build_pointer_type (build_type_variant (V2SF_type_node, 1, 0));
21945 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
21946 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
21948 /* Comparisons. */
21949 tree int_ftype_v4sf_v4sf
21950 = build_function_type_list (integer_type_node,
21951 V4SF_type_node, V4SF_type_node, NULL_TREE);
21952 tree v4si_ftype_v4sf_v4sf
21953 = build_function_type_list (V4SI_type_node,
21954 V4SF_type_node, V4SF_type_node, NULL_TREE);
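/* Editorial sketch, paraphrased from later in this same function: type
   nodes such as int_ftype_v4sf_v4sf become the prototype argument of
   def_builtin / def_builtin_const calls, which register one named builtin
   under an ISA mask.  The comi/ucomi registration loop, for example, has
   roughly this shape:  */
#if 0
for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
  if (d->mask == OPTION_MASK_ISA_SSE2)
    def_builtin_const (d->mask, d->name, int_ftype_v2df_v2df, d->code);
  else
    def_builtin_const (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
#endif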
21955 /* MMX/SSE/integer conversions. */
21956 tree int_ftype_v4sf
21957 = build_function_type_list (integer_type_node,
21958 V4SF_type_node, NULL_TREE);
21959 tree int64_ftype_v4sf
21960 = build_function_type_list (long_long_integer_type_node,
21961 V4SF_type_node, NULL_TREE);
21962 tree int_ftype_v8qi
21963 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
21964 tree v4sf_ftype_v4sf_int
21965 = build_function_type_list (V4SF_type_node,
21966 V4SF_type_node, integer_type_node, NULL_TREE);
21967 tree v4sf_ftype_v4sf_int64
21968 = build_function_type_list (V4SF_type_node,
21969 V4SF_type_node, long_long_integer_type_node,
21970 NULL_TREE);
21971 tree v4sf_ftype_v4sf_v2si
21972 = build_function_type_list (V4SF_type_node,
21973 V4SF_type_node, V2SI_type_node, NULL_TREE);
21975 /* Miscellaneous. */
21976 tree v8qi_ftype_v4hi_v4hi
21977 = build_function_type_list (V8QI_type_node,
21978 V4HI_type_node, V4HI_type_node, NULL_TREE);
21979 tree v4hi_ftype_v2si_v2si
21980 = build_function_type_list (V4HI_type_node,
21981 V2SI_type_node, V2SI_type_node, NULL_TREE);
21982 tree v4sf_ftype_v4sf_v4sf_int
21983 = build_function_type_list (V4SF_type_node,
21984 V4SF_type_node, V4SF_type_node,
21985 integer_type_node, NULL_TREE);
21986 tree v2si_ftype_v4hi_v4hi
21987 = build_function_type_list (V2SI_type_node,
21988 V4HI_type_node, V4HI_type_node, NULL_TREE);
21989 tree v4hi_ftype_v4hi_int
21990 = build_function_type_list (V4HI_type_node,
21991 V4HI_type_node, integer_type_node, NULL_TREE);
21992 tree v2si_ftype_v2si_int
21993 = build_function_type_list (V2SI_type_node,
21994 V2SI_type_node, integer_type_node, NULL_TREE);
21995 tree v1di_ftype_v1di_int
21996 = build_function_type_list (V1DI_type_node,
21997 V1DI_type_node, integer_type_node, NULL_TREE);
21999 tree void_ftype_void
22000 = build_function_type (void_type_node, void_list_node);
22001 tree void_ftype_unsigned
22002 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
22003 tree void_ftype_unsigned_unsigned
22004 = build_function_type_list (void_type_node, unsigned_type_node,
22005 unsigned_type_node, NULL_TREE);
22006 tree void_ftype_pcvoid_unsigned_unsigned
22007 = build_function_type_list (void_type_node, const_ptr_type_node,
22008 unsigned_type_node, unsigned_type_node,
22009 NULL_TREE);
22010 tree unsigned_ftype_void
22011 = build_function_type (unsigned_type_node, void_list_node);
22012 tree v2si_ftype_v4sf
22013 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
22014 /* Loads/stores. */
22015 tree void_ftype_v8qi_v8qi_pchar
22016 = build_function_type_list (void_type_node,
22017 V8QI_type_node, V8QI_type_node,
22018 pchar_type_node, NULL_TREE);
22019 tree v4sf_ftype_pcfloat
22020 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
22021 tree v4sf_ftype_v4sf_pcv2sf
22022 = build_function_type_list (V4SF_type_node,
22023 V4SF_type_node, pcv2sf_type_node, NULL_TREE);
22024 tree void_ftype_pv2sf_v4sf
22025 = build_function_type_list (void_type_node,
22026 pv2sf_type_node, V4SF_type_node, NULL_TREE);
22027 tree void_ftype_pfloat_v4sf
22028 = build_function_type_list (void_type_node,
22029 pfloat_type_node, V4SF_type_node, NULL_TREE);
22030 tree void_ftype_pdi_di
22031 = build_function_type_list (void_type_node,
22032 pdi_type_node, long_long_unsigned_type_node,
22033 NULL_TREE);
22034 tree void_ftype_pv2di_v2di
22035 = build_function_type_list (void_type_node,
22036 pv2di_type_node, V2DI_type_node, NULL_TREE);
22037 /* Normal vector unops. */
22038 tree v4sf_ftype_v4sf
22039 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
22040 tree v16qi_ftype_v16qi
22041 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
22042 tree v8hi_ftype_v8hi
22043 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
22044 tree v4si_ftype_v4si
22045 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
22046 tree v8qi_ftype_v8qi
22047 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
22048 tree v4hi_ftype_v4hi
22049 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
22051 /* Normal vector binops. */
22052 tree v4sf_ftype_v4sf_v4sf
22053 = build_function_type_list (V4SF_type_node,
22054 V4SF_type_node, V4SF_type_node, NULL_TREE);
22055 tree v8qi_ftype_v8qi_v8qi
22056 = build_function_type_list (V8QI_type_node,
22057 V8QI_type_node, V8QI_type_node, NULL_TREE);
22058 tree v4hi_ftype_v4hi_v4hi
22059 = build_function_type_list (V4HI_type_node,
22060 V4HI_type_node, V4HI_type_node, NULL_TREE);
22061 tree v2si_ftype_v2si_v2si
22062 = build_function_type_list (V2SI_type_node,
22063 V2SI_type_node, V2SI_type_node, NULL_TREE);
22064 tree v1di_ftype_v1di_v1di
22065 = build_function_type_list (V1DI_type_node,
22066 V1DI_type_node, V1DI_type_node, NULL_TREE);
22067 tree v1di_ftype_v1di_v1di_int
22068 = build_function_type_list (V1DI_type_node,
22069 V1DI_type_node, V1DI_type_node,
22070 integer_type_node, NULL_TREE);
22071 tree v2si_ftype_v2sf
22072 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
22073 tree v2sf_ftype_v2si
22074 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
22075 tree v2si_ftype_v2si
22076 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
22077 tree v2sf_ftype_v2sf
22078 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
22079 tree v2sf_ftype_v2sf_v2sf
22080 = build_function_type_list (V2SF_type_node,
22081 V2SF_type_node, V2SF_type_node, NULL_TREE);
22082 tree v2si_ftype_v2sf_v2sf
22083 = build_function_type_list (V2SI_type_node,
22084 V2SF_type_node, V2SF_type_node, NULL_TREE);
22085 tree pint_type_node = build_pointer_type (integer_type_node);
22086 tree pdouble_type_node = build_pointer_type (double_type_node);
22087 tree pcdouble_type_node = build_pointer_type (
22088 build_type_variant (double_type_node, 1, 0));
22089 tree int_ftype_v2df_v2df
22090 = build_function_type_list (integer_type_node,
22091 V2DF_type_node, V2DF_type_node, NULL_TREE);
22093 tree void_ftype_pcvoid
22094 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
22095 tree v4sf_ftype_v4si
22096 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
22097 tree v4si_ftype_v4sf
22098 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
22099 tree v2df_ftype_v4si
22100 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
22101 tree v4si_ftype_v2df
22102 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
22103 tree v4si_ftype_v2df_v2df
22104 = build_function_type_list (V4SI_type_node,
22105 V2DF_type_node, V2DF_type_node, NULL_TREE);
22106 tree v2si_ftype_v2df
22107 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
22108 tree v4sf_ftype_v2df
22109 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
22110 tree v2df_ftype_v2si
22111 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
22112 tree v2df_ftype_v4sf
22113 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
22114 tree int_ftype_v2df
22115 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
22116 tree int64_ftype_v2df
22117 = build_function_type_list (long_long_integer_type_node,
22118 V2DF_type_node, NULL_TREE);
22119 tree v2df_ftype_v2df_int
22120 = build_function_type_list (V2DF_type_node,
22121 V2DF_type_node, integer_type_node, NULL_TREE);
22122 tree v2df_ftype_v2df_int64
22123 = build_function_type_list (V2DF_type_node,
22124 V2DF_type_node, long_long_integer_type_node,
22125 NULL_TREE);
22126 tree v4sf_ftype_v4sf_v2df
22127 = build_function_type_list (V4SF_type_node,
22128 V4SF_type_node, V2DF_type_node, NULL_TREE);
22129 tree v2df_ftype_v2df_v4sf
22130 = build_function_type_list (V2DF_type_node,
22131 V2DF_type_node, V4SF_type_node, NULL_TREE);
22132 tree v2df_ftype_v2df_v2df_int
22133 = build_function_type_list (V2DF_type_node,
22134 V2DF_type_node, V2DF_type_node,
22135 integer_type_node,
22136 NULL_TREE);
22137 tree v2df_ftype_v2df_pcdouble
22138 = build_function_type_list (V2DF_type_node,
22139 V2DF_type_node, pcdouble_type_node, NULL_TREE);
22140 tree void_ftype_pdouble_v2df
22141 = build_function_type_list (void_type_node,
22142 pdouble_type_node, V2DF_type_node, NULL_TREE);
22143 tree void_ftype_pint_int
22144 = build_function_type_list (void_type_node,
22145 pint_type_node, integer_type_node, NULL_TREE);
22146 tree void_ftype_v16qi_v16qi_pchar
22147 = build_function_type_list (void_type_node,
22148 V16QI_type_node, V16QI_type_node,
22149 pchar_type_node, NULL_TREE);
22150 tree v2df_ftype_pcdouble
22151 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
22152 tree v2df_ftype_v2df_v2df
22153 = build_function_type_list (V2DF_type_node,
22154 V2DF_type_node, V2DF_type_node, NULL_TREE);
22155 tree v16qi_ftype_v16qi_v16qi
22156 = build_function_type_list (V16QI_type_node,
22157 V16QI_type_node, V16QI_type_node, NULL_TREE);
22158 tree v8hi_ftype_v8hi_v8hi
22159 = build_function_type_list (V8HI_type_node,
22160 V8HI_type_node, V8HI_type_node, NULL_TREE);
22161 tree v4si_ftype_v4si_v4si
22162 = build_function_type_list (V4SI_type_node,
22163 V4SI_type_node, V4SI_type_node, NULL_TREE);
22164 tree v2di_ftype_v2di_v2di
22165 = build_function_type_list (V2DI_type_node,
22166 V2DI_type_node, V2DI_type_node, NULL_TREE);
22167 tree v2di_ftype_v2df_v2df
22168 = build_function_type_list (V2DI_type_node,
22169 V2DF_type_node, V2DF_type_node, NULL_TREE);
22170 tree v2df_ftype_v2df
22171 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
22172 tree v2di_ftype_v2di_int
22173 = build_function_type_list (V2DI_type_node,
22174 V2DI_type_node, integer_type_node, NULL_TREE);
22175 tree v2di_ftype_v2di_v2di_int
22176 = build_function_type_list (V2DI_type_node, V2DI_type_node,
22177 V2DI_type_node, integer_type_node, NULL_TREE);
22178 tree v4si_ftype_v4si_int
22179 = build_function_type_list (V4SI_type_node,
22180 V4SI_type_node, integer_type_node, NULL_TREE);
22181 tree v8hi_ftype_v8hi_int
22182 = build_function_type_list (V8HI_type_node,
22183 V8HI_type_node, integer_type_node, NULL_TREE);
22184 tree v4si_ftype_v8hi_v8hi
22185 = build_function_type_list (V4SI_type_node,
22186 V8HI_type_node, V8HI_type_node, NULL_TREE);
22187 tree v1di_ftype_v8qi_v8qi
22188 = build_function_type_list (V1DI_type_node,
22189 V8QI_type_node, V8QI_type_node, NULL_TREE);
22190 tree v1di_ftype_v2si_v2si
22191 = build_function_type_list (V1DI_type_node,
22192 V2SI_type_node, V2SI_type_node, NULL_TREE);
22193 tree v2di_ftype_v16qi_v16qi
22194 = build_function_type_list (V2DI_type_node,
22195 V16QI_type_node, V16QI_type_node, NULL_TREE);
22196 tree v2di_ftype_v4si_v4si
22197 = build_function_type_list (V2DI_type_node,
22198 V4SI_type_node, V4SI_type_node, NULL_TREE);
22199 tree int_ftype_v16qi
22200 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
22201 tree v16qi_ftype_pcchar
22202 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
22203 tree void_ftype_pchar_v16qi
22204 = build_function_type_list (void_type_node,
22205 pchar_type_node, V16QI_type_node, NULL_TREE);
22207 tree v2di_ftype_v2di_unsigned_unsigned
22208 = build_function_type_list (V2DI_type_node, V2DI_type_node,
22209 unsigned_type_node, unsigned_type_node,
22210 NULL_TREE);
22211 tree v2di_ftype_v2di_v2di_unsigned_unsigned
22212 = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
22213 unsigned_type_node, unsigned_type_node,
22214 NULL_TREE);
22215 tree v2di_ftype_v2di_v16qi
22216 = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
22217 NULL_TREE);
22218 tree v2df_ftype_v2df_v2df_v2df
22219 = build_function_type_list (V2DF_type_node,
22220 V2DF_type_node, V2DF_type_node,
22221 V2DF_type_node, NULL_TREE);
22222 tree v4sf_ftype_v4sf_v4sf_v4sf
22223 = build_function_type_list (V4SF_type_node,
22224 V4SF_type_node, V4SF_type_node,
22225 V4SF_type_node, NULL_TREE);
22226 tree v8hi_ftype_v16qi
22227 = build_function_type_list (V8HI_type_node, V16QI_type_node,
22228 NULL_TREE);
22229 tree v4si_ftype_v16qi
22230 = build_function_type_list (V4SI_type_node, V16QI_type_node,
22231 NULL_TREE);
22232 tree v2di_ftype_v16qi
22233 = build_function_type_list (V2DI_type_node, V16QI_type_node,
22234 NULL_TREE);
22235 tree v4si_ftype_v8hi
22236 = build_function_type_list (V4SI_type_node, V8HI_type_node,
22237 NULL_TREE);
22238 tree v2di_ftype_v8hi
22239 = build_function_type_list (V2DI_type_node, V8HI_type_node,
22240 NULL_TREE);
22241 tree v2di_ftype_v4si
22242 = build_function_type_list (V2DI_type_node, V4SI_type_node,
22243 NULL_TREE);
22244 tree v2di_ftype_pv2di
22245 = build_function_type_list (V2DI_type_node, pv2di_type_node,
22246 NULL_TREE);
22247 tree v16qi_ftype_v16qi_v16qi_int
22248 = build_function_type_list (V16QI_type_node, V16QI_type_node,
22249 V16QI_type_node, integer_type_node,
22250 NULL_TREE);
22251 tree v16qi_ftype_v16qi_v16qi_v16qi
22252 = build_function_type_list (V16QI_type_node, V16QI_type_node,
22253 V16QI_type_node, V16QI_type_node,
22254 NULL_TREE);
22255 tree v8hi_ftype_v8hi_v8hi_int
22256 = build_function_type_list (V8HI_type_node, V8HI_type_node,
22257 V8HI_type_node, integer_type_node,
22258 NULL_TREE);
22259 tree v4si_ftype_v4si_v4si_int
22260 = build_function_type_list (V4SI_type_node, V4SI_type_node,
22261 V4SI_type_node, integer_type_node,
22262 NULL_TREE);
22263 tree int_ftype_v2di_v2di
22264 = build_function_type_list (integer_type_node,
22265 V2DI_type_node, V2DI_type_node,
22266 NULL_TREE);
22267 tree int_ftype_v16qi_int_v16qi_int_int
22268 = build_function_type_list (integer_type_node,
22269 V16QI_type_node,
22270 integer_type_node,
22271 V16QI_type_node,
22272 integer_type_node,
22273 integer_type_node,
22274 NULL_TREE);
22275 tree v16qi_ftype_v16qi_int_v16qi_int_int
22276 = build_function_type_list (V16QI_type_node,
22277 V16QI_type_node,
22278 integer_type_node,
22279 V16QI_type_node,
22280 integer_type_node,
22281 integer_type_node,
22282 NULL_TREE);
22283 tree int_ftype_v16qi_v16qi_int
22284 = build_function_type_list (integer_type_node,
22285 V16QI_type_node,
22286 V16QI_type_node,
22287 integer_type_node,
22288 NULL_TREE);
22290 /* SSE5 instructions */
22291 tree v2di_ftype_v2di_v2di_v2di
22292 = build_function_type_list (V2DI_type_node,
22293 V2DI_type_node,
22294 V2DI_type_node,
22295 V2DI_type_node,
22296 NULL_TREE);
22298 tree v4si_ftype_v4si_v4si_v4si
22299 = build_function_type_list (V4SI_type_node,
22300 V4SI_type_node,
22301 V4SI_type_node,
22302 V4SI_type_node,
22303 NULL_TREE);
22305 tree v4si_ftype_v4si_v4si_v2di
22306 = build_function_type_list (V4SI_type_node,
22307 V4SI_type_node,
22308 V4SI_type_node,
22309 V2DI_type_node,
22310 NULL_TREE);
22312 tree v8hi_ftype_v8hi_v8hi_v8hi
22313 = build_function_type_list (V8HI_type_node,
22314 V8HI_type_node,
22315 V8HI_type_node,
22316 V8HI_type_node,
22317 NULL_TREE);
22319 tree v8hi_ftype_v8hi_v8hi_v4si
22320 = build_function_type_list (V8HI_type_node,
22321 V8HI_type_node,
22322 V8HI_type_node,
22323 V4SI_type_node,
22324 NULL_TREE);
22326 tree v2df_ftype_v2df_v2df_v16qi
22327 = build_function_type_list (V2DF_type_node,
22328 V2DF_type_node,
22329 V2DF_type_node,
22330 V16QI_type_node,
22331 NULL_TREE);
22333 tree v4sf_ftype_v4sf_v4sf_v16qi
22334 = build_function_type_list (V4SF_type_node,
22335 V4SF_type_node,
22336 V4SF_type_node,
22337 V16QI_type_node,
22338 NULL_TREE);
22340 tree v2di_ftype_v2di_si
22341 = build_function_type_list (V2DI_type_node,
22342 V2DI_type_node,
22343 integer_type_node,
22344 NULL_TREE);
22346 tree v4si_ftype_v4si_si
22347 = build_function_type_list (V4SI_type_node,
22348 V4SI_type_node,
22349 integer_type_node,
22350 NULL_TREE);
22352 tree v8hi_ftype_v8hi_si
22353 = build_function_type_list (V8HI_type_node,
22354 V8HI_type_node,
22355 integer_type_node,
22356 NULL_TREE);
22358 tree v16qi_ftype_v16qi_si
22359 = build_function_type_list (V16QI_type_node,
22360 V16QI_type_node,
22361 integer_type_node,
22362 NULL_TREE);
22363 tree v4sf_ftype_v4hi
22364 = build_function_type_list (V4SF_type_node,
22365 V4HI_type_node,
22366 NULL_TREE);
22368 tree v4hi_ftype_v4sf
22369 = build_function_type_list (V4HI_type_node,
22370 V4SF_type_node,
22371 NULL_TREE);
22373 tree v2di_ftype_v2di
22374 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
22376 tree v16qi_ftype_v8hi_v8hi
22377 = build_function_type_list (V16QI_type_node,
22378 V8HI_type_node, V8HI_type_node,
22379 NULL_TREE);
22380 tree v8hi_ftype_v4si_v4si
22381 = build_function_type_list (V8HI_type_node,
22382 V4SI_type_node, V4SI_type_node,
22383 NULL_TREE);
22384 tree v8hi_ftype_v16qi_v16qi
22385 = build_function_type_list (V8HI_type_node,
22386 V16QI_type_node, V16QI_type_node,
22387 NULL_TREE);
22388 tree v4hi_ftype_v8qi_v8qi
22389 = build_function_type_list (V4HI_type_node,
22390 V8QI_type_node, V8QI_type_node,
22391 NULL_TREE);
22392 tree unsigned_ftype_unsigned_uchar
22393 = build_function_type_list (unsigned_type_node,
22394 unsigned_type_node,
22395 unsigned_char_type_node,
22396 NULL_TREE);
22397 tree unsigned_ftype_unsigned_ushort
22398 = build_function_type_list (unsigned_type_node,
22399 unsigned_type_node,
22400 short_unsigned_type_node,
22401 NULL_TREE);
22402 tree unsigned_ftype_unsigned_unsigned
22403 = build_function_type_list (unsigned_type_node,
22404 unsigned_type_node,
22405 unsigned_type_node,
22406 NULL_TREE);
22407 tree uint64_ftype_uint64_uint64
22408 = build_function_type_list (long_long_unsigned_type_node,
22409 long_long_unsigned_type_node,
22410 long_long_unsigned_type_node,
22411 NULL_TREE);
22412 tree float_ftype_float
22413 = build_function_type_list (float_type_node,
22414 float_type_node,
22415 NULL_TREE);
22417 /* AVX builtins */
22418 tree V32QI_type_node = build_vector_type_for_mode (char_type_node,
22419 V32QImode);
22420 tree V8SI_type_node = build_vector_type_for_mode (intSI_type_node,
22421 V8SImode);
22422 tree V8SF_type_node = build_vector_type_for_mode (float_type_node,
22423 V8SFmode);
22424 tree V4DI_type_node = build_vector_type_for_mode (long_long_integer_type_node,
22425 V4DImode);
22426 tree V4DF_type_node = build_vector_type_for_mode (double_type_node,
22427 V4DFmode);
22428 tree v8sf_ftype_v8sf
22429 = build_function_type_list (V8SF_type_node,
22430 V8SF_type_node,
22431 NULL_TREE);
22432 tree v8si_ftype_v8sf
22433 = build_function_type_list (V8SI_type_node,
22434 V8SF_type_node,
22435 NULL_TREE);
22436 tree v8sf_ftype_v8si
22437 = build_function_type_list (V8SF_type_node,
22438 V8SI_type_node,
22439 NULL_TREE);
22440 tree v4si_ftype_v4df
22441 = build_function_type_list (V4SI_type_node,
22442 V4DF_type_node,
22443 NULL_TREE);
22444 tree v4df_ftype_v4df
22445 = build_function_type_list (V4DF_type_node,
22446 V4DF_type_node,
22447 NULL_TREE);
22448 tree v4df_ftype_v4si
22449 = build_function_type_list (V4DF_type_node,
22450 V4SI_type_node,
22451 NULL_TREE);
22452 tree v4df_ftype_v4sf
22453 = build_function_type_list (V4DF_type_node,
22454 V4SF_type_node,
22455 NULL_TREE);
22456 tree v4sf_ftype_v4df
22457 = build_function_type_list (V4SF_type_node,
22458 V4DF_type_node,
22459 NULL_TREE);
22460 tree v8sf_ftype_v8sf_v8sf
22461 = build_function_type_list (V8SF_type_node,
22462 V8SF_type_node, V8SF_type_node,
22463 NULL_TREE);
22464 tree v4df_ftype_v4df_v4df
22465 = build_function_type_list (V4DF_type_node,
22466 V4DF_type_node, V4DF_type_node,
22467 NULL_TREE);
22468 tree v8sf_ftype_v8sf_int
22469 = build_function_type_list (V8SF_type_node,
22470 V8SF_type_node, integer_type_node,
22471 NULL_TREE);
22472 tree v4si_ftype_v8si_int
22473 = build_function_type_list (V4SI_type_node,
22474 V8SI_type_node, integer_type_node,
22475 NULL_TREE);
22476 tree v4df_ftype_v4df_int
22477 = build_function_type_list (V4DF_type_node,
22478 V4DF_type_node, integer_type_node,
22479 NULL_TREE);
22480 tree v4sf_ftype_v8sf_int
22481 = build_function_type_list (V4SF_type_node,
22482 V8SF_type_node, integer_type_node,
22483 NULL_TREE);
22484 tree v2df_ftype_v4df_int
22485 = build_function_type_list (V2DF_type_node,
22486 V4DF_type_node, integer_type_node,
22487 NULL_TREE);
22488 tree v8sf_ftype_v8sf_v8sf_int
22489 = build_function_type_list (V8SF_type_node,
22490 V8SF_type_node, V8SF_type_node,
22491 integer_type_node,
22492 NULL_TREE);
22493 tree v8sf_ftype_v8sf_v8sf_v8sf
22494 = build_function_type_list (V8SF_type_node,
22495 V8SF_type_node, V8SF_type_node,
22496 V8SF_type_node,
22497 NULL_TREE);
22498 tree v4df_ftype_v4df_v4df_v4df
22499 = build_function_type_list (V4DF_type_node,
22500 V4DF_type_node, V4DF_type_node,
22501 V4DF_type_node,
22502 NULL_TREE);
22503 tree v8si_ftype_v8si_v8si_int
22504 = build_function_type_list (V8SI_type_node,
22505 V8SI_type_node, V8SI_type_node,
22506 integer_type_node,
22507 NULL_TREE);
22508 tree v4df_ftype_v4df_v4df_int
22509 = build_function_type_list (V4DF_type_node,
22510 V4DF_type_node, V4DF_type_node,
22511 integer_type_node,
22512 NULL_TREE);
22513 tree v8sf_ftype_pcfloat
22514 = build_function_type_list (V8SF_type_node,
22515 pcfloat_type_node,
22516 NULL_TREE);
22517 tree v4df_ftype_pcdouble
22518 = build_function_type_list (V4DF_type_node,
22519 pcdouble_type_node,
22520 NULL_TREE);
22521 tree pcv4sf_type_node
22522 = build_pointer_type (build_type_variant (V4SF_type_node, 1, 0));
22523 tree pcv2df_type_node
22524 = build_pointer_type (build_type_variant (V2DF_type_node, 1, 0));
22525 tree v8sf_ftype_pcv4sf
22526 = build_function_type_list (V8SF_type_node,
22527 pcv4sf_type_node,
22528 NULL_TREE);
22529 tree v4df_ftype_pcv2df
22530 = build_function_type_list (V4DF_type_node,
22531 pcv2df_type_node,
22532 NULL_TREE);
22533 tree v32qi_ftype_pcchar
22534 = build_function_type_list (V32QI_type_node,
22535 pcchar_type_node,
22536 NULL_TREE);
22537 tree void_ftype_pchar_v32qi
22538 = build_function_type_list (void_type_node,
22539 pchar_type_node, V32QI_type_node,
22540 NULL_TREE);
22541 tree v8si_ftype_v8si_v4si_int
22542 = build_function_type_list (V8SI_type_node,
22543 V8SI_type_node, V4SI_type_node,
22544 integer_type_node,
22545 NULL_TREE);
22546 tree pv4di_type_node = build_pointer_type (V4DI_type_node);
22547 tree void_ftype_pv4di_v4di
22548 = build_function_type_list (void_type_node,
22549 pv4di_type_node, V4DI_type_node,
22550 NULL_TREE);
22551 tree v8sf_ftype_v8sf_v4sf_int
22552 = build_function_type_list (V8SF_type_node,
22553 V8SF_type_node, V4SF_type_node,
22554 integer_type_node,
22555 NULL_TREE);
22556 tree v4df_ftype_v4df_v2df_int
22557 = build_function_type_list (V4DF_type_node,
22558 V4DF_type_node, V2DF_type_node,
22559 integer_type_node,
22560 NULL_TREE);
22561 tree void_ftype_pfloat_v8sf
22562 = build_function_type_list (void_type_node,
22563 pfloat_type_node, V8SF_type_node,
22564 NULL_TREE);
22565 tree void_ftype_pdouble_v4df
22566 = build_function_type_list (void_type_node,
22567 pdouble_type_node, V4DF_type_node,
22568 NULL_TREE);
22569 tree pv8sf_type_node = build_pointer_type (V8SF_type_node);
22570 tree pv4sf_type_node = build_pointer_type (V4SF_type_node);
22571 tree pv4df_type_node = build_pointer_type (V4DF_type_node);
22572 tree pv2df_type_node = build_pointer_type (V2DF_type_node);
22573 tree pcv8sf_type_node
22574 = build_pointer_type (build_type_variant (V8SF_type_node, 1, 0));
22575 tree pcv4df_type_node
22576 = build_pointer_type (build_type_variant (V4DF_type_node, 1, 0));
22577 tree v8sf_ftype_pcv8sf_v8sf
22578 = build_function_type_list (V8SF_type_node,
22579 pcv8sf_type_node, V8SF_type_node,
22580 NULL_TREE);
22581 tree v4df_ftype_pcv4df_v4df
22582 = build_function_type_list (V4DF_type_node,
22583 pcv4df_type_node, V4DF_type_node,
22584 NULL_TREE);
22585 tree v4sf_ftype_pcv4sf_v4sf
22586 = build_function_type_list (V4SF_type_node,
22587 pcv4sf_type_node, V4SF_type_node,
22588 NULL_TREE);
22589 tree v2df_ftype_pcv2df_v2df
22590 = build_function_type_list (V2DF_type_node,
22591 pcv2df_type_node, V2DF_type_node,
22592 NULL_TREE);
22593 tree void_ftype_pv8sf_v8sf_v8sf
22594 = build_function_type_list (void_type_node,
22595 pv8sf_type_node, V8SF_type_node,
22596 V8SF_type_node,
22597 NULL_TREE);
22598 tree void_ftype_pv4df_v4df_v4df
22599 = build_function_type_list (void_type_node,
22600 pv4df_type_node, V4DF_type_node,
22601 V4DF_type_node,
22602 NULL_TREE);
22603 tree void_ftype_pv4sf_v4sf_v4sf
22604 = build_function_type_list (void_type_node,
22605 pv4sf_type_node, V4SF_type_node,
22606 V4SF_type_node,
22607 NULL_TREE);
22608 tree void_ftype_pv2df_v2df_v2df
22609 = build_function_type_list (void_type_node,
22610 pv2df_type_node, V2DF_type_node,
22611 V2DF_type_node,
22612 NULL_TREE);
22613 tree v4df_ftype_v2df
22614 = build_function_type_list (V4DF_type_node,
22615 V2DF_type_node,
22616 NULL_TREE);
22617 tree v8sf_ftype_v4sf
22618 = build_function_type_list (V8SF_type_node,
22619 V4SF_type_node,
22620 NULL_TREE);
22621 tree v8si_ftype_v4si
22622 = build_function_type_list (V8SI_type_node,
22623 V4SI_type_node,
22624 NULL_TREE);
22625 tree v2df_ftype_v4df
22626 = build_function_type_list (V2DF_type_node,
22627 V4DF_type_node,
22628 NULL_TREE);
22629 tree v4sf_ftype_v8sf
22630 = build_function_type_list (V4SF_type_node,
22631 V8SF_type_node,
22632 NULL_TREE);
22633 tree v4si_ftype_v8si
22634 = build_function_type_list (V4SI_type_node,
22635 V8SI_type_node,
22636 NULL_TREE);
22637 tree int_ftype_v4df
22638 = build_function_type_list (integer_type_node,
22639 V4DF_type_node,
22640 NULL_TREE);
22641 tree int_ftype_v8sf
22642 = build_function_type_list (integer_type_node,
22643 V8SF_type_node,
22644 NULL_TREE);
22645 tree int_ftype_v8sf_v8sf
22646 = build_function_type_list (integer_type_node,
22647 V8SF_type_node, V8SF_type_node,
22648 NULL_TREE);
22649 tree int_ftype_v4di_v4di
22650 = build_function_type_list (integer_type_node,
22651 V4DI_type_node, V4DI_type_node,
22652 NULL_TREE);
22653 tree int_ftype_v4df_v4df
22654 = build_function_type_list (integer_type_node,
22655 V4DF_type_node, V4DF_type_node,
22656 NULL_TREE);
22657 tree v8sf_ftype_v8sf_v8si
22658 = build_function_type_list (V8SF_type_node,
22659 V8SF_type_node, V8SI_type_node,
22660 NULL_TREE);
22661 tree v4df_ftype_v4df_v4di
22662 = build_function_type_list (V4DF_type_node,
22663 V4DF_type_node, V4DI_type_node,
22664 NULL_TREE);
22665 tree v4sf_ftype_v4sf_v4si
22666 = build_function_type_list (V4SF_type_node,
22667 V4SF_type_node, V4SI_type_node, NULL_TREE);
22668 tree v2df_ftype_v2df_v2di
22669 = build_function_type_list (V2DF_type_node,
22670 V2DF_type_node, V2DI_type_node, NULL_TREE);
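/* For illustration: each *_ftype_* tree built above is a FUNCTION_TYPE
   whose return type is the first argument to build_function_type_list
   and whose parameters are the NULL_TREE-terminated tail.
   v2df_ftype_v2df_v2di, for instance, describes the C prototype

     __v2df f (__v2df, __v2di);

   where the vector types match the user-level typedefs

     typedef double __v2df __attribute__ ((__vector_size__ (16)));
     typedef long long __v2di __attribute__ ((__vector_size__ (16)));  */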
22672 tree ftype;
22674 /* Add all special builtins with a variable number of operands. */
22675 for (i = 0, d = bdesc_special_args;
22676 i < ARRAY_SIZE (bdesc_special_args);
22677 i++, d++)
22679 tree type;
22681 if (d->name == 0)
22682 continue;
22684 switch ((enum ix86_special_builtin_type) d->flag)
22686 case VOID_FTYPE_VOID:
22687 type = void_ftype_void;
22688 break;
22689 case V32QI_FTYPE_PCCHAR:
22690 type = v32qi_ftype_pcchar;
22691 break;
22692 case V16QI_FTYPE_PCCHAR:
22693 type = v16qi_ftype_pcchar;
22694 break;
22695 case V8SF_FTYPE_PCV4SF:
22696 type = v8sf_ftype_pcv4sf;
22697 break;
22698 case V8SF_FTYPE_PCFLOAT:
22699 type = v8sf_ftype_pcfloat;
22700 break;
22701 case V4DF_FTYPE_PCV2DF:
22702 type = v4df_ftype_pcv2df;
22703 break;
22704 case V4DF_FTYPE_PCDOUBLE:
22705 type = v4df_ftype_pcdouble;
22706 break;
22707 case V4SF_FTYPE_PCFLOAT:
22708 type = v4sf_ftype_pcfloat;
22709 break;
22710 case V2DI_FTYPE_PV2DI:
22711 type = v2di_ftype_pv2di;
22712 break;
22713 case V2DF_FTYPE_PCDOUBLE:
22714 type = v2df_ftype_pcdouble;
22715 break;
22716 case V8SF_FTYPE_PCV8SF_V8SF:
22717 type = v8sf_ftype_pcv8sf_v8sf;
22718 break;
22719 case V4DF_FTYPE_PCV4DF_V4DF:
22720 type = v4df_ftype_pcv4df_v4df;
22721 break;
22722 case V4SF_FTYPE_V4SF_PCV2SF:
22723 type = v4sf_ftype_v4sf_pcv2sf;
22724 break;
22725 case V4SF_FTYPE_PCV4SF_V4SF:
22726 type = v4sf_ftype_pcv4sf_v4sf;
22727 break;
22728 case V2DF_FTYPE_V2DF_PCDOUBLE:
22729 type = v2df_ftype_v2df_pcdouble;
22730 break;
22731 case V2DF_FTYPE_PCV2DF_V2DF:
22732 type = v2df_ftype_pcv2df_v2df;
22733 break;
22734 case VOID_FTYPE_PV2SF_V4SF:
22735 type = void_ftype_pv2sf_v4sf;
22736 break;
22737 case VOID_FTYPE_PV4DI_V4DI:
22738 type = void_ftype_pv4di_v4di;
22739 break;
22740 case VOID_FTYPE_PV2DI_V2DI:
22741 type = void_ftype_pv2di_v2di;
22742 break;
22743 case VOID_FTYPE_PCHAR_V32QI:
22744 type = void_ftype_pchar_v32qi;
22745 break;
22746 case VOID_FTYPE_PCHAR_V16QI:
22747 type = void_ftype_pchar_v16qi;
22748 break;
22749 case VOID_FTYPE_PFLOAT_V8SF:
22750 type = void_ftype_pfloat_v8sf;
22751 break;
22752 case VOID_FTYPE_PFLOAT_V4SF:
22753 type = void_ftype_pfloat_v4sf;
22754 break;
22755 case VOID_FTYPE_PDOUBLE_V4DF:
22756 type = void_ftype_pdouble_v4df;
22757 break;
22758 case VOID_FTYPE_PDOUBLE_V2DF:
22759 type = void_ftype_pdouble_v2df;
22760 break;
22761 case VOID_FTYPE_PDI_DI:
22762 type = void_ftype_pdi_di;
22763 break;
22764 case VOID_FTYPE_PINT_INT:
22765 type = void_ftype_pint_int;
22766 break;
22767 case VOID_FTYPE_PV8SF_V8SF_V8SF:
22768 type = void_ftype_pv8sf_v8sf_v8sf;
22769 break;
22770 case VOID_FTYPE_PV4DF_V4DF_V4DF:
22771 type = void_ftype_pv4df_v4df_v4df;
22772 break;
22773 case VOID_FTYPE_PV4SF_V4SF_V4SF:
22774 type = void_ftype_pv4sf_v4sf_v4sf;
22775 break;
22776 case VOID_FTYPE_PV2DF_V2DF_V2DF:
22777 type = void_ftype_pv2df_v2df_v2df;
22778 break;
22779 default:
22780 gcc_unreachable ();
22783 def_builtin (d->mask, d->name, type, d->code);
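/* A usage sketch for one of the "special" entries above: the
   V16QI_FTYPE_PCCHAR case registers __builtin_ia32_loaddqu, the
   builtin behind _mm_loadu_si128 in <emmintrin.h>:

     typedef char __v16qi __attribute__ ((__vector_size__ (16)));
     __v16qi
     load_unaligned (char const *p)
     {
       return __builtin_ia32_loaddqu (p);
     }

   These builtins read or write memory, so they are registered with
   def_builtin rather than def_builtin_const.  */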
22786 /* Add all builtins with a variable number of operands. */
22787 for (i = 0, d = bdesc_args;
22788 i < ARRAY_SIZE (bdesc_args);
22789 i++, d++)
22791 tree type;
22793 if (d->name == 0)
22794 continue;
22796 switch ((enum ix86_builtin_type) d->flag)
22798 case FLOAT_FTYPE_FLOAT:
22799 type = float_ftype_float;
22800 break;
22801 case INT_FTYPE_V8SF_V8SF_PTEST:
22802 type = int_ftype_v8sf_v8sf;
22803 break;
22804 case INT_FTYPE_V4DI_V4DI_PTEST:
22805 type = int_ftype_v4di_v4di;
22806 break;
22807 case INT_FTYPE_V4DF_V4DF_PTEST:
22808 type = int_ftype_v4df_v4df;
22809 break;
22810 case INT_FTYPE_V4SF_V4SF_PTEST:
22811 type = int_ftype_v4sf_v4sf;
22812 break;
22813 case INT_FTYPE_V2DI_V2DI_PTEST:
22814 type = int_ftype_v2di_v2di;
22815 break;
22816 case INT_FTYPE_V2DF_V2DF_PTEST:
22817 type = int_ftype_v2df_v2df;
22818 break;
22819 case INT64_FTYPE_V4SF:
22820 type = int64_ftype_v4sf;
22821 break;
22822 case INT64_FTYPE_V2DF:
22823 type = int64_ftype_v2df;
22824 break;
22825 case INT_FTYPE_V16QI:
22826 type = int_ftype_v16qi;
22827 break;
22828 case INT_FTYPE_V8QI:
22829 type = int_ftype_v8qi;
22830 break;
22831 case INT_FTYPE_V8SF:
22832 type = int_ftype_v8sf;
22833 break;
22834 case INT_FTYPE_V4DF:
22835 type = int_ftype_v4df;
22836 break;
22837 case INT_FTYPE_V4SF:
22838 type = int_ftype_v4sf;
22839 break;
22840 case INT_FTYPE_V2DF:
22841 type = int_ftype_v2df;
22842 break;
22843 case V16QI_FTYPE_V16QI:
22844 type = v16qi_ftype_v16qi;
22845 break;
22846 case V8SI_FTYPE_V8SF:
22847 type = v8si_ftype_v8sf;
22848 break;
22849 case V8SI_FTYPE_V4SI:
22850 type = v8si_ftype_v4si;
22851 break;
22852 case V8HI_FTYPE_V8HI:
22853 type = v8hi_ftype_v8hi;
22854 break;
22855 case V8HI_FTYPE_V16QI:
22856 type = v8hi_ftype_v16qi;
22857 break;
22858 case V8QI_FTYPE_V8QI:
22859 type = v8qi_ftype_v8qi;
22860 break;
22861 case V8SF_FTYPE_V8SF:
22862 type = v8sf_ftype_v8sf;
22863 break;
22864 case V8SF_FTYPE_V8SI:
22865 type = v8sf_ftype_v8si;
22866 break;
22867 case V8SF_FTYPE_V4SF:
22868 type = v8sf_ftype_v4sf;
22869 break;
22870 case V4SI_FTYPE_V4DF:
22871 type = v4si_ftype_v4df;
22872 break;
22873 case V4SI_FTYPE_V4SI:
22874 type = v4si_ftype_v4si;
22875 break;
22876 case V4SI_FTYPE_V16QI:
22877 type = v4si_ftype_v16qi;
22878 break;
22879 case V4SI_FTYPE_V8SI:
22880 type = v4si_ftype_v8si;
22881 break;
22882 case V4SI_FTYPE_V8HI:
22883 type = v4si_ftype_v8hi;
22884 break;
22885 case V4SI_FTYPE_V4SF:
22886 type = v4si_ftype_v4sf;
22887 break;
22888 case V4SI_FTYPE_V2DF:
22889 type = v4si_ftype_v2df;
22890 break;
22891 case V4HI_FTYPE_V4HI:
22892 type = v4hi_ftype_v4hi;
22893 break;
22894 case V4DF_FTYPE_V4DF:
22895 type = v4df_ftype_v4df;
22896 break;
22897 case V4DF_FTYPE_V4SI:
22898 type = v4df_ftype_v4si;
22899 break;
22900 case V4DF_FTYPE_V4SF:
22901 type = v4df_ftype_v4sf;
22902 break;
22903 case V4DF_FTYPE_V2DF:
22904 type = v4df_ftype_v2df;
22905 break;
22906 case V4SF_FTYPE_V4SF:
22907 case V4SF_FTYPE_V4SF_VEC_MERGE:
22908 type = v4sf_ftype_v4sf;
22909 break;
22910 case V4SF_FTYPE_V8SF:
22911 type = v4sf_ftype_v8sf;
22912 break;
22913 case V4SF_FTYPE_V4SI:
22914 type = v4sf_ftype_v4si;
22915 break;
22916 case V4SF_FTYPE_V4DF:
22917 type = v4sf_ftype_v4df;
22918 break;
22919 case V4SF_FTYPE_V2DF:
22920 type = v4sf_ftype_v2df;
22921 break;
22922 case V2DI_FTYPE_V2DI:
22923 type = v2di_ftype_v2di;
22924 break;
22925 case V2DI_FTYPE_V16QI:
22926 type = v2di_ftype_v16qi;
22927 break;
22928 case V2DI_FTYPE_V8HI:
22929 type = v2di_ftype_v8hi;
22930 break;
22931 case V2DI_FTYPE_V4SI:
22932 type = v2di_ftype_v4si;
22933 break;
22934 case V2SI_FTYPE_V2SI:
22935 type = v2si_ftype_v2si;
22936 break;
22937 case V2SI_FTYPE_V4SF:
22938 type = v2si_ftype_v4sf;
22939 break;
22940 case V2SI_FTYPE_V2DF:
22941 type = v2si_ftype_v2df;
22942 break;
22943 case V2SI_FTYPE_V2SF:
22944 type = v2si_ftype_v2sf;
22945 break;
22946 case V2DF_FTYPE_V4DF:
22947 type = v2df_ftype_v4df;
22948 break;
22949 case V2DF_FTYPE_V4SF:
22950 type = v2df_ftype_v4sf;
22951 break;
22952 case V2DF_FTYPE_V2DF:
22953 case V2DF_FTYPE_V2DF_VEC_MERGE:
22954 type = v2df_ftype_v2df;
22955 break;
22956 case V2DF_FTYPE_V2SI:
22957 type = v2df_ftype_v2si;
22958 break;
22959 case V2DF_FTYPE_V4SI:
22960 type = v2df_ftype_v4si;
22961 break;
22962 case V2SF_FTYPE_V2SF:
22963 type = v2sf_ftype_v2sf;
22964 break;
22965 case V2SF_FTYPE_V2SI:
22966 type = v2sf_ftype_v2si;
22967 break;
22968 case V16QI_FTYPE_V16QI_V16QI:
22969 type = v16qi_ftype_v16qi_v16qi;
22970 break;
22971 case V16QI_FTYPE_V8HI_V8HI:
22972 type = v16qi_ftype_v8hi_v8hi;
22973 break;
22974 case V8QI_FTYPE_V8QI_V8QI:
22975 type = v8qi_ftype_v8qi_v8qi;
22976 break;
22977 case V8QI_FTYPE_V4HI_V4HI:
22978 type = v8qi_ftype_v4hi_v4hi;
22979 break;
22980 case V8HI_FTYPE_V8HI_V8HI:
22981 case V8HI_FTYPE_V8HI_V8HI_COUNT:
22982 type = v8hi_ftype_v8hi_v8hi;
22983 break;
22984 case V8HI_FTYPE_V16QI_V16QI:
22985 type = v8hi_ftype_v16qi_v16qi;
22986 break;
22987 case V8HI_FTYPE_V4SI_V4SI:
22988 type = v8hi_ftype_v4si_v4si;
22989 break;
22990 case V8HI_FTYPE_V8HI_SI_COUNT:
22991 type = v8hi_ftype_v8hi_int;
22992 break;
22993 case V8SF_FTYPE_V8SF_V8SF:
22994 type = v8sf_ftype_v8sf_v8sf;
22995 break;
22996 case V8SF_FTYPE_V8SF_V8SI:
22997 type = v8sf_ftype_v8sf_v8si;
22998 break;
22999 case V4SI_FTYPE_V4SI_V4SI:
23000 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23001 type = v4si_ftype_v4si_v4si;
23002 break;
23003 case V4SI_FTYPE_V8HI_V8HI:
23004 type = v4si_ftype_v8hi_v8hi;
23005 break;
23006 case V4SI_FTYPE_V4SF_V4SF:
23007 type = v4si_ftype_v4sf_v4sf;
23008 break;
23009 case V4SI_FTYPE_V2DF_V2DF:
23010 type = v4si_ftype_v2df_v2df;
23011 break;
23012 case V4SI_FTYPE_V4SI_SI_COUNT:
23013 type = v4si_ftype_v4si_int;
23014 break;
23015 case V4HI_FTYPE_V4HI_V4HI:
23016 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23017 type = v4hi_ftype_v4hi_v4hi;
23018 break;
23019 case V4HI_FTYPE_V8QI_V8QI:
23020 type = v4hi_ftype_v8qi_v8qi;
23021 break;
23022 case V4HI_FTYPE_V2SI_V2SI:
23023 type = v4hi_ftype_v2si_v2si;
23024 break;
23025 case V4HI_FTYPE_V4HI_SI_COUNT:
23026 type = v4hi_ftype_v4hi_int;
23027 break;
23028 case V4DF_FTYPE_V4DF_V4DF:
23029 type = v4df_ftype_v4df_v4df;
23030 break;
23031 case V4DF_FTYPE_V4DF_V4DI:
23032 type = v4df_ftype_v4df_v4di;
23033 break;
23034 case V4SF_FTYPE_V4SF_V4SF:
23035 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23036 type = v4sf_ftype_v4sf_v4sf;
23037 break;
23038 case V4SF_FTYPE_V4SF_V4SI:
23039 type = v4sf_ftype_v4sf_v4si;
23040 break;
23041 case V4SF_FTYPE_V4SF_V2SI:
23042 type = v4sf_ftype_v4sf_v2si;
23043 break;
23044 case V4SF_FTYPE_V4SF_V2DF:
23045 type = v4sf_ftype_v4sf_v2df;
23046 break;
23047 case V4SF_FTYPE_V4SF_DI:
23048 type = v4sf_ftype_v4sf_int64;
23049 break;
23050 case V4SF_FTYPE_V4SF_SI:
23051 type = v4sf_ftype_v4sf_int;
23052 break;
23053 case V2DI_FTYPE_V2DI_V2DI:
23054 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23055 type = v2di_ftype_v2di_v2di;
23056 break;
23057 case V2DI_FTYPE_V16QI_V16QI:
23058 type = v2di_ftype_v16qi_v16qi;
23059 break;
23060 case V2DI_FTYPE_V4SI_V4SI:
23061 type = v2di_ftype_v4si_v4si;
23062 break;
23063 case V2DI_FTYPE_V2DI_V16QI:
23064 type = v2di_ftype_v2di_v16qi;
23065 break;
23066 case V2DI_FTYPE_V2DF_V2DF:
23067 type = v2di_ftype_v2df_v2df;
23068 break;
23069 case V2DI_FTYPE_V2DI_SI_COUNT:
23070 type = v2di_ftype_v2di_int;
23071 break;
23072 case V2SI_FTYPE_V2SI_V2SI:
23073 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23074 type = v2si_ftype_v2si_v2si;
23075 break;
23076 case V2SI_FTYPE_V4HI_V4HI:
23077 type = v2si_ftype_v4hi_v4hi;
23078 break;
23079 case V2SI_FTYPE_V2SF_V2SF:
23080 type = v2si_ftype_v2sf_v2sf;
23081 break;
23082 case V2SI_FTYPE_V2SI_SI_COUNT:
23083 type = v2si_ftype_v2si_int;
23084 break;
23085 case V2DF_FTYPE_V2DF_V2DF:
23086 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23087 type = v2df_ftype_v2df_v2df;
23088 break;
23089 case V2DF_FTYPE_V2DF_V4SF:
23090 type = v2df_ftype_v2df_v4sf;
23091 break;
23092 case V2DF_FTYPE_V2DF_V2DI:
23093 type = v2df_ftype_v2df_v2di;
23094 break;
23095 case V2DF_FTYPE_V2DF_DI:
23096 type = v2df_ftype_v2df_int64;
23097 break;
23098 case V2DF_FTYPE_V2DF_SI:
23099 type = v2df_ftype_v2df_int;
23100 break;
23101 case V2SF_FTYPE_V2SF_V2SF:
23102 type = v2sf_ftype_v2sf_v2sf;
23103 break;
23104 case V1DI_FTYPE_V1DI_V1DI:
23105 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23106 type = v1di_ftype_v1di_v1di;
23107 break;
23108 case V1DI_FTYPE_V8QI_V8QI:
23109 type = v1di_ftype_v8qi_v8qi;
23110 break;
23111 case V1DI_FTYPE_V2SI_V2SI:
23112 type = v1di_ftype_v2si_v2si;
23113 break;
23114 case V1DI_FTYPE_V1DI_SI_COUNT:
23115 type = v1di_ftype_v1di_int;
23116 break;
23117 case UINT64_FTYPE_UINT64_UINT64:
23118 type = uint64_ftype_uint64_uint64;
23119 break;
23120 case UINT_FTYPE_UINT_UINT:
23121 type = unsigned_ftype_unsigned_unsigned;
23122 break;
23123 case UINT_FTYPE_UINT_USHORT:
23124 type = unsigned_ftype_unsigned_ushort;
23125 break;
23126 case UINT_FTYPE_UINT_UCHAR:
23127 type = unsigned_ftype_unsigned_uchar;
23128 break;
23129 case V8HI_FTYPE_V8HI_INT:
23130 type = v8hi_ftype_v8hi_int;
23131 break;
23132 case V8SF_FTYPE_V8SF_INT:
23133 type = v8sf_ftype_v8sf_int;
23134 break;
23135 case V4SI_FTYPE_V4SI_INT:
23136 type = v4si_ftype_v4si_int;
23137 break;
23138 case V4SI_FTYPE_V8SI_INT:
23139 type = v4si_ftype_v8si_int;
23140 break;
23141 case V4HI_FTYPE_V4HI_INT:
23142 type = v4hi_ftype_v4hi_int;
23143 break;
23144 case V4DF_FTYPE_V4DF_INT:
23145 type = v4df_ftype_v4df_int;
23146 break;
23147 case V4SF_FTYPE_V4SF_INT:
23148 type = v4sf_ftype_v4sf_int;
23149 break;
23150 case V4SF_FTYPE_V8SF_INT:
23151 type = v4sf_ftype_v8sf_int;
23152 break;
23153 case V2DI_FTYPE_V2DI_INT:
23154 case V2DI2TI_FTYPE_V2DI_INT:
23155 type = v2di_ftype_v2di_int;
23156 break;
23157 case V2DF_FTYPE_V2DF_INT:
23158 type = v2df_ftype_v2df_int;
23159 break;
23160 case V2DF_FTYPE_V4DF_INT:
23161 type = v2df_ftype_v4df_int;
23162 break;
23163 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23164 type = v16qi_ftype_v16qi_v16qi_v16qi;
23165 break;
23166 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23167 type = v8sf_ftype_v8sf_v8sf_v8sf;
23168 break;
23169 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23170 type = v4df_ftype_v4df_v4df_v4df;
23171 break;
23172 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23173 type = v4sf_ftype_v4sf_v4sf_v4sf;
23174 break;
23175 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23176 type = v2df_ftype_v2df_v2df_v2df;
23177 break;
23178 case V16QI_FTYPE_V16QI_V16QI_INT:
23179 type = v16qi_ftype_v16qi_v16qi_int;
23180 break;
23181 case V8SI_FTYPE_V8SI_V8SI_INT:
23182 type = v8si_ftype_v8si_v8si_int;
23183 break;
23184 case V8SI_FTYPE_V8SI_V4SI_INT:
23185 type = v8si_ftype_v8si_v4si_int;
23186 break;
23187 case V8HI_FTYPE_V8HI_V8HI_INT:
23188 type = v8hi_ftype_v8hi_v8hi_int;
23189 break;
23190 case V8SF_FTYPE_V8SF_V8SF_INT:
23191 type = v8sf_ftype_v8sf_v8sf_int;
23192 break;
23193 case V8SF_FTYPE_V8SF_V4SF_INT:
23194 type = v8sf_ftype_v8sf_v4sf_int;
23195 break;
23196 case V4SI_FTYPE_V4SI_V4SI_INT:
23197 type = v4si_ftype_v4si_v4si_int;
23198 break;
23199 case V4DF_FTYPE_V4DF_V4DF_INT:
23200 type = v4df_ftype_v4df_v4df_int;
23201 break;
23202 case V4DF_FTYPE_V4DF_V2DF_INT:
23203 type = v4df_ftype_v4df_v2df_int;
23204 break;
23205 case V4SF_FTYPE_V4SF_V4SF_INT:
23206 type = v4sf_ftype_v4sf_v4sf_int;
23207 break;
23208 case V2DI_FTYPE_V2DI_V2DI_INT:
23209 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
23210 type = v2di_ftype_v2di_v2di_int;
23211 break;
23212 case V2DF_FTYPE_V2DF_V2DF_INT:
23213 type = v2df_ftype_v2df_v2df_int;
23214 break;
23215 case V2DI_FTYPE_V2DI_UINT_UINT:
23216 type = v2di_ftype_v2di_unsigned_unsigned;
23217 break;
23218 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23219 type = v2di_ftype_v2di_v2di_unsigned_unsigned;
23220 break;
23221 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
23222 type = v1di_ftype_v1di_v1di_int;
23223 break;
23224 default:
23225 gcc_unreachable ();
23228 def_builtin_const (d->mask, d->name, type, d->code);
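/* A usage sketch for the table-driven entries above: the
   V4SF_FTYPE_V4SF_V4SF case covers, among others,
   __builtin_ia32_addps, which <xmmintrin.h> exposes as _mm_add_ps:

     typedef float __v4sf __attribute__ ((__vector_size__ (16)));
     __v4sf
     add4 (__v4sf a, __v4sf b)
     {
       return __builtin_ia32_addps (a, b);
     }

   Registration via def_builtin_const marks the functions as free of
   side effects, so identical calls may be merged.  */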
23231 /* pcmpestr[im] insns. */
23232 for (i = 0, d = bdesc_pcmpestr;
23233 i < ARRAY_SIZE (bdesc_pcmpestr);
23234 i++, d++)
23236 if (d->code == IX86_BUILTIN_PCMPESTRM128)
23237 ftype = v16qi_ftype_v16qi_int_v16qi_int_int;
23238 else
23239 ftype = int_ftype_v16qi_int_v16qi_int_int;
23240 def_builtin_const (d->mask, d->name, ftype, d->code);
23243 /* pcmpistr[im] insns. */
23244 for (i = 0, d = bdesc_pcmpistr;
23245 i < ARRAY_SIZE (bdesc_pcmpistr);
23246 i++, d++)
23248 if (d->code == IX86_BUILTIN_PCMPISTRM128)
23249 ftype = v16qi_ftype_v16qi_v16qi_int;
23250 else
23251 ftype = int_ftype_v16qi_v16qi_int;
23252 def_builtin_const (d->mask, d->name, ftype, d->code);
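/* A usage sketch for the string-compare builtins registered above;
   the mode argument must be a compile-time constant.  With flags
   0x0c (unsigned bytes, equal-ordered aggregation), pcmpistri
   performs a substring search, returning the index of the first
   occurrence of the needle in the haystack:

     typedef char __v16qi __attribute__ ((__vector_size__ (16)));
     int
     find_needle (__v16qi needle, __v16qi hay)
     {
       return __builtin_ia32_pcmpistri128 (needle, hay, 0x0c);
     }

   which is what _mm_cmpistri in <smmintrin.h> reduces to.  */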
23255 /* comi/ucomi insns. */
23256 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
23257 if (d->mask == OPTION_MASK_ISA_SSE2)
23258 def_builtin_const (d->mask, d->name, int_ftype_v2df_v2df, d->code);
23259 else
23260 def_builtin_const (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
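/* A usage sketch for the comi/ucomi builtins: each compares the low
   elements of its two vector operands and yields an int, e.g.

     typedef float __v4sf __attribute__ ((__vector_size__ (16)));
     int
     low_equal (__v4sf a, __v4sf b)
     {
       return __builtin_ia32_comieq (a, b);
     }

   the builtin behind _mm_comieq_ss.  */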
23262 /* SSE */
23263 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
23264 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
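/* A usage sketch: these two builtins are what _mm_getcsr and
   _mm_setcsr reduce to.  Setting the flush-to-zero flag (bit 15 of
   MXCSR), for example:

     unsigned csr = __builtin_ia32_stmxcsr ();
     __builtin_ia32_ldmxcsr (csr | 0x8000);  */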
23266 /* SSE or 3DNow!A */
23267 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
23269 /* SSE2 */
23270 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
23272 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
23273 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
23275 /* SSE3. */
23276 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor", void_ftype_pcvoid_unsigned_unsigned, IX86_BUILTIN_MONITOR);
23277 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait", void_ftype_unsigned_unsigned, IX86_BUILTIN_MWAIT);
23279 /* AES */
23280 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENC128);
23281 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENCLAST128);
23282 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDEC128);
23283 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDECLAST128);
23284 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128", v2di_ftype_v2di, IX86_BUILTIN_AESIMC128);
23285 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128", v2di_ftype_v2di_int, IX86_BUILTIN_AESKEYGENASSIST128);
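/* A usage sketch for the AES builtins above, which <wmmintrin.h>
   wraps as _mm_aesenc_si128 and friends; one encryption round is

     typedef long long __v2di __attribute__ ((__vector_size__ (16)));
     __v2di
     aes_round (__v2di state, __v2di round_key)
     {
       return __builtin_ia32_aesenc128 (state, round_key);
     }  */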
23287 /* PCLMUL */
23288 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PCLMULQDQ128);
23290 /* AVX */
23291 def_builtin (OPTION_MASK_ISA_AVX, "__builtin_ia32_vzeroupper", void_ftype_void,
23292 TARGET_64BIT ? IX86_BUILTIN_VZEROUPPER_REX64 : IX86_BUILTIN_VZEROUPPER);
23294 /* Access to the vec_init patterns. */
23295 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
23296 integer_type_node, NULL_TREE);
23297 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si", ftype, IX86_BUILTIN_VEC_INIT_V2SI);
23299 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
23300 short_integer_type_node,
23301 short_integer_type_node,
23302 short_integer_type_node, NULL_TREE);
23303 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi", ftype, IX86_BUILTIN_VEC_INIT_V4HI);
23305 ftype = build_function_type_list (V8QI_type_node, char_type_node,
23306 char_type_node, char_type_node,
23307 char_type_node, char_type_node,
23308 char_type_node, char_type_node,
23309 char_type_node, NULL_TREE);
23310 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi", ftype, IX86_BUILTIN_VEC_INIT_V8QI);
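/* A usage sketch for the vec_init builtins: the arguments fill the
   vector starting from element 0, which is why _mm_set_pi32 (hi, lo)
   in <mmintrin.h> passes its arguments in reversed order:

     typedef int __v2si __attribute__ ((__vector_size__ (8)));
     __v2si
     make_pair (int lo, int hi)
     {
       return __builtin_ia32_vec_init_v2si (lo, hi);
     }  */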
23312 /* Access to the vec_extract patterns. */
23313 ftype = build_function_type_list (double_type_node, V2DF_type_node,
23314 integer_type_node, NULL_TREE);
23315 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df", ftype, IX86_BUILTIN_VEC_EXT_V2DF);
23317 ftype = build_function_type_list (long_long_integer_type_node,
23318 V2DI_type_node, integer_type_node,
23319 NULL_TREE);
23320 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di", ftype, IX86_BUILTIN_VEC_EXT_V2DI);
23322 ftype = build_function_type_list (float_type_node, V4SF_type_node,
23323 integer_type_node, NULL_TREE);
23324 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf", ftype, IX86_BUILTIN_VEC_EXT_V4SF);
23326 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
23327 integer_type_node, NULL_TREE);
23328 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si", ftype, IX86_BUILTIN_VEC_EXT_V4SI);
23330 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
23331 integer_type_node, NULL_TREE);
23332 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi", ftype, IX86_BUILTIN_VEC_EXT_V8HI);
23334 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
23335 integer_type_node, NULL_TREE);
23336 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_ext_v4hi", ftype, IX86_BUILTIN_VEC_EXT_V4HI);
23338 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
23339 integer_type_node, NULL_TREE);
23340 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si", ftype, IX86_BUILTIN_VEC_EXT_V2SI);
23342 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
23343 integer_type_node, NULL_TREE);
23344 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi", ftype, IX86_BUILTIN_VEC_EXT_V16QI);
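/* A usage sketch for the vec_extract builtins; the element index
   must be a compile-time constant.  Extracting the low float of a
   V4SF, as _mm_cvtss_f32 does:

     typedef float __v4sf __attribute__ ((__vector_size__ (16)));
     float
     first_lane (__v4sf v)
     {
       return __builtin_ia32_vec_ext_v4sf (v, 0);
     }  */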
23346 /* Access to the vec_set patterns. */
23347 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
23348 intDI_type_node,
23349 integer_type_node, NULL_TREE);
23350 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_vec_set_v2di", ftype, IX86_BUILTIN_VEC_SET_V2DI);
23352 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
23353 float_type_node,
23354 integer_type_node, NULL_TREE);
23355 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf", ftype, IX86_BUILTIN_VEC_SET_V4SF);
23357 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
23358 intSI_type_node,
23359 integer_type_node, NULL_TREE);
23360 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si", ftype, IX86_BUILTIN_VEC_SET_V4SI);
23362 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
23363 intHI_type_node,
23364 integer_type_node, NULL_TREE);
23365 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi", ftype, IX86_BUILTIN_VEC_SET_V8HI);
23367 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
23368 intHI_type_node,
23369 integer_type_node, NULL_TREE);
23370 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_set_v4hi", ftype, IX86_BUILTIN_VEC_SET_V4HI);
23372 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
23373 intQI_type_node,
23374 integer_type_node, NULL_TREE);
23375 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi", ftype, IX86_BUILTIN_VEC_SET_V16QI);
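/* A usage sketch for the vec_set builtins (again with a constant
   index); inserting into lane 3 of a V8HI, as _mm_insert_epi16 does:

     typedef short __v8hi __attribute__ ((__vector_size__ (16)));
     __v8hi
     set_lane3 (__v8hi v, short x)
     {
       return __builtin_ia32_vec_set_v8hi (v, x, 3);
     }  */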
23377 /* Add the SSE5 multi-arg instructions. */
23378 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
23380 tree mtype = NULL_TREE;
23382 if (d->name == 0)
23383 continue;
23385 switch ((enum multi_arg_type)d->flag)
23387 case MULTI_ARG_3_SF: mtype = v4sf_ftype_v4sf_v4sf_v4sf; break;
23388 case MULTI_ARG_3_DF: mtype = v2df_ftype_v2df_v2df_v2df; break;
23389 case MULTI_ARG_3_DI: mtype = v2di_ftype_v2di_v2di_v2di; break;
23390 case MULTI_ARG_3_SI: mtype = v4si_ftype_v4si_v4si_v4si; break;
23391 case MULTI_ARG_3_SI_DI: mtype = v4si_ftype_v4si_v4si_v2di; break;
23392 case MULTI_ARG_3_HI: mtype = v8hi_ftype_v8hi_v8hi_v8hi; break;
23393 case MULTI_ARG_3_HI_SI: mtype = v8hi_ftype_v8hi_v8hi_v4si; break;
23394 case MULTI_ARG_3_QI: mtype = v16qi_ftype_v16qi_v16qi_v16qi; break;
23395 case MULTI_ARG_3_PERMPS: mtype = v4sf_ftype_v4sf_v4sf_v16qi; break;
23396 case MULTI_ARG_3_PERMPD: mtype = v2df_ftype_v2df_v2df_v16qi; break;
23397 case MULTI_ARG_2_SF: mtype = v4sf_ftype_v4sf_v4sf; break;
23398 case MULTI_ARG_2_DF: mtype = v2df_ftype_v2df_v2df; break;
23399 case MULTI_ARG_2_DI: mtype = v2di_ftype_v2di_v2di; break;
23400 case MULTI_ARG_2_SI: mtype = v4si_ftype_v4si_v4si; break;
23401 case MULTI_ARG_2_HI: mtype = v8hi_ftype_v8hi_v8hi; break;
23402 case MULTI_ARG_2_QI: mtype = v16qi_ftype_v16qi_v16qi; break;
23403 case MULTI_ARG_2_DI_IMM: mtype = v2di_ftype_v2di_si; break;
23404 case MULTI_ARG_2_SI_IMM: mtype = v4si_ftype_v4si_si; break;
23405 case MULTI_ARG_2_HI_IMM: mtype = v8hi_ftype_v8hi_si; break;
23406 case MULTI_ARG_2_QI_IMM: mtype = v16qi_ftype_v16qi_si; break;
23407 case MULTI_ARG_2_SF_CMP: mtype = v4sf_ftype_v4sf_v4sf; break;
23408 case MULTI_ARG_2_DF_CMP: mtype = v2df_ftype_v2df_v2df; break;
23409 case MULTI_ARG_2_DI_CMP: mtype = v2di_ftype_v2di_v2di; break;
23410 case MULTI_ARG_2_SI_CMP: mtype = v4si_ftype_v4si_v4si; break;
23411 case MULTI_ARG_2_HI_CMP: mtype = v8hi_ftype_v8hi_v8hi; break;
23412 case MULTI_ARG_2_QI_CMP: mtype = v16qi_ftype_v16qi_v16qi; break;
23413 case MULTI_ARG_2_SF_TF: mtype = v4sf_ftype_v4sf_v4sf; break;
23414 case MULTI_ARG_2_DF_TF: mtype = v2df_ftype_v2df_v2df; break;
23415 case MULTI_ARG_2_DI_TF: mtype = v2di_ftype_v2di_v2di; break;
23416 case MULTI_ARG_2_SI_TF: mtype = v4si_ftype_v4si_v4si; break;
23417 case MULTI_ARG_2_HI_TF: mtype = v8hi_ftype_v8hi_v8hi; break;
23418 case MULTI_ARG_2_QI_TF: mtype = v16qi_ftype_v16qi_v16qi; break;
23419 case MULTI_ARG_1_SF: mtype = v4sf_ftype_v4sf; break;
23420 case MULTI_ARG_1_DF: mtype = v2df_ftype_v2df; break;
23421 case MULTI_ARG_1_DI: mtype = v2di_ftype_v2di; break;
23422 case MULTI_ARG_1_SI: mtype = v4si_ftype_v4si; break;
23423 case MULTI_ARG_1_HI: mtype = v8hi_ftype_v8hi; break;
23424 case MULTI_ARG_1_QI: mtype = v16qi_ftype_v16qi; break;
23425 case MULTI_ARG_1_SI_DI: mtype = v2di_ftype_v4si; break;
23426 case MULTI_ARG_1_HI_DI: mtype = v2di_ftype_v8hi; break;
23427 case MULTI_ARG_1_HI_SI: mtype = v4si_ftype_v8hi; break;
23428 case MULTI_ARG_1_QI_DI: mtype = v2di_ftype_v16qi; break;
23429 case MULTI_ARG_1_QI_SI: mtype = v4si_ftype_v16qi; break;
23430 case MULTI_ARG_1_QI_HI: mtype = v8hi_ftype_v16qi; break;
23431 case MULTI_ARG_1_PH2PS: mtype = v4sf_ftype_v4hi; break;
23432 case MULTI_ARG_1_PS2PH: mtype = v4hi_ftype_v4sf; break;
23433 case MULTI_ARG_UNKNOWN:
23434 default:
23435 gcc_unreachable ();
23438 if (mtype)
23439 def_builtin_const (d->mask, d->name, mtype, d->code);
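/* A usage sketch for one of the SSE5 multi-arg builtins: the
   MULTI_ARG_3_SF entries include __builtin_ia32_fmaddps, a fused
   multiply-add computing a * b + c per element:

     typedef float __v4sf __attribute__ ((__vector_size__ (16)));
     __v4sf
     fma4 (__v4sf a, __v4sf b, __v4sf c)
     {
       return __builtin_ia32_fmaddps (a, b, c);
     }  */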
23443 /* Internal method for ix86_init_builtins. */
23445 static void
23446 ix86_init_builtins_va_builtins_abi (void)
23448 tree ms_va_ref, sysv_va_ref;
23449 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
23450 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
23451 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
23452 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
23454 if (!TARGET_64BIT)
23455 return;
23456 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
23457 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
23458 ms_va_ref = build_reference_type (ms_va_list_type_node);
23459 sysv_va_ref =
23460 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
23462 fnvoid_va_end_ms =
23463 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
23464 fnvoid_va_start_ms =
23465 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
23466 fnvoid_va_end_sysv =
23467 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
23468 fnvoid_va_start_sysv =
23469 build_varargs_function_type_list (void_type_node, sysv_va_ref,
23470 NULL_TREE);
23471 fnvoid_va_copy_ms =
23472 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
23473 NULL_TREE);
23474 fnvoid_va_copy_sysv =
23475 build_function_type_list (void_type_node, sysv_va_ref,
23476 sysv_va_ref, NULL_TREE);
23478 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
23479 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
23480 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
23481 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
23482 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
23483 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
23484 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
23485 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
23486 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
23487 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
23488 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
23489 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
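/* A usage sketch for the ABI-specific varargs builtins registered
   above, assuming a 64-bit target:

     int __attribute__ ((ms_abi))
     ms_sum (int n, ...)
     {
       __builtin_ms_va_list ap;
       int i, sum = 0;

       __builtin_ms_va_start (ap, n);
       for (i = 0; i < n; i++)
         sum += __builtin_va_arg (ap, int);
       __builtin_ms_va_end (ap);
       return sum;
     }  */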
23492 static void
23493 ix86_init_builtins (void)
23495 tree float128_type_node = make_node (REAL_TYPE);
23496 tree ftype, decl;
23498 /* The __float80 type. */
23499 if (TYPE_MODE (long_double_type_node) == XFmode)
23500 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
23501 "__float80");
23502 else
23504 /* The __float80 type. */
23505 tree float80_type_node = make_node (REAL_TYPE);
23507 TYPE_PRECISION (float80_type_node) = 80;
23508 layout_type (float80_type_node);
23509 (*lang_hooks.types.register_builtin_type) (float80_type_node,
23510 "__float80");
23513 /* The __float128 type. */
23514 TYPE_PRECISION (float128_type_node) = 128;
23515 layout_type (float128_type_node);
23516 (*lang_hooks.types.register_builtin_type) (float128_type_node,
23517 "__float128");
23519 /* TFmode support builtins. */
23520 ftype = build_function_type (float128_type_node, void_list_node);
23521 decl = add_builtin_function ("__builtin_infq", ftype,
23522 IX86_BUILTIN_INFQ, BUILT_IN_MD,
23523 NULL, NULL_TREE);
23524 ix86_builtins[(int) IX86_BUILTIN_INFQ] = decl;
23526 /* We will expand them to a normal call if SSE2 isn't available since
23527 they are used by libgcc. */
23528 ftype = build_function_type_list (float128_type_node,
23529 float128_type_node,
23530 NULL_TREE);
23531 decl = add_builtin_function ("__builtin_fabsq", ftype,
23532 IX86_BUILTIN_FABSQ, BUILT_IN_MD,
23533 "__fabstf2", NULL_TREE);
23534 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = decl;
23535 TREE_READONLY (decl) = 1;
23537 ftype = build_function_type_list (float128_type_node,
23538 float128_type_node,
23539 float128_type_node,
23540 NULL_TREE);
23541 decl = add_builtin_function ("__builtin_copysignq", ftype,
23542 IX86_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
23543 "__copysigntf3", NULL_TREE);
23544 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = decl;
23545 TREE_READONLY (decl) = 1;
23547 ix86_init_mmx_sse_builtins ();
23548 if (TARGET_64BIT)
23549 ix86_init_builtins_va_builtins_abi ();
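/* A usage sketch for the TFmode builtins just registered:

     __float128
     magnitude_of_inf (void)
     {
       __float128 inf = __builtin_infq ();
       return __builtin_fabsq (-inf);
     }

   Without SSE2, the fabsq/copysignq builtins expand to calls to the
   library routines named above (__fabstf2, __copysigntf3).  */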
23552 /* Errors in the source file can cause expand_expr to return const0_rtx
23553 where we expect a vector. To avoid crashing, use one of the vector
23554 clear instructions. */
23555 static rtx
23556 safe_vector_operand (rtx x, enum machine_mode mode)
23558 if (x == const0_rtx)
23559 x = CONST0_RTX (mode);
23560 return x;
23563 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
23565 static rtx
23566 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
23568 rtx pat;
23569 tree arg0 = CALL_EXPR_ARG (exp, 0);
23570 tree arg1 = CALL_EXPR_ARG (exp, 1);
23571 rtx op0 = expand_normal (arg0);
23572 rtx op1 = expand_normal (arg1);
23573 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23574 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23575 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
23577 if (VECTOR_MODE_P (mode0))
23578 op0 = safe_vector_operand (op0, mode0);
23579 if (VECTOR_MODE_P (mode1))
23580 op1 = safe_vector_operand (op1, mode1);
23582 if (optimize || !target
23583 || GET_MODE (target) != tmode
23584 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23585 target = gen_reg_rtx (tmode);
23587 if (GET_MODE (op1) == SImode && mode1 == TImode)
23589 rtx x = gen_reg_rtx (V4SImode);
23590 emit_insn (gen_sse2_loadd (x, op1));
23591 op1 = gen_lowpart (TImode, x);
23594 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
23595 op0 = copy_to_mode_reg (mode0, op0);
23596 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
23597 op1 = copy_to_mode_reg (mode1, op1);
23599 pat = GEN_FCN (icode) (target, op0, op1);
23600 if (! pat)
23601 return 0;
23603 emit_insn (pat);
23605 return target;
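/* In the expander above, any call argument rejected by the insn
   pattern's operand predicate is first copied into a fresh register
   of the operand's mode.  The SImode-to-TImode special case handles
   builtins whose insn wants a TImode second operand while the user
   passes a 32-bit integer: the integer is loaded into a V4SI
   register and its TImode lowpart is used.  */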
23608 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
23610 static rtx
23611 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
23612 enum multi_arg_type m_type,
23613 enum insn_code sub_code)
23615 rtx pat;
23616 int i;
23617 int nargs;
23618 bool comparison_p = false;
23619 bool tf_p = false;
23620 bool last_arg_constant = false;
23621 int num_memory = 0;
23622 struct {
23623 rtx op;
23624 enum machine_mode mode;
23625 } args[4];
23627 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23629 switch (m_type)
23631 case MULTI_ARG_3_SF:
23632 case MULTI_ARG_3_DF:
23633 case MULTI_ARG_3_DI:
23634 case MULTI_ARG_3_SI:
23635 case MULTI_ARG_3_SI_DI:
23636 case MULTI_ARG_3_HI:
23637 case MULTI_ARG_3_HI_SI:
23638 case MULTI_ARG_3_QI:
23639 case MULTI_ARG_3_PERMPS:
23640 case MULTI_ARG_3_PERMPD:
23641 nargs = 3;
23642 break;
23644 case MULTI_ARG_2_SF:
23645 case MULTI_ARG_2_DF:
23646 case MULTI_ARG_2_DI:
23647 case MULTI_ARG_2_SI:
23648 case MULTI_ARG_2_HI:
23649 case MULTI_ARG_2_QI:
23650 nargs = 2;
23651 break;
23653 case MULTI_ARG_2_DI_IMM:
23654 case MULTI_ARG_2_SI_IMM:
23655 case MULTI_ARG_2_HI_IMM:
23656 case MULTI_ARG_2_QI_IMM:
23657 nargs = 2;
23658 last_arg_constant = true;
23659 break;
23661 case MULTI_ARG_1_SF:
23662 case MULTI_ARG_1_DF:
23663 case MULTI_ARG_1_DI:
23664 case MULTI_ARG_1_SI:
23665 case MULTI_ARG_1_HI:
23666 case MULTI_ARG_1_QI:
23667 case MULTI_ARG_1_SI_DI:
23668 case MULTI_ARG_1_HI_DI:
23669 case MULTI_ARG_1_HI_SI:
23670 case MULTI_ARG_1_QI_DI:
23671 case MULTI_ARG_1_QI_SI:
23672 case MULTI_ARG_1_QI_HI:
23673 case MULTI_ARG_1_PH2PS:
23674 case MULTI_ARG_1_PS2PH:
23675 nargs = 1;
23676 break;
23678 case MULTI_ARG_2_SF_CMP:
23679 case MULTI_ARG_2_DF_CMP:
23680 case MULTI_ARG_2_DI_CMP:
23681 case MULTI_ARG_2_SI_CMP:
23682 case MULTI_ARG_2_HI_CMP:
23683 case MULTI_ARG_2_QI_CMP:
23684 nargs = 2;
23685 comparison_p = true;
23686 break;
23688 case MULTI_ARG_2_SF_TF:
23689 case MULTI_ARG_2_DF_TF:
23690 case MULTI_ARG_2_DI_TF:
23691 case MULTI_ARG_2_SI_TF:
23692 case MULTI_ARG_2_HI_TF:
23693 case MULTI_ARG_2_QI_TF:
23694 nargs = 2;
23695 tf_p = true;
23696 break;
23698 case MULTI_ARG_UNKNOWN:
23699 default:
23700 gcc_unreachable ();
23703 if (optimize || !target
23704 || GET_MODE (target) != tmode
23705 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23706 target = gen_reg_rtx (tmode);
23708 gcc_assert (nargs <= 4);
23710 for (i = 0; i < nargs; i++)
23712 tree arg = CALL_EXPR_ARG (exp, i);
23713 rtx op = expand_normal (arg);
23714 int adjust = (comparison_p) ? 1 : 0;
23715 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
23717 if (last_arg_constant && i == nargs-1)
23719 if (GET_CODE (op) != CONST_INT)
23721 error ("last argument must be an immediate");
23722 return gen_reg_rtx (tmode);
23725 else
23727 if (VECTOR_MODE_P (mode))
23728 op = safe_vector_operand (op, mode);
23730 /* If we aren't optimizing, only allow one memory operand to be
23731 generated. */
23732 if (memory_operand (op, mode))
23733 num_memory++;
23735 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
23737 if (optimize
23738 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
23739 || num_memory > 1)
23740 op = force_reg (mode, op);
23743 args[i].op = op;
23744 args[i].mode = mode;
23747 switch (nargs)
23749 case 1:
23750 pat = GEN_FCN (icode) (target, args[0].op);
23751 break;
23753 case 2:
23754 if (tf_p)
23755 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23756 GEN_INT ((int)sub_code));
23757 else if (! comparison_p)
23758 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23759 else
23761 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23762 args[0].op,
23763 args[1].op);
23765 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23767 break;
23769 case 3:
23770 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23771 break;
23773 default:
23774 gcc_unreachable ();
23777 if (! pat)
23778 return 0;
23780 emit_insn (pat);
23781 return target;
23784 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23785 insns with vec_merge. */
23787 static rtx
23788 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23789 rtx target)
23791 rtx pat;
23792 tree arg0 = CALL_EXPR_ARG (exp, 0);
23793 rtx op1, op0 = expand_normal (arg0);
23794 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23795 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23797 if (optimize || !target
23798 || GET_MODE (target) != tmode
23799 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23800 target = gen_reg_rtx (tmode);
23802 if (VECTOR_MODE_P (mode0))
23803 op0 = safe_vector_operand (op0, mode0);
23805 if ((optimize && !register_operand (op0, mode0))
23806 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23807 op0 = copy_to_mode_reg (mode0, op0);
23809 op1 = op0;
23810 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23811 op1 = copy_to_mode_reg (mode0, op1);
23813 pat = GEN_FCN (icode) (target, op0, op1);
23814 if (! pat)
23815 return 0;
23816 emit_insn (pat);
23817 return target;
23820 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23822 static rtx
23823 ix86_expand_sse_compare (const struct builtin_description *d,
23824 tree exp, rtx target, bool swap)
23826 rtx pat;
23827 tree arg0 = CALL_EXPR_ARG (exp, 0);
23828 tree arg1 = CALL_EXPR_ARG (exp, 1);
23829 rtx op0 = expand_normal (arg0);
23830 rtx op1 = expand_normal (arg1);
23831 rtx op2;
23832 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23833 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23834 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23835 enum rtx_code comparison = d->comparison;
23837 if (VECTOR_MODE_P (mode0))
23838 op0 = safe_vector_operand (op0, mode0);
23839 if (VECTOR_MODE_P (mode1))
23840 op1 = safe_vector_operand (op1, mode1);
23842 /* Swap operands if we have a comparison that isn't available in
23843 hardware. */
23844 if (swap)
23846 rtx tmp = gen_reg_rtx (mode1);
23847 emit_move_insn (tmp, op1);
23848 op1 = op0;
23849 op0 = tmp;
23852 if (optimize || !target
23853 || GET_MODE (target) != tmode
23854 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23855 target = gen_reg_rtx (tmode);
23857 if ((optimize && !register_operand (op0, mode0))
23858 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23859 op0 = copy_to_mode_reg (mode0, op0);
23860 if ((optimize && !register_operand (op1, mode1))
23861 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23862 op1 = copy_to_mode_reg (mode1, op1);
23864 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23865 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23866 if (! pat)
23867 return 0;
23868 emit_insn (pat);
23869 return target;
23872 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23874 static rtx
23875 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23876 rtx target)
23878 rtx pat;
23879 tree arg0 = CALL_EXPR_ARG (exp, 0);
23880 tree arg1 = CALL_EXPR_ARG (exp, 1);
23881 rtx op0 = expand_normal (arg0);
23882 rtx op1 = expand_normal (arg1);
23883 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23884 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23885 enum rtx_code comparison = d->comparison;
23887 if (VECTOR_MODE_P (mode0))
23888 op0 = safe_vector_operand (op0, mode0);
23889 if (VECTOR_MODE_P (mode1))
23890 op1 = safe_vector_operand (op1, mode1);
23892 /* Swap operands if we have a comparison that isn't available in
23893 hardware. */
23894 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23896 rtx tmp = op1;
23897 op1 = op0;
23898 op0 = tmp;
23901 target = gen_reg_rtx (SImode);
23902 emit_move_insn (target, const0_rtx);
23903 target = gen_rtx_SUBREG (QImode, target, 0);
23905 if ((optimize && !register_operand (op0, mode0))
23906 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23907 op0 = copy_to_mode_reg (mode0, op0);
23908 if ((optimize && !register_operand (op1, mode1))
23909 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23910 op1 = copy_to_mode_reg (mode1, op1);
23912 pat = GEN_FCN (d->icode) (op0, op1);
23913 if (! pat)
23914 return 0;
23915 emit_insn (pat);
23916 emit_insn (gen_rtx_SET (VOIDmode,
23917 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23918 gen_rtx_fmt_ee (comparison, QImode,
23919 SET_DEST (pat),
23920 const0_rtx)));
23922 return SUBREG_REG (target);
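/* The comi expander above materializes its boolean result without a
   conditional branch: it zeroes an SImode register, emits the
   compare, then stores the flags-derived comparison into the QImode
   low part via STRICT_LOW_PART (in effect a setcc into the low
   byte), and returns the full SImode register.  */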
23925 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23927 static rtx
23928 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23929 rtx target)
23931 rtx pat;
23932 tree arg0 = CALL_EXPR_ARG (exp, 0);
23933 tree arg1 = CALL_EXPR_ARG (exp, 1);
23934 rtx op0 = expand_normal (arg0);
23935 rtx op1 = expand_normal (arg1);
23936 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23937 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23938 enum rtx_code comparison = d->comparison;
23940 if (VECTOR_MODE_P (mode0))
23941 op0 = safe_vector_operand (op0, mode0);
23942 if (VECTOR_MODE_P (mode1))
23943 op1 = safe_vector_operand (op1, mode1);
23945 target = gen_reg_rtx (SImode);
23946 emit_move_insn (target, const0_rtx);
23947 target = gen_rtx_SUBREG (QImode, target, 0);
23949 if ((optimize && !register_operand (op0, mode0))
23950 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23951 op0 = copy_to_mode_reg (mode0, op0);
23952 if ((optimize && !register_operand (op1, mode1))
23953 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23954 op1 = copy_to_mode_reg (mode1, op1);
23956 pat = GEN_FCN (d->icode) (op0, op1);
23957 if (! pat)
23958 return 0;
23959 emit_insn (pat);
23960 emit_insn (gen_rtx_SET (VOIDmode,
23961 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23962 gen_rtx_fmt_ee (comparison, QImode,
23963 SET_DEST (pat),
23964 const0_rtx)));
23966 return SUBREG_REG (target);
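/* Example (illustrative only): the SSE4.1 intrinsic

     int z = _mm_testz_si128 (mask, val);   /* from <smmintrin.h> */

   comes through here; ptest sets ZF when (mask & val) is all zeroes, and
   the condition recorded in D->COMPARISON is turned into a 0/1 value in
   the same way as in ix86_expand_sse_comi above.  */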
23969 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23971 static rtx
23972 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23973 tree exp, rtx target)
23975 rtx pat;
23976 tree arg0 = CALL_EXPR_ARG (exp, 0);
23977 tree arg1 = CALL_EXPR_ARG (exp, 1);
23978 tree arg2 = CALL_EXPR_ARG (exp, 2);
23979 tree arg3 = CALL_EXPR_ARG (exp, 3);
23980 tree arg4 = CALL_EXPR_ARG (exp, 4);
23981 rtx scratch0, scratch1;
23982 rtx op0 = expand_normal (arg0);
23983 rtx op1 = expand_normal (arg1);
23984 rtx op2 = expand_normal (arg2);
23985 rtx op3 = expand_normal (arg3);
23986 rtx op4 = expand_normal (arg4);
23987 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23989 tmode0 = insn_data[d->icode].operand[0].mode;
23990 tmode1 = insn_data[d->icode].operand[1].mode;
23991 modev2 = insn_data[d->icode].operand[2].mode;
23992 modei3 = insn_data[d->icode].operand[3].mode;
23993 modev4 = insn_data[d->icode].operand[4].mode;
23994 modei5 = insn_data[d->icode].operand[5].mode;
23995 modeimm = insn_data[d->icode].operand[6].mode;
23997 if (VECTOR_MODE_P (modev2))
23998 op0 = safe_vector_operand (op0, modev2);
23999 if (VECTOR_MODE_P (modev4))
24000 op2 = safe_vector_operand (op2, modev4);
24002 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
24003 op0 = copy_to_mode_reg (modev2, op0);
24004 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
24005 op1 = copy_to_mode_reg (modei3, op1);
24006 if ((optimize && !register_operand (op2, modev4))
24007 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
24008 op2 = copy_to_mode_reg (modev4, op2);
24009 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
24010 op3 = copy_to_mode_reg (modei5, op3);
24012 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
24013 {
24014 error ("the fifth argument must be an 8-bit immediate");
24015 return const0_rtx;
24016 }
24018 if (d->code == IX86_BUILTIN_PCMPESTRI128)
24020 if (optimize || !target
24021 || GET_MODE (target) != tmode0
24022 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
24023 target = gen_reg_rtx (tmode0);
24025 scratch1 = gen_reg_rtx (tmode1);
24027 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
24029 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
24031 if (optimize || !target
24032 || GET_MODE (target) != tmode1
24033 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
24034 target = gen_reg_rtx (tmode1);
24036 scratch0 = gen_reg_rtx (tmode0);
24038 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
24040 else
24042 gcc_assert (d->flag);
24044 scratch0 = gen_reg_rtx (tmode0);
24045 scratch1 = gen_reg_rtx (tmode1);
24047 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
24050 if (! pat)
24051 return 0;
24053 emit_insn (pat);
24055 if (d->flag)
24057 target = gen_reg_rtx (SImode);
24058 emit_move_insn (target, const0_rtx);
24059 target = gen_rtx_SUBREG (QImode, target, 0);
24061 emit_insn
24062 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
24063 gen_rtx_fmt_ee (EQ, QImode,
24064 gen_rtx_REG ((enum machine_mode) d->flag,
24065 FLAGS_REG),
24066 const0_rtx)));
24067 return SUBREG_REG (target);
24069 else
24070 return target;
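/* Example (illustrative only): the explicit-length string compare

     int idx = _mm_cmpestri (a, la, b, lb, _SIDD_CMP_EQUAL_EACH);

   maps to the IX86_BUILTIN_PCMPESTRI128 case above.  The two vectors and
   their lengths become operands 2..5, and the mode argument must satisfy
   the operand[6] predicate as a compile-time constant, or the "fifth
   argument" error earlier in this function is issued.  */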
24074 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
24076 static rtx
24077 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
24078 tree exp, rtx target)
24080 rtx pat;
24081 tree arg0 = CALL_EXPR_ARG (exp, 0);
24082 tree arg1 = CALL_EXPR_ARG (exp, 1);
24083 tree arg2 = CALL_EXPR_ARG (exp, 2);
24084 rtx scratch0, scratch1;
24085 rtx op0 = expand_normal (arg0);
24086 rtx op1 = expand_normal (arg1);
24087 rtx op2 = expand_normal (arg2);
24088 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
24090 tmode0 = insn_data[d->icode].operand[0].mode;
24091 tmode1 = insn_data[d->icode].operand[1].mode;
24092 modev2 = insn_data[d->icode].operand[2].mode;
24093 modev3 = insn_data[d->icode].operand[3].mode;
24094 modeimm = insn_data[d->icode].operand[4].mode;
24096 if (VECTOR_MODE_P (modev2))
24097 op0 = safe_vector_operand (op0, modev2);
24098 if (VECTOR_MODE_P (modev3))
24099 op1 = safe_vector_operand (op1, modev3);
24101 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
24102 op0 = copy_to_mode_reg (modev2, op0);
24103 if ((optimize && !register_operand (op1, modev3))
24104 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
24105 op1 = copy_to_mode_reg (modev3, op1);
24107 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
24108 {
24109 error ("the third argument must be an 8-bit immediate");
24110 return const0_rtx;
24111 }
24113 if (d->code == IX86_BUILTIN_PCMPISTRI128)
24115 if (optimize || !target
24116 || GET_MODE (target) != tmode0
24117 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
24118 target = gen_reg_rtx (tmode0);
24120 scratch1 = gen_reg_rtx (tmode1);
24122 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
24124 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
24126 if (optimize || !target
24127 || GET_MODE (target) != tmode1
24128 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
24129 target = gen_reg_rtx (tmode1);
24131 scratch0 = gen_reg_rtx (tmode0);
24133 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
24135 else
24137 gcc_assert (d->flag);
24139 scratch0 = gen_reg_rtx (tmode0);
24140 scratch1 = gen_reg_rtx (tmode1);
24142 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
24145 if (! pat)
24146 return 0;
24148 emit_insn (pat);
24150 if (d->flag)
24152 target = gen_reg_rtx (SImode);
24153 emit_move_insn (target, const0_rtx);
24154 target = gen_rtx_SUBREG (QImode, target, 0);
24156 emit_insn
24157 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
24158 gen_rtx_fmt_ee (EQ, QImode,
24159 gen_rtx_REG ((enum machine_mode) d->flag,
24160 FLAGS_REG),
24161 const0_rtx)));
24162 return SUBREG_REG (target);
24164 else
24165 return target;
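/* Example (illustrative only): the implicit-length variant

     int idx = _mm_cmpistri (a, b, _SIDD_CMP_EQUAL_EACH);

   is the three-operand analogue of the pcmpestr case: here the immediate
   is the third argument and is validated against operand[4].  */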
24168 /* Subroutine of ix86_expand_builtin to take care of insns with
24169 a variable number of operands. */
24171 static rtx
24172 ix86_expand_args_builtin (const struct builtin_description *d,
24173 tree exp, rtx target)
24175 rtx pat, real_target;
24176 unsigned int i, nargs;
24177 unsigned int nargs_constant = 0;
24178 int num_memory = 0;
24179 struct
24180 {
24181 rtx op;
24182 enum machine_mode mode;
24183 } args[4];
24184 bool last_arg_count = false;
24185 enum insn_code icode = d->icode;
24186 const struct insn_data *insn_p = &insn_data[icode];
24187 enum machine_mode tmode = insn_p->operand[0].mode;
24188 enum machine_mode rmode = VOIDmode;
24189 bool swap = false;
24190 enum rtx_code comparison = d->comparison;
24192 switch ((enum ix86_builtin_type) d->flag)
24194 case INT_FTYPE_V8SF_V8SF_PTEST:
24195 case INT_FTYPE_V4DI_V4DI_PTEST:
24196 case INT_FTYPE_V4DF_V4DF_PTEST:
24197 case INT_FTYPE_V4SF_V4SF_PTEST:
24198 case INT_FTYPE_V2DI_V2DI_PTEST:
24199 case INT_FTYPE_V2DF_V2DF_PTEST:
24200 return ix86_expand_sse_ptest (d, exp, target);
24201 case FLOAT128_FTYPE_FLOAT128:
24202 case FLOAT_FTYPE_FLOAT:
24203 case INT64_FTYPE_V4SF:
24204 case INT64_FTYPE_V2DF:
24205 case INT_FTYPE_V16QI:
24206 case INT_FTYPE_V8QI:
24207 case INT_FTYPE_V8SF:
24208 case INT_FTYPE_V4DF:
24209 case INT_FTYPE_V4SF:
24210 case INT_FTYPE_V2DF:
24211 case V16QI_FTYPE_V16QI:
24212 case V8SI_FTYPE_V8SF:
24213 case V8SI_FTYPE_V4SI:
24214 case V8HI_FTYPE_V8HI:
24215 case V8HI_FTYPE_V16QI:
24216 case V8QI_FTYPE_V8QI:
24217 case V8SF_FTYPE_V8SF:
24218 case V8SF_FTYPE_V8SI:
24219 case V8SF_FTYPE_V4SF:
24220 case V4SI_FTYPE_V4SI:
24221 case V4SI_FTYPE_V16QI:
24222 case V4SI_FTYPE_V4SF:
24223 case V4SI_FTYPE_V8SI:
24224 case V4SI_FTYPE_V8HI:
24225 case V4SI_FTYPE_V4DF:
24226 case V4SI_FTYPE_V2DF:
24227 case V4HI_FTYPE_V4HI:
24228 case V4DF_FTYPE_V4DF:
24229 case V4DF_FTYPE_V4SI:
24230 case V4DF_FTYPE_V4SF:
24231 case V4DF_FTYPE_V2DF:
24232 case V4SF_FTYPE_V4SF:
24233 case V4SF_FTYPE_V4SI:
24234 case V4SF_FTYPE_V8SF:
24235 case V4SF_FTYPE_V4DF:
24236 case V4SF_FTYPE_V2DF:
24237 case V2DI_FTYPE_V2DI:
24238 case V2DI_FTYPE_V16QI:
24239 case V2DI_FTYPE_V8HI:
24240 case V2DI_FTYPE_V4SI:
24241 case V2DF_FTYPE_V2DF:
24242 case V2DF_FTYPE_V4SI:
24243 case V2DF_FTYPE_V4DF:
24244 case V2DF_FTYPE_V4SF:
24245 case V2DF_FTYPE_V2SI:
24246 case V2SI_FTYPE_V2SI:
24247 case V2SI_FTYPE_V4SF:
24248 case V2SI_FTYPE_V2SF:
24249 case V2SI_FTYPE_V2DF:
24250 case V2SF_FTYPE_V2SF:
24251 case V2SF_FTYPE_V2SI:
24252 nargs = 1;
24253 break;
24254 case V4SF_FTYPE_V4SF_VEC_MERGE:
24255 case V2DF_FTYPE_V2DF_VEC_MERGE:
24256 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
24257 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
24258 case V16QI_FTYPE_V16QI_V16QI:
24259 case V16QI_FTYPE_V8HI_V8HI:
24260 case V8QI_FTYPE_V8QI_V8QI:
24261 case V8QI_FTYPE_V4HI_V4HI:
24262 case V8HI_FTYPE_V8HI_V8HI:
24263 case V8HI_FTYPE_V16QI_V16QI:
24264 case V8HI_FTYPE_V4SI_V4SI:
24265 case V8SF_FTYPE_V8SF_V8SF:
24266 case V8SF_FTYPE_V8SF_V8SI:
24267 case V4SI_FTYPE_V4SI_V4SI:
24268 case V4SI_FTYPE_V8HI_V8HI:
24269 case V4SI_FTYPE_V4SF_V4SF:
24270 case V4SI_FTYPE_V2DF_V2DF:
24271 case V4HI_FTYPE_V4HI_V4HI:
24272 case V4HI_FTYPE_V8QI_V8QI:
24273 case V4HI_FTYPE_V2SI_V2SI:
24274 case V4DF_FTYPE_V4DF_V4DF:
24275 case V4DF_FTYPE_V4DF_V4DI:
24276 case V4SF_FTYPE_V4SF_V4SF:
24277 case V4SF_FTYPE_V4SF_V4SI:
24278 case V4SF_FTYPE_V4SF_V2SI:
24279 case V4SF_FTYPE_V4SF_V2DF:
24280 case V4SF_FTYPE_V4SF_DI:
24281 case V4SF_FTYPE_V4SF_SI:
24282 case V2DI_FTYPE_V2DI_V2DI:
24283 case V2DI_FTYPE_V16QI_V16QI:
24284 case V2DI_FTYPE_V4SI_V4SI:
24285 case V2DI_FTYPE_V2DI_V16QI:
24286 case V2DI_FTYPE_V2DF_V2DF:
24287 case V2SI_FTYPE_V2SI_V2SI:
24288 case V2SI_FTYPE_V4HI_V4HI:
24289 case V2SI_FTYPE_V2SF_V2SF:
24290 case V2DF_FTYPE_V2DF_V2DF:
24291 case V2DF_FTYPE_V2DF_V4SF:
24292 case V2DF_FTYPE_V2DF_V2DI:
24293 case V2DF_FTYPE_V2DF_DI:
24294 case V2DF_FTYPE_V2DF_SI:
24295 case V2SF_FTYPE_V2SF_V2SF:
24296 case V1DI_FTYPE_V1DI_V1DI:
24297 case V1DI_FTYPE_V8QI_V8QI:
24298 case V1DI_FTYPE_V2SI_V2SI:
24299 if (comparison == UNKNOWN)
24300 return ix86_expand_binop_builtin (icode, exp, target);
24301 nargs = 2;
24302 break;
24303 case V4SF_FTYPE_V4SF_V4SF_SWAP:
24304 case V2DF_FTYPE_V2DF_V2DF_SWAP:
24305 gcc_assert (comparison != UNKNOWN);
24306 nargs = 2;
24307 swap = true;
24308 break;
24309 case V8HI_FTYPE_V8HI_V8HI_COUNT:
24310 case V8HI_FTYPE_V8HI_SI_COUNT:
24311 case V4SI_FTYPE_V4SI_V4SI_COUNT:
24312 case V4SI_FTYPE_V4SI_SI_COUNT:
24313 case V4HI_FTYPE_V4HI_V4HI_COUNT:
24314 case V4HI_FTYPE_V4HI_SI_COUNT:
24315 case V2DI_FTYPE_V2DI_V2DI_COUNT:
24316 case V2DI_FTYPE_V2DI_SI_COUNT:
24317 case V2SI_FTYPE_V2SI_V2SI_COUNT:
24318 case V2SI_FTYPE_V2SI_SI_COUNT:
24319 case V1DI_FTYPE_V1DI_V1DI_COUNT:
24320 case V1DI_FTYPE_V1DI_SI_COUNT:
24321 nargs = 2;
24322 last_arg_count = true;
24323 break;
24324 case UINT64_FTYPE_UINT64_UINT64:
24325 case UINT_FTYPE_UINT_UINT:
24326 case UINT_FTYPE_UINT_USHORT:
24327 case UINT_FTYPE_UINT_UCHAR:
24328 nargs = 2;
24329 break;
24330 case V2DI2TI_FTYPE_V2DI_INT:
24331 nargs = 2;
24332 rmode = V2DImode;
24333 nargs_constant = 1;
24334 break;
24335 case V8HI_FTYPE_V8HI_INT:
24336 case V8SF_FTYPE_V8SF_INT:
24337 case V4SI_FTYPE_V4SI_INT:
24338 case V4SI_FTYPE_V8SI_INT:
24339 case V4HI_FTYPE_V4HI_INT:
24340 case V4DF_FTYPE_V4DF_INT:
24341 case V4SF_FTYPE_V4SF_INT:
24342 case V4SF_FTYPE_V8SF_INT:
24343 case V2DI_FTYPE_V2DI_INT:
24344 case V2DF_FTYPE_V2DF_INT:
24345 case V2DF_FTYPE_V4DF_INT:
24346 nargs = 2;
24347 nargs_constant = 1;
24348 break;
24349 case V16QI_FTYPE_V16QI_V16QI_V16QI:
24350 case V8SF_FTYPE_V8SF_V8SF_V8SF:
24351 case V4DF_FTYPE_V4DF_V4DF_V4DF:
24352 case V4SF_FTYPE_V4SF_V4SF_V4SF:
24353 case V2DF_FTYPE_V2DF_V2DF_V2DF:
24354 nargs = 3;
24355 break;
24356 case V16QI_FTYPE_V16QI_V16QI_INT:
24357 case V8HI_FTYPE_V8HI_V8HI_INT:
24358 case V8SI_FTYPE_V8SI_V8SI_INT:
24359 case V8SI_FTYPE_V8SI_V4SI_INT:
24360 case V8SF_FTYPE_V8SF_V8SF_INT:
24361 case V8SF_FTYPE_V8SF_V4SF_INT:
24362 case V4SI_FTYPE_V4SI_V4SI_INT:
24363 case V4DF_FTYPE_V4DF_V4DF_INT:
24364 case V4DF_FTYPE_V4DF_V2DF_INT:
24365 case V4SF_FTYPE_V4SF_V4SF_INT:
24366 case V2DI_FTYPE_V2DI_V2DI_INT:
24367 case V2DF_FTYPE_V2DF_V2DF_INT:
24368 nargs = 3;
24369 nargs_constant = 1;
24370 break;
24371 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
24372 nargs = 3;
24373 rmode = V2DImode;
24374 nargs_constant = 1;
24375 break;
24376 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
24377 nargs = 3;
24378 rmode = DImode;
24379 nargs_constant = 1;
24380 break;
24381 case V2DI_FTYPE_V2DI_UINT_UINT:
24382 nargs = 3;
24383 nargs_constant = 2;
24384 break;
24385 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
24386 nargs = 4;
24387 nargs_constant = 2;
24388 break;
24389 default:
24390 gcc_unreachable ();
24393 gcc_assert (nargs <= ARRAY_SIZE (args));
24395 if (comparison != UNKNOWN)
24397 gcc_assert (nargs == 2);
24398 return ix86_expand_sse_compare (d, exp, target, swap);
24401 if (rmode == VOIDmode || rmode == tmode)
24403 if (optimize
24404 || target == 0
24405 || GET_MODE (target) != tmode
24406 || ! (*insn_p->operand[0].predicate) (target, tmode))
24407 target = gen_reg_rtx (tmode);
24408 real_target = target;
24410 else
24412 target = gen_reg_rtx (rmode);
24413 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
24416 for (i = 0; i < nargs; i++)
24418 tree arg = CALL_EXPR_ARG (exp, i);
24419 rtx op = expand_normal (arg);
24420 enum machine_mode mode = insn_p->operand[i + 1].mode;
24421 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
24423 if (last_arg_count && (i + 1) == nargs)
24425 /* SIMD shift insns take either an 8-bit immediate or a
24426 register as the count. But the builtin functions take an int
24427 as the count; if the count doesn't match, we put it in a register. */
24428 if (!match)
24430 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
24431 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
24432 op = copy_to_reg (op);
24435 else if ((nargs - i) <= nargs_constant)
24437 if (!match)
24438 switch (icode)
24440 case CODE_FOR_sse4_1_roundpd:
24441 case CODE_FOR_sse4_1_roundps:
24442 case CODE_FOR_sse4_1_roundsd:
24443 case CODE_FOR_sse4_1_roundss:
24444 case CODE_FOR_sse4_1_blendps:
24445 case CODE_FOR_avx_blendpd256:
24446 case CODE_FOR_avx_vpermilv4df:
24447 case CODE_FOR_avx_roundpd256:
24448 case CODE_FOR_avx_roundps256:
24449 error ("the last argument must be a 4-bit immediate");
24450 return const0_rtx;
24452 case CODE_FOR_sse4_1_blendpd:
24453 case CODE_FOR_avx_vpermilv2df:
24454 error ("the last argument must be a 2-bit immediate");
24455 return const0_rtx;
24457 case CODE_FOR_avx_vextractf128v4df:
24458 case CODE_FOR_avx_vextractf128v8sf:
24459 case CODE_FOR_avx_vextractf128v8si:
24460 case CODE_FOR_avx_vinsertf128v4df:
24461 case CODE_FOR_avx_vinsertf128v8sf:
24462 case CODE_FOR_avx_vinsertf128v8si:
24463 error ("the last argument must be a 1-bit immediate");
24464 return const0_rtx;
24466 case CODE_FOR_avx_cmpsdv2df3:
24467 case CODE_FOR_avx_cmpssv4sf3:
24468 case CODE_FOR_avx_cmppdv2df3:
24469 case CODE_FOR_avx_cmppsv4sf3:
24470 case CODE_FOR_avx_cmppdv4df3:
24471 case CODE_FOR_avx_cmppsv8sf3:
24472 error ("the last argument must be a 5-bit immediate");
24473 return const0_rtx;
24475 default:
24476 switch (nargs_constant)
24478 case 2:
24479 if ((nargs - i) == nargs_constant)
24481 error ("the next to last argument must be an 8-bit immediate");
24482 break;
24484 case 1:
24485 error ("the last argument must be an 8-bit immediate");
24486 break;
24487 default:
24488 gcc_unreachable ();
24490 return const0_rtx;
24493 else
24495 if (VECTOR_MODE_P (mode))
24496 op = safe_vector_operand (op, mode);
24498 /* If we aren't optimizing, only allow one memory operand to
24499 be generated. */
24500 if (memory_operand (op, mode))
24501 num_memory++;
24503 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
24505 if (optimize || !match || num_memory > 1)
24506 op = copy_to_mode_reg (mode, op);
24508 else
24510 op = copy_to_reg (op);
24511 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
24515 args[i].op = op;
24516 args[i].mode = mode;
24519 switch (nargs)
24521 case 1:
24522 pat = GEN_FCN (icode) (real_target, args[0].op);
24523 break;
24524 case 2:
24525 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
24526 break;
24527 case 3:
24528 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24529 args[2].op);
24530 break;
24531 case 4:
24532 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24533 args[2].op, args[3].op);
24534 break;
24535 default:
24536 gcc_unreachable ();
24539 if (! pat)
24540 return 0;
24542 emit_insn (pat);
24543 return target;
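/* Worked example (editorial illustration): the SSE4.1 rounding intrinsic

     __m128d r = _mm_round_pd (x, _MM_FROUND_TO_NEAREST_INT);

   is classified as V2DF_FTYPE_V2DF_INT, so nargs is 2 and nargs_constant
   is 1.  If the rounding mode fails the operand predicate, the
   CODE_FOR_sse4_1_roundpd case above demands a 4-bit immediate and
   expansion is abandoned with const0_rtx.  */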
24546 /* Subroutine of ix86_expand_builtin to take care of special insns
24547 with a variable number of operands. */
24549 static rtx
24550 ix86_expand_special_args_builtin (const struct builtin_description *d,
24551 tree exp, rtx target)
24553 tree arg;
24554 rtx pat, op;
24555 unsigned int i, nargs, arg_adjust, memory;
24556 struct
24558 rtx op;
24559 enum machine_mode mode;
24560 } args[2];
24561 enum insn_code icode = d->icode;
24562 bool last_arg_constant = false;
24563 const struct insn_data *insn_p = &insn_data[icode];
24564 enum machine_mode tmode = insn_p->operand[0].mode;
24565 enum { load, store } klass;
24567 switch ((enum ix86_special_builtin_type) d->flag)
24569 case VOID_FTYPE_VOID:
24570 emit_insn (GEN_FCN (icode) (target));
24571 return 0;
24572 case V2DI_FTYPE_PV2DI:
24573 case V32QI_FTYPE_PCCHAR:
24574 case V16QI_FTYPE_PCCHAR:
24575 case V8SF_FTYPE_PCV4SF:
24576 case V8SF_FTYPE_PCFLOAT:
24577 case V4SF_FTYPE_PCFLOAT:
24578 case V4DF_FTYPE_PCV2DF:
24579 case V4DF_FTYPE_PCDOUBLE:
24580 case V2DF_FTYPE_PCDOUBLE:
24581 nargs = 1;
24582 klass = load;
24583 memory = 0;
24584 break;
24585 case VOID_FTYPE_PV2SF_V4SF:
24586 case VOID_FTYPE_PV4DI_V4DI:
24587 case VOID_FTYPE_PV2DI_V2DI:
24588 case VOID_FTYPE_PCHAR_V32QI:
24589 case VOID_FTYPE_PCHAR_V16QI:
24590 case VOID_FTYPE_PFLOAT_V8SF:
24591 case VOID_FTYPE_PFLOAT_V4SF:
24592 case VOID_FTYPE_PDOUBLE_V4DF:
24593 case VOID_FTYPE_PDOUBLE_V2DF:
24594 case VOID_FTYPE_PDI_DI:
24595 case VOID_FTYPE_PINT_INT:
24596 nargs = 1;
24597 klass = store;
24598 /* Reserve memory operand for target. */
24599 memory = ARRAY_SIZE (args);
24600 break;
24601 case V4SF_FTYPE_V4SF_PCV2SF:
24602 case V2DF_FTYPE_V2DF_PCDOUBLE:
24603 nargs = 2;
24604 klass = load;
24605 memory = 1;
24606 break;
24607 case V8SF_FTYPE_PCV8SF_V8SF:
24608 case V4DF_FTYPE_PCV4DF_V4DF:
24609 case V4SF_FTYPE_PCV4SF_V4SF:
24610 case V2DF_FTYPE_PCV2DF_V2DF:
24611 nargs = 2;
24612 klass = load;
24613 memory = 0;
24614 break;
24615 case VOID_FTYPE_PV8SF_V8SF_V8SF:
24616 case VOID_FTYPE_PV4DF_V4DF_V4DF:
24617 case VOID_FTYPE_PV4SF_V4SF_V4SF:
24618 case VOID_FTYPE_PV2DF_V2DF_V2DF:
24619 nargs = 2;
24620 klass = store;
24621 /* Reserve memory operand for target. */
24622 memory = ARRAY_SIZE (args);
24623 break;
24624 default:
24625 gcc_unreachable ();
24628 gcc_assert (nargs <= ARRAY_SIZE (args));
24630 if (klass == store)
24632 arg = CALL_EXPR_ARG (exp, 0);
24633 op = expand_normal (arg);
24634 gcc_assert (target == 0);
24635 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
24636 arg_adjust = 1;
24638 else
24640 arg_adjust = 0;
24641 if (optimize
24642 || target == 0
24643 || GET_MODE (target) != tmode
24644 || ! (*insn_p->operand[0].predicate) (target, tmode))
24645 target = gen_reg_rtx (tmode);
24648 for (i = 0; i < nargs; i++)
24650 enum machine_mode mode = insn_p->operand[i + 1].mode;
24651 bool match;
24653 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
24654 op = expand_normal (arg);
24655 match = (*insn_p->operand[i + 1].predicate) (op, mode);
24657 if (last_arg_constant && (i + 1) == nargs)
24659 if (!match)
24660 switch (icode)
24662 default:
24663 error ("the last argument must be an 8-bit immediate");
24664 return const0_rtx;
24667 else
24669 if (i == memory)
24671 /* This must be the memory operand. */
24672 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24673 gcc_assert (GET_MODE (op) == mode
24674 || GET_MODE (op) == VOIDmode);
24676 else
24678 /* This must be a register. */
24679 if (VECTOR_MODE_P (mode))
24680 op = safe_vector_operand (op, mode);
24682 gcc_assert (GET_MODE (op) == mode
24683 || GET_MODE (op) == VOIDmode);
24684 op = copy_to_mode_reg (mode, op);
24688 args[i].op = op;
24689 args[i].mode = mode;
24692 switch (nargs)
24694 case 1:
24695 pat = GEN_FCN (icode) (target, args[0].op);
24696 break;
24697 case 2:
24698 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24699 break;
24700 default:
24701 gcc_unreachable ();
24704 if (! pat)
24705 return 0;
24706 emit_insn (pat);
24707 return klass == store ? 0 : target;
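/* Example (illustrative only): an unaligned vector load such as

     __m128 v = _mm_loadu_ps (p);

   is a V4SF_FTYPE_PCFLOAT "load" class builtin, so the pointer argument
   is wrapped in a MEM of the vector mode; a store such as _mm_storeu_ps
   instead reserves the MEM position for the target.  */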
24710 /* Return the integer constant in ARG. Constrain it to be in the range
24711 of the subparts of VEC_TYPE; issue an error if not. */
24713 static int
24714 get_element_number (tree vec_type, tree arg)
24716 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24718 if (!host_integerp (arg, 1)
24719 || (elt = tree_low_cst (arg, 1), elt > max))
24721 error ("selector must be an integer constant in the range 0..%wi", max);
24722 return 0;
24725 return elt;
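/* Example (illustrative only): for a V8HI vector the valid range is
   0..7, so

     (void) __builtin_ia32_vec_ext_v8hi (v, 9);

   triggers the "selector must be an integer constant" diagnostic above,
   as does any selector that is not a compile-time constant.  */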
24728 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24729 ix86_expand_vector_init. We DO have language-level syntax for this, in
24730 the form of (type){ init-list }. Except that since we can't place emms
24731 instructions from inside the compiler, we can't allow the use of MMX
24732 registers unless the user explicitly asks for it. So we do *not* define
24733 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24734 we have builtins invoked by mmintrin.h that give us license to emit
24735 these sorts of instructions. */
24737 static rtx
24738 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24740 enum machine_mode tmode = TYPE_MODE (type);
24741 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24742 int i, n_elt = GET_MODE_NUNITS (tmode);
24743 rtvec v = rtvec_alloc (n_elt);
24745 gcc_assert (VECTOR_MODE_P (tmode));
24746 gcc_assert (call_expr_nargs (exp) == n_elt);
24748 for (i = 0; i < n_elt; ++i)
24750 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24751 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24754 if (!target || !register_operand (target, tmode))
24755 target = gen_reg_rtx (tmode);
24757 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24758 return target;
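/* Example (illustrative only): mmintrin.h implements _mm_set_pi32 on top
   of __builtin_ia32_vec_init_v2si, which lands here; each scalar
   argument becomes one element of the PARALLEL that is handed to
   ix86_expand_vector_init.  */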
24761 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24762 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24763 had a language-level syntax for referencing vector elements. */
24765 static rtx
24766 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24768 enum machine_mode tmode, mode0;
24769 tree arg0, arg1;
24770 int elt;
24771 rtx op0;
24773 arg0 = CALL_EXPR_ARG (exp, 0);
24774 arg1 = CALL_EXPR_ARG (exp, 1);
24776 op0 = expand_normal (arg0);
24777 elt = get_element_number (TREE_TYPE (arg0), arg1);
24779 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24780 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24781 gcc_assert (VECTOR_MODE_P (mode0));
24783 op0 = force_reg (mode0, op0);
24785 if (optimize || !target || !register_operand (target, tmode))
24786 target = gen_reg_rtx (tmode);
24788 ix86_expand_vector_extract (true, target, op0, elt);
24790 return target;
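/* Example (illustrative only):

     int w = _mm_extract_epi16 (v, 3);

   is implemented via __builtin_ia32_vec_ext_v8hi and expands here: the
   selector is validated by get_element_number and the element is pulled
   out by ix86_expand_vector_extract.  */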
24793 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24794 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24795 a language-level syntax for referencing vector elements. */
24797 static rtx
24798 ix86_expand_vec_set_builtin (tree exp)
24800 enum machine_mode tmode, mode1;
24801 tree arg0, arg1, arg2;
24802 int elt;
24803 rtx op0, op1, target;
24805 arg0 = CALL_EXPR_ARG (exp, 0);
24806 arg1 = CALL_EXPR_ARG (exp, 1);
24807 arg2 = CALL_EXPR_ARG (exp, 2);
24809 tmode = TYPE_MODE (TREE_TYPE (arg0));
24810 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24811 gcc_assert (VECTOR_MODE_P (tmode));
24813 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24814 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24815 elt = get_element_number (TREE_TYPE (arg0), arg2);
24817 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24818 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24820 op0 = force_reg (tmode, op0);
24821 op1 = force_reg (mode1, op1);
24823 /* OP0 is the source of these builtin functions and shouldn't be
24824 modified. Create a copy, use it and return it as target. */
24825 target = gen_reg_rtx (tmode);
24826 emit_move_insn (target, op0);
24827 ix86_expand_vector_set (true, target, op1, elt);
24829 return target;
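/* Example (illustrative only):

     __m128i r = _mm_insert_epi16 (v, 42, 3);

   uses __builtin_ia32_vec_set_v8hi; the copy above leaves the source
   operand unmodified, so the builtin behaves as a pure function.  */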
24832 /* Expand an expression EXP that calls a built-in function,
24833 with result going to TARGET if that's convenient
24834 (and in mode MODE if that's convenient).
24835 SUBTARGET may be used as the target for computing one of EXP's operands.
24836 IGNORE is nonzero if the value is to be ignored. */
24838 static rtx
24839 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24840 enum machine_mode mode ATTRIBUTE_UNUSED,
24841 int ignore ATTRIBUTE_UNUSED)
24843 const struct builtin_description *d;
24844 size_t i;
24845 enum insn_code icode;
24846 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24847 tree arg0, arg1, arg2;
24848 rtx op0, op1, op2, pat;
24849 enum machine_mode mode0, mode1, mode2;
24850 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24852 /* Determine whether the builtin function is available under the current ISA.
24853 Originally the builtin was not created if it wasn't applicable to the
24854 current ISA based on the command line switches. With function specific
24855 options, we need to check in the context of the function making the call
24856 whether it is supported. */
24857 if (ix86_builtins_isa[fcode].isa
24858 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24860 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24861 NULL, NULL, false);
24863 if (!opts)
24864 error ("%qE needs unknown isa option", fndecl);
24865 else
24867 gcc_assert (opts != NULL);
24868 error ("%qE needs isa option %s", fndecl, opts);
24869 free (opts);
24871 return const0_rtx;
24874 switch (fcode)
24876 case IX86_BUILTIN_MASKMOVQ:
24877 case IX86_BUILTIN_MASKMOVDQU:
24878 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24879 ? CODE_FOR_mmx_maskmovq
24880 : CODE_FOR_sse2_maskmovdqu);
24881 /* Note the arg order is different from the operand order. */
24882 arg1 = CALL_EXPR_ARG (exp, 0);
24883 arg2 = CALL_EXPR_ARG (exp, 1);
24884 arg0 = CALL_EXPR_ARG (exp, 2);
24885 op0 = expand_normal (arg0);
24886 op1 = expand_normal (arg1);
24887 op2 = expand_normal (arg2);
24888 mode0 = insn_data[icode].operand[0].mode;
24889 mode1 = insn_data[icode].operand[1].mode;
24890 mode2 = insn_data[icode].operand[2].mode;
24892 op0 = force_reg (Pmode, op0);
24893 op0 = gen_rtx_MEM (mode1, op0);
24895 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24896 op0 = copy_to_mode_reg (mode0, op0);
24897 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24898 op1 = copy_to_mode_reg (mode1, op1);
24899 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24900 op2 = copy_to_mode_reg (mode2, op2);
24901 pat = GEN_FCN (icode) (op0, op1, op2);
24902 if (! pat)
24903 return 0;
24904 emit_insn (pat);
24905 return 0;
24907 case IX86_BUILTIN_LDMXCSR:
24908 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24909 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24910 emit_move_insn (target, op0);
24911 emit_insn (gen_sse_ldmxcsr (target));
24912 return 0;
24914 case IX86_BUILTIN_STMXCSR:
24915 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24916 emit_insn (gen_sse_stmxcsr (target));
24917 return copy_to_mode_reg (SImode, target);
24919 case IX86_BUILTIN_CLFLUSH:
24920 arg0 = CALL_EXPR_ARG (exp, 0);
24921 op0 = expand_normal (arg0);
24922 icode = CODE_FOR_sse2_clflush;
24923 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24924 op0 = copy_to_mode_reg (Pmode, op0);
24926 emit_insn (gen_sse2_clflush (op0));
24927 return 0;
24929 case IX86_BUILTIN_MONITOR:
24930 arg0 = CALL_EXPR_ARG (exp, 0);
24931 arg1 = CALL_EXPR_ARG (exp, 1);
24932 arg2 = CALL_EXPR_ARG (exp, 2);
24933 op0 = expand_normal (arg0);
24934 op1 = expand_normal (arg1);
24935 op2 = expand_normal (arg2);
24936 if (!REG_P (op0))
24937 op0 = copy_to_mode_reg (Pmode, op0);
24938 if (!REG_P (op1))
24939 op1 = copy_to_mode_reg (SImode, op1);
24940 if (!REG_P (op2))
24941 op2 = copy_to_mode_reg (SImode, op2);
24942 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24943 return 0;
24945 case IX86_BUILTIN_MWAIT:
24946 arg0 = CALL_EXPR_ARG (exp, 0);
24947 arg1 = CALL_EXPR_ARG (exp, 1);
24948 op0 = expand_normal (arg0);
24949 op1 = expand_normal (arg1);
24950 if (!REG_P (op0))
24951 op0 = copy_to_mode_reg (SImode, op0);
24952 if (!REG_P (op1))
24953 op1 = copy_to_mode_reg (SImode, op1);
24954 emit_insn (gen_sse3_mwait (op0, op1));
24955 return 0;
24957 case IX86_BUILTIN_VEC_INIT_V2SI:
24958 case IX86_BUILTIN_VEC_INIT_V4HI:
24959 case IX86_BUILTIN_VEC_INIT_V8QI:
24960 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24962 case IX86_BUILTIN_VEC_EXT_V2DF:
24963 case IX86_BUILTIN_VEC_EXT_V2DI:
24964 case IX86_BUILTIN_VEC_EXT_V4SF:
24965 case IX86_BUILTIN_VEC_EXT_V4SI:
24966 case IX86_BUILTIN_VEC_EXT_V8HI:
24967 case IX86_BUILTIN_VEC_EXT_V2SI:
24968 case IX86_BUILTIN_VEC_EXT_V4HI:
24969 case IX86_BUILTIN_VEC_EXT_V16QI:
24970 return ix86_expand_vec_ext_builtin (exp, target);
24972 case IX86_BUILTIN_VEC_SET_V2DI:
24973 case IX86_BUILTIN_VEC_SET_V4SF:
24974 case IX86_BUILTIN_VEC_SET_V4SI:
24975 case IX86_BUILTIN_VEC_SET_V8HI:
24976 case IX86_BUILTIN_VEC_SET_V4HI:
24977 case IX86_BUILTIN_VEC_SET_V16QI:
24978 return ix86_expand_vec_set_builtin (exp);
24980 case IX86_BUILTIN_INFQ:
24982 REAL_VALUE_TYPE inf;
24983 rtx tmp;
24985 real_inf (&inf);
24986 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24988 tmp = validize_mem (force_const_mem (mode, tmp));
24990 if (target == 0)
24991 target = gen_reg_rtx (mode);
24993 emit_move_insn (target, tmp);
24994 return target;
24997 default:
24998 break;
25001 for (i = 0, d = bdesc_special_args;
25002 i < ARRAY_SIZE (bdesc_special_args);
25003 i++, d++)
25004 if (d->code == fcode)
25005 return ix86_expand_special_args_builtin (d, exp, target);
25007 for (i = 0, d = bdesc_args;
25008 i < ARRAY_SIZE (bdesc_args);
25009 i++, d++)
25010 if (d->code == fcode)
25011 switch (fcode)
25013 case IX86_BUILTIN_FABSQ:
25014 case IX86_BUILTIN_COPYSIGNQ:
25015 if (!TARGET_SSE2)
25016 /* Emit a normal call if SSE2 isn't available. */
25017 return expand_call (exp, target, ignore);
25018 default:
25019 return ix86_expand_args_builtin (d, exp, target);
25022 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
25023 if (d->code == fcode)
25024 return ix86_expand_sse_comi (d, exp, target);
25026 for (i = 0, d = bdesc_pcmpestr;
25027 i < ARRAY_SIZE (bdesc_pcmpestr);
25028 i++, d++)
25029 if (d->code == fcode)
25030 return ix86_expand_sse_pcmpestr (d, exp, target);
25032 for (i = 0, d = bdesc_pcmpistr;
25033 i < ARRAY_SIZE (bdesc_pcmpistr);
25034 i++, d++)
25035 if (d->code == fcode)
25036 return ix86_expand_sse_pcmpistr (d, exp, target);
25038 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
25039 if (d->code == fcode)
25040 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
25041 (enum multi_arg_type)d->flag,
25042 d->comparison);
25044 gcc_unreachable ();
25047 /* Returns a function decl for a vectorized version of the builtin function
25048 with builtin function code FN and the result vector type TYPE, or NULL_TREE
25049 if it is not available. */
25051 static tree
25052 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
25053 tree type_in)
25055 enum machine_mode in_mode, out_mode;
25056 int in_n, out_n;
25058 if (TREE_CODE (type_out) != VECTOR_TYPE
25059 || TREE_CODE (type_in) != VECTOR_TYPE)
25060 return NULL_TREE;
25062 out_mode = TYPE_MODE (TREE_TYPE (type_out));
25063 out_n = TYPE_VECTOR_SUBPARTS (type_out);
25064 in_mode = TYPE_MODE (TREE_TYPE (type_in));
25065 in_n = TYPE_VECTOR_SUBPARTS (type_in);
25067 switch (fn)
25069 case BUILT_IN_SQRT:
25070 if (out_mode == DFmode && out_n == 2
25071 && in_mode == DFmode && in_n == 2)
25072 return ix86_builtins[IX86_BUILTIN_SQRTPD];
25073 break;
25075 case BUILT_IN_SQRTF:
25076 if (out_mode == SFmode && out_n == 4
25077 && in_mode == SFmode && in_n == 4)
25078 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
25079 break;
25081 case BUILT_IN_LRINT:
25082 if (out_mode == SImode && out_n == 4
25083 && in_mode == DFmode && in_n == 2)
25084 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
25085 break;
25087 case BUILT_IN_LRINTF:
25088 if (out_mode == SImode && out_n == 4
25089 && in_mode == SFmode && in_n == 4)
25090 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
25091 break;
25093 default:
25094 ;
25095 }
25097 /* Dispatch to a handler for a vectorization library. */
25098 if (ix86_veclib_handler)
25099 return (*ix86_veclib_handler)(fn, type_out, type_in);
25101 return NULL_TREE;
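/* Example (illustrative only): compiled with -O3 -ffast-math (so that
   errno handling does not block vectorization), a loop such as

     for (i = 0; i < n; i++)
       out[i] = sqrt (in[i]);   /* double out[], in[] */

   makes the vectorizer query BUILT_IN_SQRT with V2DF types, and the
   SQRTPD builtin returned above lets it emit one sqrtpd per two
   elements.  */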
25104 /* Handler for an SVML-style interface to
25105 a library with vectorized intrinsics. */
25107 static tree
25108 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
25110 char name[20];
25111 tree fntype, new_fndecl, args;
25112 unsigned arity;
25113 const char *bname;
25114 enum machine_mode el_mode, in_mode;
25115 int n, in_n;
25117 /* The SVML is suitable for unsafe math only. */
25118 if (!flag_unsafe_math_optimizations)
25119 return NULL_TREE;
25121 el_mode = TYPE_MODE (TREE_TYPE (type_out));
25122 n = TYPE_VECTOR_SUBPARTS (type_out);
25123 in_mode = TYPE_MODE (TREE_TYPE (type_in));
25124 in_n = TYPE_VECTOR_SUBPARTS (type_in);
25125 if (el_mode != in_mode
25126 || n != in_n)
25127 return NULL_TREE;
25129 switch (fn)
25131 case BUILT_IN_EXP:
25132 case BUILT_IN_LOG:
25133 case BUILT_IN_LOG10:
25134 case BUILT_IN_POW:
25135 case BUILT_IN_TANH:
25136 case BUILT_IN_TAN:
25137 case BUILT_IN_ATAN:
25138 case BUILT_IN_ATAN2:
25139 case BUILT_IN_ATANH:
25140 case BUILT_IN_CBRT:
25141 case BUILT_IN_SINH:
25142 case BUILT_IN_SIN:
25143 case BUILT_IN_ASINH:
25144 case BUILT_IN_ASIN:
25145 case BUILT_IN_COSH:
25146 case BUILT_IN_COS:
25147 case BUILT_IN_ACOSH:
25148 case BUILT_IN_ACOS:
25149 if (el_mode != DFmode || n != 2)
25150 return NULL_TREE;
25151 break;
25153 case BUILT_IN_EXPF:
25154 case BUILT_IN_LOGF:
25155 case BUILT_IN_LOG10F:
25156 case BUILT_IN_POWF:
25157 case BUILT_IN_TANHF:
25158 case BUILT_IN_TANF:
25159 case BUILT_IN_ATANF:
25160 case BUILT_IN_ATAN2F:
25161 case BUILT_IN_ATANHF:
25162 case BUILT_IN_CBRTF:
25163 case BUILT_IN_SINHF:
25164 case BUILT_IN_SINF:
25165 case BUILT_IN_ASINHF:
25166 case BUILT_IN_ASINF:
25167 case BUILT_IN_COSHF:
25168 case BUILT_IN_COSF:
25169 case BUILT_IN_ACOSHF:
25170 case BUILT_IN_ACOSF:
25171 if (el_mode != SFmode || n != 4)
25172 return NULL_TREE;
25173 break;
25175 default:
25176 return NULL_TREE;
25179 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
25181 if (fn == BUILT_IN_LOGF)
25182 strcpy (name, "vmlsLn4");
25183 else if (fn == BUILT_IN_LOG)
25184 strcpy (name, "vmldLn2");
25185 else if (n == 4)
25187 sprintf (name, "vmls%s", bname+10);
25188 name[strlen (name)-1] = '4';
25190 else
25191 sprintf (name, "vmld%s2", bname+10);
25193 /* Convert to uppercase. */
25194 name[4] &= ~0x20;
25196 arity = 0;
25197 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
25198 args = TREE_CHAIN (args))
25199 arity++;
25201 if (arity == 1)
25202 fntype = build_function_type_list (type_out, type_in, NULL);
25203 else
25204 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
25206 /* Build a function declaration for the vectorized function. */
25207 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
25208 TREE_PUBLIC (new_fndecl) = 1;
25209 DECL_EXTERNAL (new_fndecl) = 1;
25210 DECL_IS_NOVOPS (new_fndecl) = 1;
25211 TREE_READONLY (new_fndecl) = 1;
25213 return new_fndecl;
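/* Name mangling examples implied by the code above (illustrative):
   BUILT_IN_SINF on V4SFmode yields "vmlsSin4", BUILT_IN_SIN on V2DFmode
   yields "vmldSin2", and the natural log cases are hard-coded as
   "vmlsLn4" and "vmldLn2".  */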
25216 /* Handler for an ACML-style interface to
25217 a library with vectorized intrinsics. */
25219 static tree
25220 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
25222 char name[20] = "__vr.._";
25223 tree fntype, new_fndecl, args;
25224 unsigned arity;
25225 const char *bname;
25226 enum machine_mode el_mode, in_mode;
25227 int n, in_n;
25229 /* The ACML is 64-bit only and suitable for unsafe math only, as
25230 it does not correctly support parts of IEEE arithmetic (such as
25231 denormals) with the required precision. */
25232 if (!TARGET_64BIT
25233 || !flag_unsafe_math_optimizations)
25234 return NULL_TREE;
25236 el_mode = TYPE_MODE (TREE_TYPE (type_out));
25237 n = TYPE_VECTOR_SUBPARTS (type_out);
25238 in_mode = TYPE_MODE (TREE_TYPE (type_in));
25239 in_n = TYPE_VECTOR_SUBPARTS (type_in);
25240 if (el_mode != in_mode
25241 || n != in_n)
25242 return NULL_TREE;
25244 switch (fn)
25246 case BUILT_IN_SIN:
25247 case BUILT_IN_COS:
25248 case BUILT_IN_EXP:
25249 case BUILT_IN_LOG:
25250 case BUILT_IN_LOG2:
25251 case BUILT_IN_LOG10:
25252 name[4] = 'd';
25253 name[5] = '2';
25254 if (el_mode != DFmode
25255 || n != 2)
25256 return NULL_TREE;
25257 break;
25259 case BUILT_IN_SINF:
25260 case BUILT_IN_COSF:
25261 case BUILT_IN_EXPF:
25262 case BUILT_IN_POWF:
25263 case BUILT_IN_LOGF:
25264 case BUILT_IN_LOG2F:
25265 case BUILT_IN_LOG10F:
25266 name[4] = 's';
25267 name[5] = '4';
25268 if (el_mode != SFmode
25269 || n != 4)
25270 return NULL_TREE;
25271 break;
25273 default:
25274 return NULL_TREE;
25277 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
25278 sprintf (name + 7, "%s", bname+10);
25280 arity = 0;
25281 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
25282 args = TREE_CHAIN (args))
25283 arity++;
25285 if (arity == 1)
25286 fntype = build_function_type_list (type_out, type_in, NULL);
25287 else
25288 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
25290 /* Build a function declaration for the vectorized function. */
25291 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
25292 TREE_PUBLIC (new_fndecl) = 1;
25293 DECL_EXTERNAL (new_fndecl) = 1;
25294 DECL_IS_NOVOPS (new_fndecl) = 1;
25295 TREE_READONLY (new_fndecl) = 1;
25297 return new_fndecl;
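/* Name construction examples implied by the code above (illustrative):
   BUILT_IN_SIN becomes "__vrd2_sin" and BUILT_IN_SINF becomes
   "__vrs4_sinf", matching the ACML vector routine naming scheme.  */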
25301 /* Returns a decl of a function that implements conversion of an integer vector
25302 into a floating-point vector, or vice-versa. TYPE is the type of the integer
25303 side of the conversion.
25304 Return NULL_TREE if it is not available. */
25306 static tree
25307 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
25309 if (TREE_CODE (type) != VECTOR_TYPE
25310 /* There are only conversions from/to signed integers. */
25311 || TYPE_UNSIGNED (TREE_TYPE (type)))
25312 return NULL_TREE;
25314 switch (code)
25316 case FLOAT_EXPR:
25317 switch (TYPE_MODE (type))
25319 case V4SImode:
25320 return ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
25321 default:
25322 return NULL_TREE;
25325 case FIX_TRUNC_EXPR:
25326 switch (TYPE_MODE (type))
25328 case V4SImode:
25329 return ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
25330 default:
25331 return NULL_TREE;
25333 default:
25334 return NULL_TREE;
25339 /* Returns a decl for a target-specific builtin that implements the
25340 reciprocal of the function, or NULL_TREE if it is not available. */
25342 static tree
25343 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
25344 bool sqrt ATTRIBUTE_UNUSED)
25346 if (! (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
25347 && flag_finite_math_only && !flag_trapping_math
25348 && flag_unsafe_math_optimizations))
25349 return NULL_TREE;
25351 if (md_fn)
25352 /* Machine dependent builtins. */
25353 switch (fn)
25355 /* Vectorized version of sqrt to rsqrt conversion. */
25356 case IX86_BUILTIN_SQRTPS_NR:
25357 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
25359 default:
25360 return NULL_TREE;
25362 else
25363 /* Normal builtins. */
25364 switch (fn)
25366 /* Sqrt to rsqrt conversion. */
25367 case BUILT_IN_SQRTF:
25368 return ix86_builtins[IX86_BUILTIN_RSQRTF];
25370 default:
25371 return NULL_TREE;
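/* Example (illustrative only): under -ffast-math -mrecip, source such as

     float r = 1.0f / sqrtf (x);

   lets the middle end ask for a reciprocal of BUILT_IN_SQRTF here; the
   returned IX86_BUILTIN_RSQRTF expands to an rsqrtss based sequence
   (refined by a Newton-Raphson step elsewhere in this port) instead of a
   much slower divide plus square root.  */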
25375 /* Store OPERAND to the memory after reload is completed. This means
25376 that we can't easily use assign_stack_local. */
25377 rtx
25378 ix86_force_to_memory (enum machine_mode mode, rtx operand)
25380 rtx result;
25382 gcc_assert (reload_completed);
25383 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
25385 result = gen_rtx_MEM (mode,
25386 gen_rtx_PLUS (Pmode,
25387 stack_pointer_rtx,
25388 GEN_INT (-RED_ZONE_SIZE)));
25389 emit_move_insn (result, operand);
25391 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
25393 switch (mode)
25395 case HImode:
25396 case SImode:
25397 operand = gen_lowpart (DImode, operand);
25398 /* FALLTHRU */
25399 case DImode:
25400 emit_insn (
25401 gen_rtx_SET (VOIDmode,
25402 gen_rtx_MEM (DImode,
25403 gen_rtx_PRE_DEC (DImode,
25404 stack_pointer_rtx)),
25405 operand));
25406 break;
25407 default:
25408 gcc_unreachable ();
25410 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25412 else
25414 switch (mode)
25416 case DImode:
25418 rtx operands[2];
25419 split_di (&operand, 1, operands, operands + 1);
25420 emit_insn (
25421 gen_rtx_SET (VOIDmode,
25422 gen_rtx_MEM (SImode,
25423 gen_rtx_PRE_DEC (Pmode,
25424 stack_pointer_rtx)),
25425 operands[1]));
25426 emit_insn (
25427 gen_rtx_SET (VOIDmode,
25428 gen_rtx_MEM (SImode,
25429 gen_rtx_PRE_DEC (Pmode,
25430 stack_pointer_rtx)),
25431 operands[0]));
25433 break;
25434 case HImode:
25435 /* Store HImodes as SImodes. */
25436 operand = gen_lowpart (SImode, operand);
25437 /* FALLTHRU */
25438 case SImode:
25439 emit_insn (
25440 gen_rtx_SET (VOIDmode,
25441 gen_rtx_MEM (GET_MODE (operand),
25442 gen_rtx_PRE_DEC (SImode,
25443 stack_pointer_rtx)),
25444 operand));
25445 break;
25446 default:
25447 gcc_unreachable ();
25449 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25451 return result;
25454 /* Free the operand from memory. */
25455 void
25456 ix86_free_from_memory (enum machine_mode mode)
25458 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
25460 int size;
25462 if (mode == DImode || TARGET_64BIT)
25463 size = 8;
25464 else
25465 size = 4;
25466 /* Use LEA to deallocate stack space. In peephole2 it will be converted
25467 to a pop or add instruction if registers are available. */
25468 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
25469 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25470 GEN_INT (size))));
25474 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25475 QImode must go into class Q_REGS.
25476 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
25477 movdf to do mem-to-mem moves through integer regs. */
25478 enum reg_class
25479 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25481 enum machine_mode mode = GET_MODE (x);
25483 /* We're only allowed to return a subclass of CLASS. Many of the
25484 following checks fail for NO_REGS, so eliminate that early. */
25485 if (regclass == NO_REGS)
25486 return NO_REGS;
25488 /* All classes can load zeros. */
25489 if (x == CONST0_RTX (mode))
25490 return regclass;
25492 /* Force constants into memory if we are loading a (nonzero) constant into
25493 an MMX or SSE register. This is because there are no MMX/SSE instructions
25494 to load from a constant. */
25495 if (CONSTANT_P (x)
25496 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25497 return NO_REGS;
25499 /* Prefer SSE regs only, if we can use them for math. */
25500 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25501 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25503 /* Floating-point constants need more complex checks. */
25504 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25506 /* General regs can load everything. */
25507 if (reg_class_subset_p (regclass, GENERAL_REGS))
25508 return regclass;
25510 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25511 zero above. We only want to wind up preferring 80387 registers if
25512 we plan on doing computation with them. */
25513 if (TARGET_80387
25514 && standard_80387_constant_p (x))
25516 /* Limit class to non-sse. */
25517 if (regclass == FLOAT_SSE_REGS)
25518 return FLOAT_REGS;
25519 if (regclass == FP_TOP_SSE_REGS)
25520 return FP_TOP_REG;
25521 if (regclass == FP_SECOND_SSE_REGS)
25522 return FP_SECOND_REG;
25523 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25524 return regclass;
25527 return NO_REGS;
25530 /* Generally when we see PLUS here, it's the function invariant
25531 (plus soft-fp const_int), which can only be computed into general
25532 regs. */
25533 if (GET_CODE (x) == PLUS)
25534 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25536 /* QImode constants are easy to load, but non-constant QImode data
25537 must go into Q_REGS. */
25538 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25540 if (reg_class_subset_p (regclass, Q_REGS))
25541 return regclass;
25542 if (reg_class_subset_p (Q_REGS, regclass))
25543 return Q_REGS;
25544 return NO_REGS;
25547 return regclass;
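/* Example (illustrative only): with SSE math enabled, the constant in

     double y = x + 2.5;

   hits the CONSTANT_P test above and gets NO_REGS for an SSE class,
   forcing 2.5 into the constant pool, since no SSE instruction can load
   an immediate.  */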
25550 /* Discourage putting floating-point values in SSE registers unless
25551 SSE math is being used, and likewise for the 387 registers. */
25552 enum reg_class
25553 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25555 enum machine_mode mode = GET_MODE (x);
25557 /* Restrict the output reload class to the register bank that we are doing
25558 math on. If we would like not to return a subset of CLASS, reject this
25559 alternative: if reload cannot do this, it will still use its choice. */
25560 mode = GET_MODE (x);
25561 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25562 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25564 if (X87_FLOAT_MODE_P (mode))
25566 if (regclass == FP_TOP_SSE_REGS)
25567 return FP_TOP_REG;
25568 else if (regclass == FP_SECOND_SSE_REGS)
25569 return FP_SECOND_REG;
25570 else
25571 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25574 return regclass;
25577 static enum reg_class
25578 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25579 enum machine_mode mode,
25580 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25582 /* QImode spills from non-QI registers require an
25583 intermediate register on 32-bit targets. */
25584 if (!in_p && mode == QImode && !TARGET_64BIT
25585 && (rclass == GENERAL_REGS
25586 || rclass == LEGACY_REGS
25587 || rclass == INDEX_REGS))
25589 int regno;
25591 if (REG_P (x))
25592 regno = REGNO (x);
25593 else
25594 regno = -1;
25596 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25597 regno = true_regnum (x);
25599 /* Return Q_REGS if the operand is in memory. */
25600 if (regno == -1)
25601 return Q_REGS;
25604 return NO_REGS;
25607 /* If we are copying between general and FP registers, we need a memory
25608 location. The same is true for SSE and MMX registers.
25610 To optimize register_move_cost performance, allow an inline variant.
25612 The macro can't work reliably when one of the CLASSES is a class containing
25613 registers from multiple units (SSE, MMX, integer). We avoid this by never
25614 combining those units in a single alternative in the machine description.
25615 Ensure that this constraint holds to avoid unexpected surprises.
25617 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25618 enforce these sanity checks. */
25620 static inline int
25621 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25622 enum machine_mode mode, int strict)
25624 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25625 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25626 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25627 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25628 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25629 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25631 gcc_assert (!strict);
25632 return true;
25635 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25636 return true;
25638 /* ??? This is a lie. We do have moves between mmx/general, and for
25639 mmx/sse2. But by saying we need secondary memory we discourage the
25640 register allocator from using the mmx registers unless needed. */
25641 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25642 return true;
25644 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25646 /* SSE1 doesn't have any direct moves from other classes. */
25647 if (!TARGET_SSE2)
25648 return true;
25650 /* If the target says that inter-unit moves are more expensive
25651 than moving through memory, then don't generate them. */
25652 if (!TARGET_INTER_UNIT_MOVES)
25653 return true;
25655 /* Between SSE and general, we have moves no larger than word size. */
25656 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25657 return true;
25660 return false;
25663 int
25664 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25665 enum machine_mode mode, int strict)
25667 return inline_secondary_memory_needed (class1, class2, mode, strict);
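/* Example (illustrative only): on a 32-bit target, a DImode move between
   SSE_REGS and GENERAL_REGS reports secondary memory, because
   GET_MODE_SIZE (DImode) exceeds UNITS_PER_WORD there; on x86_64, or for
   SImode, a direct movd/movq inter-unit move is allowed when
   TARGET_INTER_UNIT_MOVES is set.  */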
25670 /* Return true if the registers in CLASS cannot represent the change from
25671 modes FROM to TO. */
25673 bool
25674 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25675 enum reg_class regclass)
25677 if (from == to)
25678 return false;
25680 /* x87 registers can't do subreg at all, as all values are reformatted
25681 to extended precision. */
25682 if (MAYBE_FLOAT_CLASS_P (regclass))
25683 return true;
25685 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25687 /* Vector registers do not support QI or HImode loads. If we don't
25688 disallow a change to these modes, reload will assume it's ok to
25689 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25690 the vec_dupv4hi pattern. */
25691 if (GET_MODE_SIZE (from) < 4)
25692 return true;
25694 /* Vector registers do not support subreg with nonzero offsets, which
25695 are otherwise valid for integer registers. Since we can't see
25696 whether we have a nonzero offset from here, prohibit all
25697 nonparadoxical subregs changing size. */
25698 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25699 return true;
25702 return false;
25705 /* Return the cost of moving data of mode M between a
25706 register and memory. A value of 2 is the default; this cost is
25707 relative to those in `REGISTER_MOVE_COST'.
25709 This function is used extensively by register_move_cost that is used to
25710 build tables at startup. Make it inline in this case.
25711 When IN is 2, return the maximum of the in and out move costs.
25713 If moving between registers and memory is more expensive than
25714 between two registers, you should define this macro to express the
25715 relative cost.
25717 Also model the increased cost of moving QImode registers in non
25718 Q_REGS classes.
25719 */
25720 static inline int
25721 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25722 int in)
25724 int cost;
25725 if (FLOAT_CLASS_P (regclass))
25727 int index;
25728 switch (mode)
25730 case SFmode:
25731 index = 0;
25732 break;
25733 case DFmode:
25734 index = 1;
25735 break;
25736 case XFmode:
25737 index = 2;
25738 break;
25739 default:
25740 return 100;
25742 if (in == 2)
25743 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25744 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25746 if (SSE_CLASS_P (regclass))
25748 int index;
25749 switch (GET_MODE_SIZE (mode))
25751 case 4:
25752 index = 0;
25753 break;
25754 case 8:
25755 index = 1;
25756 break;
25757 case 16:
25758 index = 2;
25759 break;
25760 default:
25761 return 100;
25763 if (in == 2)
25764 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25765 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25767 if (MMX_CLASS_P (regclass))
25769 int index;
25770 switch (GET_MODE_SIZE (mode))
25772 case 4:
25773 index = 0;
25774 break;
25775 case 8:
25776 index = 1;
25777 break;
25778 default:
25779 return 100;
25781 if (in == 2)
25782 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25783 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25785 switch (GET_MODE_SIZE (mode))
25787 case 1:
25788 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25790 if (!in)
25791 return ix86_cost->int_store[0];
25792 if (TARGET_PARTIAL_REG_DEPENDENCY
25793 && optimize_function_for_speed_p (cfun))
25794 cost = ix86_cost->movzbl_load;
25795 else
25796 cost = ix86_cost->int_load[0];
25797 if (in == 2)
25798 return MAX (cost, ix86_cost->int_store[0]);
25799 return cost;
25801 else
25803 if (in == 2)
25804 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25805 if (in)
25806 return ix86_cost->movzbl_load;
25807 else
25808 return ix86_cost->int_store[0] + 4;
25810 break;
25811 case 2:
25812 if (in == 2)
25813 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25814 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25815 default:
25816 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
25817 if (mode == TFmode)
25818 mode = XFmode;
25819 if (in == 2)
25820 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25821 else if (in)
25822 cost = ix86_cost->int_load[2];
25823 else
25824 cost = ix86_cost->int_store[2];
25825 return (cost * (((int) GET_MODE_SIZE (mode)
25826 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25830 int
25831 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25833 return inline_memory_move_cost (mode, regclass, in);
25837 /* Return the cost of moving data from a register in class CLASS1 to
25838 one in class CLASS2.
25840 It is not required that the cost always equal 2 when FROM is the same as TO;
25841 on some machines it is expensive to move between registers if they are not
25842 general registers. */
25844 int
25845 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25846 enum reg_class class2)
25848 /* In case we require secondary memory, compute cost of the store followed
25849 by load. In order to avoid bad register allocation choices, we need
25850 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25852 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25854 int cost = 1;
25856 cost += inline_memory_move_cost (mode, class1, 2);
25857 cost += inline_memory_move_cost (mode, class2, 2);
25859 /* In case of copying from a general purpose register we may emit multiple
25860 stores followed by a single load, causing a memory size mismatch stall.
25861 Count this as an arbitrarily high cost of 20. */
25862 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25863 cost += 20;
25865 /* In the case of FP/MMX moves, the registers actually overlap, and we
25866 have to switch modes in order to treat them differently. */
25867 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25868 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25869 cost += 20;
25871 return cost;
25874 /* Moves between SSE/MMX and integer unit are expensive. */
25875 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25876 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25878 /* ??? By keeping returned value relatively high, we limit the number
25879 of moves between integer and MMX/SSE registers for all targets.
25880 Additionally, high value prevents problem with x86_modes_tieable_p(),
25881 where integer modes in MMX/SSE registers are not tieable
25882 because of missing QImode and HImode moves to, from or between
25883 MMX/SSE registers. */
25884 return MAX (8, ix86_cost->mmxsse_to_integer);
25886 if (MAYBE_FLOAT_CLASS_P (class1))
25887 return ix86_cost->fp_move;
25888 if (MAYBE_SSE_CLASS_P (class1))
25889 return ix86_cost->sse_move;
25890 if (MAYBE_MMX_CLASS_P (class1))
25891 return ix86_cost->mmx_move;
25892 return 2;
25895 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25897 bool
25898 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25900 /* Flags, and only flags, can hold CCmode values. */
25901 if (CC_REGNO_P (regno))
25902 return GET_MODE_CLASS (mode) == MODE_CC;
25903 if (GET_MODE_CLASS (mode) == MODE_CC
25904 || GET_MODE_CLASS (mode) == MODE_RANDOM
25905 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25906 return 0;
25907 if (FP_REGNO_P (regno))
25908 return VALID_FP_MODE_P (mode);
25909 if (SSE_REGNO_P (regno))
25911 /* We implement the move patterns for all vector modes into and
25912 out of SSE registers, even when no operation instructions
25913 are available. OImode move is available only when AVX is
25914 enabled. */
25915 return ((TARGET_AVX && mode == OImode)
25916 || VALID_AVX256_REG_MODE (mode)
25917 || VALID_SSE_REG_MODE (mode)
25918 || VALID_SSE2_REG_MODE (mode)
25919 || VALID_MMX_REG_MODE (mode)
25920 || VALID_MMX_REG_MODE_3DNOW (mode));
25922 if (MMX_REGNO_P (regno))
25924 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25925 so if the register is available at all, then we can move data of
25926 the given mode into or out of it. */
25927 return (VALID_MMX_REG_MODE (mode)
25928 || VALID_MMX_REG_MODE_3DNOW (mode));
25931 if (mode == QImode)
25933 /* Take care with QImode values - they can be in non-QI regs,
25934 but then they do cause partial register stalls. */
25935 if (regno <= BX_REG || TARGET_64BIT)
25936 return 1;
25937 if (!TARGET_PARTIAL_REG_STALL)
25938 return 1;
25939 return reload_in_progress || reload_completed;
25941 /* We handle both integers and floats in the general purpose registers. */
25942 else if (VALID_INT_MODE_P (mode))
25943 return 1;
25944 else if (VALID_FP_MODE_P (mode))
25945 return 1;
25946 else if (VALID_DFP_MODE_P (mode))
25947 return 1;
25948 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25949 on to use that value in smaller contexts, this can easily force a
25950 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25951 supporting DImode, allow it. */
25952 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25953 return 1;
25955 return 0;
25958 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25959 tieable integer mode. */
25961 static bool
25962 ix86_tieable_integer_mode_p (enum machine_mode mode)
25964 switch (mode)
25966 case HImode:
25967 case SImode:
25968 return true;
25970 case QImode:
25971 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25973 case DImode:
25974 return TARGET_64BIT;
25976 default:
25977 return false;
25981 /* Return true if MODE1 is accessible in a register that can hold MODE2
25982 without copying. That is, all register classes that can hold MODE2
25983 can also hold MODE1. */
25985 bool
25986 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25988 if (mode1 == mode2)
25989 return true;
25991 if (ix86_tieable_integer_mode_p (mode1)
25992 && ix86_tieable_integer_mode_p (mode2))
25993 return true;
25995 /* MODE2 being XFmode implies fp stack or general regs, which means we
25996 can tie any smaller floating point modes to it. Note that we do not
25997 tie this with TFmode. */
25998 if (mode2 == XFmode)
25999 return mode1 == SFmode || mode1 == DFmode;
26001 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
26002 that we can tie it with SFmode. */
26003 if (mode2 == DFmode)
26004 return mode1 == SFmode;
26006 /* If MODE2 is only appropriate for an SSE register, then tie with
26007 any other mode acceptable to SSE registers. */
26008 if (GET_MODE_SIZE (mode2) == 16
26009 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
26010 return (GET_MODE_SIZE (mode1) == 16
26011 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
26013 /* If MODE2 is appropriate for an MMX register, then tie
26014 with any other mode acceptable to MMX registers. */
26015 if (GET_MODE_SIZE (mode2) == 8
26016 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
26017 return (GET_MODE_SIZE (mode1) == 8
26018 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
26020 return false;
26023 /* Compute a (partial) cost for rtx X. Return true if the complete
26024 cost has been computed, and false if subexpressions should be
26025 scanned. In either case, *TOTAL contains the cost result. */
26027 static bool
26028 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
26030 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
26031 enum machine_mode mode = GET_MODE (x);
26032 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
26034 switch (code)
26036 case CONST_INT:
26037 case CONST:
26038 case LABEL_REF:
26039 case SYMBOL_REF:
26040 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
26041 *total = 3;
26042 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
26043 *total = 2;
26044 else if (flag_pic && SYMBOLIC_CONST (x)
26045 && (!TARGET_64BIT
26046 || (GET_CODE (x) != LABEL_REF
26047 && (GET_CODE (x) != SYMBOL_REF
26048 || !SYMBOL_REF_LOCAL_P (x)))))
26049 *total = 1;
26050 else
26051 *total = 0;
26052 return true;
26054 case CONST_DOUBLE:
26055 if (mode == VOIDmode)
26056 *total = 0;
26057 else
26058 switch (standard_80387_constant_p (x))
26060 case 1: /* 0.0 */
26061 *total = 1;
26062 break;
26063 default: /* Other constants */
26064 *total = 2;
26065 break;
26066 case 0:
26067 case -1:
26068 /* Start with (MEM (SYMBOL_REF)), since that's where
26069 it'll probably end up. Add a penalty for size. */
26070 *total = (COSTS_N_INSNS (1)
26071 + (flag_pic != 0 && !TARGET_64BIT)
26072 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
26073 break;
26075 return true;
26077 case ZERO_EXTEND:
26078 /* The zero extension is often completely free on x86_64, so make
26079 it as cheap as possible. */
26080 if (TARGET_64BIT && mode == DImode
26081 && GET_MODE (XEXP (x, 0)) == SImode)
26082 *total = 1;
26083 else if (TARGET_ZERO_EXTEND_WITH_AND)
26084 *total = cost->add;
26085 else
26086 *total = cost->movzx;
26087 return false;
26089 case SIGN_EXTEND:
26090 *total = cost->movsx;
26091 return false;
26093 case ASHIFT:
26094 if (CONST_INT_P (XEXP (x, 1))
26095 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
26097 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
26098 if (value == 1)
26100 *total = cost->add;
26101 return false;
26103 if ((value == 2 || value == 3)
26104 && cost->lea <= cost->shift_const)
26106 *total = cost->lea;
26107 return false;
26110 /* FALLTHRU */
26112 case ROTATE:
26113 case ASHIFTRT:
26114 case LSHIFTRT:
26115 case ROTATERT:
26116 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
26118 if (CONST_INT_P (XEXP (x, 1)))
26120 if (INTVAL (XEXP (x, 1)) > 32)
26121 *total = cost->shift_const + COSTS_N_INSNS (2);
26122 else
26123 *total = cost->shift_const * 2;
26125 else
26127 if (GET_CODE (XEXP (x, 1)) == AND)
26128 *total = cost->shift_var * 2;
26129 else
26130 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
26133 else
26135 if (CONST_INT_P (XEXP (x, 1)))
26136 *total = cost->shift_const;
26137 else
26138 *total = cost->shift_var;
26140 return false;
26142 case MULT:
26143 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26145 /* ??? SSE scalar cost should be used here. */
26146 *total = cost->fmul;
26147 return false;
26149 else if (X87_FLOAT_MODE_P (mode))
26151 *total = cost->fmul;
26152 return false;
26154 else if (FLOAT_MODE_P (mode))
26156 /* ??? SSE vector cost should be used here. */
26157 *total = cost->fmul;
26158 return false;
26160 else
26162 rtx op0 = XEXP (x, 0);
26163 rtx op1 = XEXP (x, 1);
26164 int nbits;
26165 if (CONST_INT_P (XEXP (x, 1)))
26167 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
26168 for (nbits = 0; value != 0; value &= value - 1)
26169 nbits++;
26171 else
26172 /* This is arbitrary. */
26173 nbits = 7;
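/* The CONST_INT loop above is Kernighan's population count: each
   "value &= value - 1" clears the lowest set bit.  For example, a
   multiply by 10 (binary 1010) gives nbits = 2, so the formula below
   charges two mult_bit steps for it.  */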
26175 /* Compute costs correctly for widening multiplication. */
26176 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
26177 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
26178 == GET_MODE_SIZE (mode))
26180 int is_mulwiden = 0;
26181 enum machine_mode inner_mode = GET_MODE (op0);
26183 if (GET_CODE (op0) == GET_CODE (op1))
26184 is_mulwiden = 1, op1 = XEXP (op1, 0);
26185 else if (CONST_INT_P (op1))
26187 if (GET_CODE (op0) == SIGN_EXTEND)
26188 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
26189 == INTVAL (op1);
26190 else
26191 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
26194 if (is_mulwiden)
26195 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
26198 *total = (cost->mult_init[MODE_INDEX (mode)]
26199 + nbits * cost->mult_bit
26200 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
26202 return true;
26205 case DIV:
26206 case UDIV:
26207 case MOD:
26208 case UMOD:
26209 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26210 /* ??? SSE cost should be used here. */
26211 *total = cost->fdiv;
26212 else if (X87_FLOAT_MODE_P (mode))
26213 *total = cost->fdiv;
26214 else if (FLOAT_MODE_P (mode))
26215 /* ??? SSE vector cost should be used here. */
26216 *total = cost->fdiv;
26217 else
26218 *total = cost->divide[MODE_INDEX (mode)];
26219 return false;
26221 case PLUS:
26222 if (GET_MODE_CLASS (mode) == MODE_INT
26223 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
26225 if (GET_CODE (XEXP (x, 0)) == PLUS
26226 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
26227 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
26228 && CONSTANT_P (XEXP (x, 1)))
26230 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
26231 if (val == 2 || val == 4 || val == 8)
26233 *total = cost->lea;
26234 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26235 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
26236 outer_code, speed);
26237 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26238 return true;
26241 else if (GET_CODE (XEXP (x, 0)) == MULT
26242 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
26244 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
26245 if (val == 2 || val == 4 || val == 8)
26247 *total = cost->lea;
26248 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26249 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26250 return true;
26253 else if (GET_CODE (XEXP (x, 0)) == PLUS)
26255 *total = cost->lea;
26256 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26257 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26258 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26259 return true;
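/* As an illustration, a + b*4 + 12 matches the first pattern above
   and can be emitted as a single "leal 12(%eax,%ebx,4), %ecx"
   (register names are illustrative), which is why only cost->lea
   plus the operand costs is charged for these shapes.  */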
26262 /* FALLTHRU */
26264 case MINUS:
26265 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26267 /* ??? SSE cost should be used here. */
26268 *total = cost->fadd;
26269 return false;
26271 else if (X87_FLOAT_MODE_P (mode))
26273 *total = cost->fadd;
26274 return false;
26276 else if (FLOAT_MODE_P (mode))
26278 /* ??? SSE vector cost should be used here. */
26279 *total = cost->fadd;
26280 return false;
26282 /* FALLTHRU */
26284 case AND:
26285 case IOR:
26286 case XOR:
26287 if (!TARGET_64BIT && mode == DImode)
26289 *total = (cost->add * 2
26290 + (rtx_cost (XEXP (x, 0), outer_code, speed)
26291 << (GET_MODE (XEXP (x, 0)) != DImode))
26292 + (rtx_cost (XEXP (x, 1), outer_code, speed)
26293 << (GET_MODE (XEXP (x, 1)) != DImode)));
26294 return true;
26296 /* FALLTHRU */
26298 case NEG:
26299 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26301 /* ??? SSE cost should be used here. */
26302 *total = cost->fchs;
26303 return false;
26305 else if (X87_FLOAT_MODE_P (mode))
26307 *total = cost->fchs;
26308 return false;
26310 else if (FLOAT_MODE_P (mode))
26312 /* ??? SSE vector cost should be used here. */
26313 *total = cost->fchs;
26314 return false;
26316 /* FALLTHRU */
26318 case NOT:
26319 if (!TARGET_64BIT && mode == DImode)
26320 *total = cost->add * 2;
26321 else
26322 *total = cost->add;
26323 return false;
26325 case COMPARE:
26326 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
26327 && XEXP (XEXP (x, 0), 1) == const1_rtx
26328 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
26329 && XEXP (x, 1) == const0_rtx)
26331 /* This kind of construct is implemented using test[bwl].
26332 Treat it as if we had an AND. */
26333 *total = (cost->add
26334 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
26335 + rtx_cost (const1_rtx, outer_code, speed));
26336 return true;
26338 return false;
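/* For example, a source-level test such as "if (x & (1 << 3))" can
   reach here as a compare of a one-bit ZERO_EXTRACT against zero and
   is emitted as a single "testb $8, ..."-style instruction, hence
   the AND-like costing above.  */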
26340 case FLOAT_EXTEND:
26341 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
26342 *total = 0;
26343 return false;
26345 case ABS:
26346 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26347 /* ??? SSE cost should be used here. */
26348 *total = cost->fabs;
26349 else if (X87_FLOAT_MODE_P (mode))
26350 *total = cost->fabs;
26351 else if (FLOAT_MODE_P (mode))
26352 /* ??? SSE vector cost should be used here. */
26353 *total = cost->fabs;
26354 return false;
26356 case SQRT:
26357 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26358 /* ??? SSE cost should be used here. */
26359 *total = cost->fsqrt;
26360 else if (X87_FLOAT_MODE_P (mode))
26361 *total = cost->fsqrt;
26362 else if (FLOAT_MODE_P (mode))
26363 /* ??? SSE vector cost should be used here. */
26364 *total = cost->fsqrt;
26365 return false;
26367 case UNSPEC:
26368 if (XINT (x, 1) == UNSPEC_TP)
26369 *total = 0;
26370 return false;
26372 default:
26373 return false;
26377 #if TARGET_MACHO
26379 static int current_machopic_label_num;
26381 /* Given a symbol name and its associated stub, write out the
26382 definition of the stub. */
26384 void
26385 machopic_output_stub (FILE *file, const char *symb, const char *stub)
26387 unsigned int length;
26388 char *binder_name, *symbol_name, lazy_ptr_name[32];
26389 int label = ++current_machopic_label_num;
26391 /* For 64-bit we shouldn't get here. */
26392 gcc_assert (!TARGET_64BIT);
26394 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
26395 symb = (*targetm.strip_name_encoding) (symb);
26397 length = strlen (stub);
26398 binder_name = XALLOCAVEC (char, length + 32);
26399 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26401 length = strlen (symb);
26402 symbol_name = XALLOCAVEC (char, length + 32);
26403 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26405 sprintf (lazy_ptr_name, "L%d$lz", label);
26407 if (MACHOPIC_PURE)
26408 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26409 else
26410 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26412 fprintf (file, "%s:\n", stub);
26413 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26415 if (MACHOPIC_PURE)
26417 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26418 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26419 fprintf (file, "\tjmp\t*%%edx\n");
26421 else
26422 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26424 fprintf (file, "%s:\n", binder_name);
26426 if (MACHOPIC_PURE)
26428 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26429 fprintf (file, "\tpushl\t%%eax\n");
26431 else
26432 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26434 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
26436 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26437 fprintf (file, "%s:\n", lazy_ptr_name);
26438 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26439 fprintf (file, "\t.long %s\n", binder_name);
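/* For reference, in the MACHOPIC_PURE case the fprintf calls above
   emit a stub of roughly this shape (all names are illustrative):

       L_foo$stub:
               .indirect_symbol _foo
               call    LPC$1
       LPC$1:  popl    %eax
               movl    L1$lz-LPC$1(%eax),%edx
               jmp     *%edx
       L_foo$stub_binder:
               lea     L1$lz-LPC$1(%eax),%eax
               pushl   %eax
               jmp     dyld_stub_binding_helper
       L1$lz:
               .indirect_symbol _foo
               .long   L_foo$stub_binder  */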
26442 void
26443 darwin_x86_file_end (void)
26445 darwin_file_end ();
26446 ix86_file_end ();
26448 #endif /* TARGET_MACHO */
26450 /* Order the registers for register allocator. */
26452 void
26453 x86_order_regs_for_local_alloc (void)
26455 int pos = 0;
26456 int i;
26458 /* First allocate the local general purpose registers. */
26459 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26460 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26461 reg_alloc_order [pos++] = i;
26463 /* Global general purpose registers. */
26464 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26465 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26466 reg_alloc_order [pos++] = i;
26468 /* x87 registers come first in case we are doing FP math
26469 using them. */
26470 if (!TARGET_SSE_MATH)
26471 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26472 reg_alloc_order [pos++] = i;
26474 /* SSE registers. */
26475 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26476 reg_alloc_order [pos++] = i;
26477 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26478 reg_alloc_order [pos++] = i;
26480 /* x87 registers. */
26481 if (TARGET_SSE_MATH)
26482 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26483 reg_alloc_order [pos++] = i;
26485 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26486 reg_alloc_order [pos++] = i;
26488 /* Initialize the rest of the array, as we do not allocate some registers
26489 at all. */
26490 while (pos < FIRST_PSEUDO_REGISTER)
26491 reg_alloc_order [pos++] = 0;
26494 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
26495 struct attribute_spec.handler. */
26496 static tree
26497 ix86_handle_abi_attribute (tree *node, tree name,
26498 tree args ATTRIBUTE_UNUSED,
26499 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26501 if (TREE_CODE (*node) != FUNCTION_TYPE
26502 && TREE_CODE (*node) != METHOD_TYPE
26503 && TREE_CODE (*node) != FIELD_DECL
26504 && TREE_CODE (*node) != TYPE_DECL)
26506 warning (OPT_Wattributes, "%qs attribute only applies to functions",
26507 IDENTIFIER_POINTER (name));
26508 *no_add_attrs = true;
26509 return NULL_TREE;
26511 if (!TARGET_64BIT)
26513 warning (OPT_Wattributes, "%qs attribute only available for 64-bit",
26514 IDENTIFIER_POINTER (name));
26515 *no_add_attrs = true;
26516 return NULL_TREE;
26519 /* The ms_abi and sysv_abi attributes are mutually exclusive. */
26520 if (is_attribute_p ("ms_abi", name))
26522 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26524 error ("ms_abi and sysv_abi attributes are not compatible");
26527 return NULL_TREE;
26529 else if (is_attribute_p ("sysv_abi", name))
26531 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26533 error ("ms_abi and sysv_abi attributes are not compatible");
26536 return NULL_TREE;
26539 return NULL_TREE;
26542 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26543 struct attribute_spec.handler. */
26544 static tree
26545 ix86_handle_struct_attribute (tree *node, tree name,
26546 tree args ATTRIBUTE_UNUSED,
26547 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26549 tree *type = NULL;
26550 if (DECL_P (*node))
26552 if (TREE_CODE (*node) == TYPE_DECL)
26553 type = &TREE_TYPE (*node);
26555 else
26556 type = node;
26558 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26559 || TREE_CODE (*type) == UNION_TYPE)))
26561 warning (OPT_Wattributes, "%qs attribute ignored",
26562 IDENTIFIER_POINTER (name));
26563 *no_add_attrs = true;
26566 else if ((is_attribute_p ("ms_struct", name)
26567 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26568 || ((is_attribute_p ("gcc_struct", name)
26569 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26571 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
26572 IDENTIFIER_POINTER (name));
26573 *no_add_attrs = true;
26576 return NULL_TREE;
26579 static bool
26580 ix86_ms_bitfield_layout_p (const_tree record_type)
26582 return (TARGET_MS_BITFIELD_LAYOUT &&
26583 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26584 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26587 /* Returns an expression indicating where the this parameter is
26588 located on entry to the FUNCTION. */
26590 static rtx
26591 x86_this_parameter (tree function)
26593 tree type = TREE_TYPE (function);
26594 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26595 int nregs;
26597 if (TARGET_64BIT)
26599 const int *parm_regs;
26601 if (ix86_function_type_abi (type) == MS_ABI)
26602 parm_regs = x86_64_ms_abi_int_parameter_registers;
26603 else
26604 parm_regs = x86_64_int_parameter_registers;
26605 return gen_rtx_REG (DImode, parm_regs[aggr]);
26608 nregs = ix86_function_regparm (type, function);
26610 if (nregs > 0 && !stdarg_p (type))
26612 int regno;
26614 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26615 regno = aggr ? DX_REG : CX_REG;
26616 else
26618 regno = AX_REG;
26619 if (aggr)
26621 regno = DX_REG;
26622 if (nregs == 1)
26623 return gen_rtx_MEM (SImode,
26624 plus_constant (stack_pointer_rtx, 4));
26627 return gen_rtx_REG (SImode, regno);
26630 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26633 /* Determine whether x86_output_mi_thunk can succeed. */
26635 static bool
26636 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26637 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26638 HOST_WIDE_INT vcall_offset, const_tree function)
26640 /* 64-bit can handle anything. */
26641 if (TARGET_64BIT)
26642 return true;
26644 /* For 32-bit, everything's fine if we have one free register. */
26645 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26646 return true;
26648 /* Need a free register for vcall_offset. */
26649 if (vcall_offset)
26650 return false;
26652 /* Need a free register for GOT references. */
26653 if (flag_pic && !(*targetm.binds_local_p) (function))
26654 return false;
26656 /* Otherwise ok. */
26657 return true;
26660 /* Output the assembler code for a thunk function. THUNK_DECL is the
26661 declaration for the thunk function itself, FUNCTION is the decl for
26662 the target function. DELTA is an immediate constant offset to be
26663 added to THIS. If VCALL_OFFSET is nonzero, the word at
26664 *(*this + vcall_offset) should be added to THIS. */
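/* A concrete sketch (numbers illustrative only): for a C++ class D
   with two bases, the thunk for a virtual function inherited from the
   second base receives THIS pointing at the D object, adds DELTA
   (say 8) to reach the base subobject, and, when a virtual base is
   involved, additionally loads *(*this + vcall_offset) and adds that
   too before tail-calling FUNCTION.  */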
26666 static void
26667 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
26668 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26669 HOST_WIDE_INT vcall_offset, tree function)
26671 rtx xops[3];
26672 rtx this_param = x86_this_parameter (function);
26673 rtx this_reg, tmp;
26675 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26676 pull it in now and let DELTA benefit. */
26677 if (REG_P (this_param))
26678 this_reg = this_param;
26679 else if (vcall_offset)
26681 /* Put the this parameter into %eax. */
26682 xops[0] = this_param;
26683 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26684 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26686 else
26687 this_reg = NULL_RTX;
26689 /* Adjust the this parameter by a fixed constant. */
26690 if (delta)
26692 xops[0] = GEN_INT (delta);
26693 xops[1] = this_reg ? this_reg : this_param;
26694 if (TARGET_64BIT)
26696 if (!x86_64_general_operand (xops[0], DImode))
26698 tmp = gen_rtx_REG (DImode, R10_REG);
26699 xops[1] = tmp;
26700 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26701 xops[0] = tmp;
26702 xops[1] = this_param;
26704 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26706 else
26707 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26710 /* Adjust the this parameter by a value stored in the vtable. */
26711 if (vcall_offset)
26713 if (TARGET_64BIT)
26714 tmp = gen_rtx_REG (DImode, R10_REG);
26715 else
26717 int tmp_regno = CX_REG;
26718 if (lookup_attribute ("fastcall",
26719 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26720 tmp_regno = AX_REG;
26721 tmp = gen_rtx_REG (SImode, tmp_regno);
26724 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26725 xops[1] = tmp;
26726 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26728 /* Adjust the this parameter. */
26729 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26730 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26732 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26733 xops[0] = GEN_INT (vcall_offset);
26734 xops[1] = tmp2;
26735 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26736 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26738 xops[1] = this_reg;
26739 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26742 /* If necessary, drop THIS back to its stack slot. */
26743 if (this_reg && this_reg != this_param)
26745 xops[0] = this_reg;
26746 xops[1] = this_param;
26747 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26750 xops[0] = XEXP (DECL_RTL (function), 0);
26751 if (TARGET_64BIT)
26753 if (!flag_pic || (*targetm.binds_local_p) (function))
26754 output_asm_insn ("jmp\t%P0", xops);
26755 /* All thunks should be in the same object as their target,
26756 and thus binds_local_p should be true. */
26757 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26758 gcc_unreachable ();
26759 else
26761 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26762 tmp = gen_rtx_CONST (Pmode, tmp);
26763 tmp = gen_rtx_MEM (QImode, tmp);
26764 xops[0] = tmp;
26765 output_asm_insn ("jmp\t%A0", xops);
26768 else
26770 if (!flag_pic || (*targetm.binds_local_p) (function))
26771 output_asm_insn ("jmp\t%P0", xops);
26772 else
26773 #if TARGET_MACHO
26774 if (TARGET_MACHO)
26776 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26777 tmp = (gen_rtx_SYMBOL_REF
26778 (Pmode,
26779 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26780 tmp = gen_rtx_MEM (QImode, tmp);
26781 xops[0] = tmp;
26782 output_asm_insn ("jmp\t%0", xops);
26784 else
26785 #endif /* TARGET_MACHO */
26787 tmp = gen_rtx_REG (SImode, CX_REG);
26788 output_set_got (tmp, NULL_RTX);
26790 xops[1] = tmp;
26791 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26792 output_asm_insn ("jmp\t{*}%1", xops);
26797 static void
26798 x86_file_start (void)
26800 default_file_start ();
26801 #if TARGET_MACHO
26802 darwin_file_start ();
26803 #endif
26804 if (X86_FILE_START_VERSION_DIRECTIVE)
26805 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26806 if (X86_FILE_START_FLTUSED)
26807 fputs ("\t.global\t__fltused\n", asm_out_file);
26808 if (ix86_asm_dialect == ASM_INTEL)
26809 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26813 x86_field_alignment (tree field, int computed)
26815 enum machine_mode mode;
26816 tree type = TREE_TYPE (field);
26818 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26819 return computed;
26820 mode = TYPE_MODE (strip_array_types (type));
26821 if (mode == DFmode || mode == DCmode
26822 || GET_MODE_CLASS (mode) == MODE_INT
26823 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26824 return MIN (32, computed);
26825 return computed;
26828 /* Output assembler code to FILE to increment profiler label # LABELNO
26829 for profiling a function entry. */
26830 void
26831 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26833 if (TARGET_64BIT)
26835 #ifndef NO_PROFILE_COUNTERS
26836 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
26837 #endif
26839 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26840 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
26841 else
26842 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
26844 else if (flag_pic)
26846 #ifndef NO_PROFILE_COUNTERS
26847 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
26848 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
26849 #endif
26850 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
26852 else
26854 #ifndef NO_PROFILE_COUNTERS
26855 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
26856 PROFILE_COUNT_REGISTER);
26857 #endif
26858 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
26862 /* We don't have exact information about the insn sizes, but we may assume
26863 quite safely that we are informed about all 1 byte insns and memory
26864 address sizes. This is enough to eliminate unnecessary padding in
26865 99% of cases. */
26867 static int
26868 min_insn_size (rtx insn)
26870 int l = 0;
26872 if (!INSN_P (insn) || !active_insn_p (insn))
26873 return 0;
26875 /* Discard the alignments we've emitted, and jump instructions. */
26876 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26877 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26878 return 0;
26879 if (JUMP_P (insn)
26880 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
26881 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
26882 return 0;
26884 /* Important case - calls are always 5 bytes.
26885 It is common to have many calls in a row. */
26886 if (CALL_P (insn)
26887 && symbolic_reference_mentioned_p (PATTERN (insn))
26888 && !SIBLING_CALL_P (insn))
26889 return 5;
26890 if (get_attr_length (insn) <= 1)
26891 return 1;
26893 /* For normal instructions we may rely on the sizes of addresses
26894 and the presence of a symbol to require 4 bytes of encoding.
26895 This is not the case for jumps, where references are PC relative. */
26896 if (!JUMP_P (insn))
26898 l = get_attr_length_address (insn);
26899 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26900 l = 4;
26902 if (l)
26903 return 1+l;
26904 else
26905 return 2;
26908 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
26909 window. */
26911 static void
26912 ix86_avoid_jump_misspredicts (void)
26914 rtx insn, start = get_insns ();
26915 int nbytes = 0, njumps = 0;
26916 int isjump = 0;
26918 /* Look for all minimal intervals of instructions containing 4 jumps.
26919 The intervals are bounded by START and INSN. NBYTES is the total
26920 size of instructions in the interval including INSN and not including
26921 START. When NBYTES is smaller than 16 bytes, it is possible
26922 that the end of START and INSN ends up in the same 16byte page.
26924 The smallest offset in the page at which INSN can start is the case where
26925 START ends on offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
26926 We add a p2align to the 16byte window with maxskip 15 - NBYTES + sizeof (INSN). */
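/* Worked example: if the instructions from START to INSN total
   NBYTES = 12, of which INSN itself is 2 bytes, the code below emits
   a pad of 15 - 12 + 2 = 5 bytes, pushing INSN far enough that at
   most three jumps share any 16-byte window.  */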
26928 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
26931 nbytes += min_insn_size (insn);
26932 if (dump_file)
26933 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
26934 INSN_UID (insn), min_insn_size (insn));
26935 if ((JUMP_P (insn)
26936 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26937 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26938 || CALL_P (insn))
26939 njumps++;
26940 else
26941 continue;
26943 while (njumps > 3)
26945 start = NEXT_INSN (start);
26946 if ((JUMP_P (start)
26947 && GET_CODE (PATTERN (start)) != ADDR_VEC
26948 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26949 || CALL_P (start))
26950 njumps--, isjump = 1;
26951 else
26952 isjump = 0;
26953 nbytes -= min_insn_size (start);
26955 gcc_assert (njumps >= 0);
26956 if (dump_file)
26957 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26958 INSN_UID (start), INSN_UID (insn), nbytes);
26960 if (njumps == 3 && isjump && nbytes < 16)
26962 int padsize = 15 - nbytes + min_insn_size (insn);
26964 if (dump_file)
26965 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26966 INSN_UID (insn), padsize);
26967 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
26972 /* AMD Athlon works faster
26973 when RET is not the destination of a conditional jump or directly preceded
26974 by another jump instruction. We avoid the penalty by inserting a NOP just
26975 before the RET instruction in such cases. */
26976 static void
26977 ix86_pad_returns (void)
26979 edge e;
26980 edge_iterator ei;
26982 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26984 basic_block bb = e->src;
26985 rtx ret = BB_END (bb);
26986 rtx prev;
26987 bool replace = false;
26989 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26990 || optimize_bb_for_size_p (bb))
26991 continue;
26992 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26993 if (active_insn_p (prev) || LABEL_P (prev))
26994 break;
26995 if (prev && LABEL_P (prev))
26997 edge e;
26998 edge_iterator ei;
27000 FOR_EACH_EDGE (e, ei, bb->preds)
27001 if (EDGE_FREQUENCY (e) && e->src->index >= 0
27002 && !(e->flags & EDGE_FALLTHRU))
27003 replace = true;
27005 if (!replace)
27007 prev = prev_active_insn (ret);
27008 if (prev
27009 && ((JUMP_P (prev) && any_condjump_p (prev))
27010 || CALL_P (prev)))
27011 replace = true;
27012 /* Empty functions get a branch mispredict even when the jump destination
27013 is not visible to us. */
27014 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
27015 replace = true;
27017 if (replace)
27019 emit_insn_before (gen_return_internal_long (), ret);
27020 delete_insn (ret);
27025 /* Implement machine specific optimizations. We implement padding of returns
27026 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
27027 static void
27028 ix86_reorg (void)
27030 if (TARGET_PAD_RETURNS && optimize
27031 && optimize_function_for_speed_p (cfun))
27032 ix86_pad_returns ();
27033 if (TARGET_FOUR_JUMP_LIMIT && optimize
27034 && optimize_function_for_speed_p (cfun))
27035 ix86_avoid_jump_misspredicts ();
27038 /* Return nonzero when a QImode register that must be represented via a REX
27039 prefix is used. */
27040 bool
27041 x86_extended_QIreg_mentioned_p (rtx insn)
27043 int i;
27044 extract_insn_cached (insn);
27045 for (i = 0; i < recog_data.n_operands; i++)
27046 if (REG_P (recog_data.operand[i])
27047 && REGNO (recog_data.operand[i]) > BX_REG)
27048 return true;
27049 return false;
27052 /* Return nonzero when P points to a register encoded via a REX prefix.
27053 Called via for_each_rtx. */
27054 static int
27055 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
27057 unsigned int regno;
27058 if (!REG_P (*p))
27059 return 0;
27060 regno = REGNO (*p);
27061 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
27064 /* Return true when INSN mentions a register that must be encoded using a REX
27065 prefix. */
27066 bool
27067 x86_extended_reg_mentioned_p (rtx insn)
27069 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
27070 extended_reg_mentioned_1, NULL);
27073 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
27074 optabs would emit if we didn't have TFmode patterns. */
27076 void
27077 x86_emit_floatuns (rtx operands[2])
27079 rtx neglab, donelab, i0, i1, f0, in, out;
27080 enum machine_mode mode, inmode;
27082 inmode = GET_MODE (operands[1]);
27083 gcc_assert (inmode == SImode || inmode == DImode);
27085 out = operands[0];
27086 in = force_reg (inmode, operands[1]);
27087 mode = GET_MODE (out);
27088 neglab = gen_label_rtx ();
27089 donelab = gen_label_rtx ();
27090 f0 = gen_reg_rtx (mode);
27092 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
27094 expand_float (out, in, 0);
27096 emit_jump_insn (gen_jump (donelab));
27097 emit_barrier ();
27099 emit_label (neglab);
27101 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
27102 1, OPTAB_DIRECT);
27103 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
27104 1, OPTAB_DIRECT);
27105 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
27107 expand_float (f0, i0, 0);
27109 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
27111 emit_label (donelab);
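/* Worked example of the negative path: for the SImode input
   0xFFFFFFFE (4294967294, which tests negative as signed), the code
   computes i0 = (in >> 1) | (in & 1) = 0x7FFFFFFF, converts that now
   positive value to FP, and doubles it, giving 4294967294.0.  ORing
   the low bit back in keeps the final rounding correct for odd
   inputs.  */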
27114 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27115 with all elements equal to VAR. Return true if successful. */
27117 static bool
27118 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
27119 rtx target, rtx val)
27121 enum machine_mode hmode, smode, wsmode, wvmode;
27122 rtx x;
27124 switch (mode)
27126 case V2SImode:
27127 case V2SFmode:
27128 if (!mmx_ok)
27129 return false;
27130 /* FALLTHRU */
27132 case V2DFmode:
27133 case V2DImode:
27134 case V4SFmode:
27135 case V4SImode:
27136 val = force_reg (GET_MODE_INNER (mode), val);
27137 x = gen_rtx_VEC_DUPLICATE (mode, val);
27138 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27139 return true;
27141 case V4HImode:
27142 if (!mmx_ok)
27143 return false;
27144 if (TARGET_SSE || TARGET_3DNOW_A)
27146 val = gen_lowpart (SImode, val);
27147 x = gen_rtx_TRUNCATE (HImode, val);
27148 x = gen_rtx_VEC_DUPLICATE (mode, x);
27149 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27150 return true;
27152 else
27154 smode = HImode;
27155 wsmode = SImode;
27156 wvmode = V2SImode;
27157 goto widen;
27160 case V8QImode:
27161 if (!mmx_ok)
27162 return false;
27163 smode = QImode;
27164 wsmode = HImode;
27165 wvmode = V4HImode;
27166 goto widen;
27167 case V8HImode:
27168 if (TARGET_SSE2)
27170 rtx tmp1, tmp2;
27171 /* Extend HImode to SImode using a paradoxical SUBREG. */
27172 tmp1 = gen_reg_rtx (SImode);
27173 emit_move_insn (tmp1, gen_lowpart (SImode, val));
27174 /* Insert the SImode value as low element of V4SImode vector. */
27175 tmp2 = gen_reg_rtx (V4SImode);
27176 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
27177 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
27178 CONST0_RTX (V4SImode),
27179 const1_rtx);
27180 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
27181 /* Cast the V4SImode vector back to a V8HImode vector. */
27182 tmp1 = gen_reg_rtx (V8HImode);
27183 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
27184 /* Duplicate the low short through the whole low SImode word. */
27185 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
27186 /* Cast the V8HImode vector back to a V4SImode vector. */
27187 tmp2 = gen_reg_rtx (V4SImode);
27188 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
27189 /* Replicate the low element of the V4SImode vector. */
27190 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
27191 /* Cast the V4SImode vector back to V8HImode, and store in target. */
27192 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
27193 return true;
27195 smode = HImode;
27196 wsmode = SImode;
27197 wvmode = V4SImode;
27198 goto widen;
27199 case V16QImode:
27200 if (TARGET_SSE2)
27202 rtx tmp1, tmp2;
27203 /* Extend QImode to SImode using a paradoxical SUBREG. */
27204 tmp1 = gen_reg_rtx (SImode);
27205 emit_move_insn (tmp1, gen_lowpart (SImode, val));
27206 /* Insert the SImode value as low element of V4SImode vector. */
27207 tmp2 = gen_reg_rtx (V4SImode);
27208 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
27209 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
27210 CONST0_RTX (V4SImode),
27211 const1_rtx);
27212 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
27213 /* Cast the V4SImode vector back to a V16QImode vector. */
27214 tmp1 = gen_reg_rtx (V16QImode);
27215 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
27216 /* Duplicate the low byte through the whole low SImode word. */
27217 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
27218 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
27219 /* Cast the V16QImode vector back to a V4SImode vector. */
27220 tmp2 = gen_reg_rtx (V4SImode);
27221 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
27222 /* Replicate the low element of the V4SImode vector. */
27223 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
27224 /* Cast the V4SImode vector back to V16QImode, and store in target. */
27225 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
27226 return true;
27228 smode = QImode;
27229 wsmode = HImode;
27230 wvmode = V8HImode;
27231 goto widen;
27232 widen:
27233 /* Replicate the value once into the next wider mode and recurse. */
27234 val = convert_modes (wsmode, smode, val, true);
27235 x = expand_simple_binop (wsmode, ASHIFT, val,
27236 GEN_INT (GET_MODE_BITSIZE (smode)),
27237 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27238 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27240 x = gen_reg_rtx (wvmode);
27241 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
27242 gcc_unreachable ();
27243 emit_move_insn (target, gen_lowpart (mode, x));
27244 return true;
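/* For example, duplicating the QImode value 0xAB into V8QImode first
   widens it to the HImode value 0xABAB ((0xAB << 8) | 0xAB), then
   recurses to duplicate that into V4HImode, whose bit pattern is
   exactly the desired V8QImode vector.  */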
27246 case V4DFmode:
27247 hmode = V2DFmode;
27248 goto half;
27249 case V4DImode:
27250 hmode = V2DImode;
27251 goto half;
27252 case V8SFmode:
27253 hmode = V4SFmode;
27254 goto half;
27255 case V8SImode:
27256 hmode = V4SImode;
27257 goto half;
27258 case V16HImode:
27259 hmode = V8HImode;
27260 goto half;
27261 case V32QImode:
27262 hmode = V16QImode;
27263 goto half;
27264 half:
27266 rtx tmp = gen_reg_rtx (hmode);
27267 ix86_expand_vector_init_duplicate (mmx_ok, hmode, tmp, val);
27268 emit_insn (gen_rtx_SET (VOIDmode, target,
27269 gen_rtx_VEC_CONCAT (mode, tmp, tmp)));
27271 return true;
27273 default:
27274 return false;
27278 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27279 whose ONE_VAR element is VAR, and other elements are zero. Return true
27280 if successful. */
27282 static bool
27283 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27284 rtx target, rtx var, int one_var)
27286 enum machine_mode vsimode;
27287 rtx new_target;
27288 rtx x, tmp;
27289 bool use_vector_set = false;
27291 switch (mode)
27293 case V2DImode:
27294 /* For SSE4.1, we normally use vector set. But if the second
27295 element is zero and inter-unit moves are OK, we use movq
27296 instead. */
27297 use_vector_set = (TARGET_64BIT
27298 && TARGET_SSE4_1
27299 && !(TARGET_INTER_UNIT_MOVES
27300 && one_var == 0));
27301 break;
27302 case V16QImode:
27303 case V4SImode:
27304 case V4SFmode:
27305 use_vector_set = TARGET_SSE4_1;
27306 break;
27307 case V8HImode:
27308 use_vector_set = TARGET_SSE2;
27309 break;
27310 case V4HImode:
27311 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27312 break;
27313 case V32QImode:
27314 case V16HImode:
27315 case V8SImode:
27316 case V8SFmode:
27317 case V4DFmode:
27318 use_vector_set = TARGET_AVX;
27319 break;
27320 case V4DImode:
27321 /* Use ix86_expand_vector_set in 64bit mode only. */
27322 use_vector_set = TARGET_AVX && TARGET_64BIT;
27323 break;
27324 default:
27325 break;
27328 if (use_vector_set)
27330 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27331 var = force_reg (GET_MODE_INNER (mode), var);
27332 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27333 return true;
27336 switch (mode)
27338 case V2SFmode:
27339 case V2SImode:
27340 if (!mmx_ok)
27341 return false;
27342 /* FALLTHRU */
27344 case V2DFmode:
27345 case V2DImode:
27346 if (one_var != 0)
27347 return false;
27348 var = force_reg (GET_MODE_INNER (mode), var);
27349 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27350 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27351 return true;
27353 case V4SFmode:
27354 case V4SImode:
27355 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27356 new_target = gen_reg_rtx (mode);
27357 else
27358 new_target = target;
27359 var = force_reg (GET_MODE_INNER (mode), var);
27360 x = gen_rtx_VEC_DUPLICATE (mode, var);
27361 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27362 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27363 if (one_var != 0)
27365 /* We need to shuffle the value to the correct position, so
27366 create a new pseudo to store the intermediate result. */
27368 /* With SSE2, we can use the integer shuffle insns. */
27369 if (mode != V4SFmode && TARGET_SSE2)
27371 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27372 GEN_INT (1),
27373 GEN_INT (one_var == 1 ? 0 : 1),
27374 GEN_INT (one_var == 2 ? 0 : 1),
27375 GEN_INT (one_var == 3 ? 0 : 1)));
27376 if (target != new_target)
27377 emit_move_insn (target, new_target);
27378 return true;
27381 /* Otherwise convert the intermediate result to V4SFmode and
27382 use the SSE1 shuffle instructions. */
27383 if (mode != V4SFmode)
27385 tmp = gen_reg_rtx (V4SFmode);
27386 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27388 else
27389 tmp = new_target;
27391 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27392 GEN_INT (1),
27393 GEN_INT (one_var == 1 ? 0 : 1),
27394 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27395 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27397 if (mode != V4SFmode)
27398 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27399 else if (tmp != target)
27400 emit_move_insn (target, tmp);
27402 else if (target != new_target)
27403 emit_move_insn (target, new_target);
27404 return true;
27406 case V8HImode:
27407 case V16QImode:
27408 vsimode = V4SImode;
27409 goto widen;
27410 case V4HImode:
27411 case V8QImode:
27412 if (!mmx_ok)
27413 return false;
27414 vsimode = V2SImode;
27415 goto widen;
27416 widen:
27417 if (one_var != 0)
27418 return false;
27420 /* Zero extend the variable element to SImode and recurse. */
27421 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27423 x = gen_reg_rtx (vsimode);
27424 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27425 var, one_var))
27426 gcc_unreachable ();
27428 emit_move_insn (target, gen_lowpart (mode, x));
27429 return true;
27431 default:
27432 return false;
27436 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27437 consisting of the values in VALS. It is known that all elements
27438 except ONE_VAR are constants. Return true if successful. */
27440 static bool
27441 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27442 rtx target, rtx vals, int one_var)
27444 rtx var = XVECEXP (vals, 0, one_var);
27445 enum machine_mode wmode;
27446 rtx const_vec, x;
27448 const_vec = copy_rtx (vals);
27449 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27450 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27452 switch (mode)
27454 case V2DFmode:
27455 case V2DImode:
27456 case V2SFmode:
27457 case V2SImode:
27458 /* For the two element vectors, it's just as easy to use
27459 the general case. */
27460 return false;
27462 case V4DImode:
27463 /* Use ix86_expand_vector_set in 64bit mode only. */
27464 if (!TARGET_64BIT)
27465 return false;
27466 case V4DFmode:
27467 case V8SFmode:
27468 case V8SImode:
27469 case V16HImode:
27470 case V32QImode:
27471 case V4SFmode:
27472 case V4SImode:
27473 case V8HImode:
27474 case V4HImode:
27475 break;
27477 case V16QImode:
27478 if (TARGET_SSE4_1)
27479 break;
27480 wmode = V8HImode;
27481 goto widen;
27482 case V8QImode:
27483 wmode = V4HImode;
27484 goto widen;
27485 widen:
27486 /* There's no way to set one QImode entry easily. Combine
27487 the variable value with its adjacent constant value, and
27488 promote to an HImode set. */
27489 x = XVECEXP (vals, 0, one_var ^ 1);
27490 if (one_var & 1)
27492 var = convert_modes (HImode, QImode, var, true);
27493 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27494 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27495 x = GEN_INT (INTVAL (x) & 0xff);
27497 else
27499 var = convert_modes (HImode, QImode, var, true);
27500 x = gen_int_mode (INTVAL (x) << 8, HImode);
27502 if (x != const0_rtx)
27503 var = expand_simple_binop (HImode, IOR, var, x, var,
27504 1, OPTAB_LIB_WIDEN);
27506 x = gen_reg_rtx (wmode);
27507 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27508 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27510 emit_move_insn (target, gen_lowpart (mode, x));
27511 return true;
27513 default:
27514 return false;
27517 emit_move_insn (target, const_vec);
27518 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27519 return true;
27522 /* A subroutine of ix86_expand_vector_init_general. Use vector
27523 concatenate to handle the most general case: all values variable,
27524 and none identical. */
27526 static void
27527 ix86_expand_vector_init_concat (enum machine_mode mode,
27528 rtx target, rtx *ops, int n)
27530 enum machine_mode cmode, hmode = VOIDmode;
27531 rtx first[8], second[4];
27532 rtvec v;
27533 int i, j;
27535 switch (n)
27537 case 2:
27538 switch (mode)
27540 case V8SImode:
27541 cmode = V4SImode;
27542 break;
27543 case V8SFmode:
27544 cmode = V4SFmode;
27545 break;
27546 case V4DImode:
27547 cmode = V2DImode;
27548 break;
27549 case V4DFmode:
27550 cmode = V2DFmode;
27551 break;
27552 case V4SImode:
27553 cmode = V2SImode;
27554 break;
27555 case V4SFmode:
27556 cmode = V2SFmode;
27557 break;
27558 case V2DImode:
27559 cmode = DImode;
27560 break;
27561 case V2SImode:
27562 cmode = SImode;
27563 break;
27564 case V2DFmode:
27565 cmode = DFmode;
27566 break;
27567 case V2SFmode:
27568 cmode = SFmode;
27569 break;
27570 default:
27571 gcc_unreachable ();
27574 if (!register_operand (ops[1], cmode))
27575 ops[1] = force_reg (cmode, ops[1]);
27576 if (!register_operand (ops[0], cmode))
27577 ops[0] = force_reg (cmode, ops[0]);
27578 emit_insn (gen_rtx_SET (VOIDmode, target,
27579 gen_rtx_VEC_CONCAT (mode, ops[0],
27580 ops[1])));
27581 break;
27583 case 4:
27584 switch (mode)
27586 case V4DImode:
27587 cmode = V2DImode;
27588 break;
27589 case V4DFmode:
27590 cmode = V2DFmode;
27591 break;
27592 case V4SImode:
27593 cmode = V2SImode;
27594 break;
27595 case V4SFmode:
27596 cmode = V2SFmode;
27597 break;
27598 default:
27599 gcc_unreachable ();
27601 goto half;
27603 case 8:
27604 switch (mode)
27606 case V8SImode:
27607 cmode = V2SImode;
27608 hmode = V4SImode;
27609 break;
27610 case V8SFmode:
27611 cmode = V2SFmode;
27612 hmode = V4SFmode;
27613 break;
27614 default:
27615 gcc_unreachable ();
27617 goto half;
27619 half:
27620 /* FIXME: We process inputs backward to help RA. PR 36222. */
27621 i = n - 1;
27622 j = (n >> 1) - 1;
27623 for (; i > 0; i -= 2, j--)
27625 first[j] = gen_reg_rtx (cmode);
27626 v = gen_rtvec (2, ops[i - 1], ops[i]);
27627 ix86_expand_vector_init (false, first[j],
27628 gen_rtx_PARALLEL (cmode, v));
27631 n >>= 1;
27632 if (n > 2)
27634 gcc_assert (hmode != VOIDmode);
27635 for (i = j = 0; i < n; i += 2, j++)
27637 second[j] = gen_reg_rtx (hmode);
27638 ix86_expand_vector_init_concat (hmode, second [j],
27639 &first [i], 2);
27641 n >>= 1;
27642 ix86_expand_vector_init_concat (mode, target, second, n);
27644 else
27645 ix86_expand_vector_init_concat (mode, target, first, n);
27646 break;
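/* Sketch of the recursion for n == 8 in V8SFmode: the loop above
   pairs the eight scalars (back to front) into four V2SFmode
   registers, the n > 2 branch concatenates those into two V4SFmode
   halves, and the final recursive call concatenates the halves into
   the V8SFmode target.  */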
27648 default:
27649 gcc_unreachable ();
27653 /* A subroutine of ix86_expand_vector_init_general. Use vector
27654 interleave to handle the most general case: all values variable,
27655 and none identical. */
27657 static void
27658 ix86_expand_vector_init_interleave (enum machine_mode mode,
27659 rtx target, rtx *ops, int n)
27661 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27662 int i, j;
27663 rtx op0, op1;
27664 rtx (*gen_load_even) (rtx, rtx, rtx);
27665 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27666 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27668 switch (mode)
27670 case V8HImode:
27671 gen_load_even = gen_vec_setv8hi;
27672 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27673 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27674 inner_mode = HImode;
27675 first_imode = V4SImode;
27676 second_imode = V2DImode;
27677 third_imode = VOIDmode;
27678 break;
27679 case V16QImode:
27680 gen_load_even = gen_vec_setv16qi;
27681 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27682 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27683 inner_mode = QImode;
27684 first_imode = V8HImode;
27685 second_imode = V4SImode;
27686 third_imode = V2DImode;
27687 break;
27688 default:
27689 gcc_unreachable ();
27692 for (i = 0; i < n; i++)
27694 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27695 op0 = gen_reg_rtx (SImode);
27696 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27698 /* Insert the SImode value as low element of V4SImode vector. */
27699 op1 = gen_reg_rtx (V4SImode);
27700 op0 = gen_rtx_VEC_MERGE (V4SImode,
27701 gen_rtx_VEC_DUPLICATE (V4SImode,
27702 op0),
27703 CONST0_RTX (V4SImode),
27704 const1_rtx);
27705 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27707 /* Cast the V4SImode vector back to a vector in the original mode. */
27708 op0 = gen_reg_rtx (mode);
27709 emit_move_insn (op0, gen_lowpart (mode, op1));
27711 /* Load even elements into the second position. */
27712 emit_insn ((*gen_load_even) (op0,
27713 force_reg (inner_mode,
27714 ops [i + i + 1]),
27715 const1_rtx));
27717 /* Cast vector to FIRST_IMODE vector. */
27718 ops[i] = gen_reg_rtx (first_imode);
27719 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27722 /* Interleave low FIRST_IMODE vectors. */
27723 for (i = j = 0; i < n; i += 2, j++)
27725 op0 = gen_reg_rtx (first_imode);
27726 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27728 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27729 ops[j] = gen_reg_rtx (second_imode);
27730 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27733 /* Interleave low SECOND_IMODE vectors. */
27734 switch (second_imode)
27736 case V4SImode:
27737 for (i = j = 0; i < n / 2; i += 2, j++)
27739 op0 = gen_reg_rtx (second_imode);
27740 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27741 ops[i + 1]));
27743 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27744 vector. */
27745 ops[j] = gen_reg_rtx (third_imode);
27746 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27748 second_imode = V2DImode;
27749 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27750 /* FALLTHRU */
27752 case V2DImode:
27753 op0 = gen_reg_rtx (second_imode);
27754 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27755 ops[1]));
27757 /* Cast the SECOND_IMODE vector back to a vector in the original
27758 mode. */
27759 emit_insn (gen_rtx_SET (VOIDmode, target,
27760 gen_lowpart (mode, op0)));
27761 break;
27763 default:
27764 gcc_unreachable ();
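/* Illustrative trace for V16QImode with elements e0..e15: the first
   loop builds eight V8HImode registers whose element 0 holds the
   pairs (e0,e1), (e2,e3), ..., and the interleave steps then merge
   them through V4SImode and V2DImode until a single register carries
   e0..e15 in order.  */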
27768 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27769 all values variable, and none identical. */
27771 static void
27772 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27773 rtx target, rtx vals)
27775 rtx ops[32], op0, op1;
27776 enum machine_mode half_mode = VOIDmode;
27777 int n, i;
27779 switch (mode)
27781 case V2SFmode:
27782 case V2SImode:
27783 if (!mmx_ok && !TARGET_SSE)
27784 break;
27785 /* FALLTHRU */
27787 case V8SFmode:
27788 case V8SImode:
27789 case V4DFmode:
27790 case V4DImode:
27791 case V4SFmode:
27792 case V4SImode:
27793 case V2DFmode:
27794 case V2DImode:
27795 n = GET_MODE_NUNITS (mode);
27796 for (i = 0; i < n; i++)
27797 ops[i] = XVECEXP (vals, 0, i);
27798 ix86_expand_vector_init_concat (mode, target, ops, n);
27799 return;
27801 case V32QImode:
27802 half_mode = V16QImode;
27803 goto half;
27805 case V16HImode:
27806 half_mode = V8HImode;
27807 goto half;
27809 half:
27810 n = GET_MODE_NUNITS (mode);
27811 for (i = 0; i < n; i++)
27812 ops[i] = XVECEXP (vals, 0, i);
27813 op0 = gen_reg_rtx (half_mode);
27814 op1 = gen_reg_rtx (half_mode);
27815 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27816 n >> 2);
27817 ix86_expand_vector_init_interleave (half_mode, op1,
27818 &ops [n >> 1], n >> 2);
27819 emit_insn (gen_rtx_SET (VOIDmode, target,
27820 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27821 return;
27823 case V16QImode:
27824 if (!TARGET_SSE4_1)
27825 break;
27826 /* FALLTHRU */
27828 case V8HImode:
27829 if (!TARGET_SSE2)
27830 break;
27832 /* Don't use ix86_expand_vector_init_interleave if we can't
27833 move from GPR to SSE register directly. */
27834 if (!TARGET_INTER_UNIT_MOVES)
27835 break;
27837 n = GET_MODE_NUNITS (mode);
27838 for (i = 0; i < n; i++)
27839 ops[i] = XVECEXP (vals, 0, i);
27840 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27841 return;
27843 case V4HImode:
27844 case V8QImode:
27845 break;
27847 default:
27848 gcc_unreachable ();
27852 int i, j, n_elts, n_words, n_elt_per_word;
27853 enum machine_mode inner_mode;
27854 rtx words[4], shift;
27856 inner_mode = GET_MODE_INNER (mode);
27857 n_elts = GET_MODE_NUNITS (mode);
27858 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27859 n_elt_per_word = n_elts / n_words;
27860 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27862 for (i = 0; i < n_words; ++i)
27864 rtx word = NULL_RTX;
27866 for (j = 0; j < n_elt_per_word; ++j)
27868 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27869 elt = convert_modes (word_mode, inner_mode, elt, true);
27871 if (j == 0)
27872 word = elt;
27873 else
27875 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27876 word, 1, OPTAB_LIB_WIDEN);
27877 word = expand_simple_binop (word_mode, IOR, word, elt,
27878 word, 1, OPTAB_LIB_WIDEN);
27882 words[i] = word;
27885 if (n_words == 1)
27886 emit_move_insn (target, gen_lowpart (mode, words[0]));
27887 else if (n_words == 2)
27889 rtx tmp = gen_reg_rtx (mode);
27890 emit_clobber (tmp);
27891 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27892 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27893 emit_move_insn (target, tmp);
27895 else if (n_words == 4)
27897 rtx tmp = gen_reg_rtx (V4SImode);
27898 gcc_assert (word_mode == SImode);
27899 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27900 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27901 emit_move_insn (target, gen_lowpart (mode, tmp));
27903 else
27904 gcc_unreachable ();
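/* Worked example: initializing V4HImode {1, 2, 3, 4} on a 32-bit
   target packs two HImode elements per SImode word, so words[0]
   becomes (2 << 16) | 1 and words[1] becomes (4 << 16) | 3; the
   n_words == 2 branch then assembles the vector register from those
   two halves.  */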
27908 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27909 instructions unless MMX_OK is true. */
27911 void
27912 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27914 enum machine_mode mode = GET_MODE (target);
27915 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27916 int n_elts = GET_MODE_NUNITS (mode);
27917 int n_var = 0, one_var = -1;
27918 bool all_same = true, all_const_zero = true;
27919 int i;
27920 rtx x;
27922 for (i = 0; i < n_elts; ++i)
27924 x = XVECEXP (vals, 0, i);
27925 if (!(CONST_INT_P (x)
27926 || GET_CODE (x) == CONST_DOUBLE
27927 || GET_CODE (x) == CONST_FIXED))
27928 n_var++, one_var = i;
27929 else if (x != CONST0_RTX (inner_mode))
27930 all_const_zero = false;
27931 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27932 all_same = false;
27935 /* Constants are best loaded from the constant pool. */
27936 if (n_var == 0)
27938 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27939 return;
27942 /* If all values are identical, broadcast the value. */
27943 if (all_same
27944 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27945 XVECEXP (vals, 0, 0)))
27946 return;
27948 /* Values where only one field is non-constant are best loaded from
27949 the pool and overwritten via move later. */
27950 if (n_var == 1)
27952 if (all_const_zero
27953 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27954 XVECEXP (vals, 0, one_var),
27955 one_var))
27956 return;
27958 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27959 return;
27962 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27965 void
27966 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27968 enum machine_mode mode = GET_MODE (target);
27969 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27970 enum machine_mode half_mode;
27971 bool use_vec_merge = false;
27972 rtx tmp;
27973 static rtx (*gen_extract[6][2]) (rtx, rtx)
27975 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27976 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27977 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27978 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27979 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27980 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27982 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27984 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27985 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27986 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27987 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27988 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27989 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27991 int i, j, n;
27993 switch (mode)
27995 case V2SFmode:
27996 case V2SImode:
27997 if (mmx_ok)
27999 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
28000 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
28001 if (elt == 0)
28002 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
28003 else
28004 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
28005 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28006 return;
28008 break;
28010 case V2DImode:
28011 use_vec_merge = TARGET_SSE4_1;
28012 if (use_vec_merge)
28013 break;
28015 case V2DFmode:
28017 rtx op0, op1;
28019 /* For the two element vectors, we implement a VEC_CONCAT with
28020 the extraction of the other element. */
28022 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
28023 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
28025 if (elt == 0)
28026 op0 = val, op1 = tmp;
28027 else
28028 op0 = tmp, op1 = val;
28030 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
28031 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28033 return;
28035 case V4SFmode:
28036 use_vec_merge = TARGET_SSE4_1;
28037 if (use_vec_merge)
28038 break;
28040 switch (elt)
28042 case 0:
28043 use_vec_merge = true;
28044 break;
28046 case 1:
28047 /* tmp = target = A B C D */
28048 tmp = copy_to_reg (target);
28049 /* target = A A B B */
28050 emit_insn (gen_sse_unpcklps (target, target, target));
28051 /* target = X A B B */
28052 ix86_expand_vector_set (false, target, val, 0);
28053 /* target = A X C D */
28054 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
28055 GEN_INT (1), GEN_INT (0),
28056 GEN_INT (2+4), GEN_INT (3+4)));
28057 return;
28059 case 2:
28060 /* tmp = target = A B C D */
28061 tmp = copy_to_reg (target);
28062 /* tmp = X B C D */
28063 ix86_expand_vector_set (false, tmp, val, 0);
28064 /* target = A B X D */
28065 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
28066 GEN_INT (0), GEN_INT (1),
28067 GEN_INT (0+4), GEN_INT (3+4)));
28068 return;
28070 case 3:
28071 /* tmp = target = A B C D */
28072 tmp = copy_to_reg (target);
28073 /* tmp = X B C D */
28074 ix86_expand_vector_set (false, tmp, val, 0);
28075 /* target = A B C X */
28076 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
28077 GEN_INT (0), GEN_INT (1),
28078 GEN_INT (2+4), GEN_INT (0+4)));
28079 return;
28081 default:
28082 gcc_unreachable ();
28084 break;
28086 case V4SImode:
28087 use_vec_merge = TARGET_SSE4_1;
28088 if (use_vec_merge)
28089 break;
28091 /* Element 0 handled by vec_merge below. */
28092 if (elt == 0)
28094 use_vec_merge = true;
28095 break;
28098 if (TARGET_SSE2)
28100 /* With SSE2, use integer shuffles to swap element 0 and ELT,
28101 store into element 0, then shuffle them back. */
28103 rtx order[4];
28105 order[0] = GEN_INT (elt);
28106 order[1] = const1_rtx;
28107 order[2] = const2_rtx;
28108 order[3] = GEN_INT (3);
28109 order[elt] = const0_rtx;
28111 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28112 order[1], order[2], order[3]));
28114 ix86_expand_vector_set (false, target, val, 0);
28116 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28117 order[1], order[2], order[3]));
28119 else
28121 /* For SSE1, we have to reuse the V4SF code. */
28122 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
28123 gen_lowpart (SFmode, val), elt);
28125 return;
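/* Worked trace of the pshufd swap above (added for illustration;
   assume ELT == 2 and target = {A, B, C, D}): order becomes
   {2, 1, 0, 3}, a permutation that exchanges lanes 0 and 2, giving
   {C, B, A, D}.  Inserting VAL into lane 0 yields {X, B, A, D}, and
   applying the same permutation once more swaps the lanes back,
   leaving {A, B, X, D}.  */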
28127 case V8HImode:
28128 use_vec_merge = TARGET_SSE2;
28129 break;
28130 case V4HImode:
28131 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28132 break;
28134 case V16QImode:
28135 use_vec_merge = TARGET_SSE4_1;
28136 break;
28138 case V8QImode:
28139 break;
28141 case V32QImode:
28142 half_mode = V16QImode;
28143 j = 0;
28144 n = 16;
28145 goto half;
28147 case V16HImode:
28148 half_mode = V8HImode;
28149 j = 1;
28150 n = 8;
28151 goto half;
28153 case V8SImode:
28154 half_mode = V4SImode;
28155 j = 2;
28156 n = 4;
28157 goto half;
28159 case V4DImode:
28160 half_mode = V2DImode;
28161 j = 3;
28162 n = 2;
28163 goto half;
28165 case V8SFmode:
28166 half_mode = V4SFmode;
28167 j = 4;
28168 n = 4;
28169 goto half;
28171 case V4DFmode:
28172 half_mode = V2DFmode;
28173 j = 5;
28174 n = 2;
28175 goto half;
28177 half:
28178 /* Compute offset. */
28179 i = elt / n;
28180 elt %= n;
28182 gcc_assert (i <= 1);
28184 /* Extract the half. */
28185 tmp = gen_reg_rtx (half_mode);
28186 emit_insn ((*gen_extract[j][i]) (tmp, target));
28188 /* Put val in tmp at elt. */
28189 ix86_expand_vector_set (false, tmp, val, elt);
28191 /* Put it back. */
28192 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
28193 return;
28195 default:
28196 break;
28199 if (use_vec_merge)
28201 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
28202 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
28203 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28205 else
28207 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28209 emit_move_insn (mem, target);
28211 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28212 emit_move_insn (tmp, val);
28214 emit_move_insn (target, mem);
28218 void
28219 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
28221 enum machine_mode mode = GET_MODE (vec);
28222 enum machine_mode inner_mode = GET_MODE_INNER (mode);
28223 bool use_vec_extr = false;
28224 rtx tmp;
28226 switch (mode)
28228 case V2SImode:
28229 case V2SFmode:
28230 if (!mmx_ok)
28231 break;
28232 /* FALLTHRU */
28234 case V2DFmode:
28235 case V2DImode:
28236 use_vec_extr = true;
28237 break;
28239 case V4SFmode:
28240 use_vec_extr = TARGET_SSE4_1;
28241 if (use_vec_extr)
28242 break;
28244 switch (elt)
28246 case 0:
28247 tmp = vec;
28248 break;
28250 case 1:
28251 case 3:
28252 tmp = gen_reg_rtx (mode);
28253 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28254 GEN_INT (elt), GEN_INT (elt),
28255 GEN_INT (elt+4), GEN_INT (elt+4)));
28256 break;
28258 case 2:
28259 tmp = gen_reg_rtx (mode);
28260 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
28261 break;
28263 default:
28264 gcc_unreachable ();
28266 vec = tmp;
28267 use_vec_extr = true;
28268 elt = 0;
28269 break;
28271 case V4SImode:
28272 use_vec_extr = TARGET_SSE4_1;
28273 if (use_vec_extr)
28274 break;
28276 if (TARGET_SSE2)
28278 switch (elt)
28280 case 0:
28281 tmp = vec;
28282 break;
28284 case 1:
28285 case 3:
28286 tmp = gen_reg_rtx (mode);
28287 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28288 GEN_INT (elt), GEN_INT (elt),
28289 GEN_INT (elt), GEN_INT (elt)));
28290 break;
28292 case 2:
28293 tmp = gen_reg_rtx (mode);
28294 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
28295 break;
28297 default:
28298 gcc_unreachable ();
28300 vec = tmp;
28301 use_vec_extr = true;
28302 elt = 0;
28304 else
28306 /* For SSE1, we have to reuse the V4SF code. */
28307 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28308 gen_lowpart (V4SFmode, vec), elt);
28309 return;
28311 break;
28313 case V8HImode:
28314 use_vec_extr = TARGET_SSE2;
28315 break;
28316 case V4HImode:
28317 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28318 break;
28320 case V16QImode:
28321 use_vec_extr = TARGET_SSE4_1;
28322 break;
28324 case V8QImode:
28325 /* ??? Could extract the appropriate HImode element and shift. */
28326 default:
28327 break;
28330 if (use_vec_extr)
28332 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28333 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28335 /* Let the rtl optimizers know about the zero extension performed. */
28336 if (inner_mode == QImode || inner_mode == HImode)
28338 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28339 target = gen_lowpart (SImode, target);
28342 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28344 else
28346 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28348 emit_move_insn (mem, vec);
28350 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28351 emit_move_insn (target, tmp);
28355 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
28356 pattern to reduce; DEST is the destination; IN is the input vector. */
28358 void
28359 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28361 rtx tmp1, tmp2, tmp3;
28363 tmp1 = gen_reg_rtx (V4SFmode);
28364 tmp2 = gen_reg_rtx (V4SFmode);
28365 tmp3 = gen_reg_rtx (V4SFmode);
28367 emit_insn (gen_sse_movhlps (tmp1, in, in));
28368 emit_insn (fn (tmp2, tmp1, in));
28370 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28371 GEN_INT (1), GEN_INT (1),
28372 GEN_INT (1+4), GEN_INT (1+4)));
28373 emit_insn (fn (dest, tmp2, tmp3));
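/* Scalar sketch of the reduction above (illustration only; assumes FN
   is addition and in = {a0, a1, a2, a3}): movhlps forms
   {a2, a3, a2, a3}, the first FN yields {a0+a2, a1+a3, ...}, shufps
   broadcasts lane 1, and the second FN leaves the full reduction in
   lane 0.  */

static float
reduc_plus_v4sf_sketch (const float in[4])
{
  float lo = in[0] + in[2];	/* movhlps + fn: lanes 0 and 2 */
  float hi = in[1] + in[3];	/* lanes 1 and 3 */
  return lo + hi;		/* shufps + fn: result in lane 0 */
}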
28376 /* Target hook for scalar_mode_supported_p. */
28377 static bool
28378 ix86_scalar_mode_supported_p (enum machine_mode mode)
28380 if (DECIMAL_FLOAT_MODE_P (mode))
28381 return true;
28382 else if (mode == TFmode)
28383 return true;
28384 else
28385 return default_scalar_mode_supported_p (mode);
28388 /* Implements target hook vector_mode_supported_p. */
28389 static bool
28390 ix86_vector_mode_supported_p (enum machine_mode mode)
28392 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28393 return true;
28394 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28395 return true;
28396 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28397 return true;
28398 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28399 return true;
28400 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28401 return true;
28402 return false;
28405 /* Target hook for c_mode_for_suffix. */
28406 static enum machine_mode
28407 ix86_c_mode_for_suffix (char suffix)
28409 if (suffix == 'q')
28410 return TFmode;
28411 if (suffix == 'w')
28412 return XFmode;
28414 return VOIDmode;
28417 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28419 We do this in the new i386 backend to maintain source compatibility
28420 with the old cc0-based compiler. */
28422 static tree
28423 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28424 tree inputs ATTRIBUTE_UNUSED,
28425 tree clobbers)
28427 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28428 clobbers);
28429 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28430 clobbers);
28431 return clobbers;
28434 /* Implements the target hook targetm.asm.encode_section_info. This
28435    is not used by NetWare. */
28437 static void ATTRIBUTE_UNUSED
28438 ix86_encode_section_info (tree decl, rtx rtl, int first)
28440 default_encode_section_info (decl, rtl, first);
28442 if (TREE_CODE (decl) == VAR_DECL
28443 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28444 && ix86_in_large_data_p (decl))
28445 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28448 /* Worker function for REVERSE_CONDITION. */
28450 enum rtx_code
28451 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28453 return (mode != CCFPmode && mode != CCFPUmode
28454 ? reverse_condition (code)
28455 : reverse_condition_maybe_unordered (code));
28458 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28459 to OPERANDS[0]. */
28461 const char *
28462 output_387_reg_move (rtx insn, rtx *operands)
28464 if (REG_P (operands[0]))
28466 if (REG_P (operands[1])
28467 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28469 if (REGNO (operands[0]) == FIRST_STACK_REG)
28470 return output_387_ffreep (operands, 0);
28471 return "fstp\t%y0";
28473 if (STACK_TOP_P (operands[0]))
28474 return "fld%z1\t%y1";
28475 return "fst\t%y0";
28477 else if (MEM_P (operands[0]))
28479 gcc_assert (REG_P (operands[1]));
28480 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28481 return "fstp%z0\t%y0";
28482 else
28484 /* There is no non-popping store to memory for XFmode.
28485 So if we need one, follow the store with a load. */
28486 if (GET_MODE (operands[0]) == XFmode)
28487 return "fstp%z0\t%y0\n\tfld%z0\t%y0";
28488 else
28489 return "fst%z0\t%y0";
28492 else
28493 gcc_unreachable();
28496 /* Output code to perform a conditional jump to LABEL, if the C2 flag
28497    in the FP status register is set. */
28499 void
28500 ix86_emit_fp_unordered_jump (rtx label)
28502 rtx reg = gen_reg_rtx (HImode);
28503 rtx temp;
28505 emit_insn (gen_x86_fnstsw_1 (reg));
28507 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28509 emit_insn (gen_x86_sahf_1 (reg));
28511 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28512 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28514 else
28516 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28518 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28519 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28522 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28523 gen_rtx_LABEL_REF (VOIDmode, label),
28524 pc_rtx);
28525 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28527 emit_jump_insn (temp);
28528 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28531 /* Output code to perform a log1p XFmode calculation. */
28533 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28535 rtx label1 = gen_label_rtx ();
28536 rtx label2 = gen_label_rtx ();
28538 rtx tmp = gen_reg_rtx (XFmode);
28539 rtx tmp2 = gen_reg_rtx (XFmode);
28541 emit_insn (gen_absxf2 (tmp, op1));
28542 emit_insn (gen_cmpxf (tmp,
28543 CONST_DOUBLE_FROM_REAL_VALUE (
28544 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28545 XFmode)));
28546 emit_jump_insn (gen_bge (label1));
28548 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28549 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28550 emit_jump (label2);
28552 emit_label (label1);
28553 emit_move_insn (tmp, CONST1_RTX (XFmode));
28554 emit_insn (gen_addxf3 (tmp, op1, tmp));
28555 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28556 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28558 emit_label (label2);
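/* Why the 0.29289... threshold (explanatory note, not original code):
   fyl2xp1 computes y * log2 (x + 1) but is only specified for
   |x| < 1 - sqrt(2)/2 ~= 0.2928932...; with y = ln(2) (fldln2) it
   yields log1p (x) directly and accurately for small x.  Outside that
   range the identity log1p (x) = ln(2) * log2 (1 + x) is used instead,
   via fyl2x on the explicitly computed 1 + x.  */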
28561 /* Output code to perform a Newton-Raphson approximation of a single precision
28562    floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28564 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28566 rtx x0, x1, e0, e1, two;
28568 x0 = gen_reg_rtx (mode);
28569 e0 = gen_reg_rtx (mode);
28570 e1 = gen_reg_rtx (mode);
28571 x1 = gen_reg_rtx (mode);
28573 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28575 if (VECTOR_MODE_P (mode))
28576 two = ix86_build_const_vector (SFmode, true, two);
28578 two = force_reg (mode, two);
28580 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28582 /* x0 = rcp(b) estimate */
28583 emit_insn (gen_rtx_SET (VOIDmode, x0,
28584 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28585 UNSPEC_RCP)));
28586 /* e0 = x0 * b */
28587 emit_insn (gen_rtx_SET (VOIDmode, e0,
28588 gen_rtx_MULT (mode, x0, b)));
28589 /* e1 = 2. - e0 */
28590 emit_insn (gen_rtx_SET (VOIDmode, e1,
28591 gen_rtx_MINUS (mode, two, e0)));
28592 /* x1 = x0 * e1 */
28593 emit_insn (gen_rtx_SET (VOIDmode, x1,
28594 gen_rtx_MULT (mode, x0, e1)));
28595 /* res = a * x1 */
28596 emit_insn (gen_rtx_SET (VOIDmode, res,
28597 gen_rtx_MULT (mode, a, x1)));
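/* Scalar sketch of the refinement above (illustration, not original
   code; rcp_estimate stands in for the ~12-bit RCPSS approximation of
   1/b): one Newton-Raphson step x1 = x0 * (2 - b * x0) roughly doubles
   the number of correct bits in the reciprocal.  */

static float
swdiv_sketch (float a, float b, float rcp_estimate)
{
  float x0 = rcp_estimate;	/* x0 ~= 1/b */
  float e0 = x0 * b;		/* b * rcp(b) */
  float e1 = 2.0f - e0;		/* 2.0 - b * rcp(b) */
  float x1 = x0 * e1;		/* refined reciprocal */
  return a * x1;		/* a / b */
}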
28600 /* Output code to perform a Newton-Raphson approximation of a
28601    single precision floating point [reciprocal] square root. */
28603 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28604 bool recip)
28606 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28607 REAL_VALUE_TYPE r;
28609 x0 = gen_reg_rtx (mode);
28610 e0 = gen_reg_rtx (mode);
28611 e1 = gen_reg_rtx (mode);
28612 e2 = gen_reg_rtx (mode);
28613 e3 = gen_reg_rtx (mode);
28615 real_from_integer (&r, VOIDmode, -3, -1, 0);
28616 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28618 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28619 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28621 if (VECTOR_MODE_P (mode))
28623 mthree = ix86_build_const_vector (SFmode, true, mthree);
28624 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28627 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28628 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28630 /* x0 = rsqrt(a) estimate */
28631 emit_insn (gen_rtx_SET (VOIDmode, x0,
28632 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28633 UNSPEC_RSQRT)));
28635 /* If a == 0.0, filter out the infinite rsqrt estimate to prevent NaN for sqrt (0.0). */
28636 if (!recip)
28638 rtx zero, mask;
28640 zero = gen_reg_rtx (mode);
28641 mask = gen_reg_rtx (mode);
28643 zero = force_reg (mode, CONST0_RTX(mode));
28644 emit_insn (gen_rtx_SET (VOIDmode, mask,
28645 gen_rtx_NE (mode, zero, a)));
28647 emit_insn (gen_rtx_SET (VOIDmode, x0,
28648 gen_rtx_AND (mode, x0, mask)));
28651 /* e0 = x0 * a */
28652 emit_insn (gen_rtx_SET (VOIDmode, e0,
28653 gen_rtx_MULT (mode, x0, a)));
28654 /* e1 = e0 * x0 */
28655 emit_insn (gen_rtx_SET (VOIDmode, e1,
28656 gen_rtx_MULT (mode, e0, x0)));
28658 /* e2 = e1 - 3. */
28659 mthree = force_reg (mode, mthree);
28660 emit_insn (gen_rtx_SET (VOIDmode, e2,
28661 gen_rtx_PLUS (mode, e1, mthree)));
28663 mhalf = force_reg (mode, mhalf);
28664 if (recip)
28665 /* e3 = -.5 * x0 */
28666 emit_insn (gen_rtx_SET (VOIDmode, e3,
28667 gen_rtx_MULT (mode, x0, mhalf)));
28668 else
28669 /* e3 = -.5 * e0 */
28670 emit_insn (gen_rtx_SET (VOIDmode, e3,
28671 gen_rtx_MULT (mode, e0, mhalf)));
28672 /* ret = e2 * e3 */
28673 emit_insn (gen_rtx_SET (VOIDmode, res,
28674 gen_rtx_MULT (mode, e2, e3)));
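/* Scalar sketch of the sequence above (illustration, not original
   code; rsqrt_estimate stands in for the RSQRTSS approximation of
   1/sqrt(a)).  Both results are one Newton-Raphson step:
   x1 = -0.5 * x0 * (a * x0 * x0 - 3).  */

static float
swsqrt_sketch (float a, float rsqrt_estimate, int recip)
{
  float x0 = rsqrt_estimate;	/* x0 ~= 1/sqrt(a) */
  float e0 = x0 * a;		/* a * x0 */
  float e1 = e0 * x0;		/* a * x0 * x0 */
  float e2 = e1 - 3.0f;		/* a * x0 * x0 - 3 */
  float e3 = (recip ? x0 : e0) * -0.5f;
  return e2 * e3;		/* 1/sqrt(a), or sqrt(a) == a/sqrt(a) */
}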
28677 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28679 static void ATTRIBUTE_UNUSED
28680 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28681 tree decl)
28683 /* With Binutils 2.15, the "@unwind" marker must be specified on
28684 every occurrence of the ".eh_frame" section, not just the first
28685 one. */
28686 if (TARGET_64BIT
28687 && strcmp (name, ".eh_frame") == 0)
28689 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28690 flags & SECTION_WRITE ? "aw" : "a");
28691 return;
28693 default_elf_asm_named_section (name, flags, decl);
28696 /* Return the mangling of TYPE if it is an extended fundamental type. */
28698 static const char *
28699 ix86_mangle_type (const_tree type)
28701 type = TYPE_MAIN_VARIANT (type);
28703 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28704 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28705 return NULL;
28707 switch (TYPE_MODE (type))
28709 case TFmode:
28710 /* __float128 is "g". */
28711 return "g";
28712 case XFmode:
28713 /* "long double" or __float80 is "e". */
28714 return "e";
28715 default:
28716 return NULL;
28720 /* For 32-bit code we can save PIC register setup by using
28721 __stack_chk_fail_local hidden function instead of calling
28722    __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28723 register, so it is better to call __stack_chk_fail directly. */
28725 static tree
28726 ix86_stack_protect_fail (void)
28728 return TARGET_64BIT
28729 ? default_external_stack_protect_fail ()
28730 : default_hidden_stack_protect_fail ();
28733 /* Select a format to encode pointers in exception handling data. CODE
28734 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28735 true if the symbol may be affected by dynamic relocations.
28737 ??? All x86 object file formats are capable of representing this.
28738 After all, the relocation needed is the same as for the call insn.
28739 Whether or not a particular assembler allows us to enter such, I
28740 guess we'll have to see. */
28741 int
28742 asm_preferred_eh_data_format (int code, int global)
28744 if (flag_pic)
28746 int type = DW_EH_PE_sdata8;
28747 if (!TARGET_64BIT
28748 || ix86_cmodel == CM_SMALL_PIC
28749 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28750 type = DW_EH_PE_sdata4;
28751 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28753 if (ix86_cmodel == CM_SMALL
28754 || (ix86_cmodel == CM_MEDIUM && code))
28755 return DW_EH_PE_udata4;
28756 return DW_EH_PE_absptr;
28759 /* Expand copysign from SIGN to the positive value ABS_VALUE
28760    storing in RESULT. If MASK is non-null, it must be a mask that masks
28761    out the sign bit. */
28762 static void
28763 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28765 enum machine_mode mode = GET_MODE (sign);
28766 rtx sgn = gen_reg_rtx (mode);
28767 if (mask == NULL_RTX)
28769 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28770 if (!VECTOR_MODE_P (mode))
28772 /* We need to generate a scalar mode mask in this case. */
28773 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28774 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28775 mask = gen_reg_rtx (mode);
28776 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28779 else
28780 mask = gen_rtx_NOT (mode, mask);
28781 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28782 gen_rtx_AND (mode, mask, sign)));
28783 emit_insn (gen_rtx_SET (VOIDmode, result,
28784 gen_rtx_IOR (mode, abs_value, sgn)));
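/* Bit-level sketch of the AND/IOR combination above (illustration,
   not original code; assumes IEEE single precision and that ABS_VALUE
   already has a clear sign bit, as the caller guarantees).  */

static float
copysign_sketch (float abs_value, float sign)
{
  union { float f; unsigned int i; } a, s;
  a.f = abs_value;
  s.f = sign;
  a.i |= s.i & 0x80000000u;	/* IOR in the sign bit of SIGN */
  return a.f;
}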
28787 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28788 mask for masking out the sign-bit is stored in *SMASK, if that is
28789 non-null. */
28790 static rtx
28791 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28793 enum machine_mode mode = GET_MODE (op0);
28794 rtx xa, mask;
28796 xa = gen_reg_rtx (mode);
28797 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28798 if (!VECTOR_MODE_P (mode))
28800 /* We need to generate a scalar mode mask in this case. */
28801 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28802 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28803 mask = gen_reg_rtx (mode);
28804 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28806 emit_insn (gen_rtx_SET (VOIDmode, xa,
28807 gen_rtx_AND (mode, op0, mask)));
28809 if (smask)
28810 *smask = mask;
28812 return xa;
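/* Bit-level sketch of the masking above (illustration, not original
   code; assumes IEEE single precision): clearing the sign bit is
   exactly fabs.  */

static float
fabs_sketch (float x)
{
  union { float f; unsigned int i; } u;
  u.f = x;
  u.i &= 0x7fffffffu;		/* AND with the inverted sign mask */
  return u.f;
}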
28815 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28816 swapping the operands if SWAP_OPERANDS is true. The expanded
28817 code is a forward jump to a newly created label in case the
28818 comparison is true. The generated label rtx is returned. */
28819 static rtx
28820 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28821 bool swap_operands)
28823 rtx label, tmp;
28825 if (swap_operands)
28827 tmp = op0;
28828 op0 = op1;
28829 op1 = tmp;
28832 label = gen_label_rtx ();
28833 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28834 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28835 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28836 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28837 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28838 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28839 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28840 JUMP_LABEL (tmp) = label;
28842 return label;
28845 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28846 using comparison code CODE. Operands are swapped for the comparison if
28847 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28848 static rtx
28849 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28850 bool swap_operands)
28852 enum machine_mode mode = GET_MODE (op0);
28853 rtx mask = gen_reg_rtx (mode);
28855 if (swap_operands)
28857 rtx tmp = op0;
28858 op0 = op1;
28859 op1 = tmp;
28862 if (mode == DFmode)
28863 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28864 gen_rtx_fmt_ee (code, mode, op0, op1)));
28865 else
28866 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28867 gen_rtx_fmt_ee (code, mode, op0, op1)));
28869 return mask;
28872 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28873 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28874 static rtx
28875 ix86_gen_TWO52 (enum machine_mode mode)
28877 REAL_VALUE_TYPE TWO52r;
28878 rtx TWO52;
28880 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28881 TWO52 = const_double_from_real_value (TWO52r, mode);
28882 TWO52 = force_reg (mode, TWO52);
28884 return TWO52;
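/* Sketch of how the 2**52 constant is used by the callers below
   (illustration, not original code; assumes evaluation in double
   precision, as with SSE arithmetic): for 0 <= x < 2**52, adding and
   then subtracting 2**52 leaves no room for fraction bits in the
   mantissa, so x is rounded to an integer in the current rounding
   mode.  The volatile qualifier only keeps a compiler from folding
   the trick away at compile time.  */

static double
two52_round_sketch (double x)
{
  volatile double t = x + 4503599627370496.0;	/* x + 2**52 */
  return t - 4503599627370496.0;		/* rounded x */
}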
28887 /* Expand SSE sequence for computing lround from OP1 storing
28888 into OP0. */
28889 void
28890 ix86_expand_lround (rtx op0, rtx op1)
28892 /* C code for the stuff we're doing below:
28893 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28894 return (long)tmp;
28896 enum machine_mode mode = GET_MODE (op1);
28897 const struct real_format *fmt;
28898 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28899 rtx adj;
28901 /* load nextafter (0.5, 0.0) */
28902 fmt = REAL_MODE_FORMAT (mode);
28903 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28904 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28906    /* adj = copysign (nextafter (0.5, 0.0), op1) */
28907 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28908 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28910 /* adj = op1 + adj */
28911 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28913 /* op0 = (imode)adj */
28914 expand_fix (op0, adj, 0);
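/* Scalar sketch of the expansion above (illustration, not original
   code): pred_half is nextafter (0.5, 0.0) = 0.5 - 2**-54 for double,
   so under round-to-nearest the addition never tips a value just
   below .5 over the boundary, while x.5 still rounds away from zero
   after the truncating conversion.  */

static long
lround_sketch (double x)
{
  double pred_half = 0.5 - 0x1p-54;		/* nextafter (0.5, 0.0) */
  double adj = x < 0.0 ? -pred_half : pred_half;/* copysign */
  return (long) (x + adj);			/* truncate */
}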
28917 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1
28918    storing into OPERAND0. */
28919 void
28920 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28922 /* C code for the stuff we're doing below (for do_floor):
28923 xi = (long)op1;
28924 xi -= (double)xi > op1 ? 1 : 0;
28925 return xi;
28927 enum machine_mode fmode = GET_MODE (op1);
28928 enum machine_mode imode = GET_MODE (op0);
28929 rtx ireg, freg, label, tmp;
28931 /* reg = (long)op1 */
28932 ireg = gen_reg_rtx (imode);
28933 expand_fix (ireg, op1, 0);
28935 /* freg = (double)reg */
28936 freg = gen_reg_rtx (fmode);
28937 expand_float (freg, ireg, 0);
28939 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28940 label = ix86_expand_sse_compare_and_jump (UNLE,
28941 freg, op1, !do_floor);
28942 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28943 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28944 emit_move_insn (ireg, tmp);
28946 emit_label (label);
28947 LABEL_NUSES (label) = 1;
28949 emit_move_insn (op0, ireg);
28952 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28953 result in OPERAND0. */
28954 void
28955 ix86_expand_rint (rtx operand0, rtx operand1)
28957 /* C code for the stuff we're doing below:
28958 xa = fabs (operand1);
28959 if (!isless (xa, 2**52))
28960 return operand1;
28961 xa = xa + 2**52 - 2**52;
28962 return copysign (xa, operand1);
28964 enum machine_mode mode = GET_MODE (operand0);
28965 rtx res, xa, label, TWO52, mask;
28967 res = gen_reg_rtx (mode);
28968 emit_move_insn (res, operand1);
28970 /* xa = abs (operand1) */
28971 xa = ix86_expand_sse_fabs (res, &mask);
28973 /* if (!isless (xa, TWO52)) goto label; */
28974 TWO52 = ix86_gen_TWO52 (mode);
28975 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28977 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28978 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28980 ix86_sse_copysign_to_positive (res, xa, res, mask);
28982 emit_label (label);
28983 LABEL_NUSES (label) = 1;
28985 emit_move_insn (operand0, res);
28988 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28989 into OPERAND0. */
28990 void
28991 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28993 /* C code for the stuff we expand below.
28994 double xa = fabs (x), x2;
28995 if (!isless (xa, TWO52))
28996 return x;
28997 xa = xa + TWO52 - TWO52;
28998 x2 = copysign (xa, x);
28999 Compensate. Floor:
29000 if (x2 > x)
29001 x2 -= 1;
29002 Compensate. Ceil:
29003 if (x2 < x)
29004 x2 -= -1;
29005 return x2;
29007 enum machine_mode mode = GET_MODE (operand0);
29008 rtx xa, TWO52, tmp, label, one, res, mask;
29010 TWO52 = ix86_gen_TWO52 (mode);
29012 /* Temporary for holding the result, initialized to the input
29013 operand to ease control flow. */
29014 res = gen_reg_rtx (mode);
29015 emit_move_insn (res, operand1);
29017 /* xa = abs (operand1) */
29018 xa = ix86_expand_sse_fabs (res, &mask);
29020 /* if (!isless (xa, TWO52)) goto label; */
29021 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29023 /* xa = xa + TWO52 - TWO52; */
29024 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29025 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
29027 /* xa = copysign (xa, operand1) */
29028 ix86_sse_copysign_to_positive (xa, xa, res, mask);
29030 /* generate 1.0 or -1.0 */
29031 one = force_reg (mode,
29032 const_double_from_real_value (do_floor
29033 ? dconst1 : dconstm1, mode));
29035 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
29036 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
29037 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29038 gen_rtx_AND (mode, one, tmp)));
29039 /* We always need to subtract here to preserve signed zero. */
29040 tmp = expand_simple_binop (mode, MINUS,
29041 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29042 emit_move_insn (res, tmp);
29044 emit_label (label);
29045 LABEL_NUSES (label) = 1;
29047 emit_move_insn (operand0, res);
29050 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
29051 into OPERAND0. */
29052 void
29053 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
29055 /* C code for the stuff we expand below.
29056 double xa = fabs (x), x2;
29057 if (!isless (xa, TWO52))
29058 return x;
29059 x2 = (double)(long)x;
29060 Compensate. Floor:
29061 if (x2 > x)
29062 x2 -= 1;
29063 Compensate. Ceil:
29064 if (x2 < x)
29065 x2 += 1;
29066 if (HONOR_SIGNED_ZEROS (mode))
29067 return copysign (x2, x);
29068 return x2;
29070 enum machine_mode mode = GET_MODE (operand0);
29071 rtx xa, xi, TWO52, tmp, label, one, res, mask;
29073 TWO52 = ix86_gen_TWO52 (mode);
29075 /* Temporary for holding the result, initialized to the input
29076 operand to ease control flow. */
29077 res = gen_reg_rtx (mode);
29078 emit_move_insn (res, operand1);
29080 /* xa = abs (operand1) */
29081 xa = ix86_expand_sse_fabs (res, &mask);
29083 /* if (!isless (xa, TWO52)) goto label; */
29084 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29086 /* xa = (double)(long)x */
29087 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29088 expand_fix (xi, res, 0);
29089 expand_float (xa, xi, 0);
29091 /* generate 1.0 */
29092 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29094 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
29095 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
29096 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29097 gen_rtx_AND (mode, one, tmp)));
29098 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
29099 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29100 emit_move_insn (res, tmp);
29102 if (HONOR_SIGNED_ZEROS (mode))
29103 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29105 emit_label (label);
29106 LABEL_NUSES (label) = 1;
29108 emit_move_insn (operand0, res);
29111 /* Expand SSE sequence for computing round from OPERAND1 storing
29112    into OPERAND0. A sequence that works without relying on DImode truncation
29113    via cvttsd2siq, which is only available on 64-bit targets. */
29114 void
29115 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
29117 /* C code for the stuff we expand below.
29118 double xa = fabs (x), xa2, x2;
29119 if (!isless (xa, TWO52))
29120 return x;
29121 Using the absolute value and copying back sign makes
29122 -0.0 -> -0.0 correct.
29123 xa2 = xa + TWO52 - TWO52;
29124 Compensate.
29125 dxa = xa2 - xa;
29126 if (dxa <= -0.5)
29127 xa2 += 1;
29128 else if (dxa > 0.5)
29129 xa2 -= 1;
29130 x2 = copysign (xa2, x);
29131 return x2;
29133 enum machine_mode mode = GET_MODE (operand0);
29134 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
29136 TWO52 = ix86_gen_TWO52 (mode);
29138 /* Temporary for holding the result, initialized to the input
29139 operand to ease control flow. */
29140 res = gen_reg_rtx (mode);
29141 emit_move_insn (res, operand1);
29143 /* xa = abs (operand1) */
29144 xa = ix86_expand_sse_fabs (res, &mask);
29146 /* if (!isless (xa, TWO52)) goto label; */
29147 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29149 /* xa2 = xa + TWO52 - TWO52; */
29150 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29151 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
29153 /* dxa = xa2 - xa; */
29154 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
29156 /* generate 0.5, 1.0 and -0.5 */
29157 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
29158 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
29159 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
29160 0, OPTAB_DIRECT);
29162 /* Compensate. */
29163 tmp = gen_reg_rtx (mode);
29164 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
29165 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
29166 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29167 gen_rtx_AND (mode, one, tmp)));
29168 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29169 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
29170 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
29171 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29172 gen_rtx_AND (mode, one, tmp)));
29173 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29175 /* res = copysign (xa2, operand1) */
29176 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
29178 emit_label (label);
29179 LABEL_NUSES (label) = 1;
29181 emit_move_insn (operand0, res);
29184 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29185 into OPERAND0. */
29186 void
29187 ix86_expand_trunc (rtx operand0, rtx operand1)
29189 /* C code for SSE variant we expand below.
29190 double xa = fabs (x), x2;
29191 if (!isless (xa, TWO52))
29192 return x;
29193 x2 = (double)(long)x;
29194 if (HONOR_SIGNED_ZEROS (mode))
29195 return copysign (x2, x);
29196 return x2;
29198 enum machine_mode mode = GET_MODE (operand0);
29199 rtx xa, xi, TWO52, label, res, mask;
29201 TWO52 = ix86_gen_TWO52 (mode);
29203 /* Temporary for holding the result, initialized to the input
29204 operand to ease control flow. */
29205 res = gen_reg_rtx (mode);
29206 emit_move_insn (res, operand1);
29208 /* xa = abs (operand1) */
29209 xa = ix86_expand_sse_fabs (res, &mask);
29211 /* if (!isless (xa, TWO52)) goto label; */
29212 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29214 /* x = (double)(long)x */
29215 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29216 expand_fix (xi, res, 0);
29217 expand_float (res, xi, 0);
29219 if (HONOR_SIGNED_ZEROS (mode))
29220 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29222 emit_label (label);
29223 LABEL_NUSES (label) = 1;
29225 emit_move_insn (operand0, res);
29228 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29229 into OPERAND0. */
29230 void
29231 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
29233 enum machine_mode mode = GET_MODE (operand0);
29234 rtx xa, mask, TWO52, label, one, res, smask, tmp;
29236 /* C code for SSE variant we expand below.
29237 double xa = fabs (x), x2;
29238 if (!isless (xa, TWO52))
29239 return x;
29240 xa2 = xa + TWO52 - TWO52;
29241 Compensate:
29242 if (xa2 > xa)
29243 xa2 -= 1.0;
29244 x2 = copysign (xa2, x);
29245 return x2;
29248 TWO52 = ix86_gen_TWO52 (mode);
29250 /* Temporary for holding the result, initialized to the input
29251 operand to ease control flow. */
29252 res = gen_reg_rtx (mode);
29253 emit_move_insn (res, operand1);
29255 /* xa = abs (operand1) */
29256 xa = ix86_expand_sse_fabs (res, &smask);
29258 /* if (!isless (xa, TWO52)) goto label; */
29259 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29261 /* res = xa + TWO52 - TWO52; */
29262 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29263 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29264 emit_move_insn (res, tmp);
29266 /* generate 1.0 */
29267 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29269 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29270 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29271 emit_insn (gen_rtx_SET (VOIDmode, mask,
29272 gen_rtx_AND (mode, mask, one)));
29273 tmp = expand_simple_binop (mode, MINUS,
29274 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29275 emit_move_insn (res, tmp);
29277 /* res = copysign (res, operand1) */
29278 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29280 emit_label (label);
29281 LABEL_NUSES (label) = 1;
29283 emit_move_insn (operand0, res);
29286 /* Expand SSE sequence for computing round from OPERAND1 storing
29287 into OPERAND0. */
29288 void
29289 ix86_expand_round (rtx operand0, rtx operand1)
29291 /* C code for the stuff we're doing below:
29292 double xa = fabs (x);
29293 if (!isless (xa, TWO52))
29294 return x;
29295 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29296 return copysign (xa, x);
29298 enum machine_mode mode = GET_MODE (operand0);
29299 rtx res, TWO52, xa, label, xi, half, mask;
29300 const struct real_format *fmt;
29301 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29303 /* Temporary for holding the result, initialized to the input
29304 operand to ease control flow. */
29305 res = gen_reg_rtx (mode);
29306 emit_move_insn (res, operand1);
29308 TWO52 = ix86_gen_TWO52 (mode);
29309 xa = ix86_expand_sse_fabs (res, &mask);
29310 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29312 /* load nextafter (0.5, 0.0) */
29313 fmt = REAL_MODE_FORMAT (mode);
29314 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29315 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29317 /* xa = xa + 0.5 */
29318 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29319 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29321 /* xa = (double)(int64_t)xa */
29322 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29323 expand_fix (xi, xa, 0);
29324 expand_float (xa, xi, 0);
29326 /* res = copysign (xa, operand1) */
29327 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29329 emit_label (label);
29330 LABEL_NUSES (label) = 1;
29332 emit_move_insn (operand0, res);
29336 /* Validate whether a SSE5 instruction is valid or not.
29337 OPERANDS is the array of operands.
29338 NUM is the number of operands.
29339 USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
29340 NUM_MEMORY is the maximum number of memory operands to accept.
29341    When COMMUTATIVE is set, operands 1 and 2 can be swapped. */
29343 bool
29344 ix86_sse5_valid_op_p (rtx operands[], rtx insn ATTRIBUTE_UNUSED, int num,
29345 bool uses_oc0, int num_memory, bool commutative)
29347 int mem_mask;
29348 int mem_count;
29349 int i;
29351 /* Count the number of memory arguments */
29352 mem_mask = 0;
29353 mem_count = 0;
29354 for (i = 0; i < num; i++)
29356 enum machine_mode mode = GET_MODE (operands[i]);
29357 if (register_operand (operands[i], mode))
29360 else if (memory_operand (operands[i], mode))
29362 mem_mask |= (1 << i);
29363 mem_count++;
29366 else
29368 rtx pattern = PATTERN (insn);
29370 /* allow 0 for pcmov */
29371 if (GET_CODE (pattern) != SET
29372 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE
29373 || i < 2
29374 || operands[i] != CONST0_RTX (mode))
29375 return false;
29379 /* Special case pmacsdq{l,h} where we allow the 3rd argument to be
29380 a memory operation. */
29381 if (num_memory < 0)
29383 num_memory = -num_memory;
29384 if ((mem_mask & (1 << (num-1))) != 0)
29386 mem_mask &= ~(1 << (num-1));
29387 mem_count--;
29391 /* If there were no memory operations, allow the insn */
29392 if (mem_mask == 0)
29393 return true;
29395 /* Do not allow the destination register to be a memory operand. */
29396 else if (mem_mask & (1 << 0))
29397 return false;
29399 /* If there are too many memory operations, disallow the instruction. While
29400    the hardware only allows one memory reference, before register allocation
29401    we sometimes allow two memory operations for some insns so that code
29402    like the following can be optimized:
29404    float fmadd (float *a, float *b, float *c) { return (*a * *b) + *c; }
29406    or similar cases that are vectorized to use the fmaddss
29407    instruction. */
29408 else if (mem_count > num_memory)
29409 return false;
29411 /* Don't allow more than one memory operation if not optimizing. */
29412 else if (mem_count > 1 && !optimize)
29413 return false;
29415 else if (num == 4 && mem_count == 1)
29417 /* formats (destination is the first argument), example fmaddss:
29418 xmm1, xmm1, xmm2, xmm3/mem
29419 xmm1, xmm1, xmm2/mem, xmm3
29420 xmm1, xmm2, xmm3/mem, xmm1
29421 xmm1, xmm2/mem, xmm3, xmm1 */
29422 if (uses_oc0)
29423 return ((mem_mask == (1 << 1))
29424 || (mem_mask == (1 << 2))
29425 || (mem_mask == (1 << 3)));
29427 /* format, example pmacsdd:
29428 xmm1, xmm2, xmm3/mem, xmm1 */
29429 if (commutative)
29430 return (mem_mask == (1 << 2) || mem_mask == (1 << 1));
29431 else
29432 return (mem_mask == (1 << 2));
29435 else if (num == 4 && num_memory == 2)
29437 /* If there are two memory operations, we can load one of the memory ops
29438 into the destination register. This is for optimizing the
29439    multiply/add ops, where the combiner has optimized both the multiply
29440 and the add insns to have a memory operation. We have to be careful
29441 that the destination doesn't overlap with the inputs. */
29442 rtx op0 = operands[0];
29444 if (reg_mentioned_p (op0, operands[1])
29445 || reg_mentioned_p (op0, operands[2])
29446 || reg_mentioned_p (op0, operands[3]))
29447 return false;
29449 /* formats (destination is the first argument), example fmaddss:
29450 xmm1, xmm1, xmm2, xmm3/mem
29451 xmm1, xmm1, xmm2/mem, xmm3
29452 xmm1, xmm2, xmm3/mem, xmm1
29453 xmm1, xmm2/mem, xmm3, xmm1
29455 For the oc0 case, we will load either operands[1] or operands[3] into
29456 operands[0], so any combination of 2 memory operands is ok. */
29457 if (uses_oc0)
29458 return true;
29460 /* format, example pmacsdd:
29461 xmm1, xmm2, xmm3/mem, xmm1
29463 For the integer multiply/add instructions be more restrictive and
29464 require operands[2] and operands[3] to be the memory operands. */
29465 if (commutative)
29466 return (mem_mask == ((1 << 1) | (1 << 3)) || mem_mask == ((1 << 2) | (1 << 3)));
29467 else
29468 return (mem_mask == ((1 << 2) | (1 << 3)));
29471 else if (num == 3 && num_memory == 1)
29473 /* formats, example protb:
29474 xmm1, xmm2, xmm3/mem
29475 xmm1, xmm2/mem, xmm3 */
29476 if (uses_oc0)
29477 return ((mem_mask == (1 << 1)) || (mem_mask == (1 << 2)));
29479 /* format, example comeq:
29480 xmm1, xmm2, xmm3/mem */
29481 else
29482 return (mem_mask == (1 << 2));
29485 else
29486 gcc_unreachable ();
29488 return false;
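/* Example of the masks above (illustration, not original code): for
   an fmaddss of the form xmm1, xmm1, xmm2, xmm3/mem the operand array
   is {reg, reg, reg, mem}, so mem_mask == (1 << 3) and
   mem_count == 1, which the four-operand uses_oc0 case accepts.  */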
29492 /* Fix up an SSE5 instruction that has 2 memory input references into a form the
29493 hardware will allow by using the destination register to load one of the
29494 memory operations. Presently this is used by the multiply/add routines to
29495 allow 2 memory references. */
29497 void
29498 ix86_expand_sse5_multiple_memory (rtx operands[],
29499 int num,
29500 enum machine_mode mode)
29502 rtx op0 = operands[0];
29503 if (num != 4
29504 || memory_operand (op0, mode)
29505 || reg_mentioned_p (op0, operands[1])
29506 || reg_mentioned_p (op0, operands[2])
29507 || reg_mentioned_p (op0, operands[3]))
29508 gcc_unreachable ();
29510 /* For 2 memory operands, pick either operands[1] or operands[3] to move into
29511 the destination register. */
29512 if (memory_operand (operands[1], mode))
29514 emit_move_insn (op0, operands[1]);
29515 operands[1] = op0;
29517 else if (memory_operand (operands[3], mode))
29519 emit_move_insn (op0, operands[3]);
29520 operands[3] = op0;
29522 else
29523 gcc_unreachable ();
29525 return;
29529 /* Table of valid machine attributes. */
29530 static const struct attribute_spec ix86_attribute_table[] =
29532 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29533 /* Stdcall attribute says callee is responsible for popping arguments
29534 if they are not variable. */
29535 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29536 /* Fastcall attribute says callee is responsible for popping arguments
29537 if they are not variable. */
29538 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29539 /* Cdecl attribute says the callee is a normal C declaration */
29540 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29541 /* Regparm attribute specifies how many integer arguments are to be
29542 passed in registers. */
29543 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29544 /* Sseregparm attribute says we are using x86_64 calling conventions
29545 for FP arguments. */
29546 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29547 /* force_align_arg_pointer says this function realigns the stack at entry. */
29548 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29549 false, true, true, ix86_handle_cconv_attribute },
29550 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29551 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29552 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29553 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29554 #endif
29555 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29556 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29557 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29558 SUBTARGET_ATTRIBUTE_TABLE,
29559 #endif
29560 /* ms_abi and sysv_abi calling convention function attributes. */
29561 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29562 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29563 /* End element. */
29564 { NULL, 0, 0, false, false, false, NULL }
29567 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29568 static int
29569 x86_builtin_vectorization_cost (bool runtime_test)
29571 /* If the branch of the runtime test is taken, i.e. the vectorized
29572    version is skipped, this incurs a misprediction cost (because the
29573    vectorized version is expected to be the fall-through). So we subtract
29574    the latency of a mispredicted branch from the costs that are incurred
29575 when the vectorized version is executed.
29577 TODO: The values in individual target tables have to be tuned or new
29578    fields may be needed. E.g., on K8, the default branch path is the
29579 not-taken path. If the taken path is predicted correctly, the minimum
29580 penalty of going down the taken-path is 1 cycle. If the taken-path is
29581 not predicted correctly, then the minimum penalty is 10 cycles. */
29583 if (runtime_test)
29585 return (-(ix86_cost->cond_taken_branch_cost));
29587 else
29588 return 0;
29591 /* This function returns the calling-ABI-specific va_list type node.
29592    It returns the FNDECL-specific va_list type. */
29594 tree
29595 ix86_fn_abi_va_list (tree fndecl)
29597 int abi;
29599 if (!TARGET_64BIT)
29600 return va_list_type_node;
29601 gcc_assert (fndecl != NULL_TREE);
29602 abi = ix86_function_abi ((const_tree) fndecl);
29604 if (abi == MS_ABI)
29605 return ms_va_list_type_node;
29606 else
29607 return sysv_va_list_type_node;
29610 /* Returns the canonical va_list type specified by TYPE. If there
29611    is no valid TYPE provided, it returns NULL_TREE. */
29613 tree
29614 ix86_canonical_va_list_type (tree type)
29616 tree wtype, htype;
29618 /* Resolve references and pointers to va_list type. */
29619 if (INDIRECT_REF_P (type))
29620 type = TREE_TYPE (type);
29621 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
29622 type = TREE_TYPE (type);
29624 if (TARGET_64BIT)
29626 wtype = va_list_type_node;
29627 gcc_assert (wtype != NULL_TREE);
29628 htype = type;
29629 if (TREE_CODE (wtype) == ARRAY_TYPE)
29631 /* If va_list is an array type, the argument may have decayed
29632 to a pointer type, e.g. by being passed to another function.
29633 In that case, unwrap both types so that we can compare the
29634 underlying records. */
29635 if (TREE_CODE (htype) == ARRAY_TYPE
29636 || POINTER_TYPE_P (htype))
29638 wtype = TREE_TYPE (wtype);
29639 htype = TREE_TYPE (htype);
29642 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
29643 return va_list_type_node;
29644 wtype = sysv_va_list_type_node;
29645 gcc_assert (wtype != NULL_TREE);
29646 htype = type;
29647 if (TREE_CODE (wtype) == ARRAY_TYPE)
29649 /* If va_list is an array type, the argument may have decayed
29650 to a pointer type, e.g. by being passed to another function.
29651 In that case, unwrap both types so that we can compare the
29652 underlying records. */
29653 if (TREE_CODE (htype) == ARRAY_TYPE
29654 || POINTER_TYPE_P (htype))
29656 wtype = TREE_TYPE (wtype);
29657 htype = TREE_TYPE (htype);
29660 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
29661 return sysv_va_list_type_node;
29662 wtype = ms_va_list_type_node;
29663 gcc_assert (wtype != NULL_TREE);
29664 htype = type;
29665 if (TREE_CODE (wtype) == ARRAY_TYPE)
29667 /* If va_list is an array type, the argument may have decayed
29668 to a pointer type, e.g. by being passed to another function.
29669 In that case, unwrap both types so that we can compare the
29670 underlying records. */
29671 if (TREE_CODE (htype) == ARRAY_TYPE
29672 || POINTER_TYPE_P (htype))
29674 wtype = TREE_TYPE (wtype);
29675 htype = TREE_TYPE (htype);
29678 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
29679 return ms_va_list_type_node;
29680 return NULL_TREE;
29682 return std_canonical_va_list_type (type);
29685 /* Iterate through the target-specific builtin types for va_list.
29686 IDX denotes the iterator, *PTREE is set to the result type of
29687 the va_list builtin, and *PNAME to its internal type.
29688 Returns zero if there is no element for this index, otherwise
29689 IDX should be increased upon the next call.
29690    Note: do not iterate over a base builtin's name like __builtin_va_list.
29691 Used from c_common_nodes_and_builtins. */
29693 int
29694 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
29696 if (!TARGET_64BIT)
29697 return 0;
29698 switch (idx) {
29699 case 0:
29700 *ptree = ms_va_list_type_node;
29701 *pname = "__builtin_ms_va_list";
29702 break;
29703 case 1:
29704 *ptree = sysv_va_list_type_node;
29705 *pname = "__builtin_sysv_va_list";
29706 break;
29707 default:
29708 return 0;
29710 return 1;
29713 /* Initialize the GCC target structure. */
29714 #undef TARGET_RETURN_IN_MEMORY
29715 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
29717 #undef TARGET_ATTRIBUTE_TABLE
29718 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
29719 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29720 # undef TARGET_MERGE_DECL_ATTRIBUTES
29721 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
29722 #endif
29724 #undef TARGET_COMP_TYPE_ATTRIBUTES
29725 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
29727 #undef TARGET_INIT_BUILTINS
29728 #define TARGET_INIT_BUILTINS ix86_init_builtins
29729 #undef TARGET_EXPAND_BUILTIN
29730 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
29732 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
29733 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
29734 ix86_builtin_vectorized_function
29736 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
29737 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
29739 #undef TARGET_BUILTIN_RECIPROCAL
29740 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
29742 #undef TARGET_ASM_FUNCTION_EPILOGUE
29743 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
29745 #undef TARGET_ENCODE_SECTION_INFO
29746 #ifndef SUBTARGET_ENCODE_SECTION_INFO
29747 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
29748 #else
29749 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
29750 #endif
29752 #undef TARGET_ASM_OPEN_PAREN
29753 #define TARGET_ASM_OPEN_PAREN ""
29754 #undef TARGET_ASM_CLOSE_PAREN
29755 #define TARGET_ASM_CLOSE_PAREN ""
29757 #undef TARGET_ASM_ALIGNED_HI_OP
29758 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
29759 #undef TARGET_ASM_ALIGNED_SI_OP
29760 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
29761 #ifdef ASM_QUAD
29762 #undef TARGET_ASM_ALIGNED_DI_OP
29763 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
29764 #endif
29766 #undef TARGET_ASM_UNALIGNED_HI_OP
29767 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
29768 #undef TARGET_ASM_UNALIGNED_SI_OP
29769 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
29770 #undef TARGET_ASM_UNALIGNED_DI_OP
29771 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
29773 #undef TARGET_SCHED_ADJUST_COST
29774 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
29775 #undef TARGET_SCHED_ISSUE_RATE
29776 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
29777 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
29778 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
29779 ia32_multipass_dfa_lookahead
29781 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
29782 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
29784 #ifdef HAVE_AS_TLS
29785 #undef TARGET_HAVE_TLS
29786 #define TARGET_HAVE_TLS true
29787 #endif
29788 #undef TARGET_CANNOT_FORCE_CONST_MEM
29789 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
29790 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
29791 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
29793 #undef TARGET_DELEGITIMIZE_ADDRESS
29794 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
29796 #undef TARGET_MS_BITFIELD_LAYOUT_P
29797 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT \
   | TARGET_SUBTARGET_DEFAULT \
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
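
/* Varargs support: building and expanding the va_list type.  */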
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
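
/* Argument passing and calling-convention hooks.  */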
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_UPDATE_STACK_BOUNDARY
#define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
#undef TARGET_GET_DRAP_RTX
#define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST x86_builtin_vectorization_cost

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
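
/* Function-specific option support ("target" attribute and pragma).  */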
#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_OPTION_CAN_INLINE_P
#define TARGET_OPTION_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
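
/* Build the target hook vector.  TARGET_INITIALIZER, defined in
   target-def.h, expands to an initializer that uses the overrides
   above where they are defined and the documented defaults for every
   other hook.  The rest of the compiler calls through this vector,
   e.g. the scheduler reads targetm.sched.issue_rate, which here
   resolves to ix86_issue_rate.  */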
struct gcc_target targetm = TARGET_INITIALIZER;
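
/* Include the garbage-collection root tables that gengtype generates
   for this file.  */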
#include "gt-i386.h"