gcc/config/i386/i386.c
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "output.h"
34 #include "insn-codes.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "except.h"
38 #include "function.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "toplev.h"
43 #include "basic-block.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "langhooks.h"
48 #include "cgraph.h"
49 #include "gimple.h"
50 #include "dwarf2.h"
51 #include "df.h"
52 #include "tm-constrs.h"
53 #include "params.h"
54 #include "cselib.h"
55 #include "debug.h"
56 #include "dwarf2out.h"
58 static rtx legitimize_dllimport_symbol (rtx, bool);
60 #ifndef CHECK_STACK_LIMIT
61 #define CHECK_STACK_LIMIT (-1)
62 #endif
64 /* Return index of given mode in mult and division cost tables. */
65 #define MODE_INDEX(mode) \
66 ((mode) == QImode ? 0 \
67 : (mode) == HImode ? 1 \
68 : (mode) == SImode ? 2 \
69 : (mode) == DImode ? 3 \
70 : 4)
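/* Illustrative sketch: MODE_INDEX selects the per-mode entry of the multiply
   and divide cost arrays in the processor_costs tables below.  Assuming the
   mult_init field and the ix86_cost pointer used elsewhere in this file, a
   query for the 32-bit multiply start cost would look roughly like:

     int si_mult_cost = ix86_cost->mult_init[MODE_INDEX (SImode)];

   Any mode other than QImode/HImode/SImode/DImode maps to index 4, the
   "other" slot.  */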
72 /* Processor costs (relative to an add) */
73 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
74 #define COSTS_N_BYTES(N) ((N) * 2)
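/* Worked example of the assumption above: COSTS_N_INSNS (1) expands to 4, and
   a 2-byte add scores COSTS_N_BYTES (2) == 4, so the byte-based costs used by
   ix86_size_cost stay on the same scale as the instruction-based costs used by
   the speed-tuned tables that follow.  */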
76 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
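/* How to read the stringop tables below (an informal note): each entry gives
   the algorithm to use when the block size is unknown, followed by a list of
   {max_size, algorithm} pairs, where a max_size of -1 means "any larger
   block".  DUMMY_STRINGOP_ALGS therefore just says "always use a libcall".
   For example, an entry such as

     {libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}

   means: unknown sizes go through the library, known sizes up to 256 bytes use
   the 4-byte rep prefix (rep movsl / rep stosl), and anything larger falls
   back to the library call.  */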
78 const
79 struct processor_costs ix86_size_cost = {/* costs for tuning for size */
80 COSTS_N_BYTES (2), /* cost of an add instruction */
81 COSTS_N_BYTES (3), /* cost of a lea instruction */
82 COSTS_N_BYTES (2), /* variable shift costs */
83 COSTS_N_BYTES (3), /* constant shift costs */
84 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
85 COSTS_N_BYTES (3), /* HI */
86 COSTS_N_BYTES (3), /* SI */
87 COSTS_N_BYTES (3), /* DI */
88 COSTS_N_BYTES (5)}, /* other */
89 0, /* cost of multiply per each bit set */
90 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
91 COSTS_N_BYTES (3), /* HI */
92 COSTS_N_BYTES (3), /* SI */
93 COSTS_N_BYTES (3), /* DI */
94 COSTS_N_BYTES (5)}, /* other */
95 COSTS_N_BYTES (3), /* cost of movsx */
96 COSTS_N_BYTES (3), /* cost of movzx */
97 0, /* "large" insn */
98 2, /* MOVE_RATIO */
99 2, /* cost for loading QImode using movzbl */
100 {2, 2, 2}, /* cost of loading integer registers
101 in QImode, HImode and SImode.
102 Relative to reg-reg move (2). */
103 {2, 2, 2}, /* cost of storing integer registers */
104 2, /* cost of reg,reg fld/fst */
105 {2, 2, 2}, /* cost of loading fp registers
106 in SFmode, DFmode and XFmode */
107 {2, 2, 2}, /* cost of storing fp registers
108 in SFmode, DFmode and XFmode */
109 3, /* cost of moving MMX register */
110 {3, 3}, /* cost of loading MMX registers
111 in SImode and DImode */
112 {3, 3}, /* cost of storing MMX registers
113 in SImode and DImode */
114 3, /* cost of moving SSE register */
115 {3, 3, 3}, /* cost of loading SSE registers
116 in SImode, DImode and TImode */
117 {3, 3, 3}, /* cost of storing SSE registers
118 in SImode, DImode and TImode */
119 3, /* MMX or SSE register to integer */
120 0, /* size of l1 cache */
121 0, /* size of l2 cache */
122 0, /* size of prefetch block */
123 0, /* number of parallel prefetches */
124 2, /* Branch cost */
125 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
126 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
127 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
128 COSTS_N_BYTES (2), /* cost of FABS instruction. */
129 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
130 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
131 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
132 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
133 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
134 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
135 1, /* scalar_stmt_cost. */
136 1, /* scalar load_cost. */
137 1, /* scalar_store_cost. */
138 1, /* vec_stmt_cost. */
139 1, /* vec_to_scalar_cost. */
140 1, /* scalar_to_vec_cost. */
141 1, /* vec_align_load_cost. */
142 1, /* vec_unalign_load_cost. */
143 1, /* vec_store_cost. */
144 1, /* cond_taken_branch_cost. */
 145 1, /* cond_not_taken_branch_cost. */
 146 };
148 /* Processor costs (relative to an add) */
149 static const
150 struct processor_costs i386_cost = { /* 386 specific costs */
151 COSTS_N_INSNS (1), /* cost of an add instruction */
152 COSTS_N_INSNS (1), /* cost of a lea instruction */
153 COSTS_N_INSNS (3), /* variable shift costs */
154 COSTS_N_INSNS (2), /* constant shift costs */
155 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
156 COSTS_N_INSNS (6), /* HI */
157 COSTS_N_INSNS (6), /* SI */
158 COSTS_N_INSNS (6), /* DI */
159 COSTS_N_INSNS (6)}, /* other */
160 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
161 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
162 COSTS_N_INSNS (23), /* HI */
163 COSTS_N_INSNS (23), /* SI */
164 COSTS_N_INSNS (23), /* DI */
165 COSTS_N_INSNS (23)}, /* other */
166 COSTS_N_INSNS (3), /* cost of movsx */
167 COSTS_N_INSNS (2), /* cost of movzx */
168 15, /* "large" insn */
169 3, /* MOVE_RATIO */
170 4, /* cost for loading QImode using movzbl */
171 {2, 4, 2}, /* cost of loading integer registers
172 in QImode, HImode and SImode.
173 Relative to reg-reg move (2). */
174 {2, 4, 2}, /* cost of storing integer registers */
175 2, /* cost of reg,reg fld/fst */
176 {8, 8, 8}, /* cost of loading fp registers
177 in SFmode, DFmode and XFmode */
178 {8, 8, 8}, /* cost of storing fp registers
179 in SFmode, DFmode and XFmode */
180 2, /* cost of moving MMX register */
181 {4, 8}, /* cost of loading MMX registers
182 in SImode and DImode */
183 {4, 8}, /* cost of storing MMX registers
184 in SImode and DImode */
185 2, /* cost of moving SSE register */
186 {4, 8, 16}, /* cost of loading SSE registers
187 in SImode, DImode and TImode */
188 {4, 8, 16}, /* cost of storing SSE registers
189 in SImode, DImode and TImode */
190 3, /* MMX or SSE register to integer */
191 0, /* size of l1 cache */
192 0, /* size of l2 cache */
193 0, /* size of prefetch block */
194 0, /* number of parallel prefetches */
195 1, /* Branch cost */
196 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
197 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
198 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
199 COSTS_N_INSNS (22), /* cost of FABS instruction. */
200 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
201 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
202 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
203 DUMMY_STRINGOP_ALGS},
204 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
205 DUMMY_STRINGOP_ALGS},
206 1, /* scalar_stmt_cost. */
207 1, /* scalar load_cost. */
208 1, /* scalar_store_cost. */
209 1, /* vec_stmt_cost. */
210 1, /* vec_to_scalar_cost. */
211 1, /* scalar_to_vec_cost. */
212 1, /* vec_align_load_cost. */
213 2, /* vec_unalign_load_cost. */
214 1, /* vec_store_cost. */
215 3, /* cond_taken_branch_cost. */
 216 1, /* cond_not_taken_branch_cost. */
 217 };
219 static const
220 struct processor_costs i486_cost = { /* 486 specific costs */
221 COSTS_N_INSNS (1), /* cost of an add instruction */
222 COSTS_N_INSNS (1), /* cost of a lea instruction */
223 COSTS_N_INSNS (3), /* variable shift costs */
224 COSTS_N_INSNS (2), /* constant shift costs */
225 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
226 COSTS_N_INSNS (12), /* HI */
227 COSTS_N_INSNS (12), /* SI */
228 COSTS_N_INSNS (12), /* DI */
229 COSTS_N_INSNS (12)}, /* other */
230 1, /* cost of multiply per each bit set */
231 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
232 COSTS_N_INSNS (40), /* HI */
233 COSTS_N_INSNS (40), /* SI */
234 COSTS_N_INSNS (40), /* DI */
235 COSTS_N_INSNS (40)}, /* other */
236 COSTS_N_INSNS (3), /* cost of movsx */
237 COSTS_N_INSNS (2), /* cost of movzx */
238 15, /* "large" insn */
239 3, /* MOVE_RATIO */
240 4, /* cost for loading QImode using movzbl */
241 {2, 4, 2}, /* cost of loading integer registers
242 in QImode, HImode and SImode.
243 Relative to reg-reg move (2). */
244 {2, 4, 2}, /* cost of storing integer registers */
245 2, /* cost of reg,reg fld/fst */
246 {8, 8, 8}, /* cost of loading fp registers
247 in SFmode, DFmode and XFmode */
248 {8, 8, 8}, /* cost of storing fp registers
249 in SFmode, DFmode and XFmode */
250 2, /* cost of moving MMX register */
251 {4, 8}, /* cost of loading MMX registers
252 in SImode and DImode */
253 {4, 8}, /* cost of storing MMX registers
254 in SImode and DImode */
255 2, /* cost of moving SSE register */
256 {4, 8, 16}, /* cost of loading SSE registers
257 in SImode, DImode and TImode */
258 {4, 8, 16}, /* cost of storing SSE registers
259 in SImode, DImode and TImode */
260 3, /* MMX or SSE register to integer */
261 4, /* size of l1 cache. 486 has 8kB cache
262 shared for code and data, so 4kB is
263 not really precise. */
264 4, /* size of l2 cache */
265 0, /* size of prefetch block */
266 0, /* number of parallel prefetches */
267 1, /* Branch cost */
268 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
269 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
270 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
271 COSTS_N_INSNS (3), /* cost of FABS instruction. */
272 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
273 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
274 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
275 DUMMY_STRINGOP_ALGS},
276 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
277 DUMMY_STRINGOP_ALGS},
278 1, /* scalar_stmt_cost. */
279 1, /* scalar load_cost. */
280 1, /* scalar_store_cost. */
281 1, /* vec_stmt_cost. */
282 1, /* vec_to_scalar_cost. */
283 1, /* scalar_to_vec_cost. */
284 1, /* vec_align_load_cost. */
285 2, /* vec_unalign_load_cost. */
286 1, /* vec_store_cost. */
287 3, /* cond_taken_branch_cost. */
 288 1, /* cond_not_taken_branch_cost. */
 289 };
291 static const
292 struct processor_costs pentium_cost = {
293 COSTS_N_INSNS (1), /* cost of an add instruction */
294 COSTS_N_INSNS (1), /* cost of a lea instruction */
295 COSTS_N_INSNS (4), /* variable shift costs */
296 COSTS_N_INSNS (1), /* constant shift costs */
297 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
298 COSTS_N_INSNS (11), /* HI */
299 COSTS_N_INSNS (11), /* SI */
300 COSTS_N_INSNS (11), /* DI */
301 COSTS_N_INSNS (11)}, /* other */
302 0, /* cost of multiply per each bit set */
303 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
304 COSTS_N_INSNS (25), /* HI */
305 COSTS_N_INSNS (25), /* SI */
306 COSTS_N_INSNS (25), /* DI */
307 COSTS_N_INSNS (25)}, /* other */
308 COSTS_N_INSNS (3), /* cost of movsx */
309 COSTS_N_INSNS (2), /* cost of movzx */
310 8, /* "large" insn */
311 6, /* MOVE_RATIO */
312 6, /* cost for loading QImode using movzbl */
313 {2, 4, 2}, /* cost of loading integer registers
314 in QImode, HImode and SImode.
315 Relative to reg-reg move (2). */
316 {2, 4, 2}, /* cost of storing integer registers */
317 2, /* cost of reg,reg fld/fst */
318 {2, 2, 6}, /* cost of loading fp registers
319 in SFmode, DFmode and XFmode */
320 {4, 4, 6}, /* cost of storing fp registers
321 in SFmode, DFmode and XFmode */
322 8, /* cost of moving MMX register */
323 {8, 8}, /* cost of loading MMX registers
324 in SImode and DImode */
325 {8, 8}, /* cost of storing MMX registers
326 in SImode and DImode */
327 2, /* cost of moving SSE register */
328 {4, 8, 16}, /* cost of loading SSE registers
329 in SImode, DImode and TImode */
330 {4, 8, 16}, /* cost of storing SSE registers
331 in SImode, DImode and TImode */
332 3, /* MMX or SSE register to integer */
333 8, /* size of l1 cache. */
334 8, /* size of l2 cache */
335 0, /* size of prefetch block */
336 0, /* number of parallel prefetches */
337 2, /* Branch cost */
338 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
339 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
340 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
341 COSTS_N_INSNS (1), /* cost of FABS instruction. */
342 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
343 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
344 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
345 DUMMY_STRINGOP_ALGS},
346 {{libcall, {{-1, rep_prefix_4_byte}}},
347 DUMMY_STRINGOP_ALGS},
348 1, /* scalar_stmt_cost. */
349 1, /* scalar load_cost. */
350 1, /* scalar_store_cost. */
351 1, /* vec_stmt_cost. */
352 1, /* vec_to_scalar_cost. */
353 1, /* scalar_to_vec_cost. */
354 1, /* vec_align_load_cost. */
355 2, /* vec_unalign_load_cost. */
356 1, /* vec_store_cost. */
357 3, /* cond_taken_branch_cost. */
 358 1, /* cond_not_taken_branch_cost. */
 359 };
361 static const
362 struct processor_costs pentiumpro_cost = {
363 COSTS_N_INSNS (1), /* cost of an add instruction */
364 COSTS_N_INSNS (1), /* cost of a lea instruction */
365 COSTS_N_INSNS (1), /* variable shift costs */
366 COSTS_N_INSNS (1), /* constant shift costs */
367 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
368 COSTS_N_INSNS (4), /* HI */
369 COSTS_N_INSNS (4), /* SI */
370 COSTS_N_INSNS (4), /* DI */
371 COSTS_N_INSNS (4)}, /* other */
372 0, /* cost of multiply per each bit set */
373 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
374 COSTS_N_INSNS (17), /* HI */
375 COSTS_N_INSNS (17), /* SI */
376 COSTS_N_INSNS (17), /* DI */
377 COSTS_N_INSNS (17)}, /* other */
378 COSTS_N_INSNS (1), /* cost of movsx */
379 COSTS_N_INSNS (1), /* cost of movzx */
380 8, /* "large" insn */
381 6, /* MOVE_RATIO */
382 2, /* cost for loading QImode using movzbl */
383 {4, 4, 4}, /* cost of loading integer registers
384 in QImode, HImode and SImode.
385 Relative to reg-reg move (2). */
386 {2, 2, 2}, /* cost of storing integer registers */
387 2, /* cost of reg,reg fld/fst */
388 {2, 2, 6}, /* cost of loading fp registers
389 in SFmode, DFmode and XFmode */
390 {4, 4, 6}, /* cost of storing fp registers
391 in SFmode, DFmode and XFmode */
392 2, /* cost of moving MMX register */
393 {2, 2}, /* cost of loading MMX registers
394 in SImode and DImode */
395 {2, 2}, /* cost of storing MMX registers
396 in SImode and DImode */
397 2, /* cost of moving SSE register */
398 {2, 2, 8}, /* cost of loading SSE registers
399 in SImode, DImode and TImode */
400 {2, 2, 8}, /* cost of storing SSE registers
401 in SImode, DImode and TImode */
402 3, /* MMX or SSE register to integer */
403 8, /* size of l1 cache. */
404 256, /* size of l2 cache */
405 32, /* size of prefetch block */
406 6, /* number of parallel prefetches */
407 2, /* Branch cost */
408 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
409 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
410 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
411 COSTS_N_INSNS (2), /* cost of FABS instruction. */
412 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
413 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
 414 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we ensure
 415 the alignment). For small blocks an inline loop is still a noticeable win; for bigger
 416 blocks either rep movsl or rep movsb is the way to go. Rep movsb has apparently
 417 a more expensive startup time in the CPU, but after 4K the difference is down in the noise. */
419 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
420 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
421 DUMMY_STRINGOP_ALGS},
422 {{rep_prefix_4_byte, {{1024, unrolled_loop},
423 {8192, rep_prefix_4_byte}, {-1, libcall}}},
424 DUMMY_STRINGOP_ALGS},
425 1, /* scalar_stmt_cost. */
426 1, /* scalar load_cost. */
427 1, /* scalar_store_cost. */
428 1, /* vec_stmt_cost. */
429 1, /* vec_to_scalar_cost. */
430 1, /* scalar_to_vec_cost. */
431 1, /* vec_align_load_cost. */
432 2, /* vec_unalign_load_cost. */
433 1, /* vec_store_cost. */
434 3, /* cond_taken_branch_cost. */
 435 1, /* cond_not_taken_branch_cost. */
 436 };
438 static const
439 struct processor_costs geode_cost = {
440 COSTS_N_INSNS (1), /* cost of an add instruction */
441 COSTS_N_INSNS (1), /* cost of a lea instruction */
442 COSTS_N_INSNS (2), /* variable shift costs */
443 COSTS_N_INSNS (1), /* constant shift costs */
444 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
445 COSTS_N_INSNS (4), /* HI */
446 COSTS_N_INSNS (7), /* SI */
447 COSTS_N_INSNS (7), /* DI */
448 COSTS_N_INSNS (7)}, /* other */
449 0, /* cost of multiply per each bit set */
450 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
451 COSTS_N_INSNS (23), /* HI */
452 COSTS_N_INSNS (39), /* SI */
453 COSTS_N_INSNS (39), /* DI */
454 COSTS_N_INSNS (39)}, /* other */
455 COSTS_N_INSNS (1), /* cost of movsx */
456 COSTS_N_INSNS (1), /* cost of movzx */
457 8, /* "large" insn */
458 4, /* MOVE_RATIO */
459 1, /* cost for loading QImode using movzbl */
460 {1, 1, 1}, /* cost of loading integer registers
461 in QImode, HImode and SImode.
462 Relative to reg-reg move (2). */
463 {1, 1, 1}, /* cost of storing integer registers */
464 1, /* cost of reg,reg fld/fst */
465 {1, 1, 1}, /* cost of loading fp registers
466 in SFmode, DFmode and XFmode */
467 {4, 6, 6}, /* cost of storing fp registers
468 in SFmode, DFmode and XFmode */
470 1, /* cost of moving MMX register */
471 {1, 1}, /* cost of loading MMX registers
472 in SImode and DImode */
473 {1, 1}, /* cost of storing MMX registers
474 in SImode and DImode */
475 1, /* cost of moving SSE register */
476 {1, 1, 1}, /* cost of loading SSE registers
477 in SImode, DImode and TImode */
478 {1, 1, 1}, /* cost of storing SSE registers
479 in SImode, DImode and TImode */
480 1, /* MMX or SSE register to integer */
481 64, /* size of l1 cache. */
482 128, /* size of l2 cache. */
483 32, /* size of prefetch block */
484 1, /* number of parallel prefetches */
485 1, /* Branch cost */
486 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
487 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
488 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
489 COSTS_N_INSNS (1), /* cost of FABS instruction. */
490 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
491 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
492 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
493 DUMMY_STRINGOP_ALGS},
494 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
495 DUMMY_STRINGOP_ALGS},
496 1, /* scalar_stmt_cost. */
497 1, /* scalar load_cost. */
498 1, /* scalar_store_cost. */
499 1, /* vec_stmt_cost. */
500 1, /* vec_to_scalar_cost. */
501 1, /* scalar_to_vec_cost. */
502 1, /* vec_align_load_cost. */
503 2, /* vec_unalign_load_cost. */
504 1, /* vec_store_cost. */
505 3, /* cond_taken_branch_cost. */
 506 1, /* cond_not_taken_branch_cost. */
 507 };
509 static const
510 struct processor_costs k6_cost = {
511 COSTS_N_INSNS (1), /* cost of an add instruction */
512 COSTS_N_INSNS (2), /* cost of a lea instruction */
513 COSTS_N_INSNS (1), /* variable shift costs */
514 COSTS_N_INSNS (1), /* constant shift costs */
515 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
516 COSTS_N_INSNS (3), /* HI */
517 COSTS_N_INSNS (3), /* SI */
518 COSTS_N_INSNS (3), /* DI */
519 COSTS_N_INSNS (3)}, /* other */
520 0, /* cost of multiply per each bit set */
521 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
522 COSTS_N_INSNS (18), /* HI */
523 COSTS_N_INSNS (18), /* SI */
524 COSTS_N_INSNS (18), /* DI */
525 COSTS_N_INSNS (18)}, /* other */
526 COSTS_N_INSNS (2), /* cost of movsx */
527 COSTS_N_INSNS (2), /* cost of movzx */
528 8, /* "large" insn */
529 4, /* MOVE_RATIO */
530 3, /* cost for loading QImode using movzbl */
531 {4, 5, 4}, /* cost of loading integer registers
532 in QImode, HImode and SImode.
533 Relative to reg-reg move (2). */
534 {2, 3, 2}, /* cost of storing integer registers */
535 4, /* cost of reg,reg fld/fst */
536 {6, 6, 6}, /* cost of loading fp registers
537 in SFmode, DFmode and XFmode */
538 {4, 4, 4}, /* cost of storing fp registers
539 in SFmode, DFmode and XFmode */
540 2, /* cost of moving MMX register */
541 {2, 2}, /* cost of loading MMX registers
542 in SImode and DImode */
543 {2, 2}, /* cost of storing MMX registers
544 in SImode and DImode */
545 2, /* cost of moving SSE register */
546 {2, 2, 8}, /* cost of loading SSE registers
547 in SImode, DImode and TImode */
548 {2, 2, 8}, /* cost of storing SSE registers
549 in SImode, DImode and TImode */
550 6, /* MMX or SSE register to integer */
551 32, /* size of l1 cache. */
552 32, /* size of l2 cache. Some models
553 have integrated l2 cache, but
554 optimizing for k6 is not important
555 enough to worry about that. */
556 32, /* size of prefetch block */
557 1, /* number of parallel prefetches */
558 1, /* Branch cost */
559 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
560 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
561 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
562 COSTS_N_INSNS (2), /* cost of FABS instruction. */
563 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
564 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
565 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
566 DUMMY_STRINGOP_ALGS},
567 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
568 DUMMY_STRINGOP_ALGS},
569 1, /* scalar_stmt_cost. */
570 1, /* scalar load_cost. */
571 1, /* scalar_store_cost. */
572 1, /* vec_stmt_cost. */
573 1, /* vec_to_scalar_cost. */
574 1, /* scalar_to_vec_cost. */
575 1, /* vec_align_load_cost. */
576 2, /* vec_unalign_load_cost. */
577 1, /* vec_store_cost. */
578 3, /* cond_taken_branch_cost. */
 579 1, /* cond_not_taken_branch_cost. */
 580 };
582 static const
583 struct processor_costs athlon_cost = {
584 COSTS_N_INSNS (1), /* cost of an add instruction */
585 COSTS_N_INSNS (2), /* cost of a lea instruction */
586 COSTS_N_INSNS (1), /* variable shift costs */
587 COSTS_N_INSNS (1), /* constant shift costs */
588 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
589 COSTS_N_INSNS (5), /* HI */
590 COSTS_N_INSNS (5), /* SI */
591 COSTS_N_INSNS (5), /* DI */
592 COSTS_N_INSNS (5)}, /* other */
593 0, /* cost of multiply per each bit set */
594 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
595 COSTS_N_INSNS (26), /* HI */
596 COSTS_N_INSNS (42), /* SI */
597 COSTS_N_INSNS (74), /* DI */
598 COSTS_N_INSNS (74)}, /* other */
599 COSTS_N_INSNS (1), /* cost of movsx */
600 COSTS_N_INSNS (1), /* cost of movzx */
601 8, /* "large" insn */
602 9, /* MOVE_RATIO */
603 4, /* cost for loading QImode using movzbl */
604 {3, 4, 3}, /* cost of loading integer registers
605 in QImode, HImode and SImode.
606 Relative to reg-reg move (2). */
607 {3, 4, 3}, /* cost of storing integer registers */
608 4, /* cost of reg,reg fld/fst */
609 {4, 4, 12}, /* cost of loading fp registers
610 in SFmode, DFmode and XFmode */
611 {6, 6, 8}, /* cost of storing fp registers
612 in SFmode, DFmode and XFmode */
613 2, /* cost of moving MMX register */
614 {4, 4}, /* cost of loading MMX registers
615 in SImode and DImode */
616 {4, 4}, /* cost of storing MMX registers
617 in SImode and DImode */
618 2, /* cost of moving SSE register */
619 {4, 4, 6}, /* cost of loading SSE registers
620 in SImode, DImode and TImode */
621 {4, 4, 5}, /* cost of storing SSE registers
622 in SImode, DImode and TImode */
623 5, /* MMX or SSE register to integer */
624 64, /* size of l1 cache. */
625 256, /* size of l2 cache. */
626 64, /* size of prefetch block */
627 6, /* number of parallel prefetches */
628 5, /* Branch cost */
629 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
630 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
631 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
632 COSTS_N_INSNS (2), /* cost of FABS instruction. */
633 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
634 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 635 /* For some reason, Athlon deals better with the REP prefix (relative to loops)
 636 than K8 does. Alignment becomes important after 8 bytes for memcpy and
 637 128 bytes for memset. */
638 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
639 DUMMY_STRINGOP_ALGS},
640 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
641 DUMMY_STRINGOP_ALGS},
642 1, /* scalar_stmt_cost. */
643 1, /* scalar load_cost. */
644 1, /* scalar_store_cost. */
645 1, /* vec_stmt_cost. */
646 1, /* vec_to_scalar_cost. */
647 1, /* scalar_to_vec_cost. */
648 1, /* vec_align_load_cost. */
649 2, /* vec_unalign_load_cost. */
650 1, /* vec_store_cost. */
651 3, /* cond_taken_branch_cost. */
 652 1, /* cond_not_taken_branch_cost. */
 653 };
655 static const
656 struct processor_costs k8_cost = {
657 COSTS_N_INSNS (1), /* cost of an add instruction */
658 COSTS_N_INSNS (2), /* cost of a lea instruction */
659 COSTS_N_INSNS (1), /* variable shift costs */
660 COSTS_N_INSNS (1), /* constant shift costs */
661 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
662 COSTS_N_INSNS (4), /* HI */
663 COSTS_N_INSNS (3), /* SI */
664 COSTS_N_INSNS (4), /* DI */
665 COSTS_N_INSNS (5)}, /* other */
666 0, /* cost of multiply per each bit set */
667 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
668 COSTS_N_INSNS (26), /* HI */
669 COSTS_N_INSNS (42), /* SI */
670 COSTS_N_INSNS (74), /* DI */
671 COSTS_N_INSNS (74)}, /* other */
672 COSTS_N_INSNS (1), /* cost of movsx */
673 COSTS_N_INSNS (1), /* cost of movzx */
674 8, /* "large" insn */
675 9, /* MOVE_RATIO */
676 4, /* cost for loading QImode using movzbl */
677 {3, 4, 3}, /* cost of loading integer registers
678 in QImode, HImode and SImode.
679 Relative to reg-reg move (2). */
680 {3, 4, 3}, /* cost of storing integer registers */
681 4, /* cost of reg,reg fld/fst */
682 {4, 4, 12}, /* cost of loading fp registers
683 in SFmode, DFmode and XFmode */
684 {6, 6, 8}, /* cost of storing fp registers
685 in SFmode, DFmode and XFmode */
686 2, /* cost of moving MMX register */
687 {3, 3}, /* cost of loading MMX registers
688 in SImode and DImode */
689 {4, 4}, /* cost of storing MMX registers
690 in SImode and DImode */
691 2, /* cost of moving SSE register */
692 {4, 3, 6}, /* cost of loading SSE registers
693 in SImode, DImode and TImode */
694 {4, 4, 5}, /* cost of storing SSE registers
695 in SImode, DImode and TImode */
696 5, /* MMX or SSE register to integer */
697 64, /* size of l1 cache. */
698 512, /* size of l2 cache. */
699 64, /* size of prefetch block */
 700 /* New AMD processors never drop prefetches; if they cannot be performed
 701 immediately, they are queued. We set the number of simultaneous prefetches
 702 to a large constant to reflect this (it is probably not a good idea not
 703 to limit the number of prefetches at all, as their execution also takes some
 704 time). */
705 100, /* number of parallel prefetches */
706 3, /* Branch cost */
707 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
708 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
709 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
710 COSTS_N_INSNS (2), /* cost of FABS instruction. */
711 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
712 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 713 /* K8 has an optimized REP instruction for medium-sized blocks, but for very small
 714 blocks it is better to use a loop. For large blocks, a libcall can do
 715 nontemporal accesses and beat inline code considerably. */
716 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
717 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
718 {{libcall, {{8, loop}, {24, unrolled_loop},
719 {2048, rep_prefix_4_byte}, {-1, libcall}}},
720 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
721 4, /* scalar_stmt_cost. */
722 2, /* scalar load_cost. */
723 2, /* scalar_store_cost. */
724 5, /* vec_stmt_cost. */
725 0, /* vec_to_scalar_cost. */
726 2, /* scalar_to_vec_cost. */
727 2, /* vec_align_load_cost. */
728 3, /* vec_unalign_load_cost. */
729 3, /* vec_store_cost. */
730 3, /* cond_taken_branch_cost. */
 731 2, /* cond_not_taken_branch_cost. */
 732 };
734 struct processor_costs amdfam10_cost = {
735 COSTS_N_INSNS (1), /* cost of an add instruction */
736 COSTS_N_INSNS (2), /* cost of a lea instruction */
737 COSTS_N_INSNS (1), /* variable shift costs */
738 COSTS_N_INSNS (1), /* constant shift costs */
739 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
740 COSTS_N_INSNS (4), /* HI */
741 COSTS_N_INSNS (3), /* SI */
742 COSTS_N_INSNS (4), /* DI */
743 COSTS_N_INSNS (5)}, /* other */
744 0, /* cost of multiply per each bit set */
745 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
746 COSTS_N_INSNS (35), /* HI */
747 COSTS_N_INSNS (51), /* SI */
748 COSTS_N_INSNS (83), /* DI */
749 COSTS_N_INSNS (83)}, /* other */
750 COSTS_N_INSNS (1), /* cost of movsx */
751 COSTS_N_INSNS (1), /* cost of movzx */
752 8, /* "large" insn */
753 9, /* MOVE_RATIO */
754 4, /* cost for loading QImode using movzbl */
755 {3, 4, 3}, /* cost of loading integer registers
756 in QImode, HImode and SImode.
757 Relative to reg-reg move (2). */
758 {3, 4, 3}, /* cost of storing integer registers */
759 4, /* cost of reg,reg fld/fst */
760 {4, 4, 12}, /* cost of loading fp registers
761 in SFmode, DFmode and XFmode */
762 {6, 6, 8}, /* cost of storing fp registers
763 in SFmode, DFmode and XFmode */
764 2, /* cost of moving MMX register */
765 {3, 3}, /* cost of loading MMX registers
766 in SImode and DImode */
767 {4, 4}, /* cost of storing MMX registers
768 in SImode and DImode */
769 2, /* cost of moving SSE register */
770 {4, 4, 3}, /* cost of loading SSE registers
771 in SImode, DImode and TImode */
772 {4, 4, 5}, /* cost of storing SSE registers
773 in SImode, DImode and TImode */
774 3, /* MMX or SSE register to integer */
775 /* On K8
776 MOVD reg64, xmmreg Double FSTORE 4
777 MOVD reg32, xmmreg Double FSTORE 4
778 On AMDFAM10
779 MOVD reg64, xmmreg Double FADD 3
780 1/1 1/1
781 MOVD reg32, xmmreg Double FADD 3
782 1/1 1/1 */
783 64, /* size of l1 cache. */
784 512, /* size of l2 cache. */
785 64, /* size of prefetch block */
 786 /* New AMD processors never drop prefetches; if they cannot be performed
 787 immediately, they are queued. We set the number of simultaneous prefetches
 788 to a large constant to reflect this (it is probably not a good idea not
 789 to limit the number of prefetches at all, as their execution also takes some
 790 time). */
791 100, /* number of parallel prefetches */
792 2, /* Branch cost */
793 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
794 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
795 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
796 COSTS_N_INSNS (2), /* cost of FABS instruction. */
797 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
798 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 800 /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but for
 801 very small blocks it is better to use a loop. For large blocks, a libcall can
 802 do nontemporal accesses and beat inline code considerably. */
803 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
804 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
805 {{libcall, {{8, loop}, {24, unrolled_loop},
806 {2048, rep_prefix_4_byte}, {-1, libcall}}},
807 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
808 4, /* scalar_stmt_cost. */
809 2, /* scalar load_cost. */
810 2, /* scalar_store_cost. */
811 6, /* vec_stmt_cost. */
812 0, /* vec_to_scalar_cost. */
813 2, /* scalar_to_vec_cost. */
814 2, /* vec_align_load_cost. */
815 2, /* vec_unalign_load_cost. */
816 2, /* vec_store_cost. */
817 2, /* cond_taken_branch_cost. */
 818 1, /* cond_not_taken_branch_cost. */
 819 };
821 struct processor_costs bdver1_cost = {
822 COSTS_N_INSNS (1), /* cost of an add instruction */
823 COSTS_N_INSNS (2), /* cost of a lea instruction */
824 COSTS_N_INSNS (1), /* variable shift costs */
825 COSTS_N_INSNS (1), /* constant shift costs */
826 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
827 COSTS_N_INSNS (4), /* HI */
828 COSTS_N_INSNS (3), /* SI */
829 COSTS_N_INSNS (4), /* DI */
830 COSTS_N_INSNS (5)}, /* other */
831 0, /* cost of multiply per each bit set */
832 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
833 COSTS_N_INSNS (35), /* HI */
834 COSTS_N_INSNS (51), /* SI */
835 COSTS_N_INSNS (83), /* DI */
836 COSTS_N_INSNS (83)}, /* other */
837 COSTS_N_INSNS (1), /* cost of movsx */
838 COSTS_N_INSNS (1), /* cost of movzx */
839 8, /* "large" insn */
840 9, /* MOVE_RATIO */
841 4, /* cost for loading QImode using movzbl */
842 {3, 4, 3}, /* cost of loading integer registers
843 in QImode, HImode and SImode.
844 Relative to reg-reg move (2). */
845 {3, 4, 3}, /* cost of storing integer registers */
846 4, /* cost of reg,reg fld/fst */
847 {4, 4, 12}, /* cost of loading fp registers
848 in SFmode, DFmode and XFmode */
849 {6, 6, 8}, /* cost of storing fp registers
850 in SFmode, DFmode and XFmode */
851 2, /* cost of moving MMX register */
852 {3, 3}, /* cost of loading MMX registers
853 in SImode and DImode */
854 {4, 4}, /* cost of storing MMX registers
855 in SImode and DImode */
856 2, /* cost of moving SSE register */
857 {4, 4, 3}, /* cost of loading SSE registers
858 in SImode, DImode and TImode */
859 {4, 4, 5}, /* cost of storing SSE registers
860 in SImode, DImode and TImode */
861 3, /* MMX or SSE register to integer */
862 /* On K8
863 MOVD reg64, xmmreg Double FSTORE 4
864 MOVD reg32, xmmreg Double FSTORE 4
865 On AMDFAM10
866 MOVD reg64, xmmreg Double FADD 3
867 1/1 1/1
868 MOVD reg32, xmmreg Double FADD 3
869 1/1 1/1 */
870 64, /* size of l1 cache. */
871 1024, /* size of l2 cache. */
872 64, /* size of prefetch block */
 873 /* New AMD processors never drop prefetches; if they cannot be performed
 874 immediately, they are queued. We set the number of simultaneous prefetches
 875 to a large constant to reflect this (it is probably not a good idea not
 876 to limit the number of prefetches at all, as their execution also takes some
 877 time). */
878 100, /* number of parallel prefetches */
879 2, /* Branch cost */
880 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
881 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
882 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
883 COSTS_N_INSNS (2), /* cost of FABS instruction. */
884 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
885 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
 887 /* BDVER1 has an optimized REP instruction for medium-sized blocks, but for
 888 very small blocks it is better to use a loop. For large blocks, a libcall can
 889 do nontemporal accesses and beat inline code considerably. */
890 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
891 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
892 {{libcall, {{8, loop}, {24, unrolled_loop},
893 {2048, rep_prefix_4_byte}, {-1, libcall}}},
894 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
895 4, /* scalar_stmt_cost. */
896 2, /* scalar load_cost. */
897 2, /* scalar_store_cost. */
898 6, /* vec_stmt_cost. */
899 0, /* vec_to_scalar_cost. */
900 2, /* scalar_to_vec_cost. */
901 2, /* vec_align_load_cost. */
902 2, /* vec_unalign_load_cost. */
903 2, /* vec_store_cost. */
904 2, /* cond_taken_branch_cost. */
 905 1, /* cond_not_taken_branch_cost. */
 906 };
908 static const
909 struct processor_costs pentium4_cost = {
910 COSTS_N_INSNS (1), /* cost of an add instruction */
911 COSTS_N_INSNS (3), /* cost of a lea instruction */
912 COSTS_N_INSNS (4), /* variable shift costs */
913 COSTS_N_INSNS (4), /* constant shift costs */
914 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
915 COSTS_N_INSNS (15), /* HI */
916 COSTS_N_INSNS (15), /* SI */
917 COSTS_N_INSNS (15), /* DI */
918 COSTS_N_INSNS (15)}, /* other */
919 0, /* cost of multiply per each bit set */
920 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
921 COSTS_N_INSNS (56), /* HI */
922 COSTS_N_INSNS (56), /* SI */
923 COSTS_N_INSNS (56), /* DI */
924 COSTS_N_INSNS (56)}, /* other */
925 COSTS_N_INSNS (1), /* cost of movsx */
926 COSTS_N_INSNS (1), /* cost of movzx */
927 16, /* "large" insn */
928 6, /* MOVE_RATIO */
929 2, /* cost for loading QImode using movzbl */
930 {4, 5, 4}, /* cost of loading integer registers
931 in QImode, HImode and SImode.
932 Relative to reg-reg move (2). */
933 {2, 3, 2}, /* cost of storing integer registers */
934 2, /* cost of reg,reg fld/fst */
935 {2, 2, 6}, /* cost of loading fp registers
936 in SFmode, DFmode and XFmode */
937 {4, 4, 6}, /* cost of storing fp registers
938 in SFmode, DFmode and XFmode */
939 2, /* cost of moving MMX register */
940 {2, 2}, /* cost of loading MMX registers
941 in SImode and DImode */
942 {2, 2}, /* cost of storing MMX registers
943 in SImode and DImode */
944 12, /* cost of moving SSE register */
945 {12, 12, 12}, /* cost of loading SSE registers
946 in SImode, DImode and TImode */
947 {2, 2, 8}, /* cost of storing SSE registers
948 in SImode, DImode and TImode */
949 10, /* MMX or SSE register to integer */
950 8, /* size of l1 cache. */
951 256, /* size of l2 cache. */
952 64, /* size of prefetch block */
953 6, /* number of parallel prefetches */
954 2, /* Branch cost */
955 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
956 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
957 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
958 COSTS_N_INSNS (2), /* cost of FABS instruction. */
959 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
960 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
961 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
962 DUMMY_STRINGOP_ALGS},
963 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
964 {-1, libcall}}},
965 DUMMY_STRINGOP_ALGS},
966 1, /* scalar_stmt_cost. */
967 1, /* scalar load_cost. */
968 1, /* scalar_store_cost. */
969 1, /* vec_stmt_cost. */
970 1, /* vec_to_scalar_cost. */
971 1, /* scalar_to_vec_cost. */
972 1, /* vec_align_load_cost. */
973 2, /* vec_unalign_load_cost. */
974 1, /* vec_store_cost. */
975 3, /* cond_taken_branch_cost. */
 976 1, /* cond_not_taken_branch_cost. */
 977 };
979 static const
980 struct processor_costs nocona_cost = {
981 COSTS_N_INSNS (1), /* cost of an add instruction */
982 COSTS_N_INSNS (1), /* cost of a lea instruction */
983 COSTS_N_INSNS (1), /* variable shift costs */
984 COSTS_N_INSNS (1), /* constant shift costs */
985 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
986 COSTS_N_INSNS (10), /* HI */
987 COSTS_N_INSNS (10), /* SI */
988 COSTS_N_INSNS (10), /* DI */
989 COSTS_N_INSNS (10)}, /* other */
990 0, /* cost of multiply per each bit set */
991 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
992 COSTS_N_INSNS (66), /* HI */
993 COSTS_N_INSNS (66), /* SI */
994 COSTS_N_INSNS (66), /* DI */
995 COSTS_N_INSNS (66)}, /* other */
996 COSTS_N_INSNS (1), /* cost of movsx */
997 COSTS_N_INSNS (1), /* cost of movzx */
998 16, /* "large" insn */
999 17, /* MOVE_RATIO */
1000 4, /* cost for loading QImode using movzbl */
1001 {4, 4, 4}, /* cost of loading integer registers
1002 in QImode, HImode and SImode.
1003 Relative to reg-reg move (2). */
1004 {4, 4, 4}, /* cost of storing integer registers */
1005 3, /* cost of reg,reg fld/fst */
1006 {12, 12, 12}, /* cost of loading fp registers
1007 in SFmode, DFmode and XFmode */
1008 {4, 4, 4}, /* cost of storing fp registers
1009 in SFmode, DFmode and XFmode */
1010 6, /* cost of moving MMX register */
1011 {12, 12}, /* cost of loading MMX registers
1012 in SImode and DImode */
1013 {12, 12}, /* cost of storing MMX registers
1014 in SImode and DImode */
1015 6, /* cost of moving SSE register */
1016 {12, 12, 12}, /* cost of loading SSE registers
1017 in SImode, DImode and TImode */
1018 {12, 12, 12}, /* cost of storing SSE registers
1019 in SImode, DImode and TImode */
1020 8, /* MMX or SSE register to integer */
1021 8, /* size of l1 cache. */
1022 1024, /* size of l2 cache. */
1023 128, /* size of prefetch block */
1024 8, /* number of parallel prefetches */
1025 1, /* Branch cost */
1026 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
1027 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1028 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
1029 COSTS_N_INSNS (3), /* cost of FABS instruction. */
1030 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
1031 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
1032 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
1033 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
1034 {100000, unrolled_loop}, {-1, libcall}}}},
1035 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
1036 {-1, libcall}}},
1037 {libcall, {{24, loop}, {64, unrolled_loop},
1038 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1039 1, /* scalar_stmt_cost. */
1040 1, /* scalar load_cost. */
1041 1, /* scalar_store_cost. */
1042 1, /* vec_stmt_cost. */
1043 1, /* vec_to_scalar_cost. */
1044 1, /* scalar_to_vec_cost. */
1045 1, /* vec_align_load_cost. */
1046 2, /* vec_unalign_load_cost. */
1047 1, /* vec_store_cost. */
1048 3, /* cond_taken_branch_cost. */
 1049 1, /* cond_not_taken_branch_cost. */
 1050 };
1052 static const
1053 struct processor_costs core2_cost = {
1054 COSTS_N_INSNS (1), /* cost of an add instruction */
1055 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1056 COSTS_N_INSNS (1), /* variable shift costs */
1057 COSTS_N_INSNS (1), /* constant shift costs */
1058 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1059 COSTS_N_INSNS (3), /* HI */
1060 COSTS_N_INSNS (3), /* SI */
1061 COSTS_N_INSNS (3), /* DI */
1062 COSTS_N_INSNS (3)}, /* other */
1063 0, /* cost of multiply per each bit set */
1064 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
1065 COSTS_N_INSNS (22), /* HI */
1066 COSTS_N_INSNS (22), /* SI */
1067 COSTS_N_INSNS (22), /* DI */
1068 COSTS_N_INSNS (22)}, /* other */
1069 COSTS_N_INSNS (1), /* cost of movsx */
1070 COSTS_N_INSNS (1), /* cost of movzx */
1071 8, /* "large" insn */
1072 16, /* MOVE_RATIO */
1073 2, /* cost for loading QImode using movzbl */
1074 {6, 6, 6}, /* cost of loading integer registers
1075 in QImode, HImode and SImode.
1076 Relative to reg-reg move (2). */
1077 {4, 4, 4}, /* cost of storing integer registers */
1078 2, /* cost of reg,reg fld/fst */
1079 {6, 6, 6}, /* cost of loading fp registers
1080 in SFmode, DFmode and XFmode */
1081 {4, 4, 4}, /* cost of storing fp registers
1082 in SFmode, DFmode and XFmode */
1083 2, /* cost of moving MMX register */
1084 {6, 6}, /* cost of loading MMX registers
1085 in SImode and DImode */
1086 {4, 4}, /* cost of storing MMX registers
1087 in SImode and DImode */
1088 2, /* cost of moving SSE register */
1089 {6, 6, 6}, /* cost of loading SSE registers
1090 in SImode, DImode and TImode */
1091 {4, 4, 4}, /* cost of storing SSE registers
1092 in SImode, DImode and TImode */
1093 2, /* MMX or SSE register to integer */
1094 32, /* size of l1 cache. */
1095 2048, /* size of l2 cache. */
1096 128, /* size of prefetch block */
1097 8, /* number of parallel prefetches */
1098 3, /* Branch cost */
1099 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
1100 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
1101 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
1102 COSTS_N_INSNS (1), /* cost of FABS instruction. */
1103 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
1104 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
1105 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1106 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1107 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1108 {{libcall, {{8, loop}, {15, unrolled_loop},
1109 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1110 {libcall, {{24, loop}, {32, unrolled_loop},
1111 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1112 1, /* scalar_stmt_cost. */
1113 1, /* scalar load_cost. */
1114 1, /* scalar_store_cost. */
1115 1, /* vec_stmt_cost. */
1116 1, /* vec_to_scalar_cost. */
1117 1, /* scalar_to_vec_cost. */
1118 1, /* vec_align_load_cost. */
1119 2, /* vec_unalign_load_cost. */
1120 1, /* vec_store_cost. */
1121 3, /* cond_taken_branch_cost. */
 1122 1, /* cond_not_taken_branch_cost. */
 1123 };
1125 static const
1126 struct processor_costs atom_cost = {
1127 COSTS_N_INSNS (1), /* cost of an add instruction */
1128 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1129 COSTS_N_INSNS (1), /* variable shift costs */
1130 COSTS_N_INSNS (1), /* constant shift costs */
1131 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1132 COSTS_N_INSNS (4), /* HI */
1133 COSTS_N_INSNS (3), /* SI */
1134 COSTS_N_INSNS (4), /* DI */
1135 COSTS_N_INSNS (2)}, /* other */
1136 0, /* cost of multiply per each bit set */
1137 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1138 COSTS_N_INSNS (26), /* HI */
1139 COSTS_N_INSNS (42), /* SI */
1140 COSTS_N_INSNS (74), /* DI */
1141 COSTS_N_INSNS (74)}, /* other */
1142 COSTS_N_INSNS (1), /* cost of movsx */
1143 COSTS_N_INSNS (1), /* cost of movzx */
1144 8, /* "large" insn */
1145 17, /* MOVE_RATIO */
1146 2, /* cost for loading QImode using movzbl */
1147 {4, 4, 4}, /* cost of loading integer registers
1148 in QImode, HImode and SImode.
1149 Relative to reg-reg move (2). */
1150 {4, 4, 4}, /* cost of storing integer registers */
1151 4, /* cost of reg,reg fld/fst */
1152 {12, 12, 12}, /* cost of loading fp registers
1153 in SFmode, DFmode and XFmode */
1154 {6, 6, 8}, /* cost of storing fp registers
1155 in SFmode, DFmode and XFmode */
1156 2, /* cost of moving MMX register */
1157 {8, 8}, /* cost of loading MMX registers
1158 in SImode and DImode */
1159 {8, 8}, /* cost of storing MMX registers
1160 in SImode and DImode */
1161 2, /* cost of moving SSE register */
1162 {8, 8, 8}, /* cost of loading SSE registers
1163 in SImode, DImode and TImode */
1164 {8, 8, 8}, /* cost of storing SSE registers
1165 in SImode, DImode and TImode */
1166 5, /* MMX or SSE register to integer */
1167 32, /* size of l1 cache. */
1168 256, /* size of l2 cache. */
1169 64, /* size of prefetch block */
1170 6, /* number of parallel prefetches */
1171 3, /* Branch cost */
1172 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1173 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1174 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1175 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1176 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1177 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1178 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1179 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1180 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1181 {{libcall, {{8, loop}, {15, unrolled_loop},
1182 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1183 {libcall, {{24, loop}, {32, unrolled_loop},
1184 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1185 1, /* scalar_stmt_cost. */
1186 1, /* scalar load_cost. */
1187 1, /* scalar_store_cost. */
1188 1, /* vec_stmt_cost. */
1189 1, /* vec_to_scalar_cost. */
1190 1, /* scalar_to_vec_cost. */
1191 1, /* vec_align_load_cost. */
1192 2, /* vec_unalign_load_cost. */
1193 1, /* vec_store_cost. */
1194 3, /* cond_taken_branch_cost. */
 1195 1, /* cond_not_taken_branch_cost. */
 1196 };
1198 /* Generic64 should produce code tuned for Nocona and K8. */
1199 static const
1200 struct processor_costs generic64_cost = {
1201 COSTS_N_INSNS (1), /* cost of an add instruction */
 1202 /* On all chips taken into consideration, lea is 2 cycles or more. With
 1203 this cost, however, our current implementation of synth_mult results in
 1204 the use of unnecessary temporary registers, causing a regression on several
 1205 SPECfp benchmarks. */
1206 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1207 COSTS_N_INSNS (1), /* variable shift costs */
1208 COSTS_N_INSNS (1), /* constant shift costs */
1209 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1210 COSTS_N_INSNS (4), /* HI */
1211 COSTS_N_INSNS (3), /* SI */
1212 COSTS_N_INSNS (4), /* DI */
1213 COSTS_N_INSNS (2)}, /* other */
1214 0, /* cost of multiply per each bit set */
1215 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1216 COSTS_N_INSNS (26), /* HI */
1217 COSTS_N_INSNS (42), /* SI */
1218 COSTS_N_INSNS (74), /* DI */
1219 COSTS_N_INSNS (74)}, /* other */
1220 COSTS_N_INSNS (1), /* cost of movsx */
1221 COSTS_N_INSNS (1), /* cost of movzx */
1222 8, /* "large" insn */
1223 17, /* MOVE_RATIO */
1224 4, /* cost for loading QImode using movzbl */
1225 {4, 4, 4}, /* cost of loading integer registers
1226 in QImode, HImode and SImode.
1227 Relative to reg-reg move (2). */
1228 {4, 4, 4}, /* cost of storing integer registers */
1229 4, /* cost of reg,reg fld/fst */
1230 {12, 12, 12}, /* cost of loading fp registers
1231 in SFmode, DFmode and XFmode */
1232 {6, 6, 8}, /* cost of storing fp registers
1233 in SFmode, DFmode and XFmode */
1234 2, /* cost of moving MMX register */
1235 {8, 8}, /* cost of loading MMX registers
1236 in SImode and DImode */
1237 {8, 8}, /* cost of storing MMX registers
1238 in SImode and DImode */
1239 2, /* cost of moving SSE register */
1240 {8, 8, 8}, /* cost of loading SSE registers
1241 in SImode, DImode and TImode */
1242 {8, 8, 8}, /* cost of storing SSE registers
1243 in SImode, DImode and TImode */
1244 5, /* MMX or SSE register to integer */
1245 32, /* size of l1 cache. */
1246 512, /* size of l2 cache. */
1247 64, /* size of prefetch block */
1248 6, /* number of parallel prefetches */
 1249 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this value
 1250 is increased to the perhaps more appropriate value of 5. */
1251 3, /* Branch cost */
1252 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1253 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1254 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1255 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1256 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1257 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1258 {DUMMY_STRINGOP_ALGS,
1259 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1260 {DUMMY_STRINGOP_ALGS,
1261 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1262 1, /* scalar_stmt_cost. */
1263 1, /* scalar load_cost. */
1264 1, /* scalar_store_cost. */
1265 1, /* vec_stmt_cost. */
1266 1, /* vec_to_scalar_cost. */
1267 1, /* scalar_to_vec_cost. */
1268 1, /* vec_align_load_cost. */
1269 2, /* vec_unalign_load_cost. */
1270 1, /* vec_store_cost. */
1271 3, /* cond_taken_branch_cost. */
 1272 1, /* cond_not_taken_branch_cost. */
 1273 };
1275 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
1276 static const
1277 struct processor_costs generic32_cost = {
1278 COSTS_N_INSNS (1), /* cost of an add instruction */
1279 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1280 COSTS_N_INSNS (1), /* variable shift costs */
1281 COSTS_N_INSNS (1), /* constant shift costs */
1282 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1283 COSTS_N_INSNS (4), /* HI */
1284 COSTS_N_INSNS (3), /* SI */
1285 COSTS_N_INSNS (4), /* DI */
1286 COSTS_N_INSNS (2)}, /* other */
1287 0, /* cost of multiply per each bit set */
1288 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1289 COSTS_N_INSNS (26), /* HI */
1290 COSTS_N_INSNS (42), /* SI */
1291 COSTS_N_INSNS (74), /* DI */
1292 COSTS_N_INSNS (74)}, /* other */
1293 COSTS_N_INSNS (1), /* cost of movsx */
1294 COSTS_N_INSNS (1), /* cost of movzx */
1295 8, /* "large" insn */
1296 17, /* MOVE_RATIO */
1297 4, /* cost for loading QImode using movzbl */
1298 {4, 4, 4}, /* cost of loading integer registers
1299 in QImode, HImode and SImode.
1300 Relative to reg-reg move (2). */
1301 {4, 4, 4}, /* cost of storing integer registers */
1302 4, /* cost of reg,reg fld/fst */
1303 {12, 12, 12}, /* cost of loading fp registers
1304 in SFmode, DFmode and XFmode */
1305 {6, 6, 8}, /* cost of storing fp registers
1306 in SFmode, DFmode and XFmode */
1307 2, /* cost of moving MMX register */
1308 {8, 8}, /* cost of loading MMX registers
1309 in SImode and DImode */
1310 {8, 8}, /* cost of storing MMX registers
1311 in SImode and DImode */
1312 2, /* cost of moving SSE register */
1313 {8, 8, 8}, /* cost of loading SSE registers
1314 in SImode, DImode and TImode */
1315 {8, 8, 8}, /* cost of storing SSE registers
1316 in SImode, DImode and TImode */
1317 5, /* MMX or SSE register to integer */
1318 32, /* size of l1 cache. */
1319 256, /* size of l2 cache. */
1320 64, /* size of prefetch block */
1321 6, /* number of parallel prefetches */
1322 3, /* Branch cost */
1323 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1324 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1325 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1326 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1327 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1328 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1329 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1330 DUMMY_STRINGOP_ALGS},
1331 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1332 DUMMY_STRINGOP_ALGS},
1333 1, /* scalar_stmt_cost. */
1334 1, /* scalar load_cost. */
1335 1, /* scalar_store_cost. */
1336 1, /* vec_stmt_cost. */
1337 1, /* vec_to_scalar_cost. */
1338 1, /* scalar_to_vec_cost. */
1339 1, /* vec_align_load_cost. */
1340 2, /* vec_unalign_load_cost. */
1341 1, /* vec_store_cost. */
1342 3, /* cond_taken_branch_cost. */
 1343 1, /* cond_not_taken_branch_cost. */
 1344 };
1346 const struct processor_costs *ix86_cost = &pentium_cost;
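/* Informal note: pentium_cost is only the build-time default here.  The
   option-handling code later repoints ix86_cost at the processor_costs table
   matching the selected -mtune CPU (and at ix86_size_cost above when tuning
   for size), so the rtx-cost and move-cost queries in this file all go
   through this single pointer.  */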
1348 /* Processor feature/optimization bitmasks. */
1349 #define m_386 (1<<PROCESSOR_I386)
1350 #define m_486 (1<<PROCESSOR_I486)
1351 #define m_PENT (1<<PROCESSOR_PENTIUM)
1352 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1353 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1354 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1355 #define m_CORE2 (1<<PROCESSOR_CORE2)
1356 #define m_ATOM (1<<PROCESSOR_ATOM)
1358 #define m_GEODE (1<<PROCESSOR_GEODE)
1359 #define m_K6 (1<<PROCESSOR_K6)
1360 #define m_K6_GEODE (m_K6 | m_GEODE)
1361 #define m_K8 (1<<PROCESSOR_K8)
1362 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1363 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1364 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1365 #define m_BDVER1 (1<<PROCESSOR_BDVER1)
1366 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10 | m_BDVER1)
1368 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1369 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
 1371 /* Generic instruction choice should be a common subset of the supported CPUs
 1372 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1373 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
1375 /* Feature tests against the various tunings. */
1376 unsigned char ix86_tune_features[X86_TUNE_LAST];
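/* Illustrative sketch of how the m_* masks above become per-feature booleans.
   The option-override code does roughly the following (the loop below is a
   simplified illustration, not a copy of the real code):

     unsigned int tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; i++)
       ix86_tune_features[i]
         = (initial_ix86_tune_features[i] & tune_mask) != 0;

   so each entry of initial_ix86_tune_features below is simply a bitmask of the
   PROCESSOR_* values for which that tuning knob should be enabled.  */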
1378 /* Feature tests against the various tunings used to create ix86_tune_features
1379 based on the processor mask. */
1380 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
 1381 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
 1382 negatively, so enabling it for Generic64 seems like a good code-size
 1383 tradeoff. We can't enable it for 32-bit generic because it does not
 1384 work well with PPro-based chips. */
1385 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1387 /* X86_TUNE_PUSH_MEMORY */
1388 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1389 | m_NOCONA | m_CORE2 | m_GENERIC,
1391 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1392 m_486 | m_PENT,
1394 /* X86_TUNE_UNROLL_STRLEN */
1395 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1396 | m_CORE2 | m_GENERIC,
1398 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1399 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
 1401 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
 1402 on simulation results. But after P4 was made, no performance benefit
 1403 was observed with branch hints. It also increases code size.
 1404 As a result, icc never generates branch hints. */
 1405 0,
1407 /* X86_TUNE_DOUBLE_WITH_ADD */
1408 ~m_386,
1410 /* X86_TUNE_USE_SAHF */
1411 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER1 | m_PENT4
1412 | m_NOCONA | m_CORE2 | m_GENERIC,
1414 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1415 partial dependencies. */
1416 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1417 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1419 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1420 register stalls on the Generic32 compilation setting as well. However,
1421 in the current implementation the partial register stalls are not eliminated
1422 very well - they can be introduced via subregs synthesized by combine
1423 and can happen in caller/callee saving sequences. Because this option
1424 pays back little on PPro based chips and is in conflict with partial reg
1425 dependencies used by Athlon/P4 based chips, it is better to leave it off
1426 for generic32 for now. */
1427 m_PPRO,
1429 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1430 m_CORE2 | m_GENERIC,
1432 /* X86_TUNE_USE_HIMODE_FIOP */
1433 m_386 | m_486 | m_K6_GEODE,
1435 /* X86_TUNE_USE_SIMODE_FIOP */
1436 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1438 /* X86_TUNE_USE_MOV0 */
1439 m_K6,
1441 /* X86_TUNE_USE_CLTD */
1442 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1444 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1445 m_PENT4,
1447 /* X86_TUNE_SPLIT_LONG_MOVES */
1448 m_PPRO,
1450 /* X86_TUNE_READ_MODIFY_WRITE */
1451 ~m_PENT,
1453 /* X86_TUNE_READ_MODIFY */
1454 ~(m_PENT | m_PPRO),
1456 /* X86_TUNE_PROMOTE_QIMODE */
1457 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1458 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1460 /* X86_TUNE_FAST_PREFIX */
1461 ~(m_PENT | m_486 | m_386),
1463 /* X86_TUNE_SINGLE_STRINGOP */
1464 m_386 | m_PENT4 | m_NOCONA,
1466 /* X86_TUNE_QIMODE_MATH */
1469 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1470 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1471 might be considered for Generic32 if our scheme for avoiding partial
1472 stalls was more effective. */
1473 ~m_PPRO,
1475 /* X86_TUNE_PROMOTE_QI_REGS */
1476 0,
1478 /* X86_TUNE_PROMOTE_HI_REGS */
1479 m_PPRO,
1481 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1482 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1483 | m_CORE2 | m_GENERIC,
1485 /* X86_TUNE_ADD_ESP_8 */
1486 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1487 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1489 /* X86_TUNE_SUB_ESP_4 */
1490 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1491 | m_GENERIC,
1493 /* X86_TUNE_SUB_ESP_8 */
1494 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1495 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1497 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1498 for DFmode copies */
1499 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1500 | m_GENERIC | m_GEODE),
1502 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1503 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1505 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1506 conflict here between PPro/Pentium4 based chips that treat 128bit
1507 SSE registers as single units versus K8 based chips that divide SSE
1508 registers into two 64bit halves. This knob promotes all store destinations
1509 to be 128bit to allow register renaming on 128bit SSE units, but usually
1510 results in one extra microop on 64bit SSE units. Experimental results
1511 show that disabling this option on P4 brings over 20% SPECfp regression,
1512 while enabling it on K8 brings roughly 2.4% regression that can be partly
1513 masked by careful scheduling of moves. */
1514 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1515 | m_AMDFAM10 | m_BDVER1,
1517 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
1518 m_AMDFAM10 | m_BDVER1,
1520 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
1521 m_BDVER1,
1523 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
1524 m_BDVER1,
1526 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1527 are resolved on SSE register parts instead of whole registers, so we may
1528 maintain just the lower part of scalar values in the proper format, leaving
1529 the upper part undefined. */
1530 m_ATHLON_K8,
1532 /* X86_TUNE_SSE_TYPELESS_STORES */
1533 m_AMD_MULTIPLE,
1535 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1536 m_PPRO | m_PENT4 | m_NOCONA,
1538 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1539 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1541 /* X86_TUNE_PROLOGUE_USING_MOVE */
1542 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1544 /* X86_TUNE_EPILOGUE_USING_MOVE */
1545 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1547 /* X86_TUNE_SHIFT1 */
1548 ~m_486,
1550 /* X86_TUNE_USE_FFREEP */
1551 m_AMD_MULTIPLE,
1553 /* X86_TUNE_INTER_UNIT_MOVES */
1554 ~(m_AMD_MULTIPLE | m_GENERIC),
1556 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1557 ~(m_AMDFAM10 | m_BDVER1),
1559 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1560 than 4 branch instructions in the 16 byte window. */
1561 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1562 | m_GENERIC,
1564 /* X86_TUNE_SCHEDULE */
1565 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1566 | m_GENERIC,
1568 /* X86_TUNE_USE_BT */
1569 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1571 /* X86_TUNE_USE_INCDEC */
1572 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1574 /* X86_TUNE_PAD_RETURNS */
1575 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1577 /* X86_TUNE_EXT_80387_CONSTANTS */
1578 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1579 | m_CORE2 | m_GENERIC,
1581 /* X86_TUNE_SHORTEN_X87_SSE */
1582 ~m_K8,
1584 /* X86_TUNE_AVOID_VECTOR_DECODE */
1585 m_K8 | m_GENERIC64,
1587 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for HImode
1588 and SImode multiply, but 386 and 486 do HImode multiply faster. */
1589 ~(m_386 | m_486),
1591 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1592 vector path on AMD machines. */
1593 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1595 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1596 machines. */
1597 m_K8 | m_GENERIC64 | m_AMDFAM10 | m_BDVER1,
1599 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
1600 than a MOV. */
1601 m_PENT,
1603 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1604 but one byte longer. */
1605 m_PENT,
1607 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
1608 operand that cannot be represented using a modRM byte. The XOR
1609 replacement is long decoded, so this split helps here as well. */
1610 m_K6,
1612 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
1613 from FP to FP. */
1614 m_AMDFAM10 | m_GENERIC,
1616 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1617 from integer to FP. */
1618 m_AMDFAM10,
1620 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1621 with a subsequent conditional jump instruction into a single
1622 compare-and-branch uop. */
1623 m_CORE2 | m_BDVER1,
1625 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1626 will impact LEA instruction selection. */
1627 m_ATOM,
1628 };
1630 /* Feature tests against the various architecture variations. */
1631 unsigned char ix86_arch_features[X86_ARCH_LAST];
1633 /* Feature tests against the various architecture variations, used to create
1634 ix86_arch_features based on the processor mask. */
1635 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1636 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1637 ~(m_386 | m_486 | m_PENT | m_K6),
1639 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1640 ~m_386,
1642 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1643 ~(m_386 | m_486),
1645 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1646 ~m_386,
1648 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1649 ~m_386,
1650 };
1652 static const unsigned int x86_accumulate_outgoing_args
1653 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1654 | m_GENERIC;
1656 static const unsigned int x86_arch_always_fancy_math_387
1657 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1658 | m_NOCONA | m_CORE2 | m_GENERIC;
1660 static enum stringop_alg stringop_alg = no_stringop;
1662 /* In case the average insn count for a single function invocation is
1663 lower than this constant, emit fast (but longer) prologue and
1664 epilogue code. */
1665 #define FAST_PROLOGUE_INSN_COUNT 20
1667 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
1668 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1669 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1670 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1672 /* Array of the smallest class containing reg number REGNO, indexed by
1673 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1675 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1676 {
1677 /* ax, dx, cx, bx */
1678 AREG, DREG, CREG, BREG,
1679 /* si, di, bp, sp */
1680 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1681 /* FP registers */
1682 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1683 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1684 /* arg pointer */
1685 NON_Q_REGS,
1686 /* flags, fpsr, fpcr, frame */
1687 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1688 /* SSE registers */
1689 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1690 SSE_REGS, SSE_REGS,
1691 /* MMX registers */
1692 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1693 MMX_REGS, MMX_REGS,
1694 /* REX registers */
1695 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1696 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1697 /* SSE REX registers */
1698 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1699 SSE_REGS, SSE_REGS,
1700 };
1702 /* The "default" register map used in 32bit mode. */
1704 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1705 {
1706 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1707 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1708 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1709 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1710 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1711 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1712 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1713 };
1715 /* The "default" register map used in 64bit mode. */
1717 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1718 {
1719 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1720 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1721 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1722 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1723 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1724 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
1725 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1726 };
1728 /* Define the register numbers to be used in Dwarf debugging information.
1729 The SVR4 reference port C compiler uses the following register numbers
1730 in its Dwarf output code:
1731 0 for %eax (gcc regno = 0)
1732 1 for %ecx (gcc regno = 2)
1733 2 for %edx (gcc regno = 1)
1734 3 for %ebx (gcc regno = 3)
1735 4 for %esp (gcc regno = 7)
1736 5 for %ebp (gcc regno = 6)
1737 6 for %esi (gcc regno = 4)
1738 7 for %edi (gcc regno = 5)
1739 The following three DWARF register numbers are never generated by
1740 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1741 believes these numbers have these meanings.
1742 8 for %eip (no gcc equivalent)
1743 9 for %eflags (gcc regno = 17)
1744 10 for %trapno (no gcc equivalent)
1745 It is not at all clear how we should number the FP stack registers
1746 for the x86 architecture. If the version of SDB on x86/svr4 were
1747 a bit less brain dead with respect to floating-point then we would
1748 have a precedent to follow with respect to DWARF register numbers
1749 for x86 FP registers, but the SDB on x86/svr4 is so completely
1750 broken with respect to FP registers that it is hardly worth thinking
1751 of it as something to strive for compatibility with.
1752 The version of x86/svr4 SDB I have at the moment does (partially)
1753 seem to believe that DWARF register number 11 is associated with
1754 the x86 register %st(0), but that's about all. Higher DWARF
1755 register numbers don't seem to be associated with anything in
1756 particular, and even for DWARF regno 11, SDB only seems to under-
1757 stand that it should say that a variable lives in %st(0) (when
1758 asked via an `=' command) if we said it was in DWARF regno 11,
1759 but SDB still prints garbage when asked for the value of the
1760 variable in question (via a `/' command).
1761 (Also note that the labels SDB prints for various FP stack regs
1762 when doing an `x' command are all wrong.)
1763 Note that these problems generally don't affect the native SVR4
1764 C compiler because it doesn't allow the use of -O with -g and
1765 because when it is *not* optimizing, it allocates a memory
1766 location for each floating-point variable, and the memory
1767 location is what gets described in the DWARF AT_location
1768 attribute for the variable in question.
1769 Regardless of the severe mental illness of the x86/svr4 SDB, we
1770 do something sensible here and we use the following DWARF
1771 register numbers. Note that these are all stack-top-relative
1772 numbers.
1773 11 for %st(0) (gcc regno = 8)
1774 12 for %st(1) (gcc regno = 9)
1775 13 for %st(2) (gcc regno = 10)
1776 14 for %st(3) (gcc regno = 11)
1777 15 for %st(4) (gcc regno = 12)
1778 16 for %st(5) (gcc regno = 13)
1779 17 for %st(6) (gcc regno = 14)
1780 18 for %st(7) (gcc regno = 15)
1781 */
1782 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1783 {
1784 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1785 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1786 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1787 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1788 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1789 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1790 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1791 };
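/* Editor's note: an illustrative example, not part of the original source.
   The register maps above are indexed by the gcc register number and yield
   the debugger's register number; the hypothetical checks below just restate
   two of the SVR4 assignments documented in the comment above.  */
#if 0
static void
example_dwarf_regno_lookups (void)
{
  /* gcc regno 2 is %ecx, DWARF register 1 in the SVR4 numbering.  */
  gcc_assert (svr4_dbx_register_map[2] == 1);
  /* gcc regno 7 is %esp, DWARF register 4 in the SVR4 numbering.  */
  gcc_assert (svr4_dbx_register_map[7] == 4);
}
#endif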
1793 /* Test and compare insns in i386.md store the information needed to
1794 generate branch and scc insns here. */
1796 rtx ix86_compare_op0 = NULL_RTX;
1797 rtx ix86_compare_op1 = NULL_RTX;
1799 /* Define parameter passing and return registers. */
1801 static int const x86_64_int_parameter_registers[6] =
1802 {
1803 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1804 };
1806 static int const x86_64_ms_abi_int_parameter_registers[4] =
1807 {
1808 CX_REG, DX_REG, R8_REG, R9_REG
1809 };
1811 static int const x86_64_int_return_registers[4] =
1812 {
1813 AX_REG, DX_REG, DI_REG, SI_REG
1814 };
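/* Editor's note: an illustrative example, not part of the original source.
   The arrays above list the argument and return registers in ABI order; for
   the SysV 64-bit ABI the first two integer arguments travel in %rdi and
   %rsi and an integer result comes back in %rax.  The helper name is
   hypothetical.  */
#if 0
static void
example_sysv_int_arg_regs (void)
{
  gcc_assert (x86_64_int_parameter_registers[0] == DI_REG); /* 1st argument */
  gcc_assert (x86_64_int_parameter_registers[1] == SI_REG); /* 2nd argument */
  gcc_assert (x86_64_int_return_registers[0] == AX_REG);    /* return value */
}
#endif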
1816 /* Define the structure for the machine field in struct function. */
1818 struct GTY(()) stack_local_entry {
1819 unsigned short mode;
1820 unsigned short n;
1821 rtx rtl;
1822 struct stack_local_entry *next;
1823 };
1825 /* Structure describing stack frame layout.
1826 Stack grows downward:
1828 [arguments]
1829 <- ARG_POINTER
1830 saved pc
1832 saved frame pointer if frame_pointer_needed
1833 <- HARD_FRAME_POINTER
1834 [saved regs]
1836 [padding0]
1838 [saved SSE regs]
1840 [padding1] \
1842 [va_arg registers] (
1843 > to_allocate <- FRAME_POINTER
1844 [frame] (
1846 [padding2] /
1847 */
1848 struct ix86_frame
1849 {
1850 int padding0;
1851 int nsseregs;
1852 int nregs;
1853 int padding1;
1854 int va_arg_size;
1855 HOST_WIDE_INT frame;
1856 int padding2;
1857 int outgoing_arguments_size;
1858 int red_zone_size;
1860 HOST_WIDE_INT to_allocate;
1861 /* The offsets relative to ARG_POINTER. */
1862 HOST_WIDE_INT frame_pointer_offset;
1863 HOST_WIDE_INT hard_frame_pointer_offset;
1864 HOST_WIDE_INT stack_pointer_offset;
1866 /* When save_regs_using_mov is set, emit prologue using
1867 move instead of push instructions. */
1868 bool save_regs_using_mov;
1869 };
1871 /* Code model option. */
1872 enum cmodel ix86_cmodel;
1873 /* Asm dialect. */
1874 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1875 /* TLS dialects. */
1876 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1878 /* Which unit we are generating floating point math for. */
1879 enum fpmath_unit ix86_fpmath;
1881 /* Which cpu are we scheduling for. */
1882 enum attr_cpu ix86_schedule;
1884 /* Which cpu are we optimizing for. */
1885 enum processor_type ix86_tune;
1887 /* Which instruction set architecture to use. */
1888 enum processor_type ix86_arch;
1890 /* true if sse prefetch instruction is not NOOP. */
1891 int x86_prefetch_sse;
1893 /* ix86_regparm_string as a number */
1894 static int ix86_regparm;
1896 /* -mstackrealign option */
1897 extern int ix86_force_align_arg_pointer;
1898 static const char ix86_force_align_arg_pointer_string[]
1899 = "force_align_arg_pointer";
1901 static rtx (*ix86_gen_leave) (void);
1902 static rtx (*ix86_gen_pop1) (rtx);
1903 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1904 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1905 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1906 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1907 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1908 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1910 /* Preferred alignment for stack boundary in bits. */
1911 unsigned int ix86_preferred_stack_boundary;
1913 /* Alignment for incoming stack boundary in bits specified at
1914 command line. */
1915 static unsigned int ix86_user_incoming_stack_boundary;
1917 /* Default alignment for incoming stack boundary in bits. */
1918 static unsigned int ix86_default_incoming_stack_boundary;
1920 /* Alignment for incoming stack boundary in bits. */
1921 unsigned int ix86_incoming_stack_boundary;
1923 /* The abi used by target. */
1924 enum calling_abi ix86_abi;
1926 /* Values 1-5: see jump.c */
1927 int ix86_branch_cost;
1929 /* Calling abi specific va_list type nodes. */
1930 static GTY(()) tree sysv_va_list_type_node;
1931 static GTY(()) tree ms_va_list_type_node;
1933 /* Variables which are this size or smaller are put in the data/bss
1934 or ldata/lbss sections. */
1936 int ix86_section_threshold = 65536;
1938 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1939 char internal_label_prefix[16];
1940 int internal_label_prefix_len;
1942 /* Fence to use after loop using movnt. */
1943 tree x86_mfence;
1945 /* Register class used for passing a given 64bit part of the argument.
1946 These represent classes as documented by the PS ABI, with the exception
1947 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
1948 uses SF or DFmode moves instead of DImode to avoid reformatting penalties.
1950 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1951 whenever possible (the upper half does contain padding). */
1952 enum x86_64_reg_class
1953 {
1954 X86_64_NO_CLASS,
1955 X86_64_INTEGER_CLASS,
1956 X86_64_INTEGERSI_CLASS,
1957 X86_64_SSE_CLASS,
1958 X86_64_SSESF_CLASS,
1959 X86_64_SSEDF_CLASS,
1960 X86_64_SSEUP_CLASS,
1961 X86_64_X87_CLASS,
1962 X86_64_X87UP_CLASS,
1963 X86_64_COMPLEX_X87_CLASS,
1964 X86_64_MEMORY_CLASS
1965 };
1967 #define MAX_CLASSES 4
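/* Editor's note: an illustrative example, not part of the original source.
   Each class above describes one 64bit chunk (an "eightbyte" in psABI terms)
   of an argument or return value.  For a hypothetical aggregate such as the
   one below, the classification code later in this file (classify_argument)
   assigns one class per chunk:  */
#if 0
struct example_pair { double d; long l; };
/* chunk 0 (d) -> X86_64_SSEDF_CLASS   (passed in an SSE register)
   chunk 1 (l) -> X86_64_INTEGER_CLASS (passed in a general register)
   MAX_CLASSES bounds how many such chunks a register-passed value may
   span.  */
#endif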
1969 /* Table of constants used by fldpi, fldln2, etc.... */
1970 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1971 static bool ext_80387_constants_init = 0;
1974 static struct machine_function * ix86_init_machine_status (void);
1975 static rtx ix86_function_value (const_tree, const_tree, bool);
1976 static bool ix86_function_value_regno_p (const unsigned int);
1977 static rtx ix86_static_chain (const_tree, bool);
1978 static int ix86_function_regparm (const_tree, const_tree);
1979 static void ix86_compute_frame_layout (struct ix86_frame *);
1980 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1981 rtx, rtx, int);
1982 static void ix86_add_new_builtins (int);
1983 static rtx ix86_expand_vec_perm_builtin (tree);
1985 enum ix86_function_specific_strings
1986 {
1987 IX86_FUNCTION_SPECIFIC_ARCH,
1988 IX86_FUNCTION_SPECIFIC_TUNE,
1989 IX86_FUNCTION_SPECIFIC_FPMATH,
1990 IX86_FUNCTION_SPECIFIC_MAX
1991 };
1993 static char *ix86_target_string (int, int, const char *, const char *,
1994 const char *, bool);
1995 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1996 static void ix86_function_specific_save (struct cl_target_option *);
1997 static void ix86_function_specific_restore (struct cl_target_option *);
1998 static void ix86_function_specific_print (FILE *, int,
1999 struct cl_target_option *);
2000 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
2001 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
2002 static bool ix86_can_inline_p (tree, tree);
2003 static void ix86_set_current_function (tree);
2004 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
2006 static enum calling_abi ix86_function_abi (const_tree);
2009 #ifndef SUBTARGET32_DEFAULT_CPU
2010 #define SUBTARGET32_DEFAULT_CPU "i386"
2011 #endif
2013 /* The svr4 ABI for the i386 says that records and unions are returned
2014 in memory. */
2015 #ifndef DEFAULT_PCC_STRUCT_RETURN
2016 #define DEFAULT_PCC_STRUCT_RETURN 1
2017 #endif
2019 /* Whether -mtune= or -march= were specified */
2020 static int ix86_tune_defaulted;
2021 static int ix86_arch_specified;
2023 /* Bit flags that specify the ISA we are compiling for. */
2024 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
2026 /* A mask of ix86_isa_flags that includes bit X if X
2027 was set or cleared on the command line. */
2028 static int ix86_isa_flags_explicit;
2030 /* Define a set of ISAs which are available when a given ISA is
2031 enabled. MMX and SSE ISAs are handled separately. */
2033 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
2034 #define OPTION_MASK_ISA_3DNOW_SET \
2035 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
2037 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
2038 #define OPTION_MASK_ISA_SSE2_SET \
2039 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
2040 #define OPTION_MASK_ISA_SSE3_SET \
2041 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
2042 #define OPTION_MASK_ISA_SSSE3_SET \
2043 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
2044 #define OPTION_MASK_ISA_SSE4_1_SET \
2045 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
2046 #define OPTION_MASK_ISA_SSE4_2_SET \
2047 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
2048 #define OPTION_MASK_ISA_AVX_SET \
2049 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
2050 #define OPTION_MASK_ISA_FMA_SET \
2051 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
2053 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
2054 as -msse4.2. */
2055 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
2057 #define OPTION_MASK_ISA_SSE4A_SET \
2058 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
2059 #define OPTION_MASK_ISA_FMA4_SET \
2060 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
2061 | OPTION_MASK_ISA_AVX_SET)
2062 #define OPTION_MASK_ISA_XOP_SET \
2063 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
2064 #define OPTION_MASK_ISA_LWP_SET \
2065 OPTION_MASK_ISA_LWP
2067 /* AES and PCLMUL need SSE2 because they use xmm registers */
2068 #define OPTION_MASK_ISA_AES_SET \
2069 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
2070 #define OPTION_MASK_ISA_PCLMUL_SET \
2071 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
2073 #define OPTION_MASK_ISA_ABM_SET \
2074 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
2076 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
2077 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
2078 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
2079 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
2080 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
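/* Editor's note: an illustrative expansion, not part of the original source.
   The *_SET masks chain so that enabling one ISA also enables everything it
   depends on.  For example, OPTION_MASK_ISA_SSE4_1_SET expands (through the
   SSSE3, SSE3, SSE2 and SSE *_SET masks) to

     OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3
     | OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE

   so -msse4.1 turns on the whole SSE..SSE4.1 chain in ix86_isa_flags.  */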
2082 /* Define a set of ISAs which aren't available when a given ISA is
2083 disabled. MMX and SSE ISAs are handled separately. */
2085 #define OPTION_MASK_ISA_MMX_UNSET \
2086 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
2087 #define OPTION_MASK_ISA_3DNOW_UNSET \
2088 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
2089 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
2091 #define OPTION_MASK_ISA_SSE_UNSET \
2092 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
2093 #define OPTION_MASK_ISA_SSE2_UNSET \
2094 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
2095 #define OPTION_MASK_ISA_SSE3_UNSET \
2096 (OPTION_MASK_ISA_SSE3 \
2097 | OPTION_MASK_ISA_SSSE3_UNSET \
2098 | OPTION_MASK_ISA_SSE4A_UNSET )
2099 #define OPTION_MASK_ISA_SSSE3_UNSET \
2100 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2101 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2102 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2103 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2104 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2105 #define OPTION_MASK_ISA_AVX_UNSET \
2106 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2107 | OPTION_MASK_ISA_FMA4_UNSET)
2108 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2110 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
2111 as -mno-sse4.1. */
2112 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2114 #define OPTION_MASK_ISA_SSE4A_UNSET \
2115 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2117 #define OPTION_MASK_ISA_FMA4_UNSET \
2118 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2119 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2120 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2122 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2123 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2124 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2125 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2126 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2127 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2128 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2129 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
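/* Editor's note: an illustrative expansion, not part of the original source.
   The *_UNSET masks chain in the opposite direction, so disabling one ISA
   also disables everything layered on top of it.  For example,
   OPTION_MASK_ISA_SSE2_UNSET pulls in SSE3, SSSE3, SSE4A, SSE4.1, SSE4.2,
   AVX, FMA, FMA4 and XOP through the chained masks, which is why -mno-sse2
   leaves none of those ISAs enabled.  */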
2131 /* Vectorization library interface and handlers. */
2132 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2133 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2134 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2136 /* Processor target table, indexed by processor number */
2137 struct ptt
2138 {
2139 const struct processor_costs *cost; /* Processor costs */
2140 const int align_loop; /* Default alignments. */
2141 const int align_loop_max_skip;
2142 const int align_jump;
2143 const int align_jump_max_skip;
2144 const int align_func;
2145 };
2147 static const struct ptt processor_target_table[PROCESSOR_max] =
2148 {
2149 {&i386_cost, 4, 3, 4, 3, 4},
2150 {&i486_cost, 16, 15, 16, 15, 16},
2151 {&pentium_cost, 16, 7, 16, 7, 16},
2152 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2153 {&geode_cost, 0, 0, 0, 0, 0},
2154 {&k6_cost, 32, 7, 32, 7, 32},
2155 {&athlon_cost, 16, 7, 16, 7, 16},
2156 {&pentium4_cost, 0, 0, 0, 0, 0},
2157 {&k8_cost, 16, 7, 16, 7, 16},
2158 {&nocona_cost, 0, 0, 0, 0, 0},
2159 {&core2_cost, 16, 10, 16, 10, 16},
2160 {&generic32_cost, 16, 7, 16, 7, 16},
2161 {&generic64_cost, 16, 10, 16, 10, 16},
2162 {&amdfam10_cost, 32, 24, 32, 7, 32},
2163 {&bdver1_cost, 32, 24, 32, 7, 32},
2164 {&atom_cost, 16, 7, 16, 7, 16}
2165 };
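/* Editor's note: an illustrative sketch, not part of the original source.
   Each row above pairs a cost table with the default alignment parameters
   that override_options reads for the selected -mtune processor.  The helper
   name below is hypothetical.  */
#if 0
static void
example_target_table_lookup (void)
{
  const struct ptt *entry = &processor_target_table[PROCESSOR_CORE2];

  /* For core2 this selects the core2_cost table and the 16/10/16/10/16
     loop/jump/function alignment defaults listed above.  */
  gcc_assert (entry->cost == &core2_cost);
  gcc_assert (entry->align_loop == 16 && entry->align_loop_max_skip == 10);
}
#endif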
2167 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2168 {
2169 "generic",
2170 "i386",
2171 "i486",
2172 "pentium",
2173 "pentium-mmx",
2174 "pentiumpro",
2175 "pentium2",
2176 "pentium3",
2177 "pentium4",
2178 "pentium-m",
2179 "prescott",
2180 "nocona",
2181 "core2",
2182 "atom",
2183 "geode",
2184 "k6",
2185 "k6-2",
2186 "k6-3",
2187 "athlon",
2188 "athlon-4",
2189 "k8",
2190 "amdfam10",
2191 "bdver1"
2192 };
2194 /* Implement TARGET_HANDLE_OPTION. */
2196 static bool
2197 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2199 switch (code)
2201 case OPT_mmmx:
2202 if (value)
2204 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2205 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2207 else
2209 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2210 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2212 return true;
2214 case OPT_m3dnow:
2215 if (value)
2217 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2218 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2220 else
2222 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2223 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2225 return true;
2227 case OPT_m3dnowa:
2228 return false;
2230 case OPT_msse:
2231 if (value)
2233 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2234 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2236 else
2238 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2239 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2241 return true;
2243 case OPT_msse2:
2244 if (value)
2246 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2247 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2249 else
2251 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2252 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2254 return true;
2256 case OPT_msse3:
2257 if (value)
2259 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2260 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2262 else
2264 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2265 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2267 return true;
2269 case OPT_mssse3:
2270 if (value)
2272 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2273 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2275 else
2277 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2278 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2280 return true;
2282 case OPT_msse4_1:
2283 if (value)
2285 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2286 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2288 else
2290 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2291 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2293 return true;
2295 case OPT_msse4_2:
2296 if (value)
2298 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2299 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2301 else
2303 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2304 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2306 return true;
2308 case OPT_mavx:
2309 if (value)
2311 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2312 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2314 else
2316 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2317 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2319 return true;
2321 case OPT_mfma:
2322 if (value)
2324 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2325 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2327 else
2329 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2330 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2332 return true;
2334 case OPT_msse4:
2335 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2336 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2337 return true;
2339 case OPT_mno_sse4:
2340 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2341 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2342 return true;
2344 case OPT_msse4a:
2345 if (value)
2347 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2348 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2350 else
2352 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2353 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2355 return true;
2357 case OPT_mfma4:
2358 if (value)
2360 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2361 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2363 else
2365 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2366 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2368 return true;
2370 case OPT_mxop:
2371 if (value)
2373 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2374 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2376 else
2378 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2379 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2381 return true;
2383 case OPT_mlwp:
2384 if (value)
2386 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2387 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2389 else
2391 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2392 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2394 return true;
2396 case OPT_mabm:
2397 if (value)
2399 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2400 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2402 else
2404 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2405 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2407 return true;
2409 case OPT_mpopcnt:
2410 if (value)
2412 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2413 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2415 else
2417 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2418 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2420 return true;
2422 case OPT_msahf:
2423 if (value)
2425 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2426 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2428 else
2430 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2431 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2433 return true;
2435 case OPT_mcx16:
2436 if (value)
2438 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2439 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2441 else
2443 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2444 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2446 return true;
2448 case OPT_mmovbe:
2449 if (value)
2451 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2452 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2454 else
2456 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2457 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2459 return true;
2461 case OPT_mcrc32:
2462 if (value)
2464 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2465 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2467 else
2469 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2470 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2472 return true;
2474 case OPT_maes:
2475 if (value)
2477 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2478 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2480 else
2482 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2483 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2485 return true;
2487 case OPT_mpclmul:
2488 if (value)
2490 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2491 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2493 else
2495 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2496 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2498 return true;
2500 default:
2501 return true;
2505 /* Return a string that documents the current -m options. The caller is
2506 responsible for freeing the string. */
2508 static char *
2509 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2510 const char *fpmath, bool add_nl_p)
2512 struct ix86_target_opts
2514 const char *option; /* option string */
2515 int mask; /* isa mask options */
2518 /* This table is ordered so that options like -msse4.2, which imply
2519 preceding options, are matched first. */
2520 static struct ix86_target_opts isa_opts[] =
2522 { "-m64", OPTION_MASK_ISA_64BIT },
2523 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2524 { "-mfma", OPTION_MASK_ISA_FMA },
2525 { "-mxop", OPTION_MASK_ISA_XOP },
2526 { "-mlwp", OPTION_MASK_ISA_LWP },
2527 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2528 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2529 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2530 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2531 { "-msse3", OPTION_MASK_ISA_SSE3 },
2532 { "-msse2", OPTION_MASK_ISA_SSE2 },
2533 { "-msse", OPTION_MASK_ISA_SSE },
2534 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2535 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2536 { "-mmmx", OPTION_MASK_ISA_MMX },
2537 { "-mabm", OPTION_MASK_ISA_ABM },
2538 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2539 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2540 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2541 { "-maes", OPTION_MASK_ISA_AES },
2542 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2545 /* Flag options. */
2546 static struct ix86_target_opts flag_opts[] =
2548 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2549 { "-m80387", MASK_80387 },
2550 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2551 { "-malign-double", MASK_ALIGN_DOUBLE },
2552 { "-mcld", MASK_CLD },
2553 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2554 { "-mieee-fp", MASK_IEEE_FP },
2555 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2556 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2557 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2558 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2559 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2560 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2561 { "-mno-red-zone", MASK_NO_RED_ZONE },
2562 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2563 { "-mrecip", MASK_RECIP },
2564 { "-mrtd", MASK_RTD },
2565 { "-msseregparm", MASK_SSEREGPARM },
2566 { "-mstack-arg-probe", MASK_STACK_PROBE },
2567 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2570 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2572 char isa_other[40];
2573 char target_other[40];
2574 unsigned num = 0;
2575 unsigned i, j;
2576 char *ret;
2577 char *ptr;
2578 size_t len;
2579 size_t line_len;
2580 size_t sep_len;
2582 memset (opts, '\0', sizeof (opts));
2584 /* Add -march= option. */
2585 if (arch)
2587 opts[num][0] = "-march=";
2588 opts[num++][1] = arch;
2591 /* Add -mtune= option. */
2592 if (tune)
2594 opts[num][0] = "-mtune=";
2595 opts[num++][1] = tune;
2598 /* Pick out the options in isa options. */
2599 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2601 if ((isa & isa_opts[i].mask) != 0)
2603 opts[num++][0] = isa_opts[i].option;
2604 isa &= ~ isa_opts[i].mask;
2608 if (isa && add_nl_p)
2610 opts[num++][0] = isa_other;
2611 sprintf (isa_other, "(other isa: %#x)", isa);
2614 /* Add flag options. */
2615 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2617 if ((flags & flag_opts[i].mask) != 0)
2619 opts[num++][0] = flag_opts[i].option;
2620 flags &= ~ flag_opts[i].mask;
2624 if (flags && add_nl_p)
2626 opts[num++][0] = target_other;
2627 sprintf (target_other, "(other flags: %#x)", flags);
2630 /* Add -fpmath= option. */
2631 if (fpmath)
2633 opts[num][0] = "-mfpmath=";
2634 opts[num++][1] = fpmath;
2637 /* Any options? */
2638 if (num == 0)
2639 return NULL;
2641 gcc_assert (num < ARRAY_SIZE (opts));
2643 /* Size the string. */
2644 len = 0;
2645 sep_len = (add_nl_p) ? 3 : 1;
2646 for (i = 0; i < num; i++)
2648 len += sep_len;
2649 for (j = 0; j < 2; j++)
2650 if (opts[i][j])
2651 len += strlen (opts[i][j]);
2654 /* Build the string. */
2655 ret = ptr = (char *) xmalloc (len);
2656 line_len = 0;
2658 for (i = 0; i < num; i++)
2660 size_t len2[2];
2662 for (j = 0; j < 2; j++)
2663 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2665 if (i != 0)
2667 *ptr++ = ' ';
2668 line_len++;
2670 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2672 *ptr++ = '\\';
2673 *ptr++ = '\n';
2674 line_len = 0;
2678 for (j = 0; j < 2; j++)
2679 if (opts[i][j])
2681 memcpy (ptr, opts[i][j], len2[j]);
2682 ptr += len2[j];
2683 line_len += len2[j];
2687 *ptr = '\0';
2688 gcc_assert (ret + len >= ptr);
2690 return ret;
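/* Editor's note: an illustrative result, not part of the original source.
   For a hypothetical call such as

     ix86_target_string (OPTION_MASK_ISA_64BIT | OPTION_MASK_ISA_SSE2
                         | OPTION_MASK_ISA_SSE, MASK_80387,
                         "k8", "generic", "sse", false)

   the returned buffer would read roughly

     "-march=k8 -mtune=generic -m64 -msse2 -msse -m80387 -mfpmath=sse"

   i.e. -march=/-mtune= first, then the ISA options in table order, then the
   flag options, then -mfpmath=; the caller frees the string.  */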
2693 /* Function that is callable from the debugger to print the current
2694 options. */
2695 void
2696 ix86_debug_options (void)
2698 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2699 ix86_arch_string, ix86_tune_string,
2700 ix86_fpmath_string, true);
2702 if (opts)
2704 fprintf (stderr, "%s\n\n", opts);
2705 free (opts);
2707 else
2708 fputs ("<no options>\n\n", stderr);
2710 return;
2713 /* Sometimes certain combinations of command options do not make
2714 sense on a particular target machine. You can define a macro
2715 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2716 defined, is executed once just after all the command options have
2717 been parsed.
2719 Don't use this macro to turn on various extra optimizations for
2720 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2722 void
2723 override_options (bool main_args_p)
2725 int i;
2726 unsigned int ix86_arch_mask, ix86_tune_mask;
2727 const bool ix86_tune_specified = (ix86_tune_string != NULL);
2728 const char *prefix;
2729 const char *suffix;
2730 const char *sw;
2732 /* Comes from final.c -- no real reason to change it. */
2733 #define MAX_CODE_ALIGN 16
2735 enum pta_flags
2736 {
2737 PTA_SSE = 1 << 0,
2738 PTA_SSE2 = 1 << 1,
2739 PTA_SSE3 = 1 << 2,
2740 PTA_MMX = 1 << 3,
2741 PTA_PREFETCH_SSE = 1 << 4,
2742 PTA_3DNOW = 1 << 5,
2743 PTA_3DNOW_A = 1 << 6,
2744 PTA_64BIT = 1 << 7,
2745 PTA_SSSE3 = 1 << 8,
2746 PTA_CX16 = 1 << 9,
2747 PTA_POPCNT = 1 << 10,
2748 PTA_ABM = 1 << 11,
2749 PTA_SSE4A = 1 << 12,
2750 PTA_NO_SAHF = 1 << 13,
2751 PTA_SSE4_1 = 1 << 14,
2752 PTA_SSE4_2 = 1 << 15,
2753 PTA_AES = 1 << 16,
2754 PTA_PCLMUL = 1 << 17,
2755 PTA_AVX = 1 << 18,
2756 PTA_FMA = 1 << 19,
2757 PTA_MOVBE = 1 << 20,
2758 PTA_FMA4 = 1 << 21,
2759 PTA_XOP = 1 << 22,
2760 PTA_LWP = 1 << 23
2761 };
2763 static struct pta
2764 {
2765 const char *const name; /* processor name or nickname. */
2766 const enum processor_type processor;
2767 const enum attr_cpu schedule;
2768 const unsigned /*enum pta_flags*/ flags;
2769 }
2770 const processor_alias_table[] =
2771 {
2772 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2773 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2774 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2775 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2776 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2777 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2778 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2779 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2780 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2781 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2782 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2783 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2784 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2785 PTA_MMX | PTA_SSE},
2786 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2787 PTA_MMX | PTA_SSE},
2788 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2789 PTA_MMX | PTA_SSE | PTA_SSE2},
2790 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2791 PTA_MMX | PTA_SSE | PTA_SSE2},
2792 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2793 PTA_MMX | PTA_SSE | PTA_SSE2},
2794 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2795 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2796 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2797 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2798 | PTA_CX16 | PTA_NO_SAHF},
2799 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2800 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2801 | PTA_SSSE3 | PTA_CX16},
2802 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2803 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2804 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2805 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2806 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2807 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2808 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2809 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2810 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2811 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2812 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2813 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2814 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2815 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2816 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2817 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2818 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2819 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2820 {"x86-64", PROCESSOR_K8, CPU_K8,
2821 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2822 {"k8", PROCESSOR_K8, CPU_K8,
2823 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2824 | PTA_SSE2 | PTA_NO_SAHF},
2825 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2826 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2827 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2828 {"opteron", PROCESSOR_K8, CPU_K8,
2829 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2830 | PTA_SSE2 | PTA_NO_SAHF},
2831 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2832 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2833 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2834 {"athlon64", PROCESSOR_K8, CPU_K8,
2835 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2836 | PTA_SSE2 | PTA_NO_SAHF},
2837 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2838 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2839 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2840 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2841 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2842 | PTA_SSE2 | PTA_NO_SAHF},
2843 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2844 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2845 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2846 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2847 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2848 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2849 {"bdver1", PROCESSOR_BDVER1, CPU_BDVER1,
2850 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2851 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM
2852 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AES
2853 | PTA_PCLMUL | PTA_AVX | PTA_FMA4 | PTA_XOP | PTA_LWP},
2854 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2855 0 /* flags are only used for -march switch. */ },
2856 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2857 PTA_64BIT /* flags are only used for -march switch. */ },
2858 };
2860 int const pta_size = ARRAY_SIZE (processor_alias_table);
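/* Editor's note: an illustrative sketch, not part of the original source.
   The loop over this table further below ORs each PTA_* capability of the
   selected -march processor into ix86_isa_flags, but only when the
   corresponding bit was not set or cleared explicitly on the command line
   (as recorded in ix86_isa_flags_explicit).  So, hypothetically,
   "-march=core2 -mno-ssse3" ends up with MMX, SSE, SSE2, SSE3 and CX16
   enabled while SSSE3 stays off, because the explicit -mno-ssse3 takes
   precedence over the table entry.  */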
2862 /* Set up prefix/suffix so the error messages refer to either the command
2863 line argument, or the attribute(target). */
2864 if (main_args_p)
2866 prefix = "-m";
2867 suffix = "";
2868 sw = "switch";
2870 else
2872 prefix = "option(\"";
2873 suffix = "\")";
2874 sw = "attribute";
2877 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2878 SUBTARGET_OVERRIDE_OPTIONS;
2879 #endif
2881 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2882 SUBSUBTARGET_OVERRIDE_OPTIONS;
2883 #endif
2885 /* -fPIC is the default for x86_64. */
2886 if (TARGET_MACHO && TARGET_64BIT)
2887 flag_pic = 2;
2889 /* Set the default values for switches whose default depends on TARGET_64BIT
2890 in case they weren't overwritten by command line options. */
2891 if (TARGET_64BIT)
2893 /* Mach-O doesn't support omitting the frame pointer for now. */
2894 if (flag_omit_frame_pointer == 2)
2895 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2896 if (flag_asynchronous_unwind_tables == 2)
2897 flag_asynchronous_unwind_tables = 1;
2898 if (flag_pcc_struct_return == 2)
2899 flag_pcc_struct_return = 0;
2901 else
2903 if (flag_omit_frame_pointer == 2)
2904 flag_omit_frame_pointer = 0;
2905 if (flag_asynchronous_unwind_tables == 2)
2906 flag_asynchronous_unwind_tables = 0;
2907 if (flag_pcc_struct_return == 2)
2908 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2911 /* Need to check -mtune=generic first. */
2912 if (ix86_tune_string)
2914 if (!strcmp (ix86_tune_string, "generic")
2915 || !strcmp (ix86_tune_string, "i686")
2916 /* As special support for cross compilers we read -mtune=native
2917 as -mtune=generic. With native compilers we won't see the
2918 -mtune=native, as it was changed by the driver. */
2919 || !strcmp (ix86_tune_string, "native"))
2921 if (TARGET_64BIT)
2922 ix86_tune_string = "generic64";
2923 else
2924 ix86_tune_string = "generic32";
2926 /* If this call is for setting the option attribute, allow the
2927 generic32/generic64 that was previously set. */
2928 else if (!main_args_p
2929 && (!strcmp (ix86_tune_string, "generic32")
2930 || !strcmp (ix86_tune_string, "generic64")))
2932 else if (!strncmp (ix86_tune_string, "generic", 7))
2933 error ("bad value (%s) for %stune=%s %s",
2934 ix86_tune_string, prefix, suffix, sw);
2935 else if (!strcmp (ix86_tune_string, "x86-64"))
2936 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2937 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2938 prefix, suffix, prefix, suffix, prefix, suffix);
2940 else
2942 if (ix86_arch_string)
2943 ix86_tune_string = ix86_arch_string;
2944 if (!ix86_tune_string)
2946 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2947 ix86_tune_defaulted = 1;
2950 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2951 need to use a sensible tune option. */
2952 if (!strcmp (ix86_tune_string, "generic")
2953 || !strcmp (ix86_tune_string, "x86-64")
2954 || !strcmp (ix86_tune_string, "i686"))
2956 if (TARGET_64BIT)
2957 ix86_tune_string = "generic64";
2958 else
2959 ix86_tune_string = "generic32";
2963 if (ix86_stringop_string)
2965 if (!strcmp (ix86_stringop_string, "rep_byte"))
2966 stringop_alg = rep_prefix_1_byte;
2967 else if (!strcmp (ix86_stringop_string, "libcall"))
2968 stringop_alg = libcall;
2969 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2970 stringop_alg = rep_prefix_4_byte;
2971 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2972 && TARGET_64BIT)
2973 /* rep; movq isn't available in 32-bit code. */
2974 stringop_alg = rep_prefix_8_byte;
2975 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2976 stringop_alg = loop_1_byte;
2977 else if (!strcmp (ix86_stringop_string, "loop"))
2978 stringop_alg = loop;
2979 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2980 stringop_alg = unrolled_loop;
2981 else
2982 error ("bad value (%s) for %sstringop-strategy=%s %s",
2983 ix86_stringop_string, prefix, suffix, sw);
2986 if (!ix86_arch_string)
2987 ix86_arch_string = TARGET_64BIT ? "x86-64" : SUBTARGET32_DEFAULT_CPU;
2988 else
2989 ix86_arch_specified = 1;
2991 /* Validate -mabi= value. */
2992 if (ix86_abi_string)
2994 if (strcmp (ix86_abi_string, "sysv") == 0)
2995 ix86_abi = SYSV_ABI;
2996 else if (strcmp (ix86_abi_string, "ms") == 0)
2997 ix86_abi = MS_ABI;
2998 else
2999 error ("unknown ABI (%s) for %sabi=%s %s",
3000 ix86_abi_string, prefix, suffix, sw);
3002 else
3003 ix86_abi = DEFAULT_ABI;
3005 if (ix86_cmodel_string != 0)
3007 if (!strcmp (ix86_cmodel_string, "small"))
3008 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3009 else if (!strcmp (ix86_cmodel_string, "medium"))
3010 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
3011 else if (!strcmp (ix86_cmodel_string, "large"))
3012 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
3013 else if (flag_pic)
3014 error ("code model %s does not support PIC mode", ix86_cmodel_string);
3015 else if (!strcmp (ix86_cmodel_string, "32"))
3016 ix86_cmodel = CM_32;
3017 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
3018 ix86_cmodel = CM_KERNEL;
3019 else
3020 error ("bad value (%s) for %scmodel=%s %s",
3021 ix86_cmodel_string, prefix, suffix, sw);
3023 else
3025 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
3026 use of rip-relative addressing. This eliminates fixups that
3027 would otherwise be needed if this object is to be placed in a
3028 DLL, and is essentially just as efficient as direct addressing. */
3029 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
3030 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
3031 else if (TARGET_64BIT)
3032 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
3033 else
3034 ix86_cmodel = CM_32;
3036 if (ix86_asm_string != 0)
3038 if (! TARGET_MACHO
3039 && !strcmp (ix86_asm_string, "intel"))
3040 ix86_asm_dialect = ASM_INTEL;
3041 else if (!strcmp (ix86_asm_string, "att"))
3042 ix86_asm_dialect = ASM_ATT;
3043 else
3044 error ("bad value (%s) for %sasm=%s %s",
3045 ix86_asm_string, prefix, suffix, sw);
3047 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
3048 error ("code model %qs not supported in the %s bit mode",
3049 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
3050 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
3051 sorry ("%i-bit mode not compiled in",
3052 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
3054 for (i = 0; i < pta_size; i++)
3055 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
3057 ix86_schedule = processor_alias_table[i].schedule;
3058 ix86_arch = processor_alias_table[i].processor;
3059 /* Default cpu tuning to the architecture. */
3060 ix86_tune = ix86_arch;
3062 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3063 error ("CPU you selected does not support x86-64 "
3064 "instruction set");
3066 if (processor_alias_table[i].flags & PTA_MMX
3067 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
3068 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
3069 if (processor_alias_table[i].flags & PTA_3DNOW
3070 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
3071 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
3072 if (processor_alias_table[i].flags & PTA_3DNOW_A
3073 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
3074 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
3075 if (processor_alias_table[i].flags & PTA_SSE
3076 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
3077 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
3078 if (processor_alias_table[i].flags & PTA_SSE2
3079 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
3080 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
3081 if (processor_alias_table[i].flags & PTA_SSE3
3082 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
3083 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
3084 if (processor_alias_table[i].flags & PTA_SSSE3
3085 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
3086 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
3087 if (processor_alias_table[i].flags & PTA_SSE4_1
3088 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
3089 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
3090 if (processor_alias_table[i].flags & PTA_SSE4_2
3091 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
3092 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
3093 if (processor_alias_table[i].flags & PTA_AVX
3094 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
3095 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
3096 if (processor_alias_table[i].flags & PTA_FMA
3097 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
3098 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
3099 if (processor_alias_table[i].flags & PTA_SSE4A
3100 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
3101 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
3102 if (processor_alias_table[i].flags & PTA_FMA4
3103 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3104 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3105 if (processor_alias_table[i].flags & PTA_XOP
3106 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3107 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3108 if (processor_alias_table[i].flags & PTA_LWP
3109 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3110 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3111 if (processor_alias_table[i].flags & PTA_ABM
3112 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3113 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3114 if (processor_alias_table[i].flags & PTA_CX16
3115 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3116 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3117 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3118 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3119 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3120 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3121 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3122 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3123 if (processor_alias_table[i].flags & PTA_MOVBE
3124 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3125 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3126 if (processor_alias_table[i].flags & PTA_AES
3127 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3128 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3129 if (processor_alias_table[i].flags & PTA_PCLMUL
3130 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3131 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3132 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3133 x86_prefetch_sse = true;
3135 break;
3138 if (!strcmp (ix86_arch_string, "generic"))
3139 error ("generic CPU can be used only for %stune=%s %s",
3140 prefix, suffix, sw);
3141 else if (!strncmp (ix86_arch_string, "generic", 7) || i == pta_size)
3142 error ("bad value (%s) for %sarch=%s %s",
3143 ix86_arch_string, prefix, suffix, sw);
3145 ix86_arch_mask = 1u << ix86_arch;
3146 for (i = 0; i < X86_ARCH_LAST; ++i)
3147 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3149 for (i = 0; i < pta_size; i++)
3150 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3152 ix86_schedule = processor_alias_table[i].schedule;
3153 ix86_tune = processor_alias_table[i].processor;
3154 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3156 if (ix86_tune_defaulted)
3158 ix86_tune_string = "x86-64";
3159 for (i = 0; i < pta_size; i++)
3160 if (! strcmp (ix86_tune_string,
3161 processor_alias_table[i].name))
3162 break;
3163 ix86_schedule = processor_alias_table[i].schedule;
3164 ix86_tune = processor_alias_table[i].processor;
3166 else
3167 error ("CPU you selected does not support x86-64 "
3168 "instruction set");
3170 /* Intel CPUs have always interpreted SSE prefetch instructions as
3171 NOPs; so, we can enable SSE prefetch instructions even when
3172 -mtune (rather than -march) points us to a processor that has them.
3173 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3174 higher processors. */
3175 if (TARGET_CMOVE
3176 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3177 x86_prefetch_sse = true;
3178 break;
3181 if (ix86_tune_specified && i == pta_size)
3182 error ("bad value (%s) for %stune=%s %s",
3183 ix86_tune_string, prefix, suffix, sw);
3185 ix86_tune_mask = 1u << ix86_tune;
3186 for (i = 0; i < X86_TUNE_LAST; ++i)
3187 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3189 if (optimize_size)
3190 ix86_cost = &ix86_size_cost;
3191 else
3192 ix86_cost = processor_target_table[ix86_tune].cost;
3194 /* Arrange to set up i386_stack_locals for all functions. */
3195 init_machine_status = ix86_init_machine_status;
3197 /* Validate -mregparm= value. */
3198 if (ix86_regparm_string)
3200 if (TARGET_64BIT)
3201 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3202 i = atoi (ix86_regparm_string);
3203 if (i < 0 || i > REGPARM_MAX)
3204 error ("%sregparm=%d%s is not between 0 and %d",
3205 prefix, i, suffix, REGPARM_MAX);
3206 else
3207 ix86_regparm = i;
3209 if (TARGET_64BIT)
3210 ix86_regparm = REGPARM_MAX;
3212 /* If the user has provided any of the -malign-* options,
3213 warn and use that value only if -falign-* is not set.
3214 Remove this code in GCC 3.2 or later. */
3215 if (ix86_align_loops_string)
3217 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3218 prefix, suffix, suffix);
3219 if (align_loops == 0)
3221 i = atoi (ix86_align_loops_string);
3222 if (i < 0 || i > MAX_CODE_ALIGN)
3223 error ("%salign-loops=%d%s is not between 0 and %d",
3224 prefix, i, suffix, MAX_CODE_ALIGN);
3225 else
3226 align_loops = 1 << i;
3230 if (ix86_align_jumps_string)
3232 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3233 prefix, suffix, suffix);
3234 if (align_jumps == 0)
3236 i = atoi (ix86_align_jumps_string);
3237 if (i < 0 || i > MAX_CODE_ALIGN)
3238 error ("%salign-jumps=%d%s is not between 0 and %d",
3239 prefix, i, suffix, MAX_CODE_ALIGN);
3240 else
3241 align_jumps = 1 << i;
3245 if (ix86_align_funcs_string)
3247 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3248 prefix, suffix, suffix);
3249 if (align_functions == 0)
3251 i = atoi (ix86_align_funcs_string);
3252 if (i < 0 || i > MAX_CODE_ALIGN)
3253 error ("%salign-functions=%d%s is not between 0 and %d",
3254 prefix, i, suffix, MAX_CODE_ALIGN);
3255 else
3256 align_functions = 1 << i;
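/* For illustration: the legacy -malign-* options take a power-of-two
   exponent, so a hypothetical -malign-functions=4 ends up as
   align_functions = 1 << 4 = 16 bytes here, whereas the preferred
   -falign-functions option takes the byte count directly.  */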
3260 /* Default align_* from the processor table. */
3261 if (align_loops == 0)
3263 align_loops = processor_target_table[ix86_tune].align_loop;
3264 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3266 if (align_jumps == 0)
3268 align_jumps = processor_target_table[ix86_tune].align_jump;
3269 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3271 if (align_functions == 0)
3273 align_functions = processor_target_table[ix86_tune].align_func;
3276 /* Validate -mbranch-cost= value, or provide default. */
3277 ix86_branch_cost = ix86_cost->branch_cost;
3278 if (ix86_branch_cost_string)
3280 i = atoi (ix86_branch_cost_string);
3281 if (i < 0 || i > 5)
3282 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3283 else
3284 ix86_branch_cost = i;
3286 if (ix86_section_threshold_string)
3288 i = atoi (ix86_section_threshold_string);
3289 if (i < 0)
3290 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3291 else
3292 ix86_section_threshold = i;
3295 if (ix86_tls_dialect_string)
3297 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3298 ix86_tls_dialect = TLS_DIALECT_GNU;
3299 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3300 ix86_tls_dialect = TLS_DIALECT_GNU2;
3301 else
3302 error ("bad value (%s) for %stls-dialect=%s %s",
3303 ix86_tls_dialect_string, prefix, suffix, sw);
3306 if (ix87_precision_string)
3308 i = atoi (ix87_precision_string);
3309 if (i != 32 && i != 64 && i != 80)
3310 error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
3313 if (TARGET_64BIT)
3315 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3317 /* Enable by default the SSE and MMX builtins. Do allow the user to
3318 explicitly disable any of these. In particular, disabling SSE and
3319 MMX for kernel code is extremely useful. */
3320 if (!ix86_arch_specified)
3321 ix86_isa_flags
3322 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3323 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3325 if (TARGET_RTD)
3326 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
3328 else
3330 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3332 if (!ix86_arch_specified)
3333 ix86_isa_flags
3334 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3336 /* The i386 ABI does not specify a red zone.  It still makes sense to use
3337 it when the programmer takes care to keep the stack from being destroyed.  */
3338 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3339 target_flags |= MASK_NO_RED_ZONE;
3342 /* Keep nonleaf frame pointers. */
3343 if (flag_omit_frame_pointer)
3344 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3345 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3346 flag_omit_frame_pointer = 1;
3348 /* If we're doing fast math, we don't care about comparison order
3349 wrt NaNs. This lets us use a shorter comparison sequence. */
3350 if (flag_finite_math_only)
3351 target_flags &= ~MASK_IEEE_FP;
3353 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3354 since the insns won't need emulation. */
3355 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3356 target_flags &= ~MASK_NO_FANCY_MATH_387;
3358 /* Likewise, if the target doesn't have a 387, or we've specified
3359 software floating point, don't use 387 inline intrinsics. */
3360 if (!TARGET_80387)
3361 target_flags |= MASK_NO_FANCY_MATH_387;
3363 /* Turn on MMX builtins for -msse. */
3364 if (TARGET_SSE)
3366 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3367 x86_prefetch_sse = true;
3370 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3371 if (TARGET_SSE4_2 || TARGET_ABM)
3372 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3374 /* Validate -mpreferred-stack-boundary= value or default it to
3375 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3376 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3377 if (ix86_preferred_stack_boundary_string)
3379 i = atoi (ix86_preferred_stack_boundary_string);
3380 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3381 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3382 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3383 else
3384 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
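/* For illustration: with BITS_PER_UNIT == 8, a hypothetical
   -mpreferred-stack-boundary=4 yields (1 << 4) * 8 = 128 bits,
   i.e. the usual 16-byte stack alignment.  */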
3387 /* Set the default value for -mstackrealign. */
3388 if (ix86_force_align_arg_pointer == -1)
3389 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3391 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3393 /* Validate -mincoming-stack-boundary= value or default it to
3394 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3395 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3396 if (ix86_incoming_stack_boundary_string)
3398 i = atoi (ix86_incoming_stack_boundary_string);
3399 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3400 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3401 i, TARGET_64BIT ? 4 : 2);
3402 else
3404 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3405 ix86_incoming_stack_boundary
3406 = ix86_user_incoming_stack_boundary;
3410 /* Accept -msseregparm only if at least SSE support is enabled. */
3411 if (TARGET_SSEREGPARM
3412 && ! TARGET_SSE)
3413 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3415 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3416 if (ix86_fpmath_string != 0)
3418 if (! strcmp (ix86_fpmath_string, "387"))
3419 ix86_fpmath = FPMATH_387;
3420 else if (! strcmp (ix86_fpmath_string, "sse"))
3422 if (!TARGET_SSE)
3424 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3425 ix86_fpmath = FPMATH_387;
3427 else
3428 ix86_fpmath = FPMATH_SSE;
3430 else if (! strcmp (ix86_fpmath_string, "387,sse")
3431 || ! strcmp (ix86_fpmath_string, "387+sse")
3432 || ! strcmp (ix86_fpmath_string, "sse,387")
3433 || ! strcmp (ix86_fpmath_string, "sse+387")
3434 || ! strcmp (ix86_fpmath_string, "both"))
3436 if (!TARGET_SSE)
3438 warning (0, "SSE instruction set disabled, using 387 arithmetic");
3439 ix86_fpmath = FPMATH_387;
3441 else if (!TARGET_80387)
3443 warning (0, "387 instruction set disabled, using SSE arithmetic");
3444 ix86_fpmath = FPMATH_SSE;
3446 else
3447 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3449 else
3450 error ("bad value (%s) for %sfpmath=%s %s",
3451 ix86_fpmath_string, prefix, suffix, sw);
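/* For illustration (assuming a 32-bit target with SSE enabled):
   -mfpmath=387 selects FPMATH_387, -mfpmath=sse selects FPMATH_SSE, and
   any of "387,sse", "387+sse", "sse,387", "sse+387" or "both" sets both
   bits; when the requested unit is disabled, the code above falls back
   to the other unit with a warning.  */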
3454 /* If the i387 is disabled, then do not return values in it. */
3455 if (!TARGET_80387)
3456 target_flags &= ~MASK_FLOAT_RETURNS;
3458 /* Use external vectorized library in vectorizing intrinsics. */
3459 if (ix86_veclibabi_string)
3461 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3462 ix86_veclib_handler = ix86_veclibabi_svml;
3463 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3464 ix86_veclib_handler = ix86_veclibabi_acml;
3465 else
3466 error ("unknown vectorization library ABI type (%s) for "
3467 "%sveclibabi=%s %s", ix86_veclibabi_string,
3468 prefix, suffix, sw);
3471 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3472 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3473 && !optimize_size)
3474 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3476 /* ??? Unwind info is not correct around the CFG unless either a frame
3477 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3478 unwind info generation to be aware of the CFG and propagating states
3479 around edges. */
3480 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3481 || flag_exceptions || flag_non_call_exceptions)
3482 && flag_omit_frame_pointer
3483 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3485 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3486 warning (0, "unwind tables currently require either a frame pointer "
3487 "or %saccumulate-outgoing-args%s for correctness",
3488 prefix, suffix);
3489 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3492 /* If stack probes are required, the space used for large function
3493 arguments on the stack must also be probed, so enable
3494 -maccumulate-outgoing-args so this happens in the prologue. */
3495 if (TARGET_STACK_PROBE
3496 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3498 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3499 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3500 "for correctness", prefix, suffix);
3501 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3504 /* For sane SSE instruction set generation we need the fcomi instruction.
3505 It is safe to enable all CMOVE instructions.  */
3506 if (TARGET_SSE)
3507 TARGET_CMOVE = 1;
3509 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3511 char *p;
3512 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3513 p = strchr (internal_label_prefix, 'X');
3514 internal_label_prefix_len = p - internal_label_prefix;
3515 *p = '\0';
3518 /* When the scheduling description is not available, disable the scheduler
3519 pass so it won't slow down compilation and make x87 code slower.  */
3520 if (!TARGET_SCHEDULE)
3521 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3523 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3524 set_param_value ("simultaneous-prefetches",
3525 ix86_cost->simultaneous_prefetches);
3526 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3527 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3528 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3529 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3530 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3531 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3533 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3534 can be optimized to ap = __builtin_next_arg (0). */
3535 if (!TARGET_64BIT)
3536 targetm.expand_builtin_va_start = NULL;
3538 if (TARGET_64BIT)
3540 ix86_gen_leave = gen_leave_rex64;
3541 ix86_gen_pop1 = gen_popdi1;
3542 ix86_gen_add3 = gen_adddi3;
3543 ix86_gen_sub3 = gen_subdi3;
3544 ix86_gen_sub3_carry = gen_subdi3_carry;
3545 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3546 ix86_gen_monitor = gen_sse3_monitor64;
3547 ix86_gen_andsp = gen_anddi3;
3549 else
3551 ix86_gen_leave = gen_leave;
3552 ix86_gen_pop1 = gen_popsi1;
3553 ix86_gen_add3 = gen_addsi3;
3554 ix86_gen_sub3 = gen_subsi3;
3555 ix86_gen_sub3_carry = gen_subsi3_carry;
3556 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3557 ix86_gen_monitor = gen_sse3_monitor;
3558 ix86_gen_andsp = gen_andsi3;
3561 #ifdef USE_IX86_CLD
3562 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3563 if (!TARGET_64BIT)
3564 target_flags |= MASK_CLD & ~target_flags_explicit;
3565 #endif
3567 /* Save the initial options in case the user uses function-specific options.  */
3568 if (main_args_p)
3569 target_option_default_node = target_option_current_node
3570 = build_target_option_node ();
3573 /* Update register usage after having seen the compiler flags. */
3575 void
3576 ix86_conditional_register_usage (void)
3578 int i;
3579 unsigned int j;
3581 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3583 if (fixed_regs[i] > 1)
3584 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3585 if (call_used_regs[i] > 1)
3586 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3589 /* The PIC register, if it exists, is fixed. */
3590 j = PIC_OFFSET_TABLE_REGNUM;
3591 if (j != INVALID_REGNUM)
3592 fixed_regs[j] = call_used_regs[j] = 1;
3594 /* The MS_ABI changes the set of call-used registers. */
3595 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3597 call_used_regs[SI_REG] = 0;
3598 call_used_regs[DI_REG] = 0;
3599 call_used_regs[XMM6_REG] = 0;
3600 call_used_regs[XMM7_REG] = 0;
3601 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3602 call_used_regs[i] = 0;
3605 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3606 other call-clobbered regs for 64-bit. */
3607 if (TARGET_64BIT)
3609 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3611 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3612 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3613 && call_used_regs[i])
3614 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3617 /* If MMX is disabled, squash the registers. */
3618 if (! TARGET_MMX)
3619 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3620 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3621 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3623 /* If SSE is disabled, squash the registers. */
3624 if (! TARGET_SSE)
3625 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3626 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3627 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3629 /* If the FPU is disabled, squash the registers. */
3630 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3631 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3632 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3633 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3635 /* If 32-bit, squash the 64-bit registers. */
3636 if (! TARGET_64BIT)
3638 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3639 reg_names[i] = "";
3640 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3641 reg_names[i] = "";
3646 /* Save the current options */
3648 static void
3649 ix86_function_specific_save (struct cl_target_option *ptr)
3651 ptr->arch = ix86_arch;
3652 ptr->schedule = ix86_schedule;
3653 ptr->tune = ix86_tune;
3654 ptr->fpmath = ix86_fpmath;
3655 ptr->branch_cost = ix86_branch_cost;
3656 ptr->tune_defaulted = ix86_tune_defaulted;
3657 ptr->arch_specified = ix86_arch_specified;
3658 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3659 ptr->target_flags_explicit = target_flags_explicit;
3661 /* The fields are char but the variables are not; make sure the
3662 values fit in the fields. */
3663 gcc_assert (ptr->arch == ix86_arch);
3664 gcc_assert (ptr->schedule == ix86_schedule);
3665 gcc_assert (ptr->tune == ix86_tune);
3666 gcc_assert (ptr->fpmath == ix86_fpmath);
3667 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3670 /* Restore the current options */
3672 static void
3673 ix86_function_specific_restore (struct cl_target_option *ptr)
3675 enum processor_type old_tune = ix86_tune;
3676 enum processor_type old_arch = ix86_arch;
3677 unsigned int ix86_arch_mask, ix86_tune_mask;
3678 int i;
3680 ix86_arch = (enum processor_type) ptr->arch;
3681 ix86_schedule = (enum attr_cpu) ptr->schedule;
3682 ix86_tune = (enum processor_type) ptr->tune;
3683 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3684 ix86_branch_cost = ptr->branch_cost;
3685 ix86_tune_defaulted = ptr->tune_defaulted;
3686 ix86_arch_specified = ptr->arch_specified;
3687 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3688 target_flags_explicit = ptr->target_flags_explicit;
3690 /* Recreate the arch feature tests if the arch changed */
3691 if (old_arch != ix86_arch)
3693 ix86_arch_mask = 1u << ix86_arch;
3694 for (i = 0; i < X86_ARCH_LAST; ++i)
3695 ix86_arch_features[i]
3696 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3699 /* Recreate the tune optimization tests */
3700 if (old_tune != ix86_tune)
3702 ix86_tune_mask = 1u << ix86_tune;
3703 for (i = 0; i < X86_TUNE_LAST; ++i)
3704 ix86_tune_features[i]
3705 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3709 /* Print the current options */
3711 static void
3712 ix86_function_specific_print (FILE *file, int indent,
3713 struct cl_target_option *ptr)
3715 char *target_string
3716 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3717 NULL, NULL, NULL, false);
3719 fprintf (file, "%*sarch = %d (%s)\n",
3720 indent, "",
3721 ptr->arch,
3722 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3723 ? cpu_names[ptr->arch]
3724 : "<unknown>"));
3726 fprintf (file, "%*stune = %d (%s)\n",
3727 indent, "",
3728 ptr->tune,
3729 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3730 ? cpu_names[ptr->tune]
3731 : "<unknown>"));
3733 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3734 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3735 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3736 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3738 if (target_string)
3740 fprintf (file, "%*s%s\n", indent, "", target_string);
3741 free (target_string);
3746 /* Inner function to process the attribute((target(...))), take an argument and
3747 set the current options from the argument. If we have a list, recursively go
3748 over the list. */
3750 static bool
3751 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3753 char *next_optstr;
3754 bool ret = true;
3756 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3757 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3758 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3759 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3761 enum ix86_opt_type
3763 ix86_opt_unknown,
3764 ix86_opt_yes,
3765 ix86_opt_no,
3766 ix86_opt_str,
3767 ix86_opt_isa
3770 static const struct
3772 const char *string;
3773 size_t len;
3774 enum ix86_opt_type type;
3775 int opt;
3776 int mask;
3777 } attrs[] = {
3778 /* isa options */
3779 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3780 IX86_ATTR_ISA ("abm", OPT_mabm),
3781 IX86_ATTR_ISA ("aes", OPT_maes),
3782 IX86_ATTR_ISA ("avx", OPT_mavx),
3783 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3784 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3785 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3786 IX86_ATTR_ISA ("sse", OPT_msse),
3787 IX86_ATTR_ISA ("sse2", OPT_msse2),
3788 IX86_ATTR_ISA ("sse3", OPT_msse3),
3789 IX86_ATTR_ISA ("sse4", OPT_msse4),
3790 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3791 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3792 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3793 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3794 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3795 IX86_ATTR_ISA ("xop", OPT_mxop),
3796 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3798 /* string options */
3799 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3800 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3801 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3803 /* flag options */
3804 IX86_ATTR_YES ("cld",
3805 OPT_mcld,
3806 MASK_CLD),
3808 IX86_ATTR_NO ("fancy-math-387",
3809 OPT_mfancy_math_387,
3810 MASK_NO_FANCY_MATH_387),
3812 IX86_ATTR_YES ("ieee-fp",
3813 OPT_mieee_fp,
3814 MASK_IEEE_FP),
3816 IX86_ATTR_YES ("inline-all-stringops",
3817 OPT_minline_all_stringops,
3818 MASK_INLINE_ALL_STRINGOPS),
3820 IX86_ATTR_YES ("inline-stringops-dynamically",
3821 OPT_minline_stringops_dynamically,
3822 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3824 IX86_ATTR_NO ("align-stringops",
3825 OPT_mno_align_stringops,
3826 MASK_NO_ALIGN_STRINGOPS),
3828 IX86_ATTR_YES ("recip",
3829 OPT_mrecip,
3830 MASK_RECIP),
3834 /* If this is a list, recurse to get the options. */
3835 if (TREE_CODE (args) == TREE_LIST)
3837 bool ret = true;
3839 for (; args; args = TREE_CHAIN (args))
3840 if (TREE_VALUE (args)
3841 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3842 ret = false;
3844 return ret;
3847 else if (TREE_CODE (args) != STRING_CST)
3848 gcc_unreachable ();
3850 /* Handle multiple arguments separated by commas. */
3851 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3853 while (next_optstr && *next_optstr != '\0')
3855 char *p = next_optstr;
3856 char *orig_p = p;
3857 char *comma = strchr (next_optstr, ',');
3858 const char *opt_string;
3859 size_t len, opt_len;
3860 int opt;
3861 bool opt_set_p;
3862 char ch;
3863 unsigned i;
3864 enum ix86_opt_type type = ix86_opt_unknown;
3865 int mask = 0;
3867 if (comma)
3869 *comma = '\0';
3870 len = comma - next_optstr;
3871 next_optstr = comma + 1;
3873 else
3875 len = strlen (p);
3876 next_optstr = NULL;
3879 /* Recognize no-xxx. */
3880 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3882 opt_set_p = false;
3883 p += 3;
3884 len -= 3;
3886 else
3887 opt_set_p = true;
3889 /* Find the option. */
3890 ch = *p;
3891 opt = N_OPTS;
3892 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3894 type = attrs[i].type;
3895 opt_len = attrs[i].len;
3896 if (ch == attrs[i].string[0]
3897 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3898 && memcmp (p, attrs[i].string, opt_len) == 0)
3900 opt = attrs[i].opt;
3901 mask = attrs[i].mask;
3902 opt_string = attrs[i].string;
3903 break;
3907 /* Process the option. */
3908 if (opt == N_OPTS)
3910 error ("attribute(target(\"%s\")) is unknown", orig_p);
3911 ret = false;
3914 else if (type == ix86_opt_isa)
3915 ix86_handle_option (opt, p, opt_set_p);
3917 else if (type == ix86_opt_yes || type == ix86_opt_no)
3919 if (type == ix86_opt_no)
3920 opt_set_p = !opt_set_p;
3922 if (opt_set_p)
3923 target_flags |= mask;
3924 else
3925 target_flags &= ~mask;
3928 else if (type == ix86_opt_str)
3930 if (p_strings[opt])
3932 error ("option(\"%s\") was already specified", opt_string);
3933 ret = false;
3935 else
3936 p_strings[opt] = xstrdup (p + opt_len);
3939 else
3940 gcc_unreachable ();
3943 return ret;
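/* For illustration, a hypothetical declaration such as

     int foo (void) __attribute__ ((target ("sse4.1,no-fancy-math-387")));

   reaches this function as the string "sse4.1,no-fancy-math-387": the first
   token is dispatched as an ISA option through ix86_handle_option, and the
   second, via the "no-" prefix handling above, sets MASK_NO_FANCY_MATH_387
   in target_flags.  */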
3946 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3948 tree
3949 ix86_valid_target_attribute_tree (tree args)
3951 const char *orig_arch_string = ix86_arch_string;
3952 const char *orig_tune_string = ix86_tune_string;
3953 const char *orig_fpmath_string = ix86_fpmath_string;
3954 int orig_tune_defaulted = ix86_tune_defaulted;
3955 int orig_arch_specified = ix86_arch_specified;
3956 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3957 tree t = NULL_TREE;
3958 int i;
3959 struct cl_target_option *def
3960 = TREE_TARGET_OPTION (target_option_default_node);
3962 /* Process each of the options on the chain. */
3963 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3964 return NULL_TREE;
3966 /* If the changed options are different from the default, rerun override_options,
3967 and then save the options away.  The string options are attribute options,
3968 and will be undone when we copy the save structure. */
3969 if (ix86_isa_flags != def->ix86_isa_flags
3970 || target_flags != def->target_flags
3971 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3972 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3973 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3975 /* If we are using the default tune= or arch=, undo the string assigned,
3976 and use the default. */
3977 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3978 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3979 else if (!orig_arch_specified)
3980 ix86_arch_string = NULL;
3982 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3983 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3984 else if (orig_tune_defaulted)
3985 ix86_tune_string = NULL;
3987 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3988 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3989 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3990 else if (!TARGET_64BIT && TARGET_SSE)
3991 ix86_fpmath_string = "sse,387";
3993 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3994 override_options (false);
3996 /* Add any builtin functions with the new isa if any. */
3997 ix86_add_new_builtins (ix86_isa_flags);
3999 /* Save the current options unless we are validating options for
4000 #pragma. */
4001 t = build_target_option_node ();
4003 ix86_arch_string = orig_arch_string;
4004 ix86_tune_string = orig_tune_string;
4005 ix86_fpmath_string = orig_fpmath_string;
4007 /* Free up memory allocated to hold the strings */
4008 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
4009 if (option_strings[i])
4010 free (option_strings[i]);
4013 return t;
4016 /* Hook to validate attribute((target("string"))). */
4018 static bool
4019 ix86_valid_target_attribute_p (tree fndecl,
4020 tree ARG_UNUSED (name),
4021 tree args,
4022 int ARG_UNUSED (flags))
4024 struct cl_target_option cur_target;
4025 bool ret = true;
4026 tree old_optimize = build_optimization_node ();
4027 tree new_target, new_optimize;
4028 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
4030 /* If the function changed the optimization levels as well as setting target
4031 options, start with the optimizations specified. */
4032 if (func_optimize && func_optimize != old_optimize)
4033 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
4035 /* The target attributes may also change some optimization flags, so update
4036 the optimization options if necessary. */
4037 cl_target_option_save (&cur_target);
4038 new_target = ix86_valid_target_attribute_tree (args);
4039 new_optimize = build_optimization_node ();
4041 if (!new_target)
4042 ret = false;
4044 else if (fndecl)
4046 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
4048 if (old_optimize != new_optimize)
4049 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
4052 cl_target_option_restore (&cur_target);
4054 if (old_optimize != new_optimize)
4055 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
4057 return ret;
4061 /* Hook to determine if one function can safely inline another. */
4063 static bool
4064 ix86_can_inline_p (tree caller, tree callee)
4066 bool ret = false;
4067 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
4068 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
4070 /* If callee has no option attributes, then it is ok to inline. */
4071 if (!callee_tree)
4072 ret = true;
4074 /* If caller has no option attributes, but callee does then it is not ok to
4075 inline. */
4076 else if (!caller_tree)
4077 ret = false;
4079 else
4081 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
4082 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
4084 /* Callee's isa options should be a subset of the caller's, i.e. an SSE4 function
4085 can inline an SSE2 function but an SSE2 function can't inline an SSE4
4086 function.  */
4087 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
4088 != callee_opts->ix86_isa_flags)
4089 ret = false;
4091 /* See if we have the same non-isa options. */
4092 else if (caller_opts->target_flags != callee_opts->target_flags)
4093 ret = false;
4095 /* See if arch, tune, etc. are the same. */
4096 else if (caller_opts->arch != callee_opts->arch)
4097 ret = false;
4099 else if (caller_opts->tune != callee_opts->tune)
4100 ret = false;
4102 else if (caller_opts->fpmath != callee_opts->fpmath)
4103 ret = false;
4105 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4106 ret = false;
4108 else
4109 ret = true;
4112 return ret;
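/* For illustration: a caller declared with a hypothetical
   attribute ((target ("sse4.2"))) may inline a callee restricted to
   target ("sse2"), since the callee's ISA flags are then a subset of the
   caller's; the reverse inlining is rejected above.  */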
4116 /* Remember the last target of ix86_set_current_function. */
4117 static GTY(()) tree ix86_previous_fndecl;
4119 /* Establish appropriate back-end context for processing the function
4120 FNDECL. The argument might be NULL to indicate processing at top
4121 level, outside of any function scope. */
4122 static void
4123 ix86_set_current_function (tree fndecl)
4125 /* Only change the context if the function changes. This hook is called
4126 several times in the course of compiling a function, and we don't want to
4127 slow things down too much or call target_reinit when it isn't safe. */
4128 if (fndecl && fndecl != ix86_previous_fndecl)
4130 tree old_tree = (ix86_previous_fndecl
4131 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4132 : NULL_TREE);
4134 tree new_tree = (fndecl
4135 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4136 : NULL_TREE);
4138 ix86_previous_fndecl = fndecl;
4139 if (old_tree == new_tree)
4142 else if (new_tree)
4144 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4145 target_reinit ();
4148 else if (old_tree)
4150 struct cl_target_option *def
4151 = TREE_TARGET_OPTION (target_option_current_node);
4153 cl_target_option_restore (def);
4154 target_reinit ();
4160 /* Return true if this goes in large data/bss. */
4162 static bool
4163 ix86_in_large_data_p (tree exp)
4165 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4166 return false;
4168 /* Functions are never large data. */
4169 if (TREE_CODE (exp) == FUNCTION_DECL)
4170 return false;
4172 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4174 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4175 if (strcmp (section, ".ldata") == 0
4176 || strcmp (section, ".lbss") == 0)
4177 return true;
4178 return false;
4180 else
4182 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4184 /* If this is an incomplete type with size 0, then we can't put it
4185 in data because it might be too big when completed. */
4186 if (!size || size > ix86_section_threshold)
4187 return true;
4190 return false;
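/* For illustration (assuming the default -mlarge-data-threshold of 65536):
   with -mcmodel=medium a 100000-byte array counts as large data and is
   placed in .ldata/.lbss, while a 4-byte variable stays in the normal
   data/bss sections.  */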
4193 /* Switch to the appropriate section for output of DECL.
4194 DECL is either a `VAR_DECL' node or a constant of some sort.
4195 RELOC indicates whether forming the initial value of DECL requires
4196 link-time relocations. */
4198 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4199 ATTRIBUTE_UNUSED;
4201 static section *
4202 x86_64_elf_select_section (tree decl, int reloc,
4203 unsigned HOST_WIDE_INT align)
4205 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4206 && ix86_in_large_data_p (decl))
4208 const char *sname = NULL;
4209 unsigned int flags = SECTION_WRITE;
4210 switch (categorize_decl_for_section (decl, reloc))
4212 case SECCAT_DATA:
4213 sname = ".ldata";
4214 break;
4215 case SECCAT_DATA_REL:
4216 sname = ".ldata.rel";
4217 break;
4218 case SECCAT_DATA_REL_LOCAL:
4219 sname = ".ldata.rel.local";
4220 break;
4221 case SECCAT_DATA_REL_RO:
4222 sname = ".ldata.rel.ro";
4223 break;
4224 case SECCAT_DATA_REL_RO_LOCAL:
4225 sname = ".ldata.rel.ro.local";
4226 break;
4227 case SECCAT_BSS:
4228 sname = ".lbss";
4229 flags |= SECTION_BSS;
4230 break;
4231 case SECCAT_RODATA:
4232 case SECCAT_RODATA_MERGE_STR:
4233 case SECCAT_RODATA_MERGE_STR_INIT:
4234 case SECCAT_RODATA_MERGE_CONST:
4235 sname = ".lrodata";
4236 flags = 0;
4237 break;
4238 case SECCAT_SRODATA:
4239 case SECCAT_SDATA:
4240 case SECCAT_SBSS:
4241 gcc_unreachable ();
4242 case SECCAT_TEXT:
4243 case SECCAT_TDATA:
4244 case SECCAT_TBSS:
4245 /* We don't split these for the medium model.  Place them into
4246 default sections and hope for the best.  */
4247 break;
4248 case SECCAT_EMUTLS_VAR:
4249 case SECCAT_EMUTLS_TMPL:
4250 gcc_unreachable ();
4252 if (sname)
4254 /* We might get called with string constants, but get_named_section
4255 doesn't like them as they are not DECLs. Also, we need to set
4256 flags in that case. */
4257 if (!DECL_P (decl))
4258 return get_section (sname, flags, NULL);
4259 return get_named_section (decl, sname, reloc);
4262 return default_elf_select_section (decl, reloc, align);
4265 /* Build up a unique section name, expressed as a
4266 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4267 RELOC indicates whether the initial value of EXP requires
4268 link-time relocations. */
4270 static void ATTRIBUTE_UNUSED
4271 x86_64_elf_unique_section (tree decl, int reloc)
4273 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4274 && ix86_in_large_data_p (decl))
4276 const char *prefix = NULL;
4277 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4278 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4280 switch (categorize_decl_for_section (decl, reloc))
4282 case SECCAT_DATA:
4283 case SECCAT_DATA_REL:
4284 case SECCAT_DATA_REL_LOCAL:
4285 case SECCAT_DATA_REL_RO:
4286 case SECCAT_DATA_REL_RO_LOCAL:
4287 prefix = one_only ? ".ld" : ".ldata";
4288 break;
4289 case SECCAT_BSS:
4290 prefix = one_only ? ".lb" : ".lbss";
4291 break;
4292 case SECCAT_RODATA:
4293 case SECCAT_RODATA_MERGE_STR:
4294 case SECCAT_RODATA_MERGE_STR_INIT:
4295 case SECCAT_RODATA_MERGE_CONST:
4296 prefix = one_only ? ".lr" : ".lrodata";
4297 break;
4298 case SECCAT_SRODATA:
4299 case SECCAT_SDATA:
4300 case SECCAT_SBSS:
4301 gcc_unreachable ();
4302 case SECCAT_TEXT:
4303 case SECCAT_TDATA:
4304 case SECCAT_TBSS:
4305 /* We don't split these for the medium model.  Place them into
4306 default sections and hope for the best.  */
4307 break;
4308 case SECCAT_EMUTLS_VAR:
4309 prefix = targetm.emutls.var_section;
4310 break;
4311 case SECCAT_EMUTLS_TMPL:
4312 prefix = targetm.emutls.tmpl_section;
4313 break;
4315 if (prefix)
4317 const char *name, *linkonce;
4318 char *string;
4320 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4321 name = targetm.strip_name_encoding (name);
4323 /* If we're using one_only, then there needs to be a .gnu.linkonce
4324 prefix to the section name. */
4325 linkonce = one_only ? ".gnu.linkonce" : "";
4327 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4329 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4330 return;
4333 default_unique_section (decl, reloc);
4336 #ifdef COMMON_ASM_OP
4337 /* This says how to output assembler code to declare an
4338 uninitialized external linkage data object.
4340 For medium model x86-64 we need to use the .largecomm directive for
4341 large objects.  */
4342 void
4343 x86_elf_aligned_common (FILE *file,
4344 const char *name, unsigned HOST_WIDE_INT size,
4345 int align)
4347 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4348 && size > (unsigned int)ix86_section_threshold)
4349 fputs (".largecomm\t", file);
4350 else
4351 fputs (COMMON_ASM_OP, file);
4352 assemble_name (file, name);
4353 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4354 size, align / BITS_PER_UNIT);
4356 #endif
4358 /* Utility function for targets to use in implementing
4359 ASM_OUTPUT_ALIGNED_BSS. */
4361 void
4362 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4363 const char *name, unsigned HOST_WIDE_INT size,
4364 int align)
4366 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4367 && size > (unsigned int)ix86_section_threshold)
4368 switch_to_section (get_named_section (decl, ".lbss", 0));
4369 else
4370 switch_to_section (bss_section);
4371 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4372 #ifdef ASM_DECLARE_OBJECT_NAME
4373 last_assemble_variable_decl = decl;
4374 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4375 #else
4376 /* The standard thing is to just output a label for the object.  */
4377 ASM_OUTPUT_LABEL (file, name);
4378 #endif /* ASM_DECLARE_OBJECT_NAME */
4379 ASM_OUTPUT_SKIP (file, size ? size : 1);
4382 void
4383 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4385 /* For -O2 and beyond, turn off -fschedule-insns by default.  It tends to
4386 make the problem of having too few registers even worse.  */
4387 #ifdef INSN_SCHEDULING
4388 if (level > 1)
4389 flag_schedule_insns = 0;
4390 #endif
4392 /* For -O2 and beyond, turn on -fzee for x86_64 target. */
4393 if (level > 1 && TARGET_64BIT)
4394 flag_zee = 1;
4396 if (TARGET_MACHO)
4397 /* The Darwin libraries never set errno, so we might as well
4398 avoid calling them when that's the only reason we would. */
4399 flag_errno_math = 0;
4401 /* The default values of these switches depend on TARGET_64BIT,
4402 which is not known at this point.  Mark these values with 2 and
4403 let the user override them.  In case there is no command line option
4404 specifying them, we will set the defaults in override_options.  */
4405 if (optimize >= 1)
4406 flag_omit_frame_pointer = 2;
4407 flag_pcc_struct_return = 2;
4408 flag_asynchronous_unwind_tables = 2;
4409 flag_vect_cost_model = 1;
4410 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4411 SUBTARGET_OPTIMIZATION_OPTIONS;
4412 #endif
4415 /* Decide whether we can make a sibling call to a function. DECL is the
4416 declaration of the function being targeted by the call and EXP is the
4417 CALL_EXPR representing the call. */
4419 static bool
4420 ix86_function_ok_for_sibcall (tree decl, tree exp)
4422 tree type, decl_or_type;
4423 rtx a, b;
4425 /* If we are generating position-independent code, we cannot sibcall
4426 optimize any indirect call, or a direct call to a global function,
4427 as the PLT requires %ebx be live. */
4428 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4429 return false;
4431 /* If we need to align the outgoing stack, then sibcalling would
4432 unalign the stack, which may break the called function. */
4433 if (ix86_minimum_incoming_stack_boundary (true)
4434 < PREFERRED_STACK_BOUNDARY)
4435 return false;
4437 if (decl)
4439 decl_or_type = decl;
4440 type = TREE_TYPE (decl);
4442 else
4444 /* We're looking at the CALL_EXPR, we need the type of the function. */
4445 type = CALL_EXPR_FN (exp); /* pointer expression */
4446 type = TREE_TYPE (type); /* pointer type */
4447 type = TREE_TYPE (type); /* function type */
4448 decl_or_type = type;
4451 /* Check that the return value locations are the same.  For example,
4452 if we are returning floats on the 80387 register stack, we cannot
4453 make a sibcall from a function that doesn't return a float to a
4454 function that does or, conversely, from a function that does return
4455 a float to a function that doesn't; the necessary stack adjustment
4456 would not be executed. This is also the place we notice
4457 differences in the return value ABI. Note that it is ok for one
4458 of the functions to have void return type as long as the return
4459 value of the other is passed in a register. */
4460 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4461 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4462 cfun->decl, false);
4463 if (STACK_REG_P (a) || STACK_REG_P (b))
4465 if (!rtx_equal_p (a, b))
4466 return false;
4468 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4470 else if (!rtx_equal_p (a, b))
4471 return false;
4473 if (TARGET_64BIT)
4475 /* The SYSV ABI has more call-clobbered registers;
4476 disallow sibcalls from MS to SYSV. */
4477 if (cfun->machine->call_abi == MS_ABI
4478 && ix86_function_type_abi (type) == SYSV_ABI)
4479 return false;
4481 else
4483 /* If this call is indirect, we'll need to be able to use a
4484 call-clobbered register for the address of the target function.
4485 Make sure that all such registers are not used for passing
4486 parameters. Note that DLLIMPORT functions are indirect. */
4487 if (!decl
4488 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4490 if (ix86_function_regparm (type, NULL) >= 3)
4492 /* ??? Need to count the actual number of registers to be used,
4493 not the possible number of registers. Fix later. */
4494 return false;
4499 /* Otherwise okay. That also includes certain types of indirect calls. */
4500 return true;
4503 /* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
4504 and "sseregparm" calling convention attributes;
4505 arguments as in struct attribute_spec.handler. */
4507 static tree
4508 ix86_handle_cconv_attribute (tree *node, tree name,
4509 tree args,
4510 int flags ATTRIBUTE_UNUSED,
4511 bool *no_add_attrs)
4513 if (TREE_CODE (*node) != FUNCTION_TYPE
4514 && TREE_CODE (*node) != METHOD_TYPE
4515 && TREE_CODE (*node) != FIELD_DECL
4516 && TREE_CODE (*node) != TYPE_DECL)
4518 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4519 name);
4520 *no_add_attrs = true;
4521 return NULL_TREE;
4524 /* Can combine regparm with all attributes but fastcall. */
4525 if (is_attribute_p ("regparm", name))
4527 tree cst;
4529 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4531 error ("fastcall and regparm attributes are not compatible");
4534 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4536 error ("regparm and thiscall attributes are not compatible");
4539 cst = TREE_VALUE (args);
4540 if (TREE_CODE (cst) != INTEGER_CST)
4542 warning (OPT_Wattributes,
4543 "%qE attribute requires an integer constant argument",
4544 name);
4545 *no_add_attrs = true;
4547 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4549 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4550 name, REGPARM_MAX);
4551 *no_add_attrs = true;
4554 return NULL_TREE;
4557 if (TARGET_64BIT)
4559 /* Do not warn when emulating the MS ABI. */
4560 if ((TREE_CODE (*node) != FUNCTION_TYPE
4561 && TREE_CODE (*node) != METHOD_TYPE)
4562 || ix86_function_type_abi (*node) != MS_ABI)
4563 warning (OPT_Wattributes, "%qE attribute ignored",
4564 name);
4565 *no_add_attrs = true;
4566 return NULL_TREE;
4569 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4570 if (is_attribute_p ("fastcall", name))
4572 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4574 error ("fastcall and cdecl attributes are not compatible");
4576 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4578 error ("fastcall and stdcall attributes are not compatible");
4580 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4582 error ("fastcall and regparm attributes are not compatible");
4584 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4586 error ("fastcall and thiscall attributes are not compatible");
4590 /* Can combine stdcall with fastcall (redundant), regparm and
4591 sseregparm. */
4592 else if (is_attribute_p ("stdcall", name))
4594 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4596 error ("stdcall and cdecl attributes are not compatible");
4598 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4600 error ("stdcall and fastcall attributes are not compatible");
4602 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4604 error ("stdcall and thiscall attributes are not compatible");
4608 /* Can combine cdecl with regparm and sseregparm. */
4609 else if (is_attribute_p ("cdecl", name))
4611 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4613 error ("stdcall and cdecl attributes are not compatible");
4615 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4617 error ("fastcall and cdecl attributes are not compatible");
4619 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
4621 error ("cdecl and thiscall attributes are not compatible");
4624 else if (is_attribute_p ("thiscall", name))
4626 if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
4627 warning (OPT_Wattributes, "%qE attribute is used for non-class method",
4628 name);
4629 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4631 error ("stdcall and thiscall attributes are not compatible");
4633 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4635 error ("fastcall and thiscall attributes are not compatible");
4637 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4639 error ("cdecl and thiscall attributes are not compatible");
4643 /* Can combine sseregparm with all attributes. */
4645 return NULL_TREE;
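/* For illustration, hypothetical ia32 declarations using these attributes:

     int __attribute__ ((fastcall)) f1 (int a, int b);      a in %ecx, b in %edx
     int __attribute__ ((regparm (3))) f2 (int, int, int);  args in %eax, %edx, %ecx
     int __attribute__ ((stdcall)) f3 (int);                callee pops its argument

   Combinations rejected above, such as fastcall together with regparm,
   are diagnosed as errors.  */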
4648 /* Return 0 if the attributes for two types are incompatible, 1 if they
4649 are compatible, and 2 if they are nearly compatible (which causes a
4650 warning to be generated). */
4652 static int
4653 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4655 /* Check for mismatch of non-default calling convention. */
4656 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4658 if (TREE_CODE (type1) != FUNCTION_TYPE
4659 && TREE_CODE (type1) != METHOD_TYPE)
4660 return 1;
4662 /* Check for mismatched fastcall/regparm types. */
4663 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4664 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4665 || (ix86_function_regparm (type1, NULL)
4666 != ix86_function_regparm (type2, NULL)))
4667 return 0;
4669 /* Check for mismatched sseregparm types. */
4670 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4671 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4672 return 0;
4674 /* Check for mismatched thiscall types. */
4675 if (!lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type1))
4676 != !lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type2)))
4677 return 0;
4679 /* Check for mismatched return types (cdecl vs stdcall). */
4680 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4681 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4682 return 0;
4684 return 1;
4687 /* Return the regparm value for a function with the indicated TYPE and DECL.
4688 DECL may be NULL when calling function indirectly
4689 or considering a libcall. */
4691 static int
4692 ix86_function_regparm (const_tree type, const_tree decl)
4694 tree attr;
4695 int regparm;
4697 if (TARGET_64BIT)
4698 return (ix86_function_type_abi (type) == SYSV_ABI
4699 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4701 regparm = ix86_regparm;
4702 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4703 if (attr)
4705 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4706 return regparm;
4709 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4710 return 2;
4712 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
4713 return 1;
4715 /* Use register calling convention for local functions when possible. */
4716 if (decl
4717 && TREE_CODE (decl) == FUNCTION_DECL
4718 && optimize
4719 && !profile_flag)
4721 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4722 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4723 if (i && i->local)
4725 int local_regparm, globals = 0, regno;
4727 /* Make sure no regparm register is taken by a
4728 fixed register variable. */
4729 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4730 if (fixed_regs[local_regparm])
4731 break;
4733 /* We don't want to use regparm(3) for nested functions as
4734 these use a static chain pointer in the third argument. */
4735 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4736 local_regparm = 2;
4738 /* Each fixed register usage increases register pressure,
4739 so fewer registers should be used for argument passing.
4740 This functionality can be overridden by an explicit
4741 regparm value. */
4742 for (regno = 0; regno <= DI_REG; regno++)
4743 if (fixed_regs[regno])
4744 globals++;
4746 local_regparm
4747 = globals < local_regparm ? local_regparm - globals : 0;
4749 if (local_regparm > regparm)
4750 regparm = local_regparm;
4754 return regparm;
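/* For illustration: a function that is local in the cgraph sense (i->local)
   may be promoted automatically to up to regparm(3) when optimizing,
   reduced by one register for each global fixed register in use and
   capped at 2 when a static chain occupies the third argument slot.  */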
4757 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4758 DFmode (2) arguments in SSE registers for a function with the
4759 indicated TYPE and DECL. DECL may be NULL when calling function
4760 indirectly or considering a libcall. Otherwise return 0. */
4762 static int
4763 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4765 gcc_assert (!TARGET_64BIT);
4767 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4768 by the sseregparm attribute. */
4769 if (TARGET_SSEREGPARM
4770 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4772 if (!TARGET_SSE)
4774 if (warn)
4776 if (decl)
4777 error ("calling %qD with attribute sseregparm without "
4778 "SSE/SSE2 enabled", decl);
4779 else
4780 error ("calling %qT with attribute sseregparm without "
4781 "SSE/SSE2 enabled", type);
4783 return 0;
4786 return 2;
4789 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4790 (and DFmode for SSE2) arguments in SSE registers. */
4791 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4793 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4794 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4795 if (i && i->local)
4796 return TARGET_SSE2 ? 2 : 1;
4799 return 0;
4802 /* Return true if EAX is live at the start of the function. Used by
4803 ix86_expand_prologue to determine if we need special help before
4804 calling allocate_stack_worker. */
4806 static bool
4807 ix86_eax_live_at_start_p (void)
4809 /* Cheat. Don't bother working forward from ix86_function_regparm
4810 to the function type to whether an actual argument is located in
4811 eax. Instead just look at cfg info, which is still close enough
4812 to correct at this point. This gives false positives for broken
4813 functions that might use uninitialized data that happens to be
4814 allocated in eax, but who cares? */
4815 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4818 /* Value is the number of bytes of arguments automatically
4819 popped when returning from a subroutine call.
4820 FUNDECL is the declaration node of the function (as a tree),
4821 FUNTYPE is the data type of the function (as a tree),
4822 or for a library call it is an identifier node for the subroutine name.
4823 SIZE is the number of bytes of arguments passed on the stack.
4825 On the 80386, the RTD insn may be used to pop them if the number
4826 of args is fixed, but if the number is variable then the caller
4827 must pop them all. RTD can't be used for library calls now
4828 because the library is compiled with the Unix compiler.
4829 Use of RTD is a selectable option, since it is incompatible with
4830 standard Unix calling sequences. If the option is not selected,
4831 the caller must always pop the args.
4833 The attribute stdcall is equivalent to RTD on a per module basis. */
4836 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4838 int rtd;
4840 /* None of the 64-bit ABIs pop arguments. */
4841 if (TARGET_64BIT)
4842 return 0;
4844 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4846 /* Cdecl functions override -mrtd, and never pop the stack. */
4847 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4849 /* Stdcall and fastcall functions will pop the stack if not
4850 variable args. */
4851 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4852 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))
4853 || lookup_attribute ("thiscall", TYPE_ATTRIBUTES (funtype)))
4854 rtd = 1;
4856 if (rtd && ! stdarg_p (funtype))
4857 return size;
4860 /* Lose any fake structure return argument if it is passed on the stack. */
4861 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4862 && !KEEP_AGGREGATE_RETURN_POINTER)
4864 int nregs = ix86_function_regparm (funtype, fundecl);
4865 if (nregs == 0)
4866 return GET_MODE_SIZE (Pmode);
4869 return 0;
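/* For illustration: on ia32 a hypothetical stdcall function taking two int
   arguments returns with "ret $8", popping its own 8 bytes of arguments,
   whereas its cdecl counterpart returns with a plain "ret" and leaves the
   cleanup to the caller.  */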
4872 /* Argument support functions. */
4874 /* Return true when register may be used to pass function parameters. */
4875 bool
4876 ix86_function_arg_regno_p (int regno)
4878 int i;
4879 const int *parm_regs;
4881 if (!TARGET_64BIT)
4883 if (TARGET_MACHO)
4884 return (regno < REGPARM_MAX
4885 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4886 else
4887 return (regno < REGPARM_MAX
4888 || (TARGET_MMX && MMX_REGNO_P (regno)
4889 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4890 || (TARGET_SSE && SSE_REGNO_P (regno)
4891 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4894 if (TARGET_MACHO)
4896 if (SSE_REGNO_P (regno) && TARGET_SSE)
4897 return true;
4899 else
4901 if (TARGET_SSE && SSE_REGNO_P (regno)
4902 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4903 return true;
4906 /* TODO: The function should depend on current function ABI but
4907 builtins.c would need updating then. Therefore we use the
4908 default ABI. */
4910 /* RAX is used as hidden argument to va_arg functions. */
4911 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4912 return true;
4914 if (ix86_abi == MS_ABI)
4915 parm_regs = x86_64_ms_abi_int_parameter_registers;
4916 else
4917 parm_regs = x86_64_int_parameter_registers;
4918 for (i = 0; i < (ix86_abi == MS_ABI
4919 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4920 if (regno == parm_regs[i])
4921 return true;
4922 return false;
4925 /* Return true if we do not know how to pass TYPE solely in registers.  */
4927 static bool
4928 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4930 if (must_pass_in_stack_var_size_or_pad (mode, type))
4931 return true;
4933 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4934 The layout_type routine is crafty and tries to trick us into passing
4935 currently unsupported vector types on the stack by using TImode. */
4936 return (!TARGET_64BIT && mode == TImode
4937 && type && TREE_CODE (type) != VECTOR_TYPE);
4940 /* Return the size, in bytes, of the area reserved for arguments passed
4941 in registers for the function represented by FNDECL, depending on the
4942 ABI format used.  */
4944 ix86_reg_parm_stack_space (const_tree fndecl)
4946 enum calling_abi call_abi = SYSV_ABI;
4947 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4948 call_abi = ix86_function_abi (fndecl);
4949 else
4950 call_abi = ix86_function_type_abi (fndecl);
4951 if (call_abi == MS_ABI)
4952 return 32;
4953 return 0;
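/* For illustration: the 32 bytes returned for MS_ABI correspond to the
   "shadow space" the Win64 calling convention reserves on the stack for
   the four register parameters; the SYSV ABI reserves no such area.  */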
4956 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
4957 call ABI used.  */
4958 enum calling_abi
4959 ix86_function_type_abi (const_tree fntype)
4961 if (TARGET_64BIT && fntype != NULL)
4963 enum calling_abi abi = ix86_abi;
4964 if (abi == SYSV_ABI)
4966 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4967 abi = MS_ABI;
4969 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4970 abi = SYSV_ABI;
4971 return abi;
4973 return ix86_abi;
4976 static bool
4977 ix86_function_ms_hook_prologue (const_tree fntype)
4979 if (!TARGET_64BIT)
4981 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4983 if (decl_function_context (fntype) != NULL_TREE)
4985 error_at (DECL_SOURCE_LOCATION (fntype),
4986 "ms_hook_prologue is not compatible with nested function");
4989 return true;
4992 return false;
4995 static enum calling_abi
4996 ix86_function_abi (const_tree fndecl)
4998 if (! fndecl)
4999 return ix86_abi;
5000 return ix86_function_type_abi (TREE_TYPE (fndecl));
5003 /* Return SYSV_ABI or MS_ABI, depending on CFUN, specifying the
5004 call ABI used.  */
5005 enum calling_abi
5006 ix86_cfun_abi (void)
5008 if (! cfun || ! TARGET_64BIT)
5009 return ix86_abi;
5010 return cfun->machine->call_abi;
5013 /* regclass.c */
5014 extern void init_regs (void);
5016 /* Implementation of the call ABI switching target hook.  The call-used
5017 register sets specific to FNDECL are set up.  See also CONDITIONAL_REGISTER_USAGE
5018 for more details.  */
5019 void
5020 ix86_call_abi_override (const_tree fndecl)
5022 if (fndecl == NULL_TREE)
5023 cfun->machine->call_abi = ix86_abi;
5024 else
5025 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
5028 /* The MS and SYSV ABIs have different sets of call-used registers.  Avoid
5029 expensive re-initialization of init_regs each time we switch function context,
5030 since this is needed only during RTL expansion.  */
5031 static void
5032 ix86_maybe_switch_abi (void)
5034 if (TARGET_64BIT &&
5035 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
5036 reinit_regs ();
5039 /* Initialize a variable CUM of type CUMULATIVE_ARGS
5040 for a call to a function whose data type is FNTYPE.
5041 For a library call, FNTYPE is 0. */
5043 void
5044 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
5045 tree fntype, /* tree ptr for function decl */
5046 rtx libname, /* SYMBOL_REF of library name or 0 */
5047 tree fndecl)
5049 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
5050 memset (cum, 0, sizeof (*cum));
5052 if (fndecl)
5053 cum->call_abi = ix86_function_abi (fndecl);
5054 else
5055 cum->call_abi = ix86_function_type_abi (fntype);
5056 /* Set up the number of registers to use for passing arguments. */
5058 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
5059 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
5060 "or subtarget optimization implying it");
5061 cum->nregs = ix86_regparm;
5062 if (TARGET_64BIT)
5064 if (cum->call_abi != ix86_abi)
5065 cum->nregs = (ix86_abi != SYSV_ABI
5066 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
5068 if (TARGET_SSE)
5070 cum->sse_nregs = SSE_REGPARM_MAX;
5071 if (TARGET_64BIT)
5073 if (cum->call_abi != ix86_abi)
5074 cum->sse_nregs = (ix86_abi != SYSV_ABI
5075 ? X86_64_SSE_REGPARM_MAX
5076 : X86_64_MS_SSE_REGPARM_MAX);
5079 if (TARGET_MMX)
5080 cum->mmx_nregs = MMX_REGPARM_MAX;
5081 cum->warn_avx = true;
5082 cum->warn_sse = true;
5083 cum->warn_mmx = true;
5085 /* Because the type might mismatch between caller and callee, we need to
5086 use the actual type of the function for local calls.
5087 FIXME: cgraph_analyze can be told to actually record whether a function
5088 uses va_start, so for local functions maybe_vaarg can be made more
5089 aggressive, helping K&R code.
5090 FIXME: once the type system is fixed, we won't need this code anymore. */
5091 if (i && i->local)
5092 fntype = TREE_TYPE (fndecl);
5093 cum->maybe_vaarg = (fntype
5094 ? (!prototype_p (fntype) || stdarg_p (fntype))
5095 : !libname);
5097 if (!TARGET_64BIT)
5099 /* If there are variable arguments, then we won't pass anything
5100 in registers in 32-bit mode. */
5101 if (stdarg_p (fntype))
5103 cum->nregs = 0;
5104 cum->sse_nregs = 0;
5105 cum->mmx_nregs = 0;
5106 cum->warn_avx = 0;
5107 cum->warn_sse = 0;
5108 cum->warn_mmx = 0;
5109 return;
5112 /* Use ecx and edx registers if function has fastcall attribute,
5113 else look for regparm information. */
5114 if (fntype)
5116 if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
5118 cum->nregs = 1;
5119 cum->fastcall = 1; /* Same first register as in fastcall. */
5121 else if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
5123 cum->nregs = 2;
5124 cum->fastcall = 1;
5126 else
5127 cum->nregs = ix86_function_regparm (fntype, fndecl);
5130 /* Set up the number of SSE registers used for passing SFmode
5131 and DFmode arguments. Warn for mismatching ABI. */
5132 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
5136 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
5137 But in the case of vector types, it is some vector mode.
5139 When we have only some of our vector isa extensions enabled, then there
5140 are some modes for which vector_mode_supported_p is false. For these
5141 modes, the generic vector support in gcc will choose some non-vector mode
5142 in order to implement the type. By computing the natural mode, we'll
5143 select the proper ABI location for the operand and not depend on whatever
5144 the middle-end decides to do with these vector types.
5146 The middle-end can't deal with vector types larger than 16 bytes. In this
5147 case, we return the original mode and warn about the ABI change if CUM isn't
5148 NULL. */
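/* Illustrative note (added for exposition, not from the original sources):
   for a GCC vector type such as
     typedef float v4sf __attribute__ ((vector_size (16)));
   type_natural_mode returns V4SFmode even when SSE is disabled, so the
   ABI location of the argument stays the same regardless of which ISA
   extensions happen to be enabled.  */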
5150 static enum machine_mode
5151 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
5153 enum machine_mode mode = TYPE_MODE (type);
5155 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
5157 HOST_WIDE_INT size = int_size_in_bytes (type);
5158 if ((size == 8 || size == 16 || size == 32)
5159 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5160 && TYPE_VECTOR_SUBPARTS (type) > 1)
5162 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5164 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5165 mode = MIN_MODE_VECTOR_FLOAT;
5166 else
5167 mode = MIN_MODE_VECTOR_INT;
5169 /* Get the mode which has this inner mode and number of units. */
5170 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5171 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5172 && GET_MODE_INNER (mode) == innermode)
5174 if (size == 32 && !TARGET_AVX)
5176 static bool warnedavx;
5178 if (cum
5179 && !warnedavx
5180 && cum->warn_avx)
5182 warnedavx = true;
5183 warning (0, "AVX vector argument without AVX "
5184 "enabled changes the ABI");
5186 return TYPE_MODE (type);
5188 else
5189 return mode;
5192 gcc_unreachable ();
5196 return mode;
5199 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5200 this may not agree with the mode that the type system has chosen for the
5201 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5202 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5204 static rtx
5205 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5206 unsigned int regno)
5208 rtx tmp;
5210 if (orig_mode != BLKmode)
5211 tmp = gen_rtx_REG (orig_mode, regno);
5212 else
5214 tmp = gen_rtx_REG (mode, regno);
5215 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5216 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5219 return tmp;
5222 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
5223 of this code is to classify each eightbyte of an incoming argument by register
5224 class and assign registers accordingly. */
5226 /* Return the union class of CLASS1 and CLASS2.
5227 See the x86-64 PS ABI for details. */
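/* Worked example (illustrative, not part of the original comments): by the
   rules below, merging X86_64_INTEGERSI_CLASS with X86_64_SSEDF_CLASS
   yields X86_64_INTEGER_CLASS (rule #4), merging anything with
   X86_64_MEMORY_CLASS yields X86_64_MEMORY_CLASS (rule #3), and merging
   X86_64_SSESF_CLASS with X86_64_SSEDF_CLASS falls through to
   X86_64_SSE_CLASS (rule #6).  */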
5229 static enum x86_64_reg_class
5230 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5232 /* Rule #1: If both classes are equal, this is the resulting class. */
5233 if (class1 == class2)
5234 return class1;
5236 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5237 the other class. */
5238 if (class1 == X86_64_NO_CLASS)
5239 return class2;
5240 if (class2 == X86_64_NO_CLASS)
5241 return class1;
5243 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5244 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5245 return X86_64_MEMORY_CLASS;
5247 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5248 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5249 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5250 return X86_64_INTEGERSI_CLASS;
5251 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5252 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5253 return X86_64_INTEGER_CLASS;
5255 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5256 MEMORY is used. */
5257 if (class1 == X86_64_X87_CLASS
5258 || class1 == X86_64_X87UP_CLASS
5259 || class1 == X86_64_COMPLEX_X87_CLASS
5260 || class2 == X86_64_X87_CLASS
5261 || class2 == X86_64_X87UP_CLASS
5262 || class2 == X86_64_COMPLEX_X87_CLASS)
5263 return X86_64_MEMORY_CLASS;
5265 /* Rule #6: Otherwise class SSE is used. */
5266 return X86_64_SSE_CLASS;
5269 /* Classify the argument of type TYPE and mode MODE.
5270 CLASSES will be filled by the register class used to pass each word
5271 of the operand. The number of words is returned. In case the parameter
5272 should be passed in memory, 0 is returned. As a special case for zero
5273 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5275 BIT_OFFSET is used internally for handling records and specifies the
5276 offset in bits modulo 256 to avoid overflow cases.
5278 See the x86-64 PS ABI for details.
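   Illustrative example (added for exposition): on x86-64 an argument of
   type struct { double d; int i; } occupies two eightbytes; the first
   (holding the double) is classified as X86_64_SSEDF_CLASS and the second
   (holding the int) as X86_64_INTEGERSI_CLASS, so 2 is returned and the
   struct is passed in one SSE and one integer register.  */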
5281 static int
5282 classify_argument (enum machine_mode mode, const_tree type,
5283 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5285 HOST_WIDE_INT bytes =
5286 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5287 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5289 /* Variable sized entities are always passed/returned in memory. */
5290 if (bytes < 0)
5291 return 0;
5293 if (mode != VOIDmode
5294 && targetm.calls.must_pass_in_stack (mode, type))
5295 return 0;
5297 if (type && AGGREGATE_TYPE_P (type))
5299 int i;
5300 tree field;
5301 enum x86_64_reg_class subclasses[MAX_CLASSES];
5303 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5304 if (bytes > 32)
5305 return 0;
5307 for (i = 0; i < words; i++)
5308 classes[i] = X86_64_NO_CLASS;
5310 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
5311 signal the memory class, so handle this as a special case. */
5312 if (!words)
5314 classes[0] = X86_64_NO_CLASS;
5315 return 1;
5318 /* Classify each field of record and merge classes. */
5319 switch (TREE_CODE (type))
5321 case RECORD_TYPE:
5322 /* And now merge the fields of structure. */
5323 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5325 if (TREE_CODE (field) == FIELD_DECL)
5327 int num;
5329 if (TREE_TYPE (field) == error_mark_node)
5330 continue;
5332 /* Bitfields are always classified as integer. Handle them
5333 early, since later code would consider them to be
5334 misaligned integers. */
5335 if (DECL_BIT_FIELD (field))
5337 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5338 i < ((int_bit_position (field) + (bit_offset % 64))
5339 + tree_low_cst (DECL_SIZE (field), 0)
5340 + 63) / 8 / 8; i++)
5341 classes[i] =
5342 merge_classes (X86_64_INTEGER_CLASS,
5343 classes[i]);
5345 else
5347 int pos;
5349 type = TREE_TYPE (field);
5351 /* Flexible array member is ignored. */
5352 if (TYPE_MODE (type) == BLKmode
5353 && TREE_CODE (type) == ARRAY_TYPE
5354 && TYPE_SIZE (type) == NULL_TREE
5355 && TYPE_DOMAIN (type) != NULL_TREE
5356 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5357 == NULL_TREE))
5359 static bool warned;
5361 if (!warned && warn_psabi)
5363 warned = true;
5364 inform (input_location,
5365 "The ABI of passing struct with"
5366 " a flexible array member has"
5367 " changed in GCC 4.4");
5369 continue;
5371 num = classify_argument (TYPE_MODE (type), type,
5372 subclasses,
5373 (int_bit_position (field)
5374 + bit_offset) % 256);
5375 if (!num)
5376 return 0;
5377 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5378 for (i = 0; i < num && (i + pos) < words; i++)
5379 classes[i + pos] =
5380 merge_classes (subclasses[i], classes[i + pos]);
5384 break;
5386 case ARRAY_TYPE:
5387 /* Arrays are handled as small records. */
5389 int num;
5390 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5391 TREE_TYPE (type), subclasses, bit_offset);
5392 if (!num)
5393 return 0;
5395 /* The partial classes are now full classes. */
5396 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5397 subclasses[0] = X86_64_SSE_CLASS;
5398 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5399 && !((bit_offset % 64) == 0 && bytes == 4))
5400 subclasses[0] = X86_64_INTEGER_CLASS;
5402 for (i = 0; i < words; i++)
5403 classes[i] = subclasses[i % num];
5405 break;
5407 case UNION_TYPE:
5408 case QUAL_UNION_TYPE:
5409 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
5411 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5413 if (TREE_CODE (field) == FIELD_DECL)
5415 int num;
5417 if (TREE_TYPE (field) == error_mark_node)
5418 continue;
5420 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5421 TREE_TYPE (field), subclasses,
5422 bit_offset);
5423 if (!num)
5424 return 0;
5425 for (i = 0; i < num; i++)
5426 classes[i] = merge_classes (subclasses[i], classes[i]);
5429 break;
5431 default:
5432 gcc_unreachable ();
5435 if (words > 2)
5437 /* When the size is > 16 bytes, if the first eightbyte isn't
5438 X86_64_SSE_CLASS or any of the others isn't
5439 X86_64_SSEUP_CLASS, everything should be passed in
5440 memory. */
5441 if (classes[0] != X86_64_SSE_CLASS)
5442 return 0;
5444 for (i = 1; i < words; i++)
5445 if (classes[i] != X86_64_SSEUP_CLASS)
5446 return 0;
5449 /* Final merger cleanup. */
5450 for (i = 0; i < words; i++)
5452 /* If one class is MEMORY, everything should be passed in
5453 memory. */
5454 if (classes[i] == X86_64_MEMORY_CLASS)
5455 return 0;
5457 /* X86_64_SSEUP_CLASS should always be preceded by
5458 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5459 if (classes[i] == X86_64_SSEUP_CLASS
5460 && classes[i - 1] != X86_64_SSE_CLASS
5461 && classes[i - 1] != X86_64_SSEUP_CLASS)
5463 /* The first one should never be X86_64_SSEUP_CLASS. */
5464 gcc_assert (i != 0);
5465 classes[i] = X86_64_SSE_CLASS;
5468 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5469 everything should be passed in memory. */
5470 if (classes[i] == X86_64_X87UP_CLASS
5471 && (classes[i - 1] != X86_64_X87_CLASS))
5473 static bool warned;
5475 /* The first one should never be X86_64_X87UP_CLASS. */
5476 gcc_assert (i != 0);
5477 if (!warned && warn_psabi)
5479 warned = true;
5480 inform (input_location,
5481 "The ABI of passing union with long double"
5482 " has changed in GCC 4.4");
5484 return 0;
5487 return words;
5490 /* Compute the alignment needed. We align all types to their natural boundaries,
5491 with the exception of XFmode, which is aligned to 64 bits. */
5492 if (mode != VOIDmode && mode != BLKmode)
5494 int mode_alignment = GET_MODE_BITSIZE (mode);
5496 if (mode == XFmode)
5497 mode_alignment = 128;
5498 else if (mode == XCmode)
5499 mode_alignment = 256;
5500 if (COMPLEX_MODE_P (mode))
5501 mode_alignment /= 2;
5502 /* Misaligned fields are always returned in memory. */
5503 if (bit_offset % mode_alignment)
5504 return 0;
5507 /* for V1xx modes, just use the base mode */
5508 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5509 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5510 mode = GET_MODE_INNER (mode);
5512 /* Classification of atomic types. */
5513 switch (mode)
5515 case SDmode:
5516 case DDmode:
5517 classes[0] = X86_64_SSE_CLASS;
5518 return 1;
5519 case TDmode:
5520 classes[0] = X86_64_SSE_CLASS;
5521 classes[1] = X86_64_SSEUP_CLASS;
5522 return 2;
5523 case DImode:
5524 case SImode:
5525 case HImode:
5526 case QImode:
5527 case CSImode:
5528 case CHImode:
5529 case CQImode:
5531 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
5533 if (size <= 32)
5535 classes[0] = X86_64_INTEGERSI_CLASS;
5536 return 1;
5538 else if (size <= 64)
5540 classes[0] = X86_64_INTEGER_CLASS;
5541 return 1;
5543 else if (size <= 64+32)
5545 classes[0] = X86_64_INTEGER_CLASS;
5546 classes[1] = X86_64_INTEGERSI_CLASS;
5547 return 2;
5549 else if (size <= 64+64)
5551 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5552 return 2;
5554 else
5555 gcc_unreachable ();
5557 case CDImode:
5558 case TImode:
5559 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5560 return 2;
5561 case COImode:
5562 case OImode:
5563 /* OImode shouldn't be used directly. */
5564 gcc_unreachable ();
5565 case CTImode:
5566 return 0;
5567 case SFmode:
5568 if (!(bit_offset % 64))
5569 classes[0] = X86_64_SSESF_CLASS;
5570 else
5571 classes[0] = X86_64_SSE_CLASS;
5572 return 1;
5573 case DFmode:
5574 classes[0] = X86_64_SSEDF_CLASS;
5575 return 1;
5576 case XFmode:
5577 classes[0] = X86_64_X87_CLASS;
5578 classes[1] = X86_64_X87UP_CLASS;
5579 return 2;
5580 case TFmode:
5581 classes[0] = X86_64_SSE_CLASS;
5582 classes[1] = X86_64_SSEUP_CLASS;
5583 return 2;
5584 case SCmode:
5585 classes[0] = X86_64_SSE_CLASS;
5586 if (!(bit_offset % 64))
5587 return 1;
5588 else
5590 static bool warned;
5592 if (!warned && warn_psabi)
5594 warned = true;
5595 inform (input_location,
5596 "The ABI of passing structure with complex float"
5597 " member has changed in GCC 4.4");
5599 classes[1] = X86_64_SSESF_CLASS;
5600 return 2;
5602 case DCmode:
5603 classes[0] = X86_64_SSEDF_CLASS;
5604 classes[1] = X86_64_SSEDF_CLASS;
5605 return 2;
5606 case XCmode:
5607 classes[0] = X86_64_COMPLEX_X87_CLASS;
5608 return 1;
5609 case TCmode:
5610 /* This mode is larger than 16 bytes. */
5611 return 0;
5612 case V8SFmode:
5613 case V8SImode:
5614 case V32QImode:
5615 case V16HImode:
5616 case V4DFmode:
5617 case V4DImode:
5618 classes[0] = X86_64_SSE_CLASS;
5619 classes[1] = X86_64_SSEUP_CLASS;
5620 classes[2] = X86_64_SSEUP_CLASS;
5621 classes[3] = X86_64_SSEUP_CLASS;
5622 return 4;
5623 case V4SFmode:
5624 case V4SImode:
5625 case V16QImode:
5626 case V8HImode:
5627 case V2DFmode:
5628 case V2DImode:
5629 classes[0] = X86_64_SSE_CLASS;
5630 classes[1] = X86_64_SSEUP_CLASS;
5631 return 2;
5632 case V1TImode:
5633 case V1DImode:
5634 case V2SFmode:
5635 case V2SImode:
5636 case V4HImode:
5637 case V8QImode:
5638 classes[0] = X86_64_SSE_CLASS;
5639 return 1;
5640 case BLKmode:
5641 case VOIDmode:
5642 return 0;
5643 default:
5644 gcc_assert (VECTOR_MODE_P (mode));
5646 if (bytes > 16)
5647 return 0;
5649 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5651 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5652 classes[0] = X86_64_INTEGERSI_CLASS;
5653 else
5654 classes[0] = X86_64_INTEGER_CLASS;
5655 classes[1] = X86_64_INTEGER_CLASS;
5656 return 1 + (bytes > 8);
5660 /* Examine the argument and set the number of registers required in each
5661 class. Return 0 iff the parameter should be passed in memory. */
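/* For instance (illustrative): for struct { double d; int i; } the
   classification is {SSEDF, INTEGERSI}, so examine_argument sets
   *SSE_NREGS = 1 and *INT_NREGS = 1 and returns 1.  */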
5662 static int
5663 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5664 int *int_nregs, int *sse_nregs)
5666 enum x86_64_reg_class regclass[MAX_CLASSES];
5667 int n = classify_argument (mode, type, regclass, 0);
5669 *int_nregs = 0;
5670 *sse_nregs = 0;
5671 if (!n)
5672 return 0;
5673 for (n--; n >= 0; n--)
5674 switch (regclass[n])
5676 case X86_64_INTEGER_CLASS:
5677 case X86_64_INTEGERSI_CLASS:
5678 (*int_nregs)++;
5679 break;
5680 case X86_64_SSE_CLASS:
5681 case X86_64_SSESF_CLASS:
5682 case X86_64_SSEDF_CLASS:
5683 (*sse_nregs)++;
5684 break;
5685 case X86_64_NO_CLASS:
5686 case X86_64_SSEUP_CLASS:
5687 break;
5688 case X86_64_X87_CLASS:
5689 case X86_64_X87UP_CLASS:
5690 if (!in_return)
5691 return 0;
5692 break;
5693 case X86_64_COMPLEX_X87_CLASS:
5694 return in_return ? 2 : 0;
5695 case X86_64_MEMORY_CLASS:
5696 gcc_unreachable ();
5698 return 1;
5701 /* Construct container for the argument used by GCC interface. See
5702 FUNCTION_ARG for the detailed description. */
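/* As a rough illustration (not from the original sources): for an
   argument of type struct { double d; int i; } passed in registers, the
   container built below is a PARALLEL of two EXPR_LISTs, roughly
   (reg:DF xmm0) at byte offset 0 and (reg:SI rdi) at byte offset 8,
   assuming the first SSE and integer argument registers are still
   available.  */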
5704 static rtx
5705 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5706 const_tree type, int in_return, int nintregs, int nsseregs,
5707 const int *intreg, int sse_regno)
5709 /* The following variables hold the static issued_error state. */
5710 static bool issued_sse_arg_error;
5711 static bool issued_sse_ret_error;
5712 static bool issued_x87_ret_error;
5714 enum machine_mode tmpmode;
5715 int bytes =
5716 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5717 enum x86_64_reg_class regclass[MAX_CLASSES];
5718 int n;
5719 int i;
5720 int nexps = 0;
5721 int needed_sseregs, needed_intregs;
5722 rtx exp[MAX_CLASSES];
5723 rtx ret;
5725 n = classify_argument (mode, type, regclass, 0);
5726 if (!n)
5727 return NULL;
5728 if (!examine_argument (mode, type, in_return, &needed_intregs,
5729 &needed_sseregs))
5730 return NULL;
5731 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5732 return NULL;
5734 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5735 some less clueful developer tries to use floating-point anyway. */
5736 if (needed_sseregs && !TARGET_SSE)
5738 if (in_return)
5740 if (!issued_sse_ret_error)
5742 error ("SSE register return with SSE disabled");
5743 issued_sse_ret_error = true;
5746 else if (!issued_sse_arg_error)
5748 error ("SSE register argument with SSE disabled");
5749 issued_sse_arg_error = true;
5751 return NULL;
5754 /* Likewise, error if the ABI requires us to return values in the
5755 x87 registers and the user specified -mno-80387. */
5756 if (!TARGET_80387 && in_return)
5757 for (i = 0; i < n; i++)
5758 if (regclass[i] == X86_64_X87_CLASS
5759 || regclass[i] == X86_64_X87UP_CLASS
5760 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5762 if (!issued_x87_ret_error)
5764 error ("x87 register return with x87 disabled");
5765 issued_x87_ret_error = true;
5767 return NULL;
5770 /* First construct simple cases. Avoid SCmode, since we want to use
5771 single register to pass this type. */
5772 if (n == 1 && mode != SCmode)
5773 switch (regclass[0])
5775 case X86_64_INTEGER_CLASS:
5776 case X86_64_INTEGERSI_CLASS:
5777 return gen_rtx_REG (mode, intreg[0]);
5778 case X86_64_SSE_CLASS:
5779 case X86_64_SSESF_CLASS:
5780 case X86_64_SSEDF_CLASS:
5781 if (mode != BLKmode)
5782 return gen_reg_or_parallel (mode, orig_mode,
5783 SSE_REGNO (sse_regno));
5784 break;
5785 case X86_64_X87_CLASS:
5786 case X86_64_COMPLEX_X87_CLASS:
5787 return gen_rtx_REG (mode, FIRST_STACK_REG);
5788 case X86_64_NO_CLASS:
5789 /* Zero sized array, struct or class. */
5790 return NULL;
5791 default:
5792 gcc_unreachable ();
5794 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5795 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5796 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5797 if (n == 4
5798 && regclass[0] == X86_64_SSE_CLASS
5799 && regclass[1] == X86_64_SSEUP_CLASS
5800 && regclass[2] == X86_64_SSEUP_CLASS
5801 && regclass[3] == X86_64_SSEUP_CLASS
5802 && mode != BLKmode)
5803 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5805 if (n == 2
5806 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5807 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5808 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5809 && regclass[1] == X86_64_INTEGER_CLASS
5810 && (mode == CDImode || mode == TImode || mode == TFmode)
5811 && intreg[0] + 1 == intreg[1])
5812 return gen_rtx_REG (mode, intreg[0]);
5814 /* Otherwise figure out the entries of the PARALLEL. */
5815 for (i = 0; i < n; i++)
5817 int pos;
5819 switch (regclass[i])
5821 case X86_64_NO_CLASS:
5822 break;
5823 case X86_64_INTEGER_CLASS:
5824 case X86_64_INTEGERSI_CLASS:
5825 /* Merge TImodes on aligned occasions here too. */
5826 if (i * 8 + 8 > bytes)
5827 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5828 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5829 tmpmode = SImode;
5830 else
5831 tmpmode = DImode;
5832 /* We've requested 24 bytes for which we have no mode. Use DImode. */
5833 if (tmpmode == BLKmode)
5834 tmpmode = DImode;
5835 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5836 gen_rtx_REG (tmpmode, *intreg),
5837 GEN_INT (i*8));
5838 intreg++;
5839 break;
5840 case X86_64_SSESF_CLASS:
5841 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5842 gen_rtx_REG (SFmode,
5843 SSE_REGNO (sse_regno)),
5844 GEN_INT (i*8));
5845 sse_regno++;
5846 break;
5847 case X86_64_SSEDF_CLASS:
5848 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5849 gen_rtx_REG (DFmode,
5850 SSE_REGNO (sse_regno)),
5851 GEN_INT (i*8));
5852 sse_regno++;
5853 break;
5854 case X86_64_SSE_CLASS:
5855 pos = i;
5856 switch (n)
5858 case 1:
5859 tmpmode = DImode;
5860 break;
5861 case 2:
5862 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5864 tmpmode = TImode;
5865 i++;
5867 else
5868 tmpmode = DImode;
5869 break;
5870 case 4:
5871 gcc_assert (i == 0
5872 && regclass[1] == X86_64_SSEUP_CLASS
5873 && regclass[2] == X86_64_SSEUP_CLASS
5874 && regclass[3] == X86_64_SSEUP_CLASS);
5875 tmpmode = OImode;
5876 i += 3;
5877 break;
5878 default:
5879 gcc_unreachable ();
5881 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5882 gen_rtx_REG (tmpmode,
5883 SSE_REGNO (sse_regno)),
5884 GEN_INT (pos*8));
5885 sse_regno++;
5886 break;
5887 default:
5888 gcc_unreachable ();
5892 /* Empty aligned struct, union or class. */
5893 if (nexps == 0)
5894 return NULL;
5896 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5897 for (i = 0; i < nexps; i++)
5898 XVECEXP (ret, 0, i) = exp [i];
5899 return ret;
5902 /* Update the data in CUM to advance over an argument of mode MODE
5903 and data type TYPE. (TYPE is null for libcalls where that information
5904 may not be available.) */
5906 static void
5907 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5908 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5910 switch (mode)
5912 default:
5913 break;
5915 case BLKmode:
5916 if (bytes < 0)
5917 break;
5918 /* FALLTHRU */
5920 case DImode:
5921 case SImode:
5922 case HImode:
5923 case QImode:
5924 cum->words += words;
5925 cum->nregs -= words;
5926 cum->regno += words;
5928 if (cum->nregs <= 0)
5930 cum->nregs = 0;
5931 cum->regno = 0;
5933 break;
5935 case OImode:
5936 /* OImode shouldn't be used directly. */
5937 gcc_unreachable ();
5939 case DFmode:
5940 if (cum->float_in_sse < 2)
5941 break;
5942 case SFmode:
5943 if (cum->float_in_sse < 1)
5944 break;
5945 /* FALLTHRU */
5947 case V8SFmode:
5948 case V8SImode:
5949 case V32QImode:
5950 case V16HImode:
5951 case V4DFmode:
5952 case V4DImode:
5953 case TImode:
5954 case V16QImode:
5955 case V8HImode:
5956 case V4SImode:
5957 case V2DImode:
5958 case V4SFmode:
5959 case V2DFmode:
5960 if (!type || !AGGREGATE_TYPE_P (type))
5962 cum->sse_words += words;
5963 cum->sse_nregs -= 1;
5964 cum->sse_regno += 1;
5965 if (cum->sse_nregs <= 0)
5967 cum->sse_nregs = 0;
5968 cum->sse_regno = 0;
5971 break;
5973 case V8QImode:
5974 case V4HImode:
5975 case V2SImode:
5976 case V2SFmode:
5977 case V1TImode:
5978 case V1DImode:
5979 if (!type || !AGGREGATE_TYPE_P (type))
5981 cum->mmx_words += words;
5982 cum->mmx_nregs -= 1;
5983 cum->mmx_regno += 1;
5984 if (cum->mmx_nregs <= 0)
5986 cum->mmx_nregs = 0;
5987 cum->mmx_regno = 0;
5990 break;
5994 static void
5995 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5996 tree type, HOST_WIDE_INT words, int named)
5998 int int_nregs, sse_nregs;
6000 /* Unnamed 256bit vector mode parameters are passed on stack. */
6001 if (!named && VALID_AVX256_REG_MODE (mode))
6002 return;
6004 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
6005 cum->words += words;
6006 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
6008 cum->nregs -= int_nregs;
6009 cum->sse_nregs -= sse_nregs;
6010 cum->regno += int_nregs;
6011 cum->sse_regno += sse_nregs;
6013 else
6014 cum->words += words;
6017 static void
6018 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
6019 HOST_WIDE_INT words)
6021 /* Otherwise, this should be passed indirect. */
6022 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
6024 cum->words += words;
6025 if (cum->nregs > 0)
6027 cum->nregs -= 1;
6028 cum->regno += 1;
6032 void
6033 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6034 tree type, int named)
6036 HOST_WIDE_INT bytes, words;
6038 if (mode == BLKmode)
6039 bytes = int_size_in_bytes (type);
6040 else
6041 bytes = GET_MODE_SIZE (mode);
6042 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6044 if (type)
6045 mode = type_natural_mode (type, NULL);
6047 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6048 function_arg_advance_ms_64 (cum, bytes, words);
6049 else if (TARGET_64BIT)
6050 function_arg_advance_64 (cum, mode, type, words, named);
6051 else
6052 function_arg_advance_32 (cum, mode, type, bytes, words);
6055 /* Define where to put the arguments to a function.
6056 Value is zero to push the argument on the stack,
6057 or a hard register in which to store the argument.
6059 MODE is the argument's machine mode.
6060 TYPE is the data type of the argument (as a tree).
6061 This is null for libcalls where that information may
6062 not be available.
6063 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6064 the preceding args and about the function being called.
6065 NAMED is nonzero if this argument is a named parameter
6066 (otherwise it is an extra parameter matching an ellipsis). */
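/* Concrete example (illustrative): with -mregparm=3, the first three
   integer-sized named arguments land in EAX, EDX and ECX in that order;
   with the fastcall attribute only the first two go in ECX and EDX, as
   handled below.  */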
6068 static rtx
6069 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6070 enum machine_mode orig_mode, tree type,
6071 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
6073 static bool warnedsse, warnedmmx;
6075 /* Avoid the AL settings for the Unix64 ABI. */
6076 if (mode == VOIDmode)
6077 return constm1_rtx;
6079 switch (mode)
6081 default:
6082 break;
6084 case BLKmode:
6085 if (bytes < 0)
6086 break;
6087 /* FALLTHRU */
6088 case DImode:
6089 case SImode:
6090 case HImode:
6091 case QImode:
6092 if (words <= cum->nregs)
6094 int regno = cum->regno;
6096 /* Fastcall allocates the first two DWORD (SImode) or
6097 smaller arguments to ECX and EDX if they aren't
6098 aggregate types. */
6099 if (cum->fastcall)
6101 if (mode == BLKmode
6102 || mode == DImode
6103 || (type && AGGREGATE_TYPE_P (type)))
6104 break;
6106 /* ECX, not EAX, is the first allocated register. */
6107 if (regno == AX_REG)
6108 regno = CX_REG;
6110 return gen_rtx_REG (mode, regno);
6112 break;
6114 case DFmode:
6115 if (cum->float_in_sse < 2)
6116 break;
6117 case SFmode:
6118 if (cum->float_in_sse < 1)
6119 break;
6120 /* FALLTHRU */
6121 case TImode:
6122 /* In 32bit, we pass TImode in xmm registers. */
6123 case V16QImode:
6124 case V8HImode:
6125 case V4SImode:
6126 case V2DImode:
6127 case V4SFmode:
6128 case V2DFmode:
6129 if (!type || !AGGREGATE_TYPE_P (type))
6131 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
6133 warnedsse = true;
6134 warning (0, "SSE vector argument without SSE enabled "
6135 "changes the ABI");
6137 if (cum->sse_nregs)
6138 return gen_reg_or_parallel (mode, orig_mode,
6139 cum->sse_regno + FIRST_SSE_REG);
6141 break;
6143 case OImode:
6144 /* OImode shouldn't be used directly. */
6145 gcc_unreachable ();
6147 case V8SFmode:
6148 case V8SImode:
6149 case V32QImode:
6150 case V16HImode:
6151 case V4DFmode:
6152 case V4DImode:
6153 if (!type || !AGGREGATE_TYPE_P (type))
6155 if (cum->sse_nregs)
6156 return gen_reg_or_parallel (mode, orig_mode,
6157 cum->sse_regno + FIRST_SSE_REG);
6159 break;
6161 case V8QImode:
6162 case V4HImode:
6163 case V2SImode:
6164 case V2SFmode:
6165 case V1TImode:
6166 case V1DImode:
6167 if (!type || !AGGREGATE_TYPE_P (type))
6169 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6171 warnedmmx = true;
6172 warning (0, "MMX vector argument without MMX enabled "
6173 "changes the ABI");
6175 if (cum->mmx_nregs)
6176 return gen_reg_or_parallel (mode, orig_mode,
6177 cum->mmx_regno + FIRST_MMX_REG);
6179 break;
6182 return NULL_RTX;
6185 static rtx
6186 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6187 enum machine_mode orig_mode, tree type, int named)
6189 /* Handle a hidden AL argument containing the number of SSE registers
6190 for varargs x86-64 functions. */
6191 if (mode == VOIDmode)
6192 return GEN_INT (cum->maybe_vaarg
6193 ? (cum->sse_nregs < 0
6194 ? (cum->call_abi == ix86_abi
6195 ? SSE_REGPARM_MAX
6196 : (ix86_abi != SYSV_ABI
6197 ? X86_64_SSE_REGPARM_MAX
6198 : X86_64_MS_SSE_REGPARM_MAX))
6199 : cum->sse_regno)
6200 : -1);
6202 switch (mode)
6204 default:
6205 break;
6207 case V8SFmode:
6208 case V8SImode:
6209 case V32QImode:
6210 case V16HImode:
6211 case V4DFmode:
6212 case V4DImode:
6213 /* Unnamed 256bit vector mode parameters are passed on stack. */
6214 if (!named)
6215 return NULL;
6216 break;
6219 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6220 cum->sse_nregs,
6221 &x86_64_int_parameter_registers [cum->regno],
6222 cum->sse_regno);
6225 static rtx
6226 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6227 enum machine_mode orig_mode, int named,
6228 HOST_WIDE_INT bytes)
6230 unsigned int regno;
6232 /* We need to add a clobber for MS_ABI -> SYSV ABI calls in expand_call.
6233 We use the value -2 to specify that the current function call is MSABI. */
6234 if (mode == VOIDmode)
6235 return GEN_INT (-2);
6237 /* If we've run out of registers, it goes on the stack. */
6238 if (cum->nregs == 0)
6239 return NULL_RTX;
6241 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6243 /* Only floating point modes are passed in anything but integer regs. */
6244 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6246 if (named)
6247 regno = cum->regno + FIRST_SSE_REG;
6248 else
6250 rtx t1, t2;
6252 /* Unnamed floating parameters are passed in both the
6253 SSE and integer registers. */
6254 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6255 t2 = gen_rtx_REG (mode, regno);
6256 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6257 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6258 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6261 /* Handle aggregate types passed in registers. */
6262 if (orig_mode == BLKmode)
6264 if (bytes > 0 && bytes <= 8)
6265 mode = (bytes > 4 ? DImode : SImode);
6266 if (mode == BLKmode)
6267 mode = DImode;
6270 return gen_reg_or_parallel (mode, orig_mode, regno);
6274 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6275 tree type, int named)
6277 enum machine_mode mode = omode;
6278 HOST_WIDE_INT bytes, words;
6280 if (mode == BLKmode)
6281 bytes = int_size_in_bytes (type);
6282 else
6283 bytes = GET_MODE_SIZE (mode);
6284 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6286 /* To simplify the code below, represent vector types with a vector mode
6287 even if MMX/SSE are not active. */
6288 if (type && TREE_CODE (type) == VECTOR_TYPE)
6289 mode = type_natural_mode (type, cum);
6291 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6292 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6293 else if (TARGET_64BIT)
6294 return function_arg_64 (cum, mode, omode, type, named);
6295 else
6296 return function_arg_32 (cum, mode, omode, type, bytes, words);
6299 /* A C expression that indicates when an argument must be passed by
6300 reference. If nonzero for an argument, a copy of that argument is
6301 made in memory and a pointer to the argument is passed instead of
6302 the argument itself. The pointer is passed in whatever way is
6303 appropriate for passing a pointer to that type. */
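/* For example (illustrative): under the MS 64-bit convention handled
   below, an aggregate of 1, 2, 4 or 8 bytes is passed by value in a
   register, while a 16-byte __m128 or a 24-byte struct is passed by
   reference; under the SYSV 64-bit ABI only variable-sized types take
   this path.  */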
6305 static bool
6306 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6307 enum machine_mode mode ATTRIBUTE_UNUSED,
6308 const_tree type, bool named ATTRIBUTE_UNUSED)
6310 /* See Windows x64 Software Convention. */
6311 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6313 int msize = (int) GET_MODE_SIZE (mode);
6314 if (type)
6316 /* Arrays are passed by reference. */
6317 if (TREE_CODE (type) == ARRAY_TYPE)
6318 return true;
6320 if (AGGREGATE_TYPE_P (type))
6322 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6323 are passed by reference. */
6324 msize = int_size_in_bytes (type);
6328 /* __m128 is passed by reference. */
6329 switch (msize) {
6330 case 1: case 2: case 4: case 8:
6331 break;
6332 default:
6333 return true;
6336 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6337 return 1;
6339 return 0;
6342 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
6343 ABI. */
6344 static bool
6345 contains_aligned_value_p (tree type)
6347 enum machine_mode mode = TYPE_MODE (type);
6348 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6349 || mode == TDmode
6350 || mode == TFmode
6351 || mode == TCmode)
6352 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6353 return true;
6354 if (TYPE_ALIGN (type) < 128)
6355 return false;
6357 if (AGGREGATE_TYPE_P (type))
6359 /* Walk the aggregates recursively. */
6360 switch (TREE_CODE (type))
6362 case RECORD_TYPE:
6363 case UNION_TYPE:
6364 case QUAL_UNION_TYPE:
6366 tree field;
6368 /* Walk all the structure fields. */
6369 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6371 if (TREE_CODE (field) == FIELD_DECL
6372 && contains_aligned_value_p (TREE_TYPE (field)))
6373 return true;
6375 break;
6378 case ARRAY_TYPE:
6379 /* Just in case some languages pass arrays by value. */
6380 if (contains_aligned_value_p (TREE_TYPE (type)))
6381 return true;
6382 break;
6384 default:
6385 gcc_unreachable ();
6388 return false;
6391 /* Gives the alignment boundary, in bits, of an argument with the
6392 specified mode and type. */
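/* For instance (illustrative): in 32-bit mode a plain int or double
   argument gets PARM_BOUNDARY (32-bit) alignment, while an __m128 or
   other 16-byte SSE argument is aligned to 128 bits by the logic
   below.  */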
6395 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6397 int align;
6398 if (type)
6400 /* Since canonical type is used for call, we convert it to
6401 canonical type if needed. */
6402 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6403 type = TYPE_CANONICAL (type);
6404 align = TYPE_ALIGN (type);
6406 else
6407 align = GET_MODE_ALIGNMENT (mode);
6408 if (align < PARM_BOUNDARY)
6409 align = PARM_BOUNDARY;
6410 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6411 natural boundaries. */
6412 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6414 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6415 make an exception for SSE modes since these require 128bit
6416 alignment.
6418 The handling here differs from field_alignment. ICC aligns MMX
6419 arguments to 4 byte boundaries, while structure fields are aligned
6420 to 8 byte boundaries. */
6421 if (!type)
6423 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6424 align = PARM_BOUNDARY;
6426 else
6428 if (!contains_aligned_value_p (type))
6429 align = PARM_BOUNDARY;
6432 if (align > BIGGEST_ALIGNMENT)
6433 align = BIGGEST_ALIGNMENT;
6434 return align;
6437 /* Return true if N is a possible register number of function value. */
6439 static bool
6440 ix86_function_value_regno_p (const unsigned int regno)
6442 switch (regno)
6444 case 0:
6445 return true;
6447 case FIRST_FLOAT_REG:
6448 /* TODO: The function should depend on current function ABI but
6449 builtins.c would need updating then. Therefore we use the
6450 default ABI. */
6451 if (TARGET_64BIT && ix86_abi == MS_ABI)
6452 return false;
6453 return TARGET_FLOAT_RETURNS_IN_80387;
6455 case FIRST_SSE_REG:
6456 return TARGET_SSE;
6458 case FIRST_MMX_REG:
6459 if (TARGET_MACHO || TARGET_64BIT)
6460 return false;
6461 return TARGET_MMX;
6464 return false;
6467 /* Define how to find the value returned by a function.
6468 VALTYPE is the data type of the value (as a tree).
6469 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6470 otherwise, FUNC is 0. */
6472 static rtx
6473 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6474 const_tree fntype, const_tree fn)
6476 unsigned int regno;
6478 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6479 we normally prevent this case when mmx is not available. However
6480 some ABIs may require the result to be returned like DImode. */
6481 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6482 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6484 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6485 we prevent this case when sse is not available. However some ABIs
6486 may require the result to be returned like integer TImode. */
6487 else if (mode == TImode
6488 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6489 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6491 /* 32-byte vector modes in %ymm0. */
6492 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6493 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6495 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6496 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6497 regno = FIRST_FLOAT_REG;
6498 else
6499 /* Most things go in %eax. */
6500 regno = AX_REG;
6502 /* Override FP return register with %xmm0 for local functions when
6503 SSE math is enabled or for functions with sseregparm attribute. */
6504 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6506 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6507 if ((sse_level >= 1 && mode == SFmode)
6508 || (sse_level == 2 && mode == DFmode))
6509 regno = FIRST_SSE_REG;
6512 /* OImode shouldn't be used directly. */
6513 gcc_assert (mode != OImode);
6515 return gen_rtx_REG (orig_mode, regno);
6518 static rtx
6519 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6520 const_tree valtype)
6522 rtx ret;
6524 /* Handle libcalls, which don't provide a type node. */
6525 if (valtype == NULL)
6527 switch (mode)
6529 case SFmode:
6530 case SCmode:
6531 case DFmode:
6532 case DCmode:
6533 case TFmode:
6534 case SDmode:
6535 case DDmode:
6536 case TDmode:
6537 return gen_rtx_REG (mode, FIRST_SSE_REG);
6538 case XFmode:
6539 case XCmode:
6540 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6541 case TCmode:
6542 return NULL;
6543 default:
6544 return gen_rtx_REG (mode, AX_REG);
6548 ret = construct_container (mode, orig_mode, valtype, 1,
6549 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6550 x86_64_int_return_registers, 0);
6552 /* For zero-sized structures, construct_container returns NULL, but we
6553 need to keep the rest of the compiler happy by returning a meaningful value. */
6554 if (!ret)
6555 ret = gen_rtx_REG (orig_mode, AX_REG);
6557 return ret;
6560 static rtx
6561 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6563 unsigned int regno = AX_REG;
6565 if (TARGET_SSE)
6567 switch (GET_MODE_SIZE (mode))
6569 case 16:
6570 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6571 && !COMPLEX_MODE_P (mode))
6572 regno = FIRST_SSE_REG;
6573 break;
6574 case 8:
6575 case 4:
6576 if (mode == SFmode || mode == DFmode)
6577 regno = FIRST_SSE_REG;
6578 break;
6579 default:
6580 break;
6583 return gen_rtx_REG (orig_mode, regno);
6586 static rtx
6587 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6588 enum machine_mode orig_mode, enum machine_mode mode)
6590 const_tree fn, fntype;
6592 fn = NULL_TREE;
6593 if (fntype_or_decl && DECL_P (fntype_or_decl))
6594 fn = fntype_or_decl;
6595 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6597 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6598 return function_value_ms_64 (orig_mode, mode);
6599 else if (TARGET_64BIT)
6600 return function_value_64 (orig_mode, mode, valtype);
6601 else
6602 return function_value_32 (orig_mode, mode, fntype, fn);
6605 static rtx
6606 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6607 bool outgoing ATTRIBUTE_UNUSED)
6609 enum machine_mode mode, orig_mode;
6611 orig_mode = TYPE_MODE (valtype);
6612 mode = type_natural_mode (valtype, NULL);
6613 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6617 ix86_libcall_value (enum machine_mode mode)
6619 return ix86_function_value_1 (NULL, NULL, mode, mode);
6622 /* Return true iff type is returned in memory. */
6624 static int ATTRIBUTE_UNUSED
6625 return_in_memory_32 (const_tree type, enum machine_mode mode)
6627 HOST_WIDE_INT size;
6629 if (mode == BLKmode)
6630 return 1;
6632 size = int_size_in_bytes (type);
6634 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6635 return 0;
6637 if (VECTOR_MODE_P (mode) || mode == TImode)
6639 /* User-created vectors small enough to fit in EAX. */
6640 if (size < 8)
6641 return 0;
6643 /* MMX/3dNow values are returned in MM0,
6644 except when it doesn't exist. */
6645 if (size == 8)
6646 return (TARGET_MMX ? 0 : 1);
6648 /* SSE values are returned in XMM0, except when it doesn't exist. */
6649 if (size == 16)
6650 return (TARGET_SSE ? 0 : 1);
6652 /* AVX values are returned in YMM0, except when it doesn't exist. */
6653 if (size == 32)
6654 return TARGET_AVX ? 0 : 1;
6657 if (mode == XFmode)
6658 return 0;
6660 if (size > 12)
6661 return 1;
6663 /* OImode shouldn't be used directly. */
6664 gcc_assert (mode != OImode);
6666 return 0;
6669 static int ATTRIBUTE_UNUSED
6670 return_in_memory_64 (const_tree type, enum machine_mode mode)
6672 int needed_intregs, needed_sseregs;
6673 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6676 static int ATTRIBUTE_UNUSED
6677 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6679 HOST_WIDE_INT size = int_size_in_bytes (type);
6681 /* __m128 is returned in xmm0. */
6682 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6683 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6684 return 0;
6686 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6687 return (size != 1 && size != 2 && size != 4 && size != 8);
6690 static bool
6691 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6693 #ifdef SUBTARGET_RETURN_IN_MEMORY
6694 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6695 #else
6696 const enum machine_mode mode = type_natural_mode (type, NULL);
6698 if (TARGET_64BIT)
6700 if (ix86_function_type_abi (fntype) == MS_ABI)
6701 return return_in_memory_ms_64 (type, mode);
6702 else
6703 return return_in_memory_64 (type, mode);
6705 else
6706 return return_in_memory_32 (type, mode);
6707 #endif
6710 /* Return true iff TYPE is returned in memory. This version is used
6711 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6712 but differs notably in that when MMX is available, 8-byte vectors
6713 are returned in memory, rather than in MMX registers. */
6715 bool
6716 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6718 int size;
6719 enum machine_mode mode = type_natural_mode (type, NULL);
6721 if (TARGET_64BIT)
6722 return return_in_memory_64 (type, mode);
6724 if (mode == BLKmode)
6725 return 1;
6727 size = int_size_in_bytes (type);
6729 if (VECTOR_MODE_P (mode))
6731 /* Return in memory only if MMX registers *are* available. This
6732 seems backwards, but it is consistent with the existing
6733 Solaris x86 ABI. */
6734 if (size == 8)
6735 return TARGET_MMX;
6736 if (size == 16)
6737 return !TARGET_SSE;
6739 else if (mode == TImode)
6740 return !TARGET_SSE;
6741 else if (mode == XFmode)
6742 return 0;
6744 return size > 12;
6747 /* When returning SSE vector types, we have a choice of either
6748 (1) being abi incompatible with a -march switch, or
6749 (2) generating an error.
6750 Given no good solution, I think the safest thing is one warning.
6751 The user won't be able to use -Werror, but....
6753 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6754 called in response to actually generating a caller or callee that
6755 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6756 via aggregate_value_p for general type probing from tree-ssa. */
6758 static rtx
6759 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6761 static bool warnedsse, warnedmmx;
6763 if (!TARGET_64BIT && type)
6765 /* Look at the return type of the function, not the function type. */
6766 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6768 if (!TARGET_SSE && !warnedsse)
6770 if (mode == TImode
6771 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6773 warnedsse = true;
6774 warning (0, "SSE vector return without SSE enabled "
6775 "changes the ABI");
6779 if (!TARGET_MMX && !warnedmmx)
6781 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6783 warnedmmx = true;
6784 warning (0, "MMX vector return without MMX enabled "
6785 "changes the ABI");
6790 return NULL;
6794 /* Create the va_list data type. */
6796 /* Return the calling-convention-specific va_list data type.
6797 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
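/* For reference, the record built below corresponds roughly to the
   following C declaration from the SYSV x86-64 ABI (shown only as an
   illustrative sketch; the actual type is constructed through the tree
   machinery):

     typedef struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __builtin_va_list[1];  */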
6799 static tree
6800 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6802 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6804 /* For i386 we use plain pointer to argument area. */
6805 if (!TARGET_64BIT || abi == MS_ABI)
6806 return build_pointer_type (char_type_node);
6808 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6809 type_decl = build_decl (BUILTINS_LOCATION,
6810 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6812 f_gpr = build_decl (BUILTINS_LOCATION,
6813 FIELD_DECL, get_identifier ("gp_offset"),
6814 unsigned_type_node);
6815 f_fpr = build_decl (BUILTINS_LOCATION,
6816 FIELD_DECL, get_identifier ("fp_offset"),
6817 unsigned_type_node);
6818 f_ovf = build_decl (BUILTINS_LOCATION,
6819 FIELD_DECL, get_identifier ("overflow_arg_area"),
6820 ptr_type_node);
6821 f_sav = build_decl (BUILTINS_LOCATION,
6822 FIELD_DECL, get_identifier ("reg_save_area"),
6823 ptr_type_node);
6825 va_list_gpr_counter_field = f_gpr;
6826 va_list_fpr_counter_field = f_fpr;
6828 DECL_FIELD_CONTEXT (f_gpr) = record;
6829 DECL_FIELD_CONTEXT (f_fpr) = record;
6830 DECL_FIELD_CONTEXT (f_ovf) = record;
6831 DECL_FIELD_CONTEXT (f_sav) = record;
6833 TREE_CHAIN (record) = type_decl;
6834 TYPE_NAME (record) = type_decl;
6835 TYPE_FIELDS (record) = f_gpr;
6836 TREE_CHAIN (f_gpr) = f_fpr;
6837 TREE_CHAIN (f_fpr) = f_ovf;
6838 TREE_CHAIN (f_ovf) = f_sav;
6840 layout_type (record);
6842 /* The correct type is an array type of one element. */
6843 return build_array_type (record, build_index_type (size_zero_node));
6846 /* Set up the builtin va_list data type and, for 64-bit, the additional
6847 calling-convention-specific va_list data types. */
6849 static tree
6850 ix86_build_builtin_va_list (void)
6852 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6854 /* Initialize abi specific va_list builtin types. */
6855 if (TARGET_64BIT)
6857 tree t;
6858 if (ix86_abi == MS_ABI)
6860 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6861 if (TREE_CODE (t) != RECORD_TYPE)
6862 t = build_variant_type_copy (t);
6863 sysv_va_list_type_node = t;
6865 else
6867 t = ret;
6868 if (TREE_CODE (t) != RECORD_TYPE)
6869 t = build_variant_type_copy (t);
6870 sysv_va_list_type_node = t;
6872 if (ix86_abi != MS_ABI)
6874 t = ix86_build_builtin_va_list_abi (MS_ABI);
6875 if (TREE_CODE (t) != RECORD_TYPE)
6876 t = build_variant_type_copy (t);
6877 ms_va_list_type_node = t;
6879 else
6881 t = ret;
6882 if (TREE_CODE (t) != RECORD_TYPE)
6883 t = build_variant_type_copy (t);
6884 ms_va_list_type_node = t;
6888 return ret;
6891 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6893 static void
6894 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6896 rtx save_area, mem;
6897 rtx label;
6898 rtx tmp_reg;
6899 rtx nsse_reg;
6900 alias_set_type set;
6901 int i;
6902 int regparm = ix86_regparm;
6904 if (cum->call_abi != ix86_abi)
6905 regparm = (ix86_abi != SYSV_ABI
6906 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6908 /* GPR size of varargs save area. */
6909 if (cfun->va_list_gpr_size)
6910 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6911 else
6912 ix86_varargs_gpr_size = 0;
6914 /* FPR size of varargs save area. We don't need it if we don't pass
6915 anything in SSE registers. */
6916 if (cum->sse_nregs && cfun->va_list_fpr_size)
6917 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6918 else
6919 ix86_varargs_fpr_size = 0;
6921 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6922 return;
6924 save_area = frame_pointer_rtx;
6925 set = get_varargs_alias_set ();
6927 for (i = cum->regno;
6928 i < regparm
6929 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6930 i++)
6932 mem = gen_rtx_MEM (Pmode,
6933 plus_constant (save_area, i * UNITS_PER_WORD));
6934 MEM_NOTRAP_P (mem) = 1;
6935 set_mem_alias_set (mem, set);
6936 emit_move_insn (mem, gen_rtx_REG (Pmode,
6937 x86_64_int_parameter_registers[i]));
6940 if (ix86_varargs_fpr_size)
6942 /* Now emit code to save SSE registers. The AX parameter contains the number
6943 of SSE parameter registers used to call this function. We use the
6944 sse_prologue_save insn template, which produces a computed jump across the
6945 SSE saves. We need some preparation work to get this working. */
6947 label = gen_label_rtx ();
6949 nsse_reg = gen_reg_rtx (Pmode);
6950 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6952 /* Compute the address of the memory block we save into. We always use a
6953 pointer pointing 127 bytes after the first byte to store - this is needed to keep
6954 the instruction size limited to 4 bytes (5 bytes for AVX) with a one
6955 byte displacement. */
6956 tmp_reg = gen_reg_rtx (Pmode);
6957 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6958 plus_constant (save_area,
6959 ix86_varargs_gpr_size + 127)));
6960 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6961 MEM_NOTRAP_P (mem) = 1;
6962 set_mem_alias_set (mem, set);
6963 set_mem_align (mem, 64);
6965 /* And finally do the dirty job! */
6966 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6967 GEN_INT (cum->sse_regno), label,
6968 gen_reg_rtx (Pmode)));
6972 static void
6973 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6975 alias_set_type set = get_varargs_alias_set ();
6976 int i;
6978 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6980 rtx reg, mem;
6982 mem = gen_rtx_MEM (Pmode,
6983 plus_constant (virtual_incoming_args_rtx,
6984 i * UNITS_PER_WORD));
6985 MEM_NOTRAP_P (mem) = 1;
6986 set_mem_alias_set (mem, set);
6988 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6989 emit_move_insn (mem, reg);
6993 static void
6994 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6995 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6996 int no_rtl)
6998 CUMULATIVE_ARGS next_cum;
6999 tree fntype;
7001 /* This argument doesn't appear to be used anymore. Which is good,
7002 because the old code here didn't suppress rtl generation. */
7003 gcc_assert (!no_rtl);
7005 if (!TARGET_64BIT)
7006 return;
7008 fntype = TREE_TYPE (current_function_decl);
7010 /* For varargs, we do not want to skip the dummy va_dcl argument.
7011 For stdargs, we do want to skip the last named argument. */
7012 next_cum = *cum;
7013 if (stdarg_p (fntype))
7014 function_arg_advance (&next_cum, mode, type, 1);
7016 if (cum->call_abi == MS_ABI)
7017 setup_incoming_varargs_ms_64 (&next_cum);
7018 else
7019 setup_incoming_varargs_64 (&next_cum);
7022 /* Checks if TYPE is of kind va_list char *. */
7024 static bool
7025 is_va_list_char_pointer (tree type)
7027 tree canonic;
7029 /* For 32-bit it is always true. */
7030 if (!TARGET_64BIT)
7031 return true;
7032 canonic = ix86_canonical_va_list_type (type);
7033 return (canonic == ms_va_list_type_node
7034 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
7037 /* Implement va_start. */
7039 static void
7040 ix86_va_start (tree valist, rtx nextarg)
7042 HOST_WIDE_INT words, n_gpr, n_fpr;
7043 tree f_gpr, f_fpr, f_ovf, f_sav;
7044 tree gpr, fpr, ovf, sav, t;
7045 tree type;
7047 /* Only 64bit target needs something special. */
7048 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7050 std_expand_builtin_va_start (valist, nextarg);
7051 return;
7054 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7055 f_fpr = TREE_CHAIN (f_gpr);
7056 f_ovf = TREE_CHAIN (f_fpr);
7057 f_sav = TREE_CHAIN (f_ovf);
7059 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
7060 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
7061 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7062 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7063 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7065 /* Count number of gp and fp argument registers used. */
7066 words = crtl->args.info.words;
7067 n_gpr = crtl->args.info.regno;
7068 n_fpr = crtl->args.info.sse_regno;
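 /* Illustrative numbers (added for exposition, not from the original
    comments): if two integer and one SSE registers were consumed by the
    named arguments, the code below initializes gp_offset to 2 * 8 = 16 and
    fp_offset to 1 * 16 + 8 * X86_64_REGPARM_MAX (i.e. 16 + 48 = 64), the
    byte offsets into the register save area where the unnamed arguments
    start.  */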
7070 if (cfun->va_list_gpr_size)
7072 type = TREE_TYPE (gpr);
7073 t = build2 (MODIFY_EXPR, type,
7074 gpr, build_int_cst (type, n_gpr * 8));
7075 TREE_SIDE_EFFECTS (t) = 1;
7076 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7079 if (TARGET_SSE && cfun->va_list_fpr_size)
7081 type = TREE_TYPE (fpr);
7082 t = build2 (MODIFY_EXPR, type, fpr,
7083 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
7084 TREE_SIDE_EFFECTS (t) = 1;
7085 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7088 /* Find the overflow area. */
7089 type = TREE_TYPE (ovf);
7090 t = make_tree (type, crtl->args.internal_arg_pointer);
7091 if (words != 0)
7092 t = build2 (POINTER_PLUS_EXPR, type, t,
7093 size_int (words * UNITS_PER_WORD));
7094 t = build2 (MODIFY_EXPR, type, ovf, t);
7095 TREE_SIDE_EFFECTS (t) = 1;
7096 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7098 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
7100 /* Find the register save area.
7101 The function prologue saves it right above the stack frame. */
7102 type = TREE_TYPE (sav);
7103 t = make_tree (type, frame_pointer_rtx);
7104 if (!ix86_varargs_gpr_size)
7105 t = build2 (POINTER_PLUS_EXPR, type, t,
7106 size_int (-8 * X86_64_REGPARM_MAX));
7107 t = build2 (MODIFY_EXPR, type, sav, t);
7108 TREE_SIDE_EFFECTS (t) = 1;
7109 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7113 /* Implement va_arg. */
7115 static tree
7116 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7117 gimple_seq *post_p)
7119 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
7120 tree f_gpr, f_fpr, f_ovf, f_sav;
7121 tree gpr, fpr, ovf, sav, t;
7122 int size, rsize;
7123 tree lab_false, lab_over = NULL_TREE;
7124 tree addr, t2;
7125 rtx container;
7126 int indirect_p = 0;
7127 tree ptrtype;
7128 enum machine_mode nat_mode;
7129 unsigned int arg_boundary;
7131 /* Only the 64-bit target needs anything special.  */
7132 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7133 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7135 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7136 f_fpr = TREE_CHAIN (f_gpr);
7137 f_ovf = TREE_CHAIN (f_fpr);
7138 f_sav = TREE_CHAIN (f_ovf);
7140 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7141 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7142 valist = build_va_arg_indirect_ref (valist);
7143 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7144 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7145 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7147 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7148 if (indirect_p)
7149 type = build_pointer_type (type);
7150 size = int_size_in_bytes (type);
7151 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
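/* For example, a 12-byte structure on a 64-bit target (UNITS_PER_WORD == 8)
   gives size == 12 and rsize == 2 stack words.  */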
7153 nat_mode = type_natural_mode (type, NULL);
7154 switch (nat_mode)
7156 case V8SFmode:
7157 case V8SImode:
7158 case V32QImode:
7159 case V16HImode:
7160 case V4DFmode:
7161 case V4DImode:
7162 /* Unnamed 256-bit vector mode parameters are passed on the stack.  */
7163 if (ix86_cfun_abi () == SYSV_ABI)
7165 container = NULL;
7166 break;
7169 default:
7170 container = construct_container (nat_mode, TYPE_MODE (type),
7171 type, 0, X86_64_REGPARM_MAX,
7172 X86_64_SSE_REGPARM_MAX, intreg,
7174 break;
7177 /* Pull the value out of the saved registers. */
7179 addr = create_tmp_var (ptr_type_node, "addr");
7181 if (container)
7183 int needed_intregs, needed_sseregs;
7184 bool need_temp;
7185 tree int_addr, sse_addr;
7187 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7188 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7190 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7192 need_temp = (!REG_P (container)
7193 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7194 || TYPE_ALIGN (type) > 128));
7196 /* If we are passing a structure, verify that it forms a consecutive block
7197 in the register save area.  If not, we need to do moves.  */
7198 if (!need_temp && !REG_P (container))
7200 /* Verify that all registers are strictly consecutive */
7201 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7203 int i;
7205 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7207 rtx slot = XVECEXP (container, 0, i);
7208 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7209 || INTVAL (XEXP (slot, 1)) != i * 16)
7210 need_temp = 1;
7213 else
7215 int i;
7217 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7219 rtx slot = XVECEXP (container, 0, i);
7220 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7221 || INTVAL (XEXP (slot, 1)) != i * 8)
7222 need_temp = 1;
7226 if (!need_temp)
7228 int_addr = addr;
7229 sse_addr = addr;
7231 else
7233 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7234 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7237 /* First ensure that we fit completely in registers. */
7238 if (needed_intregs)
7240 t = build_int_cst (TREE_TYPE (gpr),
7241 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7242 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7243 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7244 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7245 gimplify_and_add (t, pre_p);
7247 if (needed_sseregs)
7249 t = build_int_cst (TREE_TYPE (fpr),
7250 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7251 + X86_64_REGPARM_MAX * 8);
7252 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7253 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7254 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7255 gimplify_and_add (t, pre_p);
7258 /* Compute index to start of area used for integer regs. */
7259 if (needed_intregs)
7261 /* int_addr = gpr + sav; */
7262 t = fold_convert (sizetype, gpr);
7263 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7264 gimplify_assign (int_addr, t, pre_p);
7266 if (needed_sseregs)
7268 /* sse_addr = fpr + sav; */
7269 t = fold_convert (sizetype, fpr);
7270 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7271 gimplify_assign (sse_addr, t, pre_p);
7273 if (need_temp)
7275 int i;
7276 tree temp = create_tmp_var (type, "va_arg_tmp");
7278 /* addr = &temp; */
7279 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7280 gimplify_assign (addr, t, pre_p);
7282 for (i = 0; i < XVECLEN (container, 0); i++)
7284 rtx slot = XVECEXP (container, 0, i);
7285 rtx reg = XEXP (slot, 0);
7286 enum machine_mode mode = GET_MODE (reg);
7287 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7288 tree addr_type = build_pointer_type (piece_type);
7289 tree daddr_type = build_pointer_type_for_mode (piece_type,
7290 ptr_mode, true);
7291 tree src_addr, src;
7292 int src_offset;
7293 tree dest_addr, dest;
7295 if (SSE_REGNO_P (REGNO (reg)))
7297 src_addr = sse_addr;
7298 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7300 else
7302 src_addr = int_addr;
7303 src_offset = REGNO (reg) * 8;
7305 src_addr = fold_convert (addr_type, src_addr);
7306 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7307 size_int (src_offset));
7308 src = build_va_arg_indirect_ref (src_addr);
7310 dest_addr = fold_convert (daddr_type, addr);
7311 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7312 size_int (INTVAL (XEXP (slot, 1))));
7313 dest = build_va_arg_indirect_ref (dest_addr);
7315 gimplify_assign (dest, src, pre_p);
7319 if (needed_intregs)
7321 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7322 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7323 gimplify_assign (gpr, t, pre_p);
7326 if (needed_sseregs)
7328 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7329 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7330 gimplify_assign (fpr, t, pre_p);
7333 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7335 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7338 /* ... otherwise out of the overflow area. */
7340 /* When the caller aligns a parameter on the stack, a parameter whose
7341 alignment exceeds MAX_SUPPORTED_STACK_ALIGNMENT is only aligned at
7342 MAX_SUPPORTED_STACK_ALIGNMENT.  Match that caller behaviour here in
7343 the callee.  */
7344 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7345 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7346 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7348 /* Care for on-stack alignment if needed. */
7349 if (arg_boundary <= 64
7350 || integer_zerop (TYPE_SIZE (type)))
7351 t = ovf;
7352 else
7354 HOST_WIDE_INT align = arg_boundary / 8;
7355 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7356 size_int (align - 1));
7357 t = fold_convert (sizetype, t);
7358 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7359 size_int (-align));
7360 t = fold_convert (TREE_TYPE (ovf), t);
7361 if (crtl->stack_alignment_needed < arg_boundary)
7362 crtl->stack_alignment_needed = arg_boundary;
7364 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7365 gimplify_assign (addr, t, pre_p);
7367 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7368 size_int (rsize * UNITS_PER_WORD));
7369 gimplify_assign (unshare_expr (ovf), t, pre_p);
7371 if (container)
7372 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7374 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7375 addr = fold_convert (ptrtype, addr);
7377 if (indirect_p)
7378 addr = build_va_arg_indirect_ref (addr);
7379 return build_va_arg_indirect_ref (addr);
7382 /* Return nonzero if OPNUM's MEM should be matched
7383 in movabs* patterns. */
7386 ix86_check_movabs (rtx insn, int opnum)
7388 rtx set, mem;
7390 set = PATTERN (insn);
7391 if (GET_CODE (set) == PARALLEL)
7392 set = XVECEXP (set, 0, 0);
7393 gcc_assert (GET_CODE (set) == SET);
7394 mem = XEXP (set, opnum);
7395 while (GET_CODE (mem) == SUBREG)
7396 mem = SUBREG_REG (mem);
7397 gcc_assert (MEM_P (mem));
7398 return (volatile_ok || !MEM_VOLATILE_P (mem));
7401 /* Initialize the table of extra 80387 mathematical constants. */
7403 static void
7404 init_ext_80387_constants (void)
7406 static const char * cst[5] =
7408 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7409 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7410 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7411 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7412 "3.1415926535897932385128089594061862044", /* 4: fldpi */
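/* In order these are log10(2), ln(2), log2(e), log2(10) and pi, i.e. the
   values loaded by the fldlg2, fldln2, fldl2e, fldl2t and fldpi insns.  */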
7414 int i;
7416 for (i = 0; i < 5; i++)
7418 real_from_string (&ext_80387_constants_table[i], cst[i]);
7419 /* Ensure each constant is rounded to XFmode precision. */
7420 real_convert (&ext_80387_constants_table[i],
7421 XFmode, &ext_80387_constants_table[i]);
7424 ext_80387_constants_init = 1;
7427 /* Return a nonzero code if the constant X can be loaded by a special 80387
7428 sequence (see standard_80387_constant_opcode), 0 if it cannot, or -1 if X is not an 80387 float constant.  */
7431 standard_80387_constant_p (rtx x)
7433 enum machine_mode mode = GET_MODE (x);
7435 REAL_VALUE_TYPE r;
7437 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7438 return -1;
7440 if (x == CONST0_RTX (mode))
7441 return 1;
7442 if (x == CONST1_RTX (mode))
7443 return 2;
7445 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7447 /* For XFmode constants, try to find a special 80387 instruction when
7448 optimizing for size or on those CPUs that benefit from them. */
7449 if (mode == XFmode
7450 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7452 int i;
7454 if (! ext_80387_constants_init)
7455 init_ext_80387_constants ();
7457 for (i = 0; i < 5; i++)
7458 if (real_identical (&r, &ext_80387_constants_table[i]))
7459 return i + 3;
7462 /* Load of the constant -0.0 or -1.0 will be split as
7463 fldz;fchs or fld1;fchs sequence. */
7464 if (real_isnegzero (&r))
7465 return 8;
7466 if (real_identical (&r, &dconstm1))
7467 return 9;
7469 return 0;
7472 /* Return the opcode of the special instruction to be used to load
7473 the constant X. */
7475 const char *
7476 standard_80387_constant_opcode (rtx x)
7478 switch (standard_80387_constant_p (x))
7480 case 1:
7481 return "fldz";
7482 case 2:
7483 return "fld1";
7484 case 3:
7485 return "fldlg2";
7486 case 4:
7487 return "fldln2";
7488 case 5:
7489 return "fldl2e";
7490 case 6:
7491 return "fldl2t";
7492 case 7:
7493 return "fldpi";
7494 case 8:
7495 case 9:
7496 return "#";
7497 default:
7498 gcc_unreachable ();
7502 /* Return the CONST_DOUBLE representing the 80387 constant that is
7503 loaded by the specified special instruction. The argument IDX
7504 matches the return value from standard_80387_constant_p. */
7507 standard_80387_constant_rtx (int idx)
7509 int i;
7511 if (! ext_80387_constants_init)
7512 init_ext_80387_constants ();
7514 switch (idx)
7516 case 3:
7517 case 4:
7518 case 5:
7519 case 6:
7520 case 7:
7521 i = idx - 3;
7522 break;
7524 default:
7525 gcc_unreachable ();
7528 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7529 XFmode);
7532 /* Return 1 if X is all 0s, 2 if X is all 1s in a supported SSE
7533 vector mode, and 0 otherwise.  */
7536 standard_sse_constant_p (rtx x)
7538 enum machine_mode mode = GET_MODE (x);
7540 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7541 return 1;
7542 if (vector_all_ones_operand (x, mode))
7543 switch (mode)
7545 case V16QImode:
7546 case V8HImode:
7547 case V4SImode:
7548 case V2DImode:
7549 if (TARGET_SSE2)
7550 return 2;
7551 default:
7552 break;
7555 return 0;
7558 /* Return the opcode of the special instruction to be used to load
7559 the constant X. */
7561 const char *
7562 standard_sse_constant_opcode (rtx insn, rtx x)
7564 switch (standard_sse_constant_p (x))
7566 case 1:
7567 switch (get_attr_mode (insn))
7569 case MODE_V4SF:
7570 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7571 case MODE_V2DF:
7572 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7573 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7574 else
7575 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7576 case MODE_TI:
7577 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7578 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7579 else
7580 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7581 case MODE_V8SF:
7582 return "vxorps\t%x0, %x0, %x0";
7583 case MODE_V4DF:
7584 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7585 return "vxorps\t%x0, %x0, %x0";
7586 else
7587 return "vxorpd\t%x0, %x0, %x0";
7588 case MODE_OI:
7589 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
7590 return "vxorps\t%x0, %x0, %x0";
7591 else
7592 return "vpxor\t%x0, %x0, %x0";
7593 default:
7594 break;
7596 case 2:
7597 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7598 default:
7599 break;
7601 gcc_unreachable ();
7604 /* Returns 1 if OP contains a symbol reference */
7607 symbolic_reference_mentioned_p (rtx op)
7609 const char *fmt;
7610 int i;
7612 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7613 return 1;
7615 fmt = GET_RTX_FORMAT (GET_CODE (op));
7616 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7618 if (fmt[i] == 'E')
7620 int j;
7622 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7623 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7624 return 1;
7627 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7628 return 1;
7631 return 0;
7634 /* Return 1 if it is appropriate to emit `ret' instructions in the
7635 body of a function. Do this only if the epilogue is simple, needing a
7636 couple of insns. Prior to reloading, we can't tell how many registers
7637 must be saved, so return 0 then. Return 0 if there is no frame
7638 marker to de-allocate. */
7641 ix86_can_use_return_insn_p (void)
7643 struct ix86_frame frame;
7645 if (! reload_completed || frame_pointer_needed)
7646 return 0;
7648 /* Don't allow more than 32k bytes to pop, since that's all we can do
7649 with one instruction. */
7650 if (crtl->args.pops_args
7651 && crtl->args.size >= 32768)
7652 return 0;
7654 ix86_compute_frame_layout (&frame);
7655 return frame.to_allocate == 0 && frame.padding0 == 0
7656 && (frame.nregs + frame.nsseregs) == 0;
7659 /* Value should be nonzero if functions must have frame pointers.
7660 Zero means the frame pointer need not be set up (and parms may
7661 be accessed via the stack pointer) in functions that seem suitable. */
7663 static bool
7664 ix86_frame_pointer_required (void)
7666 /* If we accessed previous frames, then the generated code expects
7667 to be able to access the saved ebp value in our frame. */
7668 if (cfun->machine->accesses_prev_frame)
7669 return true;
7671 /* Several x86 os'es need a frame pointer for other reasons,
7672 usually pertaining to setjmp. */
7673 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7674 return true;
7676 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7677 the frame pointer by default. Turn it back on now if we've not
7678 got a leaf function. */
7679 if (TARGET_OMIT_LEAF_FRAME_POINTER
7680 && (!current_function_is_leaf
7681 || ix86_current_function_calls_tls_descriptor))
7682 return true;
7684 if (crtl->profile)
7685 return true;
7687 return false;
7690 /* Record that the current function accesses previous call frames. */
7692 void
7693 ix86_setup_frame_addresses (void)
7695 cfun->machine->accesses_prev_frame = 1;
7698 #ifndef USE_HIDDEN_LINKONCE
7699 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7700 # define USE_HIDDEN_LINKONCE 1
7701 # else
7702 # define USE_HIDDEN_LINKONCE 0
7703 # endif
7704 #endif
7706 static int pic_labels_used;
7708 /* Fills in the label name that should be used for a pc thunk for
7709 the given register. */
7711 static void
7712 get_pc_thunk_name (char name[32], unsigned int regno)
7714 gcc_assert (!TARGET_64BIT);
7716 if (USE_HIDDEN_LINKONCE)
7717 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7718 else
7719 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7723 /* This function generates the -fpic pc-thunk routines, each of which loads
7724 its register with the return address of the caller and then returns.  */
7726 static void
7727 ix86_code_end (void)
7729 rtx xops[2];
7730 int regno;
7732 for (regno = 0; regno < 8; ++regno)
7734 char name[32];
7735 tree decl;
7737 if (! ((pic_labels_used >> regno) & 1))
7738 continue;
7740 get_pc_thunk_name (name, regno);
7742 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
7743 get_identifier (name),
7744 build_function_type (void_type_node, void_list_node));
7745 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
7746 NULL_TREE, void_type_node);
7747 TREE_PUBLIC (decl) = 1;
7748 TREE_STATIC (decl) = 1;
7750 #if TARGET_MACHO
7751 if (TARGET_MACHO)
7753 switch_to_section (darwin_sections[text_coal_section]);
7754 fputs ("\t.weak_definition\t", asm_out_file);
7755 assemble_name (asm_out_file, name);
7756 fputs ("\n\t.private_extern\t", asm_out_file);
7757 assemble_name (asm_out_file, name);
7758 fputs ("\n", asm_out_file);
7759 ASM_OUTPUT_LABEL (asm_out_file, name);
7760 DECL_WEAK (decl) = 1;
7762 else
7763 #endif
7764 if (USE_HIDDEN_LINKONCE)
7766 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7768 (*targetm.asm_out.unique_section) (decl, 0);
7769 switch_to_section (get_named_section (decl, NULL, 0));
7771 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7772 fputs ("\t.hidden\t", asm_out_file);
7773 assemble_name (asm_out_file, name);
7774 putc ('\n', asm_out_file);
7775 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7777 else
7779 switch_to_section (text_section);
7780 ASM_OUTPUT_LABEL (asm_out_file, name);
7783 DECL_INITIAL (decl) = make_node (BLOCK);
7784 current_function_decl = decl;
7785 init_function_start (decl);
7786 first_function_block_is_cold = false;
7787 /* Make sure unwind info is emitted for the thunk if needed. */
7788 final_start_function (emit_barrier (), asm_out_file, 1);
7790 xops[0] = gen_rtx_REG (Pmode, regno);
7791 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7792 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7793 output_asm_insn ("ret", xops);
7794 final_end_function ();
7795 init_insn_lengths ();
7796 free_after_compilation (cfun);
7797 set_cfun (NULL);
7798 current_function_decl = NULL;
7802 /* Emit code for the SET_GOT patterns. */
7804 const char *
7805 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7807 rtx xops[3];
7809 xops[0] = dest;
7811 if (TARGET_VXWORKS_RTP && flag_pic)
7813 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7814 xops[2] = gen_rtx_MEM (Pmode,
7815 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7816 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7818 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7819 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7820 an unadorned address. */
7821 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7822 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7823 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7824 return "";
7827 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7829 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7831 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7833 if (!flag_pic)
7834 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7835 else
7837 output_asm_insn ("call\t%a2", xops);
7838 #ifdef DWARF2_UNWIND_INFO
7839 /* The call to next label acts as a push. */
7840 if (dwarf2out_do_frame ())
7842 rtx insn;
7843 start_sequence ();
7844 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7845 gen_rtx_PLUS (Pmode,
7846 stack_pointer_rtx,
7847 GEN_INT (-4))));
7848 RTX_FRAME_RELATED_P (insn) = 1;
7849 dwarf2out_frame_debug (insn, true);
7850 end_sequence ();
7852 #endif
7855 #if TARGET_MACHO
7856 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7857 is what will be referenced by the Mach-O PIC subsystem. */
7858 if (!label)
7859 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7860 #endif
7862 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7863 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7865 if (flag_pic)
7867 output_asm_insn ("pop%z0\t%0", xops);
7868 #ifdef DWARF2_UNWIND_INFO
7869 /* The pop is a pop and clobbers dest, but doesn't restore it
7870 for unwind info purposes. */
7871 if (dwarf2out_do_frame ())
7873 rtx insn;
7874 start_sequence ();
7875 insn = emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
7876 dwarf2out_frame_debug (insn, true);
7877 insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7878 gen_rtx_PLUS (Pmode,
7879 stack_pointer_rtx,
7880 GEN_INT (4))));
7881 RTX_FRAME_RELATED_P (insn) = 1;
7882 dwarf2out_frame_debug (insn, true);
7883 end_sequence ();
7885 #endif
7888 else
7890 char name[32];
7891 get_pc_thunk_name (name, REGNO (dest));
7892 pic_labels_used |= 1 << REGNO (dest);
7894 #ifdef DWARF2_UNWIND_INFO
7895 /* Ensure all queued register saves are flushed before the
7896 call. */
7897 if (dwarf2out_do_frame ())
7899 rtx insn;
7900 start_sequence ();
7901 insn = emit_barrier ();
7902 end_sequence ();
7903 dwarf2out_frame_debug (insn, false);
7905 #endif
7906 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7907 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7908 output_asm_insn ("call\t%X2", xops);
7909 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7910 is what will be referenced by the Mach-O PIC subsystem. */
7911 #if TARGET_MACHO
7912 if (!label)
7913 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7914 else
7915 targetm.asm_out.internal_label (asm_out_file, "L",
7916 CODE_LABEL_NUMBER (label));
7917 #endif
7920 if (TARGET_MACHO)
7921 return "";
7923 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7924 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7925 else
7926 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7928 return "";
7931 /* Generate a "push" pattern for input ARG.  */
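/* The result is a (set (mem:Pmode (pre_dec (reg sp))) ARG) rtx; building it
   also bumps the tracked CFA offset while the CFA is still the stack
   pointer.  */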
7933 static rtx
7934 gen_push (rtx arg)
7936 if (ix86_cfa_state->reg == stack_pointer_rtx)
7937 ix86_cfa_state->offset += UNITS_PER_WORD;
7939 return gen_rtx_SET (VOIDmode,
7940 gen_rtx_MEM (Pmode,
7941 gen_rtx_PRE_DEC (Pmode,
7942 stack_pointer_rtx)),
7943 arg);
7946 /* Return >= 0 if there is an unused call-clobbered register available
7947 for the entire function. */
7949 static unsigned int
7950 ix86_select_alt_pic_regnum (void)
7952 if (current_function_is_leaf && !crtl->profile
7953 && !ix86_current_function_calls_tls_descriptor)
7955 int i, drap;
7956 /* Can't use the same register for both PIC and DRAP. */
7957 if (crtl->drap_reg)
7958 drap = REGNO (crtl->drap_reg);
7959 else
7960 drap = -1;
7961 for (i = 2; i >= 0; --i)
7962 if (i != drap && !df_regs_ever_live_p (i))
7963 return i;
7966 return INVALID_REGNUM;
7969 /* Return 1 if we need to save REGNO. */
7970 static int
7971 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7973 if (pic_offset_table_rtx
7974 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7975 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7976 || crtl->profile
7977 || crtl->calls_eh_return
7978 || crtl->uses_const_pool))
7980 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7981 return 0;
7982 return 1;
7985 if (crtl->calls_eh_return && maybe_eh_return)
7987 unsigned i;
7988 for (i = 0; ; i++)
7990 unsigned test = EH_RETURN_DATA_REGNO (i);
7991 if (test == INVALID_REGNUM)
7992 break;
7993 if (test == regno)
7994 return 1;
7998 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7999 return 1;
8001 return (df_regs_ever_live_p (regno)
8002 && !call_used_regs[regno]
8003 && !fixed_regs[regno]
8004 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
8007 /* Return the number of saved general purpose registers.  */
8009 static int
8010 ix86_nsaved_regs (void)
8012 int nregs = 0;
8013 int regno;
8015 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8016 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8017 nregs ++;
8018 return nregs;
8021 /* Return the number of saved SSE registers.  */
8023 static int
8024 ix86_nsaved_sseregs (void)
8026 int nregs = 0;
8027 int regno;
8029 if (ix86_cfun_abi () != MS_ABI)
8030 return 0;
8031 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8032 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8033 nregs ++;
8034 return nregs;
8037 /* Given FROM and TO register numbers, say whether this elimination is
8038 allowed. If stack alignment is needed, we can only replace argument
8039 pointer with hard frame pointer, or replace frame pointer with stack
8040 pointer. Otherwise, frame pointer elimination is automatically
8041 handled and all other eliminations are valid. */
8043 static bool
8044 ix86_can_eliminate (const int from, const int to)
8046 if (stack_realign_fp)
8047 return ((from == ARG_POINTER_REGNUM
8048 && to == HARD_FRAME_POINTER_REGNUM)
8049 || (from == FRAME_POINTER_REGNUM
8050 && to == STACK_POINTER_REGNUM));
8051 else
8052 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
8055 /* Return the offset between two registers, one to be eliminated, and the other
8056 its replacement, at the start of a routine. */
8058 HOST_WIDE_INT
8059 ix86_initial_elimination_offset (int from, int to)
8061 struct ix86_frame frame;
8062 ix86_compute_frame_layout (&frame);
8064 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8065 return frame.hard_frame_pointer_offset;
8066 else if (from == FRAME_POINTER_REGNUM
8067 && to == HARD_FRAME_POINTER_REGNUM)
8068 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
8069 else
8071 gcc_assert (to == STACK_POINTER_REGNUM);
8073 if (from == ARG_POINTER_REGNUM)
8074 return frame.stack_pointer_offset;
8076 gcc_assert (from == FRAME_POINTER_REGNUM);
8077 return frame.stack_pointer_offset - frame.frame_pointer_offset;
8081 /* In a dynamically-aligned function, we can't know the offset from
8082 stack pointer to frame pointer, so we must ensure that setjmp
8083 eliminates fp against the hard fp (%ebp) rather than trying to
8084 index from %esp up to the top of the frame across a gap that is
8085 of unknown (at compile-time) size. */
8086 static rtx
8087 ix86_builtin_setjmp_frame_value (void)
8089 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
8092 /* Fill structure ix86_frame about frame of currently computed function. */
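/* Roughly, the layout computed below looks like this, from higher to
   lower addresses:
	return address
	[pushed static chain]
	[saved frame pointer]		<- hard_frame_pointer_offset
	saved GP registers
	[padding0]
	saved SSE registers
	va_arg register save area
	[padding1]			<- frame_pointer_offset
	local variables
	outgoing arguments
	[padding2]			<- stack_pointer_offset
   frame->to_allocate covers everything below the SSE save area.  */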
8094 static void
8095 ix86_compute_frame_layout (struct ix86_frame *frame)
8097 unsigned int stack_alignment_needed;
8098 HOST_WIDE_INT offset;
8099 unsigned int preferred_alignment;
8100 HOST_WIDE_INT size = get_frame_size ();
8102 frame->nregs = ix86_nsaved_regs ();
8103 frame->nsseregs = ix86_nsaved_sseregs ();
8105 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
8106 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
8108 /* The MS ABI seems to require stack alignment to always be 16 except for function
8109 prologues. */
8110 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
8112 preferred_alignment = 16;
8113 stack_alignment_needed = 16;
8114 crtl->preferred_stack_boundary = 128;
8115 crtl->stack_alignment_needed = 128;
8118 gcc_assert (!size || stack_alignment_needed);
8119 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
8120 gcc_assert (preferred_alignment <= stack_alignment_needed);
8122 /* The number of registers saved can change between reload iterations.
8123 Recompute the value as needed.  Do not recompute when the number of
8124 registers hasn't changed, as reload calls this function multiple times
8125 and does not expect the decision to change within a single iteration.  */
8126 if (!optimize_function_for_size_p (cfun)
8127 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
8129 int count = frame->nregs;
8130 struct cgraph_node *node = cgraph_node (current_function_decl);
8132 cfun->machine->use_fast_prologue_epilogue_nregs = count;
8133 /* The fast prologue uses move instead of push to save registers. This
8134 is significantly longer, but also executes faster as modern hardware
8135 can execute the moves in parallel, but can't do that for push/pop.
8137 Be careful about choosing which prologue to emit: when the function takes
8138 many instructions to execute, we may as well use the slow version, and
8139 likewise when the function is known to be outside a hot spot (known only
8140 with feedback).  Weight the size of the function by the number of
8141 registers to save, as it is cheap to use one or two push instructions
8142 but very slow to use many of them.  */
8143 if (count)
8144 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
8145 if (node->frequency < NODE_FREQUENCY_NORMAL
8146 || (flag_branch_probabilities
8147 && node->frequency < NODE_FREQUENCY_HOT))
8148 cfun->machine->use_fast_prologue_epilogue = false;
8149 else
8150 cfun->machine->use_fast_prologue_epilogue
8151 = !expensive_function_p (count);
8153 if (TARGET_PROLOGUE_USING_MOVE
8154 && cfun->machine->use_fast_prologue_epilogue)
8155 frame->save_regs_using_mov = true;
8156 else
8157 frame->save_regs_using_mov = false;
8159 /* Skip return address. */
8160 offset = UNITS_PER_WORD;
8162 /* Skip pushed static chain. */
8163 if (ix86_static_chain_on_stack)
8164 offset += UNITS_PER_WORD;
8166 /* Skip saved base pointer. */
8167 if (frame_pointer_needed)
8168 offset += UNITS_PER_WORD;
8170 frame->hard_frame_pointer_offset = offset;
8172 /* Round the offset up to the needed alignment, because the realigned
8173 frame starts here.  */
8174 if (stack_realign_fp)
8175 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
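/* E.g. with stack_alignment_needed == 16, an offset of 20 is rounded up
   to 32 by the expression above.  */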
8177 /* Register save area */
8178 offset += frame->nregs * UNITS_PER_WORD;
8180 /* Align SSE reg save area. */
8181 if (frame->nsseregs)
8182 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
8183 else
8184 frame->padding0 = 0;
8186 /* SSE register save area. */
8187 offset += frame->padding0 + frame->nsseregs * 16;
8189 /* Va-arg area */
8190 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
8191 offset += frame->va_arg_size;
8193 /* Align start of frame for local function. */
8194 frame->padding1 = ((offset + stack_alignment_needed - 1)
8195 & -stack_alignment_needed) - offset;
8197 offset += frame->padding1;
8199 /* Frame pointer points here. */
8200 frame->frame_pointer_offset = offset;
8202 offset += size;
8204 /* Add the outgoing arguments area.  It can be skipped if we eliminated
8205 all the function calls as dead code.
8206 Skipping is however impossible when the function calls alloca: the alloca
8207 expander assumes that the last crtl->outgoing_args_size bytes
8208 of the stack frame are unused.  */
8209 if (ACCUMULATE_OUTGOING_ARGS
8210 && (!current_function_is_leaf || cfun->calls_alloca
8211 || ix86_current_function_calls_tls_descriptor))
8213 offset += crtl->outgoing_args_size;
8214 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8216 else
8217 frame->outgoing_arguments_size = 0;
8219 /* Align stack boundary. Only needed if we're calling another function
8220 or using alloca. */
8221 if (!current_function_is_leaf || cfun->calls_alloca
8222 || ix86_current_function_calls_tls_descriptor)
8223 frame->padding2 = ((offset + preferred_alignment - 1)
8224 & -preferred_alignment) - offset;
8225 else
8226 frame->padding2 = 0;
8228 offset += frame->padding2;
8230 /* We've reached end of stack frame. */
8231 frame->stack_pointer_offset = offset;
8233 /* Size prologue needs to allocate. */
8234 frame->to_allocate =
8235 (size + frame->padding1 + frame->padding2
8236 + frame->outgoing_arguments_size + frame->va_arg_size);
8238 if ((!frame->to_allocate && frame->nregs <= 1)
8239 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8240 frame->save_regs_using_mov = false;
8242 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8243 && current_function_sp_is_unchanging
8244 && current_function_is_leaf
8245 && !ix86_current_function_calls_tls_descriptor)
8247 frame->red_zone_size = frame->to_allocate;
8248 if (frame->save_regs_using_mov)
8249 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8250 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8251 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8253 else
8254 frame->red_zone_size = 0;
8255 frame->to_allocate -= frame->red_zone_size;
8256 frame->stack_pointer_offset -= frame->red_zone_size;
8259 /* Emit code to save registers in the prologue. */
8261 static void
8262 ix86_emit_save_regs (void)
8264 unsigned int regno;
8265 rtx insn;
8267 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8268 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8270 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8271 RTX_FRAME_RELATED_P (insn) = 1;
8275 /* Emit code to save registers using MOV insns. First register
8276 is saved at POINTER + OFFSET.  */
8277 static void
8278 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8280 unsigned int regno;
8281 rtx insn;
8283 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8284 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8286 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8287 Pmode, offset),
8288 gen_rtx_REG (Pmode, regno));
8289 RTX_FRAME_RELATED_P (insn) = 1;
8290 offset += UNITS_PER_WORD;
8294 /* Emit code to save SSE registers using MOV insns.  First register
8295 is saved at POINTER + OFFSET.  */
8296 static void
8297 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8299 unsigned int regno;
8300 rtx insn;
8301 rtx mem;
8303 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8304 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8306 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8307 set_mem_align (mem, 128);
8308 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8309 RTX_FRAME_RELATED_P (insn) = 1;
8310 offset += 16;
8314 static GTY(()) rtx queued_cfa_restores;
8316 /* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next stack
8317 manipulation insn.  Don't add it if the previously
8318 saved value will be left untouched within the stack red-zone until return,
8319 as unwinders can find the same value in the register and
8320 on the stack.  */
8322 static void
8323 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8325 if (TARGET_RED_ZONE
8326 && !TARGET_64BIT_MS_ABI
8327 && red_offset + RED_ZONE_SIZE >= 0
8328 && crtl->args.pops_args < 65536)
8329 return;
8331 if (insn)
8333 add_reg_note (insn, REG_CFA_RESTORE, reg);
8334 RTX_FRAME_RELATED_P (insn) = 1;
8336 else
8337 queued_cfa_restores
8338 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8341 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
8343 static void
8344 ix86_add_queued_cfa_restore_notes (rtx insn)
8346 rtx last;
8347 if (!queued_cfa_restores)
8348 return;
8349 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8351 XEXP (last, 1) = REG_NOTES (insn);
8352 REG_NOTES (insn) = queued_cfa_restores;
8353 queued_cfa_restores = NULL_RTX;
8354 RTX_FRAME_RELATED_P (insn) = 1;
8357 /* Expand prologue or epilogue stack adjustment.
8358 The pattern exists to put a dependency on all ebp-based memory accesses.
8359 STYLE should be negative if instructions should be marked as frame related,
8360 zero if %r11 register is live and cannot be freely used and positive
8361 otherwise. */
8363 static void
8364 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8365 int style, bool set_cfa)
8367 rtx insn;
8369 if (! TARGET_64BIT)
8370 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8371 else if (x86_64_immediate_operand (offset, DImode))
8372 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8373 else
8375 rtx r11;
8376 /* r11 is used by indirect sibcall return as well, set before the
8377 epilogue and used after the epilogue. ATM indirect sibcall
8378 shouldn't be used together with huge frame sizes in one
8379 function because of the frame_size check in sibcall.c. */
8380 gcc_assert (style);
8381 r11 = gen_rtx_REG (DImode, R11_REG);
8382 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8383 if (style < 0)
8384 RTX_FRAME_RELATED_P (insn) = 1;
8385 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8386 offset));
8389 if (style >= 0)
8390 ix86_add_queued_cfa_restore_notes (insn);
8392 if (set_cfa)
8394 rtx r;
8396 gcc_assert (ix86_cfa_state->reg == src);
8397 ix86_cfa_state->offset += INTVAL (offset);
8398 ix86_cfa_state->reg = dest;
8400 r = gen_rtx_PLUS (Pmode, src, offset);
8401 r = gen_rtx_SET (VOIDmode, dest, r);
8402 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8403 RTX_FRAME_RELATED_P (insn) = 1;
8405 else if (style < 0)
8406 RTX_FRAME_RELATED_P (insn) = 1;
8409 /* Find an available register to be used as dynamic realign argument
8410 pointer register.  Such a register will be written in the prologue and
8411 used at the beginning of the body, so it must not be:
8412 1. parameter passing register.
8413 2. GOT pointer.
8414 We reuse static-chain register if it is available. Otherwise, we
8415 use DI for i386 and R13 for x86-64. We chose R13 since it has
8416 shorter encoding.
8418 Return: the regno of chosen register. */
8420 static unsigned int
8421 find_drap_reg (void)
8423 tree decl = cfun->decl;
8425 if (TARGET_64BIT)
8427 /* Use R13 for a nested function or a function that needs a static chain.
8428 Since a function with a tail call may use any caller-saved
8429 register in the epilogue, DRAP must not use a caller-saved
8430 register in that case.  */
8431 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8432 return R13_REG;
8434 return R10_REG;
8436 else
8438 /* Use DI for a nested function or a function that needs a static chain.
8439 Since a function with a tail call may use any caller-saved
8440 register in the epilogue, DRAP must not use a caller-saved
8441 register in that case.  */
8442 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8443 return DI_REG;
8445 /* Reuse static chain register if it isn't used for parameter
8446 passing. */
8447 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8448 && !lookup_attribute ("fastcall",
8449 TYPE_ATTRIBUTES (TREE_TYPE (decl)))
8450 && !lookup_attribute ("thiscall",
8451 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8452 return CX_REG;
8453 else
8454 return DI_REG;
8458 /* Return minimum incoming stack alignment. */
8460 static unsigned int
8461 ix86_minimum_incoming_stack_boundary (bool sibcall)
8463 unsigned int incoming_stack_boundary;
8465 /* Prefer the one specified at command line. */
8466 if (ix86_user_incoming_stack_boundary)
8467 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8468 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
8469 if -mstackrealign is used, we are not computing it for a sibcall check,
8470 and the estimated stack alignment is 128 bits.  */
8471 else if (!sibcall
8472 && !TARGET_64BIT
8473 && ix86_force_align_arg_pointer
8474 && crtl->stack_alignment_estimated == 128)
8475 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8476 else
8477 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8479 /* Incoming stack alignment can be changed on individual functions
8480 via force_align_arg_pointer attribute. We use the smallest
8481 incoming stack boundary. */
8482 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8483 && lookup_attribute (ix86_force_align_arg_pointer_string,
8484 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8485 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8487 /* The incoming stack frame has to be aligned at least at
8488 parm_stack_boundary. */
8489 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8490 incoming_stack_boundary = crtl->parm_stack_boundary;
8492 /* The stack at the entry point of main is aligned by the runtime.  We use
8493 the smallest incoming stack boundary.  */
8494 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8495 && DECL_NAME (current_function_decl)
8496 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8497 && DECL_FILE_SCOPE_P (current_function_decl))
8498 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8500 return incoming_stack_boundary;
8503 /* Update incoming stack boundary and estimated stack alignment. */
8505 static void
8506 ix86_update_stack_boundary (void)
8508 ix86_incoming_stack_boundary
8509 = ix86_minimum_incoming_stack_boundary (false);
8511 /* The x86_64 varargs register save area needs 16-byte stack
8512 alignment.  */
8513 if (TARGET_64BIT
8514 && cfun->stdarg
8515 && crtl->stack_alignment_estimated < 128)
8516 crtl->stack_alignment_estimated = 128;
8519 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8520 needed or an rtx for DRAP otherwise. */
8522 static rtx
8523 ix86_get_drap_rtx (void)
8525 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8526 crtl->need_drap = true;
8528 if (stack_realign_drap)
8530 /* Assign DRAP to vDRAP and return vDRAP.  */
8531 unsigned int regno = find_drap_reg ();
8532 rtx drap_vreg;
8533 rtx arg_ptr;
8534 rtx seq, insn;
8536 arg_ptr = gen_rtx_REG (Pmode, regno);
8537 crtl->drap_reg = arg_ptr;
8539 start_sequence ();
8540 drap_vreg = copy_to_reg (arg_ptr);
8541 seq = get_insns ();
8542 end_sequence ();
8544 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8545 if (!optimize)
8547 add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
8548 RTX_FRAME_RELATED_P (insn) = 1;
8550 return drap_vreg;
8552 else
8553 return NULL;
8556 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8558 static rtx
8559 ix86_internal_arg_pointer (void)
8561 return virtual_incoming_args_rtx;
8564 /* Finalize the stack_realign_needed flag, which guides how the prologue and
8565 epilogue are generated.  */
8566 static void
8567 ix86_finalize_stack_realign_flags (void)
8569 /* Check whether stack realignment is really needed after reload, and
8570 store the result in cfun.  */
8571 unsigned int incoming_stack_boundary
8572 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8573 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8574 unsigned int stack_realign = (incoming_stack_boundary
8575 < (current_function_is_leaf
8576 ? crtl->max_used_stack_slot_alignment
8577 : crtl->stack_alignment_needed));
8579 if (crtl->stack_realign_finalized)
8581 /* After stack_realign_needed is finalized, we can no longer
8582 change it. */
8583 gcc_assert (crtl->stack_realign_needed == stack_realign);
8585 else
8587 crtl->stack_realign_needed = stack_realign;
8588 crtl->stack_realign_finalized = true;
8592 /* Expand the prologue into a bunch of separate insns. */
8594 void
8595 ix86_expand_prologue (void)
8597 rtx insn;
8598 bool pic_reg_used;
8599 struct ix86_frame frame;
8600 HOST_WIDE_INT allocate;
8601 int gen_frame_pointer = frame_pointer_needed;
8603 ix86_finalize_stack_realign_flags ();
8605 /* DRAP should not coexist with stack_realign_fp */
8606 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8608 /* Initialize CFA state for before the prologue. */
8609 ix86_cfa_state->reg = stack_pointer_rtx;
8610 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8612 ix86_compute_frame_layout (&frame);
8614 if (ix86_function_ms_hook_prologue (current_function_decl))
8616 rtx push, mov;
8618 /* Make sure the function starts with
8619 8b ff movl.s %edi,%edi
8620 55 push %ebp
8621 8b ec movl.s %esp,%ebp
8623 This matches the hookable function prologue in Win32 API
8624 functions in Microsoft Windows XP Service Pack 2 and newer.
8625 Wine uses this to enable Windows apps to hook the Win32 API
8626 functions provided by Wine. */
8627 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8628 gen_rtx_REG (SImode, DI_REG)));
8629 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8630 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8631 stack_pointer_rtx));
8633 if (frame_pointer_needed && !(crtl->drap_reg
8634 && crtl->stack_realign_needed))
8636 /* The push %ebp and movl.s %esp, %ebp already set up
8637 the frame pointer. No need to do this again. */
8638 gen_frame_pointer = 0;
8639 RTX_FRAME_RELATED_P (push) = 1;
8640 RTX_FRAME_RELATED_P (mov) = 1;
8641 if (ix86_cfa_state->reg == stack_pointer_rtx)
8642 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8644 else
8645 /* If the frame pointer is not needed, pop %ebp again. This
8646 could be optimized for cases where ebp needs to be backed up
8647 for some other reason. If stack realignment is needed, pop
8648 the base pointer again, align the stack, and later regenerate
8649 the frame pointer setup. The frame pointer generated by the
8650 hook prologue is not aligned, so it can't be used. */
8651 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8654 /* The first insn of a function that accepts its static chain on the
8655 stack is to push the register that would be filled in by a direct
8656 call. This insn will be skipped by the trampoline. */
8657 if (ix86_static_chain_on_stack)
8659 rtx t;
8661 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8662 emit_insn (gen_blockage ());
8664 /* We don't want to interpret this push insn as a register save,
8665 only as a stack adjustment. The real copy of the register as
8666 a save will be done later, if needed. */
8667 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8668 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8669 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8670 RTX_FRAME_RELATED_P (insn) = 1;
8673 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8674 DRAP is needed and stack realignment is really needed after reload.  */
8675 if (crtl->drap_reg && crtl->stack_realign_needed)
8677 rtx x, y;
8678 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8679 int param_ptr_offset = UNITS_PER_WORD;
8681 if (ix86_static_chain_on_stack)
8682 param_ptr_offset += UNITS_PER_WORD;
8683 if (!call_used_regs[REGNO (crtl->drap_reg)])
8684 param_ptr_offset += UNITS_PER_WORD;
8686 gcc_assert (stack_realign_drap);
8688 /* Grab the argument pointer. */
8689 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8690 y = crtl->drap_reg;
8692 /* Only need to push the parameter pointer reg if it is a
8693 caller-saved reg.  */
8694 if (!call_used_regs[REGNO (crtl->drap_reg)])
8696 /* Push arg pointer reg */
8697 insn = emit_insn (gen_push (y));
8698 RTX_FRAME_RELATED_P (insn) = 1;
8701 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8702 RTX_FRAME_RELATED_P (insn) = 1;
8703 ix86_cfa_state->reg = crtl->drap_reg;
8705 /* Align the stack. */
8706 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8707 stack_pointer_rtx,
8708 GEN_INT (-align_bytes)));
8709 RTX_FRAME_RELATED_P (insn) = 1;
8711 /* Replicate the return address on the stack so that the return
8712 address can be reached via the (argp - 1) slot.  This is needed
8713 to implement macro RETURN_ADDR_RTX and intrinsic function
8714 expand_builtin_return_addr etc. */
8715 x = crtl->drap_reg;
8716 x = gen_frame_mem (Pmode,
8717 plus_constant (x, -UNITS_PER_WORD));
8718 insn = emit_insn (gen_push (x));
8719 RTX_FRAME_RELATED_P (insn) = 1;
8722 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8723 slower on all targets. Also sdb doesn't like it. */
8725 if (gen_frame_pointer)
8727 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8728 RTX_FRAME_RELATED_P (insn) = 1;
8730 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8731 RTX_FRAME_RELATED_P (insn) = 1;
8733 if (ix86_cfa_state->reg == stack_pointer_rtx)
8734 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8737 if (stack_realign_fp)
8739 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8740 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8742 /* Align the stack. */
8743 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8744 stack_pointer_rtx,
8745 GEN_INT (-align_bytes)));
8746 RTX_FRAME_RELATED_P (insn) = 1;
8749 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8751 if (!frame.save_regs_using_mov)
8752 ix86_emit_save_regs ();
8753 else
8754 allocate += frame.nregs * UNITS_PER_WORD;
8756 /* When using the red zone we may start saving registers before allocating
8757 the stack frame, saving one cycle of the prologue.  However, avoid
8758 doing this if we are going to have to probe the stack, since
8759 at least on x86_64 the stack probe can turn into a call that clobbers
8760 a red zone location.  */
8761 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8762 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8763 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8764 && !crtl->stack_realign_needed)
8765 ? hard_frame_pointer_rtx
8766 : stack_pointer_rtx,
8767 -frame.nregs * UNITS_PER_WORD);
8769 if (allocate == 0)
8771 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8772 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8773 GEN_INT (-allocate), -1,
8774 ix86_cfa_state->reg == stack_pointer_rtx);
8775 else
8777 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8778 bool eax_live;
8779 rtx t;
8781 if (cfun->machine->call_abi == MS_ABI)
8782 eax_live = false;
8783 else
8784 eax_live = ix86_eax_live_at_start_p ();
8786 if (eax_live)
8788 emit_insn (gen_push (eax));
8789 allocate -= UNITS_PER_WORD;
8792 emit_move_insn (eax, GEN_INT (allocate));
8794 if (TARGET_64BIT)
8795 insn = gen_allocate_stack_worker_64 (eax, eax);
8796 else
8797 insn = gen_allocate_stack_worker_32 (eax, eax);
8798 insn = emit_insn (insn);
8800 if (ix86_cfa_state->reg == stack_pointer_rtx)
8802 ix86_cfa_state->offset += allocate;
8803 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8804 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8805 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8806 RTX_FRAME_RELATED_P (insn) = 1;
8809 if (eax_live)
8811 if (frame_pointer_needed)
8812 t = plus_constant (hard_frame_pointer_rtx,
8813 allocate
8814 - frame.to_allocate
8815 - frame.nregs * UNITS_PER_WORD);
8816 else
8817 t = plus_constant (stack_pointer_rtx, allocate);
8818 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8822 if (frame.save_regs_using_mov
8823 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8824 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8826 if (!frame_pointer_needed
8827 || !(frame.to_allocate + frame.padding0)
8828 || crtl->stack_realign_needed)
8829 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8830 frame.to_allocate
8831 + frame.nsseregs * 16 + frame.padding0);
8832 else
8833 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8834 -frame.nregs * UNITS_PER_WORD);
8836 if (!frame_pointer_needed
8837 || !(frame.to_allocate + frame.padding0)
8838 || crtl->stack_realign_needed)
8839 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8840 frame.to_allocate);
8841 else
8842 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8843 - frame.nregs * UNITS_PER_WORD
8844 - frame.nsseregs * 16
8845 - frame.padding0);
8847 pic_reg_used = false;
8848 if (pic_offset_table_rtx
8849 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8850 || crtl->profile))
8852 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8854 if (alt_pic_reg_used != INVALID_REGNUM)
8855 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8857 pic_reg_used = true;
8860 if (pic_reg_used)
8862 if (TARGET_64BIT)
8864 if (ix86_cmodel == CM_LARGE_PIC)
8866 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8867 rtx label = gen_label_rtx ();
8868 emit_label (label);
8869 LABEL_PRESERVE_P (label) = 1;
8870 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8871 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8872 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8873 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8874 pic_offset_table_rtx, tmp_reg));
8876 else
8877 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8879 else
8880 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8883 /* In the pic_reg_used case, make sure that the got load isn't deleted
8884 when mcount needs it. Blockage to avoid call movement across mcount
8885 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8886 note. */
8887 if (crtl->profile && pic_reg_used)
8888 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8890 if (crtl->drap_reg && !crtl->stack_realign_needed)
8892 /* vDRAP is set up, but after reload it turns out stack realignment
8893 isn't necessary; here we emit the prologue code to set up DRAP
8894 without the stack realignment adjustment.  */
8895 rtx x;
8896 int drap_bp_offset = UNITS_PER_WORD * 2;
8898 if (ix86_static_chain_on_stack)
8899 drap_bp_offset += UNITS_PER_WORD;
8900 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8901 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8904 /* Prevent instructions from being scheduled into the register save push
8905 sequence when access to the redzone area is done through the frame pointer.
8906 The offset between the frame pointer and the stack pointer is calculated
8907 relative to the value of the stack pointer at the end of the function
8908 prologue, and moving instructions that access redzone area via frame
8909 pointer inside push sequence violates this assumption. */
8910 if (frame_pointer_needed && frame.red_zone_size)
8911 emit_insn (gen_memory_blockage ());
8913 /* Emit cld instruction if stringops are used in the function. */
8914 if (TARGET_CLD && ix86_current_function_needs_cld)
8915 emit_insn (gen_cld ());
8918 /* Emit code to restore REG using a POP insn. */
8920 static void
8921 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8923 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8925 if (ix86_cfa_state->reg == crtl->drap_reg
8926 && REGNO (reg) == REGNO (crtl->drap_reg))
8928 /* Previously we'd represented the CFA as an expression
8929 like *(%ebp - 8). We've just popped that value from
8930 the stack, which means we need to reset the CFA to
8931 the drap register. This will remain until we restore
8932 the stack pointer. */
8933 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8934 RTX_FRAME_RELATED_P (insn) = 1;
8935 return;
8938 if (ix86_cfa_state->reg == stack_pointer_rtx)
8940 ix86_cfa_state->offset -= UNITS_PER_WORD;
8941 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8942 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8943 RTX_FRAME_RELATED_P (insn) = 1;
8946 /* When the frame pointer is the CFA, and we pop it, we are
8947 swapping back to the stack pointer as the CFA. This happens
8948 for stack frames that don't allocate other data, so we assume
8949 the stack pointer is now pointing at the return address, i.e.
8950 the function entry state, which makes the offset be 1 word. */
8951 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8952 && reg == hard_frame_pointer_rtx)
8954 ix86_cfa_state->reg = stack_pointer_rtx;
8955 ix86_cfa_state->offset -= UNITS_PER_WORD;
8957 add_reg_note (insn, REG_CFA_DEF_CFA,
8958 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8959 GEN_INT (ix86_cfa_state->offset)));
8960 RTX_FRAME_RELATED_P (insn) = 1;
8963 ix86_add_cfa_restore_note (insn, reg, red_offset);
8966 /* Emit code to restore saved registers using POP insns. */
8968 static void
8969 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8971 int regno;
8973 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8974 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8976 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8977 red_offset);
8978 red_offset += UNITS_PER_WORD;
8982 /* Emit code and notes for the LEAVE instruction. */
8984 static void
8985 ix86_emit_leave (HOST_WIDE_INT red_offset)
8987 rtx insn = emit_insn (ix86_gen_leave ());
8989 ix86_add_queued_cfa_restore_notes (insn);
8991 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8993 ix86_cfa_state->reg = stack_pointer_rtx;
8994 ix86_cfa_state->offset -= UNITS_PER_WORD;
8996 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8997 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8998 RTX_FRAME_RELATED_P (insn) = 1;
8999 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
9003 /* Emit code to restore saved registers using MOV insns. First register
9004 is restored from POINTER + OFFSET. */
9005 static void
9006 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
9007 HOST_WIDE_INT red_offset,
9008 int maybe_eh_return)
9010 unsigned int regno;
9011 rtx base_address = gen_rtx_MEM (Pmode, pointer);
9012 rtx insn;
9014 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9015 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9017 rtx reg = gen_rtx_REG (Pmode, regno);
9019 /* Ensure that adjust_address won't be forced to produce pointer
9020 out of range allowed by x86-64 instruction set. */
9021 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
9023 rtx r11;
9025 r11 = gen_rtx_REG (DImode, R11_REG);
9026 emit_move_insn (r11, GEN_INT (offset));
9027 emit_insn (gen_adddi3 (r11, r11, pointer));
9028 base_address = gen_rtx_MEM (Pmode, r11);
9029 offset = 0;
9031 insn = emit_move_insn (reg,
9032 adjust_address (base_address, Pmode, offset));
9033 offset += UNITS_PER_WORD;
9035 if (ix86_cfa_state->reg == crtl->drap_reg
9036 && regno == REGNO (crtl->drap_reg))
9038 /* Previously we'd represented the CFA as an expression
9039 like *(%ebp - 8). We've just popped that value from
9040 the stack, which means we need to reset the CFA to
9041 the drap register. This will remain until we restore
9042 the stack pointer. */
9043 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
9044 RTX_FRAME_RELATED_P (insn) = 1;
9046 else
9047 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
9049 red_offset += UNITS_PER_WORD;
9053 /* Emit code to restore saved SSE registers using MOV insns. First register
9054 is restored from POINTER + OFFSET. */
9055 static void
9056 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
9057 HOST_WIDE_INT red_offset,
9058 int maybe_eh_return)
9060 int regno;
9061 rtx base_address = gen_rtx_MEM (TImode, pointer);
9062 rtx mem;
9064 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
9065 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
9067 rtx reg = gen_rtx_REG (TImode, regno);
9069 /* Ensure that adjust_address won't be forced to produce a pointer
9070 out of the range allowed by the x86-64 instruction set. */
9071 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
9073 rtx r11;
9075 r11 = gen_rtx_REG (DImode, R11_REG);
9076 emit_move_insn (r11, GEN_INT (offset));
9077 emit_insn (gen_adddi3 (r11, r11, pointer));
9078 base_address = gen_rtx_MEM (TImode, r11);
9079 offset = 0;
9081 mem = adjust_address (base_address, TImode, offset);
9082 set_mem_align (mem, 128);
9083 emit_move_insn (reg, mem);
9084 offset += 16;
9086 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
9088 red_offset += 16;
9092 /* Restore function stack, frame, and registers. */
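/* Note (added, inferred from the uses below): STYLE is 0 for sibcall
   epilogues and 2 for eh_return epilogues; other values, presumably coming
   from the normal epilogue expander, get the default handling.  */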
9094 void
9095 ix86_expand_epilogue (int style)
9097 int sp_valid;
9098 struct ix86_frame frame;
9099 HOST_WIDE_INT offset, red_offset;
9100 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
9101 bool using_drap;
9103 ix86_finalize_stack_realign_flags ();
9105 /* When stack is realigned, SP must be valid. */
9106 sp_valid = (!frame_pointer_needed
9107 || current_function_sp_is_unchanging
9108 || stack_realign_fp);
9110 ix86_compute_frame_layout (&frame);
9112 /* See the comment about red zone and frame
9113 pointer usage in ix86_expand_prologue. */
9114 if (frame_pointer_needed && frame.red_zone_size)
9115 emit_insn (gen_memory_blockage ());
9117 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
9118 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
9120 /* Calculate start of saved registers relative to ebp. Special care
9121 must be taken for the normal return case of a function using
9122 eh_return: the eax and edx registers are marked as saved, but not
9123 restored along this path. */
9124 offset = frame.nregs;
9125 if (crtl->calls_eh_return && style != 2)
9126 offset -= 2;
9127 offset *= -UNITS_PER_WORD;
9128 offset -= frame.nsseregs * 16 + frame.padding0;
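/* Illustrative example (not from the source): on x86-64, with 3 call-saved
   general registers, 2 SSE registers and no padding, this computes
   offset = -(3 * 8) - 2 * 16 = -56.  */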
9130 /* Calculate start of saved registers relative to esp on entry of the
9131 function. When realigning stack, this needs to be the most negative
9132 value possible at runtime. */
9133 red_offset = offset;
9134 if (using_drap)
9135 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9136 + UNITS_PER_WORD;
9137 else if (stack_realign_fp)
9138 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
9139 - UNITS_PER_WORD;
9140 if (ix86_static_chain_on_stack)
9141 red_offset -= UNITS_PER_WORD;
9142 if (frame_pointer_needed)
9143 red_offset -= UNITS_PER_WORD;
9145 /* If we're only restoring one register and sp is not valid, then
9146 use a move instruction to restore the register, since it's
9147 less work than reloading sp and popping the register.
9149 The default code results in a stack adjustment using an add/lea instruction,
9150 while this code results in a LEAVE instruction (or its discrete equivalent),
9151 so it is profitable in some other cases as well, especially when there
9152 are no registers to restore. We also use this code when TARGET_USE_LEAVE
9153 and there is exactly one register to pop. This heuristic may need some
9154 tuning in the future. */
9155 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
9156 || (TARGET_EPILOGUE_USING_MOVE
9157 && cfun->machine->use_fast_prologue_epilogue
9158 && ((frame.nregs + frame.nsseregs) > 1
9159 || (frame.to_allocate + frame.padding0) != 0))
9160 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
9161 && (frame.to_allocate + frame.padding0) != 0)
9162 || (frame_pointer_needed && TARGET_USE_LEAVE
9163 && cfun->machine->use_fast_prologue_epilogue
9164 && (frame.nregs + frame.nsseregs) == 1)
9165 || crtl->calls_eh_return)
9167 /* Restore registers. We can use ebp or esp to address the memory
9168 locations. If both are available, default to ebp, since offsets
9169 are known to be small. The only exception is when esp points directly
9170 to the end of the block of saved registers, where we may simplify
9171 the addressing mode.
9173 If we are realigning the stack with bp and sp, the register restores can't
9174 be addressed via bp; sp must be used instead. */
9176 if (!frame_pointer_needed
9177 || (sp_valid && !(frame.to_allocate + frame.padding0))
9178 || stack_realign_fp)
9180 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9181 frame.to_allocate, red_offset,
9182 style == 2);
9183 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
9184 frame.to_allocate
9185 + frame.nsseregs * 16
9186 + frame.padding0,
9187 red_offset
9188 + frame.nsseregs * 16
9189 + frame.padding0, style == 2);
9191 else
9193 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
9194 offset, red_offset,
9195 style == 2);
9196 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
9197 offset
9198 + frame.nsseregs * 16
9199 + frame.padding0,
9200 red_offset
9201 + frame.nsseregs * 16
9202 + frame.padding0, style == 2);
9205 red_offset -= offset;
9207 /* eh_return epilogues need %ecx added to the stack pointer. */
9208 if (style == 2)
9210 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9212 /* Stack align doesn't work with eh_return. */
9213 gcc_assert (!crtl->stack_realign_needed);
9215 /* Neither do regparm nested functions. */
9215 gcc_assert (!ix86_static_chain_on_stack);
9217 if (frame_pointer_needed)
9219 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9220 tmp = plus_constant (tmp, UNITS_PER_WORD);
9221 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9223 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9224 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9226 /* Note that we use SA as a temporary CFA, as the return
9227 address is at the proper place relative to it. We
9228 pretend this happens at the FP restore insn because
9229 prior to this insn the FP would be stored at the wrong
9230 offset relative to SA, and after this insn we have no
9231 other reasonable register to use for the CFA. We don't
9232 bother resetting the CFA to the SP for the duration of
9233 the return insn. */
9234 add_reg_note (tmp, REG_CFA_DEF_CFA,
9235 plus_constant (sa, UNITS_PER_WORD));
9236 ix86_add_queued_cfa_restore_notes (tmp);
9237 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9238 RTX_FRAME_RELATED_P (tmp) = 1;
9239 ix86_cfa_state->reg = sa;
9240 ix86_cfa_state->offset = UNITS_PER_WORD;
9242 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9243 const0_rtx, style, false);
9245 else
9247 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9248 tmp = plus_constant (tmp, (frame.to_allocate
9249 + frame.nregs * UNITS_PER_WORD
9250 + frame.nsseregs * 16
9251 + frame.padding0));
9252 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9253 ix86_add_queued_cfa_restore_notes (tmp);
9255 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9256 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9258 ix86_cfa_state->offset = UNITS_PER_WORD;
9259 add_reg_note (tmp, REG_CFA_DEF_CFA,
9260 plus_constant (stack_pointer_rtx,
9261 UNITS_PER_WORD));
9262 RTX_FRAME_RELATED_P (tmp) = 1;
9266 else if (!frame_pointer_needed)
9267 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9268 GEN_INT (frame.to_allocate
9269 + frame.nregs * UNITS_PER_WORD
9270 + frame.nsseregs * 16
9271 + frame.padding0),
9272 style, !using_drap);
9273 /* If not an i386, mov & pop is faster than "leave". */
9274 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9275 || !cfun->machine->use_fast_prologue_epilogue)
9276 ix86_emit_leave (red_offset);
9277 else
9279 pro_epilogue_adjust_stack (stack_pointer_rtx,
9280 hard_frame_pointer_rtx,
9281 const0_rtx, style, !using_drap);
9283 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9286 else
9288 /* The first step is to deallocate the stack frame so that we can
9289 pop the registers.
9291 If we realign the stack with the frame pointer, then the stack pointer
9292 can't be recovered via lea $offset(%bp), %sp, because
9293 there is a padding area between bp and sp for the realignment;
9294 "add $to_allocate, %sp" must be used instead. */
9295 if (!sp_valid)
9297 gcc_assert (frame_pointer_needed);
9298 gcc_assert (!stack_realign_fp);
9299 pro_epilogue_adjust_stack (stack_pointer_rtx,
9300 hard_frame_pointer_rtx,
9301 GEN_INT (offset), style, false);
9302 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9303 0, red_offset,
9304 style == 2);
9305 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9306 GEN_INT (frame.nsseregs * 16
9307 + frame.padding0),
9308 style, false);
9310 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9312 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9313 frame.to_allocate, red_offset,
9314 style == 2);
9315 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9316 GEN_INT (frame.to_allocate
9317 + frame.nsseregs * 16
9318 + frame.padding0), style,
9319 !using_drap && !frame_pointer_needed);
9322 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9323 + frame.padding0);
9324 red_offset -= offset;
9326 if (frame_pointer_needed)
9328 /* The leave instruction results in shorter dependency chains on CPUs that
9329 are able to grok it fast. */
9330 if (TARGET_USE_LEAVE)
9331 ix86_emit_leave (red_offset);
9332 else
9334 /* When stack realignment really happens, the stack
9335 pointer must be recovered from the hard frame pointer
9336 if we are not using leave. */
9337 if (stack_realign_fp)
9338 pro_epilogue_adjust_stack (stack_pointer_rtx,
9339 hard_frame_pointer_rtx,
9340 const0_rtx, style, !using_drap);
9341 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9342 red_offset);
9347 if (using_drap)
9349 int param_ptr_offset = UNITS_PER_WORD;
9350 rtx insn;
9352 gcc_assert (stack_realign_drap);
9354 if (ix86_static_chain_on_stack)
9355 param_ptr_offset += UNITS_PER_WORD;
9356 if (!call_used_regs[REGNO (crtl->drap_reg)])
9357 param_ptr_offset += UNITS_PER_WORD;
9359 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9360 crtl->drap_reg,
9361 GEN_INT (-param_ptr_offset)));
9363 ix86_cfa_state->reg = stack_pointer_rtx;
9364 ix86_cfa_state->offset = param_ptr_offset;
9366 add_reg_note (insn, REG_CFA_DEF_CFA,
9367 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9368 GEN_INT (ix86_cfa_state->offset)));
9369 RTX_FRAME_RELATED_P (insn) = 1;
9371 if (!call_used_regs[REGNO (crtl->drap_reg)])
9372 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9375 /* Remove the saved static chain from the stack. The use of ECX is
9376 merely as a scratch register, not as the actual static chain. */
9377 if (ix86_static_chain_on_stack)
9379 rtx r, insn;
9381 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9382 ix86_cfa_state->offset += UNITS_PER_WORD;
9384 r = gen_rtx_REG (Pmode, CX_REG);
9385 insn = emit_insn (ix86_gen_pop1 (r));
9387 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9388 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9389 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9390 RTX_FRAME_RELATED_P (insn) = 1;
9393 /* Sibcall epilogues don't want a return instruction. */
9394 if (style == 0)
9396 *ix86_cfa_state = cfa_state_save;
9397 return;
9400 if (crtl->args.pops_args && crtl->args.size)
9402 rtx popc = GEN_INT (crtl->args.pops_args);
9404 /* The i386 can only pop 64K bytes. If asked to pop more, pop the return
9405 address, do an explicit add, and jump indirectly to the caller. */
9407 if (crtl->args.pops_args >= 65536)
9409 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9410 rtx insn;
9412 /* There is no "pascal" calling convention in any 64bit ABI. */
9413 gcc_assert (!TARGET_64BIT);
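/* Rough sketch (added) of the sequence emitted below:
       popl %ecx          # pop the return address
       addl $N, %esp      # N = crtl->args.pops_args
       jmp  *%ecx         # return to the caller
   The actual insns come from the generators below and the add may be an
   lea instead.  */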
9415 insn = emit_insn (gen_popsi1 (ecx));
9416 ix86_cfa_state->offset -= UNITS_PER_WORD;
9418 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9419 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9420 add_reg_note (insn, REG_CFA_REGISTER,
9421 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9422 RTX_FRAME_RELATED_P (insn) = 1;
9424 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9425 popc, -1, true);
9426 emit_jump_insn (gen_return_indirect_internal (ecx));
9428 else
9429 emit_jump_insn (gen_return_pop_internal (popc));
9431 else
9432 emit_jump_insn (gen_return_internal ());
9434 /* Restore the state back to the state from the prologue,
9435 so that it's correct for the next epilogue. */
9436 *ix86_cfa_state = cfa_state_save;
9439 /* Reset from the function's potential modifications. */
9441 static void
9442 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9443 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9445 if (pic_offset_table_rtx)
9446 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9447 #if TARGET_MACHO
9448 /* Mach-O doesn't support labels at the end of objects, so if
9449 it looks like we might want one, insert a NOP. */
9451 rtx insn = get_last_insn ();
9452 while (insn
9453 && NOTE_P (insn)
9454 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9455 insn = PREV_INSN (insn);
9456 if (insn
9457 && (LABEL_P (insn)
9458 || (NOTE_P (insn)
9459 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9460 fputs ("\tnop\n", file);
9462 #endif
9466 /* Extract the parts of an RTL expression that is a valid memory address
9467 for an instruction. Return 0 if the structure of the address is
9468 grossly off. Return -1 if the address contains ASHIFT, so it is not
9469 strictly valid, but is still used for computing the length of an lea instruction. */
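/* For reference (added), a complete x86 address has the form
       base + index * scale + displacement   (plus an optional segment)
   and that is what is filled into struct ix86_address below.  E.g.
   (plus (plus (mult (reg A) (const_int 4)) (reg B)) (const_int 8))
   decomposes into base = B, index = A, scale = 4, disp = 8.  */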
9472 ix86_decompose_address (rtx addr, struct ix86_address *out)
9474 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9475 rtx base_reg, index_reg;
9476 HOST_WIDE_INT scale = 1;
9477 rtx scale_rtx = NULL_RTX;
9478 rtx tmp;
9479 int retval = 1;
9480 enum ix86_address_seg seg = SEG_DEFAULT;
9482 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9483 base = addr;
9484 else if (GET_CODE (addr) == PLUS)
9486 rtx addends[4], op;
9487 int n = 0, i;
9489 op = addr;
9492 if (n >= 4)
9493 return 0;
9494 addends[n++] = XEXP (op, 1);
9495 op = XEXP (op, 0);
9497 while (GET_CODE (op) == PLUS);
9498 if (n >= 4)
9499 return 0;
9500 addends[n] = op;
9502 for (i = n; i >= 0; --i)
9504 op = addends[i];
9505 switch (GET_CODE (op))
9507 case MULT:
9508 if (index)
9509 return 0;
9510 index = XEXP (op, 0);
9511 scale_rtx = XEXP (op, 1);
9512 break;
9514 case ASHIFT:
9515 if (index)
9516 return 0;
9517 index = XEXP (op, 0);
9518 tmp = XEXP (op, 1);
9519 if (!CONST_INT_P (tmp))
9520 return 0;
9521 scale = INTVAL (tmp);
9522 if ((unsigned HOST_WIDE_INT) scale > 3)
9523 return 0;
9524 scale = 1 << scale;
9525 break;
9527 case UNSPEC:
9528 if (XINT (op, 1) == UNSPEC_TP
9529 && TARGET_TLS_DIRECT_SEG_REFS
9530 && seg == SEG_DEFAULT)
9531 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9532 else
9533 return 0;
9534 break;
9536 case REG:
9537 case SUBREG:
9538 if (!base)
9539 base = op;
9540 else if (!index)
9541 index = op;
9542 else
9543 return 0;
9544 break;
9546 case CONST:
9547 case CONST_INT:
9548 case SYMBOL_REF:
9549 case LABEL_REF:
9550 if (disp)
9551 return 0;
9552 disp = op;
9553 break;
9555 default:
9556 return 0;
9560 else if (GET_CODE (addr) == MULT)
9562 index = XEXP (addr, 0); /* index*scale */
9563 scale_rtx = XEXP (addr, 1);
9565 else if (GET_CODE (addr) == ASHIFT)
9567 /* We're called for lea too, which implements ashift on occasion. */
9568 index = XEXP (addr, 0);
9569 tmp = XEXP (addr, 1);
9570 if (!CONST_INT_P (tmp))
9571 return 0;
9572 scale = INTVAL (tmp);
9573 if ((unsigned HOST_WIDE_INT) scale > 3)
9574 return 0;
9575 scale = 1 << scale;
9576 retval = -1;
9578 else
9579 disp = addr; /* displacement */
9581 /* Extract the integral value of scale. */
9582 if (scale_rtx)
9584 if (!CONST_INT_P (scale_rtx))
9585 return 0;
9586 scale = INTVAL (scale_rtx);
9589 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9590 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9592 /* Avoid useless 0 displacement. */
9593 if (disp == const0_rtx && (base || index))
9594 disp = NULL_RTX;
9596 /* Allow arg pointer and stack pointer as index if there is no scaling. */
9597 if (base_reg && index_reg && scale == 1
9598 && (index_reg == arg_pointer_rtx
9599 || index_reg == frame_pointer_rtx
9600 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9602 rtx tmp;
9603 tmp = base, base = index, index = tmp;
9604 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9607 /* Special case: %ebp cannot be encoded as a base without a displacement.
9608 Similarly %r13. */
9609 if (!disp
9610 && base_reg
9611 && (base_reg == hard_frame_pointer_rtx
9612 || base_reg == frame_pointer_rtx
9613 || base_reg == arg_pointer_rtx
9614 || (REG_P (base_reg)
9615 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9616 || REGNO (base_reg) == R13_REG))))
9617 disp = const0_rtx;
9619 /* Special case: on the K6, [%esi] makes the instruction vector decoded.
9620 Avoid this by transforming it to [%esi+0].
9621 Reload calls address legitimization without cfun defined, so we need
9622 to test cfun for being non-NULL. */
9623 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9624 && base_reg && !index_reg && !disp
9625 && REG_P (base_reg)
9626 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9627 disp = const0_rtx;
9629 /* Special case: encode reg+reg instead of reg*2. */
9630 if (!base && index && scale == 2)
9631 base = index, base_reg = index_reg, scale = 1;
9633 /* Special case: scaling cannot be encoded without base or displacement. */
9634 if (!base && !disp && index && scale != 1)
9635 disp = const0_rtx;
9637 out->base = base;
9638 out->index = index;
9639 out->disp = disp;
9640 out->scale = scale;
9641 out->seg = seg;
9643 return retval;
9646 /* Return the cost of the memory address x.
9647 For the i386, it is better to use a complex address than to let gcc copy
9648 the address into a reg and make a new pseudo. But not if the address
9649 requires two regs - that would mean more pseudos with longer
9650 lifetimes. */
9651 static int
9652 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9654 struct ix86_address parts;
9655 int cost = 1;
9656 int ok = ix86_decompose_address (x, &parts);
9658 gcc_assert (ok);
9660 if (parts.base && GET_CODE (parts.base) == SUBREG)
9661 parts.base = SUBREG_REG (parts.base);
9662 if (parts.index && GET_CODE (parts.index) == SUBREG)
9663 parts.index = SUBREG_REG (parts.index);
9665 /* Attempt to minimize number of registers in the address. */
9666 if ((parts.base
9667 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9668 || (parts.index
9669 && (!REG_P (parts.index)
9670 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9671 cost++;
9673 if (parts.base
9674 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9675 && parts.index
9676 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9677 && parts.base != parts.index)
9678 cost++;
9680 /* The AMD K6 doesn't like addresses with the ModR/M byte set to 00_xxx_100b,
9681 since its predecode logic can't detect the length of such instructions
9682 and decoding degenerates to the vector decoder. Increase the cost of such
9683 addresses here. The penalty is at least 2 cycles. It may be worthwhile
9684 to split such addresses or even to refuse them entirely.
9686 The following addressing modes are affected:
9687 [base+scale*index]
9688 [scale*index+disp]
9689 [base+index]
9691 The first and last cases may be avoidable by explicitly coding the zero into
9692 the memory address, but I don't have an AMD K6 machine handy to check this
9693 theory. */
9695 if (TARGET_K6
9696 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9697 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9698 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9699 cost += 10;
9701 return cost;
9704 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
9705 this is used to form addresses to local data when -fPIC is in
9706 use. */
9708 static bool
9709 darwin_local_data_pic (rtx disp)
9711 return (GET_CODE (disp) == UNSPEC
9712 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9715 /* Determine if a given RTX is a valid constant. We already know this
9716 satisfies CONSTANT_P. */
9718 bool
9719 legitimate_constant_p (rtx x)
9721 switch (GET_CODE (x))
9723 case CONST:
9724 x = XEXP (x, 0);
9726 if (GET_CODE (x) == PLUS)
9728 if (!CONST_INT_P (XEXP (x, 1)))
9729 return false;
9730 x = XEXP (x, 0);
9733 if (TARGET_MACHO && darwin_local_data_pic (x))
9734 return true;
9736 /* Only some unspecs are valid as "constants". */
9737 if (GET_CODE (x) == UNSPEC)
9738 switch (XINT (x, 1))
9740 case UNSPEC_GOT:
9741 case UNSPEC_GOTOFF:
9742 case UNSPEC_PLTOFF:
9743 return TARGET_64BIT;
9744 case UNSPEC_TPOFF:
9745 case UNSPEC_NTPOFF:
9746 x = XVECEXP (x, 0, 0);
9747 return (GET_CODE (x) == SYMBOL_REF
9748 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9749 case UNSPEC_DTPOFF:
9750 x = XVECEXP (x, 0, 0);
9751 return (GET_CODE (x) == SYMBOL_REF
9752 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9753 default:
9754 return false;
9757 /* We must have drilled down to a symbol. */
9758 if (GET_CODE (x) == LABEL_REF)
9759 return true;
9760 if (GET_CODE (x) != SYMBOL_REF)
9761 return false;
9762 /* FALLTHRU */
9764 case SYMBOL_REF:
9765 /* TLS symbols are never valid. */
9766 if (SYMBOL_REF_TLS_MODEL (x))
9767 return false;
9769 /* DLLIMPORT symbols are never valid. */
9770 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9771 && SYMBOL_REF_DLLIMPORT_P (x))
9772 return false;
9773 break;
9775 case CONST_DOUBLE:
9776 if (GET_MODE (x) == TImode
9777 && x != CONST0_RTX (TImode)
9778 && !TARGET_64BIT)
9779 return false;
9780 break;
9782 case CONST_VECTOR:
9783 if (!standard_sse_constant_p (x))
9784 return false;
9786 default:
9787 break;
9790 /* Otherwise we handle everything else in the move patterns. */
9791 return true;
9794 /* Determine if it's legal to put X into the constant pool. This
9795 is not possible for the address of thread-local symbols, which
9796 is checked above. */
9798 static bool
9799 ix86_cannot_force_const_mem (rtx x)
9801 /* We can always put integral constants and vectors in memory. */
9802 switch (GET_CODE (x))
9804 case CONST_INT:
9805 case CONST_DOUBLE:
9806 case CONST_VECTOR:
9807 return false;
9809 default:
9810 break;
9812 return !legitimate_constant_p (x);
9816 /* Nonzero if the constant value X is a legitimate general operand
9817 when generating PIC code. It is given that flag_pic is on and
9818 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9820 bool
9821 legitimate_pic_operand_p (rtx x)
9823 rtx inner;
9825 switch (GET_CODE (x))
9827 case CONST:
9828 inner = XEXP (x, 0);
9829 if (GET_CODE (inner) == PLUS
9830 && CONST_INT_P (XEXP (inner, 1)))
9831 inner = XEXP (inner, 0);
9833 /* Only some unspecs are valid as "constants". */
9834 if (GET_CODE (inner) == UNSPEC)
9835 switch (XINT (inner, 1))
9837 case UNSPEC_GOT:
9838 case UNSPEC_GOTOFF:
9839 case UNSPEC_PLTOFF:
9840 return TARGET_64BIT;
9841 case UNSPEC_TPOFF:
9842 x = XVECEXP (inner, 0, 0);
9843 return (GET_CODE (x) == SYMBOL_REF
9844 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9845 case UNSPEC_MACHOPIC_OFFSET:
9846 return legitimate_pic_address_disp_p (x);
9847 default:
9848 return false;
9850 /* FALLTHRU */
9852 case SYMBOL_REF:
9853 case LABEL_REF:
9854 return legitimate_pic_address_disp_p (x);
9856 default:
9857 return true;
9861 /* Determine if a given CONST RTX is a valid memory displacement
9862 in PIC mode. */
9865 legitimate_pic_address_disp_p (rtx disp)
9867 bool saw_plus;
9869 /* In 64bit mode we can allow direct addresses of symbols and labels
9870 when they are not dynamic symbols. */
9871 if (TARGET_64BIT)
9873 rtx op0 = disp, op1;
9875 switch (GET_CODE (disp))
9877 case LABEL_REF:
9878 return true;
9880 case CONST:
9881 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9882 break;
9883 op0 = XEXP (XEXP (disp, 0), 0);
9884 op1 = XEXP (XEXP (disp, 0), 1);
9885 if (!CONST_INT_P (op1)
9886 || INTVAL (op1) >= 16*1024*1024
9887 || INTVAL (op1) < -16*1024*1024)
9888 break;
9889 if (GET_CODE (op0) == LABEL_REF)
9890 return true;
9891 if (GET_CODE (op0) != SYMBOL_REF)
9892 break;
9893 /* FALLTHRU */
9895 case SYMBOL_REF:
9896 /* TLS references should always be enclosed in UNSPEC. */
9897 if (SYMBOL_REF_TLS_MODEL (op0))
9898 return false;
9899 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9900 && ix86_cmodel != CM_LARGE_PIC)
9901 return true;
9902 break;
9904 default:
9905 break;
9908 if (GET_CODE (disp) != CONST)
9909 return 0;
9910 disp = XEXP (disp, 0);
9912 if (TARGET_64BIT)
9914 /* It is unsafe to allow PLUS expressions here; this restriction limits the
9915 allowed distance of GOT references. We should not need these anyway. */
9916 if (GET_CODE (disp) != UNSPEC
9917 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9918 && XINT (disp, 1) != UNSPEC_GOTOFF
9919 && XINT (disp, 1) != UNSPEC_PLTOFF))
9920 return 0;
9922 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9923 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9924 return 0;
9925 return 1;
9928 saw_plus = false;
9929 if (GET_CODE (disp) == PLUS)
9931 if (!CONST_INT_P (XEXP (disp, 1)))
9932 return 0;
9933 disp = XEXP (disp, 0);
9934 saw_plus = true;
9937 if (TARGET_MACHO && darwin_local_data_pic (disp))
9938 return 1;
9940 if (GET_CODE (disp) != UNSPEC)
9941 return 0;
9943 switch (XINT (disp, 1))
9945 case UNSPEC_GOT:
9946 if (saw_plus)
9947 return false;
9948 /* We need to check for both symbols and labels because VxWorks loads
9949 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
9950 details. */
9951 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9952 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9953 case UNSPEC_GOTOFF:
9954 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
9955 The ABI also specifies a 32bit relocation, but we don't produce it in
9956 the small PIC model at all. */
9957 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9958 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9959 && !TARGET_64BIT)
9960 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9961 return false;
9962 case UNSPEC_GOTTPOFF:
9963 case UNSPEC_GOTNTPOFF:
9964 case UNSPEC_INDNTPOFF:
9965 if (saw_plus)
9966 return false;
9967 disp = XVECEXP (disp, 0, 0);
9968 return (GET_CODE (disp) == SYMBOL_REF
9969 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9970 case UNSPEC_NTPOFF:
9971 disp = XVECEXP (disp, 0, 0);
9972 return (GET_CODE (disp) == SYMBOL_REF
9973 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9974 case UNSPEC_DTPOFF:
9975 disp = XVECEXP (disp, 0, 0);
9976 return (GET_CODE (disp) == SYMBOL_REF
9977 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9980 return 0;
9983 /* Recognizes RTL expressions that are valid memory addresses for an
9984 instruction. The MODE argument is the machine mode for the MEM
9985 expression that wants to use this address.
9987 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
9988 convert common non-canonical forms to canonical form so that they will
9989 be recognized. */
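/* Summary note (added): the checks below validate, in order, the base
   register, the index register, the scale factor (which must be 1, 2, 4
   or 8), and the displacement.  */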
9991 static bool
9992 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9993 rtx addr, bool strict)
9995 struct ix86_address parts;
9996 rtx base, index, disp;
9997 HOST_WIDE_INT scale;
9999 if (ix86_decompose_address (addr, &parts) <= 0)
10000 /* Decomposition failed. */
10001 return false;
10003 base = parts.base;
10004 index = parts.index;
10005 disp = parts.disp;
10006 scale = parts.scale;
10008 /* Validate base register.
10010 Don't allow SUBREG's that span more than a word here. It can lead to spill
10011 failures when the base is one word out of a two word structure, which is
10012 represented internally as a DImode int. */
10014 if (base)
10016 rtx reg;
10018 if (REG_P (base))
10019 reg = base;
10020 else if (GET_CODE (base) == SUBREG
10021 && REG_P (SUBREG_REG (base))
10022 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
10023 <= UNITS_PER_WORD)
10024 reg = SUBREG_REG (base);
10025 else
10026 /* Base is not a register. */
10027 return false;
10029 if (GET_MODE (base) != Pmode)
10030 /* Base is not in Pmode. */
10031 return false;
10033 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
10034 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
10035 /* Base is not valid. */
10036 return false;
10039 /* Validate index register.
10041 Don't allow SUBREG's that span more than a word here -- same as above. */
10043 if (index)
10045 rtx reg;
10047 if (REG_P (index))
10048 reg = index;
10049 else if (GET_CODE (index) == SUBREG
10050 && REG_P (SUBREG_REG (index))
10051 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
10052 <= UNITS_PER_WORD)
10053 reg = SUBREG_REG (index);
10054 else
10055 /* Index is not a register. */
10056 return false;
10058 if (GET_MODE (index) != Pmode)
10059 /* Index is not in Pmode. */
10060 return false;
10062 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
10063 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
10064 /* Index is not valid. */
10065 return false;
10068 /* Validate scale factor. */
10069 if (scale != 1)
10071 if (!index)
10072 /* Scale without index. */
10073 return false;
10075 if (scale != 2 && scale != 4 && scale != 8)
10076 /* Scale is not a valid multiplier. */
10077 return false;
10080 /* Validate displacement. */
10081 if (disp)
10083 if (GET_CODE (disp) == CONST
10084 && GET_CODE (XEXP (disp, 0)) == UNSPEC
10085 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
10086 switch (XINT (XEXP (disp, 0), 1))
10088 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
10089 used. The ABI also specifies 32bit relocations, but we don't produce
10090 them at all and use IP-relative addressing instead. */
10091 case UNSPEC_GOT:
10092 case UNSPEC_GOTOFF:
10093 gcc_assert (flag_pic);
10094 if (!TARGET_64BIT)
10095 goto is_legitimate_pic;
10097 /* 64bit address unspec. */
10098 return false;
10100 case UNSPEC_GOTPCREL:
10101 gcc_assert (flag_pic);
10102 goto is_legitimate_pic;
10104 case UNSPEC_GOTTPOFF:
10105 case UNSPEC_GOTNTPOFF:
10106 case UNSPEC_INDNTPOFF:
10107 case UNSPEC_NTPOFF:
10108 case UNSPEC_DTPOFF:
10109 break;
10111 default:
10112 /* Invalid address unspec. */
10113 return false;
10116 else if (SYMBOLIC_CONST (disp)
10117 && (flag_pic
10118 || (TARGET_MACHO
10119 #if TARGET_MACHO
10120 && MACHOPIC_INDIRECT
10121 && !machopic_operand_p (disp)
10122 #endif
10126 is_legitimate_pic:
10127 if (TARGET_64BIT && (index || base))
10129 /* foo@dtpoff(%rX) is ok. */
10130 if (GET_CODE (disp) != CONST
10131 || GET_CODE (XEXP (disp, 0)) != PLUS
10132 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
10133 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
10134 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
10135 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
10136 /* Non-constant pic memory reference. */
10137 return false;
10139 else if (! legitimate_pic_address_disp_p (disp))
10140 /* Displacement is an invalid pic construct. */
10141 return false;
10143 /* This code used to verify that a symbolic pic displacement
10144 includes the pic_offset_table_rtx register.
10146 While this is a good idea, unfortunately these constructs may
10147 be created by the "adds using lea" optimization for incorrect
10148 code like:
10150 int a;
10151 int foo(int i)
10153 return *(&a+i);
10156 This code is nonsensical, but results in addressing the
10157 GOT table with pic_offset_table_rtx as the base. We can't
10158 just refuse it easily, since it gets matched by the
10159 "addsi3" pattern, which later gets split to an lea when
10160 the output register differs from the input. While this
10161 could be handled by a separate addsi pattern for this case
10162 that never results in an lea, disabling this test seems to be
10163 the easier and correct fix for the crash. */
10165 else if (GET_CODE (disp) != LABEL_REF
10166 && !CONST_INT_P (disp)
10167 && (GET_CODE (disp) != CONST
10168 || !legitimate_constant_p (disp))
10169 && (GET_CODE (disp) != SYMBOL_REF
10170 || !legitimate_constant_p (disp)))
10171 /* Displacement is not constant. */
10172 return false;
10173 else if (TARGET_64BIT
10174 && !x86_64_immediate_operand (disp, VOIDmode))
10175 /* Displacement is out of range. */
10176 return false;
10179 /* Everything looks valid. */
10180 return true;
10183 /* Determine if a given RTX is a valid constant address. */
10185 bool
10186 constant_address_p (rtx x)
10188 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
10191 /* Return a unique alias set for the GOT. */
10193 static alias_set_type
10194 ix86_GOT_alias_set (void)
10196 static alias_set_type set = -1;
10197 if (set == -1)
10198 set = new_alias_set ();
10199 return set;
10202 /* Return a legitimate reference for ORIG (an address) using the
10203 register REG. If REG is 0, a new pseudo is generated.
10205 There are two types of references that must be handled:
10207 1. Global data references must load the address from the GOT, via
10208 the PIC reg. An insn is emitted to do this load, and the reg is
10209 returned.
10211 2. Static data references, constant pool addresses, and code labels
10212 compute the address as an offset from the GOT, whose base is in
10213 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
10214 differentiate them from global data objects. The returned
10215 address is the PIC reg + an unspec constant.
10217 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
10218 reg also appears in the address. */
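/* Rough sketch (added) of the RTL produced below for the 32-bit,
   non-Mach-O case:
     local data   -> (plus pic_reg (const (unspec [sym] UNSPEC_GOTOFF)))
     global data  -> (mem (plus pic_reg (const (unspec [sym] UNSPEC_GOT))))
   where pic_reg is pic_offset_table_rtx.  */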
10220 static rtx
10221 legitimize_pic_address (rtx orig, rtx reg)
10223 rtx addr = orig;
10224 rtx new_rtx = orig;
10225 rtx base;
10227 #if TARGET_MACHO
10228 if (TARGET_MACHO && !TARGET_64BIT)
10230 if (reg == 0)
10231 reg = gen_reg_rtx (Pmode);
10232 /* Use the generic Mach-O PIC machinery. */
10233 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10235 #endif
10237 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10238 new_rtx = addr;
10239 else if (TARGET_64BIT
10240 && ix86_cmodel != CM_SMALL_PIC
10241 && gotoff_operand (addr, Pmode))
10243 rtx tmpreg;
10244 /* This symbol may be referenced via a displacement from the PIC
10245 base address (@GOTOFF). */
10247 if (reload_in_progress)
10248 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10249 if (GET_CODE (addr) == CONST)
10250 addr = XEXP (addr, 0);
10251 if (GET_CODE (addr) == PLUS)
10253 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10254 UNSPEC_GOTOFF);
10255 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10257 else
10258 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10259 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10260 if (!reg)
10261 tmpreg = gen_reg_rtx (Pmode);
10262 else
10263 tmpreg = reg;
10264 emit_move_insn (tmpreg, new_rtx);
10266 if (reg != 0)
10268 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10269 tmpreg, 1, OPTAB_DIRECT);
10270 new_rtx = reg;
10272 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10274 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10276 /* This symbol may be referenced via a displacement from the PIC
10277 base address (@GOTOFF). */
10279 if (reload_in_progress)
10280 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10281 if (GET_CODE (addr) == CONST)
10282 addr = XEXP (addr, 0);
10283 if (GET_CODE (addr) == PLUS)
10285 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10286 UNSPEC_GOTOFF);
10287 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10289 else
10290 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10291 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10292 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10294 if (reg != 0)
10296 emit_move_insn (reg, new_rtx);
10297 new_rtx = reg;
10300 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10301 /* We can't use @GOTOFF for text labels on VxWorks;
10302 see gotoff_operand. */
10303 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10305 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10307 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10308 return legitimize_dllimport_symbol (addr, true);
10309 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10310 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10311 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10313 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10314 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10318 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10320 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10321 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10322 new_rtx = gen_const_mem (Pmode, new_rtx);
10323 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10325 if (reg == 0)
10326 reg = gen_reg_rtx (Pmode);
10327 /* Use gen_movsi directly, otherwise the address is loaded
10328 into a register for CSE. We don't want to CSE these addresses;
10329 instead we CSE addresses loaded from the GOT table, so skip this. */
10330 emit_insn (gen_movsi (reg, new_rtx));
10331 new_rtx = reg;
10333 else
10335 /* This symbol must be referenced via a load from the
10336 Global Offset Table (@GOT). */
10338 if (reload_in_progress)
10339 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10340 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10341 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10342 if (TARGET_64BIT)
10343 new_rtx = force_reg (Pmode, new_rtx);
10344 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10345 new_rtx = gen_const_mem (Pmode, new_rtx);
10346 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10348 if (reg == 0)
10349 reg = gen_reg_rtx (Pmode);
10350 emit_move_insn (reg, new_rtx);
10351 new_rtx = reg;
10354 else
10356 if (CONST_INT_P (addr)
10357 && !x86_64_immediate_operand (addr, VOIDmode))
10359 if (reg)
10361 emit_move_insn (reg, addr);
10362 new_rtx = reg;
10364 else
10365 new_rtx = force_reg (Pmode, addr);
10367 else if (GET_CODE (addr) == CONST)
10369 addr = XEXP (addr, 0);
10371 /* We must match stuff we generate before. Assume the only
10372 unspecs that can get here are ours. Not that we could do
10373 anything with them anyway.... */
10374 if (GET_CODE (addr) == UNSPEC
10375 || (GET_CODE (addr) == PLUS
10376 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10377 return orig;
10378 gcc_assert (GET_CODE (addr) == PLUS);
10380 if (GET_CODE (addr) == PLUS)
10382 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10384 /* Check first to see if this is a constant offset from a @GOTOFF
10385 symbol reference. */
10386 if (gotoff_operand (op0, Pmode)
10387 && CONST_INT_P (op1))
10389 if (!TARGET_64BIT)
10391 if (reload_in_progress)
10392 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10393 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10394 UNSPEC_GOTOFF);
10395 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10396 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10397 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10399 if (reg != 0)
10401 emit_move_insn (reg, new_rtx);
10402 new_rtx = reg;
10405 else
10407 if (INTVAL (op1) < -16*1024*1024
10408 || INTVAL (op1) >= 16*1024*1024)
10410 if (!x86_64_immediate_operand (op1, Pmode))
10411 op1 = force_reg (Pmode, op1);
10412 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10416 else
10418 base = legitimize_pic_address (XEXP (addr, 0), reg);
10419 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10420 base == reg ? NULL_RTX : reg);
10422 if (CONST_INT_P (new_rtx))
10423 new_rtx = plus_constant (base, INTVAL (new_rtx));
10424 else
10426 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10428 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10429 new_rtx = XEXP (new_rtx, 1);
10431 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10436 return new_rtx;
10439 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10441 static rtx
10442 get_thread_pointer (int to_reg)
10444 rtx tp, reg, insn;
10446 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10447 if (!to_reg)
10448 return tp;
10450 reg = gen_reg_rtx (Pmode);
10451 insn = gen_rtx_SET (VOIDmode, reg, tp);
10452 insn = emit_insn (insn);
10454 return reg;
10457 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10458 false if we expect this to be used for a memory address and true if
10459 we expect to load the address into a register. */
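/* Overview note (added): the switch below covers the four TLS models,
   global dynamic, local dynamic, initial exec and local exec, roughly in
   decreasing order of generality.  */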
10461 static rtx
10462 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10464 rtx dest, base, off, pic, tp;
10465 int type;
10467 switch (model)
10469 case TLS_MODEL_GLOBAL_DYNAMIC:
10470 dest = gen_reg_rtx (Pmode);
10471 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10473 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10475 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10477 start_sequence ();
10478 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10479 insns = get_insns ();
10480 end_sequence ();
10482 RTL_CONST_CALL_P (insns) = 1;
10483 emit_libcall_block (insns, dest, rax, x);
10485 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10486 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10487 else
10488 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10490 if (TARGET_GNU2_TLS)
10492 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10494 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10496 break;
10498 case TLS_MODEL_LOCAL_DYNAMIC:
10499 base = gen_reg_rtx (Pmode);
10500 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10502 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10504 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10506 start_sequence ();
10507 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10508 insns = get_insns ();
10509 end_sequence ();
10511 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10512 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10513 RTL_CONST_CALL_P (insns) = 1;
10514 emit_libcall_block (insns, base, rax, note);
10516 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10517 emit_insn (gen_tls_local_dynamic_base_64 (base));
10518 else
10519 emit_insn (gen_tls_local_dynamic_base_32 (base));
10521 if (TARGET_GNU2_TLS)
10523 rtx x = ix86_tls_module_base ();
10525 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10526 gen_rtx_MINUS (Pmode, x, tp));
10529 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10530 off = gen_rtx_CONST (Pmode, off);
10532 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10534 if (TARGET_GNU2_TLS)
10536 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10538 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10541 break;
10543 case TLS_MODEL_INITIAL_EXEC:
10544 if (TARGET_64BIT)
10546 pic = NULL;
10547 type = UNSPEC_GOTNTPOFF;
10549 else if (flag_pic)
10551 if (reload_in_progress)
10552 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10553 pic = pic_offset_table_rtx;
10554 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10556 else if (!TARGET_ANY_GNU_TLS)
10558 pic = gen_reg_rtx (Pmode);
10559 emit_insn (gen_set_got (pic));
10560 type = UNSPEC_GOTTPOFF;
10562 else
10564 pic = NULL;
10565 type = UNSPEC_INDNTPOFF;
10568 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10569 off = gen_rtx_CONST (Pmode, off);
10570 if (pic)
10571 off = gen_rtx_PLUS (Pmode, pic, off);
10572 off = gen_const_mem (Pmode, off);
10573 set_mem_alias_set (off, ix86_GOT_alias_set ());
10575 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10577 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10578 off = force_reg (Pmode, off);
10579 return gen_rtx_PLUS (Pmode, base, off);
10581 else
10583 base = get_thread_pointer (true);
10584 dest = gen_reg_rtx (Pmode);
10585 emit_insn (gen_subsi3 (dest, base, off));
10587 break;
10589 case TLS_MODEL_LOCAL_EXEC:
10590 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10591 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10592 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10593 off = gen_rtx_CONST (Pmode, off);
10595 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10597 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10598 return gen_rtx_PLUS (Pmode, base, off);
10600 else
10602 base = get_thread_pointer (true);
10603 dest = gen_reg_rtx (Pmode);
10604 emit_insn (gen_subsi3 (dest, base, off));
10606 break;
10608 default:
10609 gcc_unreachable ();
10612 return dest;
10615 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10616 to symbol DECL. */
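/* Illustrative example (added): for a decl whose assembler name is "foo",
   this builds a read-only artificial VAR_DECL whose DECL_RTL is a constant
   mem reference to the symbol "__imp_foo" (or "__imp__foo" when a user
   label prefix is in use), i.e. the import-table slot filled in by the
   dynamic linker.  */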
10618 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10619 htab_t dllimport_map;
10621 static tree
10622 get_dllimport_decl (tree decl)
10624 struct tree_map *h, in;
10625 void **loc;
10626 const char *name;
10627 const char *prefix;
10628 size_t namelen, prefixlen;
10629 char *imp_name;
10630 tree to;
10631 rtx rtl;
10633 if (!dllimport_map)
10634 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10636 in.hash = htab_hash_pointer (decl);
10637 in.base.from = decl;
10638 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10639 h = (struct tree_map *) *loc;
10640 if (h)
10641 return h->to;
10643 *loc = h = GGC_NEW (struct tree_map);
10644 h->hash = in.hash;
10645 h->base.from = decl;
10646 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10647 VAR_DECL, NULL, ptr_type_node);
10648 DECL_ARTIFICIAL (to) = 1;
10649 DECL_IGNORED_P (to) = 1;
10650 DECL_EXTERNAL (to) = 1;
10651 TREE_READONLY (to) = 1;
10653 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10654 name = targetm.strip_name_encoding (name);
10655 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10656 ? "*__imp_" : "*__imp__";
10657 namelen = strlen (name);
10658 prefixlen = strlen (prefix);
10659 imp_name = (char *) alloca (namelen + prefixlen + 1);
10660 memcpy (imp_name, prefix, prefixlen);
10661 memcpy (imp_name + prefixlen, name, namelen + 1);
10663 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10664 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10665 SET_SYMBOL_REF_DECL (rtl, to);
10666 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10668 rtl = gen_const_mem (Pmode, rtl);
10669 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10671 SET_DECL_RTL (to, rtl);
10672 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10674 return to;
10677 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10678 true if we require the result be a register. */
10680 static rtx
10681 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10683 tree imp_decl;
10684 rtx x;
10686 gcc_assert (SYMBOL_REF_DECL (symbol));
10687 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10689 x = DECL_RTL (imp_decl);
10690 if (want_reg)
10691 x = force_reg (Pmode, x);
10692 return x;
10695 /* Try machine-dependent ways of modifying an illegitimate address
10696 to be legitimate. If we find one, return the new, valid address.
10697 This macro is used in only one place: `memory_address' in explow.c.
10699 OLDX is the address as it was before break_out_memory_refs was called.
10700 In some cases it is useful to look at this to decide what needs to be done.
10702 It is always safe for this macro to do nothing. It exists to recognize
10703 opportunities to optimize the output.
10705 For the 80386, we handle X+REG by loading X into a register R and
10706 using R+REG. R will go in a general reg and indexing will be used.
10707 However, if REG is a broken-out memory address or multiplication,
10708 nothing needs to be done because REG can certainly go in a general reg.
10710 When -fpic is used, special handling is needed for symbolic references.
10711 See comments by legitimize_pic_address in i386.c for details. */
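/* A couple of illustrative canonicalizations performed below (added):
     (plus (ashift (reg) (const_int 2)) (reg))
       -> (plus (mult (reg) (const_int 4)) (reg))
     (plus (mult (reg) (const)) (plus (reg) (const)))
       -> (plus (plus (mult (reg) (const)) (reg)) (const))  */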
10713 static rtx
10714 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10715 enum machine_mode mode)
10717 int changed = 0;
10718 unsigned log;
10720 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10721 if (log)
10722 return legitimize_tls_address (x, (enum tls_model) log, false);
10723 if (GET_CODE (x) == CONST
10724 && GET_CODE (XEXP (x, 0)) == PLUS
10725 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10726 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10728 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10729 (enum tls_model) log, false);
10730 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10733 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10735 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10736 return legitimize_dllimport_symbol (x, true);
10737 if (GET_CODE (x) == CONST
10738 && GET_CODE (XEXP (x, 0)) == PLUS
10739 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10740 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10742 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10743 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10747 if (flag_pic && SYMBOLIC_CONST (x))
10748 return legitimize_pic_address (x, 0);
10750 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10751 if (GET_CODE (x) == ASHIFT
10752 && CONST_INT_P (XEXP (x, 1))
10753 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10755 changed = 1;
10756 log = INTVAL (XEXP (x, 1));
10757 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10758 GEN_INT (1 << log));
10761 if (GET_CODE (x) == PLUS)
10763 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10765 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10766 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10767 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10769 changed = 1;
10770 log = INTVAL (XEXP (XEXP (x, 0), 1));
10771 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10772 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10773 GEN_INT (1 << log));
10776 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10777 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10778 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10780 changed = 1;
10781 log = INTVAL (XEXP (XEXP (x, 1), 1));
10782 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10783 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10784 GEN_INT (1 << log));
10787 /* Put multiply first if it isn't already. */
10788 if (GET_CODE (XEXP (x, 1)) == MULT)
10790 rtx tmp = XEXP (x, 0);
10791 XEXP (x, 0) = XEXP (x, 1);
10792 XEXP (x, 1) = tmp;
10793 changed = 1;
10796 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10797 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10798 created by virtual register instantiation, register elimination, and
10799 similar optimizations. */
10800 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10802 changed = 1;
10803 x = gen_rtx_PLUS (Pmode,
10804 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10805 XEXP (XEXP (x, 1), 0)),
10806 XEXP (XEXP (x, 1), 1));
10809 /* Canonicalize
10810 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10811 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10812 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10813 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10814 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10815 && CONSTANT_P (XEXP (x, 1)))
10817 rtx constant;
10818 rtx other = NULL_RTX;
10820 if (CONST_INT_P (XEXP (x, 1)))
10822 constant = XEXP (x, 1);
10823 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10825 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10827 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10828 other = XEXP (x, 1);
10830 else
10831 constant = 0;
10833 if (constant)
10835 changed = 1;
10836 x = gen_rtx_PLUS (Pmode,
10837 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10838 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10839 plus_constant (other, INTVAL (constant)));
10843 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10844 return x;
10846 if (GET_CODE (XEXP (x, 0)) == MULT)
10848 changed = 1;
10849 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10852 if (GET_CODE (XEXP (x, 1)) == MULT)
10854 changed = 1;
10855 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10858 if (changed
10859 && REG_P (XEXP (x, 1))
10860 && REG_P (XEXP (x, 0)))
10861 return x;
10863 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10865 changed = 1;
10866 x = legitimize_pic_address (x, 0);
10869 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10870 return x;
10872 if (REG_P (XEXP (x, 0)))
10874 rtx temp = gen_reg_rtx (Pmode);
10875 rtx val = force_operand (XEXP (x, 1), temp);
10876 if (val != temp)
10877 emit_move_insn (temp, val);
10879 XEXP (x, 1) = temp;
10880 return x;
10883 else if (REG_P (XEXP (x, 1)))
10885 rtx temp = gen_reg_rtx (Pmode);
10886 rtx val = force_operand (XEXP (x, 0), temp);
10887 if (val != temp)
10888 emit_move_insn (temp, val);
10890 XEXP (x, 0) = temp;
10891 return x;
10895 return x;
10898 /* Print an integer constant expression in assembler syntax. Addition
10899 and subtraction are the only arithmetic that may appear in these
10900 expressions. FILE is the stdio stream to write to, X is the rtx, and
10901 CODE is the operand print code from the output string. */
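/* Example outputs (added, AT&T syntax): an UNSPEC_GOTOFF operand prints as
   "sym@GOTOFF", UNSPEC_GOT as "sym@GOT", and UNSPEC_GOTPCREL as
   "sym@GOTPCREL(%rip)"; see the switch below for the full list.  */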
10903 static void
10904 output_pic_addr_const (FILE *file, rtx x, int code)
10906 char buf[256];
10908 switch (GET_CODE (x))
10910 case PC:
10911 gcc_assert (flag_pic);
10912 putc ('.', file);
10913 break;
10915 case SYMBOL_REF:
10916 if (! TARGET_MACHO || TARGET_64BIT)
10917 output_addr_const (file, x);
10918 else
10920 const char *name = XSTR (x, 0);
10922 /* Mark the decl as referenced so that cgraph will
10923 output the function. */
10924 if (SYMBOL_REF_DECL (x))
10925 mark_decl_referenced (SYMBOL_REF_DECL (x));
10927 #if TARGET_MACHO
10928 if (MACHOPIC_INDIRECT
10929 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10930 name = machopic_indirection_name (x, /*stub_p=*/true);
10931 #endif
10932 assemble_name (file, name);
10934 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10935 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10936 fputs ("@PLT", file);
10937 break;
10939 case LABEL_REF:
10940 x = XEXP (x, 0);
10941 /* FALLTHRU */
10942 case CODE_LABEL:
10943 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10944 assemble_name (asm_out_file, buf);
10945 break;
10947 case CONST_INT:
10948 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10949 break;
10951 case CONST:
10952 /* This used to output parentheses around the expression,
10953 but that does not work on the 386 (either ATT or BSD assembler). */
10954 output_pic_addr_const (file, XEXP (x, 0), code);
10955 break;
10957 case CONST_DOUBLE:
10958 if (GET_MODE (x) == VOIDmode)
10960 /* We can use %d if the number is <32 bits and positive. */
10961 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10962 fprintf (file, "0x%lx%08lx",
10963 (unsigned long) CONST_DOUBLE_HIGH (x),
10964 (unsigned long) CONST_DOUBLE_LOW (x));
10965 else
10966 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10968 else
10969 /* We can't handle floating point constants;
10970 PRINT_OPERAND must handle them. */
10971 output_operand_lossage ("floating constant misused");
10972 break;
10974 case PLUS:
10975 /* Some assemblers need integer constants to appear first. */
10976 if (CONST_INT_P (XEXP (x, 0)))
10978 output_pic_addr_const (file, XEXP (x, 0), code);
10979 putc ('+', file);
10980 output_pic_addr_const (file, XEXP (x, 1), code);
10982 else
10984 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10985 output_pic_addr_const (file, XEXP (x, 1), code);
10986 putc ('+', file);
10987 output_pic_addr_const (file, XEXP (x, 0), code);
10989 break;
10991 case MINUS:
10992 if (!TARGET_MACHO)
10993 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10994 output_pic_addr_const (file, XEXP (x, 0), code);
10995 putc ('-', file);
10996 output_pic_addr_const (file, XEXP (x, 1), code);
10997 if (!TARGET_MACHO)
10998 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10999 break;
11001 case UNSPEC:
11002 gcc_assert (XVECLEN (x, 0) == 1);
11003 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
11004 switch (XINT (x, 1))
11006 case UNSPEC_GOT:
11007 fputs ("@GOT", file);
11008 break;
11009 case UNSPEC_GOTOFF:
11010 fputs ("@GOTOFF", file);
11011 break;
11012 case UNSPEC_PLTOFF:
11013 fputs ("@PLTOFF", file);
11014 break;
11015 case UNSPEC_GOTPCREL:
11016 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11017 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
11018 break;
11019 case UNSPEC_GOTTPOFF:
11020 /* FIXME: This might be @TPOFF in Sun ld too. */
11021 fputs ("@gottpoff", file);
11022 break;
11023 case UNSPEC_TPOFF:
11024 fputs ("@tpoff", file);
11025 break;
11026 case UNSPEC_NTPOFF:
11027 if (TARGET_64BIT)
11028 fputs ("@tpoff", file);
11029 else
11030 fputs ("@ntpoff", file);
11031 break;
11032 case UNSPEC_DTPOFF:
11033 fputs ("@dtpoff", file);
11034 break;
11035 case UNSPEC_GOTNTPOFF:
11036 if (TARGET_64BIT)
11037 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11038 "@gottpoff(%rip)": "@gottpoff[rip]", file);
11039 else
11040 fputs ("@gotntpoff", file);
11041 break;
11042 case UNSPEC_INDNTPOFF:
11043 fputs ("@indntpoff", file);
11044 break;
11045 #if TARGET_MACHO
11046 case UNSPEC_MACHOPIC_OFFSET:
11047 putc ('-', file);
11048 machopic_output_function_base_name (file);
11049 break;
11050 #endif
11051 default:
11052 output_operand_lossage ("invalid UNSPEC as operand");
11053 break;
11055 break;
11057 default:
11058 output_operand_lossage ("invalid expression as operand");
11062 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
11063 We need to emit DTP-relative relocations. */
11065 static void ATTRIBUTE_UNUSED
11066 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
11068 fputs (ASM_LONG, file);
11069 output_addr_const (file, x);
11070 fputs ("@dtpoff", file);
11071 switch (size)
11073 case 4:
11074 break;
11075 case 8:
11076 fputs (", 0", file);
11077 break;
11078 default:
11079 gcc_unreachable ();
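/* For instance (an illustrative sketch, with ASM_LONG assumed to expand to
   "\t.long\t"): a 4-byte DTP-relative reference to symbol "foo" comes out as

       .long   foo@dtpoff

   while the 8-byte case pads the upper half with zero:

       .long   foo@dtpoff, 0
*/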
11083 /* Return true if X is a representation of the PIC register. This copes
11084 with calls from ix86_find_base_term, where the register might have
11085 been replaced by a cselib value. */
11087 static bool
11088 ix86_pic_register_p (rtx x)
11090 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
11091 return (pic_offset_table_rtx
11092 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
11093 else
11094 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
11097 /* In the name of slightly smaller debug output, and to cater to
11098 general assembler lossage, recognize PIC+GOTOFF and turn it back
11099 into a direct symbol reference.
11101 On Darwin, this is necessary to avoid a crash, because Darwin
11102 has a different PIC label for each routine but the DWARF debugging
11103 information is not associated with any particular routine, so it's
11104 necessary to remove references to the PIC label from RTL stored by
11105 the DWARF output code. */
11107 static rtx
11108 ix86_delegitimize_address (rtx x)
11110 rtx orig_x = delegitimize_mem_from_attrs (x);
11111 /* addend is NULL or some rtx if x is something+GOTOFF where
11112 something doesn't include the PIC register. */
11113 rtx addend = NULL_RTX;
11114 /* reg_addend is NULL or a multiple of some register. */
11115 rtx reg_addend = NULL_RTX;
11116 /* const_addend is NULL or a const_int. */
11117 rtx const_addend = NULL_RTX;
11118 /* This is the result, or NULL. */
11119 rtx result = NULL_RTX;
11121 x = orig_x;
11123 if (MEM_P (x))
11124 x = XEXP (x, 0);
11126 if (TARGET_64BIT)
11128 if (GET_CODE (x) != CONST
11129 || GET_CODE (XEXP (x, 0)) != UNSPEC
11130 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
11131 || !MEM_P (orig_x))
11132 return orig_x;
11133 x = XVECEXP (XEXP (x, 0), 0, 0);
11134 if (GET_MODE (orig_x) != Pmode)
11135 return simplify_gen_subreg (GET_MODE (orig_x), x, Pmode, 0);
11136 return x;
11139 if (GET_CODE (x) != PLUS
11140 || GET_CODE (XEXP (x, 1)) != CONST)
11141 return orig_x;
11143 if (ix86_pic_register_p (XEXP (x, 0)))
11144 /* %ebx + GOT/GOTOFF */
11146 else if (GET_CODE (XEXP (x, 0)) == PLUS)
11148 /* %ebx + %reg * scale + GOT/GOTOFF */
11149 reg_addend = XEXP (x, 0);
11150 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
11151 reg_addend = XEXP (reg_addend, 1);
11152 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
11153 reg_addend = XEXP (reg_addend, 0);
11154 else
11156 reg_addend = NULL_RTX;
11157 addend = XEXP (x, 0);
11160 else
11161 addend = XEXP (x, 0);
11163 x = XEXP (XEXP (x, 1), 0);
11164 if (GET_CODE (x) == PLUS
11165 && CONST_INT_P (XEXP (x, 1)))
11167 const_addend = XEXP (x, 1);
11168 x = XEXP (x, 0);
11171 if (GET_CODE (x) == UNSPEC
11172 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x) && !addend)
11173 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
11174 result = XVECEXP (x, 0, 0);
11176 if (TARGET_MACHO && darwin_local_data_pic (x)
11177 && !MEM_P (orig_x))
11178 result = XVECEXP (x, 0, 0);
11180 if (! result)
11181 return orig_x;
11183 if (const_addend)
11184 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
11185 if (reg_addend)
11186 result = gen_rtx_PLUS (Pmode, reg_addend, result);
11187 if (addend)
11189 /* If the rest of original X doesn't involve the PIC register, add
11190 addend and subtract pic_offset_table_rtx. This can happen e.g.
11191 for code like:
11192 leal (%ebx, %ecx, 4), %ecx
11194 movl foo@GOTOFF(%ecx), %edx
11195 in which case we return (%ecx - %ebx) + foo. */
11196 if (pic_offset_table_rtx)
11197 result = gen_rtx_PLUS (Pmode, gen_rtx_MINUS (Pmode, copy_rtx (addend),
11198 pic_offset_table_rtx),
11199 result);
11200 else
11201 return orig_x;
11203 if (GET_MODE (orig_x) != Pmode && MEM_P (orig_x))
11204 return simplify_gen_subreg (GET_MODE (orig_x), result, Pmode, 0);
11205 return result;
11208 /* If X is a machine specific address (i.e. a symbol or label being
11209 referenced as a displacement from the GOT implemented using an
11210 UNSPEC), then return the base term. Otherwise return X. */
11213 ix86_find_base_term (rtx x)
11215 rtx term;
11217 if (TARGET_64BIT)
11219 if (GET_CODE (x) != CONST)
11220 return x;
11221 term = XEXP (x, 0);
11222 if (GET_CODE (term) == PLUS
11223 && (CONST_INT_P (XEXP (term, 1))
11224 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
11225 term = XEXP (term, 0);
11226 if (GET_CODE (term) != UNSPEC
11227 || XINT (term, 1) != UNSPEC_GOTPCREL)
11228 return x;
11230 return XVECEXP (term, 0, 0);
11233 return ix86_delegitimize_address (x);
11236 static void
11237 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
11238 int fp, FILE *file)
11240 const char *suffix;
11242 if (mode == CCFPmode || mode == CCFPUmode)
11244 code = ix86_fp_compare_code_to_integer (code);
11245 mode = CCmode;
11247 if (reverse)
11248 code = reverse_condition (code);
11250 switch (code)
11252 case EQ:
11253 switch (mode)
11255 case CCAmode:
11256 suffix = "a";
11257 break;
11259 case CCCmode:
11260 suffix = "c";
11261 break;
11263 case CCOmode:
11264 suffix = "o";
11265 break;
11267 case CCSmode:
11268 suffix = "s";
11269 break;
11271 default:
11272 suffix = "e";
11274 break;
11275 case NE:
11276 switch (mode)
11278 case CCAmode:
11279 suffix = "na";
11280 break;
11282 case CCCmode:
11283 suffix = "nc";
11284 break;
11286 case CCOmode:
11287 suffix = "no";
11288 break;
11290 case CCSmode:
11291 suffix = "ns";
11292 break;
11294 default:
11295 suffix = "ne";
11297 break;
11298 case GT:
11299 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11300 suffix = "g";
11301 break;
11302 case GTU:
11303 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11304 Those same assemblers have the same but opposite lossage on cmov. */
11305 if (mode == CCmode)
11306 suffix = fp ? "nbe" : "a";
11307 else if (mode == CCCmode)
11308 suffix = "b";
11309 else
11310 gcc_unreachable ();
11311 break;
11312 case LT:
11313 switch (mode)
11315 case CCNOmode:
11316 case CCGOCmode:
11317 suffix = "s";
11318 break;
11320 case CCmode:
11321 case CCGCmode:
11322 suffix = "l";
11323 break;
11325 default:
11326 gcc_unreachable ();
11328 break;
11329 case LTU:
11330 gcc_assert (mode == CCmode || mode == CCCmode);
11331 suffix = "b";
11332 break;
11333 case GE:
11334 switch (mode)
11336 case CCNOmode:
11337 case CCGOCmode:
11338 suffix = "ns";
11339 break;
11341 case CCmode:
11342 case CCGCmode:
11343 suffix = "ge";
11344 break;
11346 default:
11347 gcc_unreachable ();
11349 break;
11350 case GEU:
11351 /* ??? As above. */
11352 gcc_assert (mode == CCmode || mode == CCCmode);
11353 suffix = fp ? "nb" : "ae";
11354 break;
11355 case LE:
11356 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11357 suffix = "le";
11358 break;
11359 case LEU:
11360 /* ??? As above. */
11361 if (mode == CCmode)
11362 suffix = "be";
11363 else if (mode == CCCmode)
11364 suffix = fp ? "nb" : "ae";
11365 else
11366 gcc_unreachable ();
11367 break;
11368 case UNORDERED:
11369 suffix = fp ? "u" : "p";
11370 break;
11371 case ORDERED:
11372 suffix = fp ? "nu" : "np";
11373 break;
11374 default:
11375 gcc_unreachable ();
11377 fputs (suffix, file);
11380 /* Print the name of register X to FILE based on its machine mode and number.
11381 If CODE is 'w', pretend the mode is HImode.
11382 If CODE is 'b', pretend the mode is QImode.
11383 If CODE is 'k', pretend the mode is SImode.
11384 If CODE is 'q', pretend the mode is DImode.
11385 If CODE is 'x', pretend the mode is V4SFmode.
11386 If CODE is 't', pretend the mode is V8SFmode.
11387 If CODE is 'h', pretend the reg is the 'high' byte register.
11388 If CODE is 'y', print "st(0)" instead of "st" if the reg is a stack op.
11389 If CODE is 'd', duplicate the operand for AVX instruction.
11392 void
11393 print_reg (rtx x, int code, FILE *file)
11395 const char *reg;
11396 bool duplicated = code == 'd' && TARGET_AVX;
11398 gcc_assert (x == pc_rtx
11399 || (REGNO (x) != ARG_POINTER_REGNUM
11400 && REGNO (x) != FRAME_POINTER_REGNUM
11401 && REGNO (x) != FLAGS_REG
11402 && REGNO (x) != FPSR_REG
11403 && REGNO (x) != FPCR_REG));
11405 if (ASSEMBLER_DIALECT == ASM_ATT)
11406 putc ('%', file);
11408 if (x == pc_rtx)
11410 gcc_assert (TARGET_64BIT);
11411 fputs ("rip", file);
11412 return;
11415 if (code == 'w' || MMX_REG_P (x))
11416 code = 2;
11417 else if (code == 'b')
11418 code = 1;
11419 else if (code == 'k')
11420 code = 4;
11421 else if (code == 'q')
11422 code = 8;
11423 else if (code == 'y')
11424 code = 3;
11425 else if (code == 'h')
11426 code = 0;
11427 else if (code == 'x')
11428 code = 16;
11429 else if (code == 't')
11430 code = 32;
11431 else
11432 code = GET_MODE_SIZE (GET_MODE (x));
11434 /* Irritatingly, AMD extended registers use a different naming convention
11435 from the normal registers. */
11436 if (REX_INT_REG_P (x))
11438 gcc_assert (TARGET_64BIT);
11439 switch (code)
11441 case 0:
11442 error ("extended registers have no high halves");
11443 break;
11444 case 1:
11445 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11446 break;
11447 case 2:
11448 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11449 break;
11450 case 4:
11451 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11452 break;
11453 case 8:
11454 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11455 break;
11456 default:
11457 error ("unsupported operand size for extended register");
11458 break;
11460 return;
11463 reg = NULL;
11464 switch (code)
11466 case 3:
11467 if (STACK_TOP_P (x))
11469 reg = "st(0)";
11470 break;
11472 /* FALLTHRU */
11473 case 8:
11474 case 4:
11475 case 12:
11476 if (! ANY_FP_REG_P (x))
11477 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11478 /* FALLTHRU */
11479 case 16:
11480 case 2:
11481 normal:
11482 reg = hi_reg_name[REGNO (x)];
11483 break;
11484 case 1:
11485 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11486 goto normal;
11487 reg = qi_reg_name[REGNO (x)];
11488 break;
11489 case 0:
11490 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11491 goto normal;
11492 reg = qi_high_reg_name[REGNO (x)];
11493 break;
11494 case 32:
11495 if (SSE_REG_P (x))
11497 gcc_assert (!duplicated);
11498 putc ('y', file);
11499 fputs (hi_reg_name[REGNO (x)] + 1, file);
11500 return;
11502 break;
11503 default:
11504 gcc_unreachable ();
11507 fputs (reg, file);
11508 if (duplicated)
11510 if (ASSEMBLER_DIALECT == ASM_ATT)
11511 fprintf (file, ", %%%s", reg);
11512 else
11513 fprintf (file, ", %s", reg);
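/* As a concrete illustration of the size codes (AT&T syntax assumed):
   for hard register ax, code 'k' prints "%eax", 'w' prints "%ax",
   'b' prints "%al" and 'h' prints "%ah"; for the REX register r8 the
   same codes print "%r8d", "%r8w" and "%r8b", while 'h' is rejected
   because the extended registers have no high-byte halves.  */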
11517 /* Locate some local-dynamic symbol still in use by this function
11518 so that we can print its name in some tls_local_dynamic_base
11519 pattern. */
11521 static int
11522 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11524 rtx x = *px;
11526 if (GET_CODE (x) == SYMBOL_REF
11527 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11529 cfun->machine->some_ld_name = XSTR (x, 0);
11530 return 1;
11533 return 0;
11536 static const char *
11537 get_some_local_dynamic_name (void)
11539 rtx insn;
11541 if (cfun->machine->some_ld_name)
11542 return cfun->machine->some_ld_name;
11544 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11545 if (NONDEBUG_INSN_P (insn)
11546 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11547 return cfun->machine->some_ld_name;
11549 return NULL;
11552 /* Meaning of CODE:
11553 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11554 C -- print opcode suffix for set/cmov insn.
11555 c -- like C, but print reversed condition
11556 F,f -- likewise, but for floating-point.
11557 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11558 otherwise nothing
11559 R -- print the prefix for register names.
11560 z -- print the opcode suffix for the size of the current operand.
11561 Z -- likewise, with special suffixes for x87 instructions.
11562 * -- print a star (in certain assembler syntax)
11563 A -- print an absolute memory reference.
11564 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11565 s -- print a shift double count, followed by the assembler's argument
11566 delimiter.
11567 b -- print the QImode name of the register for the indicated operand.
11568 %b0 would print %al if operands[0] is reg 0.
11569 w -- likewise, print the HImode name of the register.
11570 k -- likewise, print the SImode name of the register.
11571 q -- likewise, print the DImode name of the register.
11572 x -- likewise, print the V4SFmode name of the register.
11573 t -- likewise, print the V8SFmode name of the register.
11574 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11575 y -- print "st(0)" instead of "st" as a register.
11576 d -- print duplicated register operand for AVX instruction.
11577 D -- print condition for SSE cmp instruction.
11578 P -- if PIC, print an @PLT suffix.
11579 X -- don't print any sort of PIC '@' suffix for a symbol.
11580 & -- print some in-use local-dynamic symbol name.
11581 H -- print a memory address offset by 8; used for sse high-parts
11582 Y -- print condition for XOP pcom* instruction.
11583 + -- print a branch hint as 'cs' or 'ds' prefix
11584 ; -- print a semicolon (after prefixes, due to a bug in older gas).
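/* For example (illustrative only): with an SImode register destination and
   an immediate source operand, the template "mov%z0\t{%1, %0|%0, %1}"
   prints as "movl\t$5, %eax" under AT&T syntax and as "mov\teax, 5" under
   Intel syntax, since 'z' supplies the "l" suffix only in the AT&T dialect.  */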
11587 void
11588 print_operand (FILE *file, rtx x, int code)
11590 if (code)
11592 switch (code)
11594 case '*':
11595 if (ASSEMBLER_DIALECT == ASM_ATT)
11596 putc ('*', file);
11597 return;
11599 case '&':
11601 const char *name = get_some_local_dynamic_name ();
11602 if (name == NULL)
11603 output_operand_lossage ("'%%&' used without any "
11604 "local dynamic TLS references");
11605 else
11606 assemble_name (file, name);
11607 return;
11610 case 'A':
11611 switch (ASSEMBLER_DIALECT)
11613 case ASM_ATT:
11614 putc ('*', file);
11615 break;
11617 case ASM_INTEL:
11618 /* Intel syntax. For absolute addresses, registers should not
11619 be surrounded by brackets. */
11620 if (!REG_P (x))
11622 putc ('[', file);
11623 PRINT_OPERAND (file, x, 0);
11624 putc (']', file);
11625 return;
11627 break;
11629 default:
11630 gcc_unreachable ();
11633 PRINT_OPERAND (file, x, 0);
11634 return;
11637 case 'L':
11638 if (ASSEMBLER_DIALECT == ASM_ATT)
11639 putc ('l', file);
11640 return;
11642 case 'W':
11643 if (ASSEMBLER_DIALECT == ASM_ATT)
11644 putc ('w', file);
11645 return;
11647 case 'B':
11648 if (ASSEMBLER_DIALECT == ASM_ATT)
11649 putc ('b', file);
11650 return;
11652 case 'Q':
11653 if (ASSEMBLER_DIALECT == ASM_ATT)
11654 putc ('l', file);
11655 return;
11657 case 'S':
11658 if (ASSEMBLER_DIALECT == ASM_ATT)
11659 putc ('s', file);
11660 return;
11662 case 'T':
11663 if (ASSEMBLER_DIALECT == ASM_ATT)
11664 putc ('t', file);
11665 return;
11667 case 'z':
11668 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11670 /* Opcodes don't get size suffixes if using Intel opcodes. */
11671 if (ASSEMBLER_DIALECT == ASM_INTEL)
11672 return;
11674 switch (GET_MODE_SIZE (GET_MODE (x)))
11676 case 1:
11677 putc ('b', file);
11678 return;
11680 case 2:
11681 putc ('w', file);
11682 return;
11684 case 4:
11685 putc ('l', file);
11686 return;
11688 case 8:
11689 putc ('q', file);
11690 return;
11692 default:
11693 output_operand_lossage
11694 ("invalid operand size for operand code '%c'", code);
11695 return;
11699 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11700 warning
11701 (0, "non-integer operand used with operand code '%c'", code);
11702 /* FALLTHRU */
11704 case 'Z':
11705 /* 387 opcodes don't get size suffixes if using Intel opcodes. */
11706 if (ASSEMBLER_DIALECT == ASM_INTEL)
11707 return;
11709 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11711 switch (GET_MODE_SIZE (GET_MODE (x)))
11713 case 2:
11714 #ifdef HAVE_AS_IX86_FILDS
11715 putc ('s', file);
11716 #endif
11717 return;
11719 case 4:
11720 putc ('l', file);
11721 return;
11723 case 8:
11724 #ifdef HAVE_AS_IX86_FILDQ
11725 putc ('q', file);
11726 #else
11727 fputs ("ll", file);
11728 #endif
11729 return;
11731 default:
11732 break;
11735 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11737 /* 387 opcodes don't get size suffixes
11738 if the operands are registers. */
11739 if (STACK_REG_P (x))
11740 return;
11742 switch (GET_MODE_SIZE (GET_MODE (x)))
11744 case 4:
11745 putc ('s', file);
11746 return;
11748 case 8:
11749 putc ('l', file);
11750 return;
11752 case 12:
11753 case 16:
11754 putc ('t', file);
11755 return;
11757 default:
11758 break;
11761 else
11763 output_operand_lossage
11764 ("invalid operand type used with operand code '%c'", code);
11765 return;
11768 output_operand_lossage
11769 ("invalid operand size for operand code '%c'", code);
11770 return;
11772 case 'd':
11773 case 'b':
11774 case 'w':
11775 case 'k':
11776 case 'q':
11777 case 'h':
11778 case 't':
11779 case 'y':
11780 case 'x':
11781 case 'X':
11782 case 'P':
11783 break;
11785 case 's':
11786 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11788 PRINT_OPERAND (file, x, 0);
11789 fputs (", ", file);
11791 return;
11793 case 'D':
11794 /* A little bit of brain damage here. The SSE compare instructions
11795 use completely different names for the comparisons than the
11796 fp conditional moves do. */
11797 if (TARGET_AVX)
11799 switch (GET_CODE (x))
11801 case EQ:
11802 fputs ("eq", file);
11803 break;
11804 case UNEQ:
11805 fputs ("eq_us", file);
11806 break;
11807 case LT:
11808 fputs ("lt", file);
11809 break;
11810 case UNLT:
11811 fputs ("nge", file);
11812 break;
11813 case LE:
11814 fputs ("le", file);
11815 break;
11816 case UNLE:
11817 fputs ("ngt", file);
11818 break;
11819 case UNORDERED:
11820 fputs ("unord", file);
11821 break;
11822 case NE:
11823 fputs ("neq", file);
11824 break;
11825 case LTGT:
11826 fputs ("neq_oq", file);
11827 break;
11828 case GE:
11829 fputs ("ge", file);
11830 break;
11831 case UNGE:
11832 fputs ("nlt", file);
11833 break;
11834 case GT:
11835 fputs ("gt", file);
11836 break;
11837 case UNGT:
11838 fputs ("nle", file);
11839 break;
11840 case ORDERED:
11841 fputs ("ord", file);
11842 break;
11843 default:
11844 output_operand_lossage ("operand is not a condition code, "
11845 "invalid operand code 'D'");
11846 return;
11849 else
11851 switch (GET_CODE (x))
11853 case EQ:
11854 case UNEQ:
11855 fputs ("eq", file);
11856 break;
11857 case LT:
11858 case UNLT:
11859 fputs ("lt", file);
11860 break;
11861 case LE:
11862 case UNLE:
11863 fputs ("le", file);
11864 break;
11865 case UNORDERED:
11866 fputs ("unord", file);
11867 break;
11868 case NE:
11869 case LTGT:
11870 fputs ("neq", file);
11871 break;
11872 case UNGE:
11873 case GE:
11874 fputs ("nlt", file);
11875 break;
11876 case UNGT:
11877 case GT:
11878 fputs ("nle", file);
11879 break;
11880 case ORDERED:
11881 fputs ("ord", file);
11882 break;
11883 default:
11884 output_operand_lossage ("operand is not a condition code, "
11885 "invalid operand code 'D'");
11886 return;
11889 return;
11890 case 'O':
11891 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11892 if (ASSEMBLER_DIALECT == ASM_ATT)
11894 switch (GET_MODE (x))
11896 case HImode: putc ('w', file); break;
11897 case SImode:
11898 case SFmode: putc ('l', file); break;
11899 case DImode:
11900 case DFmode: putc ('q', file); break;
11901 default: gcc_unreachable ();
11903 putc ('.', file);
11905 #endif
11906 return;
11907 case 'C':
11908 if (!COMPARISON_P (x))
11910 output_operand_lossage ("operand is neither a constant nor a "
11911 "condition code, invalid operand code "
11912 "'C'");
11913 return;
11915 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11916 return;
11917 case 'F':
11918 if (!COMPARISON_P (x))
11920 output_operand_lossage ("operand is neither a constant nor a "
11921 "condition code, invalid operand code "
11922 "'F'");
11923 return;
11925 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11926 if (ASSEMBLER_DIALECT == ASM_ATT)
11927 putc ('.', file);
11928 #endif
11929 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11930 return;
11932 /* Like above, but reverse condition */
11933 case 'c':
11934 /* Check to see if argument to %c is really a constant
11935 and not a condition code which needs to be reversed. */
11936 if (!COMPARISON_P (x))
11938 output_operand_lossage ("operand is neither a constant nor a "
11939 "condition code, invalid operand "
11940 "code 'c'");
11941 return;
11943 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11944 return;
11945 case 'f':
11946 if (!COMPARISON_P (x))
11948 output_operand_lossage ("operand is neither a constant nor a "
11949 "condition code, invalid operand "
11950 "code 'f'");
11951 return;
11953 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11954 if (ASSEMBLER_DIALECT == ASM_ATT)
11955 putc ('.', file);
11956 #endif
11957 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11958 return;
11960 case 'H':
11961 /* It doesn't actually matter what mode we use here, as we're
11962 only going to use this for printing. */
11963 x = adjust_address_nv (x, DImode, 8);
11964 break;
11966 case '+':
11968 rtx x;
11970 if (!optimize
11971 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11972 return;
11974 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11975 if (x)
11977 int pred_val = INTVAL (XEXP (x, 0));
11979 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11980 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11982 int taken = pred_val > REG_BR_PROB_BASE / 2;
11983 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11985 /* Emit hints only in cases where the default branch prediction
11986 heuristics would fail. */
11987 if (taken != cputaken)
11989 /* We use 3e (DS) prefix for taken branches and
11990 2e (CS) prefix for not taken branches. */
11991 if (taken)
11992 fputs ("ds ; ", file);
11993 else
11994 fputs ("cs ; ", file);
11998 return;
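	  /* A worked example of the rule above (with assumed probabilities,
	     for illustration): a forward branch annotated as 90% taken
	     disagrees with the static not-taken prediction for forward
	     branches, so it gets the "ds" prefix; a loop back edge with
	     the same probability is already statically predicted taken
	     and gets no prefix; and anything between 45% and 55% is
	     considered too close to call, so no prefix is emitted.  */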
12001 case 'Y':
12002 switch (GET_CODE (x))
12004 case NE:
12005 fputs ("neq", file);
12006 break;
12007 case EQ:
12008 fputs ("eq", file);
12009 break;
12010 case GE:
12011 case GEU:
12012 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
12013 break;
12014 case GT:
12015 case GTU:
12016 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
12017 break;
12018 case LE:
12019 case LEU:
12020 fputs ("le", file);
12021 break;
12022 case LT:
12023 case LTU:
12024 fputs ("lt", file);
12025 break;
12026 case UNORDERED:
12027 fputs ("unord", file);
12028 break;
12029 case ORDERED:
12030 fputs ("ord", file);
12031 break;
12032 case UNEQ:
12033 fputs ("ueq", file);
12034 break;
12035 case UNGE:
12036 fputs ("nlt", file);
12037 break;
12038 case UNGT:
12039 fputs ("nle", file);
12040 break;
12041 case UNLE:
12042 fputs ("ule", file);
12043 break;
12044 case UNLT:
12045 fputs ("ult", file);
12046 break;
12047 case LTGT:
12048 fputs ("une", file);
12049 break;
12050 default:
12051 output_operand_lossage ("operand is not a condition code, "
12052 "invalid operand code 'Y'");
12053 return;
12055 return;
12057 case ';':
12058 #if TARGET_MACHO || !HAVE_AS_IX86_REP_LOCK_PREFIX
12059 fputs (";", file);
12060 #endif
12061 return;
12063 default:
12064 output_operand_lossage ("invalid operand code '%c'", code);
12068 if (REG_P (x))
12069 print_reg (x, code, file);
12071 else if (MEM_P (x))
12073 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
12074 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
12075 && GET_MODE (x) != BLKmode)
12077 const char * size;
12078 switch (GET_MODE_SIZE (GET_MODE (x)))
12080 case 1: size = "BYTE"; break;
12081 case 2: size = "WORD"; break;
12082 case 4: size = "DWORD"; break;
12083 case 8: size = "QWORD"; break;
12084 case 12: size = "TBYTE"; break;
12085 case 16:
12086 if (GET_MODE (x) == XFmode)
12087 size = "TBYTE";
12088 else
12089 size = "XMMWORD";
12090 break;
12091 case 32: size = "YMMWORD"; break;
12092 default:
12093 gcc_unreachable ();
12096 /* Check for explicit size override (codes 'b', 'w' and 'k') */
12097 if (code == 'b')
12098 size = "BYTE";
12099 else if (code == 'w')
12100 size = "WORD";
12101 else if (code == 'k')
12102 size = "DWORD";
12104 fputs (size, file);
12105 fputs (" PTR ", file);
12108 x = XEXP (x, 0);
12109 /* Avoid (%rip) for call operands. */
12110 if (CONSTANT_ADDRESS_P (x) && code == 'P'
12111 && !CONST_INT_P (x))
12112 output_addr_const (file, x);
12113 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
12114 output_operand_lossage ("invalid constraints for operand");
12115 else
12116 output_address (x);
12119 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
12121 REAL_VALUE_TYPE r;
12122 long l;
12124 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
12125 REAL_VALUE_TO_TARGET_SINGLE (r, l);
12127 if (ASSEMBLER_DIALECT == ASM_ATT)
12128 putc ('$', file);
12129 fprintf (file, "0x%08lx", (long unsigned int) l);
12132 /* These float cases don't actually occur as immediate operands. */
12133 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
12135 char dstr[30];
12137 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12138 fputs (dstr, file);
12141 else if (GET_CODE (x) == CONST_DOUBLE
12142 && GET_MODE (x) == XFmode)
12144 char dstr[30];
12146 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
12147 fputs (dstr, file);
12150 else
12152 /* We have patterns that allow zero sets of memory, for instance.
12153 In 64-bit mode, we should probably support all 8-byte vectors,
12154 since we can in fact encode that into an immediate. */
12155 if (GET_CODE (x) == CONST_VECTOR)
12157 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
12158 x = const0_rtx;
12161 if (code != 'P')
12163 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
12165 if (ASSEMBLER_DIALECT == ASM_ATT)
12166 putc ('$', file);
12168 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
12169 || GET_CODE (x) == LABEL_REF)
12171 if (ASSEMBLER_DIALECT == ASM_ATT)
12172 putc ('$', file);
12173 else
12174 fputs ("OFFSET FLAT:", file);
12177 if (CONST_INT_P (x))
12178 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
12179 else if (flag_pic)
12180 output_pic_addr_const (file, x, code);
12181 else
12182 output_addr_const (file, x);
12186 /* Print a memory operand whose address is ADDR. */
12188 void
12189 print_operand_address (FILE *file, rtx addr)
12191 struct ix86_address parts;
12192 rtx base, index, disp;
12193 int scale;
12194 int ok = ix86_decompose_address (addr, &parts);
12196 gcc_assert (ok);
12198 base = parts.base;
12199 index = parts.index;
12200 disp = parts.disp;
12201 scale = parts.scale;
12203 switch (parts.seg)
12205 case SEG_DEFAULT:
12206 break;
12207 case SEG_FS:
12208 case SEG_GS:
12209 if (ASSEMBLER_DIALECT == ASM_ATT)
12210 putc ('%', file);
12211 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
12212 break;
12213 default:
12214 gcc_unreachable ();
12217 /* Use the one byte shorter RIP-relative addressing for 64-bit mode. */
12218 if (TARGET_64BIT && !base && !index)
12220 rtx symbol = disp;
12222 if (GET_CODE (disp) == CONST
12223 && GET_CODE (XEXP (disp, 0)) == PLUS
12224 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12225 symbol = XEXP (XEXP (disp, 0), 0);
12227 if (GET_CODE (symbol) == LABEL_REF
12228 || (GET_CODE (symbol) == SYMBOL_REF
12229 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
12230 base = pc_rtx;
12232 if (!base && !index)
12234 /* A displacement-only address requires special attention. */
12236 if (CONST_INT_P (disp))
12238 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12239 fputs ("ds:", file);
12240 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12242 else if (flag_pic)
12243 output_pic_addr_const (file, disp, 0);
12244 else
12245 output_addr_const (file, disp);
12247 else
12249 if (ASSEMBLER_DIALECT == ASM_ATT)
12251 if (disp)
12253 if (flag_pic)
12254 output_pic_addr_const (file, disp, 0);
12255 else if (GET_CODE (disp) == LABEL_REF)
12256 output_asm_label (disp);
12257 else
12258 output_addr_const (file, disp);
12261 putc ('(', file);
12262 if (base)
12263 print_reg (base, 0, file);
12264 if (index)
12266 putc (',', file);
12267 print_reg (index, 0, file);
12268 if (scale != 1)
12269 fprintf (file, ",%d", scale);
12271 putc (')', file);
12273 else
12275 rtx offset = NULL_RTX;
12277 if (disp)
12279 /* Pull out the offset of a symbol; print any symbol itself. */
12280 if (GET_CODE (disp) == CONST
12281 && GET_CODE (XEXP (disp, 0)) == PLUS
12282 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12284 offset = XEXP (XEXP (disp, 0), 1);
12285 disp = gen_rtx_CONST (VOIDmode,
12286 XEXP (XEXP (disp, 0), 0));
12289 if (flag_pic)
12290 output_pic_addr_const (file, disp, 0);
12291 else if (GET_CODE (disp) == LABEL_REF)
12292 output_asm_label (disp);
12293 else if (CONST_INT_P (disp))
12294 offset = disp;
12295 else
12296 output_addr_const (file, disp);
12299 putc ('[', file);
12300 if (base)
12302 print_reg (base, 0, file);
12303 if (offset)
12305 if (INTVAL (offset) >= 0)
12306 putc ('+', file);
12307 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12310 else if (offset)
12311 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12312 else
12313 putc ('0', file);
12315 if (index)
12317 putc ('+', file);
12318 print_reg (index, 0, file);
12319 if (scale != 1)
12320 fprintf (file, "*%d", scale);
12322 putc (']', file);
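/* As a concrete example of the two dialects (illustrative only): the
   address (plus (plus (reg %ebx) (mult (reg %esi) (const_int 4)))
   (const_int 8)) prints as "8(%ebx,%esi,4)" in AT&T syntax and as
   "[ebx+8+esi*4]" in Intel syntax; a bare symbolic displacement in
   64-bit code picks up the pc-relative base and prints as "foo(%rip)"
   or "foo[rip]".  */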
12327 bool
12328 output_addr_const_extra (FILE *file, rtx x)
12330 rtx op;
12332 if (GET_CODE (x) != UNSPEC)
12333 return false;
12335 op = XVECEXP (x, 0, 0);
12336 switch (XINT (x, 1))
12338 case UNSPEC_GOTTPOFF:
12339 output_addr_const (file, op);
12340 /* FIXME: This might be @TPOFF in Sun ld. */
12341 fputs ("@gottpoff", file);
12342 break;
12343 case UNSPEC_TPOFF:
12344 output_addr_const (file, op);
12345 fputs ("@tpoff", file);
12346 break;
12347 case UNSPEC_NTPOFF:
12348 output_addr_const (file, op);
12349 if (TARGET_64BIT)
12350 fputs ("@tpoff", file);
12351 else
12352 fputs ("@ntpoff", file);
12353 break;
12354 case UNSPEC_DTPOFF:
12355 output_addr_const (file, op);
12356 fputs ("@dtpoff", file);
12357 break;
12358 case UNSPEC_GOTNTPOFF:
12359 output_addr_const (file, op);
12360 if (TARGET_64BIT)
12361 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12362 "@gottpoff(%rip)" : "@gottpoff[rip]", file);
12363 else
12364 fputs ("@gotntpoff", file);
12365 break;
12366 case UNSPEC_INDNTPOFF:
12367 output_addr_const (file, op);
12368 fputs ("@indntpoff", file);
12369 break;
12370 #if TARGET_MACHO
12371 case UNSPEC_MACHOPIC_OFFSET:
12372 output_addr_const (file, op);
12373 putc ('-', file);
12374 machopic_output_function_base_name (file);
12375 break;
12376 #endif
12378 default:
12379 return false;
12382 return true;
12385 /* Split one or more DImode RTL references into pairs of SImode
12386 references. The RTL can be REG, offsettable MEM, integer constant, or
12387 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12388 split and "num" is its length. lo_half and hi_half are output arrays
12389 that parallel "operands". */
12391 void
12392 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12394 while (num--)
12396 rtx op = operands[num];
12398 /* simplify_subreg refuses to split volatile memory addresses,
12399 but we still have to handle them. */
12400 if (MEM_P (op))
12402 lo_half[num] = adjust_address (op, SImode, 0);
12403 hi_half[num] = adjust_address (op, SImode, 4);
12405 else
12407 lo_half[num] = simplify_gen_subreg (SImode, op,
12408 GET_MODE (op) == VOIDmode
12409 ? DImode : GET_MODE (op), 0);
12410 hi_half[num] = simplify_gen_subreg (SImode, op,
12411 GET_MODE (op) == VOIDmode
12412 ? DImode : GET_MODE (op), 4);
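/* For example (a sketch with an assumed constant, little-endian layout):
   splitting the DImode constant 0x1122334455667788 yields
   lo_half = 0x55667788 and hi_half = 0x11223344, and a DImode MEM at
   address A becomes the pair of SImode MEMs at A and A+4.  Stated as
   plain C on the host:

     lo = (unsigned int) (value & 0xffffffff);
     hi = (unsigned int) (value >> 32);
*/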
12416 /* Split one or more TImode RTL references into pairs of DImode
12417 references. The RTL can be REG, offsettable MEM, integer constant, or
12418 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12419 split and "num" is its length. lo_half and hi_half are output arrays
12420 that parallel "operands". */
12422 void
12423 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12425 while (num--)
12427 rtx op = operands[num];
12429 /* simplify_subreg refuses to split volatile memory addresses, but we
12430 still have to handle them. */
12431 if (MEM_P (op))
12433 lo_half[num] = adjust_address (op, DImode, 0);
12434 hi_half[num] = adjust_address (op, DImode, 8);
12436 else
12438 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12439 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12444 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12445 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12446 is the expression of the binary operation. The output may either be
12447 emitted here, or returned to the caller, like all output_* functions.
12449 There is no guarantee that the operands are the same mode, as they
12450 might be within FLOAT or FLOAT_EXTEND expressions. */
12452 #ifndef SYSV386_COMPAT
12453 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12454 wants to fix the assemblers because that causes incompatibility
12455 with gcc. No-one wants to fix gcc because that causes
12456 incompatibility with assemblers... You can use the option of
12457 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12458 #define SYSV386_COMPAT 1
12459 #endif
12461 const char *
12462 output_387_binary_op (rtx insn, rtx *operands)
12464 static char buf[40];
12465 const char *p;
12466 const char *ssep;
12467 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12469 #ifdef ENABLE_CHECKING
12470 /* Even if we do not want to check the inputs, this documents the input
12471 constraints, which helps in understanding the following code. */
12472 if (STACK_REG_P (operands[0])
12473 && ((REG_P (operands[1])
12474 && REGNO (operands[0]) == REGNO (operands[1])
12475 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12476 || (REG_P (operands[2])
12477 && REGNO (operands[0]) == REGNO (operands[2])
12478 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12479 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12480 ; /* ok */
12481 else
12482 gcc_assert (is_sse);
12483 #endif
12485 switch (GET_CODE (operands[3]))
12487 case PLUS:
12488 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12489 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12490 p = "fiadd";
12491 else
12492 p = "fadd";
12493 ssep = "vadd";
12494 break;
12496 case MINUS:
12497 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12498 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12499 p = "fisub";
12500 else
12501 p = "fsub";
12502 ssep = "vsub";
12503 break;
12505 case MULT:
12506 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12507 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12508 p = "fimul";
12509 else
12510 p = "fmul";
12511 ssep = "vmul";
12512 break;
12514 case DIV:
12515 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12516 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12517 p = "fidiv";
12518 else
12519 p = "fdiv";
12520 ssep = "vdiv";
12521 break;
12523 default:
12524 gcc_unreachable ();
12527 if (is_sse)
12529 if (TARGET_AVX)
12531 strcpy (buf, ssep);
12532 if (GET_MODE (operands[0]) == SFmode)
12533 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12534 else
12535 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12537 else
12539 strcpy (buf, ssep + 1);
12540 if (GET_MODE (operands[0]) == SFmode)
12541 strcat (buf, "ss\t{%2, %0|%0, %2}");
12542 else
12543 strcat (buf, "sd\t{%2, %0|%0, %2}");
12545 return buf;
12547 strcpy (buf, p);
12549 switch (GET_CODE (operands[3]))
12551 case MULT:
12552 case PLUS:
12553 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12555 rtx temp = operands[2];
12556 operands[2] = operands[1];
12557 operands[1] = temp;
12560 /* We now know that operands[0] == operands[1]. */
12562 if (MEM_P (operands[2]))
12564 p = "%Z2\t%2";
12565 break;
12568 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12570 if (STACK_TOP_P (operands[0]))
12571 /* How is it that we are storing to a dead operand[2]?
12572 Well, presumably operands[1] is dead too. We can't
12573 store the result to st(0) as st(0) gets popped on this
12574 instruction. Instead store to operands[2] (which I
12575 think has to be st(1)). st(1) will be popped later.
12576 gcc <= 2.8.1 didn't have this check and generated
12577 assembly code that the Unixware assembler rejected. */
12578 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12579 else
12580 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12581 break;
12584 if (STACK_TOP_P (operands[0]))
12585 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12586 else
12587 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12588 break;
12590 case MINUS:
12591 case DIV:
12592 if (MEM_P (operands[1]))
12594 p = "r%Z1\t%1";
12595 break;
12598 if (MEM_P (operands[2]))
12600 p = "%Z2\t%2";
12601 break;
12604 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12606 #if SYSV386_COMPAT
12607 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12608 derived assemblers, confusingly reverse the direction of
12609 the operation for fsub{r} and fdiv{r} when the
12610 destination register is not st(0). The Intel assembler
12611 doesn't have this brain damage. Read !SYSV386_COMPAT to
12612 figure out what the hardware really does. */
12613 if (STACK_TOP_P (operands[0]))
12614 p = "{p\t%0, %2|rp\t%2, %0}";
12615 else
12616 p = "{rp\t%2, %0|p\t%0, %2}";
12617 #else
12618 if (STACK_TOP_P (operands[0]))
12619 /* As above for fmul/fadd, we can't store to st(0). */
12620 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12621 else
12622 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12623 #endif
12624 break;
12627 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12629 #if SYSV386_COMPAT
12630 if (STACK_TOP_P (operands[0]))
12631 p = "{rp\t%0, %1|p\t%1, %0}";
12632 else
12633 p = "{p\t%1, %0|rp\t%0, %1}";
12634 #else
12635 if (STACK_TOP_P (operands[0]))
12636 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12637 else
12638 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12639 #endif
12640 break;
12643 if (STACK_TOP_P (operands[0]))
12645 if (STACK_TOP_P (operands[1]))
12646 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12647 else
12648 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12649 break;
12651 else if (STACK_TOP_P (operands[1]))
12653 #if SYSV386_COMPAT
12654 p = "{\t%1, %0|r\t%0, %1}";
12655 #else
12656 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12657 #endif
12659 else
12661 #if SYSV386_COMPAT
12662 p = "{r\t%2, %0|\t%0, %2}";
12663 #else
12664 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12665 #endif
12667 break;
12669 default:
12670 gcc_unreachable ();
12673 strcat (buf, p);
12674 return buf;
12677 /* Return the mode needed for ENTITY in the optimize_mode_switching pass. */
12680 ix86_mode_needed (int entity, rtx insn)
12682 enum attr_i387_cw mode;
12684 /* The mode UNINITIALIZED is used to store the control word after a
12685 function call or ASM pattern. The mode ANY specifies that the function
12686 has no requirements on the control word and makes no changes to the
12687 bits we are interested in. */
12689 if (CALL_P (insn)
12690 || (NONJUMP_INSN_P (insn)
12691 && (asm_noperands (PATTERN (insn)) >= 0
12692 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12693 return I387_CW_UNINITIALIZED;
12695 if (recog_memoized (insn) < 0)
12696 return I387_CW_ANY;
12698 mode = get_attr_i387_cw (insn);
12700 switch (entity)
12702 case I387_TRUNC:
12703 if (mode == I387_CW_TRUNC)
12704 return mode;
12705 break;
12707 case I387_FLOOR:
12708 if (mode == I387_CW_FLOOR)
12709 return mode;
12710 break;
12712 case I387_CEIL:
12713 if (mode == I387_CW_CEIL)
12714 return mode;
12715 break;
12717 case I387_MASK_PM:
12718 if (mode == I387_CW_MASK_PM)
12719 return mode;
12720 break;
12722 default:
12723 gcc_unreachable ();
12726 return I387_CW_ANY;
12729 /* Output code to initialize control word copies used by trunc?f?i and
12730 rounding patterns. CURRENT_MODE is set to the current control word,
12731 while NEW_MODE is set to the new control word. */
12733 void
12734 emit_i387_cw_initialization (int mode)
12736 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12737 rtx new_mode;
12739 enum ix86_stack_slot slot;
12741 rtx reg = gen_reg_rtx (HImode);
12743 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12744 emit_move_insn (reg, copy_rtx (stored_mode));
12746 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12747 || optimize_function_for_size_p (cfun))
12749 switch (mode)
12751 case I387_CW_TRUNC:
12752 /* round toward zero (truncate) */
12753 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12754 slot = SLOT_CW_TRUNC;
12755 break;
12757 case I387_CW_FLOOR:
12758 /* round down toward -oo */
12759 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12760 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12761 slot = SLOT_CW_FLOOR;
12762 break;
12764 case I387_CW_CEIL:
12765 /* round up toward +oo */
12766 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12767 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12768 slot = SLOT_CW_CEIL;
12769 break;
12771 case I387_CW_MASK_PM:
12772 /* mask precision exception for nearbyint() */
12773 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12774 slot = SLOT_CW_MASK_PM;
12775 break;
12777 default:
12778 gcc_unreachable ();
12781 else
12783 switch (mode)
12785 case I387_CW_TRUNC:
12786 /* round toward zero (truncate) */
12787 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12788 slot = SLOT_CW_TRUNC;
12789 break;
12791 case I387_CW_FLOOR:
12792 /* round down toward -oo */
12793 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12794 slot = SLOT_CW_FLOOR;
12795 break;
12797 case I387_CW_CEIL:
12798 /* round up toward +oo */
12799 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12800 slot = SLOT_CW_CEIL;
12801 break;
12803 case I387_CW_MASK_PM:
12804 /* mask precision exception for nearbyint() */
12805 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12806 slot = SLOT_CW_MASK_PM;
12807 break;
12809 default:
12810 gcc_unreachable ();
12814 gcc_assert (slot < MAX_386_STACK_LOCALS);
12816 new_mode = assign_386_stack_local (HImode, slot);
12817 emit_move_insn (new_mode, reg);
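/* The control word bits used above, spelled out as a stand-alone sketch
   (not used by the compiler; the constants follow the 387 control word
   layout, where bits 10-11 are the rounding control and bit 5 masks the
   precision exception):

     unsigned short
     i387_cw_round (unsigned short cw, int rc)
     {
       cw &= ~0x0c00;		/* clear rounding control: round to nearest */
       if (rc == 1)
	 cw |= 0x0400;		/* round down, toward -inf (floor) */
       else if (rc == 2)
	 cw |= 0x0800;		/* round up, toward +inf (ceil) */
       else if (rc == 3)
	 cw |= 0x0c00;		/* round toward zero (trunc) */
       return cw;
     }

   and masking the precision exception for nearbyint corresponds to
   "cw |= 0x0020".  */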
12820 /* Output code for INSN to convert a float to a signed int. OPERANDS
12821 are the insn operands. The output may be [HSD]Imode and the input
12822 operand may be [SDX]Fmode. */
12824 const char *
12825 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12827 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12828 int dimode_p = GET_MODE (operands[0]) == DImode;
12829 int round_mode = get_attr_i387_cw (insn);
12831 /* Jump through a hoop or two for DImode, since the hardware has no
12832 non-popping instruction. We used to do this a different way, but
12833 that was somewhat fragile and broke with post-reload splitters. */
12834 if ((dimode_p || fisttp) && !stack_top_dies)
12835 output_asm_insn ("fld\t%y1", operands);
12837 gcc_assert (STACK_TOP_P (operands[1]));
12838 gcc_assert (MEM_P (operands[0]));
12839 gcc_assert (GET_MODE (operands[1]) != TFmode);
12841 if (fisttp)
12842 output_asm_insn ("fisttp%Z0\t%0", operands);
12843 else
12845 if (round_mode != I387_CW_ANY)
12846 output_asm_insn ("fldcw\t%3", operands);
12847 if (stack_top_dies || dimode_p)
12848 output_asm_insn ("fistp%Z0\t%0", operands);
12849 else
12850 output_asm_insn ("fist%Z0\t%0", operands);
12851 if (round_mode != I387_CW_ANY)
12852 output_asm_insn ("fldcw\t%2", operands);
12855 return "";
12858 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12859 have the values zero or one, indicates the ffreep insn's operand
12860 from the OPERANDS array. */
12862 static const char *
12863 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12865 if (TARGET_USE_FFREEP)
12866 #ifdef HAVE_AS_IX86_FFREEP
12867 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12868 #else
12870 static char retval[32];
12871 int regno = REGNO (operands[opno]);
12873 gcc_assert (FP_REGNO_P (regno));
12875 regno -= FIRST_STACK_REG;
12877 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12878 return retval;
12880 #endif
12882 return opno ? "fstp\t%y1" : "fstp\t%y0";
12886 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12887 should be used. UNORDERED_P is true when fucom should be used. */
12889 const char *
12890 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12892 int stack_top_dies;
12893 rtx cmp_op0, cmp_op1;
12894 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12896 if (eflags_p)
12898 cmp_op0 = operands[0];
12899 cmp_op1 = operands[1];
12901 else
12903 cmp_op0 = operands[1];
12904 cmp_op1 = operands[2];
12907 if (is_sse)
12909 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12910 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12911 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12912 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12914 if (GET_MODE (operands[0]) == SFmode)
12915 if (unordered_p)
12916 return &ucomiss[TARGET_AVX ? 0 : 1];
12917 else
12918 return &comiss[TARGET_AVX ? 0 : 1];
12919 else
12920 if (unordered_p)
12921 return &ucomisd[TARGET_AVX ? 0 : 1];
12922 else
12923 return &comisd[TARGET_AVX ? 0 : 1];
12926 gcc_assert (STACK_TOP_P (cmp_op0));
12928 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12930 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12932 if (stack_top_dies)
12934 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12935 return output_387_ffreep (operands, 1);
12937 else
12938 return "ftst\n\tfnstsw\t%0";
12941 if (STACK_REG_P (cmp_op1)
12942 && stack_top_dies
12943 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12944 && REGNO (cmp_op1) != FIRST_STACK_REG)
12946 /* If the top of the 387 stack dies, and the other operand
12947 is also a stack register that dies, then this must be a
12948 `fcompp' float compare. */
12950 if (eflags_p)
12952 /* There is no double popping fcomi variant. Fortunately,
12953 eflags is immune to the fstp's cc clobbering. */
12954 if (unordered_p)
12955 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12956 else
12957 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12958 return output_387_ffreep (operands, 0);
12960 else
12962 if (unordered_p)
12963 return "fucompp\n\tfnstsw\t%0";
12964 else
12965 return "fcompp\n\tfnstsw\t%0";
12968 else
12970 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12972 static const char * const alt[16] =
12974 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12975 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12976 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12977 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12979 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12980 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12981 NULL,
12982 NULL,
12984 "fcomi\t{%y1, %0|%0, %y1}",
12985 "fcomip\t{%y1, %0|%0, %y1}",
12986 "fucomi\t{%y1, %0|%0, %y1}",
12987 "fucomip\t{%y1, %0|%0, %y1}",
12989 NULL,
12990 NULL,
12991 NULL,
12992 NULL
12995 int mask;
12996 const char *ret;
12998 mask = eflags_p << 3;
12999 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
13000 mask |= unordered_p << 1;
13001 mask |= stack_top_dies;
13003 gcc_assert (mask < 16);
13004 ret = alt[mask];
13005 gcc_assert (ret);
13007 return ret;
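/* Spelling out the table index used above:
   mask = eflags_p * 8 + int_operand * 4 + unordered_p * 2 + stack_top_dies.
   For instance, an unordered eflags-style compare whose stack top dies gives
   mask = 8 + 0 + 2 + 1 = 11, selecting "fucomip\t{%y1, %0|%0, %y1}", while an
   ordered compare against an integer memory operand with a live stack top
   gives mask = 0 + 4 + 0 + 0 = 4, selecting "ficom%Z2\t%y2\n\tfnstsw\t%0".  */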
13011 void
13012 ix86_output_addr_vec_elt (FILE *file, int value)
13014 const char *directive = ASM_LONG;
13016 #ifdef ASM_QUAD
13017 if (TARGET_64BIT)
13018 directive = ASM_QUAD;
13019 #else
13020 gcc_assert (!TARGET_64BIT);
13021 #endif
13023 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
13026 void
13027 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
13029 const char *directive = ASM_LONG;
13031 #ifdef ASM_QUAD
13032 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
13033 directive = ASM_QUAD;
13034 #else
13035 gcc_assert (!TARGET_64BIT);
13036 #endif
13037 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
13038 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
13039 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
13040 directive, value, rel);
13041 else if (HAVE_AS_GOTOFF_IN_DATA)
13042 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
13043 #if TARGET_MACHO
13044 else if (TARGET_MACHO)
13046 fprintf (file, ASM_LONG LPREFIX "%d-", value);
13047 machopic_output_function_base_name (file);
13048 putc ('\n', file);
13050 #endif
13051 else
13052 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
13053 GOT_SYMBOL_NAME, value);
13056 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
13057 for the target. */
13059 void
13060 ix86_expand_clear (rtx dest)
13062 rtx tmp;
13064 /* We play register width games, which are only valid after reload. */
13065 gcc_assert (reload_completed);
13067 /* Avoid HImode and its attendant prefix byte. */
13068 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
13069 dest = gen_rtx_REG (SImode, REGNO (dest));
13070 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
13072 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
13073 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
13075 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13076 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
13079 emit_insn (tmp);
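/* The size motivation, for reference (byte counts for 32-bit code):
   "movl $0, %eax" encodes as b8 00 00 00 00 (5 bytes), while
   "xorl %eax, %eax" encodes as 31 c0 (2 bytes).  The xor form clobbers
   the flags, hence the CLOBBER of FLAGS_REG attached above.  */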
13082 /* X is an unchanging MEM. If it is a constant pool reference, return
13083 the constant pool rtx, else NULL. */
13086 maybe_get_pool_constant (rtx x)
13088 x = ix86_delegitimize_address (XEXP (x, 0));
13090 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
13091 return get_pool_constant (x);
13093 return NULL_RTX;
13096 void
13097 ix86_expand_move (enum machine_mode mode, rtx operands[])
13099 rtx op0, op1;
13100 enum tls_model model;
13102 op0 = operands[0];
13103 op1 = operands[1];
13105 if (GET_CODE (op1) == SYMBOL_REF)
13107 model = SYMBOL_REF_TLS_MODEL (op1);
13108 if (model)
13110 op1 = legitimize_tls_address (op1, model, true);
13111 op1 = force_operand (op1, op0);
13112 if (op1 == op0)
13113 return;
13115 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13116 && SYMBOL_REF_DLLIMPORT_P (op1))
13117 op1 = legitimize_dllimport_symbol (op1, false);
13119 else if (GET_CODE (op1) == CONST
13120 && GET_CODE (XEXP (op1, 0)) == PLUS
13121 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
13123 rtx addend = XEXP (XEXP (op1, 0), 1);
13124 rtx symbol = XEXP (XEXP (op1, 0), 0);
13125 rtx tmp = NULL;
13127 model = SYMBOL_REF_TLS_MODEL (symbol);
13128 if (model)
13129 tmp = legitimize_tls_address (symbol, model, true);
13130 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
13131 && SYMBOL_REF_DLLIMPORT_P (symbol))
13132 tmp = legitimize_dllimport_symbol (symbol, true);
13134 if (tmp)
13136 tmp = force_operand (tmp, NULL);
13137 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
13138 op0, 1, OPTAB_DIRECT);
13139 if (tmp == op0)
13140 return;
13144 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
13146 if (TARGET_MACHO && !TARGET_64BIT)
13148 #if TARGET_MACHO
13149 if (MACHOPIC_PURE)
13151 rtx temp = ((reload_in_progress
13152 || ((op0 && REG_P (op0))
13153 && mode == Pmode))
13154 ? op0 : gen_reg_rtx (Pmode));
13155 op1 = machopic_indirect_data_reference (op1, temp);
13156 op1 = machopic_legitimize_pic_address (op1, mode,
13157 temp == op1 ? 0 : temp);
13159 else if (MACHOPIC_INDIRECT)
13160 op1 = machopic_indirect_data_reference (op1, 0);
13161 if (op0 == op1)
13162 return;
13163 #endif
13165 else
13167 if (MEM_P (op0))
13168 op1 = force_reg (Pmode, op1);
13169 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
13171 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
13172 op1 = legitimize_pic_address (op1, reg);
13173 if (op0 == op1)
13174 return;
13178 else
13180 if (MEM_P (op0)
13181 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
13182 || !push_operand (op0, mode))
13183 && MEM_P (op1))
13184 op1 = force_reg (mode, op1);
13186 if (push_operand (op0, mode)
13187 && ! general_no_elim_operand (op1, mode))
13188 op1 = copy_to_mode_reg (mode, op1);
13190 /* Force large constants in 64-bit compilation into a register
13191 to get them CSEd. */
13192 if (can_create_pseudo_p ()
13193 && (mode == DImode) && TARGET_64BIT
13194 && immediate_operand (op1, mode)
13195 && !x86_64_zext_immediate_operand (op1, VOIDmode)
13196 && !register_operand (op0, mode)
13197 && optimize)
13198 op1 = copy_to_mode_reg (mode, op1);
13200 if (can_create_pseudo_p ()
13201 && FLOAT_MODE_P (mode)
13202 && GET_CODE (op1) == CONST_DOUBLE)
13204 /* If we are loading a floating point constant to a register,
13205 force the value to memory now, since we'll get better code
13206 out of the back end. */
13208 op1 = validize_mem (force_const_mem (mode, op1));
13209 if (!register_operand (op0, mode))
13211 rtx temp = gen_reg_rtx (mode);
13212 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
13213 emit_move_insn (op0, temp);
13214 return;
13219 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13222 void
13223 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
13225 rtx op0 = operands[0], op1 = operands[1];
13226 unsigned int align = GET_MODE_ALIGNMENT (mode);
13228 /* Force constants other than zero into memory. We do not know how
13229 the instructions used to build constants modify the upper 64 bits
13230 of the register; once we have that information we may be able
13231 to handle some of them more efficiently. */
13232 if (can_create_pseudo_p ()
13233 && register_operand (op0, mode)
13234 && (CONSTANT_P (op1)
13235 || (GET_CODE (op1) == SUBREG
13236 && CONSTANT_P (SUBREG_REG (op1))))
13237 && !standard_sse_constant_p (op1))
13238 op1 = validize_mem (force_const_mem (mode, op1));
13240 /* We need to check memory alignment for SSE modes since an attribute
13241 can make operands unaligned. */
13242 if (can_create_pseudo_p ()
13243 && SSE_REG_MODE_P (mode)
13244 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13245 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13247 rtx tmp[2];
13249 /* ix86_expand_vector_move_misalign() does not like constants ... */
13250 if (CONSTANT_P (op1)
13251 || (GET_CODE (op1) == SUBREG
13252 && CONSTANT_P (SUBREG_REG (op1))))
13253 op1 = validize_mem (force_const_mem (mode, op1));
13255 /* ... nor both arguments in memory. */
13256 if (!register_operand (op0, mode)
13257 && !register_operand (op1, mode))
13258 op1 = force_reg (mode, op1);
13260 tmp[0] = op0; tmp[1] = op1;
13261 ix86_expand_vector_move_misalign (mode, tmp);
13262 return;
13265 /* Make operand1 a register if it isn't already. */
13266 if (can_create_pseudo_p ()
13267 && !register_operand (op0, mode)
13268 && !register_operand (op1, mode))
13270 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13271 return;
13274 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13277 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13278 straight to ix86_expand_vector_move. */
13279 /* Code generation for scalar reg-reg moves of single and double precision data:
13280 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13281 movaps reg, reg
13282 else
13283 movss reg, reg
13284 if (x86_sse_partial_reg_dependency == true)
13285 movapd reg, reg
13286 else
13287 movsd reg, reg
13289 Code generation for scalar loads of double precision data:
13290 if (x86_sse_split_regs == true)
13291 movlpd mem, reg (gas syntax)
13292 else
13293 movsd mem, reg
13295 Code generation for unaligned packed loads of single precision data
13296 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13297 if (x86_sse_unaligned_move_optimal)
13298 movups mem, reg
13300 if (x86_sse_partial_reg_dependency == true)
13302 xorps reg, reg
13303 movlps mem, reg
13304 movhps mem+8, reg
13306 else
13308 movlps mem, reg
13309 movhps mem+8, reg
13312 Code generation for unaligned packed loads of double precision data
13313 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13314 if (x86_sse_unaligned_move_optimal)
13315 movupd mem, reg
13317 if (x86_sse_split_regs == true)
13319 movlpd mem, reg
13320 movhpd mem+8, reg
13322 else
13324 movsd mem, reg
13325 movhpd mem+8, reg
13329 void
13330 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13332 rtx op0, op1, m;
13334 op0 = operands[0];
13335 op1 = operands[1];
13337 if (TARGET_AVX)
13339 switch (GET_MODE_CLASS (mode))
13341 case MODE_VECTOR_INT:
13342 case MODE_INT:
13343 switch (GET_MODE_SIZE (mode))
13345 case 16:
13346 /* If we're optimizing for size, movups is the smallest. */
13347 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13349 op0 = gen_lowpart (V4SFmode, op0);
13350 op1 = gen_lowpart (V4SFmode, op1);
13351 emit_insn (gen_avx_movups (op0, op1));
13352 return;
13354 op0 = gen_lowpart (V16QImode, op0);
13355 op1 = gen_lowpart (V16QImode, op1);
13356 emit_insn (gen_avx_movdqu (op0, op1));
13357 break;
13358 case 32:
13359 op0 = gen_lowpart (V32QImode, op0);
13360 op1 = gen_lowpart (V32QImode, op1);
13361 emit_insn (gen_avx_movdqu256 (op0, op1));
13362 break;
13363 default:
13364 gcc_unreachable ();
13366 break;
13367 case MODE_VECTOR_FLOAT:
13368 op0 = gen_lowpart (mode, op0);
13369 op1 = gen_lowpart (mode, op1);
13371 switch (mode)
13373 case V4SFmode:
13374 emit_insn (gen_avx_movups (op0, op1));
13375 break;
13376 case V8SFmode:
13377 emit_insn (gen_avx_movups256 (op0, op1));
13378 break;
13379 case V2DFmode:
13380 if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13382 op0 = gen_lowpart (V4SFmode, op0);
13383 op1 = gen_lowpart (V4SFmode, op1);
13384 emit_insn (gen_avx_movups (op0, op1));
13385 return;
13387 emit_insn (gen_avx_movupd (op0, op1));
13388 break;
13389 case V4DFmode:
13390 emit_insn (gen_avx_movupd256 (op0, op1));
13391 break;
13392 default:
13393 gcc_unreachable ();
13395 break;
13397 default:
13398 gcc_unreachable ();
13401 return;
13404 if (MEM_P (op1))
13406 /* If we're optimizing for size, movups is the smallest. */
13407 if (optimize_insn_for_size_p ()
13408 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13410 op0 = gen_lowpart (V4SFmode, op0);
13411 op1 = gen_lowpart (V4SFmode, op1);
13412 emit_insn (gen_sse_movups (op0, op1));
13413 return;
13416 /* ??? If we have typed data, then it would appear that using
13417 movdqu is the only way to get unaligned data loaded with
13418 integer type. */
13419 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13421 op0 = gen_lowpart (V16QImode, op0);
13422 op1 = gen_lowpart (V16QImode, op1);
13423 emit_insn (gen_sse2_movdqu (op0, op1));
13424 return;
13427 if (TARGET_SSE2 && mode == V2DFmode)
13429 rtx zero;
13431 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
13433 op0 = gen_lowpart (V2DFmode, op0);
13434 op1 = gen_lowpart (V2DFmode, op1);
13435 emit_insn (gen_sse2_movupd (op0, op1));
13436 return;
13439 /* When SSE registers are split into halves, we can avoid
13440 writing to the top half twice. */
13441 if (TARGET_SSE_SPLIT_REGS)
13443 emit_clobber (op0);
13444 zero = op0;
13446 else
13448 /* ??? Not sure about the best option for the Intel chips.
13449 The following would seem to satisfy; the register is
13450 entirely cleared, breaking the dependency chain. We
13451 then store to the upper half, with a dependency depth
13452 of one. A rumor has it that Intel recommends two movsd
13453 followed by an unpacklpd, but this is unconfirmed. And
13454 given that the dependency depth of the unpacklpd would
13455 still be one, I'm not sure why this would be better. */
13456 zero = CONST0_RTX (V2DFmode);
13459 m = adjust_address (op1, DFmode, 0);
13460 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13461 m = adjust_address (op1, DFmode, 8);
13462 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13464 else
13466 if (TARGET_SSE_UNALIGNED_LOAD_OPTIMAL)
13468 op0 = gen_lowpart (V4SFmode, op0);
13469 op1 = gen_lowpart (V4SFmode, op1);
13470 emit_insn (gen_sse_movups (op0, op1));
13471 return;
13474 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13475 emit_move_insn (op0, CONST0_RTX (mode));
13476 else
13477 emit_clobber (op0);
13479 if (mode != V4SFmode)
13480 op0 = gen_lowpart (V4SFmode, op0);
13481 m = adjust_address (op1, V2SFmode, 0);
13482 emit_insn (gen_sse_loadlps (op0, op0, m));
13483 m = adjust_address (op1, V2SFmode, 8);
13484 emit_insn (gen_sse_loadhps (op0, op0, m));
13487 else if (MEM_P (op0))
13489 /* If we're optimizing for size, movups is the smallest. */
13490 if (optimize_insn_for_size_p ()
13491 || TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
13493 op0 = gen_lowpart (V4SFmode, op0);
13494 op1 = gen_lowpart (V4SFmode, op1);
13495 emit_insn (gen_sse_movups (op0, op1));
13496 return;
13499 /* ??? Similar to above, only less clear because of
13500 "typeless stores". */
13501 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13502 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13504 op0 = gen_lowpart (V16QImode, op0);
13505 op1 = gen_lowpart (V16QImode, op1);
13506 emit_insn (gen_sse2_movdqu (op0, op1));
13507 return;
13510 if (TARGET_SSE2 && mode == V2DFmode)
13512 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
13514 op0 = gen_lowpart (V2DFmode, op0);
13515 op1 = gen_lowpart (V2DFmode, op1);
13516 emit_insn (gen_sse2_movupd (op0, op1));
13518 else
13520 m = adjust_address (op0, DFmode, 0);
13521 emit_insn (gen_sse2_storelpd (m, op1));
13522 m = adjust_address (op0, DFmode, 8);
13523 emit_insn (gen_sse2_storehpd (m, op1));
13526 else
13528 if (mode != V4SFmode)
13529 op1 = gen_lowpart (V4SFmode, op1);
13531 if (TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
13533 op0 = gen_lowpart (V4SFmode, op0);
13534 emit_insn (gen_sse_movups (op0, op1));
13536 else
13538 m = adjust_address (op0, V2SFmode, 0);
13539 emit_insn (gen_sse_storelps (m, op1));
13540 m = adjust_address (op0, V2SFmode, 8);
13541 emit_insn (gen_sse_storehps (m, op1));
13545 else
13546 gcc_unreachable ();
13549 /* Expand a push in MODE. This is some mode for which we do not support
13550 proper push instructions, at least from the registers that we expect
13551 the value to live in. */
13553 void
13554 ix86_expand_push (enum machine_mode mode, rtx x)
13556 rtx tmp;
13558 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13559 GEN_INT (-GET_MODE_SIZE (mode)),
13560 stack_pointer_rtx, 1, OPTAB_DIRECT);
13561 if (tmp != stack_pointer_rtx)
13562 emit_move_insn (stack_pointer_rtx, tmp);
13564 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13566 /* When we push an operand onto the stack, it has to be aligned at least
13567 at the function argument boundary. However, since we don't have
13568 the argument type, we can't determine the actual argument
13569 boundary. */
13570 emit_move_insn (tmp, x);
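/* As a rough sketch (32-bit stack pointer shown, instruction choices are
   only illustrative), for a 16-byte vector mode the expansion above amounts
   to something like

       sub    $16, %esp
       movups %xmm0, (%esp)

   though the exact instructions are whatever the move expander picks for
   MODE and for wherever X happens to live.  */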
13573 /* Helper function of ix86_fixup_binary_operands to canonicalize
13574 operand order. Returns true if the operands should be swapped. */
13576 static bool
13577 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13578 rtx operands[])
13580 rtx dst = operands[0];
13581 rtx src1 = operands[1];
13582 rtx src2 = operands[2];
13584 /* If the operation is not commutative, we can't do anything. */
13585 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13586 return false;
13588 /* Highest priority is that src1 should match dst. */
13589 if (rtx_equal_p (dst, src1))
13590 return false;
13591 if (rtx_equal_p (dst, src2))
13592 return true;
13594 /* Next highest priority is that immediate constants come second. */
13595 if (immediate_operand (src2, mode))
13596 return false;
13597 if (immediate_operand (src1, mode))
13598 return true;
13600 /* Lowest priority is that memory references should come second. */
13601 if (MEM_P (src2))
13602 return false;
13603 if (MEM_P (src1))
13604 return true;
13606 return false;
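/* For example, for a commutative PLUS where only the second source matches
   the destination, "dst = src2 + dst" is swapped into "dst = dst + src2" so
   it fits the two-address form; likewise "dst = 5 + reg" is canonicalized
   to "dst = reg + 5" so the immediate ends up as the second operand.  */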
13610 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13611 destination to use for the operation. If different from the true
13612 destination in operands[0], a copy operation will be required. */
13615 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13616 rtx operands[])
13618 rtx dst = operands[0];
13619 rtx src1 = operands[1];
13620 rtx src2 = operands[2];
13622 /* Canonicalize operand order. */
13623 if (ix86_swap_binary_operands_p (code, mode, operands))
13625 rtx temp;
13627 /* It is invalid to swap operands of different modes. */
13628 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13630 temp = src1;
13631 src1 = src2;
13632 src2 = temp;
13635 /* Both source operands cannot be in memory. */
13636 if (MEM_P (src1) && MEM_P (src2))
13638 /* Optimization: Only read from memory once. */
13639 if (rtx_equal_p (src1, src2))
13641 src2 = force_reg (mode, src2);
13642 src1 = src2;
13644 else
13645 src2 = force_reg (mode, src2);
13648 /* If the destination is memory, and we do not have matching source
13649 operands, do things in registers. */
13650 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13651 dst = gen_reg_rtx (mode);
13653 /* Source 1 cannot be a constant. */
13654 if (CONSTANT_P (src1))
13655 src1 = force_reg (mode, src1);
13657 /* Source 1 cannot be a non-matching memory. */
13658 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13659 src1 = force_reg (mode, src1);
13661 operands[1] = src1;
13662 operands[2] = src2;
13663 return dst;
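/* For instance, an all-memory "(set (mem A) (plus (mem B) (mem C)))" leaves
   here as "tmp = regB + regC", with the returned DST being the fresh tmp;
   the caller then stores tmp back into A.  x86 arithmetic allows at most
   one memory operand and needs the destination to match the first source.  */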
13666 /* Similarly, but assume that the destination has already been
13667 set up properly. */
13669 void
13670 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13671 enum machine_mode mode, rtx operands[])
13673 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13674 gcc_assert (dst == operands[0]);
13677 /* Attempt to expand a binary operator. Make the expansion closer to the
13678 actual machine than just general_operand, which would allow 3 separate
13679 memory references (one output, two input) in a single insn. */
13681 void
13682 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13683 rtx operands[])
13685 rtx src1, src2, dst, op, clob;
13687 dst = ix86_fixup_binary_operands (code, mode, operands);
13688 src1 = operands[1];
13689 src2 = operands[2];
13691 /* Emit the instruction. */
13693 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13694 if (reload_in_progress)
13696 /* Reload doesn't know about the flags register, and doesn't know that
13697 it doesn't want to clobber it. We can only do this with PLUS. */
13698 gcc_assert (code == PLUS);
13699 emit_insn (op);
13701 else
13703 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13704 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13707 /* Fix up the destination if needed. */
13708 if (dst != operands[0])
13709 emit_move_insn (operands[0], dst);
13712 /* Return TRUE or FALSE depending on whether the binary operator meets the
13713 appropriate constraints. */
13716 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13717 rtx operands[3])
13719 rtx dst = operands[0];
13720 rtx src1 = operands[1];
13721 rtx src2 = operands[2];
13723 /* Both source operands cannot be in memory. */
13724 if (MEM_P (src1) && MEM_P (src2))
13725 return 0;
13727 /* Canonicalize operand order for commutative operators. */
13728 if (ix86_swap_binary_operands_p (code, mode, operands))
13730 rtx temp = src1;
13731 src1 = src2;
13732 src2 = temp;
13735 /* If the destination is memory, we must have a matching source operand. */
13736 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13737 return 0;
13739 /* Source 1 cannot be a constant. */
13740 if (CONSTANT_P (src1))
13741 return 0;
13743 /* Source 1 cannot be a non-matching memory. */
13744 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13745 return 0;
13747 return 1;
13750 /* Attempt to expand a unary operator. Make the expansion closer to the
13751 actual machine than just general_operand, which would allow 2 separate
13752 memory references (one output, one input) in a single insn. */
13754 void
13755 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13756 rtx operands[])
13758 int matching_memory;
13759 rtx src, dst, op, clob;
13761 dst = operands[0];
13762 src = operands[1];
13764 /* If the destination is memory, and we do not have matching source
13765 operands, do things in registers. */
13766 matching_memory = 0;
13767 if (MEM_P (dst))
13769 if (rtx_equal_p (dst, src))
13770 matching_memory = 1;
13771 else
13772 dst = gen_reg_rtx (mode);
13775 /* When source operand is memory, destination must match. */
13776 if (MEM_P (src) && !matching_memory)
13777 src = force_reg (mode, src);
13779 /* Emit the instruction. */
13781 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13782 if (reload_in_progress || code == NOT)
13784 /* Reload doesn't know about the flags register, and doesn't know that
13785 it doesn't want to clobber it. */
13786 gcc_assert (code == NOT);
13787 emit_insn (op);
13789 else
13791 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13792 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13795 /* Fix up the destination if needed. */
13796 if (dst != operands[0])
13797 emit_move_insn (operands[0], dst);
13800 #define LEA_SEARCH_THRESHOLD 12
13802 /* Search backward for a non-AGU definition of register number REGNO1
13803 or register number REGNO2 in INSN's basic block, until we
13804 1. have passed LEA_SEARCH_THRESHOLD instructions, or
13805 2. reach the BB boundary, or
13806 3. reach an AGU definition.
13807 Return the distance between the non-AGU definition point and INSN.
13808 If there is no definition point, return -1. */
13810 static int
13811 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13812 rtx insn)
13814 basic_block bb = BLOCK_FOR_INSN (insn);
13815 int distance = 0;
13816 df_ref *def_rec;
13817 enum attr_type insn_type;
13819 if (insn != BB_HEAD (bb))
13821 rtx prev = PREV_INSN (insn);
13822 while (prev && distance < LEA_SEARCH_THRESHOLD)
13824 if (NONDEBUG_INSN_P (prev))
13826 distance++;
13827 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13828 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13829 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13830 && (regno1 == DF_REF_REGNO (*def_rec)
13831 || regno2 == DF_REF_REGNO (*def_rec)))
13833 insn_type = get_attr_type (prev);
13834 if (insn_type != TYPE_LEA)
13835 goto done;
13838 if (prev == BB_HEAD (bb))
13839 break;
13840 prev = PREV_INSN (prev);
13844 if (distance < LEA_SEARCH_THRESHOLD)
13846 edge e;
13847 edge_iterator ei;
13848 bool simple_loop = false;
13850 FOR_EACH_EDGE (e, ei, bb->preds)
13851 if (e->src == bb)
13853 simple_loop = true;
13854 break;
13857 if (simple_loop)
13859 rtx prev = BB_END (bb);
13860 while (prev
13861 && prev != insn
13862 && distance < LEA_SEARCH_THRESHOLD)
13864 if (NONDEBUG_INSN_P (prev))
13866 distance++;
13867 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13868 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13869 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13870 && (regno1 == DF_REF_REGNO (*def_rec)
13871 || regno2 == DF_REF_REGNO (*def_rec)))
13873 insn_type = get_attr_type (prev);
13874 if (insn_type != TYPE_LEA)
13875 goto done;
13878 prev = PREV_INSN (prev);
13883 distance = -1;
13885 done:
13886 /* get_attr_type may modify recog data. We want to make sure
13887 that recog data is valid for instruction INSN, on which
13888 distance_non_agu_define is called. INSN is unchanged here. */
13889 extract_insn_cached (insn);
13890 return distance;
13893 /* Return the distance between INSN and the next insn that uses
13894 register number REGNO0 in a memory address. Return -1 if no such
13895 use is found within LEA_SEARCH_THRESHOLD insns or if REGNO0 is set first. */
13897 static int
13898 distance_agu_use (unsigned int regno0, rtx insn)
13900 basic_block bb = BLOCK_FOR_INSN (insn);
13901 int distance = 0;
13902 df_ref *def_rec;
13903 df_ref *use_rec;
13905 if (insn != BB_END (bb))
13907 rtx next = NEXT_INSN (insn);
13908 while (next && distance < LEA_SEARCH_THRESHOLD)
13910 if (NONDEBUG_INSN_P (next))
13912 distance++;
13914 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13915 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13916 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13917 && regno0 == DF_REF_REGNO (*use_rec))
13919 /* Return DISTANCE if OP0 is used in memory
13920 address in NEXT. */
13921 return distance;
13924 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13925 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13926 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13927 && regno0 == DF_REF_REGNO (*def_rec))
13929 /* Return -1 if OP0 is set in NEXT. */
13930 return -1;
13933 if (next == BB_END (bb))
13934 break;
13935 next = NEXT_INSN (next);
13939 if (distance < LEA_SEARCH_THRESHOLD)
13941 edge e;
13942 edge_iterator ei;
13943 bool simple_loop = false;
13945 FOR_EACH_EDGE (e, ei, bb->succs)
13946 if (e->dest == bb)
13948 simple_loop = true;
13949 break;
13952 if (simple_loop)
13954 rtx next = BB_HEAD (bb);
13955 while (next
13956 && next != insn
13957 && distance < LEA_SEARCH_THRESHOLD)
13959 if (NONDEBUG_INSN_P (next))
13961 distance++;
13963 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13964 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13965 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13966 && regno0 == DF_REF_REGNO (*use_rec))
13968 /* Return DISTANCE if OP0 is used in memory
13969 address in NEXT. */
13970 return distance;
13973 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13974 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13975 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13976 && regno0 == DF_REF_REGNO (*def_rec))
13978 /* Return -1 if OP0 is set in NEXT. */
13979 return -1;
13983 next = NEXT_INSN (next);
13988 return -1;
13991 /* Define this macro to tune LEA priority vs. ADD; it takes effect when
13992 there is a choice between LEA and ADD.
13993 Negative value: ADD is preferred over LEA
13994 Zero: Neutral
13995 Positive value: LEA is preferred over ADD */
13996 #define IX86_LEA_PRIORITY 2
13998 /* Return true if it is OK to optimize an ADD operation into an LEA
13999 operation so as to avoid consuming the flags register. For processors
14000 like Atom, if the destination register of the LEA holds an actual
14001 address that will be used soon, LEA is better; otherwise ADD
14002 is better. */
14004 bool
14005 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
14006 rtx insn, rtx operands[])
14008 unsigned int regno0 = true_regnum (operands[0]);
14009 unsigned int regno1 = true_regnum (operands[1]);
14010 unsigned int regno2;
14012 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
14013 return regno0 != regno1;
14015 regno2 = true_regnum (operands[2]);
14017 /* If a = b + c, (a!=b && a!=c), must use lea form. */
14018 if (regno0 != regno1 && regno0 != regno2)
14019 return true;
14020 else
14022 int dist_define, dist_use;
14023 dist_define = distance_non_agu_define (regno1, regno2, insn);
14024 if (dist_define <= 0)
14025 return true;
14027 /* If this insn has both a backward non-AGU dependence and a forward
14028 AGU dependence, the one with the shorter distance takes effect. */
14029 dist_use = distance_agu_use (regno0, insn);
14030 if (dist_use <= 0
14031 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
14032 return false;
14034 return true;
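/* Illustrative sketch of the heuristic (AT&T syntax, register choices are
   only examples).  If the result soon feeds an address, keep the LEA:

       lea    (%edx,%ecx), %eax
       mov    (%eax), %ebx          <- %eax feeds the AGU, LEA wins

   whereas if the result only feeds ALU instructions, a plain ADD would
   have been the cheaper choice:

       lea    (%edx,%ecx), %eax
       add    %eax, %esi            <- %eax only feeds the ALU  */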
14038 /* Return true if destination reg of SET_BODY is shift count of
14039 USE_BODY. */
14041 static bool
14042 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
14044 rtx set_dest;
14045 rtx shift_rtx;
14046 int i;
14048 /* Retrieve destination of SET_BODY. */
14049 switch (GET_CODE (set_body))
14051 case SET:
14052 set_dest = SET_DEST (set_body);
14053 if (!set_dest || !REG_P (set_dest))
14054 return false;
14055 break;
14056 case PARALLEL:
14057 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
14058 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
14059 use_body))
14060 return true;
14061 default:
14062 return false;
14063 break;
14066 /* Retrieve shift count of USE_BODY. */
14067 switch (GET_CODE (use_body))
14069 case SET:
14070 shift_rtx = XEXP (use_body, 1);
14071 break;
14072 case PARALLEL:
14073 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
14074 if (ix86_dep_by_shift_count_body (set_body,
14075 XVECEXP (use_body, 0, i)))
14076 return true;
14077 default:
14078 return false;
14079 break;
14082 if (shift_rtx
14083 && (GET_CODE (shift_rtx) == ASHIFT
14084 || GET_CODE (shift_rtx) == LSHIFTRT
14085 || GET_CODE (shift_rtx) == ASHIFTRT
14086 || GET_CODE (shift_rtx) == ROTATE
14087 || GET_CODE (shift_rtx) == ROTATERT))
14089 rtx shift_count = XEXP (shift_rtx, 1);
14091 /* Return true if shift count is dest of SET_BODY. */
14092 if (REG_P (shift_count)
14093 && true_regnum (set_dest) == true_regnum (shift_count))
14094 return true;
14097 return false;
14100 /* Return true if destination reg of SET_INSN is shift count of
14101 USE_INSN. */
14103 bool
14104 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
14106 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
14107 PATTERN (use_insn));
14110 /* Return TRUE or FALSE depending on whether the unary operator meets the
14111 appropriate constraints. */
14114 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
14115 enum machine_mode mode ATTRIBUTE_UNUSED,
14116 rtx operands[2] ATTRIBUTE_UNUSED)
14118 /* If one of operands is memory, source and destination must match. */
14119 if ((MEM_P (operands[0])
14120 || MEM_P (operands[1]))
14121 && ! rtx_equal_p (operands[0], operands[1]))
14122 return FALSE;
14123 return TRUE;
14126 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
14127 are ok, keeping in mind the possible movddup alternative. */
14129 bool
14130 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
14132 if (MEM_P (operands[0]))
14133 return rtx_equal_p (operands[0], operands[1 + high]);
14134 if (MEM_P (operands[1]) && MEM_P (operands[2]))
14135 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
14136 return true;
14139 /* Post-reload splitter for converting an SF or DFmode value in an
14140 SSE register into an unsigned SImode. */
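/* In scalar terms the split below computes, roughly,

       if (x < 0x1.0p31)
         result = (int) x;
       else
         result = (int) (x - 0x1.0p31) ^ 0x80000000;

   LARGE ends up as the "x >= 2^31" mask, ZERO_OR_TWO31 is the conditional
   bias, and the final xor re-inserts bit 31 only in the lanes that were
   biased.  This is only a sketch of the packed sequence that follows.  */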
14142 void
14143 ix86_split_convert_uns_si_sse (rtx operands[])
14145 enum machine_mode vecmode;
14146 rtx value, large, zero_or_two31, input, two31, x;
14148 large = operands[1];
14149 zero_or_two31 = operands[2];
14150 input = operands[3];
14151 two31 = operands[4];
14152 vecmode = GET_MODE (large);
14153 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
14155 /* Load up the value into the low element. We must ensure that the other
14156 elements are valid floats -- zero is the easiest such value. */
14157 if (MEM_P (input))
14159 if (vecmode == V4SFmode)
14160 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
14161 else
14162 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
14164 else
14166 input = gen_rtx_REG (vecmode, REGNO (input));
14167 emit_move_insn (value, CONST0_RTX (vecmode));
14168 if (vecmode == V4SFmode)
14169 emit_insn (gen_sse_movss (value, value, input));
14170 else
14171 emit_insn (gen_sse2_movsd (value, value, input));
14174 emit_move_insn (large, two31);
14175 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
14177 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
14178 emit_insn (gen_rtx_SET (VOIDmode, large, x));
14180 x = gen_rtx_AND (vecmode, zero_or_two31, large);
14181 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
14183 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
14184 emit_insn (gen_rtx_SET (VOIDmode, value, x));
14186 large = gen_rtx_REG (V4SImode, REGNO (large));
14187 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
14189 x = gen_rtx_REG (V4SImode, REGNO (value));
14190 if (vecmode == V4SFmode)
14191 emit_insn (gen_sse2_cvttps2dq (x, value));
14192 else
14193 emit_insn (gen_sse2_cvttpd2dq (x, value));
14194 value = x;
14196 emit_insn (gen_xorv4si3 (value, value, large));
14199 /* Convert an unsigned DImode value into a DFmode, using only SSE.
14200 Expects the 64-bit DImode to be supplied in a pair of integral
14201 registers. Requires SSE2; will use SSE3 if available. For x86_32,
14202 -mfpmath=sse, !optimize_size only. */
14204 void
14205 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
14207 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
14208 rtx int_xmm, fp_xmm;
14209 rtx biases, exponents;
14210 rtx x;
14212 int_xmm = gen_reg_rtx (V4SImode);
14213 if (TARGET_INTER_UNIT_MOVES)
14214 emit_insn (gen_movdi_to_sse (int_xmm, input));
14215 else if (TARGET_SSE_SPLIT_REGS)
14217 emit_clobber (int_xmm);
14218 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
14220 else
14222 x = gen_reg_rtx (V2DImode);
14223 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
14224 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
14227 x = gen_rtx_CONST_VECTOR (V4SImode,
14228 gen_rtvec (4, GEN_INT (0x43300000UL),
14229 GEN_INT (0x45300000UL),
14230 const0_rtx, const0_rtx));
14231 exponents = validize_mem (force_const_mem (V4SImode, x));
14233 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
14234 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
14236 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
14237 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
14238 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
14239 (0x1.0p84 + double(fp_value_hi_xmm)).
14240 Note these exponents differ by 32. */
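/* The same trick written as portable scalar C, purely as an illustration;
   it assumes IEEE-754 binary64 doubles, relies on memcpy from <string.h>,
   and the helper name is made up.  dlo reconstructs 2^52 + lo32 and dhi
   reconstructs 2^84 + hi32 * 2^32; subtracting the biases and adding gives
   the original value with a single final rounding:

       double u64_to_double (unsigned long long x)
       {
         unsigned long long lo_bits = (0x43300000ULL << 32) | (x & 0xffffffffULL);
         unsigned long long hi_bits = (0x45300000ULL << 32) | (x >> 32);
         double dlo, dhi;
         memcpy (&dlo, &lo_bits, sizeof dlo);
         memcpy (&dhi, &hi_bits, sizeof dhi);
         return (dlo - 0x1.0p52) + (dhi - 0x1.0p84);
       }  */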
14242 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
14244 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
14245 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
14246 real_ldexp (&bias_lo_rvt, &dconst1, 52);
14247 real_ldexp (&bias_hi_rvt, &dconst1, 84);
14248 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
14249 x = const_double_from_real_value (bias_hi_rvt, DFmode);
14250 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
14251 biases = validize_mem (force_const_mem (V2DFmode, biases));
14252 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
14254 /* Add the upper and lower DFmode values together. */
14255 if (TARGET_SSE3)
14256 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
14257 else
14259 x = copy_to_mode_reg (V2DFmode, fp_xmm);
14260 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
14261 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
14264 ix86_expand_vector_extract (false, target, fp_xmm, 0);
14267 /* Not used, but eases macroization of patterns. */
14268 void
14269 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
14270 rtx input ATTRIBUTE_UNUSED)
14272 gcc_unreachable ();
14275 /* Convert an unsigned SImode value into a DFmode. Only currently used
14276 for SSE, but applicable anywhere. */
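/* In scalar terms this computes, roughly,

       (double) (int) (x - 0x80000000u) + 0x1.0p31

   i.e. shift the value into signed range with a wrapping add of -2^31,
   convert that exactly, then add 2^31 back as a double.  */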
14278 void
14279 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14281 REAL_VALUE_TYPE TWO31r;
14282 rtx x, fp;
14284 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14285 NULL, 1, OPTAB_DIRECT);
14287 fp = gen_reg_rtx (DFmode);
14288 emit_insn (gen_floatsidf2 (fp, x));
14290 real_ldexp (&TWO31r, &dconst1, 31);
14291 x = const_double_from_real_value (TWO31r, DFmode);
14293 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14294 if (x != target)
14295 emit_move_insn (target, x);
14298 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14299 32-bit mode; otherwise we have a direct convert instruction. */
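/* Sketch of the computation below in scalar terms:

       result = (double) (signed int) hi32 * 0x1.0p32 + (double) lo32

   where lo32 is treated as unsigned and converted via
   ix86_expand_convert_uns_sidf_sse.  */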
14301 void
14302 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14304 REAL_VALUE_TYPE TWO32r;
14305 rtx fp_lo, fp_hi, x;
14307 fp_lo = gen_reg_rtx (DFmode);
14308 fp_hi = gen_reg_rtx (DFmode);
14310 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14312 real_ldexp (&TWO32r, &dconst1, 32);
14313 x = const_double_from_real_value (TWO32r, DFmode);
14314 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14316 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14318 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14319 0, OPTAB_DIRECT);
14320 if (x != target)
14321 emit_move_insn (target, x);
14324 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14325 For x86_32, -mfpmath=sse, !optimize_size only. */
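/* Equivalent scalar computation, as a sketch: split the input into 16-bit
   halves so each half converts exactly through the signed path, i.e.

       (float) (x >> 16) * 0x1.0p16f + (float) (x & 0xffff)

   The scaling by 2^16 is exact and the final addition rounds once.  */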
14326 void
14327 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14329 REAL_VALUE_TYPE ONE16r;
14330 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14332 real_ldexp (&ONE16r, &dconst1, 16);
14333 x = const_double_from_real_value (ONE16r, SFmode);
14334 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14335 NULL, 0, OPTAB_DIRECT);
14336 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14337 NULL, 0, OPTAB_DIRECT);
14338 fp_hi = gen_reg_rtx (SFmode);
14339 fp_lo = gen_reg_rtx (SFmode);
14340 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14341 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14342 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14343 0, OPTAB_DIRECT);
14344 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14345 0, OPTAB_DIRECT);
14346 if (!rtx_equal_p (target, fp_hi))
14347 emit_move_insn (target, fp_hi);
14350 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14351 then replicate the value for all elements of the vector
14352 register. */
14355 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14357 rtvec v;
14358 switch (mode)
14360 case SImode:
14361 gcc_assert (vect);
14362 v = gen_rtvec (4, value, value, value, value);
14363 return gen_rtx_CONST_VECTOR (V4SImode, v);
14365 case DImode:
14366 gcc_assert (vect);
14367 v = gen_rtvec (2, value, value);
14368 return gen_rtx_CONST_VECTOR (V2DImode, v);
14370 case SFmode:
14371 if (vect)
14372 v = gen_rtvec (4, value, value, value, value);
14373 else
14374 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14375 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14376 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14378 case DFmode:
14379 if (vect)
14380 v = gen_rtvec (2, value, value);
14381 else
14382 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14383 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14385 default:
14386 gcc_unreachable ();
14390 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14391 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14392 for an SSE register. If VECT is true, then replicate the mask for
14393 all elements of the vector register. If INVERT is true, then create
14394 a mask excluding the sign bit. */
14397 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14399 enum machine_mode vec_mode, imode;
14400 HOST_WIDE_INT hi, lo;
14401 int shift = 63;
14402 rtx v;
14403 rtx mask;
14405 /* Find the sign bit, sign extended to 2*HWI. */
14406 switch (mode)
14408 case SImode:
14409 case SFmode:
14410 imode = SImode;
14411 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14412 lo = 0x80000000, hi = lo < 0;
14413 break;
14415 case DImode:
14416 case DFmode:
14417 imode = DImode;
14418 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14419 if (HOST_BITS_PER_WIDE_INT >= 64)
14420 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14421 else
14422 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14423 break;
14425 case TImode:
14426 case TFmode:
14427 vec_mode = VOIDmode;
14428 if (HOST_BITS_PER_WIDE_INT >= 64)
14430 imode = TImode;
14431 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14433 else
14435 rtvec vec;
14437 imode = DImode;
14438 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14440 if (invert)
14442 lo = ~lo, hi = ~hi;
14443 v = constm1_rtx;
14445 else
14446 v = const0_rtx;
14448 mask = immed_double_const (lo, hi, imode);
14450 vec = gen_rtvec (2, v, mask);
14451 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14452 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14454 return v;
14456 break;
14458 default:
14459 gcc_unreachable ();
14462 if (invert)
14463 lo = ~lo, hi = ~hi;
14465 /* Force this value into the low part of a fp vector constant. */
14466 mask = immed_double_const (lo, hi, imode);
14467 mask = gen_lowpart (mode, mask);
14469 if (vec_mode == VOIDmode)
14470 return force_reg (mode, mask);
14472 v = ix86_build_const_vector (mode, vect, mask);
14473 return force_reg (vec_mode, v);
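/* For example, for scalar DFmode with INVERT false the result is the V2DF
   constant whose low element has only the sign bit set (bit pattern
   0x8000000000000000, the high element being zero), while INVERT true
   yields its complement, i.e. a mask that clears the sign bit.  */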
14476 /* Generate code for floating point ABS or NEG. */
14478 void
14479 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14480 rtx operands[])
14482 rtx mask, set, use, clob, dst, src;
14483 bool use_sse = false;
14484 bool vector_mode = VECTOR_MODE_P (mode);
14485 enum machine_mode elt_mode = mode;
14487 if (vector_mode)
14489 elt_mode = GET_MODE_INNER (mode);
14490 use_sse = true;
14492 else if (mode == TFmode)
14493 use_sse = true;
14494 else if (TARGET_SSE_MATH)
14495 use_sse = SSE_FLOAT_MODE_P (mode);
14497 /* NEG and ABS performed with SSE use bitwise mask operations.
14498 Create the appropriate mask now. */
14499 if (use_sse)
14500 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14501 else
14502 mask = NULL_RTX;
14504 dst = operands[0];
14505 src = operands[1];
14507 if (vector_mode)
14509 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14510 set = gen_rtx_SET (VOIDmode, dst, set);
14511 emit_insn (set);
14513 else
14515 set = gen_rtx_fmt_e (code, mode, src);
14516 set = gen_rtx_SET (VOIDmode, dst, set);
14517 if (mask)
14519 use = gen_rtx_USE (VOIDmode, mask);
14520 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14521 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14522 gen_rtvec (3, set, use, clob)));
14524 else
14525 emit_insn (set);
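/* In bit terms the SSE path implements

       neg (x): x ^ SIGN_MASK       (xorps / xorpd with the sign-bit vector)
       abs (x): x & ~SIGN_MASK      (andps / andpd with the inverted mask)

   which is why the mask above is built inverted when CODE is ABS.  */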
14529 /* Expand a copysign operation. Special case operand 0 being a constant. */
14531 void
14532 ix86_expand_copysign (rtx operands[])
14534 enum machine_mode mode;
14535 rtx dest, op0, op1, mask, nmask;
14537 dest = operands[0];
14538 op0 = operands[1];
14539 op1 = operands[2];
14541 mode = GET_MODE (dest);
14543 if (GET_CODE (op0) == CONST_DOUBLE)
14545 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14547 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14548 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14550 if (mode == SFmode || mode == DFmode)
14552 enum machine_mode vmode;
14554 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14556 if (op0 == CONST0_RTX (mode))
14557 op0 = CONST0_RTX (vmode);
14558 else
14560 rtx v = ix86_build_const_vector (mode, false, op0);
14562 op0 = force_reg (vmode, v);
14565 else if (op0 != CONST0_RTX (mode))
14566 op0 = force_reg (mode, op0);
14568 mask = ix86_build_signbit_mask (mode, 0, 0);
14570 if (mode == SFmode)
14571 copysign_insn = gen_copysignsf3_const;
14572 else if (mode == DFmode)
14573 copysign_insn = gen_copysigndf3_const;
14574 else
14575 copysign_insn = gen_copysigntf3_const;
14577 emit_insn (copysign_insn (dest, op0, op1, mask));
14579 else
14581 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14583 nmask = ix86_build_signbit_mask (mode, 0, 1);
14584 mask = ix86_build_signbit_mask (mode, 0, 0);
14586 if (mode == SFmode)
14587 copysign_insn = gen_copysignsf3_var;
14588 else if (mode == DFmode)
14589 copysign_insn = gen_copysigndf3_var;
14590 else
14591 copysign_insn = gen_copysigntf3_var;
14593 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14597 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14598 be a constant, and so has already been expanded into a vector constant. */
14600 void
14601 ix86_split_copysign_const (rtx operands[])
14603 enum machine_mode mode, vmode;
14604 rtx dest, op0, mask, x;
14606 dest = operands[0];
14607 op0 = operands[1];
14608 mask = operands[3];
14610 mode = GET_MODE (dest);
14611 vmode = GET_MODE (mask);
14613 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14614 x = gen_rtx_AND (vmode, dest, mask);
14615 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14617 if (op0 != CONST0_RTX (vmode))
14619 x = gen_rtx_IOR (vmode, dest, op0);
14620 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14624 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14625 so we have to do two masks. */
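/* The underlying identity is copysign (x, y) == (x & ~SIGN_MASK) | (y & SIGN_MASK);
   NMASK and MASK are those two bit masks, and the register alternatives below
   only differ in where the intermediate values are allowed to live.  */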
14627 void
14628 ix86_split_copysign_var (rtx operands[])
14630 enum machine_mode mode, vmode;
14631 rtx dest, scratch, op0, op1, mask, nmask, x;
14633 dest = operands[0];
14634 scratch = operands[1];
14635 op0 = operands[2];
14636 op1 = operands[3];
14637 nmask = operands[4];
14638 mask = operands[5];
14640 mode = GET_MODE (dest);
14641 vmode = GET_MODE (mask);
14643 if (rtx_equal_p (op0, op1))
14645 /* Shouldn't happen often (it's useless, obviously), but when it does
14646 we'd generate incorrect code if we continue below. */
14647 emit_move_insn (dest, op0);
14648 return;
14651 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14653 gcc_assert (REGNO (op1) == REGNO (scratch));
14655 x = gen_rtx_AND (vmode, scratch, mask);
14656 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14658 dest = mask;
14659 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14660 x = gen_rtx_NOT (vmode, dest);
14661 x = gen_rtx_AND (vmode, x, op0);
14662 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14664 else
14666 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14668 x = gen_rtx_AND (vmode, scratch, mask);
14670 else /* alternative 2,4 */
14672 gcc_assert (REGNO (mask) == REGNO (scratch));
14673 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14674 x = gen_rtx_AND (vmode, scratch, op1);
14676 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14678 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14680 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14681 x = gen_rtx_AND (vmode, dest, nmask);
14683 else /* alternative 3,4 */
14685 gcc_assert (REGNO (nmask) == REGNO (dest));
14686 dest = nmask;
14687 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14688 x = gen_rtx_AND (vmode, dest, op0);
14690 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14693 x = gen_rtx_IOR (vmode, dest, scratch);
14694 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14697 /* Return TRUE or FALSE depending on whether the first SET in INSN
14698 has source and destination with matching CC modes and whether the
14699 CC mode is at least as constrained as REQ_MODE. */
14702 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14704 rtx set;
14705 enum machine_mode set_mode;
14707 set = PATTERN (insn);
14708 if (GET_CODE (set) == PARALLEL)
14709 set = XVECEXP (set, 0, 0);
14710 gcc_assert (GET_CODE (set) == SET);
14711 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14713 set_mode = GET_MODE (SET_DEST (set));
14714 switch (set_mode)
14716 case CCNOmode:
14717 if (req_mode != CCNOmode
14718 && (req_mode != CCmode
14719 || XEXP (SET_SRC (set), 1) != const0_rtx))
14720 return 0;
14721 break;
14722 case CCmode:
14723 if (req_mode == CCGCmode)
14724 return 0;
14725 /* FALLTHRU */
14726 case CCGCmode:
14727 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14728 return 0;
14729 /* FALLTHRU */
14730 case CCGOCmode:
14731 if (req_mode == CCZmode)
14732 return 0;
14733 /* FALLTHRU */
14734 case CCAmode:
14735 case CCCmode:
14736 case CCOmode:
14737 case CCSmode:
14738 case CCZmode:
14739 break;
14741 default:
14742 gcc_unreachable ();
14745 return (GET_MODE (SET_SRC (set)) == set_mode);
14748 /* Generate insn patterns to do an integer compare of OPERANDS. */
14750 static rtx
14751 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14753 enum machine_mode cmpmode;
14754 rtx tmp, flags;
14756 cmpmode = SELECT_CC_MODE (code, op0, op1);
14757 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14759 /* This is very simple, but making the interface the same as in the
14760 FP case makes the rest of the code easier. */
14761 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14762 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14764 /* Return the test that should be put into the flags user, i.e.
14765 the bcc, scc, or cmov instruction. */
14766 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14769 /* Figure out whether to use ordered or unordered fp comparisons.
14770 Return the appropriate mode to use. */
14772 enum machine_mode
14773 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14775 /* ??? In order to make all comparisons reversible, we do all comparisons
14776 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14777 all forms of trapping and nontrapping comparisons, we can make inequality
14778 comparisons trapping again, since that results in better code when using
14779 FCOM based compares. */
14780 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14783 enum machine_mode
14784 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14786 enum machine_mode mode = GET_MODE (op0);
14788 if (SCALAR_FLOAT_MODE_P (mode))
14790 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14791 return ix86_fp_compare_mode (code);
14794 switch (code)
14796 /* Only zero flag is needed. */
14797 case EQ: /* ZF=0 */
14798 case NE: /* ZF!=0 */
14799 return CCZmode;
14800 /* Codes needing carry flag. */
14801 case GEU: /* CF=0 */
14802 case LTU: /* CF=1 */
14803 /* Detect overflow checks. They need just the carry flag. */
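/* I.e. this matches the common unsigned-overflow idiom "a + b < a"
   (resp. "a + b >= a"), for which only the carry flag matters.  */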
14804 if (GET_CODE (op0) == PLUS
14805 && rtx_equal_p (op1, XEXP (op0, 0)))
14806 return CCCmode;
14807 else
14808 return CCmode;
14809 case GTU: /* CF=0 & ZF=0 */
14810 case LEU: /* CF=1 | ZF=1 */
14811 /* Detect overflow checks. They need just the carry flag. */
14812 if (GET_CODE (op0) == MINUS
14813 && rtx_equal_p (op1, XEXP (op0, 0)))
14814 return CCCmode;
14815 else
14816 return CCmode;
14817 /* Codes possibly doable only with sign flag when
14818 comparing against zero. */
14819 case GE: /* SF=OF or SF=0 */
14820 case LT: /* SF<>OF or SF=1 */
14821 if (op1 == const0_rtx)
14822 return CCGOCmode;
14823 else
14824 /* For other cases Carry flag is not required. */
14825 return CCGCmode;
14826 /* Codes doable only with the sign flag when comparing
14827 against zero, but for which we lack a jump instruction,
14828 so we need to use relational tests against overflow,
14829 which thus needs to be zero. */
14830 case GT: /* ZF=0 & SF=OF */
14831 case LE: /* ZF=1 | SF<>OF */
14832 if (op1 == const0_rtx)
14833 return CCNOmode;
14834 else
14835 return CCGCmode;
14836 /* The strcmp pattern does (use flags), and combine may ask us for the
14837 proper mode. */
14838 case USE:
14839 return CCmode;
14840 default:
14841 gcc_unreachable ();
14845 /* Return the fixed registers used for condition codes. */
14847 static bool
14848 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14850 *p1 = FLAGS_REG;
14851 *p2 = FPSR_REG;
14852 return true;
14855 /* If two condition code modes are compatible, return a condition code
14856 mode which is compatible with both. Otherwise, return
14857 VOIDmode. */
14859 static enum machine_mode
14860 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14862 if (m1 == m2)
14863 return m1;
14865 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14866 return VOIDmode;
14868 if ((m1 == CCGCmode && m2 == CCGOCmode)
14869 || (m1 == CCGOCmode && m2 == CCGCmode))
14870 return CCGCmode;
14872 switch (m1)
14874 default:
14875 gcc_unreachable ();
14877 case CCmode:
14878 case CCGCmode:
14879 case CCGOCmode:
14880 case CCNOmode:
14881 case CCAmode:
14882 case CCCmode:
14883 case CCOmode:
14884 case CCSmode:
14885 case CCZmode:
14886 switch (m2)
14888 default:
14889 return VOIDmode;
14891 case CCmode:
14892 case CCGCmode:
14893 case CCGOCmode:
14894 case CCNOmode:
14895 case CCAmode:
14896 case CCCmode:
14897 case CCOmode:
14898 case CCSmode:
14899 case CCZmode:
14900 return CCmode;
14903 case CCFPmode:
14904 case CCFPUmode:
14905 /* These are only compatible with themselves, which we already
14906 checked above. */
14907 return VOIDmode;
14912 /* Return a comparison we can do that is equivalent to
14913 swap_condition (code), apart possibly from orderedness.
14914 However, never change orderedness if TARGET_IEEE_FP, returning
14915 UNKNOWN in that case if necessary. */
14917 static enum rtx_code
14918 ix86_fp_swap_condition (enum rtx_code code)
14920 switch (code)
14922 case GT: /* GTU - CF=0 & ZF=0 */
14923 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14924 case GE: /* GEU - CF=0 */
14925 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14926 case UNLT: /* LTU - CF=1 */
14927 return TARGET_IEEE_FP ? UNKNOWN : GT;
14928 case UNLE: /* LEU - CF=1 | ZF=1 */
14929 return TARGET_IEEE_FP ? UNKNOWN : GE;
14930 default:
14931 return swap_condition (code);
14935 /* Return the cost of comparison CODE using the best strategy for performance.
14936 All of the following functions use the number of instructions as the cost metric.
14937 In the future this should be tweaked to compute bytes for optimize_size and
14938 to take into account the performance of various instructions on various CPUs. */
14940 static int
14941 ix86_fp_comparison_cost (enum rtx_code code)
14943 int arith_cost;
14945 /* The cost of code using bit-twiddling on %ah. */
14946 switch (code)
14948 case UNLE:
14949 case UNLT:
14950 case LTGT:
14951 case GT:
14952 case GE:
14953 case UNORDERED:
14954 case ORDERED:
14955 case UNEQ:
14956 arith_cost = 4;
14957 break;
14958 case LT:
14959 case NE:
14960 case EQ:
14961 case UNGE:
14962 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14963 break;
14964 case LE:
14965 case UNGT:
14966 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14967 break;
14968 default:
14969 gcc_unreachable ();
14972 switch (ix86_fp_comparison_strategy (code))
14974 case IX86_FPCMP_COMI:
14975 return arith_cost > 4 ? 3 : 2;
14976 case IX86_FPCMP_SAHF:
14977 return arith_cost > 4 ? 4 : 3;
14978 default:
14979 return arith_cost;
14983 /* Return the strategy to use for floating-point comparisons. We assume that
14984 fcomi is always preferable where available, since that also holds when looking
14985 at size (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14987 enum ix86_fpcmp_strategy
14988 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14990 /* Do fcomi/sahf based test when profitable. */
14992 if (TARGET_CMOVE)
14993 return IX86_FPCMP_COMI;
14995 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14996 return IX86_FPCMP_SAHF;
14998 return IX86_FPCMP_ARITH;
15001 /* Swap, force into registers, or otherwise massage the two operands
15002 to a fp comparison. The operands are updated in place; the new
15003 comparison code is returned. */
15005 static enum rtx_code
15006 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
15008 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
15009 rtx op0 = *pop0, op1 = *pop1;
15010 enum machine_mode op_mode = GET_MODE (op0);
15011 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
15013 /* All of the unordered compare instructions only work on registers.
15014 The same is true of the fcomi compare instructions. The XFmode
15015 compare instructions require registers except when comparing
15016 against zero or when converting operand 1 from fixed point to
15017 floating point. */
15019 if (!is_sse
15020 && (fpcmp_mode == CCFPUmode
15021 || (op_mode == XFmode
15022 && ! (standard_80387_constant_p (op0) == 1
15023 || standard_80387_constant_p (op1) == 1)
15024 && GET_CODE (op1) != FLOAT)
15025 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
15027 op0 = force_reg (op_mode, op0);
15028 op1 = force_reg (op_mode, op1);
15030 else
15032 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
15033 things around if they appear profitable, otherwise force op0
15034 into a register. */
15036 if (standard_80387_constant_p (op0) == 0
15037 || (MEM_P (op0)
15038 && ! (standard_80387_constant_p (op1) == 0
15039 || MEM_P (op1))))
15041 enum rtx_code new_code = ix86_fp_swap_condition (code);
15042 if (new_code != UNKNOWN)
15044 rtx tmp;
15045 tmp = op0, op0 = op1, op1 = tmp;
15046 code = new_code;
15050 if (!REG_P (op0))
15051 op0 = force_reg (op_mode, op0);
15053 if (CONSTANT_P (op1))
15055 int tmp = standard_80387_constant_p (op1);
15056 if (tmp == 0)
15057 op1 = validize_mem (force_const_mem (op_mode, op1));
15058 else if (tmp == 1)
15060 if (TARGET_CMOVE)
15061 op1 = force_reg (op_mode, op1);
15063 else
15064 op1 = force_reg (op_mode, op1);
15068 /* Try to rearrange the comparison to make it cheaper. */
15069 if (ix86_fp_comparison_cost (code)
15070 > ix86_fp_comparison_cost (swap_condition (code))
15071 && (REG_P (op1) || can_create_pseudo_p ()))
15073 rtx tmp;
15074 tmp = op0, op0 = op1, op1 = tmp;
15075 code = swap_condition (code);
15076 if (!REG_P (op0))
15077 op0 = force_reg (op_mode, op0);
15080 *pop0 = op0;
15081 *pop1 = op1;
15082 return code;
15085 /* Convert the comparison codes we use to represent FP comparisons to the
15086 integer codes that will result in a proper branch. Return UNKNOWN if no
15087 such code is available. */
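/* This mirrors how fcomi / comiss / ucomiss set the integer flags: the
   result looks like an unsigned comparison (ZF and CF), so GT maps to GTU
   ("above"), GE to GEU ("above or equal"), and the UN* forms map to the
   plain unsigned codes.  */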
15089 enum rtx_code
15090 ix86_fp_compare_code_to_integer (enum rtx_code code)
15092 switch (code)
15094 case GT:
15095 return GTU;
15096 case GE:
15097 return GEU;
15098 case ORDERED:
15099 case UNORDERED:
15100 return code;
15101 break;
15102 case UNEQ:
15103 return EQ;
15104 break;
15105 case UNLT:
15106 return LTU;
15107 break;
15108 case UNLE:
15109 return LEU;
15110 break;
15111 case LTGT:
15112 return NE;
15113 break;
15114 default:
15115 return UNKNOWN;
15119 /* Generate insn patterns to do a floating point compare of OPERANDS. */
15121 static rtx
15122 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
15124 enum machine_mode fpcmp_mode, intcmp_mode;
15125 rtx tmp, tmp2;
15127 fpcmp_mode = ix86_fp_compare_mode (code);
15128 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
15130 /* Do fcomi/sahf based test when profitable. */
15131 switch (ix86_fp_comparison_strategy (code))
15133 case IX86_FPCMP_COMI:
15134 intcmp_mode = fpcmp_mode;
15135 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15136 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15137 tmp);
15138 emit_insn (tmp);
15139 break;
15141 case IX86_FPCMP_SAHF:
15142 intcmp_mode = fpcmp_mode;
15143 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15144 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
15145 tmp);
15147 if (!scratch)
15148 scratch = gen_reg_rtx (HImode);
15149 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
15150 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
15151 break;
15153 case IX86_FPCMP_ARITH:
15154 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
15155 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
15156 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
15157 if (!scratch)
15158 scratch = gen_reg_rtx (HImode);
15159 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
15161 /* In the unordered case, we have to check C2 for NaN's, which
15162 doesn't happen to work out to anything nice combination-wise.
15163 So do some bit twiddling on the value we've got in AH to come
15164 up with an appropriate set of condition codes. */
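/* After fnstsw %ax the FPU condition bits land in AH as C0 = 0x01,
   C2 = 0x04 and C3 = 0x40, so the magic constants below decode as
   0x45 = C3|C2|C0, 0x44 = C3|C2, 0x40 = C3, 0x05 = C2|C0 and 0x04 = C2;
   C2 is set only for unordered results.  */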
15166 intcmp_mode = CCNOmode;
15167 switch (code)
15169 case GT:
15170 case UNGT:
15171 if (code == GT || !TARGET_IEEE_FP)
15173 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15174 code = EQ;
15176 else
15178 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15179 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15180 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
15181 intcmp_mode = CCmode;
15182 code = GEU;
15184 break;
15185 case LT:
15186 case UNLT:
15187 if (code == LT && TARGET_IEEE_FP)
15189 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15190 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
15191 intcmp_mode = CCmode;
15192 code = EQ;
15194 else
15196 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
15197 code = NE;
15199 break;
15200 case GE:
15201 case UNGE:
15202 if (code == GE || !TARGET_IEEE_FP)
15204 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
15205 code = EQ;
15207 else
15209 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15210 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
15211 code = NE;
15213 break;
15214 case LE:
15215 case UNLE:
15216 if (code == LE && TARGET_IEEE_FP)
15218 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15219 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
15220 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15221 intcmp_mode = CCmode;
15222 code = LTU;
15224 else
15226 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
15227 code = NE;
15229 break;
15230 case EQ:
15231 case UNEQ:
15232 if (code == EQ && TARGET_IEEE_FP)
15234 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15235 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
15236 intcmp_mode = CCmode;
15237 code = EQ;
15239 else
15241 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15242 code = NE;
15244 break;
15245 case NE:
15246 case LTGT:
15247 if (code == NE && TARGET_IEEE_FP)
15249 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
15250 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
15251 GEN_INT (0x40)));
15252 code = NE;
15254 else
15256 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
15257 code = EQ;
15259 break;
15261 case UNORDERED:
15262 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15263 code = NE;
15264 break;
15265 case ORDERED:
15266 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
15267 code = EQ;
15268 break;
15270 default:
15271 gcc_unreachable ();
15273 break;
15275 default:
15276 gcc_unreachable();
15279 /* Return the test that should be put into the flags user, i.e.
15280 the bcc, scc, or cmov instruction. */
15281 return gen_rtx_fmt_ee (code, VOIDmode,
15282 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15283 const0_rtx);
15287 ix86_expand_compare (enum rtx_code code)
15289 rtx op0, op1, ret;
15290 op0 = ix86_compare_op0;
15291 op1 = ix86_compare_op1;
15293 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15294 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15296 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15298 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15299 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15301 else
15302 ret = ix86_expand_int_compare (code, op0, op1);
15304 return ret;
15307 void
15308 ix86_expand_branch (enum rtx_code code, rtx label)
15310 rtx tmp;
15312 switch (GET_MODE (ix86_compare_op0))
15314 case SFmode:
15315 case DFmode:
15316 case XFmode:
15317 case QImode:
15318 case HImode:
15319 case SImode:
15320 simple:
15321 tmp = ix86_expand_compare (code);
15322 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15323 gen_rtx_LABEL_REF (VOIDmode, label),
15324 pc_rtx);
15325 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15326 return;
15328 case DImode:
15329 if (TARGET_64BIT)
15330 goto simple;
15331 case TImode:
15332 /* Expand DImode branch into multiple compare+branch. */
15334 rtx lo[2], hi[2], label2;
15335 enum rtx_code code1, code2, code3;
15336 enum machine_mode submode;
15338 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15340 tmp = ix86_compare_op0;
15341 ix86_compare_op0 = ix86_compare_op1;
15342 ix86_compare_op1 = tmp;
15343 code = swap_condition (code);
15345 if (GET_MODE (ix86_compare_op0) == DImode)
15347 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15348 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15349 submode = SImode;
15351 else
15353 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15354 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15355 submode = DImode;
15358 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15359 avoid two branches. This costs one extra insn, so disable when
15360 optimizing for size. */
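/* I.e. for a double-word equality test, (hi0 ^ hi1) | (lo0 ^ lo1) is zero
   exactly when both halves match, so one compare-and-branch of the OR
   against zero suffices instead of two separate branches.  */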
15362 if ((code == EQ || code == NE)
15363 && (!optimize_insn_for_size_p ()
15364 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15366 rtx xor0, xor1;
15368 xor1 = hi[0];
15369 if (hi[1] != const0_rtx)
15370 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15371 NULL_RTX, 0, OPTAB_WIDEN);
15373 xor0 = lo[0];
15374 if (lo[1] != const0_rtx)
15375 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15376 NULL_RTX, 0, OPTAB_WIDEN);
15378 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15379 NULL_RTX, 0, OPTAB_WIDEN);
15381 ix86_compare_op0 = tmp;
15382 ix86_compare_op1 = const0_rtx;
15383 ix86_expand_branch (code, label);
15384 return;
15387 /* Otherwise, if we are doing less-than or greater-than-or-equal,
15388 op1 is a constant and the low word is zero, then we can just
15389 examine the high word. Similarly for low word -1 and
15390 less-than-or-equal or greater-than. */
15392 if (CONST_INT_P (hi[1]))
15393 switch (code)
15395 case LT: case LTU: case GE: case GEU:
15396 if (lo[1] == const0_rtx)
15398 ix86_compare_op0 = hi[0];
15399 ix86_compare_op1 = hi[1];
15400 ix86_expand_branch (code, label);
15401 return;
15403 break;
15404 case LE: case LEU: case GT: case GTU:
15405 if (lo[1] == constm1_rtx)
15407 ix86_compare_op0 = hi[0];
15408 ix86_compare_op1 = hi[1];
15409 ix86_expand_branch (code, label);
15410 return;
15412 break;
15413 default:
15414 break;
15417 /* Otherwise, we need two or three jumps. */
15419 label2 = gen_label_rtx ();
15421 code1 = code;
15422 code2 = swap_condition (code);
15423 code3 = unsigned_condition (code);
15425 switch (code)
15427 case LT: case GT: case LTU: case GTU:
15428 break;
15430 case LE: code1 = LT; code2 = GT; break;
15431 case GE: code1 = GT; code2 = LT; break;
15432 case LEU: code1 = LTU; code2 = GTU; break;
15433 case GEU: code1 = GTU; code2 = LTU; break;
15435 case EQ: code1 = UNKNOWN; code2 = NE; break;
15436 case NE: code2 = UNKNOWN; break;
15438 default:
15439 gcc_unreachable ();
15443 * a < b =>
15444 * if (hi(a) < hi(b)) goto true;
15445 * if (hi(a) > hi(b)) goto false;
15446 * if (lo(a) < lo(b)) goto true;
15447 * false:
15450 ix86_compare_op0 = hi[0];
15451 ix86_compare_op1 = hi[1];
15453 if (code1 != UNKNOWN)
15454 ix86_expand_branch (code1, label);
15455 if (code2 != UNKNOWN)
15456 ix86_expand_branch (code2, label2);
15458 ix86_compare_op0 = lo[0];
15459 ix86_compare_op1 = lo[1];
15460 ix86_expand_branch (code3, label);
15462 if (code2 != UNKNOWN)
15463 emit_label (label2);
15464 return;
15467 default:
15468 /* If we have already emitted a compare insn, go straight to simple.
15469 ix86_expand_compare won't emit anything if ix86_compare_emitted
15470 is non NULL. */
15471 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15472 goto simple;
15476 /* Split branch based on floating point condition. */
15477 void
15478 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15479 rtx target1, rtx target2, rtx tmp, rtx pushed)
15481 rtx condition;
15482 rtx i;
15484 if (target2 != pc_rtx)
15486 rtx tmp = target2;
15487 code = reverse_condition_maybe_unordered (code);
15488 target2 = target1;
15489 target1 = tmp;
15492 condition = ix86_expand_fp_compare (code, op1, op2,
15493 tmp);
15495 /* Remove pushed operand from stack. */
15496 if (pushed)
15497 ix86_free_from_memory (GET_MODE (pushed));
15499 i = emit_jump_insn (gen_rtx_SET
15500 (VOIDmode, pc_rtx,
15501 gen_rtx_IF_THEN_ELSE (VOIDmode,
15502 condition, target1, target2)));
15503 if (split_branch_probability >= 0)
15504 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
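/* Expand a setcc of comparison CODE on the operands in ix86_compare_op0 and
   ix86_compare_op1, storing the 0/1 result in the QImode destination DEST. */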
15507 void
15508 ix86_expand_setcc (enum rtx_code code, rtx dest)
15510 rtx ret;
15512 gcc_assert (GET_MODE (dest) == QImode);
15514 ret = ix86_expand_compare (code);
15515 PUT_MODE (ret, QImode);
15516 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15519 /* Expand a comparison setting or clearing the carry flag. Return true when
15520 successful and set *POP to the comparison operation. */
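/* For instance, an unsigned "a < b" maps directly onto the carry flag set by
   a compare, and "a == 0" is rewritten below as the unsigned "a < 1", so both
   can feed the sbb/adc based sequences used by the callers (illustrative). */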
15521 static bool
15522 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15524 enum machine_mode mode =
15525 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15527 /* Do not handle DImode compares that go through special path. */
15528 if (mode == (TARGET_64BIT ? TImode : DImode))
15529 return false;
15531 if (SCALAR_FLOAT_MODE_P (mode))
15533 rtx compare_op, compare_seq;
15535 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15537 /* Shortcut: the following common codes never translate
15538 into carry flag compares. */
15539 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15540 || code == ORDERED || code == UNORDERED)
15541 return false;
15543 /* These comparisons require the zero flag; swap the operands so they won't. */
15544 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15545 && !TARGET_IEEE_FP)
15547 rtx tmp = op0;
15548 op0 = op1;
15549 op1 = tmp;
15550 code = swap_condition (code);
15553 /* Try to expand the comparison and verify that we end up with
15554 a carry flag based comparison. This fails only when we decide
15555 to expand the comparison using arithmetic, which is not a
15556 common scenario. */
15557 start_sequence ();
15558 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15559 compare_seq = get_insns ();
15560 end_sequence ();
15562 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15563 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15564 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15565 else
15566 code = GET_CODE (compare_op);
15568 if (code != LTU && code != GEU)
15569 return false;
15571 emit_insn (compare_seq);
15572 *pop = compare_op;
15573 return true;
15576 if (!INTEGRAL_MODE_P (mode))
15577 return false;
15579 switch (code)
15581 case LTU:
15582 case GEU:
15583 break;
15585 /* Convert a==0 into (unsigned)a<1. */
15586 case EQ:
15587 case NE:
15588 if (op1 != const0_rtx)
15589 return false;
15590 op1 = const1_rtx;
15591 code = (code == EQ ? LTU : GEU);
15592 break;
15594 /* Convert a>b into b<a or a>=b+1. */
15595 case GTU:
15596 case LEU:
15597 if (CONST_INT_P (op1))
15599 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15600 /* Bail out on overflow. We still can swap operands but that
15601 would force loading of the constant into register. */
15602 if (op1 == const0_rtx
15603 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15604 return false;
15605 code = (code == GTU ? GEU : LTU);
15607 else
15609 rtx tmp = op1;
15610 op1 = op0;
15611 op0 = tmp;
15612 code = (code == GTU ? LTU : GEU);
15614 break;
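/* E.g. the unsigned "a > 5" becomes "a >= 6" (GEU) and "a <= 5" becomes
   "a < 6" (LTU), both of which are carry flag compares (illustrative). */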
15616 /* Convert a>=0 into (unsigned)a<0x80000000. */
15617 case LT:
15618 case GE:
15619 if (mode == DImode || op1 != const0_rtx)
15620 return false;
15621 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15622 code = (code == LT ? GEU : LTU);
15623 break;
15624 case LE:
15625 case GT:
15626 if (mode == DImode || op1 != constm1_rtx)
15627 return false;
15628 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15629 code = (code == LE ? GEU : LTU);
15630 break;
15632 default:
15633 return false;
15635 /* Swapping operands may cause constant to appear as first operand. */
15636 if (!nonimmediate_operand (op0, VOIDmode))
15638 if (!can_create_pseudo_p ())
15639 return false;
15640 op0 = force_reg (mode, op0);
15642 ix86_compare_op0 = op0;
15643 ix86_compare_op1 = op1;
15644 *pop = ix86_expand_compare (code);
15645 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
15646 return true;
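/* Expand an integer conditional move on OPERANDS (destination, comparison,
   value if true, value if false). Returns 1 when the insns have been emitted
   (DONE) and 0 when the caller should fall back to a generic expansion
   (FAIL). */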
15650 ix86_expand_int_movcc (rtx operands[])
15652 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15653 rtx compare_seq, compare_op;
15654 enum machine_mode mode = GET_MODE (operands[0]);
15655 bool sign_bit_compare_p = false;
15657 start_sequence ();
15658 ix86_compare_op0 = XEXP (operands[1], 0);
15659 ix86_compare_op1 = XEXP (operands[1], 1);
15660 compare_op = ix86_expand_compare (code);
15661 compare_seq = get_insns ();
15662 end_sequence ();
15664 compare_code = GET_CODE (compare_op);
15666 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15667 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15668 sign_bit_compare_p = true;
15670 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15671 HImode insns, we'd be swallowed in word prefix ops. */
15673 if ((mode != HImode || TARGET_FAST_PREFIX)
15674 && (mode != (TARGET_64BIT ? TImode : DImode))
15675 && CONST_INT_P (operands[2])
15676 && CONST_INT_P (operands[3]))
15678 rtx out = operands[0];
15679 HOST_WIDE_INT ct = INTVAL (operands[2]);
15680 HOST_WIDE_INT cf = INTVAL (operands[3]);
15681 HOST_WIDE_INT diff;
15683 diff = ct - cf;
15684 /* Sign bit compares are better done using shifts than by using
15685 sbb. */
15686 if (sign_bit_compare_p
15687 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15688 ix86_compare_op1, &compare_op))
15690 /* Detect overlap between destination and compare sources. */
15691 rtx tmp = out;
15693 if (!sign_bit_compare_p)
15695 rtx flags;
15696 bool fpcmp = false;
15698 compare_code = GET_CODE (compare_op);
15700 flags = XEXP (compare_op, 0);
15702 if (GET_MODE (flags) == CCFPmode
15703 || GET_MODE (flags) == CCFPUmode)
15705 fpcmp = true;
15706 compare_code
15707 = ix86_fp_compare_code_to_integer (compare_code);
15710 /* To simplify rest of code, restrict to the GEU case. */
15711 if (compare_code == LTU)
15713 HOST_WIDE_INT tmp = ct;
15714 ct = cf;
15715 cf = tmp;
15716 compare_code = reverse_condition (compare_code);
15717 code = reverse_condition (code);
15719 else
15721 if (fpcmp)
15722 PUT_CODE (compare_op,
15723 reverse_condition_maybe_unordered
15724 (GET_CODE (compare_op)));
15725 else
15726 PUT_CODE (compare_op,
15727 reverse_condition (GET_CODE (compare_op)));
15729 diff = ct - cf;
15731 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15732 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15733 tmp = gen_reg_rtx (mode);
15735 if (mode == DImode)
15736 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15737 else
15738 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15739 flags, compare_op));
15741 else
15743 if (code == GT || code == GE)
15744 code = reverse_condition (code);
15745 else
15747 HOST_WIDE_INT tmp = ct;
15748 ct = cf;
15749 cf = tmp;
15750 diff = ct - cf;
15752 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15753 ix86_compare_op1, VOIDmode, 0, -1);
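/* At this point TMP holds either all ones or all zeros (the sbb or the
   store-flag above produces -1/0), so the constant selection below can be
   done with plain add/or/not/and arithmetic on that mask. */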
15756 if (diff == 1)
15759 * cmpl op0,op1
15760 * sbbl dest,dest
15761 * [addl dest, ct]
15763 * Size 5 - 8.
15765 if (ct)
15766 tmp = expand_simple_binop (mode, PLUS,
15767 tmp, GEN_INT (ct),
15768 copy_rtx (tmp), 1, OPTAB_DIRECT);
15770 else if (cf == -1)
15773 * cmpl op0,op1
15774 * sbbl dest,dest
15775 * orl $ct, dest
15777 * Size 8.
15779 tmp = expand_simple_binop (mode, IOR,
15780 tmp, GEN_INT (ct),
15781 copy_rtx (tmp), 1, OPTAB_DIRECT);
15783 else if (diff == -1 && ct)
15786 * cmpl op0,op1
15787 * sbbl dest,dest
15788 * notl dest
15789 * [addl dest, cf]
15791 * Size 8 - 11.
15793 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15794 if (cf)
15795 tmp = expand_simple_binop (mode, PLUS,
15796 copy_rtx (tmp), GEN_INT (cf),
15797 copy_rtx (tmp), 1, OPTAB_DIRECT);
15799 else
15802 * cmpl op0,op1
15803 * sbbl dest,dest
15804 * [notl dest]
15805 * andl cf - ct, dest
15806 * [addl dest, ct]
15808 * Size 8 - 11.
15811 if (cf == 0)
15813 cf = ct;
15814 ct = 0;
15815 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15818 tmp = expand_simple_binop (mode, AND,
15819 copy_rtx (tmp),
15820 gen_int_mode (cf - ct, mode),
15821 copy_rtx (tmp), 1, OPTAB_DIRECT);
15822 if (ct)
15823 tmp = expand_simple_binop (mode, PLUS,
15824 copy_rtx (tmp), GEN_INT (ct),
15825 copy_rtx (tmp), 1, OPTAB_DIRECT);
15828 if (!rtx_equal_p (tmp, out))
15829 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15831 return 1; /* DONE */
15834 if (diff < 0)
15836 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15838 HOST_WIDE_INT tmp;
15839 tmp = ct, ct = cf, cf = tmp;
15840 diff = -diff;
15842 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15844 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15846 /* We may be reversing an unordered compare to a normal compare, which
15847 is not valid in general (we may convert a non-trapping condition
15848 to a trapping one); however, on i386 we currently emit all
15849 comparisons unordered. */
15850 compare_code = reverse_condition_maybe_unordered (compare_code);
15851 code = reverse_condition_maybe_unordered (code);
15853 else
15855 compare_code = reverse_condition (compare_code);
15856 code = reverse_condition (code);
15860 compare_code = UNKNOWN;
15861 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15862 && CONST_INT_P (ix86_compare_op1))
15864 if (ix86_compare_op1 == const0_rtx
15865 && (code == LT || code == GE))
15866 compare_code = code;
15867 else if (ix86_compare_op1 == constm1_rtx)
15869 if (code == LE)
15870 compare_code = LT;
15871 else if (code == GT)
15872 compare_code = GE;
15876 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15877 if (compare_code != UNKNOWN
15878 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15879 && (cf == -1 || ct == -1))
15881 /* If lea code below could be used, only optimize
15882 if it results in a 2 insn sequence. */
15884 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15885 || diff == 3 || diff == 5 || diff == 9)
15886 || (compare_code == LT && ct == -1)
15887 || (compare_code == GE && cf == -1))
15890 * notl op1 (if necessary)
15891 * sarl $31, op1
15892 * orl cf, op1
15894 if (ct != -1)
15896 cf = ct;
15897 ct = -1;
15898 code = reverse_condition (code);
15901 out = emit_store_flag (out, code, ix86_compare_op0,
15902 ix86_compare_op1, VOIDmode, 0, -1);
15904 out = expand_simple_binop (mode, IOR,
15905 out, GEN_INT (cf),
15906 out, 1, OPTAB_DIRECT);
15907 if (out != operands[0])
15908 emit_move_insn (operands[0], out);
15910 return 1; /* DONE */
15915 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15916 || diff == 3 || diff == 5 || diff == 9)
15917 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15918 && (mode != DImode
15919 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15922 * xorl dest,dest
15923 * cmpl op1,op2
15924 * setcc dest
15925 * lea cf(dest*(ct-cf)),dest
15927 * Size 14.
15929 * This also catches the degenerate setcc-only case.
15932 rtx tmp;
15933 int nops;
15935 out = emit_store_flag (out, code, ix86_compare_op0,
15936 ix86_compare_op1, VOIDmode, 0, 1);
15938 nops = 0;
15939 /* On x86_64 the lea instruction operates on Pmode, so we need
15940 to get the arithmetic done in the proper mode to match. */
15941 if (diff == 1)
15942 tmp = copy_rtx (out);
15943 else
15945 rtx out1;
15946 out1 = copy_rtx (out);
15947 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15948 nops++;
15949 if (diff & 1)
15951 tmp = gen_rtx_PLUS (mode, tmp, out1);
15952 nops++;
15955 if (cf != 0)
15957 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15958 nops++;
15960 if (!rtx_equal_p (tmp, out))
15962 if (nops == 1)
15963 out = force_operand (tmp, copy_rtx (out));
15964 else
15965 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15967 if (!rtx_equal_p (out, operands[0]))
15968 emit_move_insn (operands[0], copy_rtx (out));
15970 return 1; /* DONE */
15974 * General case: Jumpful:
15975 * xorl dest,dest cmpl op1, op2
15976 * cmpl op1, op2 movl ct, dest
15977 * setcc dest jcc 1f
15978 * decl dest movl cf, dest
15979 * andl (cf-ct),dest 1:
15980 * addl ct,dest
15982 * Size 20. Size 14.
15984 * This is reasonably steep, but branch mispredict costs are
15985 * high on modern cpus, so consider failing only if optimizing
15986 * for space.
15989 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15990 && BRANCH_COST (optimize_insn_for_speed_p (),
15991 false) >= 2)
15993 if (cf == 0)
15995 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15997 cf = ct;
15998 ct = 0;
16000 if (SCALAR_FLOAT_MODE_P (cmp_mode))
16002 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
16004 /* We may be reversing an unordered compare to a normal compare,
16005 which is not valid in general (we may convert a non-trapping
16006 condition to a trapping one); however, on i386 we currently
16007 emit all comparisons unordered. */
16008 code = reverse_condition_maybe_unordered (code);
16010 else
16012 code = reverse_condition (code);
16013 if (compare_code != UNKNOWN)
16014 compare_code = reverse_condition (compare_code);
16018 if (compare_code != UNKNOWN)
16020 /* notl op1 (if needed)
16021 sarl $31, op1
16022 andl (cf-ct), op1
16023 addl ct, op1
16025 For x < 0 (resp. x <= -1) there will be no notl,
16026 so if possible swap the constants to get rid of the
16027 complement.
16028 True/false will be -1/0 while code below (store flag
16029 followed by decrement) is 0/-1, so the constants need
16030 to be exchanged once more. */
16032 if (compare_code == GE || !cf)
16034 code = reverse_condition (code);
16035 compare_code = LT;
16037 else
16039 HOST_WIDE_INT tmp = cf;
16040 cf = ct;
16041 ct = tmp;
16044 out = emit_store_flag (out, code, ix86_compare_op0,
16045 ix86_compare_op1, VOIDmode, 0, -1);
16047 else
16049 out = emit_store_flag (out, code, ix86_compare_op0,
16050 ix86_compare_op1, VOIDmode, 0, 1);
16052 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
16053 copy_rtx (out), 1, OPTAB_DIRECT);
16056 out = expand_simple_binop (mode, AND, copy_rtx (out),
16057 gen_int_mode (cf - ct, mode),
16058 copy_rtx (out), 1, OPTAB_DIRECT);
16059 if (ct)
16060 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
16061 copy_rtx (out), 1, OPTAB_DIRECT);
16062 if (!rtx_equal_p (out, operands[0]))
16063 emit_move_insn (operands[0], copy_rtx (out));
16065 return 1; /* DONE */
16069 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
16071 /* Try a few things more with specific constants and a variable. */
16073 optab op;
16074 rtx var, orig_out, out, tmp;
16076 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
16077 return 0; /* FAIL */
16079 /* If one of the two operands is an interesting constant, load a
16080 constant with the above and mask it in with a logical operation. */
16082 if (CONST_INT_P (operands[2]))
16084 var = operands[3];
16085 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
16086 operands[3] = constm1_rtx, op = and_optab;
16087 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
16088 operands[3] = const0_rtx, op = ior_optab;
16089 else
16090 return 0; /* FAIL */
16092 else if (CONST_INT_P (operands[3]))
16094 var = operands[2];
16095 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
16096 operands[2] = constm1_rtx, op = and_optab;
16097 else if (INTVAL (operands[3]) == -1 && operands[3] != const0_rtx)
16098 operands[2] = const0_rtx, op = ior_optab;
16099 else
16100 return 0; /* FAIL */
16102 else
16103 return 0; /* FAIL */
16105 orig_out = operands[0];
16106 tmp = gen_reg_rtx (mode);
16107 operands[0] = tmp;
16109 /* Recurse to get the constant loaded. */
16110 if (ix86_expand_int_movcc (operands) == 0)
16111 return 0; /* FAIL */
16113 /* Mask in the interesting variable. */
16114 out = expand_binop (mode, op, var, tmp, orig_out, 0,
16115 OPTAB_WIDEN);
16116 if (!rtx_equal_p (out, orig_out))
16117 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
16119 return 1; /* DONE */
16123 * For comparison with above,
16125 * movl cf,dest
16126 * movl ct,tmp
16127 * cmpl op1,op2
16128 * cmovcc tmp,dest
16130 * Size 15.
16133 if (! nonimmediate_operand (operands[2], mode))
16134 operands[2] = force_reg (mode, operands[2]);
16135 if (! nonimmediate_operand (operands[3], mode))
16136 operands[3] = force_reg (mode, operands[3]);
16138 if (! register_operand (operands[2], VOIDmode)
16139 && (mode == QImode
16140 || ! register_operand (operands[3], VOIDmode)))
16141 operands[2] = force_reg (mode, operands[2]);
16143 if (mode == QImode
16144 && ! register_operand (operands[3], VOIDmode))
16145 operands[3] = force_reg (mode, operands[3]);
16147 emit_insn (compare_seq);
16148 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16149 gen_rtx_IF_THEN_ELSE (mode,
16150 compare_op, operands[2],
16151 operands[3])));
16153 return 1; /* DONE */
16156 /* Swap, force into registers, or otherwise massage the two operands
16157 to an sse comparison with a mask result. Thus we differ a bit from
16158 ix86_prepare_fp_compare_args which expects to produce a flags result.
16160 The DEST operand exists to help determine whether to commute commutative
16161 operators. The POP0/POP1 operands are updated in place. The new
16162 comparison code is returned, or UNKNOWN if not implementable. */
16164 static enum rtx_code
16165 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
16166 rtx *pop0, rtx *pop1)
16168 rtx tmp;
16170 switch (code)
16172 case LTGT:
16173 case UNEQ:
16174 /* We have no LTGT as an operator. We could implement it with
16175 NE & ORDERED, but this requires an extra temporary. It's
16176 not clear that it's worth it. */
16177 return UNKNOWN;
16179 case LT:
16180 case LE:
16181 case UNGT:
16182 case UNGE:
16183 /* These are supported directly. */
16184 break;
16186 case EQ:
16187 case NE:
16188 case UNORDERED:
16189 case ORDERED:
16190 /* For commutative operators, try to canonicalize the destination
16191 operand to be first in the comparison - this helps reload to
16192 avoid extra moves. */
16193 if (!dest || !rtx_equal_p (dest, *pop1))
16194 break;
16195 /* FALLTHRU */
16197 case GE:
16198 case GT:
16199 case UNLE:
16200 case UNLT:
16201 /* These are not supported directly. Swap the comparison operands
16202 to transform into something that is supported. */
16203 tmp = *pop0;
16204 *pop0 = *pop1;
16205 *pop1 = tmp;
16206 code = swap_condition (code);
16207 break;
16209 default:
16210 gcc_unreachable ();
16213 return code;
16216 /* Detect conditional moves that exactly match min/max operational
16217 semantics. Note that this is IEEE safe, as long as we don't
16218 interchange the operands.
16220 Returns FALSE if this conditional move doesn't match a MIN/MAX,
16221 and TRUE if the operation is successful and instructions are emitted. */
16223 static bool
16224 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
16225 rtx cmp_op1, rtx if_true, rtx if_false)
16227 enum machine_mode mode;
16228 bool is_min;
16229 rtx tmp;
16231 if (code == LT)
16233 else if (code == UNGE)
16235 tmp = if_true;
16236 if_true = if_false;
16237 if_false = tmp;
16239 else
16240 return false;
16242 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
16243 is_min = true;
16244 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
16245 is_min = false;
16246 else
16247 return false;
16249 mode = GET_MODE (dest);
16251 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
16252 but MODE may be a vector mode and thus not appropriate. */
16253 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
16255 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
16256 rtvec v;
16258 if_true = force_reg (mode, if_true);
16259 v = gen_rtvec (2, if_true, if_false);
16260 tmp = gen_rtx_UNSPEC (mode, v, u);
16262 else
16264 code = is_min ? SMIN : SMAX;
16265 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
16268 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
16269 return true;
16272 /* Expand an sse vector comparison. Return the register with the result. */
16274 static rtx
16275 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16276 rtx op_true, rtx op_false)
16278 enum machine_mode mode = GET_MODE (dest);
16279 rtx x;
16281 cmp_op0 = force_reg (mode, cmp_op0);
16282 if (!nonimmediate_operand (cmp_op1, mode))
16283 cmp_op1 = force_reg (mode, cmp_op1);
16285 if (optimize
16286 || reg_overlap_mentioned_p (dest, op_true)
16287 || reg_overlap_mentioned_p (dest, op_false))
16288 dest = gen_reg_rtx (mode);
16290 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16291 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16293 return dest;
16296 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16297 operations. This is used for both scalar and vector conditional moves. */
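/* The general pattern is dest = (cmp & op_true) | (~cmp & op_false), relying
   on CMP being an all-ones/all-zeros mask per element; the special cases
   below drop the AND/ANDN halves when one arm is zero, or use a single
   conditional-move instruction when XOP is available. */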
16299 static void
16300 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16302 enum machine_mode mode = GET_MODE (dest);
16303 rtx t2, t3, x;
16305 if (op_false == CONST0_RTX (mode))
16307 op_true = force_reg (mode, op_true);
16308 x = gen_rtx_AND (mode, cmp, op_true);
16309 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16311 else if (op_true == CONST0_RTX (mode))
16313 op_false = force_reg (mode, op_false);
16314 x = gen_rtx_NOT (mode, cmp);
16315 x = gen_rtx_AND (mode, x, op_false);
16316 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16318 else if (TARGET_XOP)
16320 rtx pcmov = gen_rtx_SET (mode, dest,
16321 gen_rtx_IF_THEN_ELSE (mode, cmp,
16322 op_true,
16323 op_false));
16324 emit_insn (pcmov);
16326 else
16328 op_true = force_reg (mode, op_true);
16329 op_false = force_reg (mode, op_false);
16331 t2 = gen_reg_rtx (mode);
16332 if (optimize)
16333 t3 = gen_reg_rtx (mode);
16334 else
16335 t3 = dest;
16337 x = gen_rtx_AND (mode, op_true, cmp);
16338 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16340 x = gen_rtx_NOT (mode, cmp);
16341 x = gen_rtx_AND (mode, x, op_false);
16342 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16344 x = gen_rtx_IOR (mode, t3, t2);
16345 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16349 /* Expand a floating-point conditional move. Return true if successful. */
16352 ix86_expand_fp_movcc (rtx operands[])
16354 enum machine_mode mode = GET_MODE (operands[0]);
16355 enum rtx_code code = GET_CODE (operands[1]);
16356 rtx tmp, compare_op;
16358 ix86_compare_op0 = XEXP (operands[1], 0);
16359 ix86_compare_op1 = XEXP (operands[1], 1);
16360 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16362 enum machine_mode cmode;
16364 /* Since we've no cmove for sse registers, don't force bad register
16365 allocation just to gain access to it. Deny movcc when the
16366 comparison mode doesn't match the move mode. */
16367 cmode = GET_MODE (ix86_compare_op0);
16368 if (cmode == VOIDmode)
16369 cmode = GET_MODE (ix86_compare_op1);
16370 if (cmode != mode)
16371 return 0;
16373 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16374 &ix86_compare_op0,
16375 &ix86_compare_op1);
16376 if (code == UNKNOWN)
16377 return 0;
16379 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16380 ix86_compare_op1, operands[2],
16381 operands[3]))
16382 return 1;
16384 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16385 ix86_compare_op1, operands[2], operands[3]);
16386 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16387 return 1;
16390 /* The floating point conditional move instructions don't directly
16391 support conditions resulting from a signed integer comparison. */
16393 compare_op = ix86_expand_compare (code);
16394 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16396 tmp = gen_reg_rtx (QImode);
16397 ix86_expand_setcc (code, tmp);
16398 code = NE;
16399 ix86_compare_op0 = tmp;
16400 ix86_compare_op1 = const0_rtx;
16401 compare_op = ix86_expand_compare (code);
16404 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16405 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16406 operands[2], operands[3])));
16408 return 1;
16411 /* Expand a floating-point vector conditional move; a vcond operation
16412 rather than a movcc operation. */
16414 bool
16415 ix86_expand_fp_vcond (rtx operands[])
16417 enum rtx_code code = GET_CODE (operands[3]);
16418 rtx cmp;
16420 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16421 &operands[4], &operands[5]);
16422 if (code == UNKNOWN)
16423 return false;
16425 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16426 operands[5], operands[1], operands[2]))
16427 return true;
16429 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16430 operands[1], operands[2]);
16431 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16432 return true;
16435 /* Expand a signed/unsigned integral vector conditional move. */
16437 bool
16438 ix86_expand_int_vcond (rtx operands[])
16440 enum machine_mode mode = GET_MODE (operands[0]);
16441 enum rtx_code code = GET_CODE (operands[3]);
16442 bool negate = false;
16443 rtx x, cop0, cop1;
16445 cop0 = operands[4];
16446 cop1 = operands[5];
16448 /* XOP supports all of the comparisons on all vector int types. */
16449 if (!TARGET_XOP)
16451 /* Canonicalize the comparison to EQ, GT, GTU. */
16452 switch (code)
16454 case EQ:
16455 case GT:
16456 case GTU:
16457 break;
16459 case NE:
16460 case LE:
16461 case LEU:
16462 code = reverse_condition (code);
16463 negate = true;
16464 break;
16466 case GE:
16467 case GEU:
16468 code = reverse_condition (code);
16469 negate = true;
16470 /* FALLTHRU */
16472 case LT:
16473 case LTU:
16474 code = swap_condition (code);
16475 x = cop0, cop0 = cop1, cop1 = x;
16476 break;
16478 default:
16479 gcc_unreachable ();
16482 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16483 if (mode == V2DImode)
16485 switch (code)
16487 case EQ:
16488 /* SSE4.1 supports EQ. */
16489 if (!TARGET_SSE4_1)
16490 return false;
16491 break;
16493 case GT:
16494 case GTU:
16495 /* SSE4.2 supports GT/GTU. */
16496 if (!TARGET_SSE4_2)
16497 return false;
16498 break;
16500 default:
16501 gcc_unreachable ();
16505 /* Unsigned parallel compare is not supported by the hardware.
16506 Play some tricks to turn this into a signed comparison
16507 against 0. */
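/* E.g. for V4SImode, "a >u b" is rewritten (illustratively) as
   "(a - 0x80000000) >s (b - 0x80000000)"; for V16QI/V8HI a saturating
   subtraction is used instead and the result is compared against zero. */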
16508 if (code == GTU)
16510 cop0 = force_reg (mode, cop0);
16512 switch (mode)
16514 case V4SImode:
16515 case V2DImode:
16517 rtx t1, t2, mask;
16518 rtx (*gen_sub3) (rtx, rtx, rtx);
16520 /* Subtract (-(INT MAX) - 1) from both operands to make
16521 them signed. */
16522 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16523 true, false);
16524 gen_sub3 = (mode == V4SImode
16525 ? gen_subv4si3 : gen_subv2di3);
16526 t1 = gen_reg_rtx (mode);
16527 emit_insn (gen_sub3 (t1, cop0, mask));
16529 t2 = gen_reg_rtx (mode);
16530 emit_insn (gen_sub3 (t2, cop1, mask));
16532 cop0 = t1;
16533 cop1 = t2;
16534 code = GT;
16536 break;
16538 case V16QImode:
16539 case V8HImode:
16540 /* Perform a parallel unsigned saturating subtraction. */
16541 x = gen_reg_rtx (mode);
16542 emit_insn (gen_rtx_SET (VOIDmode, x,
16543 gen_rtx_US_MINUS (mode, cop0, cop1)));
16545 cop0 = x;
16546 cop1 = CONST0_RTX (mode);
16547 code = EQ;
16548 negate = !negate;
16549 break;
16551 default:
16552 gcc_unreachable ();
16557 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16558 operands[1+negate], operands[2-negate]);
16560 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16561 operands[2-negate]);
16562 return true;
16565 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16566 true if we should do zero extension, else sign extension. HIGH_P is
16567 true if we want the N/2 high elements, else the low elements. */
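/* This is done by interleaving OP[1] with either a zero vector (zero
   extension) or with a mask of its elements' sign bits, computed below as
   the vector compare 0 > OP[1] (sign extension), so each element is widened
   to twice its width. */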
16569 void
16570 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16572 enum machine_mode imode = GET_MODE (operands[1]);
16573 rtx (*unpack)(rtx, rtx, rtx);
16574 rtx se, dest;
16576 switch (imode)
16578 case V16QImode:
16579 if (high_p)
16580 unpack = gen_vec_interleave_highv16qi;
16581 else
16582 unpack = gen_vec_interleave_lowv16qi;
16583 break;
16584 case V8HImode:
16585 if (high_p)
16586 unpack = gen_vec_interleave_highv8hi;
16587 else
16588 unpack = gen_vec_interleave_lowv8hi;
16589 break;
16590 case V4SImode:
16591 if (high_p)
16592 unpack = gen_vec_interleave_highv4si;
16593 else
16594 unpack = gen_vec_interleave_lowv4si;
16595 break;
16596 default:
16597 gcc_unreachable ();
16600 dest = gen_lowpart (imode, operands[0]);
16602 if (unsigned_p)
16603 se = force_reg (imode, CONST0_RTX (imode));
16604 else
16605 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16606 operands[1], pc_rtx, pc_rtx);
16608 emit_insn (unpack (dest, operands[1], se));
16611 /* This function performs the same task as ix86_expand_sse_unpack,
16612 but with SSE4.1 instructions. */
16614 void
16615 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16617 enum machine_mode imode = GET_MODE (operands[1]);
16618 rtx (*unpack)(rtx, rtx);
16619 rtx src, dest;
16621 switch (imode)
16623 case V16QImode:
16624 if (unsigned_p)
16625 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16626 else
16627 unpack = gen_sse4_1_extendv8qiv8hi2;
16628 break;
16629 case V8HImode:
16630 if (unsigned_p)
16631 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16632 else
16633 unpack = gen_sse4_1_extendv4hiv4si2;
16634 break;
16635 case V4SImode:
16636 if (unsigned_p)
16637 unpack = gen_sse4_1_zero_extendv2siv2di2;
16638 else
16639 unpack = gen_sse4_1_extendv2siv2di2;
16640 break;
16641 default:
16642 gcc_unreachable ();
16645 dest = operands[0];
16646 if (high_p)
16648 /* Shift higher 8 bytes to lower 8 bytes. */
16649 src = gen_reg_rtx (imode);
16650 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16651 gen_lowpart (V1TImode, operands[1]),
16652 GEN_INT (64)));
16654 else
16655 src = operands[1];
16657 emit_insn (unpack (dest, src));
16660 /* Expand conditional increment or decrement using adc/sbb instructions.
16661 The default case using setcc followed by the conditional move can be
16662 done by generic code. */
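/* E.g. "x += (a < b)" for unsigned a, b can be emitted as a compare of a
   against b followed by an add-with-carry of 0 into x, letting the carry
   flag perform the conditional increment without a branch or cmov
   (illustrative; the insn choice below also covers sbb for decrements). */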
16664 ix86_expand_int_addcc (rtx operands[])
16666 enum rtx_code code = GET_CODE (operands[1]);
16667 rtx flags;
16668 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16669 rtx compare_op;
16670 rtx val = const0_rtx;
16671 bool fpcmp = false;
16672 enum machine_mode mode;
16674 ix86_compare_op0 = XEXP (operands[1], 0);
16675 ix86_compare_op1 = XEXP (operands[1], 1);
16676 if (operands[3] != const1_rtx
16677 && operands[3] != constm1_rtx)
16678 return 0;
16679 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16680 ix86_compare_op1, &compare_op))
16681 return 0;
16682 code = GET_CODE (compare_op);
16684 flags = XEXP (compare_op, 0);
16686 if (GET_MODE (flags) == CCFPmode
16687 || GET_MODE (flags) == CCFPUmode)
16689 fpcmp = true;
16690 code = ix86_fp_compare_code_to_integer (code);
16693 if (code != LTU)
16695 val = constm1_rtx;
16696 if (fpcmp)
16697 PUT_CODE (compare_op,
16698 reverse_condition_maybe_unordered
16699 (GET_CODE (compare_op)));
16700 else
16701 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16704 mode = GET_MODE (operands[0]);
16706 /* Construct either adc or sbb insn. */
16707 if ((code == LTU) == (operands[3] == constm1_rtx))
16709 switch (mode)
16711 case QImode:
16712 insn = gen_subqi3_carry;
16713 break;
16714 case HImode:
16715 insn = gen_subhi3_carry;
16716 break;
16717 case SImode:
16718 insn = gen_subsi3_carry;
16719 break;
16720 case DImode:
16721 insn = gen_subdi3_carry;
16722 break;
16723 default:
16724 gcc_unreachable ();
16727 else
16729 switch (mode)
16731 case QImode:
16732 insn = gen_addqi3_carry;
16733 break;
16734 case HImode:
16735 insn = gen_addhi3_carry;
16736 break;
16737 case SImode:
16738 insn = gen_addsi3_carry;
16739 break;
16740 case DImode:
16741 insn = gen_adddi3_carry;
16742 break;
16743 default:
16744 gcc_unreachable ();
16747 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16749 return 1; /* DONE */
16753 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16754 works for floating point parameters and non-offsettable memories.
16755 For pushes, it returns just stack offsets; the values will be saved
16756 in the right order. At most four parts are generated. */
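/* E.g. on a 32-bit target a DFmode value splits into two SImode parts and an
   XFmode value into three; on a 64-bit target a TFmode value splits into two
   DImode parts (illustrative of the size computation below). */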
16758 static int
16759 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16761 int size;
16763 if (!TARGET_64BIT)
16764 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16765 else
16766 size = (GET_MODE_SIZE (mode) + 4) / 8;
16768 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16769 gcc_assert (size >= 2 && size <= 4);
16771 /* Optimize constant pool reference to immediates. This is used by fp
16772 moves, that force all constants to memory to allow combining. */
16773 if (MEM_P (operand) && MEM_READONLY_P (operand))
16775 rtx tmp = maybe_get_pool_constant (operand);
16776 if (tmp)
16777 operand = tmp;
16780 if (MEM_P (operand) && !offsettable_memref_p (operand))
16782 /* The only non-offsettable memories we handle are pushes. */
16783 int ok = push_operand (operand, VOIDmode);
16785 gcc_assert (ok);
16787 operand = copy_rtx (operand);
16788 PUT_MODE (operand, Pmode);
16789 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16790 return size;
16793 if (GET_CODE (operand) == CONST_VECTOR)
16795 enum machine_mode imode = int_mode_for_mode (mode);
16796 /* Caution: if we looked through a constant pool memory above,
16797 the operand may actually have a different mode now. That's
16798 ok, since we want to pun this all the way back to an integer. */
16799 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16800 gcc_assert (operand != NULL);
16801 mode = imode;
16804 if (!TARGET_64BIT)
16806 if (mode == DImode)
16807 split_di (&operand, 1, &parts[0], &parts[1]);
16808 else
16810 int i;
16812 if (REG_P (operand))
16814 gcc_assert (reload_completed);
16815 for (i = 0; i < size; i++)
16816 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16818 else if (offsettable_memref_p (operand))
16820 operand = adjust_address (operand, SImode, 0);
16821 parts[0] = operand;
16822 for (i = 1; i < size; i++)
16823 parts[i] = adjust_address (operand, SImode, 4 * i);
16825 else if (GET_CODE (operand) == CONST_DOUBLE)
16827 REAL_VALUE_TYPE r;
16828 long l[4];
16830 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16831 switch (mode)
16833 case TFmode:
16834 real_to_target (l, &r, mode);
16835 parts[3] = gen_int_mode (l[3], SImode);
16836 parts[2] = gen_int_mode (l[2], SImode);
16837 break;
16838 case XFmode:
16839 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16840 parts[2] = gen_int_mode (l[2], SImode);
16841 break;
16842 case DFmode:
16843 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16844 break;
16845 default:
16846 gcc_unreachable ();
16848 parts[1] = gen_int_mode (l[1], SImode);
16849 parts[0] = gen_int_mode (l[0], SImode);
16851 else
16852 gcc_unreachable ();
16855 else
16857 if (mode == TImode)
16858 split_ti (&operand, 1, &parts[0], &parts[1]);
16859 if (mode == XFmode || mode == TFmode)
16861 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
16862 if (REG_P (operand))
16864 gcc_assert (reload_completed);
16865 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16866 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16868 else if (offsettable_memref_p (operand))
16870 operand = adjust_address (operand, DImode, 0);
16871 parts[0] = operand;
16872 parts[1] = adjust_address (operand, upper_mode, 8);
16874 else if (GET_CODE (operand) == CONST_DOUBLE)
16876 REAL_VALUE_TYPE r;
16877 long l[4];
16879 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16880 real_to_target (l, &r, mode);
16882 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16883 if (HOST_BITS_PER_WIDE_INT >= 64)
16884 parts[0]
16885 = gen_int_mode
16886 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16887 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16888 DImode);
16889 else
16890 parts[0] = immed_double_const (l[0], l[1], DImode);
16892 if (upper_mode == SImode)
16893 parts[1] = gen_int_mode (l[2], SImode);
16894 else if (HOST_BITS_PER_WIDE_INT >= 64)
16895 parts[1]
16896 = gen_int_mode
16897 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16898 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16899 DImode);
16900 else
16901 parts[1] = immed_double_const (l[2], l[3], DImode);
16903 else
16904 gcc_unreachable ();
16908 return size;
16911 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16912 All required insns are emitted here. Operands 2-5 receive the
16913 destination parts in the correct order; operands 6-9 receive the
16914 corresponding source parts. */
16916 void
16917 ix86_split_long_move (rtx operands[])
16919 rtx part[2][4];
16920 int nparts, i, j;
16921 int push = 0;
16922 int collisions = 0;
16923 enum machine_mode mode = GET_MODE (operands[0]);
16924 bool collisionparts[4];
16926 /* The DFmode expanders may ask us to move a double.
16927 For a 64-bit target this is a single move. By hiding that fact
16928 here we simplify the i386.md splitters. */
16929 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16931 /* Optimize constant pool reference to immediates. This is used by
16932 fp moves, that force all constants to memory to allow combining. */
16934 if (MEM_P (operands[1])
16935 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16936 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16937 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16938 if (push_operand (operands[0], VOIDmode))
16940 operands[0] = copy_rtx (operands[0]);
16941 PUT_MODE (operands[0], Pmode);
16943 else
16944 operands[0] = gen_lowpart (DImode, operands[0]);
16945 operands[1] = gen_lowpart (DImode, operands[1]);
16946 emit_move_insn (operands[0], operands[1]);
16947 return;
16950 /* The only non-offsettable memory we handle is push. */
16951 if (push_operand (operands[0], VOIDmode))
16952 push = 1;
16953 else
16954 gcc_assert (!MEM_P (operands[0])
16955 || offsettable_memref_p (operands[0]));
16957 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16958 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16960 /* When emitting push, take care for source operands on the stack. */
16961 if (push && MEM_P (operands[1])
16962 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16964 rtx src_base = XEXP (part[1][nparts - 1], 0);
16966 /* Compensate for the stack decrement by 4. */
16967 if (!TARGET_64BIT && nparts == 3
16968 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16969 src_base = plus_constant (src_base, 4);
16971 /* src_base refers to the stack pointer and is
16972 automatically decreased by emitted push. */
16973 for (i = 0; i < nparts; i++)
16974 part[1][i] = change_address (part[1][i],
16975 GET_MODE (part[1][i]), src_base);
16978 /* We need to do the copy in the right order in case an address register
16979 of the source overlaps the destination. */
16980 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16982 rtx tmp;
16984 for (i = 0; i < nparts; i++)
16986 collisionparts[i]
16987 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16988 if (collisionparts[i])
16989 collisions++;
16992 /* Collision in the middle part can be handled by reordering. */
16993 if (collisions == 1 && nparts == 3 && collisionparts [1])
16995 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16996 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16998 else if (collisions == 1
16999 && nparts == 4
17000 && (collisionparts [1] || collisionparts [2]))
17002 if (collisionparts [1])
17004 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
17005 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
17007 else
17009 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
17010 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
17014 /* If there are more collisions, we can't handle it by reordering.
17015 Do an lea to the last part and use only one colliding move. */
17016 else if (collisions > 1)
17018 rtx base;
17020 collisions = 1;
17022 base = part[0][nparts - 1];
17024 /* Handle the case when the last part isn't valid for lea.
17025 Happens in 64-bit mode storing the 12-byte XFmode. */
17026 if (GET_MODE (base) != Pmode)
17027 base = gen_rtx_REG (Pmode, REGNO (base));
17029 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
17030 part[1][0] = replace_equiv_address (part[1][0], base);
17031 for (i = 1; i < nparts; i++)
17033 tmp = plus_constant (base, UNITS_PER_WORD * i);
17034 part[1][i] = replace_equiv_address (part[1][i], tmp);
17039 if (push)
17041 if (!TARGET_64BIT)
17043 if (nparts == 3)
17045 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
17046 emit_insn (gen_addsi3 (stack_pointer_rtx,
17047 stack_pointer_rtx, GEN_INT (-4)));
17048 emit_move_insn (part[0][2], part[1][2]);
17050 else if (nparts == 4)
17052 emit_move_insn (part[0][3], part[1][3]);
17053 emit_move_insn (part[0][2], part[1][2]);
17056 else
17058 /* In 64-bit mode we don't have a 32-bit push available. In case this is
17059 a register, it is OK - we will just use the larger counterpart. We also
17060 retype memory - this comes from an attempt to avoid the REX prefix on
17061 moving the second half of a TFmode value. */
17062 if (GET_MODE (part[1][1]) == SImode)
17064 switch (GET_CODE (part[1][1]))
17066 case MEM:
17067 part[1][1] = adjust_address (part[1][1], DImode, 0);
17068 break;
17070 case REG:
17071 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
17072 break;
17074 default:
17075 gcc_unreachable ();
17078 if (GET_MODE (part[1][0]) == SImode)
17079 part[1][0] = part[1][1];
17082 emit_move_insn (part[0][1], part[1][1]);
17083 emit_move_insn (part[0][0], part[1][0]);
17084 return;
17087 /* Choose correct order to not overwrite the source before it is copied. */
17088 if ((REG_P (part[0][0])
17089 && REG_P (part[1][1])
17090 && (REGNO (part[0][0]) == REGNO (part[1][1])
17091 || (nparts == 3
17092 && REGNO (part[0][0]) == REGNO (part[1][2]))
17093 || (nparts == 4
17094 && REGNO (part[0][0]) == REGNO (part[1][3]))))
17095 || (collisions > 0
17096 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
17098 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
17100 operands[2 + i] = part[0][j];
17101 operands[6 + i] = part[1][j];
17104 else
17106 for (i = 0; i < nparts; i++)
17108 operands[2 + i] = part[0][i];
17109 operands[6 + i] = part[1][i];
17113 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
17114 if (optimize_insn_for_size_p ())
17116 for (j = 0; j < nparts - 1; j++)
17117 if (CONST_INT_P (operands[6 + j])
17118 && operands[6 + j] != const0_rtx
17119 && REG_P (operands[2 + j]))
17120 for (i = j; i < nparts - 1; i++)
17121 if (CONST_INT_P (operands[7 + i])
17122 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
17123 operands[7 + i] = operands[2 + j];
17126 for (i = 0; i < nparts; i++)
17127 emit_move_insn (operands[2 + i], operands[6 + i]);
17129 return;
17132 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
17133 left shift by a constant, either using a single shift or
17134 a sequence of add instructions. */
17136 static void
17137 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
17139 if (count == 1)
17141 emit_insn ((mode == DImode
17142 ? gen_addsi3
17143 : gen_adddi3) (operand, operand, operand));
17145 else if (!optimize_insn_for_size_p ()
17146 && count * ix86_cost->add <= ix86_cost->shift_const)
17148 int i;
17149 for (i = 0; i < count; i++)
17151 emit_insn ((mode == DImode
17152 ? gen_addsi3
17153 : gen_adddi3) (operand, operand, operand));
17156 else
17157 emit_insn ((mode == DImode
17158 ? gen_ashlsi3
17159 : gen_ashldi3) (operand, operand, GEN_INT (count)));
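/* Split a left shift of a MODE value (DImode on 32-bit targets, TImode on
   64-bit targets) in OPERANDS into shifts on the SImode/DImode halves.
   SCRATCH, when available together with CMOV, is used as a temporary for
   the variable shift count adjustment. */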
17162 void
17163 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
17165 rtx low[2], high[2];
17166 int count;
17167 const int single_width = mode == DImode ? 32 : 64;
17169 if (CONST_INT_P (operands[2]))
17171 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17172 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17174 if (count >= single_width)
17176 emit_move_insn (high[0], low[1]);
17177 emit_move_insn (low[0], const0_rtx);
17179 if (count > single_width)
17180 ix86_expand_ashl_const (high[0], count - single_width, mode);
17182 else
17184 if (!rtx_equal_p (operands[0], operands[1]))
17185 emit_move_insn (operands[0], operands[1]);
17186 emit_insn ((mode == DImode
17187 ? gen_x86_shld
17188 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
17189 ix86_expand_ashl_const (low[0], count, mode);
17191 return;
17194 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17196 if (operands[1] == const1_rtx)
17198 /* Assuming we've chosen QImode-capable registers, then 1 << N
17199 can be done with two 32/64-bit shifts, no branches, no cmoves. */
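/* Illustratively, in the DImode case: both halves are cleared, the test of
   bit 5 of the count selects which half receives the 1 (low when the count
   is below 32, high otherwise), and the final shifts by the count (which the
   hardware reduces modulo 32) place the bit at 1 << (count & 31). */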
17200 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
17202 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
17204 ix86_expand_clear (low[0]);
17205 ix86_expand_clear (high[0]);
17206 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
17208 d = gen_lowpart (QImode, low[0]);
17209 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17210 s = gen_rtx_EQ (QImode, flags, const0_rtx);
17211 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17213 d = gen_lowpart (QImode, high[0]);
17214 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
17215 s = gen_rtx_NE (QImode, flags, const0_rtx);
17216 emit_insn (gen_rtx_SET (VOIDmode, d, s));
17219 /* Otherwise, we can get the same results by manually performing
17220 a bit extract operation on bit 5/6, and then performing the two
17221 shifts. The two methods of getting 0/1 into low/high are exactly
17222 the same size. Avoiding the shift in the bit extract case helps
17223 pentium4 a bit; no one else seems to care much either way. */
17224 else
17226 rtx x;
17228 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
17229 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
17230 else
17231 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
17232 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
17234 emit_insn ((mode == DImode
17235 ? gen_lshrsi3
17236 : gen_lshrdi3) (high[0], high[0],
17237 GEN_INT (mode == DImode ? 5 : 6)));
17238 emit_insn ((mode == DImode
17239 ? gen_andsi3
17240 : gen_anddi3) (high[0], high[0], const1_rtx));
17241 emit_move_insn (low[0], high[0]);
17242 emit_insn ((mode == DImode
17243 ? gen_xorsi3
17244 : gen_xordi3) (low[0], low[0], const1_rtx));
17247 emit_insn ((mode == DImode
17248 ? gen_ashlsi3
17249 : gen_ashldi3) (low[0], low[0], operands[2]));
17250 emit_insn ((mode == DImode
17251 ? gen_ashlsi3
17252 : gen_ashldi3) (high[0], high[0], operands[2]));
17253 return;
17256 if (operands[1] == constm1_rtx)
17258 /* For -1 << N, we can avoid the shld instruction, because we
17259 know that we're shifting 0...31/63 ones into a -1. */
17260 emit_move_insn (low[0], constm1_rtx);
17261 if (optimize_insn_for_size_p ())
17262 emit_move_insn (high[0], low[0]);
17263 else
17264 emit_move_insn (high[0], constm1_rtx);
17266 else
17268 if (!rtx_equal_p (operands[0], operands[1]))
17269 emit_move_insn (operands[0], operands[1]);
17271 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17272 emit_insn ((mode == DImode
17273 ? gen_x86_shld
17274 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17277 emit_insn ((mode == DImode
17278 ? gen_ashlsi3
17279 : gen_ashldi3) (low[0], low[0], operands[2]));
17281 if (TARGET_CMOVE && scratch)
17283 ix86_expand_clear (scratch);
17284 emit_insn ((mode == DImode
17285 ? gen_x86_shiftsi_adj_1
17286 : gen_x86_shiftdi_adj_1) (high[0], low[0], operands[2],
17287 scratch));
17289 else
17290 emit_insn ((mode == DImode
17291 ? gen_x86_shiftsi_adj_2
17292 : gen_x86_shiftdi_adj_2) (high[0], low[0], operands[2]));
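/* Split an arithmetic right shift of a MODE value (DImode on 32-bit targets,
   TImode on 64-bit targets) in OPERANDS into shifts on the SImode/DImode
   halves, using SCRATCH (when available together with CMOV) to hold the
   sign-extension word for the variable shift count adjustment. */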
17295 void
17296 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17298 rtx low[2], high[2];
17299 int count;
17300 const int single_width = mode == DImode ? 32 : 64;
17302 if (CONST_INT_P (operands[2]))
17304 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17305 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17307 if (count == single_width * 2 - 1)
17309 emit_move_insn (high[0], high[1]);
17310 emit_insn ((mode == DImode
17311 ? gen_ashrsi3
17312 : gen_ashrdi3) (high[0], high[0],
17313 GEN_INT (single_width - 1)));
17314 emit_move_insn (low[0], high[0]);
17317 else if (count >= single_width)
17319 emit_move_insn (low[0], high[1]);
17320 emit_move_insn (high[0], low[0]);
17321 emit_insn ((mode == DImode
17322 ? gen_ashrsi3
17323 : gen_ashrdi3) (high[0], high[0],
17324 GEN_INT (single_width - 1)));
17325 if (count > single_width)
17326 emit_insn ((mode == DImode
17327 ? gen_ashrsi3
17328 : gen_ashrdi3) (low[0], low[0],
17329 GEN_INT (count - single_width)));
17331 else
17333 if (!rtx_equal_p (operands[0], operands[1]))
17334 emit_move_insn (operands[0], operands[1]);
17335 emit_insn ((mode == DImode
17336 ? gen_x86_shrd
17337 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17338 emit_insn ((mode == DImode
17339 ? gen_ashrsi3
17340 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17343 else
17345 if (!rtx_equal_p (operands[0], operands[1]))
17346 emit_move_insn (operands[0], operands[1]);
17348 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17350 emit_insn ((mode == DImode
17351 ? gen_x86_shrd
17352 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17353 emit_insn ((mode == DImode
17354 ? gen_ashrsi3
17355 : gen_ashrdi3) (high[0], high[0], operands[2]));
17357 if (TARGET_CMOVE && scratch)
17359 emit_move_insn (scratch, high[0]);
17360 emit_insn ((mode == DImode
17361 ? gen_ashrsi3
17362 : gen_ashrdi3) (scratch, scratch,
17363 GEN_INT (single_width - 1)));
17364 emit_insn ((mode == DImode
17365 ? gen_x86_shiftsi_adj_1
17366 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17367 scratch));
17369 else
17370 emit_insn ((mode == DImode
17371 ? gen_x86_shiftsi_adj_3
17372 : gen_x86_shiftdi_adj_3) (low[0], high[0], operands[2]));
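/* Split a logical right shift of a MODE value (DImode on 32-bit targets,
   TImode on 64-bit targets) in OPERANDS into shifts on the SImode/DImode
   halves, clearing SCRATCH (when available together with CMOV) to supply
   the zero word for the variable shift count adjustment. */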
17376 void
17377 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17379 rtx low[2], high[2];
17380 int count;
17381 const int single_width = mode == DImode ? 32 : 64;
17383 if (CONST_INT_P (operands[2]))
17385 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17386 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17388 if (count >= single_width)
17390 emit_move_insn (low[0], high[1]);
17391 ix86_expand_clear (high[0]);
17393 if (count > single_width)
17394 emit_insn ((mode == DImode
17395 ? gen_lshrsi3
17396 : gen_lshrdi3) (low[0], low[0],
17397 GEN_INT (count - single_width)));
17399 else
17401 if (!rtx_equal_p (operands[0], operands[1]))
17402 emit_move_insn (operands[0], operands[1]);
17403 emit_insn ((mode == DImode
17404 ? gen_x86_shrd
17405 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17406 emit_insn ((mode == DImode
17407 ? gen_lshrsi3
17408 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17411 else
17413 if (!rtx_equal_p (operands[0], operands[1]))
17414 emit_move_insn (operands[0], operands[1]);
17416 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17418 emit_insn ((mode == DImode
17419 ? gen_x86_shrd
17420 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17421 emit_insn ((mode == DImode
17422 ? gen_lshrsi3
17423 : gen_lshrdi3) (high[0], high[0], operands[2]));
17425 /* Heh. By reversing the arguments, we can reuse this pattern. */
17426 if (TARGET_CMOVE && scratch)
17428 ix86_expand_clear (scratch);
17429 emit_insn ((mode == DImode
17430 ? gen_x86_shiftsi_adj_1
17431 : gen_x86_shiftdi_adj_1) (low[0], high[0], operands[2],
17432 scratch));
17434 else
17435 emit_insn ((mode == DImode
17436 ? gen_x86_shiftsi_adj_2
17437 : gen_x86_shiftdi_adj_2) (low[0], high[0], operands[2]));
17441 /* Predict the just emitted jump instruction to be taken with probability PROB. */
17442 static void
17443 predict_jump (int prob)
17445 rtx insn = get_last_insn ();
17446 gcc_assert (JUMP_P (insn));
17447 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17450 /* Helper function for the string operations below. Test whether VARIABLE
17451 is aligned to VALUE bytes. If true, jump to the label. */
17452 static rtx
17453 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17455 rtx label = gen_label_rtx ();
17456 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17457 if (GET_MODE (variable) == DImode)
17458 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17459 else
17460 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17461 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17462 1, label);
17463 if (epilogue)
17464 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17465 else
17466 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17467 return label;
17470 /* Decrease COUNTREG by VALUE. */
17471 static void
17472 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17474 if (GET_MODE (countreg) == DImode)
17475 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17476 else
17477 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17480 /* Zero extend possibly SImode EXP to Pmode register. */
17482 ix86_zero_extend_to_Pmode (rtx exp)
17484 rtx r;
17485 if (GET_MODE (exp) == VOIDmode)
17486 return force_reg (Pmode, exp);
17487 if (GET_MODE (exp) == Pmode)
17488 return copy_to_mode_reg (Pmode, exp);
17489 r = gen_reg_rtx (Pmode);
17490 emit_insn (gen_zero_extendsidi2 (r, exp));
17491 return r;
17494 /* Divide COUNTREG by SCALE. */
17495 static rtx
17496 scale_counter (rtx countreg, int scale)
17498 rtx sc;
17500 if (scale == 1)
17501 return countreg;
17502 if (CONST_INT_P (countreg))
17503 return GEN_INT (INTVAL (countreg) / scale);
17504 gcc_assert (REG_P (countreg));
17506 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17507 GEN_INT (exact_log2 (scale)),
17508 NULL, 1, OPTAB_DIRECT);
17509 return sc;
17512 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17513 DImode for constant loop counts. */
17515 static enum machine_mode
17516 counter_mode (rtx count_exp)
17518 if (GET_MODE (count_exp) != VOIDmode)
17519 return GET_MODE (count_exp);
17520 if (!CONST_INT_P (count_exp))
17521 return Pmode;
17522 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17523 return DImode;
17524 return SImode;
17527 /* When SRCPTR is non-NULL, output a simple loop to move memory pointed to
17528 by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times, with the
17529 overall size given by COUNT in bytes.  When SRCPTR is NULL, output the
17530 equivalent loop that sets memory to VALUE (supposed to be in MODE).
17532 The size is rounded down to a whole number of chunks moved at once.
17533 SRCMEM and DESTMEM provide the MEM rtxes to feed proper aliasing info.  */
17536 static void
17537 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17538 rtx destptr, rtx srcptr, rtx value,
17539 rtx count, enum machine_mode mode, int unroll,
17540 int expected_size)
17542 rtx out_label, top_label, iter, tmp;
17543 enum machine_mode iter_mode = counter_mode (count);
17544 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17545 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17546 rtx size;
17547 rtx x_addr;
17548 rtx y_addr;
17549 int i;
17551 top_label = gen_label_rtx ();
17552 out_label = gen_label_rtx ();
17553 iter = gen_reg_rtx (iter_mode);
17555 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17556 NULL, 1, OPTAB_DIRECT);
17557 /* Those two should combine. */
17558 if (piece_size == const1_rtx)
17560 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17561 true, out_label);
17562 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17564 emit_move_insn (iter, const0_rtx);
17566 emit_label (top_label);
17568 tmp = convert_modes (Pmode, iter_mode, iter, true);
17569 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17570 destmem = change_address (destmem, mode, x_addr);
17572 if (srcmem)
17574 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17575 srcmem = change_address (srcmem, mode, y_addr);
17577 /* When unrolling for chips that reorder memory reads and writes,
17578 we can save registers by using a single temporary.
17579 Also using 4 temporaries is overkill in 32bit mode. */
17580 if (!TARGET_64BIT && 0)
17582 for (i = 0; i < unroll; i++)
17584 if (i)
17586 destmem =
17587 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17588 srcmem =
17589 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17591 emit_move_insn (destmem, srcmem);
17594 else
17596 rtx tmpreg[4];
17597 gcc_assert (unroll <= 4);
17598 for (i = 0; i < unroll; i++)
17600 tmpreg[i] = gen_reg_rtx (mode);
17601 if (i)
17603 srcmem =
17604 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17606 emit_move_insn (tmpreg[i], srcmem);
17608 for (i = 0; i < unroll; i++)
17610 if (i)
17612 destmem =
17613 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17615 emit_move_insn (destmem, tmpreg[i]);
17619 else
17620 for (i = 0; i < unroll; i++)
17622 if (i)
17623 destmem =
17624 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17625 emit_move_insn (destmem, value);
17628 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17629 true, OPTAB_LIB_WIDEN);
17630 if (tmp != iter)
17631 emit_move_insn (iter, tmp);
17633 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17634 true, top_label);
17635 if (expected_size != -1)
17637 expected_size /= GET_MODE_SIZE (mode) * unroll;
17638 if (expected_size == 0)
17639 predict_jump (0);
17640 else if (expected_size > REG_BR_PROB_BASE)
17641 predict_jump (REG_BR_PROB_BASE - 1);
17642 else
17643 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17645 else
17646 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17647 iter = ix86_zero_extend_to_Pmode (iter);
17648 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17649 true, OPTAB_LIB_WIDEN);
17650 if (tmp != destptr)
17651 emit_move_insn (destptr, tmp);
17652 if (srcptr)
17654 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17655 true, OPTAB_LIB_WIDEN);
17656 if (tmp != srcptr)
17657 emit_move_insn (srcptr, tmp);
17659 emit_label (out_label);
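/* Illustrative sketch of what the loop expander above generates for a copy
   with MODE == SImode, UNROLL == 1 and a non-constant count (pseudo-C, not
   literal output):

     size = count & ~3;                  // piece_size_mask
     iter = 0;
   top:
     *(int *) (dest + iter) = *(int *) (src + iter);
     iter += 4;                          // piece_size
     if (iter < size) goto top;
     dest += iter;
     src += iter;
   out:
     ;                                   // out_label
*/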
17662 /* Output "rep; mov" instruction.
17663 Arguments have the same meaning as for the previous function.  */
17664 static void
17665 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17666 rtx destptr, rtx srcptr,
17667 rtx count,
17668 enum machine_mode mode)
17670 rtx destexp;
17671 rtx srcexp;
17672 rtx countreg;
17674 /* If the size is known and a multiple of 4, an SImode (rep movsl) copy is preferable to a QImode (rep movsb) one.  */
17675 if (mode == QImode && CONST_INT_P (count)
17676 && !(INTVAL (count) & 3))
17677 mode = SImode;
17679 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17680 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17681 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17682 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17683 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17684 if (mode != QImode)
17686 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17687 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17688 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17689 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17690 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17691 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17693 else
17695 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17696 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17698 if (CONST_INT_P (count))
17700 count = GEN_INT (INTVAL (count)
17701 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17702 destmem = shallow_copy_rtx (destmem);
17703 srcmem = shallow_copy_rtx (srcmem);
17704 set_mem_size (destmem, count);
17705 set_mem_size (srcmem, count);
17707 else
17709 if (MEM_SIZE (destmem))
17710 set_mem_size (destmem, NULL_RTX);
17711 if (MEM_SIZE (srcmem))
17712 set_mem_size (srcmem, NULL_RTX);
17714 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17715 destexp, srcexp));
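/* Note on the expressions built above (an interpretation, see the rep_mov
   patterns in i386.md): DESTEXP and SRCEXP describe the pointer values after
   the rep move, i.e. the base plus the full byte count (COUNTREG shifted
   left by log2 of the chunk size).  For example, copying 32 bytes in SImode
   chunks leaves countreg == 8 and destexp == destptr + (8 << 2).  */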
17718 /* Output "rep; stos" instruction.
17719 Arguments have the same meaning as for the previous function.  */
17720 static void
17721 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17722 rtx count, enum machine_mode mode,
17723 rtx orig_value)
17725 rtx destexp;
17726 rtx countreg;
17728 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17729 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17730 value = force_reg (mode, gen_lowpart (mode, value));
17731 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17732 if (mode != QImode)
17734 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17735 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17736 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17738 else
17739 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17740 if (orig_value == const0_rtx && CONST_INT_P (count))
17742 count = GEN_INT (INTVAL (count)
17743 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17744 destmem = shallow_copy_rtx (destmem);
17745 set_mem_size (destmem, count);
17747 else if (MEM_SIZE (destmem))
17748 set_mem_size (destmem, NULL_RTX);
17749 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17752 static void
17753 emit_strmov (rtx destmem, rtx srcmem,
17754 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17756 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17757 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17758 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17761 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17762 static void
17763 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17764 rtx destptr, rtx srcptr, rtx count, int max_size)
17766 rtx src, dest;
17767 if (CONST_INT_P (count))
17769 HOST_WIDE_INT countval = INTVAL (count);
17770 int offset = 0;
17772 if ((countval & 0x10) && max_size > 16)
17774 if (TARGET_64BIT)
17776 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17777 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17779 else
17780 gcc_unreachable ();
17781 offset += 16;
17783 if ((countval & 0x08) && max_size > 8)
17785 if (TARGET_64BIT)
17786 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17787 else
17789 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17790 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17792 offset += 8;
17794 if ((countval & 0x04) && max_size > 4)
17796 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17797 offset += 4;
17799 if ((countval & 0x02) && max_size > 2)
17801 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17802 offset += 2;
17804 if ((countval & 0x01) && max_size > 1)
17806 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17807 offset += 1;
17809 return;
17811 if (max_size > 8)
17813 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17814 count, 1, OPTAB_DIRECT);
17815 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17816 count, QImode, 1, 4);
17817 return;
17820 /* When single stringops are cheap (TARGET_SINGLE_STRINGOP), they advance the
17821 dest and src pointers for us.  Otherwise we save code size by keeping an
17822 offset register (zero is free after the preceding rep insn) and using x86 addressing modes.  */
17824 if (TARGET_SINGLE_STRINGOP)
17826 if (max_size > 4)
17828 rtx label = ix86_expand_aligntest (count, 4, true);
17829 src = change_address (srcmem, SImode, srcptr);
17830 dest = change_address (destmem, SImode, destptr);
17831 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17832 emit_label (label);
17833 LABEL_NUSES (label) = 1;
17835 if (max_size > 2)
17837 rtx label = ix86_expand_aligntest (count, 2, true);
17838 src = change_address (srcmem, HImode, srcptr);
17839 dest = change_address (destmem, HImode, destptr);
17840 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17841 emit_label (label);
17842 LABEL_NUSES (label) = 1;
17844 if (max_size > 1)
17846 rtx label = ix86_expand_aligntest (count, 1, true);
17847 src = change_address (srcmem, QImode, srcptr);
17848 dest = change_address (destmem, QImode, destptr);
17849 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17850 emit_label (label);
17851 LABEL_NUSES (label) = 1;
17854 else
17856 rtx offset = force_reg (Pmode, const0_rtx);
17857 rtx tmp;
17859 if (max_size > 4)
17861 rtx label = ix86_expand_aligntest (count, 4, true);
17862 src = change_address (srcmem, SImode, srcptr);
17863 dest = change_address (destmem, SImode, destptr);
17864 emit_move_insn (dest, src);
17865 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17866 true, OPTAB_LIB_WIDEN);
17867 if (tmp != offset)
17868 emit_move_insn (offset, tmp);
17869 emit_label (label);
17870 LABEL_NUSES (label) = 1;
17872 if (max_size > 2)
17874 rtx label = ix86_expand_aligntest (count, 2, true);
17875 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17876 src = change_address (srcmem, HImode, tmp);
17877 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17878 dest = change_address (destmem, HImode, tmp);
17879 emit_move_insn (dest, src);
17880 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17881 true, OPTAB_LIB_WIDEN);
17882 if (tmp != offset)
17883 emit_move_insn (offset, tmp);
17884 emit_label (label);
17885 LABEL_NUSES (label) = 1;
17887 if (max_size > 1)
17889 rtx label = ix86_expand_aligntest (count, 1, true);
17890 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17891 src = change_address (srcmem, QImode, tmp);
17892 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17893 dest = change_address (destmem, QImode, tmp);
17894 emit_move_insn (dest, src);
17895 emit_label (label);
17896 LABEL_NUSES (label) = 1;
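/* A worked example for the constant-count path above: with countval == 13
   (binary 1101), max_size > 8 and a 64-bit target, the epilogue emits an
   8-byte move at offset 0, a 4-byte move at offset 8 and a 1-byte move at
   offset 12.  */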
17901 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17902 static void
17903 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17904 rtx count, int max_size)
17906 count =
17907 expand_simple_binop (counter_mode (count), AND, count,
17908 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17909 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17910 gen_lowpart (QImode, value), count, QImode,
17911 1, max_size / 2);
17914 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17915 static void
17916 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17918 rtx dest;
17920 if (CONST_INT_P (count))
17922 HOST_WIDE_INT countval = INTVAL (count);
17923 int offset = 0;
17925 if ((countval & 0x10) && max_size > 16)
17927 if (TARGET_64BIT)
17929 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17930 emit_insn (gen_strset (destptr, dest, value));
17931 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17932 emit_insn (gen_strset (destptr, dest, value));
17934 else
17935 gcc_unreachable ();
17936 offset += 16;
17938 if ((countval & 0x08) && max_size > 8)
17940 if (TARGET_64BIT)
17942 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17943 emit_insn (gen_strset (destptr, dest, value));
17945 else
17947 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17948 emit_insn (gen_strset (destptr, dest, value));
17949 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17950 emit_insn (gen_strset (destptr, dest, value));
17952 offset += 8;
17954 if ((countval & 0x04) && max_size > 4)
17956 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17957 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17958 offset += 4;
17960 if ((countval & 0x02) && max_size > 2)
17962 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17963 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17964 offset += 2;
17966 if ((countval & 0x01) && max_size > 1)
17968 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17969 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17970 offset += 1;
17972 return;
17974 if (max_size > 32)
17976 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17977 return;
17979 if (max_size > 16)
17981 rtx label = ix86_expand_aligntest (count, 16, true);
17982 if (TARGET_64BIT)
17984 dest = change_address (destmem, DImode, destptr);
17985 emit_insn (gen_strset (destptr, dest, value));
17986 emit_insn (gen_strset (destptr, dest, value));
17988 else
17990 dest = change_address (destmem, SImode, destptr);
17991 emit_insn (gen_strset (destptr, dest, value));
17992 emit_insn (gen_strset (destptr, dest, value));
17993 emit_insn (gen_strset (destptr, dest, value));
17994 emit_insn (gen_strset (destptr, dest, value));
17996 emit_label (label);
17997 LABEL_NUSES (label) = 1;
17999 if (max_size > 8)
18001 rtx label = ix86_expand_aligntest (count, 8, true);
18002 if (TARGET_64BIT)
18004 dest = change_address (destmem, DImode, destptr);
18005 emit_insn (gen_strset (destptr, dest, value));
18007 else
18009 dest = change_address (destmem, SImode, destptr);
18010 emit_insn (gen_strset (destptr, dest, value));
18011 emit_insn (gen_strset (destptr, dest, value));
18013 emit_label (label);
18014 LABEL_NUSES (label) = 1;
18016 if (max_size > 4)
18018 rtx label = ix86_expand_aligntest (count, 4, true);
18019 dest = change_address (destmem, SImode, destptr);
18020 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
18021 emit_label (label);
18022 LABEL_NUSES (label) = 1;
18024 if (max_size > 2)
18026 rtx label = ix86_expand_aligntest (count, 2, true);
18027 dest = change_address (destmem, HImode, destptr);
18028 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
18029 emit_label (label);
18030 LABEL_NUSES (label) = 1;
18032 if (max_size > 1)
18034 rtx label = ix86_expand_aligntest (count, 1, true);
18035 dest = change_address (destmem, QImode, destptr);
18036 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
18037 emit_label (label);
18038 LABEL_NUSES (label) = 1;
18042 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
18043 to DESIRED_ALIGNMENT.  */
18044 static void
18045 expand_movmem_prologue (rtx destmem, rtx srcmem,
18046 rtx destptr, rtx srcptr, rtx count,
18047 int align, int desired_alignment)
18049 if (align <= 1 && desired_alignment > 1)
18051 rtx label = ix86_expand_aligntest (destptr, 1, false);
18052 srcmem = change_address (srcmem, QImode, srcptr);
18053 destmem = change_address (destmem, QImode, destptr);
18054 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18055 ix86_adjust_counter (count, 1);
18056 emit_label (label);
18057 LABEL_NUSES (label) = 1;
18059 if (align <= 2 && desired_alignment > 2)
18061 rtx label = ix86_expand_aligntest (destptr, 2, false);
18062 srcmem = change_address (srcmem, HImode, srcptr);
18063 destmem = change_address (destmem, HImode, destptr);
18064 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18065 ix86_adjust_counter (count, 2);
18066 emit_label (label);
18067 LABEL_NUSES (label) = 1;
18069 if (align <= 4 && desired_alignment > 4)
18071 rtx label = ix86_expand_aligntest (destptr, 4, false);
18072 srcmem = change_address (srcmem, SImode, srcptr);
18073 destmem = change_address (destmem, SImode, destptr);
18074 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
18075 ix86_adjust_counter (count, 4);
18076 emit_label (label);
18077 LABEL_NUSES (label) = 1;
18079 gcc_assert (desired_alignment <= 8);
18082 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
18083 ALIGN_BYTES is how many bytes need to be copied.  */
18084 static rtx
18085 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
18086 int desired_align, int align_bytes)
18088 rtx src = *srcp;
18089 rtx src_size, dst_size;
18090 int off = 0;
18091 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
18092 if (src_align_bytes >= 0)
18093 src_align_bytes = desired_align - src_align_bytes;
18094 src_size = MEM_SIZE (src);
18095 dst_size = MEM_SIZE (dst);
18096 if (align_bytes & 1)
18098 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18099 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
18100 off = 1;
18101 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18103 if (align_bytes & 2)
18105 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18106 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
18107 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18108 set_mem_align (dst, 2 * BITS_PER_UNIT);
18109 if (src_align_bytes >= 0
18110 && (src_align_bytes & 1) == (align_bytes & 1)
18111 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
18112 set_mem_align (src, 2 * BITS_PER_UNIT);
18113 off = 2;
18114 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18116 if (align_bytes & 4)
18118 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18119 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
18120 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18121 set_mem_align (dst, 4 * BITS_PER_UNIT);
18122 if (src_align_bytes >= 0)
18124 unsigned int src_align = 0;
18125 if ((src_align_bytes & 3) == (align_bytes & 3))
18126 src_align = 4;
18127 else if ((src_align_bytes & 1) == (align_bytes & 1))
18128 src_align = 2;
18129 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18130 set_mem_align (src, src_align * BITS_PER_UNIT);
18132 off = 4;
18133 emit_insn (gen_strmov (destreg, dst, srcreg, src));
18135 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18136 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
18137 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18138 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18139 if (src_align_bytes >= 0)
18141 unsigned int src_align = 0;
18142 if ((src_align_bytes & 7) == (align_bytes & 7))
18143 src_align = 8;
18144 else if ((src_align_bytes & 3) == (align_bytes & 3))
18145 src_align = 4;
18146 else if ((src_align_bytes & 1) == (align_bytes & 1))
18147 src_align = 2;
18148 if (src_align > (unsigned int) desired_align)
18149 src_align = desired_align;
18150 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
18151 set_mem_align (src, src_align * BITS_PER_UNIT);
18153 if (dst_size)
18154 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18155 if (src_size)
18156 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
18157 *srcp = src;
18158 return dst;
18161 /* Store enough bytes at DEST to align DEST, known to be aligned by ALIGN,
18162 to DESIRED_ALIGNMENT.  */
18163 static void
18164 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
18165 int align, int desired_alignment)
18167 if (align <= 1 && desired_alignment > 1)
18169 rtx label = ix86_expand_aligntest (destptr, 1, false);
18170 destmem = change_address (destmem, QImode, destptr);
18171 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
18172 ix86_adjust_counter (count, 1);
18173 emit_label (label);
18174 LABEL_NUSES (label) = 1;
18176 if (align <= 2 && desired_alignment > 2)
18178 rtx label = ix86_expand_aligntest (destptr, 2, false);
18179 destmem = change_address (destmem, HImode, destptr);
18180 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
18181 ix86_adjust_counter (count, 2);
18182 emit_label (label);
18183 LABEL_NUSES (label) = 1;
18185 if (align <= 4 && desired_alignment > 4)
18187 rtx label = ix86_expand_aligntest (destptr, 4, false);
18188 destmem = change_address (destmem, SImode, destptr);
18189 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
18190 ix86_adjust_counter (count, 4);
18191 emit_label (label);
18192 LABEL_NUSES (label) = 1;
18194 gcc_assert (desired_alignment <= 8);
18197 /* Store enough bytes at DST to align DST, known to be aligned by ALIGN,
18198 to DESIRED_ALIGN.  ALIGN_BYTES is how many bytes need to be stored.  */
18199 static rtx
18200 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
18201 int desired_align, int align_bytes)
18203 int off = 0;
18204 rtx dst_size = MEM_SIZE (dst);
18205 if (align_bytes & 1)
18207 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
18208 off = 1;
18209 emit_insn (gen_strset (destreg, dst,
18210 gen_lowpart (QImode, value)));
18212 if (align_bytes & 2)
18214 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
18215 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
18216 set_mem_align (dst, 2 * BITS_PER_UNIT);
18217 off = 2;
18218 emit_insn (gen_strset (destreg, dst,
18219 gen_lowpart (HImode, value)));
18221 if (align_bytes & 4)
18223 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
18224 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
18225 set_mem_align (dst, 4 * BITS_PER_UNIT);
18226 off = 4;
18227 emit_insn (gen_strset (destreg, dst,
18228 gen_lowpart (SImode, value)));
18230 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
18231 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
18232 set_mem_align (dst, desired_align * BITS_PER_UNIT);
18233 if (dst_size)
18234 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
18235 return dst;
18238 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
18239 static enum stringop_alg
18240 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
18241 int *dynamic_check)
18243 const struct stringop_algs * algs;
18244 bool optimize_for_speed;
18245 /* Algorithms using the rep prefix want at least edi and ecx;
18246 additionally, memset wants eax and memcpy wants esi. Don't
18247 consider such algorithms if the user has appropriated those
18248 registers for their own purposes. */
18249 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
18250 || (memset
18251 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
18253 #define ALG_USABLE_P(alg) (rep_prefix_usable \
18254 || (alg != rep_prefix_1_byte \
18255 && alg != rep_prefix_4_byte \
18256 && alg != rep_prefix_8_byte))
18257 const struct processor_costs *cost;
18259 /* Even if the string operation call is cold, we still might spend a lot
18260 of time processing large blocks. */
18261 if (optimize_function_for_size_p (cfun)
18262 || (optimize_insn_for_size_p ()
18263 && expected_size != -1 && expected_size < 256))
18264 optimize_for_speed = false;
18265 else
18266 optimize_for_speed = true;
18268 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
18270 *dynamic_check = -1;
18271 if (memset)
18272 algs = &cost->memset[TARGET_64BIT != 0];
18273 else
18274 algs = &cost->memcpy[TARGET_64BIT != 0];
18275 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18276 return stringop_alg;
18277 /* rep; movq or rep; movl is the smallest variant. */
18278 else if (!optimize_for_speed)
18280 if (!count || (count & 3))
18281 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18282 else
18283 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18285 /* Very tiny blocks are best handled via the loop; REP is expensive to set up.  */
18287 else if (expected_size != -1 && expected_size < 4)
18288 return loop_1_byte;
18289 else if (expected_size != -1)
18291 unsigned int i;
18292 enum stringop_alg alg = libcall;
18293 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18295 /* We get here if the algorithms that were not libcall-based
18296 were rep-prefix based and we are unable to use rep prefixes
18297 based on global register usage. Break out of the loop and
18298 use the heuristic below. */
18299 if (algs->size[i].max == 0)
18300 break;
18301 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18303 enum stringop_alg candidate = algs->size[i].alg;
18305 if (candidate != libcall && ALG_USABLE_P (candidate))
18306 alg = candidate;
18307 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
18308 last non-libcall inline algorithm. */
18309 if (TARGET_INLINE_ALL_STRINGOPS)
18311 /* When the current size is best copied by a libcall,
18312 but we are still forced to inline, run the heuristic below
18313 that will pick code for medium sized blocks. */
18314 if (alg != libcall)
18315 return alg;
18316 break;
18318 else if (ALG_USABLE_P (candidate))
18319 return candidate;
18322 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18324 /* When asked to inline the call anyway, try to pick a meaningful choice.
18325 We look for the maximal size of block that is faster to copy by hand and
18326 take blocks of at most that size, guessing that the average size will
18327 be roughly half of the block.
18329 If this turns out to be bad, we might simply specify the preferred
18330 choice in ix86_costs. */
18331 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18332 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18334 int max = -1;
18335 enum stringop_alg alg;
18336 int i;
18337 bool any_alg_usable_p = true;
18339 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18341 enum stringop_alg candidate = algs->size[i].alg;
18342 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18344 if (candidate != libcall && candidate
18345 && ALG_USABLE_P (candidate))
18346 max = algs->size[i].max;
18348 /* If there aren't any usable algorithms, then recursing on
18349 smaller sizes isn't going to find anything. Just return the
18350 simple byte-at-a-time copy loop. */
18351 if (!any_alg_usable_p)
18353 /* Pick something reasonable. */
18354 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18355 *dynamic_check = 128;
18356 return loop_1_byte;
18358 if (max == -1)
18359 max = 4096;
18360 alg = decide_alg (count, max / 2, memset, dynamic_check);
18361 gcc_assert (*dynamic_check == -1);
18362 gcc_assert (alg != libcall);
18363 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18364 *dynamic_check = max;
18365 return alg;
18367 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18368 #undef ALG_USABLE_P
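/* Illustration of how the size table is scanned above, using a hypothetical
   entry rather than one of the real per-CPU cost tables: given
   {libcall, {{256, loop}, {-1, rep_prefix_4_byte}}} and expected_size == 100,
   the first entry with max >= expected_size is {256, loop}, so the loop
   algorithm is chosen when it is usable.  */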
18371 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18372 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18373 static int
18374 decide_alignment (int align,
18375 enum stringop_alg alg,
18376 int expected_size)
18378 int desired_align = 0;
18379 switch (alg)
18381 case no_stringop:
18382 gcc_unreachable ();
18383 case loop:
18384 case unrolled_loop:
18385 desired_align = GET_MODE_SIZE (Pmode);
18386 break;
18387 case rep_prefix_8_byte:
18388 desired_align = 8;
18389 break;
18390 case rep_prefix_4_byte:
18391 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
18392 copying a whole cache line at once.  */
18393 if (TARGET_PENTIUMPRO)
18394 desired_align = 8;
18395 else
18396 desired_align = 4;
18397 break;
18398 case rep_prefix_1_byte:
18399 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
18400 copying a whole cache line at once.  */
18401 if (TARGET_PENTIUMPRO)
18402 desired_align = 8;
18403 else
18404 desired_align = 1;
18405 break;
18406 case loop_1_byte:
18407 desired_align = 1;
18408 break;
18409 case libcall:
18410 return 0;
18413 if (optimize_size)
18414 desired_align = 1;
18415 if (desired_align < align)
18416 desired_align = align;
18417 if (expected_size != -1 && expected_size < 4)
18418 desired_align = align;
18419 return desired_align;
18422 /* Return the smallest power of 2 greater than VAL. */
18423 static int
18424 smallest_pow2_greater_than (int val)
18426 int ret = 1;
18427 while (ret <= val)
18428 ret <<= 1;
18429 return ret;
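/* A worked example: smallest_pow2_greater_than (7) == 8 and, because the
   comparison above is <=, smallest_pow2_greater_than (8) == 16; the result
   is strictly greater than VAL, as the comment says.  */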
18432 /* Expand string move (memcpy) operation. Use i386 string operations when
18433 profitable. expand_setmem contains similar code. The code depends upon
18434 architecture, block size and alignment, but always has the same
18435 overall structure:
18437 1) Prologue guard: Conditional that jumps up to epilogues for small
18438 blocks that can be handled by epilogue alone. This is faster but
18439 also needed for correctness, since the prologue assumes the block is larger
18440 than the desired alignment.
18442 Optional dynamic check for size and libcall for large
18443 blocks is emitted here too, with -minline-stringops-dynamically.
18445 2) Prologue: copy first few bytes in order to get destination aligned
18446 to DESIRED_ALIGN.  It is emitted only when ALIGN is less than
18447 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18448 We emit either a jump tree on power of two sized blocks, or a byte loop.
18450 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18451 with specified algorithm.
18453 4) Epilogue: code copying tail of the block that is too small to be
18454 handled by main body (or up to size guarded by prologue guard). */
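/* Illustrative sketch of the emitted shape for the common case of a
   non-constant count and a rep-prefix main body (pseudo-C, not literal
   output):

     if (count < epilogue_size_needed) goto epilogue;        // 1) guard
     copy a few bytes until dest reaches desired_align;      // 2) prologue
     count -= bytes copied by the prologue;
     rep movs over count / size_needed chunks;               // 3) main body
   epilogue:
     copy the remaining count & (epilogue_size_needed - 1) bytes;  // 4)
*/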
18457 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18458 rtx expected_align_exp, rtx expected_size_exp)
18460 rtx destreg;
18461 rtx srcreg;
18462 rtx label = NULL;
18463 rtx tmp;
18464 rtx jump_around_label = NULL;
18465 HOST_WIDE_INT align = 1;
18466 unsigned HOST_WIDE_INT count = 0;
18467 HOST_WIDE_INT expected_size = -1;
18468 int size_needed = 0, epilogue_size_needed;
18469 int desired_align = 0, align_bytes = 0;
18470 enum stringop_alg alg;
18471 int dynamic_check;
18472 bool need_zero_guard = false;
18474 if (CONST_INT_P (align_exp))
18475 align = INTVAL (align_exp);
18476 /* i386 can do misaligned accesses at a reasonably small extra cost. */
18477 if (CONST_INT_P (expected_align_exp)
18478 && INTVAL (expected_align_exp) > align)
18479 align = INTVAL (expected_align_exp);
18480 /* ALIGN is the minimum of destination and source alignment, but we care here
18481 just about destination alignment. */
18482 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18483 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18485 if (CONST_INT_P (count_exp))
18486 count = expected_size = INTVAL (count_exp);
18487 if (CONST_INT_P (expected_size_exp) && count == 0)
18488 expected_size = INTVAL (expected_size_exp);
18490 /* Make sure we don't need to care about overflow later on. */
18491 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18492 return 0;
18494 /* Step 0: Decide on preferred algorithm, desired alignment and
18495 size of chunks to be copied by main loop. */
18497 alg = decide_alg (count, expected_size, false, &dynamic_check);
18498 desired_align = decide_alignment (align, alg, expected_size);
18500 if (!TARGET_ALIGN_STRINGOPS)
18501 align = desired_align;
18503 if (alg == libcall)
18504 return 0;
18505 gcc_assert (alg != no_stringop);
18506 if (!count)
18507 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18508 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18509 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18510 switch (alg)
18512 case libcall:
18513 case no_stringop:
18514 gcc_unreachable ();
18515 case loop:
18516 need_zero_guard = true;
18517 size_needed = GET_MODE_SIZE (Pmode);
18518 break;
18519 case unrolled_loop:
18520 need_zero_guard = true;
18521 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18522 break;
18523 case rep_prefix_8_byte:
18524 size_needed = 8;
18525 break;
18526 case rep_prefix_4_byte:
18527 size_needed = 4;
18528 break;
18529 case rep_prefix_1_byte:
18530 size_needed = 1;
18531 break;
18532 case loop_1_byte:
18533 need_zero_guard = true;
18534 size_needed = 1;
18535 break;
18538 epilogue_size_needed = size_needed;
18540 /* Step 1: Prologue guard. */
18542 /* Alignment code needs count to be in register. */
18543 if (CONST_INT_P (count_exp) && desired_align > align)
18545 if (INTVAL (count_exp) > desired_align
18546 && INTVAL (count_exp) > size_needed)
18548 align_bytes
18549 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18550 if (align_bytes <= 0)
18551 align_bytes = 0;
18552 else
18553 align_bytes = desired_align - align_bytes;
18555 if (align_bytes == 0)
18556 count_exp = force_reg (counter_mode (count_exp), count_exp);
18558 gcc_assert (desired_align >= 1 && align >= 1);
18560 /* Ensure that alignment prologue won't copy past end of block. */
18561 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18563 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18564 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18565 Make sure it is a power of 2.  */
18566 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18568 if (count)
18570 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18572 /* If main algorithm works on QImode, no epilogue is needed.
18573 For small sizes just don't align anything. */
18574 if (size_needed == 1)
18575 desired_align = align;
18576 else
18577 goto epilogue;
18580 else
18582 label = gen_label_rtx ();
18583 emit_cmp_and_jump_insns (count_exp,
18584 GEN_INT (epilogue_size_needed),
18585 LTU, 0, counter_mode (count_exp), 1, label);
18586 if (expected_size == -1 || expected_size < epilogue_size_needed)
18587 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18588 else
18589 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18593 /* Emit code to decide at runtime whether a library call or inline code
18594 should be used.  */
18595 if (dynamic_check != -1)
18597 if (CONST_INT_P (count_exp))
18599 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18601 emit_block_move_via_libcall (dst, src, count_exp, false);
18602 count_exp = const0_rtx;
18603 goto epilogue;
18606 else
18608 rtx hot_label = gen_label_rtx ();
18609 jump_around_label = gen_label_rtx ();
18610 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18611 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18612 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18613 emit_block_move_via_libcall (dst, src, count_exp, false);
18614 emit_jump (jump_around_label);
18615 emit_label (hot_label);
18619 /* Step 2: Alignment prologue. */
18621 if (desired_align > align)
18623 if (align_bytes == 0)
18625 /* Except for the first move in the epilogue, we no longer know
18626 the constant offset in the aliasing info.  It doesn't seem worth
18627 the pain to maintain it for the first move, so throw away
18628 the info early.  */
18629 src = change_address (src, BLKmode, srcreg);
18630 dst = change_address (dst, BLKmode, destreg);
18631 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18632 desired_align);
18634 else
18636 /* If we know how many bytes need to be stored before dst is
18637 sufficiently aligned, maintain aliasing info accurately. */
18638 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18639 desired_align, align_bytes);
18640 count_exp = plus_constant (count_exp, -align_bytes);
18641 count -= align_bytes;
18643 if (need_zero_guard
18644 && (count < (unsigned HOST_WIDE_INT) size_needed
18645 || (align_bytes == 0
18646 && count < ((unsigned HOST_WIDE_INT) size_needed
18647 + desired_align - align))))
18649 /* It is possible that we copied enough so the main loop will not
18650 execute. */
18651 gcc_assert (size_needed > 1);
18652 if (label == NULL_RTX)
18653 label = gen_label_rtx ();
18654 emit_cmp_and_jump_insns (count_exp,
18655 GEN_INT (size_needed),
18656 LTU, 0, counter_mode (count_exp), 1, label);
18657 if (expected_size == -1
18658 || expected_size < (desired_align - align) / 2 + size_needed)
18659 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18660 else
18661 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18664 if (label && size_needed == 1)
18666 emit_label (label);
18667 LABEL_NUSES (label) = 1;
18668 label = NULL;
18669 epilogue_size_needed = 1;
18671 else if (label == NULL_RTX)
18672 epilogue_size_needed = size_needed;
18674 /* Step 3: Main loop. */
18676 switch (alg)
18678 case libcall:
18679 case no_stringop:
18680 gcc_unreachable ();
18681 case loop_1_byte:
18682 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18683 count_exp, QImode, 1, expected_size);
18684 break;
18685 case loop:
18686 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18687 count_exp, Pmode, 1, expected_size);
18688 break;
18689 case unrolled_loop:
18690 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
18691 registers for 4 temporaries anyway. */
18692 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18693 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18694 expected_size);
18695 break;
18696 case rep_prefix_8_byte:
18697 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18698 DImode);
18699 break;
18700 case rep_prefix_4_byte:
18701 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18702 SImode);
18703 break;
18704 case rep_prefix_1_byte:
18705 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18706 QImode);
18707 break;
18709 /* Adjust properly the offset of src and dest memory for aliasing. */
18710 if (CONST_INT_P (count_exp))
18712 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18713 (count / size_needed) * size_needed);
18714 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18715 (count / size_needed) * size_needed);
18717 else
18719 src = change_address (src, BLKmode, srcreg);
18720 dst = change_address (dst, BLKmode, destreg);
18723 /* Step 4: Epilogue to copy the remaining bytes. */
18724 epilogue:
18725 if (label)
18727 /* When the main loop is done, COUNT_EXP might hold the original count,
18728 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18729 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18730 bytes.  Compensate if needed.  */
18732 if (size_needed < epilogue_size_needed)
18734 tmp =
18735 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18736 GEN_INT (size_needed - 1), count_exp, 1,
18737 OPTAB_DIRECT);
18738 if (tmp != count_exp)
18739 emit_move_insn (count_exp, tmp);
18741 emit_label (label);
18742 LABEL_NUSES (label) = 1;
18745 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18746 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18747 epilogue_size_needed);
18748 if (jump_around_label)
18749 emit_label (jump_around_label);
18750 return 1;
18753 /* Helper function for memset expansion.  For a QImode value 0xXY produce
18754 0xXYXYXYXY of the width specified by MODE.  This is essentially
18755 a * 0x01010101, but we can do slightly better than
18756 synth_mult by unwinding the sequence by hand on CPUs with
18757 slow multiply.  */
18758 static rtx
18759 promote_duplicated_reg (enum machine_mode mode, rtx val)
18761 enum machine_mode valmode = GET_MODE (val);
18762 rtx tmp;
18763 int nops = mode == DImode ? 3 : 2;
18765 gcc_assert (mode == SImode || mode == DImode);
18766 if (val == const0_rtx)
18767 return copy_to_mode_reg (mode, const0_rtx);
18768 if (CONST_INT_P (val))
18770 HOST_WIDE_INT v = INTVAL (val) & 255;
18772 v |= v << 8;
18773 v |= v << 16;
18774 if (mode == DImode)
18775 v |= (v << 16) << 16;
18776 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18779 if (valmode == VOIDmode)
18780 valmode = QImode;
18781 if (valmode != QImode)
18782 val = gen_lowpart (QImode, val);
18783 if (mode == QImode)
18784 return val;
18785 if (!TARGET_PARTIAL_REG_STALL)
18786 nops--;
18787 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18788 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18789 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18790 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18792 rtx reg = convert_modes (mode, QImode, val, true);
18793 tmp = promote_duplicated_reg (mode, const1_rtx);
18794 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18795 OPTAB_DIRECT);
18797 else
18799 rtx reg = convert_modes (mode, QImode, val, true);
18801 if (!TARGET_PARTIAL_REG_STALL)
18802 if (mode == SImode)
18803 emit_insn (gen_movsi_insv_1 (reg, reg));
18804 else
18805 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18806 else
18808 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18809 NULL, 1, OPTAB_DIRECT);
18810 reg =
18811 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18813 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18814 NULL, 1, OPTAB_DIRECT);
18815 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18816 if (mode == SImode)
18817 return reg;
18818 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18819 NULL, 1, OPTAB_DIRECT);
18820 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18821 return reg;
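/* A worked example of the promotion above: VAL == 0xAB promoted to SImode
   gives 0xAB -> 0xABAB -> 0xABABABAB on the constant path; the non-constant
   path emits the equivalent shift-and-or (or insv / multiply) sequence, and
   for DImode one more step yields 0xABABABABABABABAB.  */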
18825 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
18826 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
18827 alignment from ALIGN to DESIRED_ALIGN. */
18828 static rtx
18829 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18831 rtx promoted_val;
18833 if (TARGET_64BIT
18834 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18835 promoted_val = promote_duplicated_reg (DImode, val);
18836 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18837 promoted_val = promote_duplicated_reg (SImode, val);
18838 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18839 promoted_val = promote_duplicated_reg (HImode, val);
18840 else
18841 promoted_val = val;
18843 return promoted_val;
18846 /* Expand string clear operation (bzero). Use i386 string operations when
18847 profitable. See expand_movmem comment for explanation of individual
18848 steps performed. */
18850 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18851 rtx expected_align_exp, rtx expected_size_exp)
18853 rtx destreg;
18854 rtx label = NULL;
18855 rtx tmp;
18856 rtx jump_around_label = NULL;
18857 HOST_WIDE_INT align = 1;
18858 unsigned HOST_WIDE_INT count = 0;
18859 HOST_WIDE_INT expected_size = -1;
18860 int size_needed = 0, epilogue_size_needed;
18861 int desired_align = 0, align_bytes = 0;
18862 enum stringop_alg alg;
18863 rtx promoted_val = NULL;
18864 bool force_loopy_epilogue = false;
18865 int dynamic_check;
18866 bool need_zero_guard = false;
18868 if (CONST_INT_P (align_exp))
18869 align = INTVAL (align_exp);
18870 /* i386 can do misaligned accesses at a reasonably small extra cost. */
18871 if (CONST_INT_P (expected_align_exp)
18872 && INTVAL (expected_align_exp) > align)
18873 align = INTVAL (expected_align_exp);
18874 if (CONST_INT_P (count_exp))
18875 count = expected_size = INTVAL (count_exp);
18876 if (CONST_INT_P (expected_size_exp) && count == 0)
18877 expected_size = INTVAL (expected_size_exp);
18879 /* Make sure we don't need to care about overflow later on. */
18880 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18881 return 0;
18883 /* Step 0: Decide on preferred algorithm, desired alignment and
18884 size of chunks to be copied by main loop. */
18886 alg = decide_alg (count, expected_size, true, &dynamic_check);
18887 desired_align = decide_alignment (align, alg, expected_size);
18889 if (!TARGET_ALIGN_STRINGOPS)
18890 align = desired_align;
18892 if (alg == libcall)
18893 return 0;
18894 gcc_assert (alg != no_stringop);
18895 if (!count)
18896 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18897 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18898 switch (alg)
18900 case libcall:
18901 case no_stringop:
18902 gcc_unreachable ();
18903 case loop:
18904 need_zero_guard = true;
18905 size_needed = GET_MODE_SIZE (Pmode);
18906 break;
18907 case unrolled_loop:
18908 need_zero_guard = true;
18909 size_needed = GET_MODE_SIZE (Pmode) * 4;
18910 break;
18911 case rep_prefix_8_byte:
18912 size_needed = 8;
18913 break;
18914 case rep_prefix_4_byte:
18915 size_needed = 4;
18916 break;
18917 case rep_prefix_1_byte:
18918 size_needed = 1;
18919 break;
18920 case loop_1_byte:
18921 need_zero_guard = true;
18922 size_needed = 1;
18923 break;
18925 epilogue_size_needed = size_needed;
18927 /* Step 1: Prologue guard. */
18929 /* Alignment code needs count to be in register. */
18930 if (CONST_INT_P (count_exp) && desired_align > align)
18932 if (INTVAL (count_exp) > desired_align
18933 && INTVAL (count_exp) > size_needed)
18935 align_bytes
18936 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18937 if (align_bytes <= 0)
18938 align_bytes = 0;
18939 else
18940 align_bytes = desired_align - align_bytes;
18942 if (align_bytes == 0)
18944 enum machine_mode mode = SImode;
18945 if (TARGET_64BIT && (count & ~0xffffffff))
18946 mode = DImode;
18947 count_exp = force_reg (mode, count_exp);
18950 /* Do the cheap promotion to allow better CSE across the
18951 main loop and epilogue (i.e. one load of the big constant at the
18952 front of all the code).  */
18953 if (CONST_INT_P (val_exp))
18954 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18955 desired_align, align);
18956 /* Ensure that alignment prologue won't copy past end of block. */
18957 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18959 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18960 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18961 Make sure it is a power of 2.  */
18962 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18964 /* To improve performance of small blocks, we jump around the VAL
18965 promoting code.  This means that if the promoted VAL is not constant,
18966 we might not use it in the epilogue and have to fall back to the byte
18967 loop variant.  */
18968 if (epilogue_size_needed > 2 && !promoted_val)
18969 force_loopy_epilogue = true;
18970 if (count)
18972 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18974 /* If main algorithm works on QImode, no epilogue is needed.
18975 For small sizes just don't align anything. */
18976 if (size_needed == 1)
18977 desired_align = align;
18978 else
18979 goto epilogue;
18982 else
18984 label = gen_label_rtx ();
18985 emit_cmp_and_jump_insns (count_exp,
18986 GEN_INT (epilogue_size_needed),
18987 LTU, 0, counter_mode (count_exp), 1, label);
18988 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18989 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18990 else
18991 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18994 if (dynamic_check != -1)
18996 rtx hot_label = gen_label_rtx ();
18997 jump_around_label = gen_label_rtx ();
18998 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18999 LEU, 0, counter_mode (count_exp), 1, hot_label);
19000 predict_jump (REG_BR_PROB_BASE * 90 / 100);
19001 set_storage_via_libcall (dst, count_exp, val_exp, false);
19002 emit_jump (jump_around_label);
19003 emit_label (hot_label);
19006 /* Step 2: Alignment prologue. */
19008 /* Do the expensive promotion once we branched off the small blocks. */
19009 if (!promoted_val)
19010 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
19011 desired_align, align);
19012 gcc_assert (desired_align >= 1 && align >= 1);
19014 if (desired_align > align)
19016 if (align_bytes == 0)
19018 /* Except for the first move in the epilogue, we no longer know
19019 the constant offset in the aliasing info.  It doesn't seem worth
19020 the pain to maintain it for the first move, so throw away
19021 the info early.  */
19022 dst = change_address (dst, BLKmode, destreg);
19023 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
19024 desired_align);
19026 else
19028 /* If we know how many bytes need to be stored before dst is
19029 sufficiently aligned, maintain aliasing info accurately. */
19030 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
19031 desired_align, align_bytes);
19032 count_exp = plus_constant (count_exp, -align_bytes);
19033 count -= align_bytes;
19035 if (need_zero_guard
19036 && (count < (unsigned HOST_WIDE_INT) size_needed
19037 || (align_bytes == 0
19038 && count < ((unsigned HOST_WIDE_INT) size_needed
19039 + desired_align - align))))
19041 /* It is possible that we copied enough so the main loop will not
19042 execute. */
19043 gcc_assert (size_needed > 1);
19044 if (label == NULL_RTX)
19045 label = gen_label_rtx ();
19046 emit_cmp_and_jump_insns (count_exp,
19047 GEN_INT (size_needed),
19048 LTU, 0, counter_mode (count_exp), 1, label);
19049 if (expected_size == -1
19050 || expected_size < (desired_align - align) / 2 + size_needed)
19051 predict_jump (REG_BR_PROB_BASE * 20 / 100);
19052 else
19053 predict_jump (REG_BR_PROB_BASE * 60 / 100);
19056 if (label && size_needed == 1)
19058 emit_label (label);
19059 LABEL_NUSES (label) = 1;
19060 label = NULL;
19061 promoted_val = val_exp;
19062 epilogue_size_needed = 1;
19064 else if (label == NULL_RTX)
19065 epilogue_size_needed = size_needed;
19067 /* Step 3: Main loop. */
19069 switch (alg)
19071 case libcall:
19072 case no_stringop:
19073 gcc_unreachable ();
19074 case loop_1_byte:
19075 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19076 count_exp, QImode, 1, expected_size);
19077 break;
19078 case loop:
19079 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19080 count_exp, Pmode, 1, expected_size);
19081 break;
19082 case unrolled_loop:
19083 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
19084 count_exp, Pmode, 4, expected_size);
19085 break;
19086 case rep_prefix_8_byte:
19087 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19088 DImode, val_exp);
19089 break;
19090 case rep_prefix_4_byte:
19091 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19092 SImode, val_exp);
19093 break;
19094 case rep_prefix_1_byte:
19095 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
19096 QImode, val_exp);
19097 break;
19099 /* Adjust properly the offset of src and dest memory for aliasing. */
19100 if (CONST_INT_P (count_exp))
19101 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
19102 (count / size_needed) * size_needed);
19103 else
19104 dst = change_address (dst, BLKmode, destreg);
19106 /* Step 4: Epilogue to copy the remaining bytes. */
19108 if (label)
19110 /* When the main loop is done, COUNT_EXP might hold the original count,
19111 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
19112 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
19113 bytes.  Compensate if needed.  */
19115 if (size_needed < epilogue_size_needed)
19117 tmp =
19118 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
19119 GEN_INT (size_needed - 1), count_exp, 1,
19120 OPTAB_DIRECT);
19121 if (tmp != count_exp)
19122 emit_move_insn (count_exp, tmp);
19124 emit_label (label);
19125 LABEL_NUSES (label) = 1;
19127 epilogue:
19128 if (count_exp != const0_rtx && epilogue_size_needed > 1)
19130 if (force_loopy_epilogue)
19131 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
19132 epilogue_size_needed);
19133 else
19134 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
19135 epilogue_size_needed);
19137 if (jump_around_label)
19138 emit_label (jump_around_label);
19139 return 1;
19142 /* Expand the appropriate insns for doing strlen if not just doing
19143 repnz; scasb
19145 out = result, initialized with the start address
19146 align_rtx = alignment of the address.
19147 scratch = scratch register, initialized with the start address when
19148 not aligned, otherwise undefined
19150 This is just the body. It needs the initializations mentioned above and
19151 some address computing at the end. These things are done in i386.md. */
19153 static void
19154 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
19156 int align;
19157 rtx tmp;
19158 rtx align_2_label = NULL_RTX;
19159 rtx align_3_label = NULL_RTX;
19160 rtx align_4_label = gen_label_rtx ();
19161 rtx end_0_label = gen_label_rtx ();
19162 rtx mem;
19163 rtx tmpreg = gen_reg_rtx (SImode);
19164 rtx scratch = gen_reg_rtx (SImode);
19165 rtx cmp;
19167 align = 0;
19168 if (CONST_INT_P (align_rtx))
19169 align = INTVAL (align_rtx);
19171 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
19173 /* Is there a known alignment and is it less than 4? */
19174 if (align < 4)
19176 rtx scratch1 = gen_reg_rtx (Pmode);
19177 emit_move_insn (scratch1, out);
19178 /* Is there a known alignment and is it not 2? */
19179 if (align != 2)
19181 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
19182 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
19184 /* Leave just the 3 lower bits. */
19185 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
19186 NULL_RTX, 0, OPTAB_WIDEN);
19188 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19189 Pmode, 1, align_4_label);
19190 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
19191 Pmode, 1, align_2_label);
19192 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
19193 Pmode, 1, align_3_label);
19195 else
19197 /* Since the alignment is 2, we have to check 2 or 0 bytes;
19198 check whether it is aligned to a 4-byte boundary.  */
19200 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
19201 NULL_RTX, 0, OPTAB_WIDEN);
19203 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
19204 Pmode, 1, align_4_label);
19207 mem = change_address (src, QImode, out);
19209 /* Now compare the bytes. */
19211 /* Compare the first n unaligned byte on a byte per byte basis. */
19212 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
19213 QImode, 1, end_0_label);
19215 /* Increment the address. */
19216 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19218 /* Not needed with an alignment of 2 */
19219 if (align != 2)
19221 emit_label (align_2_label);
19223 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19224 end_0_label);
19226 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19228 emit_label (align_3_label);
19231 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
19232 end_0_label);
19234 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
19237 /* Generate a loop to check 4 bytes at a time.  It is not a good idea to
19238 align this loop: it only enlarges the code without providing any
19239 speedup.  */
19240 emit_label (align_4_label);
19242 mem = change_address (src, SImode, out);
19243 emit_move_insn (scratch, mem);
19244 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
19246 /* This formula yields a nonzero result iff one of the bytes is zero.
19247 This saves three branches inside the loop and many cycles.  */
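/* A worked example of the test emitted below, i.e.
   (x - 0x01010101) & ~x & 0x80808080:

     x == 0x41424300 (NUL in the low byte):
       0x41424300 - 0x01010101 == 0x404141ff
       ~0x41424300             == 0xbebdbcff
       AND of the two          == 0x000100ff
       & 0x80808080            == 0x00000080   -> nonzero, a zero byte found

     x == 0x41424344 (no zero byte):
       (0x40414243 & 0xbebdbcbb) & 0x80808080 == 0   -> the loop continues.  */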
19249 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
19250 emit_insn (gen_one_cmplsi2 (scratch, scratch));
19251 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
19252 emit_insn (gen_andsi3 (tmpreg, tmpreg,
19253 gen_int_mode (0x80808080, SImode)));
19254 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
19255 align_4_label);
19257 if (TARGET_CMOVE)
19259 rtx reg = gen_reg_rtx (SImode);
19260 rtx reg2 = gen_reg_rtx (Pmode);
19261 emit_move_insn (reg, tmpreg);
19262 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
19264 /* If zero is not in the first two bytes, move two bytes forward. */
19265 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19266 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19267 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19268 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
19269 gen_rtx_IF_THEN_ELSE (SImode, tmp,
19270 reg,
19271 tmpreg)));
19272 /* Emit lea manually to avoid clobbering of flags. */
19273 emit_insn (gen_rtx_SET (SImode, reg2,
19274 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19276 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19277 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19278 emit_insn (gen_rtx_SET (VOIDmode, out,
19279 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19280 reg2,
19281 out)));
19283 else
19285 rtx end_2_label = gen_label_rtx ();
19286 /* Is zero in the first two bytes? */
19288 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19289 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19290 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19291 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19292 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19293 pc_rtx);
19294 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19295 JUMP_LABEL (tmp) = end_2_label;
19297 /* Not in the first two. Move two bytes forward. */
19298 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19299 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19301 emit_label (end_2_label);
19305 /* Avoid branch in fixing the byte. */
19306 tmpreg = gen_lowpart (QImode, tmpreg);
19307 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19308 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19309 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19310 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19312 emit_label (end_0_label);
19315 /* Expand strlen. */
19318 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19320 rtx addr, scratch1, scratch2, scratch3, scratch4;
19322 /* The generic case of the strlen expander is long. Avoid expanding
19323 it unless TARGET_INLINE_ALL_STRINGOPS. */
19325 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19326 && !TARGET_INLINE_ALL_STRINGOPS
19327 && !optimize_insn_for_size_p ()
19328 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19329 return 0;
19331 addr = force_reg (Pmode, XEXP (src, 0));
19332 scratch1 = gen_reg_rtx (Pmode);
19334 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19335 && !optimize_insn_for_size_p ())
19337 /* It seems that some optimizers do not combine a call like
19338 foo (strlen (bar), strlen (bar));
19339 when the move and the subtraction are done here. The length is
19340 still calculated just once when these instructions are emitted
19341 inside output_strlen_unroll(). But since &bar[strlen (bar)] is
19342 often used, and this uses one fewer register for the lifetime of
19343 output_strlen_unroll(), it is better this way. */
19345 emit_move_insn (out, addr);
19347 ix86_expand_strlensi_unroll_1 (out, src, align);
19349 /* strlensi_unroll_1 returns the address of the zero at the end of
19350 the string, like memchr(), so compute the length by subtracting
19351 the start address. */
19352 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19354 else
19356 rtx unspec;
19358 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19359 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19360 return false;
19362 scratch2 = gen_reg_rtx (Pmode);
19363 scratch3 = gen_reg_rtx (Pmode);
19364 scratch4 = force_reg (Pmode, constm1_rtx);
19366 emit_move_insn (scratch3, addr);
19367 eoschar = force_reg (QImode, eoschar);
19369 src = replace_equiv_address_nv (src, scratch3);
19371 /* If .md starts supporting :P, this can be done in .md. */
19372 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19373 scratch4), UNSPEC_SCAS);
19374 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19375 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19376 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
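/* A rough sketch of the arithmetic above (assuming the usual repnz scasb
   semantics, where the count register is decremented once per byte
   examined, including the terminating zero): the counter starts at -1,
   so after scanning a string of length LEN it holds -(LEN + 2); the
   final out = ~count + (-1) therefore yields (LEN + 1) - 1 = LEN.  */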
19378 return 1;
19381 /* For a given symbol (function), construct code to compute the address of
19382 its PLT entry in the large x86-64 PIC model. */
19384 construct_plt_address (rtx symbol)
19386 rtx tmp = gen_reg_rtx (Pmode);
19387 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19389 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19390 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19392 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19393 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19394 return tmp;
19397 void
19398 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19399 rtx callarg2,
19400 rtx pop, int sibcall)
19402 rtx use = NULL, call;
19404 if (pop == const0_rtx)
19405 pop = NULL;
19406 gcc_assert (!TARGET_64BIT || !pop);
19408 if (TARGET_MACHO && !TARGET_64BIT)
19410 #if TARGET_MACHO
19411 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19412 fnaddr = machopic_indirect_call_target (fnaddr);
19413 #endif
19415 else
19417 /* Static functions and indirect calls don't need the pic register. */
19418 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19419 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19420 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19421 use_reg (&use, pic_offset_table_rtx);
19424 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19426 rtx al = gen_rtx_REG (QImode, AX_REG);
19427 emit_move_insn (al, callarg2);
19428 use_reg (&use, al);
19431 if (ix86_cmodel == CM_LARGE_PIC
19432 && MEM_P (fnaddr)
19433 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19434 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19435 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19436 else if (sibcall
19437 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19438 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19440 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19441 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19444 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19445 if (retval)
19446 call = gen_rtx_SET (VOIDmode, retval, call);
19447 if (pop)
19449 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19450 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19451 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19453 if (TARGET_64BIT
19454 && ix86_cfun_abi () == MS_ABI
19455 && (!callarg2 || INTVAL (callarg2) != -2))
19457 /* We need to represent that SI and DI registers are clobbered
19458 by SYSV calls. */
19459 static int clobbered_registers[] = {
19460 XMM6_REG, XMM7_REG, XMM8_REG,
19461 XMM9_REG, XMM10_REG, XMM11_REG,
19462 XMM12_REG, XMM13_REG, XMM14_REG,
19463 XMM15_REG, SI_REG, DI_REG
19465 unsigned int i;
19466 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19467 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19468 UNSPEC_MS_TO_SYSV_CALL);
19470 vec[0] = call;
19471 vec[1] = unspec;
19472 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19473 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19474 ? TImode : DImode,
19475 gen_rtx_REG
19476 (SSE_REGNO_P (clobbered_registers[i])
19477 ? TImode : DImode,
19478 clobbered_registers[i]));
19480 call = gen_rtx_PARALLEL (VOIDmode,
19481 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19482 + 2, vec));
19485 call = emit_call_insn (call);
19486 if (use)
19487 CALL_INSN_FUNCTION_USAGE (call) = use;
19491 /* Clear stack slot assignments remembered from previous functions.
19492 This is called from INIT_EXPANDERS once before RTL is emitted for each
19493 function. */
19495 static struct machine_function *
19496 ix86_init_machine_status (void)
19498 struct machine_function *f;
19500 f = GGC_CNEW (struct machine_function);
19501 f->use_fast_prologue_epilogue_nregs = -1;
19502 f->tls_descriptor_call_expanded_p = 0;
19503 f->call_abi = ix86_abi;
19505 return f;
19508 /* Return a MEM corresponding to a stack slot with mode MODE.
19509 Allocate a new slot if necessary.
19511 The RTL for a function can have several slots available: N is
19512 which slot to use. */
19515 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19517 struct stack_local_entry *s;
19519 gcc_assert (n < MAX_386_STACK_LOCALS);
19521 /* Virtual slot is valid only before vregs are instantiated. */
19522 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19524 for (s = ix86_stack_locals; s; s = s->next)
19525 if (s->mode == mode && s->n == n)
19526 return copy_rtx (s->rtl);
19528 s = (struct stack_local_entry *)
19529 ggc_alloc (sizeof (struct stack_local_entry));
19530 s->n = n;
19531 s->mode = mode;
19532 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19534 s->next = ix86_stack_locals;
19535 ix86_stack_locals = s;
19536 return s->rtl;
19539 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19541 static GTY(()) rtx ix86_tls_symbol;
19543 ix86_tls_get_addr (void)
19546 if (!ix86_tls_symbol)
19548 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19549 (TARGET_ANY_GNU_TLS
19550 && !TARGET_64BIT)
19551 ? "___tls_get_addr"
19552 : "__tls_get_addr");
19555 return ix86_tls_symbol;
19558 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19560 static GTY(()) rtx ix86_tls_module_base_symbol;
19562 ix86_tls_module_base (void)
19565 if (!ix86_tls_module_base_symbol)
19567 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19568 "_TLS_MODULE_BASE_");
19569 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19570 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19573 return ix86_tls_module_base_symbol;
19576 /* Calculate the length of the memory address in the instruction
19577 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19580 memory_address_length (rtx addr)
19582 struct ix86_address parts;
19583 rtx base, index, disp;
19584 int len;
19585 int ok;
19587 if (GET_CODE (addr) == PRE_DEC
19588 || GET_CODE (addr) == POST_INC
19589 || GET_CODE (addr) == PRE_MODIFY
19590 || GET_CODE (addr) == POST_MODIFY)
19591 return 0;
19593 ok = ix86_decompose_address (addr, &parts);
19594 gcc_assert (ok);
19596 if (parts.base && GET_CODE (parts.base) == SUBREG)
19597 parts.base = SUBREG_REG (parts.base);
19598 if (parts.index && GET_CODE (parts.index) == SUBREG)
19599 parts.index = SUBREG_REG (parts.index);
19601 base = parts.base;
19602 index = parts.index;
19603 disp = parts.disp;
19604 len = 0;
19606 /* Rule of thumb:
19607 - esp as the base always wants an index,
19608 - ebp as the base always wants a displacement,
19609 - r12 as the base always wants an index,
19610 - r13 as the base always wants a displacement. */
19612 /* Register Indirect. */
19613 if (base && !index && !disp)
19615 /* esp (for its index) and ebp (for its displacement) need
19616 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19617 code. */
19618 if (REG_P (addr)
19619 && (addr == arg_pointer_rtx
19620 || addr == frame_pointer_rtx
19621 || REGNO (addr) == SP_REG
19622 || REGNO (addr) == BP_REG
19623 || REGNO (addr) == R12_REG
19624 || REGNO (addr) == R13_REG))
19625 len = 1;
19628 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
19629 is not disp32 but disp32(%rip), so for plain disp32
19630 a SIB byte is needed, unless print_operand_address
19631 optimizes it into disp32(%rip) or (%rip) is implied
19632 by an UNSPEC. */
19633 else if (disp && !base && !index)
19635 len = 4;
19636 if (TARGET_64BIT)
19638 rtx symbol = disp;
19640 if (GET_CODE (disp) == CONST)
19641 symbol = XEXP (disp, 0);
19642 if (GET_CODE (symbol) == PLUS
19643 && CONST_INT_P (XEXP (symbol, 1)))
19644 symbol = XEXP (symbol, 0);
19646 if (GET_CODE (symbol) != LABEL_REF
19647 && (GET_CODE (symbol) != SYMBOL_REF
19648 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19649 && (GET_CODE (symbol) != UNSPEC
19650 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19651 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19652 len += 1;
19656 else
19658 /* Find the length of the displacement constant. */
19659 if (disp)
19661 if (base && satisfies_constraint_K (disp))
19662 len = 1;
19663 else
19664 len = 4;
19666 /* ebp always wants a displacement. Similarly r13. */
19667 else if (base && REG_P (base)
19668 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19669 len = 1;
19671 /* An index requires the two-byte modrm form.... */
19672 if (index
19673 /* ...like esp (or r12), which always wants an index. */
19674 || base == arg_pointer_rtx
19675 || base == frame_pointer_rtx
19676 || (base && REG_P (base)
19677 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
19678 len += 1;
19681 switch (parts.seg)
19683 case SEG_FS:
19684 case SEG_GS:
19685 len += 1;
19686 break;
19687 default:
19688 break;
19691 return len;
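/* A few example lengths implied by the rules above (the modrm byte itself
   is not counted): (%eax) -> 0, (%esp) -> 1 (SIB), (%ebp) -> 1 (disp8),
   8(%ecx) -> 1, 0x12345678(%ecx) -> 4, (%eax,%ebx,4) -> 1 (SIB), and an
   %fs or %gs segment override adds one more byte.  */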
19694 /* Compute default value for "length_immediate" attribute. When SHORTFORM
19695 is set, expect that the insn has an 8-bit immediate alternative. */
19697 ix86_attr_length_immediate_default (rtx insn, int shortform)
19699 int len = 0;
19700 int i;
19701 extract_insn_cached (insn);
19702 for (i = recog_data.n_operands - 1; i >= 0; --i)
19703 if (CONSTANT_P (recog_data.operand[i]))
19705 enum attr_mode mode = get_attr_mode (insn);
19707 gcc_assert (!len);
19708 if (shortform && CONST_INT_P (recog_data.operand[i]))
19710 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19711 switch (mode)
19713 case MODE_QI:
19714 len = 1;
19715 continue;
19716 case MODE_HI:
19717 ival = trunc_int_for_mode (ival, HImode);
19718 break;
19719 case MODE_SI:
19720 ival = trunc_int_for_mode (ival, SImode);
19721 break;
19722 default:
19723 break;
19725 if (IN_RANGE (ival, -128, 127))
19727 len = 1;
19728 continue;
19731 switch (mode)
19733 case MODE_QI:
19734 len = 1;
19735 break;
19736 case MODE_HI:
19737 len = 2;
19738 break;
19739 case MODE_SI:
19740 len = 4;
19741 break;
19742 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19743 case MODE_DI:
19744 len = 4;
19745 break;
19746 default:
19747 fatal_insn ("unknown insn mode", insn);
19750 return len;
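/* For example: with SHORTFORM set and an imm8 alternative available,
   "addl $3, %eax" counts as a 1-byte immediate, "addl $1000, %eax" needs
   the full 4-byte immediate, and "addw $1000, %ax" a 2-byte one.  */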
19752 /* Compute default value for "length_address" attribute. */
19754 ix86_attr_length_address_default (rtx insn)
19756 int i;
19758 if (get_attr_type (insn) == TYPE_LEA)
19760 rtx set = PATTERN (insn), addr;
19762 if (GET_CODE (set) == PARALLEL)
19763 set = XVECEXP (set, 0, 0);
19765 gcc_assert (GET_CODE (set) == SET);
19767 addr = SET_SRC (set);
19768 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19770 if (GET_CODE (addr) == ZERO_EXTEND)
19771 addr = XEXP (addr, 0);
19772 if (GET_CODE (addr) == SUBREG)
19773 addr = SUBREG_REG (addr);
19776 return memory_address_length (addr);
19779 extract_insn_cached (insn);
19780 for (i = recog_data.n_operands - 1; i >= 0; --i)
19781 if (MEM_P (recog_data.operand[i]))
19783 constrain_operands_cached (reload_completed);
19784 if (which_alternative != -1)
19786 const char *constraints = recog_data.constraints[i];
19787 int alt = which_alternative;
19789 while (*constraints == '=' || *constraints == '+')
19790 constraints++;
19791 while (alt-- > 0)
19792 while (*constraints++ != ',')
19794 /* Skip ignored operands. */
19795 if (*constraints == 'X')
19796 continue;
19798 return memory_address_length (XEXP (recog_data.operand[i], 0));
19800 return 0;
19803 /* Compute default value for "length_vex" attribute. It includes
19804 the 2- or 3-byte VEX prefix and 1 opcode byte. */
19807 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19808 int has_vex_w)
19810 int i;
19812 /* Only the 0f opcode can use the 2-byte VEX prefix, and the VEX W bit
19813 requires the 3-byte VEX prefix. */
19814 if (!has_0f_opcode || has_vex_w)
19815 return 3 + 1;
19817 /* We can always use the 2-byte VEX prefix in 32-bit mode. */
19818 if (!TARGET_64BIT)
19819 return 2 + 1;
19821 extract_insn_cached (insn);
19823 for (i = recog_data.n_operands - 1; i >= 0; --i)
19824 if (REG_P (recog_data.operand[i]))
19826 /* REX.W bit uses 3 byte VEX prefix. */
19827 if (GET_MODE (recog_data.operand[i]) == DImode
19828 && GENERAL_REG_P (recog_data.operand[i]))
19829 return 3 + 1;
19831 else
19833 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19834 if (MEM_P (recog_data.operand[i])
19835 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19836 return 3 + 1;
19839 return 2 + 1;
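/* For instance, "vaddps %xmm2, %xmm1, %xmm0" can use the 2-byte VEX
   prefix (2 + 1), while an insn that needs REX.W, REX.X or REX.B --
   say a memory operand involving %r8 -- must use the 3-byte form
   (3 + 1).  Illustrative only; the prefix actually chosen also depends
   on the insn's other encoding constraints.  */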
19842 /* Return the maximum number of instructions a cpu can issue. */
19844 static int
19845 ix86_issue_rate (void)
19847 switch (ix86_tune)
19849 case PROCESSOR_PENTIUM:
19850 case PROCESSOR_ATOM:
19851 case PROCESSOR_K6:
19852 return 2;
19854 case PROCESSOR_PENTIUMPRO:
19855 case PROCESSOR_PENTIUM4:
19856 case PROCESSOR_ATHLON:
19857 case PROCESSOR_K8:
19858 case PROCESSOR_AMDFAM10:
19859 case PROCESSOR_NOCONA:
19860 case PROCESSOR_GENERIC32:
19861 case PROCESSOR_GENERIC64:
19862 case PROCESSOR_BDVER1:
19863 return 3;
19865 case PROCESSOR_CORE2:
19866 return 4;
19868 default:
19869 return 1;
19873 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
19874 by DEP_INSN and nothing else set by DEP_INSN. */
19876 static int
19877 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19879 rtx set, set2;
19881 /* Simplify the test for uninteresting insns. */
19882 if (insn_type != TYPE_SETCC
19883 && insn_type != TYPE_ICMOV
19884 && insn_type != TYPE_FCMOV
19885 && insn_type != TYPE_IBR)
19886 return 0;
19888 if ((set = single_set (dep_insn)) != 0)
19890 set = SET_DEST (set);
19891 set2 = NULL_RTX;
19893 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19894 && XVECLEN (PATTERN (dep_insn), 0) == 2
19895 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19896 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19898 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19899 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19901 else
19902 return 0;
19904 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19905 return 0;
19907 /* This test is true if the dependent insn reads the flags but
19908 not any other potentially set register. */
19909 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19910 return 0;
19912 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19913 return 0;
19915 return 1;
19918 /* Return true iff USE_INSN has a memory address with operands set by
19919 SET_INSN. */
19921 bool
19922 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19924 int i;
19925 extract_insn_cached (use_insn);
19926 for (i = recog_data.n_operands - 1; i >= 0; --i)
19927 if (MEM_P (recog_data.operand[i]))
19929 rtx addr = XEXP (recog_data.operand[i], 0);
19930 return modified_in_p (addr, set_insn) != 0;
19932 return false;
19935 static int
19936 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19938 enum attr_type insn_type, dep_insn_type;
19939 enum attr_memory memory;
19940 rtx set, set2;
19941 int dep_insn_code_number;
19943 /* Anti and output dependencies have zero cost on all CPUs. */
19944 if (REG_NOTE_KIND (link) != 0)
19945 return 0;
19947 dep_insn_code_number = recog_memoized (dep_insn);
19949 /* If we can't recognize the insns, we can't really do anything. */
19950 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19951 return cost;
19953 insn_type = get_attr_type (insn);
19954 dep_insn_type = get_attr_type (dep_insn);
19956 switch (ix86_tune)
19958 case PROCESSOR_PENTIUM:
19959 /* Address Generation Interlock adds a cycle of latency. */
19960 if (insn_type == TYPE_LEA)
19962 rtx addr = PATTERN (insn);
19964 if (GET_CODE (addr) == PARALLEL)
19965 addr = XVECEXP (addr, 0, 0);
19967 gcc_assert (GET_CODE (addr) == SET);
19969 addr = SET_SRC (addr);
19970 if (modified_in_p (addr, dep_insn))
19971 cost += 1;
19973 else if (ix86_agi_dependent (dep_insn, insn))
19974 cost += 1;
19976 /* ??? Compares pair with jump/setcc. */
19977 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19978 cost = 0;
19980 /* Floating point stores require the value to be ready one cycle earlier. */
19981 if (insn_type == TYPE_FMOV
19982 && get_attr_memory (insn) == MEMORY_STORE
19983 && !ix86_agi_dependent (dep_insn, insn))
19984 cost += 1;
19985 break;
19987 case PROCESSOR_PENTIUMPRO:
19988 memory = get_attr_memory (insn);
19990 /* INT->FP conversion is expensive. */
19991 if (get_attr_fp_int_src (dep_insn))
19992 cost += 5;
19994 /* There is one cycle extra latency between an FP op and a store. */
19995 if (insn_type == TYPE_FMOV
19996 && (set = single_set (dep_insn)) != NULL_RTX
19997 && (set2 = single_set (insn)) != NULL_RTX
19998 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19999 && MEM_P (SET_DEST (set2)))
20000 cost += 1;
20002 /* Model the ability of the reorder buffer to hide the latency of a load
20003 by executing it in parallel with the previous instruction, when the
20004 previous instruction is not needed to compute the address. */
20005 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20006 && !ix86_agi_dependent (dep_insn, insn))
20008 /* Claim that moves take one cycle, as the core can issue one load
20009 at a time and the next load can start a cycle later. */
20010 if (dep_insn_type == TYPE_IMOV
20011 || dep_insn_type == TYPE_FMOV)
20012 cost = 1;
20013 else if (cost > 1)
20014 cost--;
20016 break;
20018 case PROCESSOR_K6:
20019 memory = get_attr_memory (insn);
20021 /* The esp dependency is resolved before the instruction is really
20022 finished. */
20023 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
20024 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
20025 return 1;
20027 /* INT->FP conversion is expensive. */
20028 if (get_attr_fp_int_src (dep_insn))
20029 cost += 5;
20031 /* Model the ability of the reorder buffer to hide the latency of a load
20032 by executing it in parallel with the previous instruction, when the
20033 previous instruction is not needed to compute the address. */
20034 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20035 && !ix86_agi_dependent (dep_insn, insn))
20037 /* Claim that moves take one cycle, as the core can issue one load
20038 at a time and the next load can start a cycle later. */
20039 if (dep_insn_type == TYPE_IMOV
20040 || dep_insn_type == TYPE_FMOV)
20041 cost = 1;
20042 else if (cost > 2)
20043 cost -= 2;
20044 else
20045 cost = 1;
20047 break;
20049 case PROCESSOR_ATHLON:
20050 case PROCESSOR_K8:
20051 case PROCESSOR_AMDFAM10:
20052 case PROCESSOR_BDVER1:
20053 case PROCESSOR_ATOM:
20054 case PROCESSOR_GENERIC32:
20055 case PROCESSOR_GENERIC64:
20056 memory = get_attr_memory (insn);
20058 /* Model the ability of the reorder buffer to hide the latency of a load
20059 by executing it in parallel with the previous instruction, when the
20060 previous instruction is not needed to compute the address. */
20061 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
20062 && !ix86_agi_dependent (dep_insn, insn))
20064 enum attr_unit unit = get_attr_unit (insn);
20065 int loadcost = 3;
20067 /* Because of the difference between the length of integer and
20068 floating unit pipeline preparation stages, the memory operands
20069 for floating point are cheaper.
20071 ??? For Athlon, the difference is most probably 2. */
20072 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
20073 loadcost = 3;
20074 else
20075 loadcost = TARGET_ATHLON ? 2 : 0;
20077 if (cost >= loadcost)
20078 cost -= loadcost;
20079 else
20080 cost = 0;
20083 default:
20084 break;
20087 return cost;
20090 /* How many alternative schedules to try. This should be as wide as the
20091 scheduling freedom in the DFA, but no wider. Making this value too
20092 large results in extra work for the scheduler. */
20094 static int
20095 ia32_multipass_dfa_lookahead (void)
20097 switch (ix86_tune)
20099 case PROCESSOR_PENTIUM:
20100 return 2;
20102 case PROCESSOR_PENTIUMPRO:
20103 case PROCESSOR_K6:
20104 return 1;
20106 default:
20107 return 0;
20112 /* Compute the alignment given to a constant that is being placed in memory.
20113 EXP is the constant and ALIGN is the alignment that the object would
20114 ordinarily have.
20115 The value of this function is used instead of that alignment to align
20116 the object. */
20119 ix86_constant_alignment (tree exp, int align)
20121 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
20122 || TREE_CODE (exp) == INTEGER_CST)
20124 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
20125 return 64;
20126 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
20127 return 128;
20129 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
20130 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
20131 return BITS_PER_WORD;
20133 return align;
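/* For example: a DFmode (double) constant below 64-bit alignment is bumped
   to 64 bits, and, when not optimizing for size, a string constant of at
   least 31 characters is aligned to BITS_PER_WORD, presumably so that
   word-wise copies of it stay aligned.  */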
20136 /* Compute the alignment for a static variable.
20137 TYPE is the data type, and ALIGN is the alignment that
20138 the object would ordinarily have. The value of this function is used
20139 instead of that alignment to align the object. */
20142 ix86_data_alignment (tree type, int align)
20144 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
20146 if (AGGREGATE_TYPE_P (type)
20147 && TYPE_SIZE (type)
20148 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20149 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
20150 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
20151 && align < max_align)
20152 align = max_align;
20154 /* The x86-64 ABI requires arrays larger than 16 bytes to be aligned
20155 to a 16-byte boundary. */
20156 if (TARGET_64BIT)
20158 if (AGGREGATE_TYPE_P (type)
20159 && TYPE_SIZE (type)
20160 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20161 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
20162 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20163 return 128;
20166 if (TREE_CODE (type) == ARRAY_TYPE)
20168 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20169 return 64;
20170 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20171 return 128;
20173 else if (TREE_CODE (type) == COMPLEX_TYPE)
20176 if (TYPE_MODE (type) == DCmode && align < 64)
20177 return 64;
20178 if ((TYPE_MODE (type) == XCmode
20179 || TYPE_MODE (type) == TCmode) && align < 128)
20180 return 128;
20182 else if ((TREE_CODE (type) == RECORD_TYPE
20183 || TREE_CODE (type) == UNION_TYPE
20184 || TREE_CODE (type) == QUAL_UNION_TYPE)
20185 && TYPE_FIELDS (type))
20187 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20188 return 64;
20189 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20190 return 128;
20192 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20193 || TREE_CODE (type) == INTEGER_TYPE)
20195 if (TYPE_MODE (type) == DFmode && align < 64)
20196 return 64;
20197 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20198 return 128;
20201 return align;
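/* For example (with default options): a static "double" is aligned to
   64 bits, and on x86-64 a static "double d[3]" -- 24 bytes, i.e. larger
   than 16 bytes -- is aligned to 128 bits by the ABI rule above.  */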
20204 /* Compute the alignment for a local variable or a stack slot. EXP is
20205 the data type or decl itself, MODE is the widest mode available and
20206 ALIGN is the alignment that the object would ordinarily have. The
20207 value of this macro is used instead of that alignment to align the
20208 object. */
20210 unsigned int
20211 ix86_local_alignment (tree exp, enum machine_mode mode,
20212 unsigned int align)
20214 tree type, decl;
20216 if (exp && DECL_P (exp))
20218 type = TREE_TYPE (exp);
20219 decl = exp;
20221 else
20223 type = exp;
20224 decl = NULL;
20227 /* Don't do dynamic stack realignment for long long objects with
20228 -mpreferred-stack-boundary=2. */
20229 if (!TARGET_64BIT
20230 && align == 64
20231 && ix86_preferred_stack_boundary < 64
20232 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
20233 && (!type || !TYPE_USER_ALIGN (type))
20234 && (!decl || !DECL_USER_ALIGN (decl)))
20235 align = 32;
20237 /* If TYPE is NULL, we are allocating a stack slot for a caller-save
20238 register in MODE. We will return the largest alignment of XF
20239 and DF. */
20240 if (!type)
20242 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
20243 align = GET_MODE_ALIGNMENT (DFmode);
20244 return align;
20247 /* The x86-64 ABI requires arrays larger than 16 bytes to be aligned
20248 to a 16-byte boundary. The exact wording is:
20250 An array uses the same alignment as its elements, except that a local or
20251 global array variable of length at least 16 bytes or
20252 a C99 variable-length array variable always has alignment of at least 16 bytes.
20254 This was added to allow the use of aligned SSE instructions on arrays. The
20255 rule is meant for static storage (where the compiler cannot do the analysis
20256 by itself). We follow it for automatic variables only when convenient:
20257 we fully control everything in the function being compiled, and functions
20258 from other units cannot rely on the alignment.
20260 Exclude the va_list type. It is the common case of a local array where
20261 we cannot benefit from the alignment. */
20262 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
20263 && TARGET_SSE)
20265 if (AGGREGATE_TYPE_P (type)
20266 && (TYPE_MAIN_VARIANT (type)
20267 != TYPE_MAIN_VARIANT (va_list_type_node))
20268 && TYPE_SIZE (type)
20269 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
20270 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
20271 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
20272 return 128;
20274 if (TREE_CODE (type) == ARRAY_TYPE)
20276 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
20277 return 64;
20278 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
20279 return 128;
20281 else if (TREE_CODE (type) == COMPLEX_TYPE)
20283 if (TYPE_MODE (type) == DCmode && align < 64)
20284 return 64;
20285 if ((TYPE_MODE (type) == XCmode
20286 || TYPE_MODE (type) == TCmode) && align < 128)
20287 return 128;
20289 else if ((TREE_CODE (type) == RECORD_TYPE
20290 || TREE_CODE (type) == UNION_TYPE
20291 || TREE_CODE (type) == QUAL_UNION_TYPE)
20292 && TYPE_FIELDS (type))
20294 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20295 return 64;
20296 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20297 return 128;
20299 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20300 || TREE_CODE (type) == INTEGER_TYPE)
20303 if (TYPE_MODE (type) == DFmode && align < 64)
20304 return 64;
20305 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20306 return 128;
20308 return align;
20311 /* Compute the minimum required alignment for dynamic stack realignment
20312 purposes for a local variable, parameter or a stack slot. EXP is
20313 the data type or decl itself, MODE is its mode and ALIGN is the
20314 alignment that the object would ordinarily have. */
20316 unsigned int
20317 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20318 unsigned int align)
20320 tree type, decl;
20322 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20323 return align;
20325 if (exp && DECL_P (exp))
20327 type = TREE_TYPE (exp);
20328 decl = exp;
20330 else
20332 type = exp;
20333 decl = NULL;
20336 /* Don't do dynamic stack realignment for long long objects with
20337 -mpreferred-stack-boundary=2. */
20338 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20339 && (!type || !TYPE_USER_ALIGN (type))
20340 && (!decl || !DECL_USER_ALIGN (decl)))
20341 return 32;
20343 return align;
20346 /* Find a location for the static chain incoming to a nested function.
20347 This is a register, unless all free registers are used by arguments. */
20349 static rtx
20350 ix86_static_chain (const_tree fndecl, bool incoming_p)
20352 unsigned regno;
20354 if (!DECL_STATIC_CHAIN (fndecl))
20355 return NULL;
20357 if (TARGET_64BIT)
20359 /* We always use R10 in 64-bit mode. */
20360 regno = R10_REG;
20362 else
20364 tree fntype;
20365 /* By default in 32-bit mode we use ECX to pass the static chain. */
20366 regno = CX_REG;
20368 fntype = TREE_TYPE (fndecl);
20369 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20371 /* Fastcall functions use ecx/edx for arguments, which leaves
20372 us with EAX for the static chain. */
20373 regno = AX_REG;
20375 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)))
20377 /* Thiscall functions use ecx for arguments, which leaves
20378 us with EAX for the static chain. */
20379 regno = AX_REG;
20381 else if (ix86_function_regparm (fntype, fndecl) == 3)
20383 /* For regparm 3, we have no free call-clobbered registers in
20384 which to store the static chain. In order to implement this,
20385 we have the trampoline push the static chain to the stack.
20386 However, we can't push a value below the return address when
20387 we call the nested function directly, so we have to use an
20388 alternate entry point. For this we use ESI, and have the
20389 alternate entry point push ESI, so that things appear the
20390 same once we're executing the nested function. */
20391 if (incoming_p)
20393 if (fndecl == current_function_decl)
20394 ix86_static_chain_on_stack = true;
20395 return gen_frame_mem (SImode,
20396 plus_constant (arg_pointer_rtx, -8));
20398 regno = SI_REG;
20402 return gen_rtx_REG (Pmode, regno);
20405 /* Emit RTL insns to initialize the variable parts of a trampoline.
20406 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20407 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20408 to be passed to the target function. */
20410 static void
20411 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20413 rtx mem, fnaddr;
20415 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20417 if (!TARGET_64BIT)
20419 rtx disp, chain;
20420 int opcode;
20422 /* Depending on the static chain location, either load a register
20423 with a constant, or push the constant to the stack. All of the
20424 instructions are the same size. */
20425 chain = ix86_static_chain (fndecl, true);
20426 if (REG_P (chain))
20428 if (REGNO (chain) == CX_REG)
20429 opcode = 0xb9;
20430 else if (REGNO (chain) == AX_REG)
20431 opcode = 0xb8;
20432 else
20433 gcc_unreachable ();
20435 else
20436 opcode = 0x68;
20438 mem = adjust_address (m_tramp, QImode, 0);
20439 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20441 mem = adjust_address (m_tramp, SImode, 1);
20442 emit_move_insn (mem, chain_value);
20444 /* Compute offset from the end of the jmp to the target function.
20445 In the case in which the trampoline stores the static chain on
20446 the stack, we need to skip the first insn which pushes the
20447 (call-saved) register static chain; this push is 1 byte. */
20448 disp = expand_binop (SImode, sub_optab, fnaddr,
20449 plus_constant (XEXP (m_tramp, 0),
20450 MEM_P (chain) ? 9 : 10),
20451 NULL_RTX, 1, OPTAB_DIRECT);
20453 mem = adjust_address (m_tramp, QImode, 5);
20454 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20456 mem = adjust_address (m_tramp, SImode, 6);
20457 emit_move_insn (mem, disp);
20459 else
20461 int offset = 0;
20463 /* Load the function address into r11. Try to load the address using
20464 the shorter movl instead of movabs. We may want to support
20465 movq for kernel mode, but the kernel does not use trampolines at
20466 the moment. */
20467 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20469 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20471 mem = adjust_address (m_tramp, HImode, offset);
20472 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20474 mem = adjust_address (m_tramp, SImode, offset + 2);
20475 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20476 offset += 6;
20478 else
20480 mem = adjust_address (m_tramp, HImode, offset);
20481 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20483 mem = adjust_address (m_tramp, DImode, offset + 2);
20484 emit_move_insn (mem, fnaddr);
20485 offset += 10;
20488 /* Load static chain using movabs to r10. */
20489 mem = adjust_address (m_tramp, HImode, offset);
20490 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20492 mem = adjust_address (m_tramp, DImode, offset + 2);
20493 emit_move_insn (mem, chain_value);
20494 offset += 10;
20496 /* Jump to r11; the last (unused) byte is a nop, only there to
20497 pad the write out to a single 32-bit store. */
20498 mem = adjust_address (m_tramp, SImode, offset);
20499 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
20500 offset += 4;
20502 gcc_assert (offset <= TRAMPOLINE_SIZE);
20505 #ifdef ENABLE_EXECUTE_STACK
20506 #ifdef CHECK_EXECUTE_STACK_ENABLED
20507 if (CHECK_EXECUTE_STACK_ENABLED)
20508 #endif
20509 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20510 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20511 #endif
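/* For reference, the trampolines written above decode roughly as follows
   (byte values taken from the constants emitted in this function):

   32-bit:  b9/b8 imm32   movl $chain, %ecx / %eax   (or 68 imm32: pushl $chain)
            e9 rel32      jmp  fnaddr

   64-bit:  41 bb imm32   movl $fnaddr, %r11d        (or 49 bb imm64: movabs)
            49 ba imm64   movabs $chain, %r10
            49 ff e3      jmp  *%r11
            90            nop -- pads the final write to a full 32-bit store.  */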
20514 /* The following file contains several enumerations and data structures
20515 built from the definitions in i386-builtin-types.def. */
20517 #include "i386-builtin-types.inc"
20519 /* Table for the ix86 builtin non-function types. */
20520 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20522 /* Retrieve an element from the above table, building some of
20523 the types lazily. */
20525 static tree
20526 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20528 unsigned int index;
20529 tree type, itype;
20531 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
20533 type = ix86_builtin_type_tab[(int) tcode];
20534 if (type != NULL)
20535 return type;
20537 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20538 if (tcode <= IX86_BT_LAST_VECT)
20540 enum machine_mode mode;
20542 index = tcode - IX86_BT_LAST_PRIM - 1;
20543 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20544 mode = ix86_builtin_type_vect_mode[index];
20546 type = build_vector_type_for_mode (itype, mode);
20548 else
20550 int quals;
20552 index = tcode - IX86_BT_LAST_VECT - 1;
20553 if (tcode <= IX86_BT_LAST_PTR)
20554 quals = TYPE_UNQUALIFIED;
20555 else
20556 quals = TYPE_QUAL_CONST;
20558 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20559 if (quals != TYPE_UNQUALIFIED)
20560 itype = build_qualified_type (itype, quals);
20562 type = build_pointer_type (itype);
20565 ix86_builtin_type_tab[(int) tcode] = type;
20566 return type;
20569 /* Table for the ix86 builtin function types. */
20570 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20572 /* Retrieve an element from the above table, building some of
20573 the types lazily. */
20575 static tree
20576 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20578 tree type;
20580 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20582 type = ix86_builtin_func_type_tab[(int) tcode];
20583 if (type != NULL)
20584 return type;
20586 if (tcode <= IX86_BT_LAST_FUNC)
20588 unsigned start = ix86_builtin_func_start[(int) tcode];
20589 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20590 tree rtype, atype, args = void_list_node;
20591 unsigned i;
20593 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20594 for (i = after - 1; i > start; --i)
20596 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20597 args = tree_cons (NULL, atype, args);
20600 type = build_function_type (rtype, args);
20602 else
20604 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20605 enum ix86_builtin_func_type icode;
20607 icode = ix86_builtin_func_alias_base[index];
20608 type = ix86_get_builtin_func_type (icode);
20611 ix86_builtin_func_type_tab[(int) tcode] = type;
20612 return type;
20616 /* Codes for all the SSE/MMX builtins. */
20617 enum ix86_builtins
20619 IX86_BUILTIN_ADDPS,
20620 IX86_BUILTIN_ADDSS,
20621 IX86_BUILTIN_DIVPS,
20622 IX86_BUILTIN_DIVSS,
20623 IX86_BUILTIN_MULPS,
20624 IX86_BUILTIN_MULSS,
20625 IX86_BUILTIN_SUBPS,
20626 IX86_BUILTIN_SUBSS,
20628 IX86_BUILTIN_CMPEQPS,
20629 IX86_BUILTIN_CMPLTPS,
20630 IX86_BUILTIN_CMPLEPS,
20631 IX86_BUILTIN_CMPGTPS,
20632 IX86_BUILTIN_CMPGEPS,
20633 IX86_BUILTIN_CMPNEQPS,
20634 IX86_BUILTIN_CMPNLTPS,
20635 IX86_BUILTIN_CMPNLEPS,
20636 IX86_BUILTIN_CMPNGTPS,
20637 IX86_BUILTIN_CMPNGEPS,
20638 IX86_BUILTIN_CMPORDPS,
20639 IX86_BUILTIN_CMPUNORDPS,
20640 IX86_BUILTIN_CMPEQSS,
20641 IX86_BUILTIN_CMPLTSS,
20642 IX86_BUILTIN_CMPLESS,
20643 IX86_BUILTIN_CMPNEQSS,
20644 IX86_BUILTIN_CMPNLTSS,
20645 IX86_BUILTIN_CMPNLESS,
20646 IX86_BUILTIN_CMPNGTSS,
20647 IX86_BUILTIN_CMPNGESS,
20648 IX86_BUILTIN_CMPORDSS,
20649 IX86_BUILTIN_CMPUNORDSS,
20651 IX86_BUILTIN_COMIEQSS,
20652 IX86_BUILTIN_COMILTSS,
20653 IX86_BUILTIN_COMILESS,
20654 IX86_BUILTIN_COMIGTSS,
20655 IX86_BUILTIN_COMIGESS,
20656 IX86_BUILTIN_COMINEQSS,
20657 IX86_BUILTIN_UCOMIEQSS,
20658 IX86_BUILTIN_UCOMILTSS,
20659 IX86_BUILTIN_UCOMILESS,
20660 IX86_BUILTIN_UCOMIGTSS,
20661 IX86_BUILTIN_UCOMIGESS,
20662 IX86_BUILTIN_UCOMINEQSS,
20664 IX86_BUILTIN_CVTPI2PS,
20665 IX86_BUILTIN_CVTPS2PI,
20666 IX86_BUILTIN_CVTSI2SS,
20667 IX86_BUILTIN_CVTSI642SS,
20668 IX86_BUILTIN_CVTSS2SI,
20669 IX86_BUILTIN_CVTSS2SI64,
20670 IX86_BUILTIN_CVTTPS2PI,
20671 IX86_BUILTIN_CVTTSS2SI,
20672 IX86_BUILTIN_CVTTSS2SI64,
20674 IX86_BUILTIN_MAXPS,
20675 IX86_BUILTIN_MAXSS,
20676 IX86_BUILTIN_MINPS,
20677 IX86_BUILTIN_MINSS,
20679 IX86_BUILTIN_LOADUPS,
20680 IX86_BUILTIN_STOREUPS,
20681 IX86_BUILTIN_MOVSS,
20683 IX86_BUILTIN_MOVHLPS,
20684 IX86_BUILTIN_MOVLHPS,
20685 IX86_BUILTIN_LOADHPS,
20686 IX86_BUILTIN_LOADLPS,
20687 IX86_BUILTIN_STOREHPS,
20688 IX86_BUILTIN_STORELPS,
20690 IX86_BUILTIN_MASKMOVQ,
20691 IX86_BUILTIN_MOVMSKPS,
20692 IX86_BUILTIN_PMOVMSKB,
20694 IX86_BUILTIN_MOVNTPS,
20695 IX86_BUILTIN_MOVNTQ,
20697 IX86_BUILTIN_LOADDQU,
20698 IX86_BUILTIN_STOREDQU,
20700 IX86_BUILTIN_PACKSSWB,
20701 IX86_BUILTIN_PACKSSDW,
20702 IX86_BUILTIN_PACKUSWB,
20704 IX86_BUILTIN_PADDB,
20705 IX86_BUILTIN_PADDW,
20706 IX86_BUILTIN_PADDD,
20707 IX86_BUILTIN_PADDQ,
20708 IX86_BUILTIN_PADDSB,
20709 IX86_BUILTIN_PADDSW,
20710 IX86_BUILTIN_PADDUSB,
20711 IX86_BUILTIN_PADDUSW,
20712 IX86_BUILTIN_PSUBB,
20713 IX86_BUILTIN_PSUBW,
20714 IX86_BUILTIN_PSUBD,
20715 IX86_BUILTIN_PSUBQ,
20716 IX86_BUILTIN_PSUBSB,
20717 IX86_BUILTIN_PSUBSW,
20718 IX86_BUILTIN_PSUBUSB,
20719 IX86_BUILTIN_PSUBUSW,
20721 IX86_BUILTIN_PAND,
20722 IX86_BUILTIN_PANDN,
20723 IX86_BUILTIN_POR,
20724 IX86_BUILTIN_PXOR,
20726 IX86_BUILTIN_PAVGB,
20727 IX86_BUILTIN_PAVGW,
20729 IX86_BUILTIN_PCMPEQB,
20730 IX86_BUILTIN_PCMPEQW,
20731 IX86_BUILTIN_PCMPEQD,
20732 IX86_BUILTIN_PCMPGTB,
20733 IX86_BUILTIN_PCMPGTW,
20734 IX86_BUILTIN_PCMPGTD,
20736 IX86_BUILTIN_PMADDWD,
20738 IX86_BUILTIN_PMAXSW,
20739 IX86_BUILTIN_PMAXUB,
20740 IX86_BUILTIN_PMINSW,
20741 IX86_BUILTIN_PMINUB,
20743 IX86_BUILTIN_PMULHUW,
20744 IX86_BUILTIN_PMULHW,
20745 IX86_BUILTIN_PMULLW,
20747 IX86_BUILTIN_PSADBW,
20748 IX86_BUILTIN_PSHUFW,
20750 IX86_BUILTIN_PSLLW,
20751 IX86_BUILTIN_PSLLD,
20752 IX86_BUILTIN_PSLLQ,
20753 IX86_BUILTIN_PSRAW,
20754 IX86_BUILTIN_PSRAD,
20755 IX86_BUILTIN_PSRLW,
20756 IX86_BUILTIN_PSRLD,
20757 IX86_BUILTIN_PSRLQ,
20758 IX86_BUILTIN_PSLLWI,
20759 IX86_BUILTIN_PSLLDI,
20760 IX86_BUILTIN_PSLLQI,
20761 IX86_BUILTIN_PSRAWI,
20762 IX86_BUILTIN_PSRADI,
20763 IX86_BUILTIN_PSRLWI,
20764 IX86_BUILTIN_PSRLDI,
20765 IX86_BUILTIN_PSRLQI,
20767 IX86_BUILTIN_PUNPCKHBW,
20768 IX86_BUILTIN_PUNPCKHWD,
20769 IX86_BUILTIN_PUNPCKHDQ,
20770 IX86_BUILTIN_PUNPCKLBW,
20771 IX86_BUILTIN_PUNPCKLWD,
20772 IX86_BUILTIN_PUNPCKLDQ,
20774 IX86_BUILTIN_SHUFPS,
20776 IX86_BUILTIN_RCPPS,
20777 IX86_BUILTIN_RCPSS,
20778 IX86_BUILTIN_RSQRTPS,
20779 IX86_BUILTIN_RSQRTPS_NR,
20780 IX86_BUILTIN_RSQRTSS,
20781 IX86_BUILTIN_RSQRTF,
20782 IX86_BUILTIN_SQRTPS,
20783 IX86_BUILTIN_SQRTPS_NR,
20784 IX86_BUILTIN_SQRTSS,
20786 IX86_BUILTIN_UNPCKHPS,
20787 IX86_BUILTIN_UNPCKLPS,
20789 IX86_BUILTIN_ANDPS,
20790 IX86_BUILTIN_ANDNPS,
20791 IX86_BUILTIN_ORPS,
20792 IX86_BUILTIN_XORPS,
20794 IX86_BUILTIN_EMMS,
20795 IX86_BUILTIN_LDMXCSR,
20796 IX86_BUILTIN_STMXCSR,
20797 IX86_BUILTIN_SFENCE,
20799 /* 3DNow! Original */
20800 IX86_BUILTIN_FEMMS,
20801 IX86_BUILTIN_PAVGUSB,
20802 IX86_BUILTIN_PF2ID,
20803 IX86_BUILTIN_PFACC,
20804 IX86_BUILTIN_PFADD,
20805 IX86_BUILTIN_PFCMPEQ,
20806 IX86_BUILTIN_PFCMPGE,
20807 IX86_BUILTIN_PFCMPGT,
20808 IX86_BUILTIN_PFMAX,
20809 IX86_BUILTIN_PFMIN,
20810 IX86_BUILTIN_PFMUL,
20811 IX86_BUILTIN_PFRCP,
20812 IX86_BUILTIN_PFRCPIT1,
20813 IX86_BUILTIN_PFRCPIT2,
20814 IX86_BUILTIN_PFRSQIT1,
20815 IX86_BUILTIN_PFRSQRT,
20816 IX86_BUILTIN_PFSUB,
20817 IX86_BUILTIN_PFSUBR,
20818 IX86_BUILTIN_PI2FD,
20819 IX86_BUILTIN_PMULHRW,
20821 /* 3DNow! Athlon Extensions */
20822 IX86_BUILTIN_PF2IW,
20823 IX86_BUILTIN_PFNACC,
20824 IX86_BUILTIN_PFPNACC,
20825 IX86_BUILTIN_PI2FW,
20826 IX86_BUILTIN_PSWAPDSI,
20827 IX86_BUILTIN_PSWAPDSF,
20829 /* SSE2 */
20830 IX86_BUILTIN_ADDPD,
20831 IX86_BUILTIN_ADDSD,
20832 IX86_BUILTIN_DIVPD,
20833 IX86_BUILTIN_DIVSD,
20834 IX86_BUILTIN_MULPD,
20835 IX86_BUILTIN_MULSD,
20836 IX86_BUILTIN_SUBPD,
20837 IX86_BUILTIN_SUBSD,
20839 IX86_BUILTIN_CMPEQPD,
20840 IX86_BUILTIN_CMPLTPD,
20841 IX86_BUILTIN_CMPLEPD,
20842 IX86_BUILTIN_CMPGTPD,
20843 IX86_BUILTIN_CMPGEPD,
20844 IX86_BUILTIN_CMPNEQPD,
20845 IX86_BUILTIN_CMPNLTPD,
20846 IX86_BUILTIN_CMPNLEPD,
20847 IX86_BUILTIN_CMPNGTPD,
20848 IX86_BUILTIN_CMPNGEPD,
20849 IX86_BUILTIN_CMPORDPD,
20850 IX86_BUILTIN_CMPUNORDPD,
20851 IX86_BUILTIN_CMPEQSD,
20852 IX86_BUILTIN_CMPLTSD,
20853 IX86_BUILTIN_CMPLESD,
20854 IX86_BUILTIN_CMPNEQSD,
20855 IX86_BUILTIN_CMPNLTSD,
20856 IX86_BUILTIN_CMPNLESD,
20857 IX86_BUILTIN_CMPORDSD,
20858 IX86_BUILTIN_CMPUNORDSD,
20860 IX86_BUILTIN_COMIEQSD,
20861 IX86_BUILTIN_COMILTSD,
20862 IX86_BUILTIN_COMILESD,
20863 IX86_BUILTIN_COMIGTSD,
20864 IX86_BUILTIN_COMIGESD,
20865 IX86_BUILTIN_COMINEQSD,
20866 IX86_BUILTIN_UCOMIEQSD,
20867 IX86_BUILTIN_UCOMILTSD,
20868 IX86_BUILTIN_UCOMILESD,
20869 IX86_BUILTIN_UCOMIGTSD,
20870 IX86_BUILTIN_UCOMIGESD,
20871 IX86_BUILTIN_UCOMINEQSD,
20873 IX86_BUILTIN_MAXPD,
20874 IX86_BUILTIN_MAXSD,
20875 IX86_BUILTIN_MINPD,
20876 IX86_BUILTIN_MINSD,
20878 IX86_BUILTIN_ANDPD,
20879 IX86_BUILTIN_ANDNPD,
20880 IX86_BUILTIN_ORPD,
20881 IX86_BUILTIN_XORPD,
20883 IX86_BUILTIN_SQRTPD,
20884 IX86_BUILTIN_SQRTSD,
20886 IX86_BUILTIN_UNPCKHPD,
20887 IX86_BUILTIN_UNPCKLPD,
20889 IX86_BUILTIN_SHUFPD,
20891 IX86_BUILTIN_LOADUPD,
20892 IX86_BUILTIN_STOREUPD,
20893 IX86_BUILTIN_MOVSD,
20895 IX86_BUILTIN_LOADHPD,
20896 IX86_BUILTIN_LOADLPD,
20898 IX86_BUILTIN_CVTDQ2PD,
20899 IX86_BUILTIN_CVTDQ2PS,
20901 IX86_BUILTIN_CVTPD2DQ,
20902 IX86_BUILTIN_CVTPD2PI,
20903 IX86_BUILTIN_CVTPD2PS,
20904 IX86_BUILTIN_CVTTPD2DQ,
20905 IX86_BUILTIN_CVTTPD2PI,
20907 IX86_BUILTIN_CVTPI2PD,
20908 IX86_BUILTIN_CVTSI2SD,
20909 IX86_BUILTIN_CVTSI642SD,
20911 IX86_BUILTIN_CVTSD2SI,
20912 IX86_BUILTIN_CVTSD2SI64,
20913 IX86_BUILTIN_CVTSD2SS,
20914 IX86_BUILTIN_CVTSS2SD,
20915 IX86_BUILTIN_CVTTSD2SI,
20916 IX86_BUILTIN_CVTTSD2SI64,
20918 IX86_BUILTIN_CVTPS2DQ,
20919 IX86_BUILTIN_CVTPS2PD,
20920 IX86_BUILTIN_CVTTPS2DQ,
20922 IX86_BUILTIN_MOVNTI,
20923 IX86_BUILTIN_MOVNTPD,
20924 IX86_BUILTIN_MOVNTDQ,
20926 IX86_BUILTIN_MOVQ128,
20928 /* SSE2 MMX */
20929 IX86_BUILTIN_MASKMOVDQU,
20930 IX86_BUILTIN_MOVMSKPD,
20931 IX86_BUILTIN_PMOVMSKB128,
20933 IX86_BUILTIN_PACKSSWB128,
20934 IX86_BUILTIN_PACKSSDW128,
20935 IX86_BUILTIN_PACKUSWB128,
20937 IX86_BUILTIN_PADDB128,
20938 IX86_BUILTIN_PADDW128,
20939 IX86_BUILTIN_PADDD128,
20940 IX86_BUILTIN_PADDQ128,
20941 IX86_BUILTIN_PADDSB128,
20942 IX86_BUILTIN_PADDSW128,
20943 IX86_BUILTIN_PADDUSB128,
20944 IX86_BUILTIN_PADDUSW128,
20945 IX86_BUILTIN_PSUBB128,
20946 IX86_BUILTIN_PSUBW128,
20947 IX86_BUILTIN_PSUBD128,
20948 IX86_BUILTIN_PSUBQ128,
20949 IX86_BUILTIN_PSUBSB128,
20950 IX86_BUILTIN_PSUBSW128,
20951 IX86_BUILTIN_PSUBUSB128,
20952 IX86_BUILTIN_PSUBUSW128,
20954 IX86_BUILTIN_PAND128,
20955 IX86_BUILTIN_PANDN128,
20956 IX86_BUILTIN_POR128,
20957 IX86_BUILTIN_PXOR128,
20959 IX86_BUILTIN_PAVGB128,
20960 IX86_BUILTIN_PAVGW128,
20962 IX86_BUILTIN_PCMPEQB128,
20963 IX86_BUILTIN_PCMPEQW128,
20964 IX86_BUILTIN_PCMPEQD128,
20965 IX86_BUILTIN_PCMPGTB128,
20966 IX86_BUILTIN_PCMPGTW128,
20967 IX86_BUILTIN_PCMPGTD128,
20969 IX86_BUILTIN_PMADDWD128,
20971 IX86_BUILTIN_PMAXSW128,
20972 IX86_BUILTIN_PMAXUB128,
20973 IX86_BUILTIN_PMINSW128,
20974 IX86_BUILTIN_PMINUB128,
20976 IX86_BUILTIN_PMULUDQ,
20977 IX86_BUILTIN_PMULUDQ128,
20978 IX86_BUILTIN_PMULHUW128,
20979 IX86_BUILTIN_PMULHW128,
20980 IX86_BUILTIN_PMULLW128,
20982 IX86_BUILTIN_PSADBW128,
20983 IX86_BUILTIN_PSHUFHW,
20984 IX86_BUILTIN_PSHUFLW,
20985 IX86_BUILTIN_PSHUFD,
20987 IX86_BUILTIN_PSLLDQI128,
20988 IX86_BUILTIN_PSLLWI128,
20989 IX86_BUILTIN_PSLLDI128,
20990 IX86_BUILTIN_PSLLQI128,
20991 IX86_BUILTIN_PSRAWI128,
20992 IX86_BUILTIN_PSRADI128,
20993 IX86_BUILTIN_PSRLDQI128,
20994 IX86_BUILTIN_PSRLWI128,
20995 IX86_BUILTIN_PSRLDI128,
20996 IX86_BUILTIN_PSRLQI128,
20998 IX86_BUILTIN_PSLLDQ128,
20999 IX86_BUILTIN_PSLLW128,
21000 IX86_BUILTIN_PSLLD128,
21001 IX86_BUILTIN_PSLLQ128,
21002 IX86_BUILTIN_PSRAW128,
21003 IX86_BUILTIN_PSRAD128,
21004 IX86_BUILTIN_PSRLW128,
21005 IX86_BUILTIN_PSRLD128,
21006 IX86_BUILTIN_PSRLQ128,
21008 IX86_BUILTIN_PUNPCKHBW128,
21009 IX86_BUILTIN_PUNPCKHWD128,
21010 IX86_BUILTIN_PUNPCKHDQ128,
21011 IX86_BUILTIN_PUNPCKHQDQ128,
21012 IX86_BUILTIN_PUNPCKLBW128,
21013 IX86_BUILTIN_PUNPCKLWD128,
21014 IX86_BUILTIN_PUNPCKLDQ128,
21015 IX86_BUILTIN_PUNPCKLQDQ128,
21017 IX86_BUILTIN_CLFLUSH,
21018 IX86_BUILTIN_MFENCE,
21019 IX86_BUILTIN_LFENCE,
21021 IX86_BUILTIN_BSRSI,
21022 IX86_BUILTIN_BSRDI,
21023 IX86_BUILTIN_RDPMC,
21024 IX86_BUILTIN_RDTSC,
21025 IX86_BUILTIN_RDTSCP,
21026 IX86_BUILTIN_ROLQI,
21027 IX86_BUILTIN_ROLHI,
21028 IX86_BUILTIN_RORQI,
21029 IX86_BUILTIN_RORHI,
21031 /* SSE3. */
21032 IX86_BUILTIN_ADDSUBPS,
21033 IX86_BUILTIN_HADDPS,
21034 IX86_BUILTIN_HSUBPS,
21035 IX86_BUILTIN_MOVSHDUP,
21036 IX86_BUILTIN_MOVSLDUP,
21037 IX86_BUILTIN_ADDSUBPD,
21038 IX86_BUILTIN_HADDPD,
21039 IX86_BUILTIN_HSUBPD,
21040 IX86_BUILTIN_LDDQU,
21042 IX86_BUILTIN_MONITOR,
21043 IX86_BUILTIN_MWAIT,
21045 /* SSSE3. */
21046 IX86_BUILTIN_PHADDW,
21047 IX86_BUILTIN_PHADDD,
21048 IX86_BUILTIN_PHADDSW,
21049 IX86_BUILTIN_PHSUBW,
21050 IX86_BUILTIN_PHSUBD,
21051 IX86_BUILTIN_PHSUBSW,
21052 IX86_BUILTIN_PMADDUBSW,
21053 IX86_BUILTIN_PMULHRSW,
21054 IX86_BUILTIN_PSHUFB,
21055 IX86_BUILTIN_PSIGNB,
21056 IX86_BUILTIN_PSIGNW,
21057 IX86_BUILTIN_PSIGND,
21058 IX86_BUILTIN_PALIGNR,
21059 IX86_BUILTIN_PABSB,
21060 IX86_BUILTIN_PABSW,
21061 IX86_BUILTIN_PABSD,
21063 IX86_BUILTIN_PHADDW128,
21064 IX86_BUILTIN_PHADDD128,
21065 IX86_BUILTIN_PHADDSW128,
21066 IX86_BUILTIN_PHSUBW128,
21067 IX86_BUILTIN_PHSUBD128,
21068 IX86_BUILTIN_PHSUBSW128,
21069 IX86_BUILTIN_PMADDUBSW128,
21070 IX86_BUILTIN_PMULHRSW128,
21071 IX86_BUILTIN_PSHUFB128,
21072 IX86_BUILTIN_PSIGNB128,
21073 IX86_BUILTIN_PSIGNW128,
21074 IX86_BUILTIN_PSIGND128,
21075 IX86_BUILTIN_PALIGNR128,
21076 IX86_BUILTIN_PABSB128,
21077 IX86_BUILTIN_PABSW128,
21078 IX86_BUILTIN_PABSD128,
21080 /* AMDFAM10 - SSE4A New Instructions. */
21081 IX86_BUILTIN_MOVNTSD,
21082 IX86_BUILTIN_MOVNTSS,
21083 IX86_BUILTIN_EXTRQI,
21084 IX86_BUILTIN_EXTRQ,
21085 IX86_BUILTIN_INSERTQI,
21086 IX86_BUILTIN_INSERTQ,
21088 /* SSE4.1. */
21089 IX86_BUILTIN_BLENDPD,
21090 IX86_BUILTIN_BLENDPS,
21091 IX86_BUILTIN_BLENDVPD,
21092 IX86_BUILTIN_BLENDVPS,
21093 IX86_BUILTIN_PBLENDVB128,
21094 IX86_BUILTIN_PBLENDW128,
21096 IX86_BUILTIN_DPPD,
21097 IX86_BUILTIN_DPPS,
21099 IX86_BUILTIN_INSERTPS128,
21101 IX86_BUILTIN_MOVNTDQA,
21102 IX86_BUILTIN_MPSADBW128,
21103 IX86_BUILTIN_PACKUSDW128,
21104 IX86_BUILTIN_PCMPEQQ,
21105 IX86_BUILTIN_PHMINPOSUW128,
21107 IX86_BUILTIN_PMAXSB128,
21108 IX86_BUILTIN_PMAXSD128,
21109 IX86_BUILTIN_PMAXUD128,
21110 IX86_BUILTIN_PMAXUW128,
21112 IX86_BUILTIN_PMINSB128,
21113 IX86_BUILTIN_PMINSD128,
21114 IX86_BUILTIN_PMINUD128,
21115 IX86_BUILTIN_PMINUW128,
21117 IX86_BUILTIN_PMOVSXBW128,
21118 IX86_BUILTIN_PMOVSXBD128,
21119 IX86_BUILTIN_PMOVSXBQ128,
21120 IX86_BUILTIN_PMOVSXWD128,
21121 IX86_BUILTIN_PMOVSXWQ128,
21122 IX86_BUILTIN_PMOVSXDQ128,
21124 IX86_BUILTIN_PMOVZXBW128,
21125 IX86_BUILTIN_PMOVZXBD128,
21126 IX86_BUILTIN_PMOVZXBQ128,
21127 IX86_BUILTIN_PMOVZXWD128,
21128 IX86_BUILTIN_PMOVZXWQ128,
21129 IX86_BUILTIN_PMOVZXDQ128,
21131 IX86_BUILTIN_PMULDQ128,
21132 IX86_BUILTIN_PMULLD128,
21134 IX86_BUILTIN_ROUNDPD,
21135 IX86_BUILTIN_ROUNDPS,
21136 IX86_BUILTIN_ROUNDSD,
21137 IX86_BUILTIN_ROUNDSS,
21139 IX86_BUILTIN_PTESTZ,
21140 IX86_BUILTIN_PTESTC,
21141 IX86_BUILTIN_PTESTNZC,
21143 IX86_BUILTIN_VEC_INIT_V2SI,
21144 IX86_BUILTIN_VEC_INIT_V4HI,
21145 IX86_BUILTIN_VEC_INIT_V8QI,
21146 IX86_BUILTIN_VEC_EXT_V2DF,
21147 IX86_BUILTIN_VEC_EXT_V2DI,
21148 IX86_BUILTIN_VEC_EXT_V4SF,
21149 IX86_BUILTIN_VEC_EXT_V4SI,
21150 IX86_BUILTIN_VEC_EXT_V8HI,
21151 IX86_BUILTIN_VEC_EXT_V2SI,
21152 IX86_BUILTIN_VEC_EXT_V4HI,
21153 IX86_BUILTIN_VEC_EXT_V16QI,
21154 IX86_BUILTIN_VEC_SET_V2DI,
21155 IX86_BUILTIN_VEC_SET_V4SF,
21156 IX86_BUILTIN_VEC_SET_V4SI,
21157 IX86_BUILTIN_VEC_SET_V8HI,
21158 IX86_BUILTIN_VEC_SET_V4HI,
21159 IX86_BUILTIN_VEC_SET_V16QI,
21161 IX86_BUILTIN_VEC_PACK_SFIX,
21163 /* SSE4.2. */
21164 IX86_BUILTIN_CRC32QI,
21165 IX86_BUILTIN_CRC32HI,
21166 IX86_BUILTIN_CRC32SI,
21167 IX86_BUILTIN_CRC32DI,
21169 IX86_BUILTIN_PCMPESTRI128,
21170 IX86_BUILTIN_PCMPESTRM128,
21171 IX86_BUILTIN_PCMPESTRA128,
21172 IX86_BUILTIN_PCMPESTRC128,
21173 IX86_BUILTIN_PCMPESTRO128,
21174 IX86_BUILTIN_PCMPESTRS128,
21175 IX86_BUILTIN_PCMPESTRZ128,
21176 IX86_BUILTIN_PCMPISTRI128,
21177 IX86_BUILTIN_PCMPISTRM128,
21178 IX86_BUILTIN_PCMPISTRA128,
21179 IX86_BUILTIN_PCMPISTRC128,
21180 IX86_BUILTIN_PCMPISTRO128,
21181 IX86_BUILTIN_PCMPISTRS128,
21182 IX86_BUILTIN_PCMPISTRZ128,
21184 IX86_BUILTIN_PCMPGTQ,
21186 /* AES instructions */
21187 IX86_BUILTIN_AESENC128,
21188 IX86_BUILTIN_AESENCLAST128,
21189 IX86_BUILTIN_AESDEC128,
21190 IX86_BUILTIN_AESDECLAST128,
21191 IX86_BUILTIN_AESIMC128,
21192 IX86_BUILTIN_AESKEYGENASSIST128,
21194 /* PCLMUL instruction */
21195 IX86_BUILTIN_PCLMULQDQ128,
21197 /* AVX */
21198 IX86_BUILTIN_ADDPD256,
21199 IX86_BUILTIN_ADDPS256,
21200 IX86_BUILTIN_ADDSUBPD256,
21201 IX86_BUILTIN_ADDSUBPS256,
21202 IX86_BUILTIN_ANDPD256,
21203 IX86_BUILTIN_ANDPS256,
21204 IX86_BUILTIN_ANDNPD256,
21205 IX86_BUILTIN_ANDNPS256,
21206 IX86_BUILTIN_BLENDPD256,
21207 IX86_BUILTIN_BLENDPS256,
21208 IX86_BUILTIN_BLENDVPD256,
21209 IX86_BUILTIN_BLENDVPS256,
21210 IX86_BUILTIN_DIVPD256,
21211 IX86_BUILTIN_DIVPS256,
21212 IX86_BUILTIN_DPPS256,
21213 IX86_BUILTIN_HADDPD256,
21214 IX86_BUILTIN_HADDPS256,
21215 IX86_BUILTIN_HSUBPD256,
21216 IX86_BUILTIN_HSUBPS256,
21217 IX86_BUILTIN_MAXPD256,
21218 IX86_BUILTIN_MAXPS256,
21219 IX86_BUILTIN_MINPD256,
21220 IX86_BUILTIN_MINPS256,
21221 IX86_BUILTIN_MULPD256,
21222 IX86_BUILTIN_MULPS256,
21223 IX86_BUILTIN_ORPD256,
21224 IX86_BUILTIN_ORPS256,
21225 IX86_BUILTIN_SHUFPD256,
21226 IX86_BUILTIN_SHUFPS256,
21227 IX86_BUILTIN_SUBPD256,
21228 IX86_BUILTIN_SUBPS256,
21229 IX86_BUILTIN_XORPD256,
21230 IX86_BUILTIN_XORPS256,
21231 IX86_BUILTIN_CMPSD,
21232 IX86_BUILTIN_CMPSS,
21233 IX86_BUILTIN_CMPPD,
21234 IX86_BUILTIN_CMPPS,
21235 IX86_BUILTIN_CMPPD256,
21236 IX86_BUILTIN_CMPPS256,
21237 IX86_BUILTIN_CVTDQ2PD256,
21238 IX86_BUILTIN_CVTDQ2PS256,
21239 IX86_BUILTIN_CVTPD2PS256,
21240 IX86_BUILTIN_CVTPS2DQ256,
21241 IX86_BUILTIN_CVTPS2PD256,
21242 IX86_BUILTIN_CVTTPD2DQ256,
21243 IX86_BUILTIN_CVTPD2DQ256,
21244 IX86_BUILTIN_CVTTPS2DQ256,
21245 IX86_BUILTIN_EXTRACTF128PD256,
21246 IX86_BUILTIN_EXTRACTF128PS256,
21247 IX86_BUILTIN_EXTRACTF128SI256,
21248 IX86_BUILTIN_VZEROALL,
21249 IX86_BUILTIN_VZEROUPPER,
21250 IX86_BUILTIN_VPERMILVARPD,
21251 IX86_BUILTIN_VPERMILVARPS,
21252 IX86_BUILTIN_VPERMILVARPD256,
21253 IX86_BUILTIN_VPERMILVARPS256,
21254 IX86_BUILTIN_VPERMILPD,
21255 IX86_BUILTIN_VPERMILPS,
21256 IX86_BUILTIN_VPERMILPD256,
21257 IX86_BUILTIN_VPERMILPS256,
21258 IX86_BUILTIN_VPERMIL2PD,
21259 IX86_BUILTIN_VPERMIL2PS,
21260 IX86_BUILTIN_VPERMIL2PD256,
21261 IX86_BUILTIN_VPERMIL2PS256,
21262 IX86_BUILTIN_VPERM2F128PD256,
21263 IX86_BUILTIN_VPERM2F128PS256,
21264 IX86_BUILTIN_VPERM2F128SI256,
21265 IX86_BUILTIN_VBROADCASTSS,
21266 IX86_BUILTIN_VBROADCASTSD256,
21267 IX86_BUILTIN_VBROADCASTSS256,
21268 IX86_BUILTIN_VBROADCASTPD256,
21269 IX86_BUILTIN_VBROADCASTPS256,
21270 IX86_BUILTIN_VINSERTF128PD256,
21271 IX86_BUILTIN_VINSERTF128PS256,
21272 IX86_BUILTIN_VINSERTF128SI256,
21273 IX86_BUILTIN_LOADUPD256,
21274 IX86_BUILTIN_LOADUPS256,
21275 IX86_BUILTIN_STOREUPD256,
21276 IX86_BUILTIN_STOREUPS256,
21277 IX86_BUILTIN_LDDQU256,
21278 IX86_BUILTIN_MOVNTDQ256,
21279 IX86_BUILTIN_MOVNTPD256,
21280 IX86_BUILTIN_MOVNTPS256,
21281 IX86_BUILTIN_LOADDQU256,
21282 IX86_BUILTIN_STOREDQU256,
21283 IX86_BUILTIN_MASKLOADPD,
21284 IX86_BUILTIN_MASKLOADPS,
21285 IX86_BUILTIN_MASKSTOREPD,
21286 IX86_BUILTIN_MASKSTOREPS,
21287 IX86_BUILTIN_MASKLOADPD256,
21288 IX86_BUILTIN_MASKLOADPS256,
21289 IX86_BUILTIN_MASKSTOREPD256,
21290 IX86_BUILTIN_MASKSTOREPS256,
21291 IX86_BUILTIN_MOVSHDUP256,
21292 IX86_BUILTIN_MOVSLDUP256,
21293 IX86_BUILTIN_MOVDDUP256,
21295 IX86_BUILTIN_SQRTPD256,
21296 IX86_BUILTIN_SQRTPS256,
21297 IX86_BUILTIN_SQRTPS_NR256,
21298 IX86_BUILTIN_RSQRTPS256,
21299 IX86_BUILTIN_RSQRTPS_NR256,
21301 IX86_BUILTIN_RCPPS256,
21303 IX86_BUILTIN_ROUNDPD256,
21304 IX86_BUILTIN_ROUNDPS256,
21306 IX86_BUILTIN_UNPCKHPD256,
21307 IX86_BUILTIN_UNPCKLPD256,
21308 IX86_BUILTIN_UNPCKHPS256,
21309 IX86_BUILTIN_UNPCKLPS256,
21311 IX86_BUILTIN_SI256_SI,
21312 IX86_BUILTIN_PS256_PS,
21313 IX86_BUILTIN_PD256_PD,
21314 IX86_BUILTIN_SI_SI256,
21315 IX86_BUILTIN_PS_PS256,
21316 IX86_BUILTIN_PD_PD256,
21318 IX86_BUILTIN_VTESTZPD,
21319 IX86_BUILTIN_VTESTCPD,
21320 IX86_BUILTIN_VTESTNZCPD,
21321 IX86_BUILTIN_VTESTZPS,
21322 IX86_BUILTIN_VTESTCPS,
21323 IX86_BUILTIN_VTESTNZCPS,
21324 IX86_BUILTIN_VTESTZPD256,
21325 IX86_BUILTIN_VTESTCPD256,
21326 IX86_BUILTIN_VTESTNZCPD256,
21327 IX86_BUILTIN_VTESTZPS256,
21328 IX86_BUILTIN_VTESTCPS256,
21329 IX86_BUILTIN_VTESTNZCPS256,
21330 IX86_BUILTIN_PTESTZ256,
21331 IX86_BUILTIN_PTESTC256,
21332 IX86_BUILTIN_PTESTNZC256,
21334 IX86_BUILTIN_MOVMSKPD256,
21335 IX86_BUILTIN_MOVMSKPS256,
21337 /* TFmode support builtins. */
21338 IX86_BUILTIN_INFQ,
21339 IX86_BUILTIN_HUGE_VALQ,
21340 IX86_BUILTIN_FABSQ,
21341 IX86_BUILTIN_COPYSIGNQ,
21343 /* Vectorizer support builtins. */
21344 IX86_BUILTIN_CPYSGNPS,
21345 IX86_BUILTIN_CPYSGNPD,
21347 IX86_BUILTIN_CVTUDQ2PS,
21349 IX86_BUILTIN_VEC_PERM_V2DF,
21350 IX86_BUILTIN_VEC_PERM_V4SF,
21351 IX86_BUILTIN_VEC_PERM_V2DI,
21352 IX86_BUILTIN_VEC_PERM_V4SI,
21353 IX86_BUILTIN_VEC_PERM_V8HI,
21354 IX86_BUILTIN_VEC_PERM_V16QI,
21355 IX86_BUILTIN_VEC_PERM_V2DI_U,
21356 IX86_BUILTIN_VEC_PERM_V4SI_U,
21357 IX86_BUILTIN_VEC_PERM_V8HI_U,
21358 IX86_BUILTIN_VEC_PERM_V16QI_U,
21359 IX86_BUILTIN_VEC_PERM_V4DF,
21360 IX86_BUILTIN_VEC_PERM_V8SF,
21362 /* FMA4 and XOP instructions. */
21363 IX86_BUILTIN_VFMADDSS,
21364 IX86_BUILTIN_VFMADDSD,
21365 IX86_BUILTIN_VFMADDPS,
21366 IX86_BUILTIN_VFMADDPD,
21367 IX86_BUILTIN_VFMSUBSS,
21368 IX86_BUILTIN_VFMSUBSD,
21369 IX86_BUILTIN_VFMSUBPS,
21370 IX86_BUILTIN_VFMSUBPD,
21371 IX86_BUILTIN_VFMADDSUBPS,
21372 IX86_BUILTIN_VFMADDSUBPD,
21373 IX86_BUILTIN_VFMSUBADDPS,
21374 IX86_BUILTIN_VFMSUBADDPD,
21375 IX86_BUILTIN_VFNMADDSS,
21376 IX86_BUILTIN_VFNMADDSD,
21377 IX86_BUILTIN_VFNMADDPS,
21378 IX86_BUILTIN_VFNMADDPD,
21379 IX86_BUILTIN_VFNMSUBSS,
21380 IX86_BUILTIN_VFNMSUBSD,
21381 IX86_BUILTIN_VFNMSUBPS,
21382 IX86_BUILTIN_VFNMSUBPD,
21383 IX86_BUILTIN_VFMADDPS256,
21384 IX86_BUILTIN_VFMADDPD256,
21385 IX86_BUILTIN_VFMSUBPS256,
21386 IX86_BUILTIN_VFMSUBPD256,
21387 IX86_BUILTIN_VFMADDSUBPS256,
21388 IX86_BUILTIN_VFMADDSUBPD256,
21389 IX86_BUILTIN_VFMSUBADDPS256,
21390 IX86_BUILTIN_VFMSUBADDPD256,
21391 IX86_BUILTIN_VFNMADDPS256,
21392 IX86_BUILTIN_VFNMADDPD256,
21393 IX86_BUILTIN_VFNMSUBPS256,
21394 IX86_BUILTIN_VFNMSUBPD256,
21396 IX86_BUILTIN_VPCMOV,
21397 IX86_BUILTIN_VPCMOV_V2DI,
21398 IX86_BUILTIN_VPCMOV_V4SI,
21399 IX86_BUILTIN_VPCMOV_V8HI,
21400 IX86_BUILTIN_VPCMOV_V16QI,
21401 IX86_BUILTIN_VPCMOV_V4SF,
21402 IX86_BUILTIN_VPCMOV_V2DF,
21403 IX86_BUILTIN_VPCMOV256,
21404 IX86_BUILTIN_VPCMOV_V4DI256,
21405 IX86_BUILTIN_VPCMOV_V8SI256,
21406 IX86_BUILTIN_VPCMOV_V16HI256,
21407 IX86_BUILTIN_VPCMOV_V32QI256,
21408 IX86_BUILTIN_VPCMOV_V8SF256,
21409 IX86_BUILTIN_VPCMOV_V4DF256,
21411 IX86_BUILTIN_VPPERM,
21413 IX86_BUILTIN_VPMACSSWW,
21414 IX86_BUILTIN_VPMACSWW,
21415 IX86_BUILTIN_VPMACSSWD,
21416 IX86_BUILTIN_VPMACSWD,
21417 IX86_BUILTIN_VPMACSSDD,
21418 IX86_BUILTIN_VPMACSDD,
21419 IX86_BUILTIN_VPMACSSDQL,
21420 IX86_BUILTIN_VPMACSSDQH,
21421 IX86_BUILTIN_VPMACSDQL,
21422 IX86_BUILTIN_VPMACSDQH,
21423 IX86_BUILTIN_VPMADCSSWD,
21424 IX86_BUILTIN_VPMADCSWD,
21426 IX86_BUILTIN_VPHADDBW,
21427 IX86_BUILTIN_VPHADDBD,
21428 IX86_BUILTIN_VPHADDBQ,
21429 IX86_BUILTIN_VPHADDWD,
21430 IX86_BUILTIN_VPHADDWQ,
21431 IX86_BUILTIN_VPHADDDQ,
21432 IX86_BUILTIN_VPHADDUBW,
21433 IX86_BUILTIN_VPHADDUBD,
21434 IX86_BUILTIN_VPHADDUBQ,
21435 IX86_BUILTIN_VPHADDUWD,
21436 IX86_BUILTIN_VPHADDUWQ,
21437 IX86_BUILTIN_VPHADDUDQ,
21438 IX86_BUILTIN_VPHSUBBW,
21439 IX86_BUILTIN_VPHSUBWD,
21440 IX86_BUILTIN_VPHSUBDQ,
21442 IX86_BUILTIN_VPROTB,
21443 IX86_BUILTIN_VPROTW,
21444 IX86_BUILTIN_VPROTD,
21445 IX86_BUILTIN_VPROTQ,
21446 IX86_BUILTIN_VPROTB_IMM,
21447 IX86_BUILTIN_VPROTW_IMM,
21448 IX86_BUILTIN_VPROTD_IMM,
21449 IX86_BUILTIN_VPROTQ_IMM,
21451 IX86_BUILTIN_VPSHLB,
21452 IX86_BUILTIN_VPSHLW,
21453 IX86_BUILTIN_VPSHLD,
21454 IX86_BUILTIN_VPSHLQ,
21455 IX86_BUILTIN_VPSHAB,
21456 IX86_BUILTIN_VPSHAW,
21457 IX86_BUILTIN_VPSHAD,
21458 IX86_BUILTIN_VPSHAQ,
21460 IX86_BUILTIN_VFRCZSS,
21461 IX86_BUILTIN_VFRCZSD,
21462 IX86_BUILTIN_VFRCZPS,
21463 IX86_BUILTIN_VFRCZPD,
21464 IX86_BUILTIN_VFRCZPS256,
21465 IX86_BUILTIN_VFRCZPD256,
21467 IX86_BUILTIN_VPCOMEQUB,
21468 IX86_BUILTIN_VPCOMNEUB,
21469 IX86_BUILTIN_VPCOMLTUB,
21470 IX86_BUILTIN_VPCOMLEUB,
21471 IX86_BUILTIN_VPCOMGTUB,
21472 IX86_BUILTIN_VPCOMGEUB,
21473 IX86_BUILTIN_VPCOMFALSEUB,
21474 IX86_BUILTIN_VPCOMTRUEUB,
21476 IX86_BUILTIN_VPCOMEQUW,
21477 IX86_BUILTIN_VPCOMNEUW,
21478 IX86_BUILTIN_VPCOMLTUW,
21479 IX86_BUILTIN_VPCOMLEUW,
21480 IX86_BUILTIN_VPCOMGTUW,
21481 IX86_BUILTIN_VPCOMGEUW,
21482 IX86_BUILTIN_VPCOMFALSEUW,
21483 IX86_BUILTIN_VPCOMTRUEUW,
21485 IX86_BUILTIN_VPCOMEQUD,
21486 IX86_BUILTIN_VPCOMNEUD,
21487 IX86_BUILTIN_VPCOMLTUD,
21488 IX86_BUILTIN_VPCOMLEUD,
21489 IX86_BUILTIN_VPCOMGTUD,
21490 IX86_BUILTIN_VPCOMGEUD,
21491 IX86_BUILTIN_VPCOMFALSEUD,
21492 IX86_BUILTIN_VPCOMTRUEUD,
21494 IX86_BUILTIN_VPCOMEQUQ,
21495 IX86_BUILTIN_VPCOMNEUQ,
21496 IX86_BUILTIN_VPCOMLTUQ,
21497 IX86_BUILTIN_VPCOMLEUQ,
21498 IX86_BUILTIN_VPCOMGTUQ,
21499 IX86_BUILTIN_VPCOMGEUQ,
21500 IX86_BUILTIN_VPCOMFALSEUQ,
21501 IX86_BUILTIN_VPCOMTRUEUQ,
21503 IX86_BUILTIN_VPCOMEQB,
21504 IX86_BUILTIN_VPCOMNEB,
21505 IX86_BUILTIN_VPCOMLTB,
21506 IX86_BUILTIN_VPCOMLEB,
21507 IX86_BUILTIN_VPCOMGTB,
21508 IX86_BUILTIN_VPCOMGEB,
21509 IX86_BUILTIN_VPCOMFALSEB,
21510 IX86_BUILTIN_VPCOMTRUEB,
21512 IX86_BUILTIN_VPCOMEQW,
21513 IX86_BUILTIN_VPCOMNEW,
21514 IX86_BUILTIN_VPCOMLTW,
21515 IX86_BUILTIN_VPCOMLEW,
21516 IX86_BUILTIN_VPCOMGTW,
21517 IX86_BUILTIN_VPCOMGEW,
21518 IX86_BUILTIN_VPCOMFALSEW,
21519 IX86_BUILTIN_VPCOMTRUEW,
21521 IX86_BUILTIN_VPCOMEQD,
21522 IX86_BUILTIN_VPCOMNED,
21523 IX86_BUILTIN_VPCOMLTD,
21524 IX86_BUILTIN_VPCOMLED,
21525 IX86_BUILTIN_VPCOMGTD,
21526 IX86_BUILTIN_VPCOMGED,
21527 IX86_BUILTIN_VPCOMFALSED,
21528 IX86_BUILTIN_VPCOMTRUED,
21530 IX86_BUILTIN_VPCOMEQQ,
21531 IX86_BUILTIN_VPCOMNEQ,
21532 IX86_BUILTIN_VPCOMLTQ,
21533 IX86_BUILTIN_VPCOMLEQ,
21534 IX86_BUILTIN_VPCOMGTQ,
21535 IX86_BUILTIN_VPCOMGEQ,
21536 IX86_BUILTIN_VPCOMFALSEQ,
21537 IX86_BUILTIN_VPCOMTRUEQ,
21539 /* LWP instructions. */
21540 IX86_BUILTIN_LLWPCB,
21541 IX86_BUILTIN_SLWPCB,
21542 IX86_BUILTIN_LWPVAL32,
21543 IX86_BUILTIN_LWPVAL64,
21544 IX86_BUILTIN_LWPINS32,
21545 IX86_BUILTIN_LWPINS64,
21547 IX86_BUILTIN_CLZS,
21549 IX86_BUILTIN_MAX
21550 };
21552 /* Table for the ix86 builtin decls. */
21553 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
21555 /* Table of all of the builtin functions that are possible with different ISAs
21556 but are waiting to be built until a function is declared to use that
21557 ISA. */
21558 struct builtin_isa {
21559 const char *name; /* function name */
21560 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21561 int isa; /* isa_flags this builtin is defined for */
21562 bool const_p; /* true if the declaration is constant */
21563 bool set_and_not_built_p; /* true while the decl is deferred and not yet built */
21564 };
21566 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
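/* A deferred entry in ix86_builtins_isa[] only records the name, function
   type code and ISA mask it was registered with; no decl is added to the
   tree until ix86_add_new_builtins () below materializes it.  */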
21569 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
21570 of which isa_flags to use in the ix86_builtins_isa array. Stores the
21571 function decl in the ix86_builtins array. Returns the function decl or
21572 NULL_TREE if the builtin was not added.
21574 If the front end has a special hook for builtin functions, delay adding
21575 builtin functions that aren't in the current ISA until the ISA is changed
21576 with function specific optimization. Doing so can save about 300K for the
21577 default compiler. When the builtin is expanded, check at that time whether
21578 it is valid.
21580 If the front end doesn't have a special hook, record all builtins, even
21581 those whose ISA isn't currently enabled, in case the user uses function
21582 specific options for a different ISA, so that we don't get scope errors
21583 if a builtin is added in the middle of a function scope. */
21585 static inline tree
21586 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21587 enum ix86_builtins code)
21588 {
21589 tree decl = NULL_TREE;
21591 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21592 {
21593 ix86_builtins_isa[(int) code].isa = mask;
21595 if (mask == 0
21596 || (mask & ix86_isa_flags) != 0
21597 || (lang_hooks.builtin_function
21598 == lang_hooks.builtin_function_ext_scope))
21600 {
21601 tree type = ix86_get_builtin_func_type (tcode);
21602 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21603 NULL, NULL_TREE);
21604 ix86_builtins[(int) code] = decl;
21605 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21606 }
21607 else
21608 {
21609 ix86_builtins[(int) code] = NULL_TREE;
21610 ix86_builtins_isa[(int) code].tcode = tcode;
21611 ix86_builtins_isa[(int) code].name = name;
21612 ix86_builtins_isa[(int) code].const_p = false;
21613 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21614 }
21615 }
21617 return decl;
21618 }
21620 /* Like def_builtin, but also marks the function decl "const". */
21622 static inline tree
21623 def_builtin_const (int mask, const char *name,
21624 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21625 {
21626 tree decl = def_builtin (mask, name, tcode, code);
21627 if (decl)
21628 TREE_READONLY (decl) = 1;
21629 else
21630 ix86_builtins_isa[(int) code].const_p = true;
21632 return decl;
21633 }
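/* A sketch of how the two helpers above are used (the actual registration
   is driven by the bdesc_* tables below and explicit calls later in this
   file); the names all exist in this file, only the call shown here is
   illustrative:

     def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_addpd",
                        V2DF_FTYPE_V2DF_V2DF, IX86_BUILTIN_ADDPD);

   With SSE2 enabled the decl is created immediately; otherwise the request
   is parked in ix86_builtins_isa[] until ix86_add_new_builtins () sees an
   ISA mask that covers it.  */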
21635 /* Add any new builtin functions for a given ISA that may not have been
21636 declared. This saves a bit of space compared to adding all of the
21637 declarations to the tree, even those that are never used. */
21639 static void
21640 ix86_add_new_builtins (int isa)
21641 {
21642 int i;
21644 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21645 {
21646 if ((ix86_builtins_isa[i].isa & isa) != 0
21647 && ix86_builtins_isa[i].set_and_not_built_p)
21648 {
21649 tree decl, type;
21651 /* Don't define the builtin again. */
21652 ix86_builtins_isa[i].set_and_not_built_p = false;
21654 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21655 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21656 type, i, BUILT_IN_MD, NULL,
21657 NULL_TREE);
21659 ix86_builtins[i] = decl;
21660 if (ix86_builtins_isa[i].const_p)
21661 TREE_READONLY (decl) = 1;
21662 }
21663 }
21664 }
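/* For example, when a function carries __attribute__((target ("avx"))) in a
   compilation whose default ISA lacks AVX, the option handling updates
   ix86_isa_flags and the AVX builtins that def_builtin parked in
   ix86_builtins_isa[] are only then created via
   add_builtin_function_ext_scope.  */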
21666 /* Bits for builtin_description.flag. */
21668 /* Set when we don't support the comparison natively, and should
21669 swap the comparison operands in order to support it. */
21670 #define BUILTIN_DESC_SWAP_OPERANDS 1
21672 struct builtin_description
21673 {
21674 const unsigned int mask;
21675 const enum insn_code icode;
21676 const char *const name;
21677 const enum ix86_builtins code;
21678 const enum rtx_code comparison;
21679 const int flag;
21680 };
21682 static const struct builtin_description bdesc_comi[] =
21683 {
21684 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21685 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21686 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21687 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21688 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21689 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21690 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21691 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21692 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21693 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21694 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21695 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21696 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21697 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21698 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21699 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21700 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21701 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21702 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21703 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21704 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21705 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21706 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21707 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21708 };
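/* Each bdesc_comi row ties a builtin to the RTX comparison used when the
   (u)comi pattern is expanded; e.g. __builtin_ia32_comieq, the builtin
   behind _mm_comieq_ss in xmmintrin.h, expands CODE_FOR_sse_comi with
   UNEQ.  */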
21710 static const struct builtin_description bdesc_pcmpestr[] =
21711 {
21712 /* SSE4.2 */
21713 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21714 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21715 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21716 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21717 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21718 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21719 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21720 };
21722 static const struct builtin_description bdesc_pcmpistr[] =
21723 {
21724 /* SSE4.2 */
21725 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21726 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21727 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21728 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21729 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21730 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21731 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
21732 };
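/* In the pcmpestr/pcmpistr tables the flag field does not hold
   BUILTIN_DESC_SWAP_OPERANDS; it carries a condition-code mode (CCAmode,
   CCCmode, ...) selecting which flags bit the builtin reports, e.g.
   CCAmode for __builtin_ia32_pcmpestria128.  */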
21734 /* Special builtins with a variable number of arguments. */
21735 static const struct builtin_description bdesc_special_args[] =
21736 {
21737 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21738 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21740 /* MMX */
21741 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21743 /* 3DNow! */
21744 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21746 /* SSE */
21747 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21748 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21749 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21751 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21752 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21753 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21754 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21756 /* SSE or 3DNow!A */
21757 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21758 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21760 /* SSE2 */
21761 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21762 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21763 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21764 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21765 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21766 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21767 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21768 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21769 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21771 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21772 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21774 /* SSE3 */
21775 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21777 /* SSE4.1 */
21778 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21780 /* SSE4A */
21781 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21782 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21784 /* AVX */
21785 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21786 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21788 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21789 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21790 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21791 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21792 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21794 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21795 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21796 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21797 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21798 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21799 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21800 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21802 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21803 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21804 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21806 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21807 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21808 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21809 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21810 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21811 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21812 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21813 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21815 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21816 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21817 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21818 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21819 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
21820 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
21821 };
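/* The "special" table collects builtins with memory operands or other side
   effects (unaligned loads/stores, non-temporal stores, fences, LWP);
   e.g. __builtin_ia32_loadupd256 above, used by _mm256_loadu_pd in
   avxintrin.h, takes a const double * and returns a V4DF.  */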
21824 /* Builtins with a variable number of arguments. */
21825 static const struct builtin_description bdesc_args[] =
21826 {
21827 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21828 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21829 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21830 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21831 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21832 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21833 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21835 /* MMX */
21836 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21837 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21838 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21839 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21840 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21841 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21843 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21844 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21845 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21846 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21847 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21848 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21849 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21850 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21852 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21853 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21855 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21856 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21857 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21858 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21860 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21861 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21862 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21863 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21864 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21865 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21867 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21868 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21869 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21870 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21871 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI},
21872 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI},
21874 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21875 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
21876 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21878 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
21880 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21881 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21882 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21883 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21884 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21885 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21887 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21888 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21889 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21890 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21891 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21892 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21894 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21895 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21896 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21897 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21899 /* 3DNow! */
21900 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21901 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21902 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21903 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21905 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21906 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21907 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21908 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21909 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21910 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21911 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21912 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21913 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21914 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21915 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21916 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21917 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21918 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21919 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21921 /* 3DNow!A */
21922 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21923 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21924 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21925 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21926 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21927 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21929 /* SSE */
21930 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
21931 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21932 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21933 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21934 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21935 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21936 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21937 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21938 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21939 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21940 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21941 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21943 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21945 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21946 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21947 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21948 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21949 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21950 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21951 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21952 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21954 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21955 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21956 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21957 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21958 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21959 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21960 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21961 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21962 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21963 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21964 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP},
21965 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21966 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21967 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21968 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21969 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21970 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21971 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21972 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21973 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21974 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21975 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21977 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21978 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21979 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21980 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21982 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21983 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21984 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21985 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21987 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21989 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21990 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21991 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21992 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21993 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21995 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
21996 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
21997 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, V4SF_FTYPE_V4SF_DI },
21999 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
22001 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
22002 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
22003 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
22005 /* SSE MMX or 3DNow!A */
22006 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22007 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22008 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22010 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22011 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22012 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22013 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22015 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
22016 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
22018 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
22020 /* SSE2 */
22021 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22023 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
22024 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
22025 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
22026 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
22027 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
22028 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
22029 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
22030 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
22031 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
22032 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
22033 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
22034 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
22036 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
22037 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
22038 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
22039 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
22040 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
22041 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
22043 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
22044 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
22045 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
22046 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
22047 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
22049 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
22051 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
22052 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
22053 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
22054 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
22056 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
22057 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
22058 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
22060 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22061 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22062 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22063 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22064 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22065 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22066 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22067 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22069 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
22070 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
22071 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
22072 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
22073 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP},
22074 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22075 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
22076 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
22077 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
22078 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
22079 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
22080 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22081 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
22082 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
22083 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
22084 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22085 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
22086 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
22087 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
22088 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
22090 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22091 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22092 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22093 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22095 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22096 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22097 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22098 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22100 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22102 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22103 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22104 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22106 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
22108 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22109 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22110 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22111 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22112 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22113 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22114 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22115 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22117 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22118 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22119 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22120 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22121 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22122 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22123 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22124 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22126 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22127 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22129 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22130 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22131 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22132 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22134 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22135 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22137 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22138 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22139 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22140 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22141 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22142 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22144 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22145 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22146 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22147 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22149 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22150 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22151 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22152 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22153 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22154 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22155 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22156 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22158 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
22159 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
22160 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
22162 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22163 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
22165 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
22166 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22168 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
22170 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
22171 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
22172 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
22173 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
22175 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
22176 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22177 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22178 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
22179 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22180 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22181 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
22183 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
22184 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22185 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22186 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
22187 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22188 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22189 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
22191 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
22192 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
22193 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
22194 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
22196 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
22197 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
22198 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
22200 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
22202 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
22203 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
22205 { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22207 /* SSE2 MMX */
22208 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
22209 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
22211 /* SSE3 */
22212 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22213 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
22215 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22216 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22217 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22218 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22219 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
22220 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
22222 /* SSSE3 */
22223 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
22224 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
22225 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
22226 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
22227 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
22228 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
22230 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22231 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22232 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22233 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22234 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22235 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22236 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22237 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22238 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22239 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22240 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22241 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22242 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
22243 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
22244 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22245 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22246 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22247 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22248 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22249 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
22250 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22251 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
22252 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22253 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
22255 /* SSSE3. */
22256 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
22257 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
22259 /* SSE4.1 */
22260 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22261 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22262 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
22263 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
22264 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22265 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22266 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22267 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
22268 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
22269 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
22271 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
22272 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
22273 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
22274 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
22275 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
22276 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
22277 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
22278 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
22279 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
22280 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
22281 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
22282 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
22283 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
22285 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
22286 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22287 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22288 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22289 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22290 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22291 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
22292 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22293 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22294 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
22295 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
22296 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22298 /* SSE4.1 */
22299 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22300 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22301 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22302 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22304 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22305 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22306 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22308 /* SSE4.2 */
22309 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22310 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
22311 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
22312 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
22313 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
22315 /* SSE4A */
22316 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
22317 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
22318 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
22319 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22321 /* AES */
22322 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
22323 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22325 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22326 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22327 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22328 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22330 /* PCLMUL */
22331 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
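/* For reference: the PCLMUL entry above is what backs the
   _mm_clmulepi64_si128 intrinsic in GCC's wmmintrin.h, which expands to
   __builtin_ia32_pclmulqdq128 with a constant selector immediate.  */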
22333 /* AVX */
22334 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22335 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22336 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22337 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22338 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22339 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22340 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22341 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22342 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22343 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22344 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22345 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22346 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22347 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22348 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22349 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22350 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22351 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22352 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22353 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22354 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22355 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22356 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22357 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22358 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22359 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22361 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
22362 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
22363 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
22364 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
22366 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22367 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22368 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
22369 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
22370 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22371 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22372 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22373 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22374 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22375 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22376 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22377 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22378 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22379 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
22380 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
22381 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
22382 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
22383 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
22384 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
22385 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22386 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
22387 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22388 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22389 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22390 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22391 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22392 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
22393 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22394 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22395 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22396 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22397 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22398 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22399 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22401 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22402 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22403 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22405 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22406 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22407 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22408 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22409 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22411 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22413 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22414 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22416 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22417 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22418 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22419 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22421 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22422 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22423 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22424 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22425 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22426 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22428 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22429 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22430 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22431 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22432 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22433 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22434 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22435 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22436 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22437 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22438 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22439 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22440 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22441 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22442 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22444 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22445 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22447 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
22448 };
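/* Entries in the table just closed are expanded generically:
   ix86_expand_builtin looks the builtin up here and hands it to
   ix86_expand_args_builtin, which dispatches on the V*_FTYPE_* value
   stored in the final field; see those functions for the authoritative
   behaviour.  */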
22450 /* FMA4 and XOP. */
22451 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
22452 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
22453 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
22454 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
22455 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
22456 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
22457 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
22458 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
22459 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
22460 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
22461 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
22462 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
22463 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
22464 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
22465 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
22466 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
22467 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
22468 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
22469 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
22470 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
22471 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
22472 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
22473 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
22474 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
22475 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
22476 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
22477 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
22478 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
22479 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
22480 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
22481 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
22482 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
22483 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
22484 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
22485 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
22486 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
22487 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
22488 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
22489 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
22490 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
22491 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
22492 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
22493 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
22494 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
22495 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
22496 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
22497 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
22498 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
22499 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
22500 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
22501 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
22502 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
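/* Each MULTI_ARG_* macro above is a short alias for one of the
   ix86_builtin_func_type values; it ends up in the `flag' field of the
   bdesc_multi_arg entries below and selects the operand count and modes
   used when the builtin is registered and expanded.  */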
22504 static const struct builtin_description bdesc_multi_arg[] =
22505 {
22506 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22507 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22508 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22509 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22510 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22511 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22512 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22513 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22515 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22516 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22517 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22518 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22519 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22520 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22521 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22522 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22524 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22525 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22526 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22527 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22529 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22530 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22531 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22532 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22534 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22535 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22536 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22537 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22539 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22540 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22541 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22542 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22544 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
22545 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
22546 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
22547 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
22548 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi", IX86_BUILTIN_VPCMOV_V16QI, UNKNOWN, (int)MULTI_ARG_3_QI },
22549 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
22550 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
22552 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22553 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22554 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
22555 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
22556 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
22557 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22558 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22560 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
22562 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22563 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22564 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22565 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22566 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22567 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22568 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22569 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22570 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22571 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22572 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22573 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22575 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22576 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
22577 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
22578 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
22579 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
22580 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
22581 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
22582 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
22583 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22584 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
22585 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
22586 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
22587 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22588 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
22589 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
22590 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
22592 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
22593 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
22594 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
22595 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
22596 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
22597 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
22599 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22600 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22601 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22602 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22603 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22604 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22605 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22606 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22607 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22608 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22609 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22610 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22611 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22612 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22613 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22615 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
22616 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22617 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22618 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
22619 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
22620 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
22621 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
22623 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
22624 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22625 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22626 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
22627 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
22628 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
22629 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
22631 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
22632 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22633 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22634 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
22635 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
22636 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
22637 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
22639 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22640 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22641 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22642 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
22643 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
22644 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
22645 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
22647 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
22648 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22649 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22650 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
22651 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
22652 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
22653 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
22655 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
22656 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22657 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22658 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
22659 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
22660 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
22661 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
22663 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
22664 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22665 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22666 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
22667 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
22668 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
22669 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
22671 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22672 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22673 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22674 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
22675 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
22676 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
22677 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
22679 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22680 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22681 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22682 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22683 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22684 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22685 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22686 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22688 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22689 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22690 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22691 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22692 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22693 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22694 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22695 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22697 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
22698 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
22699 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
22700 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
22704 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
22705    not in the current target ISA, so that the user can compile particular
22706    modules with target-specific options that differ from the command-line
22707    options.  */
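/* Rough usage sketch (not part of this file; the target attribute and the
   crc32 builtin name are recalled from GCC documentation, not defined here):
   setting the builtins up independently of the command-line ISA is what lets
   a translation unit built with, say, -msse2 still use a newer builtin inside
   a function that selects its own target options:

     __attribute__ ((target ("sse4.2")))
     unsigned int
     crc_step (unsigned int crc, unsigned int val)
     {
       return __builtin_ia32_crc32si (crc, val);
     }
*/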
22708 static void
22709 ix86_init_mmx_sse_builtins (void)
22711 const struct builtin_description * d;
22712 enum ix86_builtin_func_type ftype;
22713 size_t i;
22715 /* Add all special builtins with a variable number of operands.  */
22716 for (i = 0, d = bdesc_special_args;
22717 i < ARRAY_SIZE (bdesc_special_args);
22718 i++, d++)
22720 if (d->name == 0)
22721 continue;
22723 ftype = (enum ix86_builtin_func_type) d->flag;
22724 def_builtin (d->mask, d->name, ftype, d->code);
22727 /* Add all builtins with a variable number of operands.  */
22728 for (i = 0, d = bdesc_args;
22729 i < ARRAY_SIZE (bdesc_args);
22730 i++, d++)
22732 if (d->name == 0)
22733 continue;
22735 ftype = (enum ix86_builtin_func_type) d->flag;
22736 def_builtin_const (d->mask, d->name, ftype, d->code);
22739 /* pcmpestr[im] insns. */
22740 for (i = 0, d = bdesc_pcmpestr;
22741 i < ARRAY_SIZE (bdesc_pcmpestr);
22742 i++, d++)
22744 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22745 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
22746 else
22747 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
22748 def_builtin_const (d->mask, d->name, ftype, d->code);
22751 /* pcmpistr[im] insns. */
22752 for (i = 0, d = bdesc_pcmpistr;
22753 i < ARRAY_SIZE (bdesc_pcmpistr);
22754 i++, d++)
22756 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22757 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
22758 else
22759 ftype = INT_FTYPE_V16QI_V16QI_INT;
22760 def_builtin_const (d->mask, d->name, ftype, d->code);
22763 /* comi/ucomi insns. */
22764 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22766 if (d->mask == OPTION_MASK_ISA_SSE2)
22767 ftype = INT_FTYPE_V2DF_V2DF;
22768 else
22769 ftype = INT_FTYPE_V4SF_V4SF;
22770 def_builtin_const (d->mask, d->name, ftype, d->code);
22773 /* SSE */
22774 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
22775 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
22776 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
22777 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
22779 /* SSE or 3DNow!A */
22780 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22781 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
22782 IX86_BUILTIN_MASKMOVQ);
22784 /* SSE2 */
22785 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
22786 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
22788 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
22789 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
22790 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
22791 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
22793 /* SSE3. */
22794 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
22795 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
22796 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
22797 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
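/* Hedged reference (the wrappers live in pmmintrin.h, not in this file, and
   are quoted from memory): user code normally reaches these two builtins
   through the MONITOR/MWAIT intrinsics, roughly

     _mm_monitor (void const *__P, unsigned int __E, unsigned int __H)
     { __builtin_ia32_monitor (__P, __E, __H); }

     _mm_mwait (unsigned int __E, unsigned int __H)
     { __builtin_ia32_mwait (__E, __H); }
*/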
22799 /* AES */
22800 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
22801 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
22802 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
22803 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
22804 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
22805 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
22806 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
22807 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
22808 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
22809 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
22810 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
22811 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
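/* Hedged reference: the AES builtins above back the wmmintrin.h intrinsics;
   e.g. (quoted from memory, not from this file)

     _mm_aesenc_si128 (__m128i __X, __m128i __Y)
     { return (__m128i) __builtin_ia32_aesenc128 ((__v2di)__X, (__v2di)__Y); }

   performs one AES encryption round on __X with round key __Y.  */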
22813 /* PCLMUL */
22814 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
22815 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
22817 /* MMX access to the vec_init patterns. */
22818 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
22819 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
22821 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
22822 V4HI_FTYPE_HI_HI_HI_HI,
22823 IX86_BUILTIN_VEC_INIT_V4HI);
22825 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
22826 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
22827 IX86_BUILTIN_VEC_INIT_V8QI);
22829 /* Access to the vec_extract patterns. */
22830 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
22831 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
22832 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
22833 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
22834 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
22835 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
22836 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
22837 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
22838 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
22839 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
22841 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22842 "__builtin_ia32_vec_ext_v4hi",
22843 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
22845 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
22846 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
22848 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
22849 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
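/* Hedged example of a vec_extract builtin in use (recalled from xmmintrin.h,
   not defined in this file): extracting element 0 of a V4SF is how the
   scalar-result intrinsic is written,

     _mm_cvtss_f32 (__m128 __A)
     { return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0); }

   The element index must be a constant in range for the vector type.  */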
22851 /* Access to the vec_set patterns. */
22852 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
22853 "__builtin_ia32_vec_set_v2di",
22854 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
22856 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
22857 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
22859 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
22860 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
22862 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
22863 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
22865 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22866 "__builtin_ia32_vec_set_v4hi",
22867 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
22869 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
22870 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
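/* Hedged example of a vec_set builtin in use (recalled from emmintrin.h, not
   defined here):

     _mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
     { return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N); }

   replaces element __N of the V8HI vector with __D.  */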
22872 /* Add FMA4/XOP multi-arg builtin functions.  */
22873 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22875 if (d->name == 0)
22876 continue;
22878 ftype = (enum ix86_builtin_func_type) d->flag;
22879 def_builtin_const (d->mask, d->name, ftype, d->code);
22883 /* Internal method for ix86_init_builtins. */
22885 static void
22886 ix86_init_builtins_va_builtins_abi (void)
22888 tree ms_va_ref, sysv_va_ref;
22889 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22890 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22891 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22892 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22894 if (!TARGET_64BIT)
22895 return;
22896 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22897 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22898 ms_va_ref = build_reference_type (ms_va_list_type_node);
22899 sysv_va_ref =
22900 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22902 fnvoid_va_end_ms =
22903 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22904 fnvoid_va_start_ms =
22905 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22906 fnvoid_va_end_sysv =
22907 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22908 fnvoid_va_start_sysv =
22909 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22910 NULL_TREE);
22911 fnvoid_va_copy_ms =
22912 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22913 NULL_TREE);
22914 fnvoid_va_copy_sysv =
22915 build_function_type_list (void_type_node, sysv_va_ref,
22916 sysv_va_ref, NULL_TREE);
22918 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22919 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22920 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22921 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
22922 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
22923 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
22924 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
22925 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22926 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
22927 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22928 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
22929 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
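/* Hedged usage sketch (the __builtin_ms_va_list type name is recalled from
   GCC documentation; nothing below is defined in this file): on a 64-bit
   SysV host, an ms_abi varargs function pairs that calling convention with
   the ms_va_* builtins registered above:

     int __attribute__ ((ms_abi))
     sum (int n, ...)
     {
       __builtin_ms_va_list ap;
       int i, s = 0;
       __builtin_ms_va_start (ap, n);
       for (i = 0; i < n; i++)
         s += __builtin_va_arg (ap, int);
       __builtin_ms_va_end (ap);
       return s;
     }
*/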
22932 static void
22933 ix86_init_builtin_types (void)
22935 tree float128_type_node, float80_type_node;
22937 /* The __float80 type. */
22938 float80_type_node = long_double_type_node;
22939 if (TYPE_MODE (float80_type_node) != XFmode)
22941 /* long double does not have XFmode, so build a distinct __float80 type.  */
22942 float80_type_node = make_node (REAL_TYPE);
22944 TYPE_PRECISION (float80_type_node) = 80;
22945 layout_type (float80_type_node);
22947 (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");
22949 /* The __float128 type. */
22950 float128_type_node = make_node (REAL_TYPE);
22951 TYPE_PRECISION (float128_type_node) = 128;
22952 layout_type (float128_type_node);
22953 (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
22955 /* This macro is built by i386-builtin-types.awk. */
22956 DEFINE_BUILTIN_PRIMITIVE_TYPES;
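/* Illustrative effect of the registrations above (hedged): after this runs,
   C code for this target can name the types directly, e.g.

     __float80  e = 2.718281828459045235360287L;  // 80-bit x87 format
     __float128 q = 1.0;                          // 128-bit quad format

   regardless of which format 'long double' maps to on the target.  */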
22959 static void
22960 ix86_init_builtins (void)
22962 tree t;
22964 ix86_init_builtin_types ();
22966 /* TFmode support builtins. */
22967 def_builtin_const (0, "__builtin_infq",
22968 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
22969 def_builtin_const (0, "__builtin_huge_valq",
22970 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
22972 /* We will expand them to normal calls if SSE2 isn't available, since
22973    they are used by libgcc.  */
22974 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
22975 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
22976 BUILT_IN_MD, "__fabstf2", NULL_TREE);
22977 TREE_READONLY (t) = 1;
22978 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
22980 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
22981 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
22982 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
22983 TREE_READONLY (t) = 1;
22984 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
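/* Hedged usage sketch for the two TFmode builtins just registered:

     __float128 magnitude (__float128 x)
     { return __builtin_fabsq (x); }

     __float128 with_sign (__float128 mag, __float128 sgn)
     { return __builtin_copysignq (mag, sgn); }

   When SSE2 is unavailable these simply become calls to the libgcc routines
   __fabstf2 and __copysigntf3 named above.  */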
22986 ix86_init_mmx_sse_builtins ();
22988 if (TARGET_64BIT)
22989 ix86_init_builtins_va_builtins_abi ();
22992 /* Return the ix86 builtin for CODE. */
22994 static tree
22995 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
22997 if (code >= IX86_BUILTIN_MAX)
22998 return error_mark_node;
23000 return ix86_builtins[code];
23003 /* Errors in the source file can cause expand_expr to return const0_rtx
23004 where we expect a vector. To avoid crashing, use one of the vector
23005 clear instructions. */
23006 static rtx
23007 safe_vector_operand (rtx x, enum machine_mode mode)
23009 if (x == const0_rtx)
23010 x = CONST0_RTX (mode);
23011 return x;
23014 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
23016 static rtx
23017 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
23019 rtx pat;
23020 tree arg0 = CALL_EXPR_ARG (exp, 0);
23021 tree arg1 = CALL_EXPR_ARG (exp, 1);
23022 rtx op0 = expand_normal (arg0);
23023 rtx op1 = expand_normal (arg1);
23024 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23025 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23026 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
23028 if (VECTOR_MODE_P (mode0))
23029 op0 = safe_vector_operand (op0, mode0);
23030 if (VECTOR_MODE_P (mode1))
23031 op1 = safe_vector_operand (op1, mode1);
23033 if (optimize || !target
23034 || GET_MODE (target) != tmode
23035 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23036 target = gen_reg_rtx (tmode);
23038 if (GET_MODE (op1) == SImode && mode1 == TImode)
23040 rtx x = gen_reg_rtx (V4SImode);
23041 emit_insn (gen_sse2_loadd (x, op1));
23042 op1 = gen_lowpart (TImode, x);
23045 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
23046 op0 = copy_to_mode_reg (mode0, op0);
23047 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
23048 op1 = copy_to_mode_reg (mode1, op1);
23050 pat = GEN_FCN (icode) (target, op0, op1);
23051 if (! pat)
23052 return 0;
23054 emit_insn (pat);
23056 return target;
23059 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
23061 static rtx
23062 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
23063 enum ix86_builtin_func_type m_type,
23064 enum rtx_code sub_code)
23066 rtx pat;
23067 int i;
23068 int nargs;
23069 bool comparison_p = false;
23070 bool tf_p = false;
23071 bool last_arg_constant = false;
23072 int num_memory = 0;
23073 struct {
23074 rtx op;
23075 enum machine_mode mode;
23076 } args[4];
23078 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23080 switch (m_type)
23082 case MULTI_ARG_4_DF2_DI_I:
23083 case MULTI_ARG_4_DF2_DI_I1:
23084 case MULTI_ARG_4_SF2_SI_I:
23085 case MULTI_ARG_4_SF2_SI_I1:
23086 nargs = 4;
23087 last_arg_constant = true;
23088 break;
23090 case MULTI_ARG_3_SF:
23091 case MULTI_ARG_3_DF:
23092 case MULTI_ARG_3_SF2:
23093 case MULTI_ARG_3_DF2:
23094 case MULTI_ARG_3_DI:
23095 case MULTI_ARG_3_SI:
23096 case MULTI_ARG_3_SI_DI:
23097 case MULTI_ARG_3_HI:
23098 case MULTI_ARG_3_HI_SI:
23099 case MULTI_ARG_3_QI:
23100 case MULTI_ARG_3_DI2:
23101 case MULTI_ARG_3_SI2:
23102 case MULTI_ARG_3_HI2:
23103 case MULTI_ARG_3_QI2:
23104 nargs = 3;
23105 break;
23107 case MULTI_ARG_2_SF:
23108 case MULTI_ARG_2_DF:
23109 case MULTI_ARG_2_DI:
23110 case MULTI_ARG_2_SI:
23111 case MULTI_ARG_2_HI:
23112 case MULTI_ARG_2_QI:
23113 nargs = 2;
23114 break;
23116 case MULTI_ARG_2_DI_IMM:
23117 case MULTI_ARG_2_SI_IMM:
23118 case MULTI_ARG_2_HI_IMM:
23119 case MULTI_ARG_2_QI_IMM:
23120 nargs = 2;
23121 last_arg_constant = true;
23122 break;
23124 case MULTI_ARG_1_SF:
23125 case MULTI_ARG_1_DF:
23126 case MULTI_ARG_1_SF2:
23127 case MULTI_ARG_1_DF2:
23128 case MULTI_ARG_1_DI:
23129 case MULTI_ARG_1_SI:
23130 case MULTI_ARG_1_HI:
23131 case MULTI_ARG_1_QI:
23132 case MULTI_ARG_1_SI_DI:
23133 case MULTI_ARG_1_HI_DI:
23134 case MULTI_ARG_1_HI_SI:
23135 case MULTI_ARG_1_QI_DI:
23136 case MULTI_ARG_1_QI_SI:
23137 case MULTI_ARG_1_QI_HI:
23138 nargs = 1;
23139 break;
23141 case MULTI_ARG_2_DI_CMP:
23142 case MULTI_ARG_2_SI_CMP:
23143 case MULTI_ARG_2_HI_CMP:
23144 case MULTI_ARG_2_QI_CMP:
23145 nargs = 2;
23146 comparison_p = true;
23147 break;
23149 case MULTI_ARG_2_SF_TF:
23150 case MULTI_ARG_2_DF_TF:
23151 case MULTI_ARG_2_DI_TF:
23152 case MULTI_ARG_2_SI_TF:
23153 case MULTI_ARG_2_HI_TF:
23154 case MULTI_ARG_2_QI_TF:
23155 nargs = 2;
23156 tf_p = true;
23157 break;
23159 default:
23160 gcc_unreachable ();
23163 if (optimize || !target
23164 || GET_MODE (target) != tmode
23165 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23166 target = gen_reg_rtx (tmode);
23168 gcc_assert (nargs <= 4);
23170 for (i = 0; i < nargs; i++)
23172 tree arg = CALL_EXPR_ARG (exp, i);
23173 rtx op = expand_normal (arg);
23174 int adjust = (comparison_p) ? 1 : 0;
23175 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
23177 if (last_arg_constant && i == nargs-1)
23179 if (!CONST_INT_P (op))
23181 error ("last argument must be an immediate");
23182 return gen_reg_rtx (tmode);
23185 else
23187 if (VECTOR_MODE_P (mode))
23188 op = safe_vector_operand (op, mode);
23190 /* If we aren't optimizing, only allow one memory operand to be
23191 generated. */
23192 if (memory_operand (op, mode))
23193 num_memory++;
23195 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
23197 if (optimize
23198 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
23199 || num_memory > 1)
23200 op = force_reg (mode, op);
23203 args[i].op = op;
23204 args[i].mode = mode;
23207 switch (nargs)
23209 case 1:
23210 pat = GEN_FCN (icode) (target, args[0].op);
23211 break;
23213 case 2:
23214 if (tf_p)
23215 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
23216 GEN_INT ((int)sub_code));
23217 else if (! comparison_p)
23218 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23219 else
23221 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
23222 args[0].op,
23223 args[1].op);
23225 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
23227 break;
23229 case 3:
23230 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23231 break;
23233 case 4:
23234 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
23235 break;
23237 default:
23238 gcc_unreachable ();
23241 if (! pat)
23242 return 0;
23244 emit_insn (pat);
23245 return target;
23248 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23249 insns with vec_merge. */
23251 static rtx
23252 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23253 rtx target)
23255 rtx pat;
23256 tree arg0 = CALL_EXPR_ARG (exp, 0);
23257 rtx op1, op0 = expand_normal (arg0);
23258 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23259 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23261 if (optimize || !target
23262 || GET_MODE (target) != tmode
23263 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23264 target = gen_reg_rtx (tmode);
23266 if (VECTOR_MODE_P (mode0))
23267 op0 = safe_vector_operand (op0, mode0);
23269 if ((optimize && !register_operand (op0, mode0))
23270 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23271 op0 = copy_to_mode_reg (mode0, op0);
23273 op1 = op0;
23274 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23275 op1 = copy_to_mode_reg (mode0, op1);
23277 pat = GEN_FCN (icode) (target, op0, op1);
23278 if (! pat)
23279 return 0;
23280 emit_insn (pat);
23281 return target;
23284 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23286 static rtx
23287 ix86_expand_sse_compare (const struct builtin_description *d,
23288 tree exp, rtx target, bool swap)
23290 rtx pat;
23291 tree arg0 = CALL_EXPR_ARG (exp, 0);
23292 tree arg1 = CALL_EXPR_ARG (exp, 1);
23293 rtx op0 = expand_normal (arg0);
23294 rtx op1 = expand_normal (arg1);
23295 rtx op2;
23296 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23297 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23298 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23299 enum rtx_code comparison = d->comparison;
23301 if (VECTOR_MODE_P (mode0))
23302 op0 = safe_vector_operand (op0, mode0);
23303 if (VECTOR_MODE_P (mode1))
23304 op1 = safe_vector_operand (op1, mode1);
23306 /* Swap operands if we have a comparison that isn't available in
23307 hardware. */
23308 if (swap)
23310 rtx tmp = gen_reg_rtx (mode1);
23311 emit_move_insn (tmp, op1);
23312 op1 = op0;
23313 op0 = tmp;
23316 if (optimize || !target
23317 || GET_MODE (target) != tmode
23318 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23319 target = gen_reg_rtx (tmode);
23321 if ((optimize && !register_operand (op0, mode0))
23322 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23323 op0 = copy_to_mode_reg (mode0, op0);
23324 if ((optimize && !register_operand (op1, mode1))
23325 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23326 op1 = copy_to_mode_reg (mode1, op1);
23328 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23329 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23330 if (! pat)
23331 return 0;
23332 emit_insn (pat);
23333 return target;
23336 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23338 static rtx
23339 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23340 rtx target)
23342 rtx pat;
23343 tree arg0 = CALL_EXPR_ARG (exp, 0);
23344 tree arg1 = CALL_EXPR_ARG (exp, 1);
23345 rtx op0 = expand_normal (arg0);
23346 rtx op1 = expand_normal (arg1);
23347 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23348 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23349 enum rtx_code comparison = d->comparison;
23351 if (VECTOR_MODE_P (mode0))
23352 op0 = safe_vector_operand (op0, mode0);
23353 if (VECTOR_MODE_P (mode1))
23354 op1 = safe_vector_operand (op1, mode1);
23356 /* Swap operands if we have a comparison that isn't available in
23357 hardware. */
23358 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23360 rtx tmp = op1;
23361 op1 = op0;
23362 op0 = tmp;
23365 target = gen_reg_rtx (SImode);
23366 emit_move_insn (target, const0_rtx);
23367 target = gen_rtx_SUBREG (QImode, target, 0);
23369 if ((optimize && !register_operand (op0, mode0))
23370 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23371 op0 = copy_to_mode_reg (mode0, op0);
23372 if ((optimize && !register_operand (op1, mode1))
23373 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23374 op1 = copy_to_mode_reg (mode1, op1);
23376 pat = GEN_FCN (d->icode) (op0, op1);
23377 if (! pat)
23378 return 0;
23379 emit_insn (pat);
23380 emit_insn (gen_rtx_SET (VOIDmode,
23381 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23382 gen_rtx_fmt_ee (comparison, QImode,
23383 SET_DEST (pat),
23384 const0_rtx)));
23386 return SUBREG_REG (target);
23389 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23391 static rtx
23392 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23393 rtx target)
23395 rtx pat;
23396 tree arg0 = CALL_EXPR_ARG (exp, 0);
23397 tree arg1 = CALL_EXPR_ARG (exp, 1);
23398 rtx op0 = expand_normal (arg0);
23399 rtx op1 = expand_normal (arg1);
23400 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23401 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23402 enum rtx_code comparison = d->comparison;
23404 if (VECTOR_MODE_P (mode0))
23405 op0 = safe_vector_operand (op0, mode0);
23406 if (VECTOR_MODE_P (mode1))
23407 op1 = safe_vector_operand (op1, mode1);
23409 target = gen_reg_rtx (SImode);
23410 emit_move_insn (target, const0_rtx);
23411 target = gen_rtx_SUBREG (QImode, target, 0);
23413 if ((optimize && !register_operand (op0, mode0))
23414 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23415 op0 = copy_to_mode_reg (mode0, op0);
23416 if ((optimize && !register_operand (op1, mode1))
23417 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23418 op1 = copy_to_mode_reg (mode1, op1);
23420 pat = GEN_FCN (d->icode) (op0, op1);
23421 if (! pat)
23422 return 0;
23423 emit_insn (pat);
23424 emit_insn (gen_rtx_SET (VOIDmode,
23425 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23426 gen_rtx_fmt_ee (comparison, QImode,
23427 SET_DEST (pat),
23428 const0_rtx)));
23430 return SUBREG_REG (target);
23433 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23435 static rtx
23436 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23437 tree exp, rtx target)
23439 rtx pat;
23440 tree arg0 = CALL_EXPR_ARG (exp, 0);
23441 tree arg1 = CALL_EXPR_ARG (exp, 1);
23442 tree arg2 = CALL_EXPR_ARG (exp, 2);
23443 tree arg3 = CALL_EXPR_ARG (exp, 3);
23444 tree arg4 = CALL_EXPR_ARG (exp, 4);
23445 rtx scratch0, scratch1;
23446 rtx op0 = expand_normal (arg0);
23447 rtx op1 = expand_normal (arg1);
23448 rtx op2 = expand_normal (arg2);
23449 rtx op3 = expand_normal (arg3);
23450 rtx op4 = expand_normal (arg4);
23451 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23453 tmode0 = insn_data[d->icode].operand[0].mode;
23454 tmode1 = insn_data[d->icode].operand[1].mode;
23455 modev2 = insn_data[d->icode].operand[2].mode;
23456 modei3 = insn_data[d->icode].operand[3].mode;
23457 modev4 = insn_data[d->icode].operand[4].mode;
23458 modei5 = insn_data[d->icode].operand[5].mode;
23459 modeimm = insn_data[d->icode].operand[6].mode;
23461 if (VECTOR_MODE_P (modev2))
23462 op0 = safe_vector_operand (op0, modev2);
23463 if (VECTOR_MODE_P (modev4))
23464 op2 = safe_vector_operand (op2, modev4);
23466 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23467 op0 = copy_to_mode_reg (modev2, op0);
23468 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23469 op1 = copy_to_mode_reg (modei3, op1);
23470 if ((optimize && !register_operand (op2, modev4))
23471 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23472 op2 = copy_to_mode_reg (modev4, op2);
23473 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23474 op3 = copy_to_mode_reg (modei5, op3);
23476 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23478 error ("the fifth argument must be a 8-bit immediate");
23479 return const0_rtx;
23482 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23484 if (optimize || !target
23485 || GET_MODE (target) != tmode0
23486 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23487 target = gen_reg_rtx (tmode0);
23489 scratch1 = gen_reg_rtx (tmode1);
23491 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23493 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23495 if (optimize || !target
23496 || GET_MODE (target) != tmode1
23497 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23498 target = gen_reg_rtx (tmode1);
23500 scratch0 = gen_reg_rtx (tmode0);
23502 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23504 else
23506 gcc_assert (d->flag);
23508 scratch0 = gen_reg_rtx (tmode0);
23509 scratch1 = gen_reg_rtx (tmode1);
23511 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23514 if (! pat)
23515 return 0;
23517 emit_insn (pat);
23519 if (d->flag)
23521 target = gen_reg_rtx (SImode);
23522 emit_move_insn (target, const0_rtx);
23523 target = gen_rtx_SUBREG (QImode, target, 0);
23525 emit_insn
23526 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23527 gen_rtx_fmt_ee (EQ, QImode,
23528 gen_rtx_REG ((enum machine_mode) d->flag,
23529 FLAGS_REG),
23530 const0_rtx)));
23531 return SUBREG_REG (target);
23533 else
23534 return target;
23538 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
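/* Hedged reference: the index/mask variants arrive here via smmintrin.h
   wrappers along the lines of (quoted from memory, not from this file)

     _mm_cmpistri (__m128i __X, __m128i __Y, const int __M)
     { return __builtin_ia32_pcmpistri128 ((__v16qi)__X, (__v16qi)__Y, __M); }

   where the final argument must be a compile-time 8-bit immediate.  */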
23540 static rtx
23541 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23542 tree exp, rtx target)
23544 rtx pat;
23545 tree arg0 = CALL_EXPR_ARG (exp, 0);
23546 tree arg1 = CALL_EXPR_ARG (exp, 1);
23547 tree arg2 = CALL_EXPR_ARG (exp, 2);
23548 rtx scratch0, scratch1;
23549 rtx op0 = expand_normal (arg0);
23550 rtx op1 = expand_normal (arg1);
23551 rtx op2 = expand_normal (arg2);
23552 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23554 tmode0 = insn_data[d->icode].operand[0].mode;
23555 tmode1 = insn_data[d->icode].operand[1].mode;
23556 modev2 = insn_data[d->icode].operand[2].mode;
23557 modev3 = insn_data[d->icode].operand[3].mode;
23558 modeimm = insn_data[d->icode].operand[4].mode;
23560 if (VECTOR_MODE_P (modev2))
23561 op0 = safe_vector_operand (op0, modev2);
23562 if (VECTOR_MODE_P (modev3))
23563 op1 = safe_vector_operand (op1, modev3);
23565 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23566 op0 = copy_to_mode_reg (modev2, op0);
23567 if ((optimize && !register_operand (op1, modev3))
23568 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23569 op1 = copy_to_mode_reg (modev3, op1);
23571 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23573 error ("the third argument must be a 8-bit immediate");
23574 return const0_rtx;
23577 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23579 if (optimize || !target
23580 || GET_MODE (target) != tmode0
23581 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23582 target = gen_reg_rtx (tmode0);
23584 scratch1 = gen_reg_rtx (tmode1);
23586 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23588 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23590 if (optimize || !target
23591 || GET_MODE (target) != tmode1
23592 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23593 target = gen_reg_rtx (tmode1);
23595 scratch0 = gen_reg_rtx (tmode0);
23597 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23599 else
23601 gcc_assert (d->flag);
23603 scratch0 = gen_reg_rtx (tmode0);
23604 scratch1 = gen_reg_rtx (tmode1);
23606 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23609 if (! pat)
23610 return 0;
23612 emit_insn (pat);
23614 if (d->flag)
23616 target = gen_reg_rtx (SImode);
23617 emit_move_insn (target, const0_rtx);
23618 target = gen_rtx_SUBREG (QImode, target, 0);
23620 emit_insn
23621 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23622 gen_rtx_fmt_ee (EQ, QImode,
23623 gen_rtx_REG ((enum machine_mode) d->flag,
23624 FLAGS_REG),
23625 const0_rtx)));
23626 return SUBREG_REG (target);
23628 else
23629 return target;
23632 /* Subroutine of ix86_expand_builtin to take care of insns with
23633    a variable number of operands.  */
23635 static rtx
23636 ix86_expand_args_builtin (const struct builtin_description *d,
23637 tree exp, rtx target)
23639 rtx pat, real_target;
23640 unsigned int i, nargs;
23641 unsigned int nargs_constant = 0;
23642 int num_memory = 0;
23643 struct
23645 rtx op;
23646 enum machine_mode mode;
23647 } args[4];
23648 bool last_arg_count = false;
23649 enum insn_code icode = d->icode;
23650 const struct insn_data *insn_p = &insn_data[icode];
23651 enum machine_mode tmode = insn_p->operand[0].mode;
23652 enum machine_mode rmode = VOIDmode;
23653 bool swap = false;
23654 enum rtx_code comparison = d->comparison;
23656 switch ((enum ix86_builtin_func_type) d->flag)
23658 case INT_FTYPE_V8SF_V8SF_PTEST:
23659 case INT_FTYPE_V4DI_V4DI_PTEST:
23660 case INT_FTYPE_V4DF_V4DF_PTEST:
23661 case INT_FTYPE_V4SF_V4SF_PTEST:
23662 case INT_FTYPE_V2DI_V2DI_PTEST:
23663 case INT_FTYPE_V2DF_V2DF_PTEST:
23664 return ix86_expand_sse_ptest (d, exp, target);
23665 case FLOAT128_FTYPE_FLOAT128:
23666 case FLOAT_FTYPE_FLOAT:
23667 case INT_FTYPE_INT:
23668 case UINT64_FTYPE_INT:
23669 case UINT16_FTYPE_UINT16:
23670 case INT64_FTYPE_INT64:
23671 case INT64_FTYPE_V4SF:
23672 case INT64_FTYPE_V2DF:
23673 case INT_FTYPE_V16QI:
23674 case INT_FTYPE_V8QI:
23675 case INT_FTYPE_V8SF:
23676 case INT_FTYPE_V4DF:
23677 case INT_FTYPE_V4SF:
23678 case INT_FTYPE_V2DF:
23679 case V16QI_FTYPE_V16QI:
23680 case V8SI_FTYPE_V8SF:
23681 case V8SI_FTYPE_V4SI:
23682 case V8HI_FTYPE_V8HI:
23683 case V8HI_FTYPE_V16QI:
23684 case V8QI_FTYPE_V8QI:
23685 case V8SF_FTYPE_V8SF:
23686 case V8SF_FTYPE_V8SI:
23687 case V8SF_FTYPE_V4SF:
23688 case V4SI_FTYPE_V4SI:
23689 case V4SI_FTYPE_V16QI:
23690 case V4SI_FTYPE_V4SF:
23691 case V4SI_FTYPE_V8SI:
23692 case V4SI_FTYPE_V8HI:
23693 case V4SI_FTYPE_V4DF:
23694 case V4SI_FTYPE_V2DF:
23695 case V4HI_FTYPE_V4HI:
23696 case V4DF_FTYPE_V4DF:
23697 case V4DF_FTYPE_V4SI:
23698 case V4DF_FTYPE_V4SF:
23699 case V4DF_FTYPE_V2DF:
23700 case V4SF_FTYPE_V4SF:
23701 case V4SF_FTYPE_V4SI:
23702 case V4SF_FTYPE_V8SF:
23703 case V4SF_FTYPE_V4DF:
23704 case V4SF_FTYPE_V2DF:
23705 case V2DI_FTYPE_V2DI:
23706 case V2DI_FTYPE_V16QI:
23707 case V2DI_FTYPE_V8HI:
23708 case V2DI_FTYPE_V4SI:
23709 case V2DF_FTYPE_V2DF:
23710 case V2DF_FTYPE_V4SI:
23711 case V2DF_FTYPE_V4DF:
23712 case V2DF_FTYPE_V4SF:
23713 case V2DF_FTYPE_V2SI:
23714 case V2SI_FTYPE_V2SI:
23715 case V2SI_FTYPE_V4SF:
23716 case V2SI_FTYPE_V2SF:
23717 case V2SI_FTYPE_V2DF:
23718 case V2SF_FTYPE_V2SF:
23719 case V2SF_FTYPE_V2SI:
23720 nargs = 1;
23721 break;
23722 case V4SF_FTYPE_V4SF_VEC_MERGE:
23723 case V2DF_FTYPE_V2DF_VEC_MERGE:
23724 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23725 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23726 case V16QI_FTYPE_V16QI_V16QI:
23727 case V16QI_FTYPE_V8HI_V8HI:
23728 case V8QI_FTYPE_V8QI_V8QI:
23729 case V8QI_FTYPE_V4HI_V4HI:
23730 case V8HI_FTYPE_V8HI_V8HI:
23731 case V8HI_FTYPE_V16QI_V16QI:
23732 case V8HI_FTYPE_V4SI_V4SI:
23733 case V8SF_FTYPE_V8SF_V8SF:
23734 case V8SF_FTYPE_V8SF_V8SI:
23735 case V4SI_FTYPE_V4SI_V4SI:
23736 case V4SI_FTYPE_V8HI_V8HI:
23737 case V4SI_FTYPE_V4SF_V4SF:
23738 case V4SI_FTYPE_V2DF_V2DF:
23739 case V4HI_FTYPE_V4HI_V4HI:
23740 case V4HI_FTYPE_V8QI_V8QI:
23741 case V4HI_FTYPE_V2SI_V2SI:
23742 case V4DF_FTYPE_V4DF_V4DF:
23743 case V4DF_FTYPE_V4DF_V4DI:
23744 case V4SF_FTYPE_V4SF_V4SF:
23745 case V4SF_FTYPE_V4SF_V4SI:
23746 case V4SF_FTYPE_V4SF_V2SI:
23747 case V4SF_FTYPE_V4SF_V2DF:
23748 case V4SF_FTYPE_V4SF_DI:
23749 case V4SF_FTYPE_V4SF_SI:
23750 case V2DI_FTYPE_V2DI_V2DI:
23751 case V2DI_FTYPE_V16QI_V16QI:
23752 case V2DI_FTYPE_V4SI_V4SI:
23753 case V2DI_FTYPE_V2DI_V16QI:
23754 case V2DI_FTYPE_V2DF_V2DF:
23755 case V2SI_FTYPE_V2SI_V2SI:
23756 case V2SI_FTYPE_V4HI_V4HI:
23757 case V2SI_FTYPE_V2SF_V2SF:
23758 case V2DF_FTYPE_V2DF_V2DF:
23759 case V2DF_FTYPE_V2DF_V4SF:
23760 case V2DF_FTYPE_V2DF_V2DI:
23761 case V2DF_FTYPE_V2DF_DI:
23762 case V2DF_FTYPE_V2DF_SI:
23763 case V2SF_FTYPE_V2SF_V2SF:
23764 case V1DI_FTYPE_V1DI_V1DI:
23765 case V1DI_FTYPE_V8QI_V8QI:
23766 case V1DI_FTYPE_V2SI_V2SI:
23767 if (comparison == UNKNOWN)
23768 return ix86_expand_binop_builtin (icode, exp, target);
23769 nargs = 2;
23770 break;
23771 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23772 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23773 gcc_assert (comparison != UNKNOWN);
23774 nargs = 2;
23775 swap = true;
23776 break;
23777 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23778 case V8HI_FTYPE_V8HI_SI_COUNT:
23779 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23780 case V4SI_FTYPE_V4SI_SI_COUNT:
23781 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23782 case V4HI_FTYPE_V4HI_SI_COUNT:
23783 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23784 case V2DI_FTYPE_V2DI_SI_COUNT:
23785 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23786 case V2SI_FTYPE_V2SI_SI_COUNT:
23787 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23788 case V1DI_FTYPE_V1DI_SI_COUNT:
23789 nargs = 2;
23790 last_arg_count = true;
23791 break;
23792 case UINT64_FTYPE_UINT64_UINT64:
23793 case UINT_FTYPE_UINT_UINT:
23794 case UINT_FTYPE_UINT_USHORT:
23795 case UINT_FTYPE_UINT_UCHAR:
23796 case UINT16_FTYPE_UINT16_INT:
23797 case UINT8_FTYPE_UINT8_INT:
23798 nargs = 2;
23799 break;
23800 case V2DI_FTYPE_V2DI_INT_CONVERT:
23801 nargs = 2;
23802 rmode = V1TImode;
23803 nargs_constant = 1;
23804 break;
23805 case V8HI_FTYPE_V8HI_INT:
23806 case V8SF_FTYPE_V8SF_INT:
23807 case V4SI_FTYPE_V4SI_INT:
23808 case V4SI_FTYPE_V8SI_INT:
23809 case V4HI_FTYPE_V4HI_INT:
23810 case V4DF_FTYPE_V4DF_INT:
23811 case V4SF_FTYPE_V4SF_INT:
23812 case V4SF_FTYPE_V8SF_INT:
23813 case V2DI_FTYPE_V2DI_INT:
23814 case V2DF_FTYPE_V2DF_INT:
23815 case V2DF_FTYPE_V4DF_INT:
23816 nargs = 2;
23817 nargs_constant = 1;
23818 break;
23819 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23820 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23821 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23822 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23823 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23824 nargs = 3;
23825 break;
23826 case V16QI_FTYPE_V16QI_V16QI_INT:
23827 case V8HI_FTYPE_V8HI_V8HI_INT:
23828 case V8SI_FTYPE_V8SI_V8SI_INT:
23829 case V8SI_FTYPE_V8SI_V4SI_INT:
23830 case V8SF_FTYPE_V8SF_V8SF_INT:
23831 case V8SF_FTYPE_V8SF_V4SF_INT:
23832 case V4SI_FTYPE_V4SI_V4SI_INT:
23833 case V4DF_FTYPE_V4DF_V4DF_INT:
23834 case V4DF_FTYPE_V4DF_V2DF_INT:
23835 case V4SF_FTYPE_V4SF_V4SF_INT:
23836 case V2DI_FTYPE_V2DI_V2DI_INT:
23837 case V2DF_FTYPE_V2DF_V2DF_INT:
23838 nargs = 3;
23839 nargs_constant = 1;
23840 break;
23841 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23842 nargs = 3;
23843 rmode = V2DImode;
23844 nargs_constant = 1;
23845 break;
23846 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23847 nargs = 3;
23848 rmode = DImode;
23849 nargs_constant = 1;
23850 break;
23851 case V2DI_FTYPE_V2DI_UINT_UINT:
23852 nargs = 3;
23853 nargs_constant = 2;
23854 break;
23855 case V2DF_FTYPE_V2DF_V2DF_V2DI_INT:
23856 case V4DF_FTYPE_V4DF_V4DF_V4DI_INT:
23857 case V4SF_FTYPE_V4SF_V4SF_V4SI_INT:
23858 case V8SF_FTYPE_V8SF_V8SF_V8SI_INT:
23859 nargs = 4;
23860 nargs_constant = 1;
23861 break;
23862 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23863 nargs = 4;
23864 nargs_constant = 2;
23865 break;
23866 default:
23867 gcc_unreachable ();
23870 gcc_assert (nargs <= ARRAY_SIZE (args));
23872 if (comparison != UNKNOWN)
23874 gcc_assert (nargs == 2);
23875 return ix86_expand_sse_compare (d, exp, target, swap);
23878 if (rmode == VOIDmode || rmode == tmode)
23880 if (optimize
23881 || target == 0
23882 || GET_MODE (target) != tmode
23883 || ! (*insn_p->operand[0].predicate) (target, tmode))
23884 target = gen_reg_rtx (tmode);
23885 real_target = target;
23887 else
23889 target = gen_reg_rtx (rmode);
23890 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23893 for (i = 0; i < nargs; i++)
23895 tree arg = CALL_EXPR_ARG (exp, i);
23896 rtx op = expand_normal (arg);
23897 enum machine_mode mode = insn_p->operand[i + 1].mode;
23898 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23900 if (last_arg_count && (i + 1) == nargs)
23902 /* SIMD shift insns take either an 8-bit immediate or a
23903    register as the shift count. But builtin functions take int as
23904    the count. If the count doesn't match, we put it in a register.  */
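/* Hedged example of the mismatch handled here (wrapper recalled from
   emmintrin.h, not defined in this file): _mm_slli_epi16 passes the count as
   a plain int,

     _mm_slli_epi16 (__m128i __A, int __B)
     { return (__m128i) __builtin_ia32_psllwi128 ((__v8hi)__A, __B); }

   so a non-constant count is moved into a register below.  */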
23905 if (!match)
23907 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23908 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23909 op = copy_to_reg (op);
23912 else if ((nargs - i) <= nargs_constant)
23914 if (!match)
23915 switch (icode)
23917 case CODE_FOR_sse4_1_roundpd:
23918 case CODE_FOR_sse4_1_roundps:
23919 case CODE_FOR_sse4_1_roundsd:
23920 case CODE_FOR_sse4_1_roundss:
23921 case CODE_FOR_sse4_1_blendps:
23922 case CODE_FOR_avx_blendpd256:
23923 case CODE_FOR_avx_vpermilv4df:
23924 case CODE_FOR_avx_roundpd256:
23925 case CODE_FOR_avx_roundps256:
23926 error ("the last argument must be a 4-bit immediate");
23927 return const0_rtx;
23929 case CODE_FOR_sse4_1_blendpd:
23930 case CODE_FOR_avx_vpermilv2df:
23931 case CODE_FOR_xop_vpermil2v2df3:
23932 case CODE_FOR_xop_vpermil2v4sf3:
23933 case CODE_FOR_xop_vpermil2v4df3:
23934 case CODE_FOR_xop_vpermil2v8sf3:
23935 error ("the last argument must be a 2-bit immediate");
23936 return const0_rtx;
23938 case CODE_FOR_avx_vextractf128v4df:
23939 case CODE_FOR_avx_vextractf128v8sf:
23940 case CODE_FOR_avx_vextractf128v8si:
23941 case CODE_FOR_avx_vinsertf128v4df:
23942 case CODE_FOR_avx_vinsertf128v8sf:
23943 case CODE_FOR_avx_vinsertf128v8si:
23944 error ("the last argument must be a 1-bit immediate");
23945 return const0_rtx;
23947 case CODE_FOR_avx_cmpsdv2df3:
23948 case CODE_FOR_avx_cmpssv4sf3:
23949 case CODE_FOR_avx_cmppdv2df3:
23950 case CODE_FOR_avx_cmppsv4sf3:
23951 case CODE_FOR_avx_cmppdv4df3:
23952 case CODE_FOR_avx_cmppsv8sf3:
23953 error ("the last argument must be a 5-bit immediate");
23954 return const0_rtx;
23956 default:
23957 switch (nargs_constant)
23959 case 2:
23960 if ((nargs - i) == nargs_constant)
23962 error ("the next to last argument must be an 8-bit immediate");
23963 break;
23965 case 1:
23966 error ("the last argument must be an 8-bit immediate");
23967 break;
23968 default:
23969 gcc_unreachable ();
23971 return const0_rtx;
23974 else
23976 if (VECTOR_MODE_P (mode))
23977 op = safe_vector_operand (op, mode);
23979 /* If we aren't optimizing, only allow one memory operand to
23980 be generated. */
23981 if (memory_operand (op, mode))
23982 num_memory++;
23984 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23986 if (optimize || !match || num_memory > 1)
23987 op = copy_to_mode_reg (mode, op);
23989 else
23991 op = copy_to_reg (op);
23992 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23996 args[i].op = op;
23997 args[i].mode = mode;
24000 switch (nargs)
24002 case 1:
24003 pat = GEN_FCN (icode) (real_target, args[0].op);
24004 break;
24005 case 2:
24006 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
24007 break;
24008 case 3:
24009 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24010 args[2].op);
24011 break;
24012 case 4:
24013 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
24014 args[2].op, args[3].op);
24015 break;
24016 default:
24017 gcc_unreachable ();
24020 if (! pat)
24021 return 0;
24023 emit_insn (pat);
24024 return target;
24027 /* Subroutine of ix86_expand_builtin to take care of special insns
24028    with a variable number of operands.  */
24030 static rtx
24031 ix86_expand_special_args_builtin (const struct builtin_description *d,
24032 tree exp, rtx target)
24034 tree arg;
24035 rtx pat, op;
24036 unsigned int i, nargs, arg_adjust, memory;
24037 struct
24039 rtx op;
24040 enum machine_mode mode;
24041 } args[3];
24042 enum insn_code icode = d->icode;
24043 bool last_arg_constant = false;
24044 const struct insn_data *insn_p = &insn_data[icode];
24045 enum machine_mode tmode = insn_p->operand[0].mode;
24046 enum { load, store } klass;
24048 switch ((enum ix86_builtin_func_type) d->flag)
24050 case VOID_FTYPE_VOID:
24051 emit_insn (GEN_FCN (icode) (target));
24052 return 0;
24053 case UINT64_FTYPE_VOID:
24054 nargs = 0;
24055 klass = load;
24056 memory = 0;
24057 break;
24058 case UINT64_FTYPE_PUNSIGNED:
24059 case V2DI_FTYPE_PV2DI:
24060 case V32QI_FTYPE_PCCHAR:
24061 case V16QI_FTYPE_PCCHAR:
24062 case V8SF_FTYPE_PCV4SF:
24063 case V8SF_FTYPE_PCFLOAT:
24064 case V4SF_FTYPE_PCFLOAT:
24065 case V4DF_FTYPE_PCV2DF:
24066 case V4DF_FTYPE_PCDOUBLE:
24067 case V2DF_FTYPE_PCDOUBLE:
24068 case VOID_FTYPE_PVOID:
24069 nargs = 1;
24070 klass = load;
24071 memory = 0;
24072 break;
24073 case VOID_FTYPE_PV2SF_V4SF:
24074 case VOID_FTYPE_PV4DI_V4DI:
24075 case VOID_FTYPE_PV2DI_V2DI:
24076 case VOID_FTYPE_PCHAR_V32QI:
24077 case VOID_FTYPE_PCHAR_V16QI:
24078 case VOID_FTYPE_PFLOAT_V8SF:
24079 case VOID_FTYPE_PFLOAT_V4SF:
24080 case VOID_FTYPE_PDOUBLE_V4DF:
24081 case VOID_FTYPE_PDOUBLE_V2DF:
24082 case VOID_FTYPE_PULONGLONG_ULONGLONG:
24083 case VOID_FTYPE_PINT_INT:
24084 nargs = 1;
24085 klass = store;
24086 /* Reserve memory operand for target. */
24087 memory = ARRAY_SIZE (args);
24088 break;
24089 case V4SF_FTYPE_V4SF_PCV2SF:
24090 case V2DF_FTYPE_V2DF_PCDOUBLE:
24091 nargs = 2;
24092 klass = load;
24093 memory = 1;
24094 break;
24095 case V8SF_FTYPE_PCV8SF_V8SF:
24096 case V4DF_FTYPE_PCV4DF_V4DF:
24097 case V4SF_FTYPE_PCV4SF_V4SF:
24098 case V2DF_FTYPE_PCV2DF_V2DF:
24099 nargs = 2;
24100 klass = load;
24101 memory = 0;
24102 break;
24103 case VOID_FTYPE_PV8SF_V8SF_V8SF:
24104 case VOID_FTYPE_PV4DF_V4DF_V4DF:
24105 case VOID_FTYPE_PV4SF_V4SF_V4SF:
24106 case VOID_FTYPE_PV2DF_V2DF_V2DF:
24107 nargs = 2;
24108 klass = store;
24109 /* Reserve memory operand for target. */
24110 memory = ARRAY_SIZE (args);
24111 break;
24112 case VOID_FTYPE_UINT_UINT_UINT:
24113 case VOID_FTYPE_UINT64_UINT_UINT:
24114 case UCHAR_FTYPE_UINT_UINT_UINT:
24115 case UCHAR_FTYPE_UINT64_UINT_UINT:
24116 nargs = 3;
24117 klass = load;
24118 memory = ARRAY_SIZE (args);
24119 last_arg_constant = true;
24120 break;
24121 default:
24122 gcc_unreachable ();
24125 gcc_assert (nargs <= ARRAY_SIZE (args));
24127 if (klass == store)
24129 arg = CALL_EXPR_ARG (exp, 0);
24130 op = expand_normal (arg);
24131 gcc_assert (target == 0);
24132 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
24133 arg_adjust = 1;
24135 else
24137 arg_adjust = 0;
24138 if (optimize
24139 || target == 0
24140 || GET_MODE (target) != tmode
24141 || ! (*insn_p->operand[0].predicate) (target, tmode))
24142 target = gen_reg_rtx (tmode);
24145 for (i = 0; i < nargs; i++)
24147 enum machine_mode mode = insn_p->operand[i + 1].mode;
24148 bool match;
24150 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
24151 op = expand_normal (arg);
24152 match = (*insn_p->operand[i + 1].predicate) (op, mode);
24154 if (last_arg_constant && (i + 1) == nargs)
24156 if (!match)
24158 if (icode == CODE_FOR_lwp_lwpvalsi3
24159 || icode == CODE_FOR_lwp_lwpinssi3
24160 || icode == CODE_FOR_lwp_lwpvaldi3
24161 || icode == CODE_FOR_lwp_lwpinsdi3)
24162 error ("the last argument must be a 32-bit immediate");
24163 else
24164 error ("the last argument must be an 8-bit immediate");
24165 return const0_rtx;
24168 else
24170 if (i == memory)
24172 /* This must be the memory operand. */
24173 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24174 gcc_assert (GET_MODE (op) == mode
24175 || GET_MODE (op) == VOIDmode);
24177 else
24179 /* This must be register. */
24180 if (VECTOR_MODE_P (mode))
24181 op = safe_vector_operand (op, mode);
24183 gcc_assert (GET_MODE (op) == mode
24184 || GET_MODE (op) == VOIDmode);
24185 op = copy_to_mode_reg (mode, op);
24189 args[i].op = op;
24190 args[i].mode = mode;
24193 switch (nargs)
24195 case 0:
24196 pat = GEN_FCN (icode) (target);
24197 break;
24198 case 1:
24199 pat = GEN_FCN (icode) (target, args[0].op);
24200 break;
24201 case 2:
24202 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24203 break;
24204 case 3:
24205 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
24206 break;
24207 default:
24208 gcc_unreachable ();
24211 if (! pat)
24212 return 0;
24213 emit_insn (pat);
24214 return klass == store ? 0 : target;
24217 /* Return the integer constant in ARG. Constrain it to be in the range
24218 of the subparts of VEC_TYPE; issue an error if not. */
24220 static int
24221 get_element_number (tree vec_type, tree arg)
24223 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24225 if (!host_integerp (arg, 1)
24226 || (elt = tree_low_cst (arg, 1), elt > max))
24228 error ("selector must be an integer constant in the range 0..%wi", max);
24229 return 0;
24232 return elt;
24235 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24236 ix86_expand_vector_init. We DO have language-level syntax for this, in
24237 the form of (type){ init-list }. Except that since we can't place emms
24238 instructions from inside the compiler, we can't allow the use of MMX
24239 registers unless the user explicitly asks for it. So we do *not* define
24240 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24241 we have builtins invoked by mmintrin.h that give us license to emit
24242 these sorts of instructions. */
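/* For instance (assumed from GCC's mmintrin.h, not shown in this file):
   _mm_set_pi32 is implemented in terms of __builtin_ia32_vec_init_v2si,
   which is expanded by ix86_expand_vec_init_builtin below rather than by a
   vec_init pattern in mmx.md. */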
24244 static rtx
24245 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24247 enum machine_mode tmode = TYPE_MODE (type);
24248 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24249 int i, n_elt = GET_MODE_NUNITS (tmode);
24250 rtvec v = rtvec_alloc (n_elt);
24252 gcc_assert (VECTOR_MODE_P (tmode));
24253 gcc_assert (call_expr_nargs (exp) == n_elt);
24255 for (i = 0; i < n_elt; ++i)
24257 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24258 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24261 if (!target || !register_operand (target, tmode))
24262 target = gen_reg_rtx (tmode);
24264 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24265 return target;
24268 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24269 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24270 had a language-level syntax for referencing vector elements. */
24272 static rtx
24273 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24275 enum machine_mode tmode, mode0;
24276 tree arg0, arg1;
24277 int elt;
24278 rtx op0;
24280 arg0 = CALL_EXPR_ARG (exp, 0);
24281 arg1 = CALL_EXPR_ARG (exp, 1);
24283 op0 = expand_normal (arg0);
24284 elt = get_element_number (TREE_TYPE (arg0), arg1);
24286 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24287 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24288 gcc_assert (VECTOR_MODE_P (mode0));
24290 op0 = force_reg (mode0, op0);
24292 if (optimize || !target || !register_operand (target, tmode))
24293 target = gen_reg_rtx (tmode);
24295 ix86_expand_vector_extract (true, target, op0, elt);
24297 return target;
24300 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24301 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24302 a language-level syntax for referencing vector elements. */
24304 static rtx
24305 ix86_expand_vec_set_builtin (tree exp)
24307 enum machine_mode tmode, mode1;
24308 tree arg0, arg1, arg2;
24309 int elt;
24310 rtx op0, op1, target;
24312 arg0 = CALL_EXPR_ARG (exp, 0);
24313 arg1 = CALL_EXPR_ARG (exp, 1);
24314 arg2 = CALL_EXPR_ARG (exp, 2);
24316 tmode = TYPE_MODE (TREE_TYPE (arg0));
24317 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24318 gcc_assert (VECTOR_MODE_P (tmode));
24320 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24321 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24322 elt = get_element_number (TREE_TYPE (arg0), arg2);
24324 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24325 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24327 op0 = force_reg (tmode, op0);
24328 op1 = force_reg (mode1, op1);
24330 /* OP0 is the source of these builtin functions and shouldn't be
24331 modified. Create a copy, use it and return it as target. */
24332 target = gen_reg_rtx (tmode);
24333 emit_move_insn (target, op0);
24334 ix86_expand_vector_set (true, target, op1, elt);
24336 return target;
24339 /* Expand an expression EXP that calls a built-in function,
24340 with result going to TARGET if that's convenient
24341 (and in mode MODE if that's convenient).
24342 SUBTARGET may be used as the target for computing one of EXP's operands.
24343 IGNORE is nonzero if the value is to be ignored. */
24345 static rtx
24346 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24347 enum machine_mode mode ATTRIBUTE_UNUSED,
24348 int ignore ATTRIBUTE_UNUSED)
24350 const struct builtin_description *d;
24351 size_t i;
24352 enum insn_code icode;
24353 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24354 tree arg0, arg1, arg2;
24355 rtx op0, op1, op2, pat;
24356 enum machine_mode mode0, mode1, mode2;
24357 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24359 /* Determine whether the builtin function is available under the current ISA.
24360 Originally the builtin was not created if it wasn't applicable to the
24361 current ISA based on the command line switches. With function specific
24362 options, we need to check in the context of the function making the call
24363 whether it is supported. */
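/* Illustrative example (exact option spelling assumed): calling a 256-bit
   AVX builtin such as __builtin_ia32_sqrtpd256 from a function compiled
   without AVX enabled reports something like
   "__builtin_ia32_sqrtpd256 needs isa option -mavx"
   and the call expands to const0_rtx below. */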
24364 if (ix86_builtins_isa[fcode].isa
24365 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24367 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24368 NULL, NULL, false);
24370 if (!opts)
24371 error ("%qE needs unknown isa option", fndecl);
24372 else
24374 gcc_assert (opts != NULL);
24375 error ("%qE needs isa option %s", fndecl, opts);
24376 free (opts);
24378 return const0_rtx;
24381 switch (fcode)
24383 case IX86_BUILTIN_MASKMOVQ:
24384 case IX86_BUILTIN_MASKMOVDQU:
24385 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24386 ? CODE_FOR_mmx_maskmovq
24387 : CODE_FOR_sse2_maskmovdqu);
24388 /* Note the arg order is different from the operand order. */
24389 arg1 = CALL_EXPR_ARG (exp, 0);
24390 arg2 = CALL_EXPR_ARG (exp, 1);
24391 arg0 = CALL_EXPR_ARG (exp, 2);
24392 op0 = expand_normal (arg0);
24393 op1 = expand_normal (arg1);
24394 op2 = expand_normal (arg2);
24395 mode0 = insn_data[icode].operand[0].mode;
24396 mode1 = insn_data[icode].operand[1].mode;
24397 mode2 = insn_data[icode].operand[2].mode;
24399 op0 = force_reg (Pmode, op0);
24400 op0 = gen_rtx_MEM (mode1, op0);
24402 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24403 op0 = copy_to_mode_reg (mode0, op0);
24404 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24405 op1 = copy_to_mode_reg (mode1, op1);
24406 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24407 op2 = copy_to_mode_reg (mode2, op2);
24408 pat = GEN_FCN (icode) (op0, op1, op2);
24409 if (! pat)
24410 return 0;
24411 emit_insn (pat);
24412 return 0;
24414 case IX86_BUILTIN_LDMXCSR:
24415 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24416 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24417 emit_move_insn (target, op0);
24418 emit_insn (gen_sse_ldmxcsr (target));
24419 return 0;
24421 case IX86_BUILTIN_STMXCSR:
24422 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24423 emit_insn (gen_sse_stmxcsr (target));
24424 return copy_to_mode_reg (SImode, target);
24426 case IX86_BUILTIN_CLFLUSH:
24427 arg0 = CALL_EXPR_ARG (exp, 0);
24428 op0 = expand_normal (arg0);
24429 icode = CODE_FOR_sse2_clflush;
24430 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24431 op0 = copy_to_mode_reg (Pmode, op0);
24433 emit_insn (gen_sse2_clflush (op0));
24434 return 0;
24436 case IX86_BUILTIN_MONITOR:
24437 arg0 = CALL_EXPR_ARG (exp, 0);
24438 arg1 = CALL_EXPR_ARG (exp, 1);
24439 arg2 = CALL_EXPR_ARG (exp, 2);
24440 op0 = expand_normal (arg0);
24441 op1 = expand_normal (arg1);
24442 op2 = expand_normal (arg2);
24443 if (!REG_P (op0))
24444 op0 = copy_to_mode_reg (Pmode, op0);
24445 if (!REG_P (op1))
24446 op1 = copy_to_mode_reg (SImode, op1);
24447 if (!REG_P (op2))
24448 op2 = copy_to_mode_reg (SImode, op2);
24449 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24450 return 0;
24452 case IX86_BUILTIN_MWAIT:
24453 arg0 = CALL_EXPR_ARG (exp, 0);
24454 arg1 = CALL_EXPR_ARG (exp, 1);
24455 op0 = expand_normal (arg0);
24456 op1 = expand_normal (arg1);
24457 if (!REG_P (op0))
24458 op0 = copy_to_mode_reg (SImode, op0);
24459 if (!REG_P (op1))
24460 op1 = copy_to_mode_reg (SImode, op1);
24461 emit_insn (gen_sse3_mwait (op0, op1));
24462 return 0;
24464 case IX86_BUILTIN_VEC_INIT_V2SI:
24465 case IX86_BUILTIN_VEC_INIT_V4HI:
24466 case IX86_BUILTIN_VEC_INIT_V8QI:
24467 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24469 case IX86_BUILTIN_VEC_EXT_V2DF:
24470 case IX86_BUILTIN_VEC_EXT_V2DI:
24471 case IX86_BUILTIN_VEC_EXT_V4SF:
24472 case IX86_BUILTIN_VEC_EXT_V4SI:
24473 case IX86_BUILTIN_VEC_EXT_V8HI:
24474 case IX86_BUILTIN_VEC_EXT_V2SI:
24475 case IX86_BUILTIN_VEC_EXT_V4HI:
24476 case IX86_BUILTIN_VEC_EXT_V16QI:
24477 return ix86_expand_vec_ext_builtin (exp, target);
24479 case IX86_BUILTIN_VEC_SET_V2DI:
24480 case IX86_BUILTIN_VEC_SET_V4SF:
24481 case IX86_BUILTIN_VEC_SET_V4SI:
24482 case IX86_BUILTIN_VEC_SET_V8HI:
24483 case IX86_BUILTIN_VEC_SET_V4HI:
24484 case IX86_BUILTIN_VEC_SET_V16QI:
24485 return ix86_expand_vec_set_builtin (exp);
24487 case IX86_BUILTIN_VEC_PERM_V2DF:
24488 case IX86_BUILTIN_VEC_PERM_V4SF:
24489 case IX86_BUILTIN_VEC_PERM_V2DI:
24490 case IX86_BUILTIN_VEC_PERM_V4SI:
24491 case IX86_BUILTIN_VEC_PERM_V8HI:
24492 case IX86_BUILTIN_VEC_PERM_V16QI:
24493 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24494 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24495 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24496 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24497 case IX86_BUILTIN_VEC_PERM_V4DF:
24498 case IX86_BUILTIN_VEC_PERM_V8SF:
24499 return ix86_expand_vec_perm_builtin (exp);
24501 case IX86_BUILTIN_INFQ:
24502 case IX86_BUILTIN_HUGE_VALQ:
24504 REAL_VALUE_TYPE inf;
24505 rtx tmp;
24507 real_inf (&inf);
24508 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24510 tmp = validize_mem (force_const_mem (mode, tmp));
24512 if (target == 0)
24513 target = gen_reg_rtx (mode);
24515 emit_move_insn (target, tmp);
24516 return target;
24519 case IX86_BUILTIN_LLWPCB:
24520 arg0 = CALL_EXPR_ARG (exp, 0);
24521 op0 = expand_normal (arg0);
24522 icode = CODE_FOR_lwp_llwpcb;
24523 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24524 op0 = copy_to_mode_reg (Pmode, op0);
24525 emit_insn (gen_lwp_llwpcb (op0));
24526 return 0;
24528 case IX86_BUILTIN_SLWPCB:
24529 icode = CODE_FOR_lwp_slwpcb;
24530 if (!target
24531 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24532 target = gen_reg_rtx (Pmode);
24533 emit_insn (gen_lwp_slwpcb (target));
24534 return target;
24536 default:
24537 break;
24540 for (i = 0, d = bdesc_special_args;
24541 i < ARRAY_SIZE (bdesc_special_args);
24542 i++, d++)
24543 if (d->code == fcode)
24544 return ix86_expand_special_args_builtin (d, exp, target);
24546 for (i = 0, d = bdesc_args;
24547 i < ARRAY_SIZE (bdesc_args);
24548 i++, d++)
24549 if (d->code == fcode)
24550 switch (fcode)
24552 case IX86_BUILTIN_FABSQ:
24553 case IX86_BUILTIN_COPYSIGNQ:
24554 if (!TARGET_SSE2)
24555 /* Emit a normal call if SSE2 isn't available. */
24556 return expand_call (exp, target, ignore);
24557 default:
24558 return ix86_expand_args_builtin (d, exp, target);
24561 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24562 if (d->code == fcode)
24563 return ix86_expand_sse_comi (d, exp, target);
24565 for (i = 0, d = bdesc_pcmpestr;
24566 i < ARRAY_SIZE (bdesc_pcmpestr);
24567 i++, d++)
24568 if (d->code == fcode)
24569 return ix86_expand_sse_pcmpestr (d, exp, target);
24571 for (i = 0, d = bdesc_pcmpistr;
24572 i < ARRAY_SIZE (bdesc_pcmpistr);
24573 i++, d++)
24574 if (d->code == fcode)
24575 return ix86_expand_sse_pcmpistr (d, exp, target);
24577 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24578 if (d->code == fcode)
24579 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24580 (enum ix86_builtin_func_type)
24581 d->flag, d->comparison);
24583 gcc_unreachable ();
24586 /* Returns a function decl for a vectorized version of the builtin function
24587 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24588 if it is not available. */
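/* For example, vectorizing a loop over doubles that calls sqrt () with a
   two-element vector type maps BUILT_IN_SQRT to IX86_BUILTIN_SQRTPD
   (sqrtpd), as encoded in the switch below. */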
24590 static tree
24591 ix86_builtin_vectorized_function (tree fndecl, tree type_out,
24592 tree type_in)
24594 enum machine_mode in_mode, out_mode;
24595 int in_n, out_n;
24596 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
24598 if (TREE_CODE (type_out) != VECTOR_TYPE
24599 || TREE_CODE (type_in) != VECTOR_TYPE
24600 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
24601 return NULL_TREE;
24603 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24604 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24605 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24606 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24608 switch (fn)
24610 case BUILT_IN_SQRT:
24611 if (out_mode == DFmode && out_n == 2
24612 && in_mode == DFmode && in_n == 2)
24613 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24614 break;
24616 case BUILT_IN_SQRTF:
24617 if (out_mode == SFmode && out_n == 4
24618 && in_mode == SFmode && in_n == 4)
24619 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24620 break;
24622 case BUILT_IN_LRINT:
24623 if (out_mode == SImode && out_n == 4
24624 && in_mode == DFmode && in_n == 2)
24625 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24626 break;
24628 case BUILT_IN_LRINTF:
24629 if (out_mode == SImode && out_n == 4
24630 && in_mode == SFmode && in_n == 4)
24631 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24632 break;
24634 case BUILT_IN_COPYSIGN:
24635 if (out_mode == DFmode && out_n == 2
24636 && in_mode == DFmode && in_n == 2)
24637 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24638 break;
24640 case BUILT_IN_COPYSIGNF:
24641 if (out_mode == SFmode && out_n == 4
24642 && in_mode == SFmode && in_n == 4)
24643 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24644 break;
24646 default:
24650 /* Dispatch to a handler for a vectorization library. */
24651 if (ix86_veclib_handler)
24652 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24653 type_in);
24655 return NULL_TREE;
24658 /* Handler for an SVML-style interface to
24659 a library with vectorized intrinsics. */
24661 static tree
24662 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24664 char name[20];
24665 tree fntype, new_fndecl, args;
24666 unsigned arity;
24667 const char *bname;
24668 enum machine_mode el_mode, in_mode;
24669 int n, in_n;
24671 /* SVML is suitable for unsafe math only. */
24672 if (!flag_unsafe_math_optimizations)
24673 return NULL_TREE;
24675 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24676 n = TYPE_VECTOR_SUBPARTS (type_out);
24677 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24678 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24679 if (el_mode != in_mode
24680 || n != in_n)
24681 return NULL_TREE;
24683 switch (fn)
24685 case BUILT_IN_EXP:
24686 case BUILT_IN_LOG:
24687 case BUILT_IN_LOG10:
24688 case BUILT_IN_POW:
24689 case BUILT_IN_TANH:
24690 case BUILT_IN_TAN:
24691 case BUILT_IN_ATAN:
24692 case BUILT_IN_ATAN2:
24693 case BUILT_IN_ATANH:
24694 case BUILT_IN_CBRT:
24695 case BUILT_IN_SINH:
24696 case BUILT_IN_SIN:
24697 case BUILT_IN_ASINH:
24698 case BUILT_IN_ASIN:
24699 case BUILT_IN_COSH:
24700 case BUILT_IN_COS:
24701 case BUILT_IN_ACOSH:
24702 case BUILT_IN_ACOS:
24703 if (el_mode != DFmode || n != 2)
24704 return NULL_TREE;
24705 break;
24707 case BUILT_IN_EXPF:
24708 case BUILT_IN_LOGF:
24709 case BUILT_IN_LOG10F:
24710 case BUILT_IN_POWF:
24711 case BUILT_IN_TANHF:
24712 case BUILT_IN_TANF:
24713 case BUILT_IN_ATANF:
24714 case BUILT_IN_ATAN2F:
24715 case BUILT_IN_ATANHF:
24716 case BUILT_IN_CBRTF:
24717 case BUILT_IN_SINHF:
24718 case BUILT_IN_SINF:
24719 case BUILT_IN_ASINHF:
24720 case BUILT_IN_ASINF:
24721 case BUILT_IN_COSHF:
24722 case BUILT_IN_COSF:
24723 case BUILT_IN_ACOSHF:
24724 case BUILT_IN_ACOSF:
24725 if (el_mode != SFmode || n != 4)
24726 return NULL_TREE;
24727 break;
24729 default:
24730 return NULL_TREE;
24733 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24735 if (fn == BUILT_IN_LOGF)
24736 strcpy (name, "vmlsLn4");
24737 else if (fn == BUILT_IN_LOG)
24738 strcpy (name, "vmldLn2");
24739 else if (n == 4)
24741 sprintf (name, "vmls%s", bname+10);
24742 name[strlen (name)-1] = '4';
24744 else
24745 sprintf (name, "vmld%s2", bname+10);
24747 /* Convert to uppercase. */
24748 name[4] &= ~0x20;
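/* Worked example of the mangling above: for BUILT_IN_SINF, bname is
   "__builtin_sinf", so bname+10 is "sinf"; sprintf produces "vmlssinf",
   the last character is rewritten to '4' and name[4] is uppercased,
   yielding "vmlsSin4".  The DFmode variant BUILT_IN_SIN yields "vmldSin2". */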
24750 arity = 0;
24751 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24752 args = TREE_CHAIN (args))
24753 arity++;
24755 if (arity == 1)
24756 fntype = build_function_type_list (type_out, type_in, NULL);
24757 else
24758 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24760 /* Build a function declaration for the vectorized function. */
24761 new_fndecl = build_decl (BUILTINS_LOCATION,
24762 FUNCTION_DECL, get_identifier (name), fntype);
24763 TREE_PUBLIC (new_fndecl) = 1;
24764 DECL_EXTERNAL (new_fndecl) = 1;
24765 DECL_IS_NOVOPS (new_fndecl) = 1;
24766 TREE_READONLY (new_fndecl) = 1;
24768 return new_fndecl;
24771 /* Handler for an ACML-style interface to
24772 a library with vectorized intrinsics. */
24774 static tree
24775 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24777 char name[20] = "__vr.._";
24778 tree fntype, new_fndecl, args;
24779 unsigned arity;
24780 const char *bname;
24781 enum machine_mode el_mode, in_mode;
24782 int n, in_n;
24784 /* ACML is 64-bit only and suitable for unsafe math only, as
24785 it does not correctly support parts of IEEE with the required
24786 precision such as denormals. */
24787 if (!TARGET_64BIT
24788 || !flag_unsafe_math_optimizations)
24789 return NULL_TREE;
24791 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24792 n = TYPE_VECTOR_SUBPARTS (type_out);
24793 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24794 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24795 if (el_mode != in_mode
24796 || n != in_n)
24797 return NULL_TREE;
24799 switch (fn)
24801 case BUILT_IN_SIN:
24802 case BUILT_IN_COS:
24803 case BUILT_IN_EXP:
24804 case BUILT_IN_LOG:
24805 case BUILT_IN_LOG2:
24806 case BUILT_IN_LOG10:
24807 name[4] = 'd';
24808 name[5] = '2';
24809 if (el_mode != DFmode
24810 || n != 2)
24811 return NULL_TREE;
24812 break;
24814 case BUILT_IN_SINF:
24815 case BUILT_IN_COSF:
24816 case BUILT_IN_EXPF:
24817 case BUILT_IN_POWF:
24818 case BUILT_IN_LOGF:
24819 case BUILT_IN_LOG2F:
24820 case BUILT_IN_LOG10F:
24821 name[4] = 's';
24822 name[5] = '4';
24823 if (el_mode != SFmode
24824 || n != 4)
24825 return NULL_TREE;
24826 break;
24828 default:
24829 return NULL_TREE;
24832 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24833 sprintf (name + 7, "%s", bname+10);
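/* Worked example: for BUILT_IN_SIN the template becomes "__vrd2_" and
   bname+10 is "sin", giving "__vrd2_sin"; for BUILT_IN_SINF the result is
   "__vrs4_sinf". */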
24835 arity = 0;
24836 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24837 args = TREE_CHAIN (args))
24838 arity++;
24840 if (arity == 1)
24841 fntype = build_function_type_list (type_out, type_in, NULL);
24842 else
24843 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24845 /* Build a function declaration for the vectorized function. */
24846 new_fndecl = build_decl (BUILTINS_LOCATION,
24847 FUNCTION_DECL, get_identifier (name), fntype);
24848 TREE_PUBLIC (new_fndecl) = 1;
24849 DECL_EXTERNAL (new_fndecl) = 1;
24850 DECL_IS_NOVOPS (new_fndecl) = 1;
24851 TREE_READONLY (new_fndecl) = 1;
24853 return new_fndecl;
24857 /* Returns a decl of a function that implements conversion of an integer vector
24858 into a floating-point vector, or vice-versa. DEST_TYPE and SRC_TYPE
24859 are the types involved when converting according to CODE.
24860 Return NULL_TREE if it is not available. */
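/* For example, a FLOAT_EXPR from V4SImode to V4SFmode returns the decl for
   IX86_BUILTIN_CVTDQ2PS (cvtdq2ps), or IX86_BUILTIN_CVTUDQ2PS when the
   source type is unsigned. */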
24862 static tree
24863 ix86_vectorize_builtin_conversion (unsigned int code,
24864 tree dest_type, tree src_type)
24866 if (! TARGET_SSE2)
24867 return NULL_TREE;
24869 switch (code)
24871 case FLOAT_EXPR:
24872 switch (TYPE_MODE (src_type))
24874 case V4SImode:
24875 switch (TYPE_MODE (dest_type))
24877 case V4SFmode:
24878 return (TYPE_UNSIGNED (src_type)
24879 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24880 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24881 case V4DFmode:
24882 return (TYPE_UNSIGNED (src_type)
24883 ? NULL_TREE
24884 : ix86_builtins[IX86_BUILTIN_CVTDQ2PD256]);
24885 default:
24886 return NULL_TREE;
24888 break;
24889 case V8SImode:
24890 switch (TYPE_MODE (dest_type))
24892 case V8SFmode:
24893 return (TYPE_UNSIGNED (src_type)
24894 ? NULL_TREE
24895 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS]);
24896 default:
24897 return NULL_TREE;
24899 break;
24900 default:
24901 return NULL_TREE;
24904 case FIX_TRUNC_EXPR:
24905 switch (TYPE_MODE (dest_type))
24907 case V4SImode:
24908 switch (TYPE_MODE (src_type))
24910 case V4SFmode:
24911 return (TYPE_UNSIGNED (dest_type)
24912 ? NULL_TREE
24913 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ]);
24914 case V4DFmode:
24915 return (TYPE_UNSIGNED (dest_type)
24916 ? NULL_TREE
24917 : ix86_builtins[IX86_BUILTIN_CVTTPD2DQ256]);
24918 default:
24919 return NULL_TREE;
24921 break;
24923 case V8SImode:
24924 switch (TYPE_MODE (src_type))
24926 case V8SFmode:
24927 return (TYPE_UNSIGNED (dest_type)
24928 ? NULL_TREE
24929 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ256]);
24930 default:
24931 return NULL_TREE;
24933 break;
24935 default:
24936 return NULL_TREE;
24939 default:
24940 return NULL_TREE;
24943 return NULL_TREE;
24946 /* Returns the decl of a target-specific builtin that implements the
24947 reciprocal of the function, or NULL_TREE if not available. */
24949 static tree
24950 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24951 bool sqrt ATTRIBUTE_UNUSED)
24953 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24954 && flag_finite_math_only && !flag_trapping_math
24955 && flag_unsafe_math_optimizations))
24956 return NULL_TREE;
24958 if (md_fn)
24959 /* Machine dependent builtins. */
24960 switch (fn)
24962 /* Vectorized version of sqrt to rsqrt conversion. */
24963 case IX86_BUILTIN_SQRTPS_NR:
24964 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24966 default:
24967 return NULL_TREE;
24969 else
24970 /* Normal builtins. */
24971 switch (fn)
24973 /* Sqrt to rsqrt conversion. */
24974 case BUILT_IN_SQRTF:
24975 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24977 default:
24978 return NULL_TREE;
24982 /* Helper for avx_vpermilps256_operand et al. This is also used by
24983 the expansion functions to turn the parallel back into a mask.
24984 The return value is 0 for no match and the imm8+1 for a match. */
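/* Worked example for V4SFmode: the identity selector (0 1 2 3) gives
   mask = 0<<0 | 1<<2 | 2<<4 | 3<<6 = 0xe4, so the function returns 0xe5,
   i.e. imm8 0xe4 plus one. */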
24987 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24989 unsigned i, nelt = GET_MODE_NUNITS (mode);
24990 unsigned mask = 0;
24991 unsigned char ipar[8];
24993 if (XVECLEN (par, 0) != (int) nelt)
24994 return 0;
24996 /* Validate that all of the elements are constants, and not totally
24997 out of range. Copy the data into an integral array to make the
24998 subsequent checks easier. */
24999 for (i = 0; i < nelt; ++i)
25001 rtx er = XVECEXP (par, 0, i);
25002 unsigned HOST_WIDE_INT ei;
25004 if (!CONST_INT_P (er))
25005 return 0;
25006 ei = INTVAL (er);
25007 if (ei >= nelt)
25008 return 0;
25009 ipar[i] = ei;
25012 switch (mode)
25014 case V4DFmode:
25015 /* In the 256-bit DFmode case, we can only move elements within
25016 a 128-bit lane. */
25017 for (i = 0; i < 2; ++i)
25019 if (ipar[i] >= 2)
25020 return 0;
25021 mask |= ipar[i] << i;
25023 for (i = 2; i < 4; ++i)
25025 if (ipar[i] < 2)
25026 return 0;
25027 mask |= (ipar[i] - 2) << i;
25029 break;
25031 case V8SFmode:
25032 /* In the 256-bit SFmode case, we have full freedom of movement
25033 within the low 128-bit lane, but the high 128-bit lane must
25034 mirror the exact same pattern. */
25035 for (i = 0; i < 4; ++i)
25036 if (ipar[i] + 4 != ipar[i + 4])
25037 return 0;
25038 nelt = 4;
25039 /* FALLTHRU */
25041 case V2DFmode:
25042 case V4SFmode:
25043 /* In the 128-bit case, we've full freedom in the placement of
25044 the elements from the source operand. */
25045 for (i = 0; i < nelt; ++i)
25046 mask |= ipar[i] << (i * (nelt / 2));
25047 break;
25049 default:
25050 gcc_unreachable ();
25053 /* Make sure success has a non-zero value by adding one. */
25054 return mask + 1;
25057 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
25058 the expansion functions to turn the parallel back into a mask.
25059 The return value is 0 for no match and the imm8+1 for a match. */
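/* Worked example for V8SFmode: the selector (0 1 2 3 8 9 10 11), i.e. the
   low lane of each operand, gives mask = 0 | (2 << 4) = 0x20, so the
   function returns 0x21 (imm8 0x20 plus one). */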
25062 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
25064 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
25065 unsigned mask = 0;
25066 unsigned char ipar[8];
25068 if (XVECLEN (par, 0) != (int) nelt)
25069 return 0;
25071 /* Validate that all of the elements are constants, and not totally
25072 out of range. Copy the data into an integral array to make the
25073 subsequent checks easier. */
25074 for (i = 0; i < nelt; ++i)
25076 rtx er = XVECEXP (par, 0, i);
25077 unsigned HOST_WIDE_INT ei;
25079 if (!CONST_INT_P (er))
25080 return 0;
25081 ei = INTVAL (er);
25082 if (ei >= 2 * nelt)
25083 return 0;
25084 ipar[i] = ei;
25087 /* Validate that the halves of the permute are halves. */
25088 for (i = 0; i < nelt2 - 1; ++i)
25089 if (ipar[i] + 1 != ipar[i + 1])
25090 return 0;
25091 for (i = nelt2; i < nelt - 1; ++i)
25092 if (ipar[i] + 1 != ipar[i + 1])
25093 return 0;
25095 /* Reconstruct the mask. */
25096 for (i = 0; i < 2; ++i)
25098 unsigned e = ipar[i * nelt2];
25099 if (e % nelt2)
25100 return 0;
25101 e /= nelt2;
25102 mask |= e << (i * 4);
25105 /* Make sure success has a non-zero value by adding one. */
25106 return mask + 1;
25110 /* Store OPERAND to memory after reload is completed. This means
25111 that we can't easily use assign_stack_local. */
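/* Note (size assumed from the x86-64 SysV ABI, where RED_ZONE_SIZE is 128):
   when a red zone is available the operand is stored at sp - RED_ZONE_SIZE
   without adjusting the stack pointer; otherwise it is pushed with a
   pre-decrement store and popped again by ix86_free_from_memory. */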
25113 ix86_force_to_memory (enum machine_mode mode, rtx operand)
25115 rtx result;
25117 gcc_assert (reload_completed);
25118 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
25120 result = gen_rtx_MEM (mode,
25121 gen_rtx_PLUS (Pmode,
25122 stack_pointer_rtx,
25123 GEN_INT (-RED_ZONE_SIZE)));
25124 emit_move_insn (result, operand);
25126 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
25128 switch (mode)
25130 case HImode:
25131 case SImode:
25132 operand = gen_lowpart (DImode, operand);
25133 /* FALLTHRU */
25134 case DImode:
25135 emit_insn (
25136 gen_rtx_SET (VOIDmode,
25137 gen_rtx_MEM (DImode,
25138 gen_rtx_PRE_DEC (DImode,
25139 stack_pointer_rtx)),
25140 operand));
25141 break;
25142 default:
25143 gcc_unreachable ();
25145 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25147 else
25149 switch (mode)
25151 case DImode:
25153 rtx operands[2];
25154 split_di (&operand, 1, operands, operands + 1);
25155 emit_insn (
25156 gen_rtx_SET (VOIDmode,
25157 gen_rtx_MEM (SImode,
25158 gen_rtx_PRE_DEC (Pmode,
25159 stack_pointer_rtx)),
25160 operands[1]));
25161 emit_insn (
25162 gen_rtx_SET (VOIDmode,
25163 gen_rtx_MEM (SImode,
25164 gen_rtx_PRE_DEC (Pmode,
25165 stack_pointer_rtx)),
25166 operands[0]));
25168 break;
25169 case HImode:
25170 /* Store HImodes as SImodes. */
25171 operand = gen_lowpart (SImode, operand);
25172 /* FALLTHRU */
25173 case SImode:
25174 emit_insn (
25175 gen_rtx_SET (VOIDmode,
25176 gen_rtx_MEM (GET_MODE (operand),
25177 gen_rtx_PRE_DEC (SImode,
25178 stack_pointer_rtx)),
25179 operand));
25180 break;
25181 default:
25182 gcc_unreachable ();
25184 result = gen_rtx_MEM (mode, stack_pointer_rtx);
25186 return result;
25189 /* Free operand from the memory. */
25190 void
25191 ix86_free_from_memory (enum machine_mode mode)
25193 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
25195 int size;
25197 if (mode == DImode || TARGET_64BIT)
25198 size = 8;
25199 else
25200 size = 4;
25201 /* Use LEA to deallocate stack space. In peephole2 it will be converted
25202 to a pop or add instruction if registers are available. */
25203 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
25204 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25205 GEN_INT (size))));
25209 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
25210 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
25211 same. */
25212 static const enum reg_class *
25213 i386_ira_cover_classes (void)
25215 static const enum reg_class sse_fpmath_classes[] = {
25216 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
25218 static const enum reg_class no_sse_fpmath_classes[] = {
25219 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
25222 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
25225 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
25226 QImode must go into class Q_REGS.
25227 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
25228 movdf to do mem-to-mem moves through integer regs. */
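/* For example, reloading a nonzero DFmode CONST_DOUBLE into SSE_REGS
   returns NO_REGS below, which forces the constant into the constant pool
   so it can be loaded with a memory operand. */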
25229 enum reg_class
25230 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
25232 enum machine_mode mode = GET_MODE (x);
25234 /* We're only allowed to return a subclass of CLASS. Many of the
25235 following checks fail for NO_REGS, so eliminate that early. */
25236 if (regclass == NO_REGS)
25237 return NO_REGS;
25239 /* All classes can load zeros. */
25240 if (x == CONST0_RTX (mode))
25241 return regclass;
25243 /* Force constants into memory if we are loading a (nonzero) constant into
25244 an MMX or SSE register. This is because there are no MMX/SSE instructions
25245 to load from a constant. */
25246 if (CONSTANT_P (x)
25247 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
25248 return NO_REGS;
25250 /* Prefer SSE regs only, if we can use them for math. */
25251 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
25252 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
25254 /* Floating-point constants need more complex checks. */
25255 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
25257 /* General regs can load everything. */
25258 if (reg_class_subset_p (regclass, GENERAL_REGS))
25259 return regclass;
25261 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25262 zero above. We only want to wind up preferring 80387 registers if
25263 we plan on doing computation with them. */
25264 if (TARGET_80387
25265 && standard_80387_constant_p (x))
25267 /* Limit class to non-sse. */
25268 if (regclass == FLOAT_SSE_REGS)
25269 return FLOAT_REGS;
25270 if (regclass == FP_TOP_SSE_REGS)
25271 return FP_TOP_REG;
25272 if (regclass == FP_SECOND_SSE_REGS)
25273 return FP_SECOND_REG;
25274 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25275 return regclass;
25278 return NO_REGS;
25281 /* Generally when we see PLUS here, it's the function invariant
25282 (plus soft-fp const_int). Which can only be computed into general
25283 regs. */
25284 if (GET_CODE (x) == PLUS)
25285 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25287 /* QImode constants are easy to load, but non-constant QImode data
25288 must go into Q_REGS. */
25289 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25291 if (reg_class_subset_p (regclass, Q_REGS))
25292 return regclass;
25293 if (reg_class_subset_p (Q_REGS, regclass))
25294 return Q_REGS;
25295 return NO_REGS;
25298 return regclass;
25301 /* Discourage putting floating-point values in SSE registers unless
25302 SSE math is being used, and likewise for the 387 registers. */
25303 enum reg_class
25304 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25306 enum machine_mode mode = GET_MODE (x);
25308 /* Restrict the output reload class to the register bank that we are doing
25309 math on. If we would like not to return a subset of CLASS, reject this
25310 alternative: if reload cannot do this, it will still use its choice. */
25311 mode = GET_MODE (x);
25312 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25313 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25315 if (X87_FLOAT_MODE_P (mode))
25317 if (regclass == FP_TOP_SSE_REGS)
25318 return FP_TOP_REG;
25319 else if (regclass == FP_SECOND_SSE_REGS)
25320 return FP_SECOND_REG;
25321 else
25322 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25325 return regclass;
25328 static enum reg_class
25329 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25330 enum machine_mode mode,
25331 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25333 /* QImode spills from non-QI registers require
25334 intermediate register on 32bit targets. */
25335 if (!in_p && mode == QImode && !TARGET_64BIT
25336 && (rclass == GENERAL_REGS
25337 || rclass == LEGACY_REGS
25338 || rclass == INDEX_REGS))
25340 int regno;
25342 if (REG_P (x))
25343 regno = REGNO (x);
25344 else
25345 regno = -1;
25347 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25348 regno = true_regnum (x);
25350 /* Return Q_REGS if the operand is in memory. */
25351 if (regno == -1)
25352 return Q_REGS;
25355 return NO_REGS;
25358 /* If we are copying between general and FP registers, we need a memory
25359 location. The same is true for SSE and MMX registers.
25361 To optimize register_move_cost performance, allow inline variant.
25363 The macro can't work reliably when one of the CLASSES is a class containing
25364 registers from multiple units (SSE, MMX, integer). We avoid this by never
25365 combining those units in a single alternative in the machine description.
25366 Ensure that this constraint holds to avoid unexpected surprises.
25368 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25369 enforce these sanity checks. */
25371 static inline int
25372 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25373 enum machine_mode mode, int strict)
25375 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25376 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25377 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25378 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25379 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25380 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25382 gcc_assert (!strict);
25383 return true;
25386 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25387 return true;
25389 /* ??? This is a lie. We do have moves between mmx/general, and for
25390 mmx/sse2. But by saying we need secondary memory we discourage the
25391 register allocator from using the mmx registers unless needed. */
25392 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25393 return true;
25395 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25397 /* SSE1 doesn't have any direct moves from other classes. */
25398 if (!TARGET_SSE2)
25399 return true;
25401 /* If the target says that inter-unit moves are more expensive
25402 than moving through memory, then don't generate them. */
25403 if (!TARGET_INTER_UNIT_MOVES)
25404 return true;
25406 /* Between SSE and general, we have moves no larger than word size. */
25407 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25408 return true;
25411 return false;
25415 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25416 enum machine_mode mode, int strict)
25418 return inline_secondary_memory_needed (class1, class2, mode, strict);
25421 /* Return true if the registers in CLASS cannot represent the change from
25422 modes FROM to TO. */
25424 bool
25425 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25426 enum reg_class regclass)
25428 if (from == to)
25429 return false;
25431 /* x87 registers can't do subreg at all, as all values are reformatted
25432 to extended precision. */
25433 if (MAYBE_FLOAT_CLASS_P (regclass))
25434 return true;
25436 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25438 /* Vector registers do not support QI or HImode loads. If we don't
25439 disallow a change to these modes, reload will assume it's ok to
25440 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25441 the vec_dupv4hi pattern. */
25442 if (GET_MODE_SIZE (from) < 4)
25443 return true;
25445 /* Vector registers do not support subreg with nonzero offsets, which
25446 are otherwise valid for integer registers. Since we can't see
25447 whether we have a nonzero offset from here, prohibit all
25448 nonparadoxical subregs changing size. */
25449 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25450 return true;
25453 return false;
25456 /* Return the cost of moving data of mode M between a
25457 register and memory. A value of 2 is the default; this cost is
25458 relative to those in `REGISTER_MOVE_COST'.
25460 This function is used extensively by register_move_cost, which is used to
25461 build tables at startup. Make it inline in this case.
25462 When IN is 2, return the maximum of the in and out move cost.
25464 If moving between registers and memory is more expensive than
25465 between two registers, you should define this macro to express the
25466 relative cost.
25468 Also model the increased cost of moving QImode registers in
25469 non-Q_REGS classes.
25471 static inline int
25472 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25473 int in)
25475 int cost;
25476 if (FLOAT_CLASS_P (regclass))
25478 int index;
25479 switch (mode)
25481 case SFmode:
25482 index = 0;
25483 break;
25484 case DFmode:
25485 index = 1;
25486 break;
25487 case XFmode:
25488 index = 2;
25489 break;
25490 default:
25491 return 100;
25493 if (in == 2)
25494 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25495 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25497 if (SSE_CLASS_P (regclass))
25499 int index;
25500 switch (GET_MODE_SIZE (mode))
25502 case 4:
25503 index = 0;
25504 break;
25505 case 8:
25506 index = 1;
25507 break;
25508 case 16:
25509 index = 2;
25510 break;
25511 default:
25512 return 100;
25514 if (in == 2)
25515 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25516 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25518 if (MMX_CLASS_P (regclass))
25520 int index;
25521 switch (GET_MODE_SIZE (mode))
25523 case 4:
25524 index = 0;
25525 break;
25526 case 8:
25527 index = 1;
25528 break;
25529 default:
25530 return 100;
25532 if (in == 2)
25533 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25534 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25536 switch (GET_MODE_SIZE (mode))
25538 case 1:
25539 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25541 if (!in)
25542 return ix86_cost->int_store[0];
25543 if (TARGET_PARTIAL_REG_DEPENDENCY
25544 && optimize_function_for_speed_p (cfun))
25545 cost = ix86_cost->movzbl_load;
25546 else
25547 cost = ix86_cost->int_load[0];
25548 if (in == 2)
25549 return MAX (cost, ix86_cost->int_store[0]);
25550 return cost;
25552 else
25554 if (in == 2)
25555 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25556 if (in)
25557 return ix86_cost->movzbl_load;
25558 else
25559 return ix86_cost->int_store[0] + 4;
25561 break;
25562 case 2:
25563 if (in == 2)
25564 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25565 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25566 default:
25567 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
25568 if (mode == TFmode)
25569 mode = XFmode;
25570 if (in == 2)
25571 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25572 else if (in)
25573 cost = ix86_cost->int_load[2];
25574 else
25575 cost = ix86_cost->int_store[2];
25576 return (cost * (((int) GET_MODE_SIZE (mode)
25577 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25582 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25584 return inline_memory_move_cost (mode, regclass, in);
25588 /* Return the cost of moving data from a register in class CLASS1 to
25589 one in class CLASS2.
25591 It is not required that the cost always equal 2 when FROM is the same as TO;
25592 on some machines it is expensive to move between registers if they are not
25593 general registers. */
25596 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25597 enum reg_class class2)
25599 /* In case we require secondary memory, compute cost of the store followed
25600 by load. In order to avoid bad register allocation choices, we need
25601 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25603 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25605 int cost = 1;
25607 cost += inline_memory_move_cost (mode, class1, 2);
25608 cost += inline_memory_move_cost (mode, class2, 2);
25610 /* In the case of copying from a general purpose register we may emit multiple
25611 stores followed by a single load, causing a memory size mismatch stall.
25612 Count this as an arbitrarily high cost of 20. */
25613 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25614 cost += 20;
25616 /* In the case of FP/MMX moves, the registers actually overlap, and we
25617 have to switch modes in order to treat them differently. */
25618 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25619 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25620 cost += 20;
25622 return cost;
25625 /* Moves between SSE/MMX and integer unit are expensive. */
25626 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25627 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25629 /* ??? By keeping the returned value relatively high, we limit the number
25630 of moves between integer and MMX/SSE registers for all targets.
25631 Additionally, the high value prevents a problem with x86_modes_tieable_p(),
25632 where integer modes in MMX/SSE registers are not tieable
25633 because of missing QImode and HImode moves to, from or between
25634 MMX/SSE registers. */
25635 return MAX (8, ix86_cost->mmxsse_to_integer);
25637 if (MAYBE_FLOAT_CLASS_P (class1))
25638 return ix86_cost->fp_move;
25639 if (MAYBE_SSE_CLASS_P (class1))
25640 return ix86_cost->sse_move;
25641 if (MAYBE_MMX_CLASS_P (class1))
25642 return ix86_cost->mmx_move;
25643 return 2;
25646 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25648 bool
25649 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25651 /* The flags register, and only the flags register, can hold CCmode values. */
25652 if (CC_REGNO_P (regno))
25653 return GET_MODE_CLASS (mode) == MODE_CC;
25654 if (GET_MODE_CLASS (mode) == MODE_CC
25655 || GET_MODE_CLASS (mode) == MODE_RANDOM
25656 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25657 return 0;
25658 if (FP_REGNO_P (regno))
25659 return VALID_FP_MODE_P (mode);
25660 if (SSE_REGNO_P (regno))
25662 /* We implement the move patterns for all vector modes into and
25663 out of SSE registers, even when no operation instructions
25664 are available. OImode move is available only when AVX is
25665 enabled. */
25666 return ((TARGET_AVX && mode == OImode)
25667 || VALID_AVX256_REG_MODE (mode)
25668 || VALID_SSE_REG_MODE (mode)
25669 || VALID_SSE2_REG_MODE (mode)
25670 || VALID_MMX_REG_MODE (mode)
25671 || VALID_MMX_REG_MODE_3DNOW (mode));
25673 if (MMX_REGNO_P (regno))
25675 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25676 so if the register is available at all, then we can move data of
25677 the given mode into or out of it. */
25678 return (VALID_MMX_REG_MODE (mode)
25679 || VALID_MMX_REG_MODE_3DNOW (mode));
25682 if (mode == QImode)
25684 /* Take care with QImode values - they can be in non-QI regs,
25685 but then they do cause partial register stalls. */
25686 if (regno <= BX_REG || TARGET_64BIT)
25687 return 1;
25688 if (!TARGET_PARTIAL_REG_STALL)
25689 return 1;
25690 return reload_in_progress || reload_completed;
25692 /* We handle both integer and floats in the general purpose registers. */
25693 else if (VALID_INT_MODE_P (mode))
25694 return 1;
25695 else if (VALID_FP_MODE_P (mode))
25696 return 1;
25697 else if (VALID_DFP_MODE_P (mode))
25698 return 1;
25699 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25700 on to use that value in smaller contexts, this can easily force a
25701 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25702 supporting DImode, allow it. */
25703 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25704 return 1;
25706 return 0;
25709 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25710 tieable integer mode. */
25712 static bool
25713 ix86_tieable_integer_mode_p (enum machine_mode mode)
25715 switch (mode)
25717 case HImode:
25718 case SImode:
25719 return true;
25721 case QImode:
25722 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25724 case DImode:
25725 return TARGET_64BIT;
25727 default:
25728 return false;
25732 /* Return true if MODE1 is accessible in a register that can hold MODE2
25733 without copying. That is, all register classes that can hold MODE2
25734 can also hold MODE1. */
25736 bool
25737 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25739 if (mode1 == mode2)
25740 return true;
25742 if (ix86_tieable_integer_mode_p (mode1)
25743 && ix86_tieable_integer_mode_p (mode2))
25744 return true;
25746 /* MODE2 being XFmode implies fp stack or general regs, which means we
25747 can tie any smaller floating point modes to it. Note that we do not
25748 tie this with TFmode. */
25749 if (mode2 == XFmode)
25750 return mode1 == SFmode || mode1 == DFmode;
25752 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25753 that we can tie it with SFmode. */
25754 if (mode2 == DFmode)
25755 return mode1 == SFmode;
25757 /* If MODE2 is only appropriate for an SSE register, then tie with
25758 any other mode acceptable to SSE registers. */
25759 if (GET_MODE_SIZE (mode2) == 16
25760 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25761 return (GET_MODE_SIZE (mode1) == 16
25762 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25764 /* If MODE2 is appropriate for an MMX register, then tie
25765 with any other mode acceptable to MMX registers. */
25766 if (GET_MODE_SIZE (mode2) == 8
25767 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25768 return (GET_MODE_SIZE (mode1) == 8
25769 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25771 return false;
25774 /* Compute a (partial) cost for rtx X. Return true if the complete
25775 cost has been computed, and false if subexpressions should be
25776 scanned. In either case, *TOTAL contains the cost result. */
25778 static bool
25779 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25781 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25782 enum machine_mode mode = GET_MODE (x);
25783 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25785 switch (code)
25787 case CONST_INT:
25788 case CONST:
25789 case LABEL_REF:
25790 case SYMBOL_REF:
25791 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25792 *total = 3;
25793 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25794 *total = 2;
25795 else if (flag_pic && SYMBOLIC_CONST (x)
25796 && (!TARGET_64BIT
25797 || (GET_CODE (x) != LABEL_REF
25798 && (GET_CODE (x) != SYMBOL_REF
25799 || !SYMBOL_REF_LOCAL_P (x)))))
25800 *total = 1;
25801 else
25802 *total = 0;
25803 return true;
25805 case CONST_DOUBLE:
25806 if (mode == VOIDmode)
25807 *total = 0;
25808 else
25809 switch (standard_80387_constant_p (x))
25811 case 1: /* 0.0 */
25812 *total = 1;
25813 break;
25814 default: /* Other constants */
25815 *total = 2;
25816 break;
25817 case 0:
25818 case -1:
25819 /* Start with (MEM (SYMBOL_REF)), since that's where
25820 it'll probably end up. Add a penalty for size. */
25821 *total = (COSTS_N_INSNS (1)
25822 + (flag_pic != 0 && !TARGET_64BIT)
25823 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25824 break;
25826 return true;
25828 case ZERO_EXTEND:
25829 /* The zero extension is often completely free on x86_64, so make
25830 it as cheap as possible. */
25831 if (TARGET_64BIT && mode == DImode
25832 && GET_MODE (XEXP (x, 0)) == SImode)
25833 *total = 1;
25834 else if (TARGET_ZERO_EXTEND_WITH_AND)
25835 *total = cost->add;
25836 else
25837 *total = cost->movzx;
25838 return false;
25840 case SIGN_EXTEND:
25841 *total = cost->movsx;
25842 return false;
25844 case ASHIFT:
25845 if (CONST_INT_P (XEXP (x, 1))
25846 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25848 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25849 if (value == 1)
25851 *total = cost->add;
25852 return false;
25854 if ((value == 2 || value == 3)
25855 && cost->lea <= cost->shift_const)
25857 *total = cost->lea;
25858 return false;
25861 /* FALLTHRU */
25863 case ROTATE:
25864 case ASHIFTRT:
25865 case LSHIFTRT:
25866 case ROTATERT:
25867 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25869 if (CONST_INT_P (XEXP (x, 1)))
25871 if (INTVAL (XEXP (x, 1)) > 32)
25872 *total = cost->shift_const + COSTS_N_INSNS (2);
25873 else
25874 *total = cost->shift_const * 2;
25876 else
25878 if (GET_CODE (XEXP (x, 1)) == AND)
25879 *total = cost->shift_var * 2;
25880 else
25881 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25884 else
25886 if (CONST_INT_P (XEXP (x, 1)))
25887 *total = cost->shift_const;
25888 else
25889 *total = cost->shift_var;
25891 return false;
25893 case MULT:
25894 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25896 /* ??? SSE scalar cost should be used here. */
25897 *total = cost->fmul;
25898 return false;
25900 else if (X87_FLOAT_MODE_P (mode))
25902 *total = cost->fmul;
25903 return false;
25905 else if (FLOAT_MODE_P (mode))
25907 /* ??? SSE vector cost should be used here. */
25908 *total = cost->fmul;
25909 return false;
25911 else
25913 rtx op0 = XEXP (x, 0);
25914 rtx op1 = XEXP (x, 1);
25915 int nbits;
25916 if (CONST_INT_P (XEXP (x, 1)))
25918 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25919 for (nbits = 0; value != 0; value &= value - 1)
25920 nbits++;
25922 else
25923 /* This is arbitrary. */
25924 nbits = 7;
25926 /* Compute costs correctly for widening multiplication. */
25927 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25928 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25929 == GET_MODE_SIZE (mode))
25931 int is_mulwiden = 0;
25932 enum machine_mode inner_mode = GET_MODE (op0);
25934 if (GET_CODE (op0) == GET_CODE (op1))
25935 is_mulwiden = 1, op1 = XEXP (op1, 0);
25936 else if (CONST_INT_P (op1))
25938 if (GET_CODE (op0) == SIGN_EXTEND)
25939 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25940 == INTVAL (op1);
25941 else
25942 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25945 if (is_mulwiden)
25946 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25949 *total = (cost->mult_init[MODE_INDEX (mode)]
25950 + nbits * cost->mult_bit
25951 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
25953 return true;
25956 case DIV:
25957 case UDIV:
25958 case MOD:
25959 case UMOD:
25960 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25961 /* ??? SSE cost should be used here. */
25962 *total = cost->fdiv;
25963 else if (X87_FLOAT_MODE_P (mode))
25964 *total = cost->fdiv;
25965 else if (FLOAT_MODE_P (mode))
25966 /* ??? SSE vector cost should be used here. */
25967 *total = cost->fdiv;
25968 else
25969 *total = cost->divide[MODE_INDEX (mode)];
25970 return false;
25972 case PLUS:
25973 if (GET_MODE_CLASS (mode) == MODE_INT
25974 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25976 if (GET_CODE (XEXP (x, 0)) == PLUS
25977 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25978 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25979 && CONSTANT_P (XEXP (x, 1)))
25981 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25982 if (val == 2 || val == 4 || val == 8)
25984 *total = cost->lea;
25985 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25986 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25987 outer_code, speed);
25988 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25989 return true;
25992 else if (GET_CODE (XEXP (x, 0)) == MULT
25993 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25995 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25996 if (val == 2 || val == 4 || val == 8)
25998 *total = cost->lea;
25999 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26000 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26001 return true;
26004 else if (GET_CODE (XEXP (x, 0)) == PLUS)
26006 *total = cost->lea;
26007 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
26008 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
26009 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
26010 return true;
26013 /* FALLTHRU */
26015 case MINUS:
26016 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26018 /* ??? SSE cost should be used here. */
26019 *total = cost->fadd;
26020 return false;
26022 else if (X87_FLOAT_MODE_P (mode))
26024 *total = cost->fadd;
26025 return false;
26027 else if (FLOAT_MODE_P (mode))
26029 /* ??? SSE vector cost should be used here. */
26030 *total = cost->fadd;
26031 return false;
26033 /* FALLTHRU */
26035 case AND:
26036 case IOR:
26037 case XOR:
26038 if (!TARGET_64BIT && mode == DImode)
26040 *total = (cost->add * 2
26041 + (rtx_cost (XEXP (x, 0), outer_code, speed)
26042 << (GET_MODE (XEXP (x, 0)) != DImode))
26043 + (rtx_cost (XEXP (x, 1), outer_code, speed)
26044 << (GET_MODE (XEXP (x, 1)) != DImode)));
26045 return true;
26047 /* FALLTHRU */
26049 case NEG:
26050 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26052 /* ??? SSE cost should be used here. */
26053 *total = cost->fchs;
26054 return false;
26056 else if (X87_FLOAT_MODE_P (mode))
26058 *total = cost->fchs;
26059 return false;
26061 else if (FLOAT_MODE_P (mode))
26063 /* ??? SSE vector cost should be used here. */
26064 *total = cost->fchs;
26065 return false;
26067 /* FALLTHRU */
26069 case NOT:
26070 if (!TARGET_64BIT && mode == DImode)
26071 *total = cost->add * 2;
26072 else
26073 *total = cost->add;
26074 return false;
26076 case COMPARE:
26077 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
26078 && XEXP (XEXP (x, 0), 1) == const1_rtx
26079 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
26080 && XEXP (x, 1) == const0_rtx)
26082 /* This kind of construct is implemented using test[bwl].
26083 Treat it as if we had an AND. */
26084 *total = (cost->add
26085 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
26086 + rtx_cost (const1_rtx, outer_code, speed));
26087 return true;
26089 return false;
26091 case FLOAT_EXTEND:
26092 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
26093 *total = 0;
26094 return false;
26096 case ABS:
26097 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26098 /* ??? SSE cost should be used here. */
26099 *total = cost->fabs;
26100 else if (X87_FLOAT_MODE_P (mode))
26101 *total = cost->fabs;
26102 else if (FLOAT_MODE_P (mode))
26103 /* ??? SSE vector cost should be used here. */
26104 *total = cost->fabs;
26105 return false;
26107 case SQRT:
26108 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
26109 /* ??? SSE cost should be used here. */
26110 *total = cost->fsqrt;
26111 else if (X87_FLOAT_MODE_P (mode))
26112 *total = cost->fsqrt;
26113 else if (FLOAT_MODE_P (mode))
26114 /* ??? SSE vector cost should be used here. */
26115 *total = cost->fsqrt;
26116 return false;
26118 case UNSPEC:
26119 if (XINT (x, 1) == UNSPEC_TP)
26120 *total = 0;
26121 return false;
26123 case VEC_SELECT:
26124 case VEC_CONCAT:
26125 case VEC_MERGE:
26126 case VEC_DUPLICATE:
26127 /* ??? Assume all of these vector manipulation patterns are
26128 recognizable. In which case they all pretty much have the
26129 same cost. */
26130 *total = COSTS_N_INSNS (1);
26131 return true;
26133 default:
26134 return false;
26138 #if TARGET_MACHO
26140 static int current_machopic_label_num;
26142 /* Given a symbol name and its associated stub, write out the
26143 definition of the stub. */
26145 void
26146 machopic_output_stub (FILE *file, const char *symb, const char *stub)
26148 unsigned int length;
26149 char *binder_name, *symbol_name, lazy_ptr_name[32];
26150 int label = ++current_machopic_label_num;
26152 /* For 64-bit we shouldn't get here. */
26153 gcc_assert (!TARGET_64BIT);
26155 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
26156 symb = (*targetm.strip_name_encoding) (symb);
26158 length = strlen (stub);
26159 binder_name = XALLOCAVEC (char, length + 32);
26160 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
26162 length = strlen (symb);
26163 symbol_name = XALLOCAVEC (char, length + 32);
26164 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26166 sprintf (lazy_ptr_name, "L%d$lz", label);
26168 if (MACHOPIC_PURE)
26169 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
26170 else
26171 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
26173 fprintf (file, "%s:\n", stub);
26174 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26176 if (MACHOPIC_PURE)
26178 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
26179 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
26180 fprintf (file, "\tjmp\t*%%edx\n");
26182 else
26183 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
26185 fprintf (file, "%s:\n", binder_name);
26187 if (MACHOPIC_PURE)
26189 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
26190 fputs ("\tpushl\t%eax\n", file);
26192 else
26193 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
26195 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
26197 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
26198 fprintf (file, "%s:\n", lazy_ptr_name);
26199 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
26200 fprintf (file, ASM_LONG "%s\n", binder_name);
26202 #endif /* TARGET_MACHO */
26204 /* Order the registers for the register allocator. */
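/* The resulting order is, roughly: call-clobbered general registers,
   then call-saved general registers, then either the x87 stack registers
   or the SSE registers first depending on whether x87 is used for FP
   math, then the MMX registers.  The remaining slots are zero-filled
   because those registers are never allocated. */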
26206 void
26207 x86_order_regs_for_local_alloc (void)
26209 int pos = 0;
26210 int i;
26212 /* First allocate the local general purpose registers. */
26213 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26214 if (GENERAL_REGNO_P (i) && call_used_regs[i])
26215 reg_alloc_order [pos++] = i;
26217 /* Global general purpose registers. */
26218 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
26219 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
26220 reg_alloc_order [pos++] = i;
26222 /* x87 registers come first in case we are doing FP math
26223 using them. */
26224 if (!TARGET_SSE_MATH)
26225 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26226 reg_alloc_order [pos++] = i;
26228 /* SSE registers. */
26229 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
26230 reg_alloc_order [pos++] = i;
26231 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
26232 reg_alloc_order [pos++] = i;
26234 /* x87 registers. */
26235 if (TARGET_SSE_MATH)
26236 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
26237 reg_alloc_order [pos++] = i;
26239 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
26240 reg_alloc_order [pos++] = i;
26242 /* Initialize the rest of the array, as we do not allocate some
26243 registers at all. */
26244 while (pos < FIRST_PSEUDO_REGISTER)
26245 reg_alloc_order [pos++] = 0;
26248 /* Handle an "ms_abi" or "sysv_abi" attribute; arguments as in
26249 struct attribute_spec.handler. */
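/* An illustrative use of these attributes on x86_64:

	void f (int, int) __attribute__ ((ms_abi));
	void g (int, int) __attribute__ ((sysv_abi));

   Combining both attributes on the same type is rejected below. */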
26250 static tree
26251 ix86_handle_abi_attribute (tree *node, tree name,
26252 tree args ATTRIBUTE_UNUSED,
26253 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26255 if (TREE_CODE (*node) != FUNCTION_TYPE
26256 && TREE_CODE (*node) != METHOD_TYPE
26257 && TREE_CODE (*node) != FIELD_DECL
26258 && TREE_CODE (*node) != TYPE_DECL)
26260 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26261 name);
26262 *no_add_attrs = true;
26263 return NULL_TREE;
26265 if (!TARGET_64BIT)
26267 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26268 name);
26269 *no_add_attrs = true;
26270 return NULL_TREE;
26273 /* Reject conflicting ms_abi / sysv_abi attributes on the same type. */
26274 if (is_attribute_p ("ms_abi", name))
26276 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26278 error ("ms_abi and sysv_abi attributes are not compatible");
26281 return NULL_TREE;
26283 else if (is_attribute_p ("sysv_abi", name))
26285 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26287 error ("ms_abi and sysv_abi attributes are not compatible");
26290 return NULL_TREE;
26293 return NULL_TREE;
26296 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26297 struct attribute_spec.handler. */
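/* Illustrative use: the attribute selects the layout rules applied to a
   record or union type, e.g.

	struct s { char c; int i; } __attribute__ ((ms_struct));

   A type cannot carry both ms_struct and gcc_struct; that case is
   diagnosed below. */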
26298 static tree
26299 ix86_handle_struct_attribute (tree *node, tree name,
26300 tree args ATTRIBUTE_UNUSED,
26301 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26303 tree *type = NULL;
26304 if (DECL_P (*node))
26306 if (TREE_CODE (*node) == TYPE_DECL)
26307 type = &TREE_TYPE (*node);
26309 else
26310 type = node;
26312 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26313 || TREE_CODE (*type) == UNION_TYPE)))
26315 warning (OPT_Wattributes, "%qE attribute ignored",
26316 name);
26317 *no_add_attrs = true;
26320 else if ((is_attribute_p ("ms_struct", name)
26321 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26322 || ((is_attribute_p ("gcc_struct", name)
26323 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26325 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26326 name);
26327 *no_add_attrs = true;
26330 return NULL_TREE;
26333 static tree
26334 ix86_handle_fndecl_attribute (tree *node, tree name,
26335 tree args ATTRIBUTE_UNUSED,
26336 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26338 if (TREE_CODE (*node) != FUNCTION_DECL)
26340 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26341 name);
26342 *no_add_attrs = true;
26343 return NULL_TREE;
26346 if (TARGET_64BIT)
26348 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26349 name);
26350 return NULL_TREE;
26353 #ifndef HAVE_AS_IX86_SWAP
26354 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26355 #endif
26357 return NULL_TREE;
26360 static bool
26361 ix86_ms_bitfield_layout_p (const_tree record_type)
26363 return (TARGET_MS_BITFIELD_LAYOUT &&
26364 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26365 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26368 /* Returns an expression indicating where the this parameter is
26369 located on entry to the FUNCTION. */
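/* Roughly: on 64-bit targets THIS arrives in the first integer argument
   register, or in the second one when the value is returned via a hidden
   aggregate pointer.  On 32-bit targets it is usually in ECX for
   fastcall/thiscall, in EAX or EDX for regparm functions, and otherwise
   on the stack at 4(%esp), or 8(%esp) when a hidden aggregate return
   pointer is pushed first. */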
26371 static rtx
26372 x86_this_parameter (tree function)
26374 tree type = TREE_TYPE (function);
26375 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26376 int nregs;
26378 if (TARGET_64BIT)
26380 const int *parm_regs;
26382 if (ix86_function_type_abi (type) == MS_ABI)
26383 parm_regs = x86_64_ms_abi_int_parameter_registers;
26384 else
26385 parm_regs = x86_64_int_parameter_registers;
26386 return gen_rtx_REG (DImode, parm_regs[aggr]);
26389 nregs = ix86_function_regparm (type, function);
26391 if (nregs > 0 && !stdarg_p (type))
26393 int regno;
26395 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26396 regno = aggr ? DX_REG : CX_REG;
26397 else if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (type)))
26399 regno = CX_REG;
26400 if (aggr)
26401 return gen_rtx_MEM (SImode,
26402 plus_constant (stack_pointer_rtx, 4));
26404 else
26406 regno = AX_REG;
26407 if (aggr)
26409 regno = DX_REG;
26410 if (nregs == 1)
26411 return gen_rtx_MEM (SImode,
26412 plus_constant (stack_pointer_rtx, 4));
26415 return gen_rtx_REG (SImode, regno);
26418 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26421 /* Determine whether x86_output_mi_thunk can succeed. */
26423 static bool
26424 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26425 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26426 HOST_WIDE_INT vcall_offset, const_tree function)
26428 /* 64-bit can handle anything. */
26429 if (TARGET_64BIT)
26430 return true;
26432 /* For 32-bit, everything's fine if we have one free register. */
26433 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26434 return true;
26436 /* Need a free register for vcall_offset. */
26437 if (vcall_offset)
26438 return false;
26440 /* Need a free register for GOT references. */
26441 if (flag_pic && !(*targetm.binds_local_p) (function))
26442 return false;
26444 /* Otherwise ok. */
26445 return true;
26448 /* Output the assembler code for a thunk function. THUNK_DECL is the
26449 declaration for the thunk function itself, FUNCTION is the decl for
26450 the target function. DELTA is an immediate constant offset to be
26451 added to THIS. If VCALL_OFFSET is nonzero, the word at
26452 *(*this + vcall_offset) should be added to THIS. */
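/* For the common 32-bit non-PIC case the output is roughly (register
   choice and operand sizes vary with the calling convention):

	addl	$DELTA, %ecx			# adjust THIS
	movl	(%ecx), %eax			# only when VCALL_OFFSET != 0:
	addl	VCALL_OFFSET(%eax), %ecx	#   add *(*this + vcall_offset)
	jmp	function			# tail call the real function
   */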
26454 static void
26455 x86_output_mi_thunk (FILE *file,
26456 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26457 HOST_WIDE_INT vcall_offset, tree function)
26459 rtx xops[3];
26460 rtx this_param = x86_this_parameter (function);
26461 rtx this_reg, tmp;
26463 /* Make sure unwind info is emitted for the thunk if needed. */
26464 final_start_function (emit_barrier (), file, 1);
26466 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26467 pull it in now and let DELTA benefit. */
26468 if (REG_P (this_param))
26469 this_reg = this_param;
26470 else if (vcall_offset)
26472 /* Put the this parameter into %eax. */
26473 xops[0] = this_param;
26474 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26475 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26477 else
26478 this_reg = NULL_RTX;
26480 /* Adjust the this parameter by a fixed constant. */
26481 if (delta)
26483 xops[0] = GEN_INT (delta);
26484 xops[1] = this_reg ? this_reg : this_param;
26485 if (TARGET_64BIT)
26487 if (!x86_64_general_operand (xops[0], DImode))
26489 tmp = gen_rtx_REG (DImode, R10_REG);
26490 xops[1] = tmp;
26491 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26492 xops[0] = tmp;
26493 xops[1] = this_param;
26495 if (x86_maybe_negate_const_int (&xops[0], DImode))
26496 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26497 else
26498 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26500 else if (x86_maybe_negate_const_int (&xops[0], SImode))
26501 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26502 else
26503 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26506 /* Adjust the this parameter by a value stored in the vtable. */
26507 if (vcall_offset)
26509 if (TARGET_64BIT)
26510 tmp = gen_rtx_REG (DImode, R10_REG);
26511 else
26513 int tmp_regno = CX_REG;
26514 if (lookup_attribute ("fastcall",
26515 TYPE_ATTRIBUTES (TREE_TYPE (function)))
26516 || lookup_attribute ("thiscall",
26517 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26518 tmp_regno = AX_REG;
26519 tmp = gen_rtx_REG (SImode, tmp_regno);
26522 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26523 xops[1] = tmp;
26524 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26526 /* Adjust the this parameter. */
26527 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26528 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26530 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26531 xops[0] = GEN_INT (vcall_offset);
26532 xops[1] = tmp2;
26533 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26534 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26536 xops[1] = this_reg;
26537 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26540 /* If necessary, drop THIS back to its stack slot. */
26541 if (this_reg && this_reg != this_param)
26543 xops[0] = this_reg;
26544 xops[1] = this_param;
26545 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26548 xops[0] = XEXP (DECL_RTL (function), 0);
26549 if (TARGET_64BIT)
26551 if (!flag_pic || (*targetm.binds_local_p) (function))
26552 output_asm_insn ("jmp\t%P0", xops);
26553 /* All thunks should be in the same object as their target,
26554 and thus binds_local_p should be true. */
26555 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26556 gcc_unreachable ();
26557 else
26559 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26560 tmp = gen_rtx_CONST (Pmode, tmp);
26561 tmp = gen_rtx_MEM (QImode, tmp);
26562 xops[0] = tmp;
26563 output_asm_insn ("jmp\t%A0", xops);
26566 else
26568 if (!flag_pic || (*targetm.binds_local_p) (function))
26569 output_asm_insn ("jmp\t%P0", xops);
26570 else
26571 #if TARGET_MACHO
26572 if (TARGET_MACHO)
26574 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26575 tmp = (gen_rtx_SYMBOL_REF
26576 (Pmode,
26577 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26578 tmp = gen_rtx_MEM (QImode, tmp);
26579 xops[0] = tmp;
26580 output_asm_insn ("jmp\t%0", xops);
26582 else
26583 #endif /* TARGET_MACHO */
26585 tmp = gen_rtx_REG (SImode, CX_REG);
26586 output_set_got (tmp, NULL_RTX);
26588 xops[1] = tmp;
26589 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26590 output_asm_insn ("jmp\t{*}%1", xops);
26593 final_end_function ();
26596 static void
26597 x86_file_start (void)
26599 default_file_start ();
26600 #if TARGET_MACHO
26601 darwin_file_start ();
26602 #endif
26603 if (X86_FILE_START_VERSION_DIRECTIVE)
26604 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26605 if (X86_FILE_START_FLTUSED)
26606 fputs ("\t.global\t__fltused\n", asm_out_file);
26607 if (ix86_asm_dialect == ASM_INTEL)
26608 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26611 int
26612 x86_field_alignment (tree field, int computed)
26614 enum machine_mode mode;
26615 tree type = TREE_TYPE (field);
26617 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26618 return computed;
26619 mode = TYPE_MODE (strip_array_types (type));
26620 if (mode == DFmode || mode == DCmode
26621 || GET_MODE_CLASS (mode) == MODE_INT
26622 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26623 return MIN (32, computed);
26624 return computed;
26627 /* Output assembler code to FILE to increment profiler label # LABELNO
26628 for profiling a function entry. */
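/* For example, on 64-bit SysV with -fpic this emits roughly
	call	*mcount@GOTPCREL(%rip)
   and in the simplest 32-bit non-PIC case just
	call	mcount
   (the actual symbol is MCOUNT_NAME, and a profile counter load may
   precede the call when NO_PROFILE_COUNTERS is not defined). */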
26629 void
26630 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26632 if (TARGET_64BIT)
26634 #ifndef NO_PROFILE_COUNTERS
26635 fprintf (file, "\tleaq\t" LPREFIX "P%d(%%rip),%%r11\n", labelno);
26636 #endif
26638 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26639 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26640 else
26641 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26643 else if (flag_pic)
26645 #ifndef NO_PROFILE_COUNTERS
26646 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26647 labelno);
26648 #endif
26649 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26651 else
26653 #ifndef NO_PROFILE_COUNTERS
26654 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26655 labelno);
26656 #endif
26657 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26661 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26662 /* We don't have exact information about the insn sizes, but we may assume
26663 quite safely that we are informed about all 1 byte insns and memory
26664 address sizes. This is enough to eliminate unnecessary padding in
26665 99% of cases. */
26667 static int
26668 min_insn_size (rtx insn)
26670 int l = 0, len;
26672 if (!INSN_P (insn) || !active_insn_p (insn))
26673 return 0;
26675 /* Discard alignments we've emitted and jump table data. */
26676 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26677 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26678 return 0;
26679 if (JUMP_TABLE_DATA_P (insn))
26680 return 0;
26682 /* Important case - calls are always 5 bytes.
26683 It is common to have many calls in a row. */
26684 if (CALL_P (insn)
26685 && symbolic_reference_mentioned_p (PATTERN (insn))
26686 && !SIBLING_CALL_P (insn))
26687 return 5;
26688 len = get_attr_length (insn);
26689 if (len <= 1)
26690 return 1;
26692 /* For normal instructions we rely on get_attr_length being exact,
26693 with a few exceptions. */
26694 if (!JUMP_P (insn))
26696 enum attr_type type = get_attr_type (insn);
26698 switch (type)
26700 case TYPE_MULTI:
26701 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26702 || asm_noperands (PATTERN (insn)) >= 0)
26703 return 0;
26704 break;
26705 case TYPE_OTHER:
26706 case TYPE_FCMP:
26707 break;
26708 default:
26709 /* Otherwise trust get_attr_length. */
26710 return len;
26713 l = get_attr_length_address (insn);
26714 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26715 l = 4;
26717 if (l)
26718 return 1+l;
26719 else
26720 return 2;
26723 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a
26724 16 byte window. */
26726 static void
26727 ix86_avoid_jump_mispredicts (void)
26729 rtx insn, start = get_insns ();
26730 int nbytes = 0, njumps = 0;
26731 int isjump = 0;
26733 /* Look for all minimal intervals of instructions containing 4 jumps.
26734 The intervals are bounded by START and INSN. NBYTES is the total
26735 size of instructions in the interval including INSN and not including
26736 START. When the NBYTES is smaller than 16 bytes, it is possible
26737 that the end of START and INSN ends up in the same 16byte page.
26739 The smallest offset in the page at which INSN can start is the case where
26740 START ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
26741 We add a p2align to the 16 byte window with max skip of 15 - NBYTES + sizeof (INSN). */
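/* Worked example: if the interval already holds three jump/call insns
   and INSN is itself a jump, the interval start is advanced past the
   first of those jumps.  If the remaining interval is still shorter
   than 16 bytes, all four jumps could land in one 16 byte page, so a
   pad of 15 - NBYTES + min_insn_size (INSN) bytes is emitted before
   INSN, pushing it into the next 16 byte window. */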
26743 for (insn = start; insn; insn = NEXT_INSN (insn))
26745 int min_size;
26747 if (LABEL_P (insn))
26749 int align = label_to_alignment (insn);
26750 int max_skip = label_to_max_skip (insn);
26752 if (max_skip > 15)
26753 max_skip = 15;
26754 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26755 already in the current 16 byte page, because otherwise
26756 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26757 bytes to reach the 16 byte boundary. */
26758 if (align <= 0
26759 || (align <= 3 && max_skip != (1 << align) - 1))
26760 max_skip = 0;
26761 if (dump_file)
26762 fprintf (dump_file, "Label %i with max_skip %i\n",
26763 INSN_UID (insn), max_skip);
26764 if (max_skip)
26766 while (nbytes + max_skip >= 16)
26768 start = NEXT_INSN (start);
26769 if ((JUMP_P (start)
26770 && GET_CODE (PATTERN (start)) != ADDR_VEC
26771 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26772 || CALL_P (start))
26773 njumps--, isjump = 1;
26774 else
26775 isjump = 0;
26776 nbytes -= min_insn_size (start);
26779 continue;
26782 min_size = min_insn_size (insn);
26783 nbytes += min_size;
26784 if (dump_file)
26785 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26786 INSN_UID (insn), min_size);
26787 if ((JUMP_P (insn)
26788 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26789 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26790 || CALL_P (insn))
26791 njumps++;
26792 else
26793 continue;
26795 while (njumps > 3)
26797 start = NEXT_INSN (start);
26798 if ((JUMP_P (start)
26799 && GET_CODE (PATTERN (start)) != ADDR_VEC
26800 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26801 || CALL_P (start))
26802 njumps--, isjump = 1;
26803 else
26804 isjump = 0;
26805 nbytes -= min_insn_size (start);
26807 gcc_assert (njumps >= 0);
26808 if (dump_file)
26809 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26810 INSN_UID (start), INSN_UID (insn), nbytes);
26812 if (njumps == 3 && isjump && nbytes < 16)
26814 int padsize = 15 - nbytes + min_insn_size (insn);
26816 if (dump_file)
26817 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26818 INSN_UID (insn), padsize);
26819 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
26823 #endif
26825 /* AMD Athlon works faster
26826 when RET is not the destination of a conditional jump or directly preceded
26827 by another jump instruction. We avoid the penalty by inserting a NOP just
26828 before the RET instructions in such cases. */
26829 static void
26830 ix86_pad_returns (void)
26832 edge e;
26833 edge_iterator ei;
26835 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26837 basic_block bb = e->src;
26838 rtx ret = BB_END (bb);
26839 rtx prev;
26840 bool replace = false;
26842 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26843 || optimize_bb_for_size_p (bb))
26844 continue;
26845 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26846 if (active_insn_p (prev) || LABEL_P (prev))
26847 break;
26848 if (prev && LABEL_P (prev))
26850 edge e;
26851 edge_iterator ei;
26853 FOR_EACH_EDGE (e, ei, bb->preds)
26854 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26855 && !(e->flags & EDGE_FALLTHRU))
26856 replace = true;
26858 if (!replace)
26860 prev = prev_active_insn (ret);
26861 if (prev
26862 && ((JUMP_P (prev) && any_condjump_p (prev))
26863 || CALL_P (prev)))
26864 replace = true;
26865 /* Empty functions get a branch mispredict even when the jump destination
26866 is not visible to us. */
26867 if (!prev && !optimize_function_for_size_p (cfun))
26868 replace = true;
26870 if (replace)
26872 emit_jump_insn_before (gen_return_internal_long (), ret);
26873 delete_insn (ret);
26878 /* Implement machine specific optimizations. We implement padding of returns
26879 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
26880 static void
26881 ix86_reorg (void)
26883 if (optimize && optimize_function_for_speed_p (cfun))
26885 if (TARGET_PAD_RETURNS)
26886 ix86_pad_returns ();
26887 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26888 if (TARGET_FOUR_JUMP_LIMIT)
26889 ix86_avoid_jump_mispredicts ();
26890 #endif
26894 /* Return nonzero when a QImode register that must be represented via a REX
26895 prefix is used. */
26896 bool
26897 x86_extended_QIreg_mentioned_p (rtx insn)
26899 int i;
26900 extract_insn_cached (insn);
26901 for (i = 0; i < recog_data.n_operands; i++)
26902 if (REG_P (recog_data.operand[i])
26903 && REGNO (recog_data.operand[i]) > BX_REG)
26904 return true;
26905 return false;
26908 /* Return nonzero when P points to a register encoded via a REX prefix.
26909 Called via for_each_rtx. */
26910 static int
26911 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26913 unsigned int regno;
26914 if (!REG_P (*p))
26915 return 0;
26916 regno = REGNO (*p);
26917 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26920 /* Return true when INSN mentions a register that must be encoded using a
26921 REX prefix. */
26922 bool
26923 x86_extended_reg_mentioned_p (rtx insn)
26925 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26926 extended_reg_mentioned_1, NULL);
26929 /* If profitable, negate (without causing overflow) integer constant
26930 of mode MODE at location LOC. Return true in this case. */
26931 bool
26932 x86_maybe_negate_const_int (rtx *loc, enum machine_mode mode)
26934 HOST_WIDE_INT val;
26936 if (!CONST_INT_P (*loc))
26937 return false;
26939 switch (mode)
26941 case DImode:
26942 /* DImode x86_64 constants must fit in 32 bits. */
26943 gcc_assert (x86_64_immediate_operand (*loc, mode));
26945 mode = SImode;
26946 break;
26948 case SImode:
26949 case HImode:
26950 case QImode:
26951 break;
26953 default:
26954 gcc_unreachable ();
26957 /* Avoid overflows. */
26958 if (mode_signbit_p (mode, *loc))
26959 return false;
26961 val = INTVAL (*loc);
26963 /* Make things pretty: prefer `subl $4,%eax' over `addl $-4,%eax'.
26964 Exception: -128 encodes smaller than 128, so swap the sign and the op. */
26965 if ((val < 0 && val != -128)
26966 || val == 128)
26968 *loc = GEN_INT (-val);
26969 return true;
26972 return false;
26975 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26976 optabs would emit if we didn't have TFmode patterns. */
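/* The expansion below is equivalent to this sketch (for an unsigned IN):

	if ((signed) in >= 0)
	  result = (FLOAT) in;		-- plain signed conversion
	else
	  {
	    i0 = (in >> 1) | (in & 1);	-- halve, keeping the low bit so
	    result = (FLOAT) i0;	   the final rounding comes out right
	    result = result + result;	-- scale back up
	  }
   */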
26978 void
26979 x86_emit_floatuns (rtx operands[2])
26981 rtx neglab, donelab, i0, i1, f0, in, out;
26982 enum machine_mode mode, inmode;
26984 inmode = GET_MODE (operands[1]);
26985 gcc_assert (inmode == SImode || inmode == DImode);
26987 out = operands[0];
26988 in = force_reg (inmode, operands[1]);
26989 mode = GET_MODE (out);
26990 neglab = gen_label_rtx ();
26991 donelab = gen_label_rtx ();
26992 f0 = gen_reg_rtx (mode);
26994 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26996 expand_float (out, in, 0);
26998 emit_jump_insn (gen_jump (donelab));
26999 emit_barrier ();
27001 emit_label (neglab);
27003 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
27004 1, OPTAB_DIRECT);
27005 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
27006 1, OPTAB_DIRECT);
27007 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
27009 expand_float (f0, i0, 0);
27011 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
27013 emit_label (donelab);
27016 /* AVX does not support 32-byte integer vector operations,
27017 thus the longest vector we are faced with is V16QImode. */
27018 #define MAX_VECT_LEN 16
27020 struct expand_vec_perm_d
27022 rtx target, op0, op1;
27023 unsigned char perm[MAX_VECT_LEN];
27024 enum machine_mode vmode;
27025 unsigned char nelt;
27026 bool testing_p;
27029 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
27030 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
27032 /* Get a vector mode of the same size as the original but with elements
27033 twice as wide. This is only guaranteed to apply to integral vectors. */
27035 static inline enum machine_mode
27036 get_mode_wider_vector (enum machine_mode o)
27038 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
27039 enum machine_mode n = GET_MODE_WIDER_MODE (o);
27040 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
27041 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
27042 return n;
27045 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27046 with all elements equal to VAR. Return true if successful. */
27048 static bool
27049 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
27050 rtx target, rtx val)
27052 bool ok;
27054 switch (mode)
27056 case V2SImode:
27057 case V2SFmode:
27058 if (!mmx_ok)
27059 return false;
27060 /* FALLTHRU */
27062 case V4DFmode:
27063 case V4DImode:
27064 case V8SFmode:
27065 case V8SImode:
27066 case V2DFmode:
27067 case V2DImode:
27068 case V4SFmode:
27069 case V4SImode:
27071 rtx insn, dup;
27073 /* First attempt to recognize VAL as-is. */
27074 dup = gen_rtx_VEC_DUPLICATE (mode, val);
27075 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
27076 if (recog_memoized (insn) < 0)
27078 rtx seq;
27079 /* If that fails, force VAL into a register. */
27081 start_sequence ();
27082 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
27083 seq = get_insns ();
27084 end_sequence ();
27085 if (seq)
27086 emit_insn_before (seq, insn);
27088 ok = recog_memoized (insn) >= 0;
27089 gcc_assert (ok);
27092 return true;
27094 case V4HImode:
27095 if (!mmx_ok)
27096 return false;
27097 if (TARGET_SSE || TARGET_3DNOW_A)
27099 rtx x;
27101 val = gen_lowpart (SImode, val);
27102 x = gen_rtx_TRUNCATE (HImode, val);
27103 x = gen_rtx_VEC_DUPLICATE (mode, x);
27104 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27105 return true;
27107 goto widen;
27109 case V8QImode:
27110 if (!mmx_ok)
27111 return false;
27112 goto widen;
27114 case V8HImode:
27115 if (TARGET_SSE2)
27117 struct expand_vec_perm_d dperm;
27118 rtx tmp1, tmp2;
27120 permute:
27121 memset (&dperm, 0, sizeof (dperm));
27122 dperm.target = target;
27123 dperm.vmode = mode;
27124 dperm.nelt = GET_MODE_NUNITS (mode);
27125 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
27127 /* Extend to SImode using a paradoxical SUBREG. */
27128 tmp1 = gen_reg_rtx (SImode);
27129 emit_move_insn (tmp1, gen_lowpart (SImode, val));
27131 /* Insert the SImode value as low element of a V4SImode vector. */
27132 tmp2 = gen_lowpart (V4SImode, dperm.op0);
27133 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
27135 ok = (expand_vec_perm_1 (&dperm)
27136 || expand_vec_perm_broadcast_1 (&dperm));
27137 gcc_assert (ok);
27138 return ok;
27140 goto widen;
27142 case V16QImode:
27143 if (TARGET_SSE2)
27144 goto permute;
27145 goto widen;
27147 widen:
27148 /* Replicate the value once into the next wider mode and recurse. */
27150 enum machine_mode smode, wsmode, wvmode;
27151 rtx x;
27153 smode = GET_MODE_INNER (mode);
27154 wvmode = get_mode_wider_vector (mode);
27155 wsmode = GET_MODE_INNER (wvmode);
27157 val = convert_modes (wsmode, smode, val, true);
27158 x = expand_simple_binop (wsmode, ASHIFT, val,
27159 GEN_INT (GET_MODE_BITSIZE (smode)),
27160 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27161 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
27163 x = gen_lowpart (wvmode, target);
27164 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
27165 gcc_assert (ok);
27166 return ok;
27169 case V16HImode:
27170 case V32QImode:
27172 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
27173 rtx x = gen_reg_rtx (hvmode);
27175 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
27176 gcc_assert (ok);
27178 x = gen_rtx_VEC_CONCAT (mode, x, x);
27179 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27181 return true;
27183 default:
27184 return false;
27188 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27189 whose ONE_VAR element is VAR, and other elements are zero. Return true
27190 if successful. */
27192 static bool
27193 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
27194 rtx target, rtx var, int one_var)
27196 enum machine_mode vsimode;
27197 rtx new_target;
27198 rtx x, tmp;
27199 bool use_vector_set = false;
27201 switch (mode)
27203 case V2DImode:
27204 /* For SSE4.1, we normally use vector set. But if the second
27205 element is zero and inter-unit moves are OK, we use movq
27206 instead. */
27207 use_vector_set = (TARGET_64BIT
27208 && TARGET_SSE4_1
27209 && !(TARGET_INTER_UNIT_MOVES
27210 && one_var == 0));
27211 break;
27212 case V16QImode:
27213 case V4SImode:
27214 case V4SFmode:
27215 use_vector_set = TARGET_SSE4_1;
27216 break;
27217 case V8HImode:
27218 use_vector_set = TARGET_SSE2;
27219 break;
27220 case V4HImode:
27221 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
27222 break;
27223 case V32QImode:
27224 case V16HImode:
27225 case V8SImode:
27226 case V8SFmode:
27227 case V4DFmode:
27228 use_vector_set = TARGET_AVX;
27229 break;
27230 case V4DImode:
27231 /* Use ix86_expand_vector_set in 64bit mode only. */
27232 use_vector_set = TARGET_AVX && TARGET_64BIT;
27233 break;
27234 default:
27235 break;
27238 if (use_vector_set)
27240 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
27241 var = force_reg (GET_MODE_INNER (mode), var);
27242 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27243 return true;
27246 switch (mode)
27248 case V2SFmode:
27249 case V2SImode:
27250 if (!mmx_ok)
27251 return false;
27252 /* FALLTHRU */
27254 case V2DFmode:
27255 case V2DImode:
27256 if (one_var != 0)
27257 return false;
27258 var = force_reg (GET_MODE_INNER (mode), var);
27259 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
27260 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27261 return true;
27263 case V4SFmode:
27264 case V4SImode:
27265 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
27266 new_target = gen_reg_rtx (mode);
27267 else
27268 new_target = target;
27269 var = force_reg (GET_MODE_INNER (mode), var);
27270 x = gen_rtx_VEC_DUPLICATE (mode, var);
27271 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
27272 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
27273 if (one_var != 0)
27275 /* We need to shuffle the value to the correct position, so
27276 create a new pseudo to store the intermediate result. */
27278 /* With SSE2, we can use the integer shuffle insns. */
27279 if (mode != V4SFmode && TARGET_SSE2)
27281 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
27282 const1_rtx,
27283 GEN_INT (one_var == 1 ? 0 : 1),
27284 GEN_INT (one_var == 2 ? 0 : 1),
27285 GEN_INT (one_var == 3 ? 0 : 1)));
27286 if (target != new_target)
27287 emit_move_insn (target, new_target);
27288 return true;
27291 /* Otherwise convert the intermediate result to V4SFmode and
27292 use the SSE1 shuffle instructions. */
27293 if (mode != V4SFmode)
27295 tmp = gen_reg_rtx (V4SFmode);
27296 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
27298 else
27299 tmp = new_target;
27301 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
27302 const1_rtx,
27303 GEN_INT (one_var == 1 ? 0 : 1),
27304 GEN_INT (one_var == 2 ? 0+4 : 1+4),
27305 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
27307 if (mode != V4SFmode)
27308 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
27309 else if (tmp != target)
27310 emit_move_insn (target, tmp);
27312 else if (target != new_target)
27313 emit_move_insn (target, new_target);
27314 return true;
27316 case V8HImode:
27317 case V16QImode:
27318 vsimode = V4SImode;
27319 goto widen;
27320 case V4HImode:
27321 case V8QImode:
27322 if (!mmx_ok)
27323 return false;
27324 vsimode = V2SImode;
27325 goto widen;
27326 widen:
27327 if (one_var != 0)
27328 return false;
27330 /* Zero extend the variable element to SImode and recurse. */
27331 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
27333 x = gen_reg_rtx (vsimode);
27334 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
27335 var, one_var))
27336 gcc_unreachable ();
27338 emit_move_insn (target, gen_lowpart (mode, x));
27339 return true;
27341 default:
27342 return false;
27346 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
27347 consisting of the values in VALS. It is known that all elements
27348 except ONE_VAR are constants. Return true if successful. */
27350 static bool
27351 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
27352 rtx target, rtx vals, int one_var)
27354 rtx var = XVECEXP (vals, 0, one_var);
27355 enum machine_mode wmode;
27356 rtx const_vec, x;
27358 const_vec = copy_rtx (vals);
27359 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
27360 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27362 switch (mode)
27364 case V2DFmode:
27365 case V2DImode:
27366 case V2SFmode:
27367 case V2SImode:
27368 /* For the two element vectors, it's just as easy to use
27369 the general case. */
27370 return false;
27372 case V4DImode:
27373 /* Use ix86_expand_vector_set in 64bit mode only. */
27374 if (!TARGET_64BIT)
27375 return false;
27376 case V4DFmode:
27377 case V8SFmode:
27378 case V8SImode:
27379 case V16HImode:
27380 case V32QImode:
27381 case V4SFmode:
27382 case V4SImode:
27383 case V8HImode:
27384 case V4HImode:
27385 break;
27387 case V16QImode:
27388 if (TARGET_SSE4_1)
27389 break;
27390 wmode = V8HImode;
27391 goto widen;
27392 case V8QImode:
27393 wmode = V4HImode;
27394 goto widen;
27395 widen:
27396 /* There's no way to set one QImode entry easily. Combine
27397 the variable value with its adjacent constant value, and
27398 promote to an HImode set. */
27399 x = XVECEXP (vals, 0, one_var ^ 1);
27400 if (one_var & 1)
27402 var = convert_modes (HImode, QImode, var, true);
27403 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27404 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27405 x = GEN_INT (INTVAL (x) & 0xff);
27407 else
27409 var = convert_modes (HImode, QImode, var, true);
27410 x = gen_int_mode (INTVAL (x) << 8, HImode);
27412 if (x != const0_rtx)
27413 var = expand_simple_binop (HImode, IOR, var, x, var,
27414 1, OPTAB_LIB_WIDEN);
27416 x = gen_reg_rtx (wmode);
27417 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27418 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27420 emit_move_insn (target, gen_lowpart (mode, x));
27421 return true;
27423 default:
27424 return false;
27427 emit_move_insn (target, const_vec);
27428 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27429 return true;
27432 /* A subroutine of ix86_expand_vector_init_general. Use vector
27433 concatenate to handle the most general case: all values variable,
27434 and none identical. */
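/* For example (illustrative), a V4SFmode init { a, b, c, d } is built as
   two V2SFmode concats (a,b) and (c,d), which are then concatenated into
   the final V4SFmode value; 8-element modes add one more level of the
   same recursion. */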
27436 static void
27437 ix86_expand_vector_init_concat (enum machine_mode mode,
27438 rtx target, rtx *ops, int n)
27440 enum machine_mode cmode, hmode = VOIDmode;
27441 rtx first[8], second[4];
27442 rtvec v;
27443 int i, j;
27445 switch (n)
27447 case 2:
27448 switch (mode)
27450 case V8SImode:
27451 cmode = V4SImode;
27452 break;
27453 case V8SFmode:
27454 cmode = V4SFmode;
27455 break;
27456 case V4DImode:
27457 cmode = V2DImode;
27458 break;
27459 case V4DFmode:
27460 cmode = V2DFmode;
27461 break;
27462 case V4SImode:
27463 cmode = V2SImode;
27464 break;
27465 case V4SFmode:
27466 cmode = V2SFmode;
27467 break;
27468 case V2DImode:
27469 cmode = DImode;
27470 break;
27471 case V2SImode:
27472 cmode = SImode;
27473 break;
27474 case V2DFmode:
27475 cmode = DFmode;
27476 break;
27477 case V2SFmode:
27478 cmode = SFmode;
27479 break;
27480 default:
27481 gcc_unreachable ();
27484 if (!register_operand (ops[1], cmode))
27485 ops[1] = force_reg (cmode, ops[1]);
27486 if (!register_operand (ops[0], cmode))
27487 ops[0] = force_reg (cmode, ops[0]);
27488 emit_insn (gen_rtx_SET (VOIDmode, target,
27489 gen_rtx_VEC_CONCAT (mode, ops[0],
27490 ops[1])));
27491 break;
27493 case 4:
27494 switch (mode)
27496 case V4DImode:
27497 cmode = V2DImode;
27498 break;
27499 case V4DFmode:
27500 cmode = V2DFmode;
27501 break;
27502 case V4SImode:
27503 cmode = V2SImode;
27504 break;
27505 case V4SFmode:
27506 cmode = V2SFmode;
27507 break;
27508 default:
27509 gcc_unreachable ();
27511 goto half;
27513 case 8:
27514 switch (mode)
27516 case V8SImode:
27517 cmode = V2SImode;
27518 hmode = V4SImode;
27519 break;
27520 case V8SFmode:
27521 cmode = V2SFmode;
27522 hmode = V4SFmode;
27523 break;
27524 default:
27525 gcc_unreachable ();
27527 goto half;
27529 half:
27530 /* FIXME: We process inputs backward to help RA. PR 36222. */
27531 i = n - 1;
27532 j = (n >> 1) - 1;
27533 for (; i > 0; i -= 2, j--)
27535 first[j] = gen_reg_rtx (cmode);
27536 v = gen_rtvec (2, ops[i - 1], ops[i]);
27537 ix86_expand_vector_init (false, first[j],
27538 gen_rtx_PARALLEL (cmode, v));
27541 n >>= 1;
27542 if (n > 2)
27544 gcc_assert (hmode != VOIDmode);
27545 for (i = j = 0; i < n; i += 2, j++)
27547 second[j] = gen_reg_rtx (hmode);
27548 ix86_expand_vector_init_concat (hmode, second [j],
27549 &first [i], 2);
27551 n >>= 1;
27552 ix86_expand_vector_init_concat (mode, target, second, n);
27554 else
27555 ix86_expand_vector_init_concat (mode, target, first, n);
27556 break;
27558 default:
27559 gcc_unreachable ();
27563 /* A subroutine of ix86_expand_vector_init_general. Use vector
27564 interleave to handle the most general case: all values variable,
27565 and none identical. */
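/* For example (illustrative), a V8HImode build from h0..h7 loads the
   pairs (h0,h1) .. (h6,h7) into the low halves of four registers viewed
   as V4SImode, interleaves those pairwise into two V2DImode values, and
   interleaves once more to obtain the full vector. */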
27567 static void
27568 ix86_expand_vector_init_interleave (enum machine_mode mode,
27569 rtx target, rtx *ops, int n)
27571 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27572 int i, j;
27573 rtx op0, op1;
27574 rtx (*gen_load_even) (rtx, rtx, rtx);
27575 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27576 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27578 switch (mode)
27580 case V8HImode:
27581 gen_load_even = gen_vec_setv8hi;
27582 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27583 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27584 inner_mode = HImode;
27585 first_imode = V4SImode;
27586 second_imode = V2DImode;
27587 third_imode = VOIDmode;
27588 break;
27589 case V16QImode:
27590 gen_load_even = gen_vec_setv16qi;
27591 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27592 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27593 inner_mode = QImode;
27594 first_imode = V8HImode;
27595 second_imode = V4SImode;
27596 third_imode = V2DImode;
27597 break;
27598 default:
27599 gcc_unreachable ();
27602 for (i = 0; i < n; i++)
27604 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27605 op0 = gen_reg_rtx (SImode);
27606 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27608 /* Insert the SImode value as low element of V4SImode vector. */
27609 op1 = gen_reg_rtx (V4SImode);
27610 op0 = gen_rtx_VEC_MERGE (V4SImode,
27611 gen_rtx_VEC_DUPLICATE (V4SImode,
27612 op0),
27613 CONST0_RTX (V4SImode),
27614 const1_rtx);
27615 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27617 /* Cast the V4SImode vector back to a vector in the original mode. */
27618 op0 = gen_reg_rtx (mode);
27619 emit_move_insn (op0, gen_lowpart (mode, op1));
27621 /* Load even elements into the second position. */
27622 emit_insn ((*gen_load_even) (op0,
27623 force_reg (inner_mode,
27624 ops [i + i + 1]),
27625 const1_rtx));
27627 /* Cast vector to FIRST_IMODE vector. */
27628 ops[i] = gen_reg_rtx (first_imode);
27629 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27632 /* Interleave low FIRST_IMODE vectors. */
27633 for (i = j = 0; i < n; i += 2, j++)
27635 op0 = gen_reg_rtx (first_imode);
27636 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27638 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27639 ops[j] = gen_reg_rtx (second_imode);
27640 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27643 /* Interleave low SECOND_IMODE vectors. */
27644 switch (second_imode)
27646 case V4SImode:
27647 for (i = j = 0; i < n / 2; i += 2, j++)
27649 op0 = gen_reg_rtx (second_imode);
27650 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27651 ops[i + 1]));
27653 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27654 vector. */
27655 ops[j] = gen_reg_rtx (third_imode);
27656 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27658 second_imode = V2DImode;
27659 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27660 /* FALLTHRU */
27662 case V2DImode:
27663 op0 = gen_reg_rtx (second_imode);
27664 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27665 ops[1]));
27667 /* Cast the SECOND_IMODE vector back to a vector in the original
27668 mode. */
27669 emit_insn (gen_rtx_SET (VOIDmode, target,
27670 gen_lowpart (mode, op0)));
27671 break;
27673 default:
27674 gcc_unreachable ();
27678 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27679 all values variable, and none identical. */
27681 static void
27682 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27683 rtx target, rtx vals)
27685 rtx ops[32], op0, op1;
27686 enum machine_mode half_mode = VOIDmode;
27687 int n, i;
27689 switch (mode)
27691 case V2SFmode:
27692 case V2SImode:
27693 if (!mmx_ok && !TARGET_SSE)
27694 break;
27695 /* FALLTHRU */
27697 case V8SFmode:
27698 case V8SImode:
27699 case V4DFmode:
27700 case V4DImode:
27701 case V4SFmode:
27702 case V4SImode:
27703 case V2DFmode:
27704 case V2DImode:
27705 n = GET_MODE_NUNITS (mode);
27706 for (i = 0; i < n; i++)
27707 ops[i] = XVECEXP (vals, 0, i);
27708 ix86_expand_vector_init_concat (mode, target, ops, n);
27709 return;
27711 case V32QImode:
27712 half_mode = V16QImode;
27713 goto half;
27715 case V16HImode:
27716 half_mode = V8HImode;
27717 goto half;
27719 half:
27720 n = GET_MODE_NUNITS (mode);
27721 for (i = 0; i < n; i++)
27722 ops[i] = XVECEXP (vals, 0, i);
27723 op0 = gen_reg_rtx (half_mode);
27724 op1 = gen_reg_rtx (half_mode);
27725 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27726 n >> 2);
27727 ix86_expand_vector_init_interleave (half_mode, op1,
27728 &ops [n >> 1], n >> 2);
27729 emit_insn (gen_rtx_SET (VOIDmode, target,
27730 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27731 return;
27733 case V16QImode:
27734 if (!TARGET_SSE4_1)
27735 break;
27736 /* FALLTHRU */
27738 case V8HImode:
27739 if (!TARGET_SSE2)
27740 break;
27742 /* Don't use ix86_expand_vector_init_interleave if we can't
27743 move from GPR to SSE register directly. */
27744 if (!TARGET_INTER_UNIT_MOVES)
27745 break;
27747 n = GET_MODE_NUNITS (mode);
27748 for (i = 0; i < n; i++)
27749 ops[i] = XVECEXP (vals, 0, i);
27750 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27751 return;
27753 case V4HImode:
27754 case V8QImode:
27755 break;
27757 default:
27758 gcc_unreachable ();
27762 int i, j, n_elts, n_words, n_elt_per_word;
27763 enum machine_mode inner_mode;
27764 rtx words[4], shift;
27766 inner_mode = GET_MODE_INNER (mode);
27767 n_elts = GET_MODE_NUNITS (mode);
27768 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27769 n_elt_per_word = n_elts / n_words;
27770 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27772 for (i = 0; i < n_words; ++i)
27774 rtx word = NULL_RTX;
27776 for (j = 0; j < n_elt_per_word; ++j)
27778 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27779 elt = convert_modes (word_mode, inner_mode, elt, true);
27781 if (j == 0)
27782 word = elt;
27783 else
27785 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27786 word, 1, OPTAB_LIB_WIDEN);
27787 word = expand_simple_binop (word_mode, IOR, word, elt,
27788 word, 1, OPTAB_LIB_WIDEN);
27792 words[i] = word;
27795 if (n_words == 1)
27796 emit_move_insn (target, gen_lowpart (mode, words[0]));
27797 else if (n_words == 2)
27799 rtx tmp = gen_reg_rtx (mode);
27800 emit_clobber (tmp);
27801 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27802 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27803 emit_move_insn (target, tmp);
27805 else if (n_words == 4)
27807 rtx tmp = gen_reg_rtx (V4SImode);
27808 gcc_assert (word_mode == SImode);
27809 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27810 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27811 emit_move_insn (target, gen_lowpart (mode, tmp));
27813 else
27814 gcc_unreachable ();
27818 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27819 instructions unless MMX_OK is true. */
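/* The strategy, in order of preference: all-constant vectors are loaded
   from the constant pool; vectors with all elements identical are
   broadcast; vectors with a single variable element are handled by the
   one_nonzero / one_var helpers above; everything else falls back to the
   general concat/interleave expansion. */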
27821 void
27822 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27824 enum machine_mode mode = GET_MODE (target);
27825 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27826 int n_elts = GET_MODE_NUNITS (mode);
27827 int n_var = 0, one_var = -1;
27828 bool all_same = true, all_const_zero = true;
27829 int i;
27830 rtx x;
27832 for (i = 0; i < n_elts; ++i)
27834 x = XVECEXP (vals, 0, i);
27835 if (!(CONST_INT_P (x)
27836 || GET_CODE (x) == CONST_DOUBLE
27837 || GET_CODE (x) == CONST_FIXED))
27838 n_var++, one_var = i;
27839 else if (x != CONST0_RTX (inner_mode))
27840 all_const_zero = false;
27841 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27842 all_same = false;
27845 /* Constants are best loaded from the constant pool. */
27846 if (n_var == 0)
27848 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27849 return;
27852 /* If all values are identical, broadcast the value. */
27853 if (all_same
27854 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27855 XVECEXP (vals, 0, 0)))
27856 return;
27858 /* Values where only one field is non-constant are best loaded from
27859 the pool and overwritten via move later. */
27860 if (n_var == 1)
27862 if (all_const_zero
27863 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27864 XVECEXP (vals, 0, one_var),
27865 one_var))
27866 return;
27868 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27869 return;
27872 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
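/* Store VAL into element ELT of vector TARGET.  MMX instructions are
   only used when MMX_OK is true. */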
27875 void
27876 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27878 enum machine_mode mode = GET_MODE (target);
27879 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27880 enum machine_mode half_mode;
27881 bool use_vec_merge = false;
27882 rtx tmp;
27883 static rtx (*gen_extract[6][2]) (rtx, rtx)
27885 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27886 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27887 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27888 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27889 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27890 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27892 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27894 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27895 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27896 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27897 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27898 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27899 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27901 int i, j, n;
27903 switch (mode)
27905 case V2SFmode:
27906 case V2SImode:
27907 if (mmx_ok)
27909 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27910 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27911 if (elt == 0)
27912 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27913 else
27914 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27915 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27916 return;
27918 break;
27920 case V2DImode:
27921 use_vec_merge = TARGET_SSE4_1;
27922 if (use_vec_merge)
27923 break;
27925 case V2DFmode:
27927 rtx op0, op1;
27929 /* For the two element vectors, we implement a VEC_CONCAT with
27930 the extraction of the other element. */
27932 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27933 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27935 if (elt == 0)
27936 op0 = val, op1 = tmp;
27937 else
27938 op0 = tmp, op1 = val;
27940 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27941 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27943 return;
27945 case V4SFmode:
27946 use_vec_merge = TARGET_SSE4_1;
27947 if (use_vec_merge)
27948 break;
27950 switch (elt)
27952 case 0:
27953 use_vec_merge = true;
27954 break;
27956 case 1:
27957 /* tmp = target = A B C D */
27958 tmp = copy_to_reg (target);
27959 /* target = A A B B */
27960 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27961 /* target = X A B B */
27962 ix86_expand_vector_set (false, target, val, 0);
27963 /* target = A X C D */
27964 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27965 const1_rtx, const0_rtx,
27966 GEN_INT (2+4), GEN_INT (3+4)));
27967 return;
27969 case 2:
27970 /* tmp = target = A B C D */
27971 tmp = copy_to_reg (target);
27972 /* tmp = X B C D */
27973 ix86_expand_vector_set (false, tmp, val, 0);
27974 /* target = A B X D */
27975 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27976 const0_rtx, const1_rtx,
27977 GEN_INT (0+4), GEN_INT (3+4)));
27978 return;
27980 case 3:
27981 /* tmp = target = A B C D */
27982 tmp = copy_to_reg (target);
27983 /* tmp = X B C D */
27984 ix86_expand_vector_set (false, tmp, val, 0);
27985 /* target = A B C X */
27986 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27987 const0_rtx, const1_rtx,
27988 GEN_INT (2+4), GEN_INT (0+4)));
27989 return;
27991 default:
27992 gcc_unreachable ();
27994 break;
27996 case V4SImode:
27997 use_vec_merge = TARGET_SSE4_1;
27998 if (use_vec_merge)
27999 break;
28001 /* Element 0 handled by vec_merge below. */
28002 if (elt == 0)
28004 use_vec_merge = true;
28005 break;
28008 if (TARGET_SSE2)
28010 /* With SSE2, use integer shuffles to swap element 0 and ELT,
28011 store into element 0, then shuffle them back. */
28013 rtx order[4];
28015 order[0] = GEN_INT (elt);
28016 order[1] = const1_rtx;
28017 order[2] = const2_rtx;
28018 order[3] = GEN_INT (3);
28019 order[elt] = const0_rtx;
28021 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28022 order[1], order[2], order[3]));
28024 ix86_expand_vector_set (false, target, val, 0);
28026 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
28027 order[1], order[2], order[3]));
28029 else
28031 /* For SSE1, we have to reuse the V4SF code. */
28032 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
28033 gen_lowpart (SFmode, val), elt);
28035 return;
28037 case V8HImode:
28038 use_vec_merge = TARGET_SSE2;
28039 break;
28040 case V4HImode:
28041 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28042 break;
28044 case V16QImode:
28045 use_vec_merge = TARGET_SSE4_1;
28046 break;
28048 case V8QImode:
28049 break;
28051 case V32QImode:
28052 half_mode = V16QImode;
28053 j = 0;
28054 n = 16;
28055 goto half;
28057 case V16HImode:
28058 half_mode = V8HImode;
28059 j = 1;
28060 n = 8;
28061 goto half;
28063 case V8SImode:
28064 half_mode = V4SImode;
28065 j = 2;
28066 n = 4;
28067 goto half;
28069 case V4DImode:
28070 half_mode = V2DImode;
28071 j = 3;
28072 n = 2;
28073 goto half;
28075 case V8SFmode:
28076 half_mode = V4SFmode;
28077 j = 4;
28078 n = 4;
28079 goto half;
28081 case V4DFmode:
28082 half_mode = V2DFmode;
28083 j = 5;
28084 n = 2;
28085 goto half;
28087 half:
28088 /* Compute offset. */
28089 i = elt / n;
28090 elt %= n;
28092 gcc_assert (i <= 1);
28094 /* Extract the half. */
28095 tmp = gen_reg_rtx (half_mode);
28096 emit_insn ((*gen_extract[j][i]) (tmp, target));
28098 /* Put val in tmp at elt. */
28099 ix86_expand_vector_set (false, tmp, val, elt);
28101 /* Put it back. */
28102 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
28103 return;
28105 default:
28106 break;
28109 if (use_vec_merge)
28111 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
28112 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
28113 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28115 else
28117 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28119 emit_move_insn (mem, target);
28121 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28122 emit_move_insn (tmp, val);
28124 emit_move_insn (target, mem);
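/* Extract element ELT of vector VEC into TARGET.  MMX instructions are
   only used when MMX_OK is true. */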
28128 void
28129 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
28131 enum machine_mode mode = GET_MODE (vec);
28132 enum machine_mode inner_mode = GET_MODE_INNER (mode);
28133 bool use_vec_extr = false;
28134 rtx tmp;
28136 switch (mode)
28138 case V2SImode:
28139 case V2SFmode:
28140 if (!mmx_ok)
28141 break;
28142 /* FALLTHRU */
28144 case V2DFmode:
28145 case V2DImode:
28146 use_vec_extr = true;
28147 break;
28149 case V4SFmode:
28150 use_vec_extr = TARGET_SSE4_1;
28151 if (use_vec_extr)
28152 break;
28154 switch (elt)
28156 case 0:
28157 tmp = vec;
28158 break;
28160 case 1:
28161 case 3:
28162 tmp = gen_reg_rtx (mode);
28163 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
28164 GEN_INT (elt), GEN_INT (elt),
28165 GEN_INT (elt+4), GEN_INT (elt+4)));
28166 break;
28168 case 2:
28169 tmp = gen_reg_rtx (mode);
28170 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
28171 break;
28173 default:
28174 gcc_unreachable ();
28176 vec = tmp;
28177 use_vec_extr = true;
28178 elt = 0;
28179 break;
28181 case V4SImode:
28182 use_vec_extr = TARGET_SSE4_1;
28183 if (use_vec_extr)
28184 break;
28186 if (TARGET_SSE2)
28188 switch (elt)
28190 case 0:
28191 tmp = vec;
28192 break;
28194 case 1:
28195 case 3:
28196 tmp = gen_reg_rtx (mode);
28197 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
28198 GEN_INT (elt), GEN_INT (elt),
28199 GEN_INT (elt), GEN_INT (elt)));
28200 break;
28202 case 2:
28203 tmp = gen_reg_rtx (mode);
28204 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
28205 break;
28207 default:
28208 gcc_unreachable ();
28210 vec = tmp;
28211 use_vec_extr = true;
28212 elt = 0;
28214 else
28216 /* For SSE1, we have to reuse the V4SF code. */
28217 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
28218 gen_lowpart (V4SFmode, vec), elt);
28219 return;
28221 break;
28223 case V8HImode:
28224 use_vec_extr = TARGET_SSE2;
28225 break;
28226 case V4HImode:
28227 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
28228 break;
28230 case V16QImode:
28231 use_vec_extr = TARGET_SSE4_1;
28232 break;
28234 case V8QImode:
28235 /* ??? Could extract the appropriate HImode element and shift. */
28236 default:
28237 break;
28240 if (use_vec_extr)
28242 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
28243 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
28245 /* Let the rtl optimizers know about the zero extension performed. */
28246 if (inner_mode == QImode || inner_mode == HImode)
28248 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
28249 target = gen_lowpart (SImode, target);
28252 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
28254 else
28256 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
28258 emit_move_insn (mem, vec);
28260 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
28261 emit_move_insn (target, tmp);
28265 /* Expand a vector reduction on V4SFmode for SSE1. FN generates the binary
28266 pattern used for the reduction; DEST is the destination; IN is the input vector. */
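/* E.g. with FN generating the V4SF add pattern this leaves
   (in[0] + in[2]) + (in[1] + in[3]) in element 0 of DEST, using one
   MOVHLPS, one SHUFPS and two additions. */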
28268 void
28269 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
28271 rtx tmp1, tmp2, tmp3;
28273 tmp1 = gen_reg_rtx (V4SFmode);
28274 tmp2 = gen_reg_rtx (V4SFmode);
28275 tmp3 = gen_reg_rtx (V4SFmode);
28277 emit_insn (gen_sse_movhlps (tmp1, in, in));
28278 emit_insn (fn (tmp2, tmp1, in));
28280 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
28281 const1_rtx, const1_rtx,
28282 GEN_INT (1+4), GEN_INT (1+4)));
28283 emit_insn (fn (dest, tmp2, tmp3));
28286 /* Target hook for scalar_mode_supported_p. */
28287 static bool
28288 ix86_scalar_mode_supported_p (enum machine_mode mode)
28290 if (DECIMAL_FLOAT_MODE_P (mode))
28291 return default_decimal_float_supported_p ();
28292 else if (mode == TFmode)
28293 return true;
28294 else
28295 return default_scalar_mode_supported_p (mode);
28298 /* Implements target hook vector_mode_supported_p. */
28299 static bool
28300 ix86_vector_mode_supported_p (enum machine_mode mode)
28302 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
28303 return true;
28304 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
28305 return true;
28306 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
28307 return true;
28308 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
28309 return true;
28310 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
28311 return true;
28312 return false;
28315 /* Target hook for c_mode_for_suffix. */
28316 static enum machine_mode
28317 ix86_c_mode_for_suffix (char suffix)
28319 if (suffix == 'q')
28320 return TFmode;
28321 if (suffix == 'w')
28322 return XFmode;
28324 return VOIDmode;
28327 /* Worker function for TARGET_MD_ASM_CLOBBERS.
28329 We do this in the new i386 backend to maintain source compatibility
28330 with the old cc0-based compiler. */
28332 static tree
28333 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
28334 tree inputs ATTRIBUTE_UNUSED,
28335 tree clobbers)
28337 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
28338 clobbers);
28339 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
28340 clobbers);
28341 return clobbers;
28344 /* Implement the target hook targetm.asm.encode_section_info. This
28345 is not used by NetWare. */
28347 static void ATTRIBUTE_UNUSED
28348 ix86_encode_section_info (tree decl, rtx rtl, int first)
28350 default_encode_section_info (decl, rtl, first);
28352 if (TREE_CODE (decl) == VAR_DECL
28353 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
28354 && ix86_in_large_data_p (decl))
28355 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
28358 /* Worker function for REVERSE_CONDITION. */
28360 enum rtx_code
28361 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28363 return (mode != CCFPmode && mode != CCFPUmode
28364 ? reverse_condition (code)
28365 : reverse_condition_maybe_unordered (code));
28368 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28369 to OPERANDS[0]. */
28371 const char *
28372 output_387_reg_move (rtx insn, rtx *operands)
28374 if (REG_P (operands[0]))
28376 if (REG_P (operands[1])
28377 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28379 if (REGNO (operands[0]) == FIRST_STACK_REG)
28380 return output_387_ffreep (operands, 0);
28381 return "fstp\t%y0";
28383 if (STACK_TOP_P (operands[0]))
28384 return "fld%Z1\t%y1";
28385 return "fst\t%y0";
28387 else if (MEM_P (operands[0]))
28389 gcc_assert (REG_P (operands[1]));
28390 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28391 return "fstp%Z0\t%y0";
28392 else
28394 /* There is no non-popping store to memory for XFmode.
28395 So if we need one, follow the store with a load. */
28396 if (GET_MODE (operands[0]) == XFmode)
28397 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28398 else
28399 return "fst%Z0\t%y0";
28402 else
28403 gcc_unreachable();
28406 /* Output code to perform a conditional jump to LABEL, if C2 flag in
28407 FP status register is set. */
28409 void
28410 ix86_emit_fp_unordered_jump (rtx label)
28412 rtx reg = gen_reg_rtx (HImode);
28413 rtx temp;
28415 emit_insn (gen_x86_fnstsw_1 (reg));
28417 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28419 emit_insn (gen_x86_sahf_1 (reg));
28421 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28422 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28424 else
28426 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28428 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28429 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28432 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28433 gen_rtx_LABEL_REF (VOIDmode, label),
28434 pc_rtx);
28435 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28437 emit_jump_insn (temp);
28438 predict_jump (REG_BR_PROB_BASE * 10 / 100);
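/* Note on the two paths above: the x87 status word keeps C2 in bit 10,
   i.e. bit 2 (mask 0x04) of its high byte.  SAHF copies that byte into
   EFLAGS, where bit 2 is PF, so the UNORDERED test on CCmode amounts to
   a jp over the emitted jump.  Without usable SAHF, the 0x04 bit of the
   high byte is tested directly and the jump is taken on NE.  */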
28441 /* Output code to perform a log1p XFmode calculation. */
28443 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28445 rtx label1 = gen_label_rtx ();
28446 rtx label2 = gen_label_rtx ();
28448 rtx tmp = gen_reg_rtx (XFmode);
28449 rtx tmp2 = gen_reg_rtx (XFmode);
28450 rtx test;
28452 emit_insn (gen_absxf2 (tmp, op1));
28453 test = gen_rtx_GE (VOIDmode, tmp,
28454 CONST_DOUBLE_FROM_REAL_VALUE (
28455 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28456 XFmode));
28457 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28459 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28460 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28461 emit_jump (label2);
28463 emit_label (label1);
28464 emit_move_insn (tmp, CONST1_RTX (XFmode));
28465 emit_insn (gen_addxf3 (tmp, op1, tmp));
28466 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28467 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28469 emit_label (label2);
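/* The threshold above is 1 - sqrt(2)/2 ~= 0.2928932...: FYL2XP1 computes
   y * log2 (x + 1) but is only specified for |x| below that bound, so
   larger inputs take the FYL2X path on 1 + x instead.  In both cases
   y is ln (2) (fldln2), and ln (2) * log2 (1 + x) == ln (1 + x), which
   is exactly log1p (x).  */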
28472 /* Output code to perform a Newton-Raphson approximation of a single precision
28473 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28475 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28477 rtx x0, x1, e0, e1, two;
28479 x0 = gen_reg_rtx (mode);
28480 e0 = gen_reg_rtx (mode);
28481 e1 = gen_reg_rtx (mode);
28482 x1 = gen_reg_rtx (mode);
28484 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28486 if (VECTOR_MODE_P (mode))
28487 two = ix86_build_const_vector (SFmode, true, two);
28489 two = force_reg (mode, two);
28491 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28493 /* x0 = rcp(b) estimate */
28494 emit_insn (gen_rtx_SET (VOIDmode, x0,
28495 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28496 UNSPEC_RCP)));
28497 /* e0 = x0 * a */
28498 emit_insn (gen_rtx_SET (VOIDmode, e0,
28499 gen_rtx_MULT (mode, x0, a)));
28500 /* e1 = x0 * b */
28501 emit_insn (gen_rtx_SET (VOIDmode, e1,
28502 gen_rtx_MULT (mode, x0, b)));
28503 /* x1 = 2. - e1 */
28504 emit_insn (gen_rtx_SET (VOIDmode, x1,
28505 gen_rtx_MINUS (mode, two, e1)));
28506 /* res = e0 * x1 */
28507 emit_insn (gen_rtx_SET (VOIDmode, res,
28508 gen_rtx_MULT (mode, e0, x1)));
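/* A scalar sketch of the sequence emitted above, with a hypothetical
   rcp_estimate () standing in for the ~12-bit RCPSS/RCPPS approximation
   (so x0 ~= 1/b and e1 ~= 1):

     float swdiv (float a, float b)
     {
       float x0 = rcp_estimate (b);
       float e0 = x0 * a;
       float e1 = x0 * b;
       return e0 * (2.0f - e1);
     }

   The result is a * x0 * (2 - b * x0); x0 * (2 - b * x0) is one
   Newton-Raphson step for f (x) = 1/x - b, roughly doubling the number
   of correct bits in the estimate.  */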
28511 /* Output code to perform a Newton-Raphson approximation of a
28512 single precision floating point [reciprocal] square root. */
28514 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28515 bool recip)
28517 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28518 REAL_VALUE_TYPE r;
28520 x0 = gen_reg_rtx (mode);
28521 e0 = gen_reg_rtx (mode);
28522 e1 = gen_reg_rtx (mode);
28523 e2 = gen_reg_rtx (mode);
28524 e3 = gen_reg_rtx (mode);
28526 real_from_integer (&r, VOIDmode, -3, -1, 0);
28527 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28529 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28530 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28532 if (VECTOR_MODE_P (mode))
28534 mthree = ix86_build_const_vector (SFmode, true, mthree);
28535 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28538 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28539 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28541 /* x0 = rsqrt(a) estimate */
28542 emit_insn (gen_rtx_SET (VOIDmode, x0,
28543 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28544 UNSPEC_RSQRT)));
28546 /* If a == 0.0, filter out the infinite rsqrt estimate to prevent a NaN result for sqrt (0.0). */
28547 if (!recip)
28549 rtx zero, mask;
28551 zero = gen_reg_rtx (mode);
28552 mask = gen_reg_rtx (mode);
28554 zero = force_reg (mode, CONST0_RTX(mode));
28555 emit_insn (gen_rtx_SET (VOIDmode, mask,
28556 gen_rtx_NE (mode, zero, a)));
28558 emit_insn (gen_rtx_SET (VOIDmode, x0,
28559 gen_rtx_AND (mode, x0, mask)));
28562 /* e0 = x0 * a */
28563 emit_insn (gen_rtx_SET (VOIDmode, e0,
28564 gen_rtx_MULT (mode, x0, a)));
28565 /* e1 = e0 * x0 */
28566 emit_insn (gen_rtx_SET (VOIDmode, e1,
28567 gen_rtx_MULT (mode, e0, x0)));
28569 /* e2 = e1 - 3. */
28570 mthree = force_reg (mode, mthree);
28571 emit_insn (gen_rtx_SET (VOIDmode, e2,
28572 gen_rtx_PLUS (mode, e1, mthree)));
28574 mhalf = force_reg (mode, mhalf);
28575 if (recip)
28576 /* e3 = -.5 * x0 */
28577 emit_insn (gen_rtx_SET (VOIDmode, e3,
28578 gen_rtx_MULT (mode, x0, mhalf)));
28579 else
28580 /* e3 = -.5 * e0 */
28581 emit_insn (gen_rtx_SET (VOIDmode, e3,
28582 gen_rtx_MULT (mode, e0, mhalf)));
28583 /* ret = e2 * e3 */
28584 emit_insn (gen_rtx_SET (VOIDmode, res,
28585 gen_rtx_MULT (mode, e2, e3)));
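/* The identity behind the sequence above: with x0 ~= 1/sqrt(a) from
   RSQRTSS/RSQRTPS, one Newton-Raphson step is

     rsqrt (a) ~= x0 * (1.5 - 0.5 * a * x0 * x0)
               == -0.5 * x0 * (a * x0 * x0 - 3.0)

   which is e3 * e2 above (e2 = a*x0*x0 - 3, e3 = -0.5 * x0).  For sqrt
   the estimate is additionally multiplied by a (e3 = -0.5 * e0 with
   e0 = a * x0), using sqrt (a) == a / sqrt (a); the masking step zeroes
   the infinite estimate when a == 0.0 so the product does not become
   a NaN.  */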
28588 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28590 static void ATTRIBUTE_UNUSED
28591 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28592 tree decl)
28594 /* With Binutils 2.15, the "@unwind" marker must be specified on
28595 every occurrence of the ".eh_frame" section, not just the first
28596 one. */
28597 if (TARGET_64BIT
28598 && strcmp (name, ".eh_frame") == 0)
28600 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28601 flags & SECTION_WRITE ? "aw" : "a");
28602 return;
28604 default_elf_asm_named_section (name, flags, decl);
28607 /* Return the mangling of TYPE if it is an extended fundamental type. */
28609 static const char *
28610 ix86_mangle_type (const_tree type)
28612 type = TYPE_MAIN_VARIANT (type);
28614 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28615 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28616 return NULL;
28618 switch (TYPE_MODE (type))
28620 case TFmode:
28621 /* __float128 is "g". */
28622 return "g";
28623 case XFmode:
28624 /* "long double" or __float80 is "e". */
28625 return "e";
28626 default:
28627 return NULL;
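/* For example, under the Itanium C++ ABI
     void f (__float128);     mangles as _Z1fg
     void f (long double);    mangles as _Z1fe (XFmode long double).  */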
28631 /* For 32-bit code we can save PIC register setup by using
28632 the hidden __stack_chk_fail_local function instead of calling
28633 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28634 register, so it is better to call __stack_chk_fail directly. */
28636 static tree
28637 ix86_stack_protect_fail (void)
28639 return TARGET_64BIT
28640 ? default_external_stack_protect_fail ()
28641 : default_hidden_stack_protect_fail ();
28644 /* Select a format to encode pointers in exception handling data. CODE
28645 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28646 true if the symbol may be affected by dynamic relocations.
28648 ??? All x86 object file formats are capable of representing this.
28649 After all, the relocation needed is the same as for the call insn.
28650 Whether or not a particular assembler allows us to enter such, I
28651 guess we'll have to see. */
28652 int
28653 asm_preferred_eh_data_format (int code, int global)
28655 if (flag_pic)
28657 int type = DW_EH_PE_sdata8;
28658 if (!TARGET_64BIT
28659 || ix86_cmodel == CM_SMALL_PIC
28660 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28661 type = DW_EH_PE_sdata4;
28662 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28664 if (ix86_cmodel == CM_SMALL
28665 || (ix86_cmodel == CM_MEDIUM && code))
28666 return DW_EH_PE_udata4;
28667 return DW_EH_PE_absptr;
28670 /* Expand copysign: copy the sign bit of SIGN onto the nonnegative value
28671 ABS_VALUE, storing the result in RESULT. If MASK is non-null, it is the
28672 mask that masks out the sign bit, as produced by ix86_expand_sse_fabs. */
28673 static void
28674 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28676 enum machine_mode mode = GET_MODE (sign);
28677 rtx sgn = gen_reg_rtx (mode);
28678 if (mask == NULL_RTX)
28680 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28681 if (!VECTOR_MODE_P (mode))
28683 /* We need to generate a scalar mode mask in this case. */
28684 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28685 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28686 mask = gen_reg_rtx (mode);
28687 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28690 else
28691 mask = gen_rtx_NOT (mode, mask);
28692 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28693 gen_rtx_AND (mode, mask, sign)));
28694 emit_insn (gen_rtx_SET (VOIDmode, result,
28695 gen_rtx_IOR (mode, abs_value, sgn)));
28698 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28699 mask for masking out the sign-bit is stored in *SMASK, if that is
28700 non-null. */
28701 static rtx
28702 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28704 enum machine_mode mode = GET_MODE (op0);
28705 rtx xa, mask;
28707 xa = gen_reg_rtx (mode);
28708 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28709 if (!VECTOR_MODE_P (mode))
28711 /* We need to generate a scalar mode mask in this case. */
28712 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28713 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28714 mask = gen_reg_rtx (mode);
28715 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28717 emit_insn (gen_rtx_SET (VOIDmode, xa,
28718 gen_rtx_AND (mode, op0, mask)));
28720 if (smask)
28721 *smask = mask;
28723 return xa;
28726 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28727 swapping the operands if SWAP_OPERANDS is true. The expanded
28728 code is a forward jump to a newly created label in case the
28729 comparison is true. The generated label rtx is returned. */
28730 static rtx
28731 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28732 bool swap_operands)
28734 rtx label, tmp;
28736 if (swap_operands)
28738 tmp = op0;
28739 op0 = op1;
28740 op1 = tmp;
28743 label = gen_label_rtx ();
28744 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28745 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28746 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28747 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28748 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28749 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28750 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28751 JUMP_LABEL (tmp) = label;
28753 return label;
28756 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28757 using comparison code CODE. Operands are swapped for the comparison if
28758 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28759 static rtx
28760 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28761 bool swap_operands)
28763 enum machine_mode mode = GET_MODE (op0);
28764 rtx mask = gen_reg_rtx (mode);
28766 if (swap_operands)
28768 rtx tmp = op0;
28769 op0 = op1;
28770 op1 = tmp;
28773 if (mode == DFmode)
28774 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28775 gen_rtx_fmt_ee (code, mode, op0, op1)));
28776 else
28777 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28778 gen_rtx_fmt_ee (code, mode, op0, op1)));
28780 return mask;
28783 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28784 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28785 static rtx
28786 ix86_gen_TWO52 (enum machine_mode mode)
28788 REAL_VALUE_TYPE TWO52r;
28789 rtx TWO52;
28791 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28792 TWO52 = const_double_from_real_value (TWO52r, mode);
28793 TWO52 = force_reg (mode, TWO52);
28795 return TWO52;
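/* TWO52 (2**52 for DFmode, 2**23 for SFmode) is the magic constant behind
   the rounding expanders below: for 0 <= x < 2**52 the sum x + 2**52 has
   an ulp of 1.0, so (x + TWO52) - TWO52 is x rounded to an integer in the
   current rounding mode (round to nearest even by default), with no
   float/integer conversion.  This is also why every expander bails out
   early once |x| >= 2**52: such values are already integral.  */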
28798 /* Expand SSE sequence for computing lround from OP1 storing
28799 into OP0. */
28800 void
28801 ix86_expand_lround (rtx op0, rtx op1)
28803 /* C code for the stuff we're doing below:
28804 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28805 return (long)tmp;
28807 enum machine_mode mode = GET_MODE (op1);
28808 const struct real_format *fmt;
28809 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28810 rtx adj;
28812 /* load nextafter (0.5, 0.0) */
28813 fmt = REAL_MODE_FORMAT (mode);
28814 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28815 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28817 /* adj = copysign (0.5, op1) */
28818 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28819 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28821 /* adj = op1 + adj */
28822 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28824 /* op0 = (imode)adj */
28825 expand_fix (op0, adj, 0);
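/* PRED_HALF is nextafter (0.5, 0.0), the largest value strictly below
   0.5, rather than 0.5 itself.  Adding a full 0.5 to an input just below
   0.5 (e.g. the double 0.49999999999999994) would round up to 1.0 in the
   addition, making lround return 1 instead of 0; with the predecessor of
   0.5 the sum stays below 1.0, while exact halfway cases such as 0.5 or
   2.5 still round away from zero as lround requires.  */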
28828 /* Expand SSE2 sequence for computing lfloor or lceil (depending on
28829 DO_FLOOR) from OP1, storing the result into OP0. */
28830 void
28831 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28833 /* C code for the stuff we're doing below (for do_floor):
28834 xi = (long)op1;
28835 xi -= (double)xi > op1 ? 1 : 0;
28836 return xi;
28838 enum machine_mode fmode = GET_MODE (op1);
28839 enum machine_mode imode = GET_MODE (op0);
28840 rtx ireg, freg, label, tmp;
28842 /* reg = (long)op1 */
28843 ireg = gen_reg_rtx (imode);
28844 expand_fix (ireg, op1, 0);
28846 /* freg = (double)reg */
28847 freg = gen_reg_rtx (fmode);
28848 expand_float (freg, ireg, 0);
28850 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28851 label = ix86_expand_sse_compare_and_jump (UNLE,
28852 freg, op1, !do_floor);
28853 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28854 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28855 emit_move_insn (ireg, tmp);
28857 emit_label (label);
28858 LABEL_NUSES (label) = 1;
28860 emit_move_insn (op0, ireg);
28863 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28864 result in OPERAND0. */
28865 void
28866 ix86_expand_rint (rtx operand0, rtx operand1)
28868 /* C code for the stuff we're doing below:
28869 xa = fabs (operand1);
28870 if (!isless (xa, 2**52))
28871 return operand1;
28872 xa = xa + 2**52 - 2**52;
28873 return copysign (xa, operand1);
28875 enum machine_mode mode = GET_MODE (operand0);
28876 rtx res, xa, label, TWO52, mask;
28878 res = gen_reg_rtx (mode);
28879 emit_move_insn (res, operand1);
28881 /* xa = abs (operand1) */
28882 xa = ix86_expand_sse_fabs (res, &mask);
28884 /* if (!isless (xa, TWO52)) goto label; */
28885 TWO52 = ix86_gen_TWO52 (mode);
28886 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28888 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28889 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28891 ix86_sse_copysign_to_positive (res, xa, res, mask);
28893 emit_label (label);
28894 LABEL_NUSES (label) = 1;
28896 emit_move_insn (operand0, res);
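/* Worked example of the TWO52 trick for operand1 = 2.3 (DFmode):
     xa            = 2.3
     xa + 2**52    = 4503599627370498.3  -> rounds to 4503599627370498.0
     ...  - 2**52  = 2.0
   and the final copysign restores the original sign, so -0.0 stays -0.0.
   Inputs with |x| >= 2**52 (and NaNs, via the unordered comparison) take
   the early branch and are returned unchanged.  */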
28899 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28900 into OPERAND0. */
28901 void
28902 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28904 /* C code for the stuff we expand below.
28905 double xa = fabs (x), x2;
28906 if (!isless (xa, TWO52))
28907 return x;
28908 xa = xa + TWO52 - TWO52;
28909 x2 = copysign (xa, x);
28910 Compensate. Floor:
28911 if (x2 > x)
28912 x2 -= 1;
28913 Compensate. Ceil:
28914 if (x2 < x)
28915 x2 -= -1;
28916 return x2;
28918 enum machine_mode mode = GET_MODE (operand0);
28919 rtx xa, TWO52, tmp, label, one, res, mask;
28921 TWO52 = ix86_gen_TWO52 (mode);
28923 /* Temporary for holding the result, initialized to the input
28924 operand to ease control flow. */
28925 res = gen_reg_rtx (mode);
28926 emit_move_insn (res, operand1);
28928 /* xa = abs (operand1) */
28929 xa = ix86_expand_sse_fabs (res, &mask);
28931 /* if (!isless (xa, TWO52)) goto label; */
28932 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28934 /* xa = xa + TWO52 - TWO52; */
28935 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28936 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28938 /* xa = copysign (xa, operand1) */
28939 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28941 /* generate 1.0 or -1.0 */
28942 one = force_reg (mode,
28943 const_double_from_real_value (do_floor
28944 ? dconst1 : dconstm1, mode));
28946 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28947 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28948 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28949 gen_rtx_AND (mode, one, tmp)));
28950 /* We always need to subtract here to preserve signed zero. */
28951 tmp = expand_simple_binop (mode, MINUS,
28952 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28953 emit_move_insn (res, tmp);
28955 emit_label (label);
28956 LABEL_NUSES (label) = 1;
28958 emit_move_insn (operand0, res);
28961 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28962 into OPERAND0. */
28963 void
28964 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28966 /* C code for the stuff we expand below.
28967 double xa = fabs (x), x2;
28968 if (!isless (xa, TWO52))
28969 return x;
28970 x2 = (double)(long)x;
28971 Compensate. Floor:
28972 if (x2 > x)
28973 x2 -= 1;
28974 Compensate. Ceil:
28975 if (x2 < x)
28976 x2 += 1;
28977 if (HONOR_SIGNED_ZEROS (mode))
28978 return copysign (x2, x);
28979 return x2;
28981 enum machine_mode mode = GET_MODE (operand0);
28982 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28984 TWO52 = ix86_gen_TWO52 (mode);
28986 /* Temporary for holding the result, initialized to the input
28987 operand to ease control flow. */
28988 res = gen_reg_rtx (mode);
28989 emit_move_insn (res, operand1);
28991 /* xa = abs (operand1) */
28992 xa = ix86_expand_sse_fabs (res, &mask);
28994 /* if (!isless (xa, TWO52)) goto label; */
28995 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28997 /* xa = (double)(long)x */
28998 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28999 expand_fix (xi, res, 0);
29000 expand_float (xa, xi, 0);
29002 /* generate 1.0 */
29003 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29005 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
29006 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
29007 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29008 gen_rtx_AND (mode, one, tmp)));
29009 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
29010 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29011 emit_move_insn (res, tmp);
29013 if (HONOR_SIGNED_ZEROS (mode))
29014 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29016 emit_label (label);
29017 LABEL_NUSES (label) = 1;
29019 emit_move_insn (operand0, res);
29022 /* Expand SSE sequence for computing round from OPERAND1 storing
29023 into OPERAND0, using a sequence that works without relying on DImode
29024 truncation via cvttsd2siq, which is only available on 64-bit targets. */
29025 void
29026 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
29028 /* C code for the stuff we expand below.
29029 double xa = fabs (x), xa2, x2;
29030 if (!isless (xa, TWO52))
29031 return x;
29032 Using the absolute value and copying back sign makes
29033 -0.0 -> -0.0 correct.
29034 xa2 = xa + TWO52 - TWO52;
29035 Compensate.
29036 dxa = xa2 - xa;
29037 if (dxa <= -0.5)
29038 xa2 += 1;
29039 else if (dxa > 0.5)
29040 xa2 -= 1;
29041 x2 = copysign (xa2, x);
29042 return x2;
29044 enum machine_mode mode = GET_MODE (operand0);
29045 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
29047 TWO52 = ix86_gen_TWO52 (mode);
29049 /* Temporary for holding the result, initialized to the input
29050 operand to ease control flow. */
29051 res = gen_reg_rtx (mode);
29052 emit_move_insn (res, operand1);
29054 /* xa = abs (operand1) */
29055 xa = ix86_expand_sse_fabs (res, &mask);
29057 /* if (!isless (xa, TWO52)) goto label; */
29058 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29060 /* xa2 = xa + TWO52 - TWO52; */
29061 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29062 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
29064 /* dxa = xa2 - xa; */
29065 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
29067 /* generate 0.5, 1.0 and -0.5 */
29068 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
29069 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
29070 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
29071 0, OPTAB_DIRECT);
29073 /* Compensate. */
29074 tmp = gen_reg_rtx (mode);
29075 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
29076 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
29077 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29078 gen_rtx_AND (mode, one, tmp)));
29079 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29080 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
29081 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
29082 emit_insn (gen_rtx_SET (VOIDmode, tmp,
29083 gen_rtx_AND (mode, one, tmp)));
29084 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
29086 /* res = copysign (xa2, operand1) */
29087 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
29089 emit_label (label);
29090 LABEL_NUSES (label) = 1;
29092 emit_move_insn (operand0, res);
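/* The compensation above converts the round-to-nearest-even result of the
   TWO52 trick into the round-halfway-away-from-zero semantics of round ():
   dxa = xa2 - xa is the rounding error and lies in [-0.5, +0.5].  If xa
   was an exact halfway case that was rounded down (dxa == -0.5), 1 is
   added back so halves always round up in magnitude; the symmetric
   dxa > 0.5 check subtracts 1 in the (defensive) opposite direction.
   Copying the sign of the original operand back at the end also keeps
   -0.0 intact.  */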
29095 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29096 into OPERAND0. */
29097 void
29098 ix86_expand_trunc (rtx operand0, rtx operand1)
29100 /* C code for SSE variant we expand below.
29101 double xa = fabs (x), x2;
29102 if (!isless (xa, TWO52))
29103 return x;
29104 x2 = (double)(long)x;
29105 if (HONOR_SIGNED_ZEROS (mode))
29106 return copysign (x2, x);
29107 return x2;
29109 enum machine_mode mode = GET_MODE (operand0);
29110 rtx xa, xi, TWO52, label, res, mask;
29112 TWO52 = ix86_gen_TWO52 (mode);
29114 /* Temporary for holding the result, initialized to the input
29115 operand to ease control flow. */
29116 res = gen_reg_rtx (mode);
29117 emit_move_insn (res, operand1);
29119 /* xa = abs (operand1) */
29120 xa = ix86_expand_sse_fabs (res, &mask);
29122 /* if (!isless (xa, TWO52)) goto label; */
29123 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29125 /* x = (double)(long)x */
29126 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29127 expand_fix (xi, res, 0);
29128 expand_float (res, xi, 0);
29130 if (HONOR_SIGNED_ZEROS (mode))
29131 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
29133 emit_label (label);
29134 LABEL_NUSES (label) = 1;
29136 emit_move_insn (operand0, res);
29139 /* Expand SSE sequence for computing trunc from OPERAND1 storing
29140 into OPERAND0. */
29141 void
29142 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
29144 enum machine_mode mode = GET_MODE (operand0);
29145 rtx xa, mask, TWO52, label, one, res, smask, tmp;
29147 /* C code for SSE variant we expand below.
29148 double xa = fabs (x), x2;
29149 if (!isless (xa, TWO52))
29150 return x;
29151 xa2 = xa + TWO52 - TWO52;
29152 Compensate:
29153 if (xa2 > xa)
29154 xa2 -= 1.0;
29155 x2 = copysign (xa2, x);
29156 return x2;
29159 TWO52 = ix86_gen_TWO52 (mode);
29161 /* Temporary for holding the result, initialized to the input
29162 operand to ease control flow. */
29163 res = gen_reg_rtx (mode);
29164 emit_move_insn (res, operand1);
29166 /* xa = abs (operand1) */
29167 xa = ix86_expand_sse_fabs (res, &smask);
29169 /* if (!isless (xa, TWO52)) goto label; */
29170 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29172 /* res = xa + TWO52 - TWO52; */
29173 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
29174 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
29175 emit_move_insn (res, tmp);
29177 /* generate 1.0 */
29178 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
29180 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
29181 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
29182 emit_insn (gen_rtx_SET (VOIDmode, mask,
29183 gen_rtx_AND (mode, mask, one)));
29184 tmp = expand_simple_binop (mode, MINUS,
29185 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
29186 emit_move_insn (res, tmp);
29188 /* res = copysign (res, operand1) */
29189 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
29191 emit_label (label);
29192 LABEL_NUSES (label) = 1;
29194 emit_move_insn (operand0, res);
29197 /* Expand SSE sequence for computing round from OPERAND1 storing
29198 into OPERAND0. */
29199 void
29200 ix86_expand_round (rtx operand0, rtx operand1)
29202 /* C code for the stuff we're doing below:
29203 double xa = fabs (x);
29204 if (!isless (xa, TWO52))
29205 return x;
29206 xa = (double)(long)(xa + nextafter (0.5, 0.0));
29207 return copysign (xa, x);
29209 enum machine_mode mode = GET_MODE (operand0);
29210 rtx res, TWO52, xa, label, xi, half, mask;
29211 const struct real_format *fmt;
29212 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
29214 /* Temporary for holding the result, initialized to the input
29215 operand to ease control flow. */
29216 res = gen_reg_rtx (mode);
29217 emit_move_insn (res, operand1);
29219 TWO52 = ix86_gen_TWO52 (mode);
29220 xa = ix86_expand_sse_fabs (res, &mask);
29221 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
29223 /* load nextafter (0.5, 0.0) */
29224 fmt = REAL_MODE_FORMAT (mode);
29225 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
29226 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
29228 /* xa = xa + 0.5 */
29229 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
29230 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
29232 /* xa = (double)(int64_t)xa */
29233 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
29234 expand_fix (xi, xa, 0);
29235 expand_float (xa, xi, 0);
29237 /* res = copysign (xa, operand1) */
29238 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
29240 emit_label (label);
29241 LABEL_NUSES (label) = 1;
29243 emit_move_insn (operand0, res);
29247 /* Table of valid machine attributes. */
29248 static const struct attribute_spec ix86_attribute_table[] =
29250 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
29251 /* Stdcall attribute says callee is responsible for popping arguments
29252 if they are not variable. */
29253 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29254 /* Fastcall attribute says callee is responsible for popping arguments
29255 if they are not variable. */
29256 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29257 /* Thiscall attribute says callee is responsible for popping arguments
29258 if they are not variable. */
29259 { "thiscall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29260 /* Cdecl attribute says the callee is a normal C declaration */
29261 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29262 /* Regparm attribute specifies how many integer arguments are to be
29263 passed in registers. */
29264 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
29265 /* Sseregparm attribute says we are using x86_64 calling conventions
29266 for FP arguments. */
29267 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
29268 /* force_align_arg_pointer says this function realigns the stack at entry. */
29269 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
29270 false, true, true, ix86_handle_cconv_attribute },
29271 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29272 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
29273 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
29274 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
29275 #endif
29276 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29277 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
29278 #ifdef SUBTARGET_ATTRIBUTE_TABLE
29279 SUBTARGET_ATTRIBUTE_TABLE,
29280 #endif
29281 /* ms_abi and sysv_abi calling convention function attributes. */
29282 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29283 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
29284 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
29285 /* End element. */
29286 { NULL, 0, 0, false, false, false, NULL }
29289 /* Implement targetm.vectorize.builtin_vectorization_cost. */
29290 static int
29291 ix86_builtin_vectorization_cost (bool runtime_test)
29293 /* If the branch of the runtime test is taken - i.e. the vectorized
29294 version is skipped - this incurs a misprediction cost (because the
29295 vectorized version is expected to be the fall-through). So we subtract
29296 the latency of a mispredicted branch from the costs that are incurred
29297 when the vectorized version is executed.
29299 TODO: The values in the individual target tables have to be tuned, or new
29300 fields may be needed. E.g. on K8, the default branch path is the
29301 not-taken path. If the taken path is predicted correctly, the minimum
29302 penalty of going down the taken path is 1 cycle. If the taken path is
29303 not predicted correctly, then the minimum penalty is 10 cycles. */
29305 if (runtime_test)
29307 return (-(ix86_cost->cond_taken_branch_cost));
29309 else
29310 return 0;
29313 /* Implement targetm.vectorize.builtin_vec_perm. */
29315 static tree
29316 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
29318 tree itype = TREE_TYPE (vec_type);
29319 bool u = TYPE_UNSIGNED (itype);
29320 enum machine_mode vmode = TYPE_MODE (vec_type);
29321 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
29322 bool ok = TARGET_SSE2;
29324 switch (vmode)
29326 case V4DFmode:
29327 ok = TARGET_AVX;
29328 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
29329 goto get_di;
29330 case V2DFmode:
29331 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
29332 get_di:
29333 itype = ix86_get_builtin_type (IX86_BT_DI);
29334 break;
29336 case V8SFmode:
29337 ok = TARGET_AVX;
29338 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
29339 goto get_si;
29340 case V4SFmode:
29341 ok = TARGET_SSE;
29342 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
29343 get_si:
29344 itype = ix86_get_builtin_type (IX86_BT_SI);
29345 break;
29347 case V2DImode:
29348 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
29349 break;
29350 case V4SImode:
29351 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
29352 break;
29353 case V8HImode:
29354 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
29355 break;
29356 case V16QImode:
29357 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
29358 break;
29359 default:
29360 ok = false;
29361 break;
29364 if (!ok)
29365 return NULL_TREE;
29367 *mask_type = itype;
29368 return ix86_builtins[(int) fcode];
29371 /* Return a vector mode with twice as many elements as VMODE. */
29372 /* ??? Consider moving this to a table generated by genmodes.c. */
29374 static enum machine_mode
29375 doublesize_vector_mode (enum machine_mode vmode)
29377 switch (vmode)
29379 case V2SFmode: return V4SFmode;
29380 case V1DImode: return V2DImode;
29381 case V2SImode: return V4SImode;
29382 case V4HImode: return V8HImode;
29383 case V8QImode: return V16QImode;
29385 case V2DFmode: return V4DFmode;
29386 case V4SFmode: return V8SFmode;
29387 case V2DImode: return V4DImode;
29388 case V4SImode: return V8SImode;
29389 case V8HImode: return V16HImode;
29390 case V16QImode: return V32QImode;
29392 case V4DFmode: return V8DFmode;
29393 case V8SFmode: return V16SFmode;
29394 case V4DImode: return V8DImode;
29395 case V8SImode: return V16SImode;
29396 case V16HImode: return V32HImode;
29397 case V32QImode: return V64QImode;
29399 default:
29400 gcc_unreachable ();
29404 /* Construct (set target (vec_select op0 (parallel perm))) and
29405 return true if that's a valid instruction in the active ISA. */
29407 static bool
29408 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29410 rtx rperm[MAX_VECT_LEN], x;
29411 unsigned i;
29413 for (i = 0; i < nelt; ++i)
29414 rperm[i] = GEN_INT (perm[i]);
29416 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29417 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29418 x = gen_rtx_SET (VOIDmode, target, x);
29420 x = emit_insn (x);
29421 if (recog_memoized (x) < 0)
29423 remove_insn (x);
29424 return false;
29426 return true;
29429 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29431 static bool
29432 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29433 const unsigned char *perm, unsigned nelt)
29435 enum machine_mode v2mode;
29436 rtx x;
29438 v2mode = doublesize_vector_mode (GET_MODE (op0));
29439 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29440 return expand_vselect (target, x, perm, nelt);
29443 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29444 in terms of blendp[sd] / pblendw / pblendvb. */
29446 static bool
29447 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29449 enum machine_mode vmode = d->vmode;
29450 unsigned i, mask, nelt = d->nelt;
29451 rtx target, op0, op1, x;
29453 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29454 return false;
29455 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29456 return false;
29458 /* This is a blend, not a permute. Elements must stay in their
29459 respective lanes. */
29460 for (i = 0; i < nelt; ++i)
29462 unsigned e = d->perm[i];
29463 if (!(e == i || e == i + nelt))
29464 return false;
29467 if (d->testing_p)
29468 return true;
29470 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29471 decision should be extracted elsewhere, so that we only try that
29472 sequence once all budget==3 options have been tried. */
29474 /* For bytes, see if bytes move in pairs so we can use pblendw with
29475 an immediate argument, rather than pblendvb with a vector argument. */
29476 if (vmode == V16QImode)
29478 bool pblendw_ok = true;
29479 for (i = 0; i < 16 && pblendw_ok; i += 2)
29480 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29482 if (!pblendw_ok)
29484 rtx rperm[16], vperm;
29486 for (i = 0; i < nelt; ++i)
29487 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29489 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29490 vperm = force_reg (V16QImode, vperm);
29492 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29493 return true;
29497 target = d->target;
29498 op0 = d->op0;
29499 op1 = d->op1;
29500 mask = 0;
29502 switch (vmode)
29504 case V4DFmode:
29505 case V8SFmode:
29506 case V2DFmode:
29507 case V4SFmode:
29508 case V8HImode:
29509 for (i = 0; i < nelt; ++i)
29510 mask |= (d->perm[i] >= nelt) << i;
29511 break;
29513 case V2DImode:
29514 for (i = 0; i < 2; ++i)
29515 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29516 goto do_subreg;
29518 case V4SImode:
29519 for (i = 0; i < 4; ++i)
29520 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29521 goto do_subreg;
29523 case V16QImode:
29524 for (i = 0; i < 8; ++i)
29525 mask |= (d->perm[i * 2] >= 16) << i;
29527 do_subreg:
29528 vmode = V8HImode;
29529 target = gen_lowpart (vmode, target);
29530 op0 = gen_lowpart (vmode, op0);
29531 op1 = gen_lowpart (vmode, op1);
29532 break;
29534 default:
29535 gcc_unreachable ();
29538 /* This matches five different patterns with the different modes. */
29539 x = gen_rtx_VEC_MERGE (vmode, op1, op0, GEN_INT (mask));
29540 x = gen_rtx_SET (VOIDmode, target, x);
29541 emit_insn (x);
29543 return true;
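/* Example of the mask encoding above: a V4SImode blend with
   perm = { 0, 5, 2, 7 } takes elements 1 and 3 from op1.  SSE4.1 has no
   dword blend with an immediate, so the operands are viewed as V8HImode
   and each dword contributes the two word-sized mask bits it spans:
   (3 << 2) | (3 << 6) == 0xCC, emitted as pblendw with immediate 0xCC.  */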
29546 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29547 in terms of the variable form of vpermilps.
29549 Note that we will have already failed the immediate input vpermilps,
29550 which requires that the high and low part shuffle be identical; the
29551 variable form doesn't require that. */
29553 static bool
29554 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29556 rtx rperm[8], vperm;
29557 unsigned i;
29559 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29560 return false;
29562 /* We can only permute within the 128-bit lane. */
29563 for (i = 0; i < 8; ++i)
29565 unsigned e = d->perm[i];
29566 if (i < 4 ? e >= 4 : e < 4)
29567 return false;
29570 if (d->testing_p)
29571 return true;
29573 for (i = 0; i < 8; ++i)
29575 unsigned e = d->perm[i];
29577 /* Within each 128-bit lane, the elements of op0 are numbered
29578 from 0 and the elements of op1 are numbered from 4. */
29579 if (e >= 8 + 4)
29580 e -= 8;
29581 else if (e >= 4)
29582 e -= 4;
29584 rperm[i] = GEN_INT (e);
29587 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29588 vperm = force_reg (V8SImode, vperm);
29589 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29591 return true;
29594 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29595 in terms of pshufb or vpperm. */
29597 static bool
29598 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29600 unsigned i, nelt, eltsz;
29601 rtx rperm[16], vperm, target, op0, op1;
29603 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29604 return false;
29605 if (GET_MODE_SIZE (d->vmode) != 16)
29606 return false;
29608 if (d->testing_p)
29609 return true;
29611 nelt = d->nelt;
29612 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29614 for (i = 0; i < nelt; ++i)
29616 unsigned j, e = d->perm[i];
29617 for (j = 0; j < eltsz; ++j)
29618 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29621 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29622 vperm = force_reg (V16QImode, vperm);
29624 target = gen_lowpart (V16QImode, d->target);
29625 op0 = gen_lowpart (V16QImode, d->op0);
29626 if (d->op0 == d->op1)
29627 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29628 else
29630 op1 = gen_lowpart (V16QImode, d->op1);
29631 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29634 return true;
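/* To illustrate the control-vector construction above: a single-operand
   V4SImode permutation { 1, 3, 0, 2 } expands each dword index e into the
   byte indices 4*e .. 4*e+3, giving the V16QImode selector
     { 4,5,6,7, 12,13,14,15, 0,1,2,3, 8,9,10,11 }
   which is then used by pshufb (or by vpperm when the operands differ).  */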
29637 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29638 in a single instruction. */
29640 static bool
29641 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29643 unsigned i, nelt = d->nelt;
29644 unsigned char perm2[MAX_VECT_LEN];
29646 /* Check plain VEC_SELECT first, because AVX has instructions that could
29647 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29648 input where SEL+CONCAT may not. */
29649 if (d->op0 == d->op1)
29651 int mask = nelt - 1;
29653 for (i = 0; i < nelt; i++)
29654 perm2[i] = d->perm[i] & mask;
29656 if (expand_vselect (d->target, d->op0, perm2, nelt))
29657 return true;
29659 /* There are plenty of patterns in sse.md that are written for
29660 SEL+CONCAT and are not replicated for a single op. Perhaps
29661 that should be changed, to avoid the nastiness here. */
29663 /* Recognize interleave style patterns, which means incrementing
29664 every other permutation operand. */
29665 for (i = 0; i < nelt; i += 2)
29667 perm2[i] = d->perm[i] & mask;
29668 perm2[i + 1] = (d->perm[i + 1] & mask) + nelt;
29670 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29671 return true;
29673 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29674 if (nelt >= 4)
29676 for (i = 0; i < nelt; i += 4)
29678 perm2[i + 0] = d->perm[i + 0] & mask;
29679 perm2[i + 1] = d->perm[i + 1] & mask;
29680 perm2[i + 2] = (d->perm[i + 2] & mask) + nelt;
29681 perm2[i + 3] = (d->perm[i + 3] & mask) + nelt;
29684 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29685 return true;
29689 /* Finally, try the fully general two operand permute. */
29690 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29691 return true;
29693 /* Recognize interleave style patterns with reversed operands. */
29694 if (d->op0 != d->op1)
29696 for (i = 0; i < nelt; ++i)
29698 unsigned e = d->perm[i];
29699 if (e >= nelt)
29700 e -= nelt;
29701 else
29702 e += nelt;
29703 perm2[i] = e;
29706 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29707 return true;
29710 /* Try the SSE4.1 blend variable merge instructions. */
29711 if (expand_vec_perm_blend (d))
29712 return true;
29714 /* Try one of the AVX vpermil variable permutations. */
29715 if (expand_vec_perm_vpermil (d))
29716 return true;
29718 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29719 if (expand_vec_perm_pshufb (d))
29720 return true;
29722 return false;
29725 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29726 in terms of a pair of pshuflw + pshufhw instructions. */
29728 static bool
29729 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29731 unsigned char perm2[MAX_VECT_LEN];
29732 unsigned i;
29733 bool ok;
29735 if (d->vmode != V8HImode || d->op0 != d->op1)
29736 return false;
29738 /* The two permutations only operate in 64-bit lanes. */
29739 for (i = 0; i < 4; ++i)
29740 if (d->perm[i] >= 4)
29741 return false;
29742 for (i = 4; i < 8; ++i)
29743 if (d->perm[i] < 4)
29744 return false;
29746 if (d->testing_p)
29747 return true;
29749 /* Emit the pshuflw. */
29750 memcpy (perm2, d->perm, 4);
29751 for (i = 4; i < 8; ++i)
29752 perm2[i] = i;
29753 ok = expand_vselect (d->target, d->op0, perm2, 8);
29754 gcc_assert (ok);
29756 /* Emit the pshufhw. */
29757 memcpy (perm2 + 4, d->perm + 4, 4);
29758 for (i = 0; i < 4; ++i)
29759 perm2[i] = i;
29760 ok = expand_vselect (d->target, d->target, perm2, 8);
29761 gcc_assert (ok);
29763 return true;
29766 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29767 the permutation using the SSSE3 palignr instruction. This succeeds
29768 when all of the elements in PERM fit within one vector and we merely
29769 need to shift them down so that a single vector permutation has a
29770 chance to succeed. */
29772 static bool
29773 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29775 unsigned i, nelt = d->nelt;
29776 unsigned min, max;
29777 bool in_order, ok;
29778 rtx shift;
29780 /* Even with AVX, palignr only operates on 128-bit vectors. */
29781 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29782 return false;
29784 min = nelt, max = 0;
29785 for (i = 0; i < nelt; ++i)
29787 unsigned e = d->perm[i];
29788 if (e < min)
29789 min = e;
29790 if (e > max)
29791 max = e;
29793 if (min == 0 || max - min >= nelt)
29794 return false;
29796 /* Given that we have SSSE3, we know we'll be able to implement the
29797 single operand permutation after the palignr with pshufb. */
29798 if (d->testing_p)
29799 return true;
29801 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29802 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29803 gen_lowpart (TImode, d->op1),
29804 gen_lowpart (TImode, d->op0), shift));
29806 d->op0 = d->op1 = d->target;
29808 in_order = true;
29809 for (i = 0; i < nelt; ++i)
29811 unsigned e = d->perm[i] - min;
29812 if (e != i)
29813 in_order = false;
29814 d->perm[i] = e;
29817 /* Test for the degenerate case where the alignment by itself
29818 produces the desired permutation. */
29819 if (in_order)
29820 return true;
29822 ok = expand_vec_perm_1 (d);
29823 gcc_assert (ok);
29825 return ok;
29828 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29829 a two vector permutation into a single vector permutation by using
29830 an interleave operation to merge the vectors. */
29832 static bool
29833 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29835 struct expand_vec_perm_d dremap, dfinal;
29836 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29837 unsigned contents, h1, h2, h3, h4;
29838 unsigned char remap[2 * MAX_VECT_LEN];
29839 rtx seq;
29840 bool ok;
29842 if (d->op0 == d->op1)
29843 return false;
29845 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29846 lanes. We can use similar techniques with the vperm2f128 instruction,
29847 but it requires slightly different logic. */
29848 if (GET_MODE_SIZE (d->vmode) != 16)
29849 return false;
29851 /* Examine from whence the elements come. */
29852 contents = 0;
29853 for (i = 0; i < nelt; ++i)
29854 contents |= 1u << d->perm[i];
29856 /* Split the two input vectors into 4 halves. */
29857 h1 = (1u << nelt2) - 1;
29858 h2 = h1 << nelt2;
29859 h3 = h2 << nelt2;
29860 h4 = h3 << nelt2;
29862 memset (remap, 0xff, sizeof (remap));
29863 dremap = *d;
29865 /* If all elements come from the low halves, use interleave low; similarly,
29866 use interleave high if they all come from the high halves. If the elements
29867 come from mismatched halves, we can use shufps for V4SF/V4SI or do a DImode shuffle. */
29868 if ((contents & (h1 | h3)) == contents)
29870 for (i = 0; i < nelt2; ++i)
29872 remap[i] = i * 2;
29873 remap[i + nelt] = i * 2 + 1;
29874 dremap.perm[i * 2] = i;
29875 dremap.perm[i * 2 + 1] = i + nelt;
29878 else if ((contents & (h2 | h4)) == contents)
29880 for (i = 0; i < nelt2; ++i)
29882 remap[i + nelt2] = i * 2;
29883 remap[i + nelt + nelt2] = i * 2 + 1;
29884 dremap.perm[i * 2] = i + nelt2;
29885 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29888 else if ((contents & (h1 | h4)) == contents)
29890 for (i = 0; i < nelt2; ++i)
29892 remap[i] = i;
29893 remap[i + nelt + nelt2] = i + nelt2;
29894 dremap.perm[i] = i;
29895 dremap.perm[i + nelt2] = i + nelt + nelt2;
29897 if (nelt != 4)
29899 dremap.vmode = V2DImode;
29900 dremap.nelt = 2;
29901 dremap.perm[0] = 0;
29902 dremap.perm[1] = 3;
29905 else if ((contents & (h2 | h3)) == contents)
29907 for (i = 0; i < nelt2; ++i)
29909 remap[i + nelt2] = i;
29910 remap[i + nelt] = i + nelt2;
29911 dremap.perm[i] = i + nelt2;
29912 dremap.perm[i + nelt2] = i + nelt;
29914 if (nelt != 4)
29916 dremap.vmode = V2DImode;
29917 dremap.nelt = 2;
29918 dremap.perm[0] = 1;
29919 dremap.perm[1] = 2;
29922 else
29923 return false;
29925 /* Use the remapping array set up above to move the elements from their
29926 swizzled locations into their final destinations. */
29927 dfinal = *d;
29928 for (i = 0; i < nelt; ++i)
29930 unsigned e = remap[d->perm[i]];
29931 gcc_assert (e < nelt);
29932 dfinal.perm[i] = e;
29934 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29935 dfinal.op1 = dfinal.op0;
29936 dremap.target = dfinal.op0;
29938 /* Test if the final remap can be done with a single insn. For V4SFmode or
29939 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29940 start_sequence ();
29941 ok = expand_vec_perm_1 (&dfinal);
29942 seq = get_insns ();
29943 end_sequence ();
29945 if (!ok)
29946 return false;
29948 if (dremap.vmode != dfinal.vmode)
29950 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29951 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29952 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29955 ok = expand_vec_perm_1 (&dremap);
29956 gcc_assert (ok);
29958 emit_insn (seq);
29959 return true;
29962 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29963 permutation with two pshufb insns and an ior. We should have already
29964 failed all two instruction sequences. */
29966 static bool
29967 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29969 rtx rperm[2][16], vperm, l, h, op, m128;
29970 unsigned int i, nelt, eltsz;
29972 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29973 return false;
29974 gcc_assert (d->op0 != d->op1);
29976 nelt = d->nelt;
29977 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29979 /* Generate two permutation masks. If the required element is within
29980 the given vector it is shuffled into the proper lane. If the required
29981 element is in the other vector, force a zero into the lane by setting
29982 bit 7 in the permutation mask. */
29983 m128 = GEN_INT (-128);
29984 for (i = 0; i < nelt; ++i)
29986 unsigned j, e = d->perm[i];
29987 unsigned which = (e >= nelt);
29988 if (e >= nelt)
29989 e -= nelt;
29991 for (j = 0; j < eltsz; ++j)
29993 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29994 rperm[1-which][i*eltsz + j] = m128;
29998 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
29999 vperm = force_reg (V16QImode, vperm);
30001 l = gen_reg_rtx (V16QImode);
30002 op = gen_lowpart (V16QImode, d->op0);
30003 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
30005 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
30006 vperm = force_reg (V16QImode, vperm);
30008 h = gen_reg_rtx (V16QImode);
30009 op = gen_lowpart (V16QImode, d->op1);
30010 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
30012 op = gen_lowpart (V16QImode, d->target);
30013 emit_insn (gen_iorv16qi3 (op, l, h));
30015 return true;
30018 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
30019 and extract-odd permutations. */
30021 static bool
30022 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
30024 rtx t1, t2, t3, t4;
30026 switch (d->vmode)
30028 case V4DFmode:
30029 t1 = gen_reg_rtx (V4DFmode);
30030 t2 = gen_reg_rtx (V4DFmode);
30032 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
30033 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
30034 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
30036 /* Now an unpck[lh]pd will produce the result required. */
30037 if (odd)
30038 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
30039 else
30040 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
30041 emit_insn (t3);
30042 break;
30044 case V8SFmode:
30046 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
30047 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
30048 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
30050 t1 = gen_reg_rtx (V8SFmode);
30051 t2 = gen_reg_rtx (V8SFmode);
30052 t3 = gen_reg_rtx (V8SFmode);
30053 t4 = gen_reg_rtx (V8SFmode);
30055 /* Shuffle within the 128-bit lanes to produce:
30056 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
30057 expand_vselect (t1, d->op0, perm1, 8);
30058 expand_vselect (t2, d->op1, perm1, 8);
30060 /* Shuffle the lanes around to produce:
30061 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
30062 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
30063 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
30065 /* Now a vpermil2p will produce the result required. */
30066 /* ??? The vpermil2p requires a vector constant. Another option
30067 is a unpck[lh]ps to merge the two vectors to produce
30068 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
30069 vpermilps to get the elements into the final order. */
30070 d->op0 = t3;
30071 d->op1 = t4;
30072 memcpy (d->perm, odd ? permo: perme, 8);
30073 expand_vec_perm_vpermil (d);
30075 break;
30077 case V2DFmode:
30078 case V4SFmode:
30079 case V2DImode:
30080 case V4SImode:
30081 /* These are always directly implementable by expand_vec_perm_1. */
30082 gcc_unreachable ();
30084 case V8HImode:
30085 if (TARGET_SSSE3)
30086 return expand_vec_perm_pshufb2 (d);
30087 else
30089 /* We need 2*log2(N)-1 operations to achieve odd/even
30090 with interleave. */
30091 t1 = gen_reg_rtx (V8HImode);
30092 t2 = gen_reg_rtx (V8HImode);
30093 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
30094 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
30095 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
30096 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
30097 if (odd)
30098 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
30099 else
30100 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
30101 emit_insn (t3);
30103 break;
30105 case V16QImode:
30106 if (TARGET_SSSE3)
30107 return expand_vec_perm_pshufb2 (d);
30108 else
30110 t1 = gen_reg_rtx (V16QImode);
30111 t2 = gen_reg_rtx (V16QImode);
30112 t3 = gen_reg_rtx (V16QImode);
30113 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
30114 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
30115 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
30116 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
30117 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
30118 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
30119 if (odd)
30120 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
30121 else
30122 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
30123 emit_insn (t3);
30125 break;
30127 default:
30128 gcc_unreachable ();
30131 return true;
30134 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30135 extract-even and extract-odd permutations. */
30137 static bool
30138 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
30140 unsigned i, odd, nelt = d->nelt;
30142 odd = d->perm[0];
30143 if (odd != 0 && odd != 1)
30144 return false;
30146 for (i = 1; i < nelt; ++i)
30147 if (d->perm[i] != 2 * i + odd)
30148 return false;
30150 return expand_vec_perm_even_odd_1 (d, odd);
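/* E.g. for V8HImode the extract-even permutation recognized here is
   { 0, 2, 4, 6, 8, 10, 12, 14 } and extract-odd is
   { 1, 3, 5, 7, 9, 11, 13, 15 }, the patterns the vectorizer uses to
   deinterleave two input vectors.  */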
30153 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
30154 permutations. We assume that expand_vec_perm_1 has already failed. */
30156 static bool
30157 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
30159 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
30160 enum machine_mode vmode = d->vmode;
30161 unsigned char perm2[4];
30162 rtx op0 = d->op0;
30163 bool ok;
30165 switch (vmode)
30167 case V4DFmode:
30168 case V8SFmode:
30169 /* These are special-cased in sse.md so that we can optionally
30170 use the vbroadcast instruction. They expand to two insns
30171 if the input happens to be in a register. */
30172 gcc_unreachable ();
30174 case V2DFmode:
30175 case V2DImode:
30176 case V4SFmode:
30177 case V4SImode:
30178 /* These are always implementable using standard shuffle patterns. */
30179 gcc_unreachable ();
30181 case V8HImode:
30182 case V16QImode:
30183 /* These can be implemented via interleave. We save one insn by
30184 stopping once we have promoted to V4SImode and then use pshufd. */
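/* Illustrative trace (added annotation, not from the original source):
   to broadcast element 5 of a V8HImode vector, the first pass below picks
   the high interleave (5 >= 4) and adjusts elt to 1, producing
   {4 4 5 5 6 6 7 7}; viewed as V4SImode that is the four pairs
   {4,4} {5,5} {6,6} {7,7}, so a pshufd replicating element 1 broadcasts
   the {5,5} pair, i.e. the original element 5.  */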
30187 optab otab = vec_interleave_low_optab;
30189 if (elt >= nelt2)
30191 otab = vec_interleave_high_optab;
30192 elt -= nelt2;
30194 nelt2 /= 2;
30196 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
30197 vmode = get_mode_wider_vector (vmode);
30198 op0 = gen_lowpart (vmode, op0);
30200 while (vmode != V4SImode);
30202 memset (perm2, elt, 4);
30203 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
30204 gcc_assert (ok);
30205 return true;
30207 default:
30208 gcc_unreachable ();
30212 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
30213 broadcast permutations. */
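/* Added annotation (not from the original source): this matches masks in
   which every index is identical, e.g. {3 3 3 3} for V4SImode, and only
   when both operands are the same vector.  */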
30215 static bool
30216 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
30218 unsigned i, elt, nelt = d->nelt;
30220 if (d->op0 != d->op1)
30221 return false;
30223 elt = d->perm[0];
30224 for (i = 1; i < nelt; ++i)
30225 if (d->perm[i] != elt)
30226 return false;
30228 return expand_vec_perm_broadcast_1 (d);
30231 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
30232 With all of the interface bits taken care of, perform the expansion
30233 in D and return true on success. */
30235 static bool
30236 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
30238 /* Try a single instruction expansion. */
30239 if (expand_vec_perm_1 (d))
30240 return true;
30242 /* Try sequences of two instructions. */
30244 if (expand_vec_perm_pshuflw_pshufhw (d))
30245 return true;
30247 if (expand_vec_perm_palignr (d))
30248 return true;
30250 if (expand_vec_perm_interleave2 (d))
30251 return true;
30253 if (expand_vec_perm_broadcast (d))
30254 return true;
30256 /* Try sequences of three instructions. */
30258 if (expand_vec_perm_pshufb2 (d))
30259 return true;
30261 /* ??? Look for narrow permutations whose element orderings would
30262 allow the promotion to a wider mode. */
30264 /* ??? Look for sequences of interleave or a wider permute that place
30265 the data into the correct lanes for a half-vector shuffle like
30266 pshuf[lh]w or vpermilps. */
30268 /* ??? Look for sequences of interleave that produce the desired results.
30269 The combinatorics of punpck[lh] get pretty ugly... */
30271 if (expand_vec_perm_even_odd (d))
30272 return true;
30274 return false;
30277 /* Extract the values from the vector CST into the permutation array in D.
30278 Return 0 on error, 1 if all elements of the permutation come from the
30279 first vector, 2 if they all come from the second vector, and 3 otherwise. */
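/* Added annotation (not from the original source): the return value is a
   bitmask -- bit 0 is set if some index selects from the first vector and
   bit 1 if some index selects from the second.  For nelt == 4, the mask
   {0 1 4 5} therefore yields 3, while {4 5 6 7} yields 2 and is folded
   down to {0 1 2 3}.  */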
30281 static int
30282 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
30284 tree list = TREE_VECTOR_CST_ELTS (cst);
30285 unsigned i, nelt = d->nelt;
30286 int ret = 0;
30288 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
30290 unsigned HOST_WIDE_INT e;
30292 if (!host_integerp (TREE_VALUE (list), 1))
30293 return 0;
30294 e = tree_low_cst (TREE_VALUE (list), 1);
30295 if (e >= 2 * nelt)
30296 return 0;
30298 ret |= (e < nelt ? 1 : 2);
30299 d->perm[i] = e;
30301 gcc_assert (list == NULL);
30303 /* If all elements come from the second vector, fold them into the first. */
30304 if (ret == 2)
30305 for (i = 0; i < nelt; ++i)
30306 d->perm[i] -= nelt;
30308 return ret;
30311 static rtx
30312 ix86_expand_vec_perm_builtin (tree exp)
30314 struct expand_vec_perm_d d;
30315 tree arg0, arg1, arg2;
30317 arg0 = CALL_EXPR_ARG (exp, 0);
30318 arg1 = CALL_EXPR_ARG (exp, 1);
30319 arg2 = CALL_EXPR_ARG (exp, 2);
30321 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
30322 d.nelt = GET_MODE_NUNITS (d.vmode);
30323 d.testing_p = false;
30324 gcc_assert (VECTOR_MODE_P (d.vmode));
30326 if (TREE_CODE (arg2) != VECTOR_CST)
30328 error_at (EXPR_LOCATION (exp),
30329 "vector permutation requires vector constant");
30330 goto exit_error;
30333 switch (extract_vec_perm_cst (&d, arg2))
30335 default:
30336 gcc_unreachable ();
30338 case 0:
30339 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
30340 goto exit_error;
30342 case 3:
30343 if (!operand_equal_p (arg0, arg1, 0))
30345 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30346 d.op0 = force_reg (d.vmode, d.op0);
30347 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30348 d.op1 = force_reg (d.vmode, d.op1);
30349 break;
30352 /* The elements of PERM index into both operands, but the two
30353 operands are identical. Allow easier matching of the
30354 permutation by folding the permutation onto the single
30355 input vector. */
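/* Added annotation (not from the original source): e.g. with nelt == 4
   and identical operands, a mask of {4 5 2 3} is rewritten as {0 1 2 3}
   before expansion.  */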
30357 unsigned i, nelt = d.nelt;
30358 for (i = 0; i < nelt; ++i)
30359 if (d.perm[i] >= nelt)
30360 d.perm[i] -= nelt;
30362 /* FALLTHRU */
30364 case 1:
30365 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
30366 d.op0 = force_reg (d.vmode, d.op0);
30367 d.op1 = d.op0;
30368 break;
30370 case 2:
30371 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
30372 d.op0 = force_reg (d.vmode, d.op0);
30373 d.op1 = d.op0;
30374 break;
30377 d.target = gen_reg_rtx (d.vmode);
30378 if (ix86_expand_vec_perm_builtin_1 (&d))
30379 return d.target;
30381 /* For compiler-generated permutations, we should never get here, because
30382 the compiler should also be checking the ok hook. But this is a
30383 builtin the user has access to, so don't abort. */
30384 switch (d.nelt)
30386 case 2:
30387 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
30388 break;
30389 case 4:
30390 sorry ("vector permutation (%d %d %d %d)",
30391 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
30392 break;
30393 case 8:
30394 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
30395 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30396 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
30397 break;
30398 case 16:
30399 sorry ("vector permutation "
30400 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
30401 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
30402 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
30403 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
30404 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
30405 break;
30406 default:
30407 gcc_unreachable ();
30409 exit_error:
30410 return CONST0_RTX (d.vmode);
30413 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
30415 static bool
30416 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
30418 struct expand_vec_perm_d d;
30419 int vec_mask;
30420 bool ret, one_vec;
30422 d.vmode = TYPE_MODE (vec_type);
30423 d.nelt = GET_MODE_NUNITS (d.vmode);
30424 d.testing_p = true;
30426 /* Given sufficient ISA support we can just return true here
30427 for selected vector modes. */
30428 if (GET_MODE_SIZE (d.vmode) == 16)
30430 /* All implementable with a single vpperm insn. */
30431 if (TARGET_XOP)
30432 return true;
30433 /* All implementable with 2 pshufb + 1 ior. */
30434 if (TARGET_SSSE3)
30435 return true;
30436 /* All implementable with shufpd or unpck[lh]pd. */
30437 if (d.nelt == 2)
30438 return true;
30441 vec_mask = extract_vec_perm_cst (&d, mask);
30443 /* This hook cannot be called in response to something that the
30444 user does (unlike the builtin expander), so we should never see
30445 an error generated from the extract. */
30446 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30447 one_vec = (vec_mask != 3);
30449 /* Implementable with shufps or pshufd. */
30450 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30451 return true;
30453 /* Otherwise we have to go through the motions and see if we can
30454 figure out how to generate the requested permutation. */
30455 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30456 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30457 if (!one_vec)
30458 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30460 start_sequence ();
30461 ret = ix86_expand_vec_perm_builtin_1 (&d);
30462 end_sequence ();
30464 return ret;
30467 void
30468 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
30470 struct expand_vec_perm_d d;
30471 unsigned i, nelt;
30473 d.target = targ;
30474 d.op0 = op0;
30475 d.op1 = op1;
30476 d.vmode = GET_MODE (targ);
30477 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
30478 d.testing_p = false;
30480 for (i = 0; i < nelt; ++i)
30481 d.perm[i] = i * 2 + odd;
30483 /* We'll either be able to implement the permutation directly... */
30484 if (expand_vec_perm_1 (&d))
30485 return;
30487 /* ... or we use the special-case patterns. */
30488 expand_vec_perm_even_odd_1 (&d, odd);
30491 /* This function returns the calling-ABI-specific va_list type node,
30492 i.e. the va_list type specific to FNDECL. */
30494 tree
30495 ix86_fn_abi_va_list (tree fndecl)
30497 if (!TARGET_64BIT)
30498 return va_list_type_node;
30499 gcc_assert (fndecl != NULL_TREE);
30501 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
30502 return ms_va_list_type_node;
30503 else
30504 return sysv_va_list_type_node;
30507 /* Returns the canonical va_list type specified by TYPE. If no
30508 valid TYPE is provided, it returns NULL_TREE. */
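/* Added annotation (not from the original source): on 64-bit SysV targets
   __builtin_va_list is a one-element array of a record, so a va_list
   argument decays to a pointer when passed to another function; the
   unwrapping below lets both forms compare equal to the canonical type.  */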
30510 tree
30511 ix86_canonical_va_list_type (tree type)
30513 tree wtype, htype;
30515 /* Resolve references and pointers to va_list type. */
30516 if (INDIRECT_REF_P (type))
30517 type = TREE_TYPE (type);
30518 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
30519 type = TREE_TYPE (type);
30521 if (TARGET_64BIT)
30523 wtype = va_list_type_node;
30524 gcc_assert (wtype != NULL_TREE);
30525 htype = type;
30526 if (TREE_CODE (wtype) == ARRAY_TYPE)
30528 /* If va_list is an array type, the argument may have decayed
30529 to a pointer type, e.g. by being passed to another function.
30530 In that case, unwrap both types so that we can compare the
30531 underlying records. */
30532 if (TREE_CODE (htype) == ARRAY_TYPE
30533 || POINTER_TYPE_P (htype))
30535 wtype = TREE_TYPE (wtype);
30536 htype = TREE_TYPE (htype);
30539 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30540 return va_list_type_node;
30541 wtype = sysv_va_list_type_node;
30542 gcc_assert (wtype != NULL_TREE);
30543 htype = type;
30544 if (TREE_CODE (wtype) == ARRAY_TYPE)
30546 /* If va_list is an array type, the argument may have decayed
30547 to a pointer type, e.g. by being passed to another function.
30548 In that case, unwrap both types so that we can compare the
30549 underlying records. */
30550 if (TREE_CODE (htype) == ARRAY_TYPE
30551 || POINTER_TYPE_P (htype))
30553 wtype = TREE_TYPE (wtype);
30554 htype = TREE_TYPE (htype);
30557 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30558 return sysv_va_list_type_node;
30559 wtype = ms_va_list_type_node;
30560 gcc_assert (wtype != NULL_TREE);
30561 htype = type;
30562 if (TREE_CODE (wtype) == ARRAY_TYPE)
30564 /* If va_list is an array type, the argument may have decayed
30565 to a pointer type, e.g. by being passed to another function.
30566 In that case, unwrap both types so that we can compare the
30567 underlying records. */
30568 if (TREE_CODE (htype) == ARRAY_TYPE
30569 || POINTER_TYPE_P (htype))
30571 wtype = TREE_TYPE (wtype);
30572 htype = TREE_TYPE (htype);
30575 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30576 return ms_va_list_type_node;
30577 return NULL_TREE;
30579 return std_canonical_va_list_type (type);
30582 /* Iterate through the target-specific builtin types for va_list.
30583 IDX denotes the iterator, *PTREE is set to the type of the
30584 va_list builtin, and *PNAME to its internal name.
30585 Returns zero if there is no element for this index, otherwise
30586 IDX should be increased upon the next call.
30587 Note that the base builtin name __builtin_va_list itself is not iterated.
30588 Used from c_common_nodes_and_builtins. */
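/* Added annotation (not from the original source): a caller is expected
   to iterate roughly as

     for (i = 0; ix86_enum_va_list (i, &name, &type); ++i)
       ... register the builtin NAME with type TYPE ...

   stopping when zero is returned, which happens immediately when
   !TARGET_64BIT.  */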
30591 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
30593 if (!TARGET_64BIT)
30594 return 0;
30595 switch (idx) {
30596 case 0:
30597 *ptree = ms_va_list_type_node;
30598 *pname = "__builtin_ms_va_list";
30599 break;
30600 case 1:
30601 *ptree = sysv_va_list_type_node;
30602 *pname = "__builtin_sysv_va_list";
30603 break;
30604 default:
30605 return 0;
30607 return 1;
30610 /* Initialize the GCC target structure. */
30611 #undef TARGET_RETURN_IN_MEMORY
30612 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
30614 #undef TARGET_LEGITIMIZE_ADDRESS
30615 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
30617 #undef TARGET_ATTRIBUTE_TABLE
30618 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
30619 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30620 # undef TARGET_MERGE_DECL_ATTRIBUTES
30621 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
30622 #endif
30624 #undef TARGET_COMP_TYPE_ATTRIBUTES
30625 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
30627 #undef TARGET_INIT_BUILTINS
30628 #define TARGET_INIT_BUILTINS ix86_init_builtins
30629 #undef TARGET_BUILTIN_DECL
30630 #define TARGET_BUILTIN_DECL ix86_builtin_decl
30631 #undef TARGET_EXPAND_BUILTIN
30632 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
30634 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
30635 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
30636 ix86_builtin_vectorized_function
30638 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
30639 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
30641 #undef TARGET_BUILTIN_RECIPROCAL
30642 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
30644 #undef TARGET_ASM_FUNCTION_EPILOGUE
30645 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
30647 #undef TARGET_ENCODE_SECTION_INFO
30648 #ifndef SUBTARGET_ENCODE_SECTION_INFO
30649 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
30650 #else
30651 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
30652 #endif
30654 #undef TARGET_ASM_OPEN_PAREN
30655 #define TARGET_ASM_OPEN_PAREN ""
30656 #undef TARGET_ASM_CLOSE_PAREN
30657 #define TARGET_ASM_CLOSE_PAREN ""
30659 #undef TARGET_ASM_BYTE_OP
30660 #define TARGET_ASM_BYTE_OP ASM_BYTE
30662 #undef TARGET_ASM_ALIGNED_HI_OP
30663 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
30664 #undef TARGET_ASM_ALIGNED_SI_OP
30665 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
30666 #ifdef ASM_QUAD
30667 #undef TARGET_ASM_ALIGNED_DI_OP
30668 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
30669 #endif
30671 #undef TARGET_ASM_UNALIGNED_HI_OP
30672 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
30673 #undef TARGET_ASM_UNALIGNED_SI_OP
30674 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
30675 #undef TARGET_ASM_UNALIGNED_DI_OP
30676 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
30678 #undef TARGET_SCHED_ADJUST_COST
30679 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
30680 #undef TARGET_SCHED_ISSUE_RATE
30681 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
30682 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
30683 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
30684 ia32_multipass_dfa_lookahead
30686 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
30687 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
30689 #ifdef HAVE_AS_TLS
30690 #undef TARGET_HAVE_TLS
30691 #define TARGET_HAVE_TLS true
30692 #endif
30693 #undef TARGET_CANNOT_FORCE_CONST_MEM
30694 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
30695 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
30696 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
30698 #undef TARGET_DELEGITIMIZE_ADDRESS
30699 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
30701 #undef TARGET_MS_BITFIELD_LAYOUT_P
30702 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
30704 #if TARGET_MACHO
30705 #undef TARGET_BINDS_LOCAL_P
30706 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
30707 #endif
30708 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30709 #undef TARGET_BINDS_LOCAL_P
30710 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
30711 #endif
30713 #undef TARGET_ASM_OUTPUT_MI_THUNK
30714 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
30715 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
30716 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
30718 #undef TARGET_ASM_FILE_START
30719 #define TARGET_ASM_FILE_START x86_file_start
30721 #undef TARGET_DEFAULT_TARGET_FLAGS
30722 #define TARGET_DEFAULT_TARGET_FLAGS \
30723 (TARGET_DEFAULT \
30724 | TARGET_SUBTARGET_DEFAULT \
30725 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
30726 | MASK_FUSED_MADD)
30728 #undef TARGET_HANDLE_OPTION
30729 #define TARGET_HANDLE_OPTION ix86_handle_option
30731 #undef TARGET_RTX_COSTS
30732 #define TARGET_RTX_COSTS ix86_rtx_costs
30733 #undef TARGET_ADDRESS_COST
30734 #define TARGET_ADDRESS_COST ix86_address_cost
30736 #undef TARGET_FIXED_CONDITION_CODE_REGS
30737 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
30738 #undef TARGET_CC_MODES_COMPATIBLE
30739 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
30741 #undef TARGET_MACHINE_DEPENDENT_REORG
30742 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
30744 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
30745 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
30747 #undef TARGET_BUILD_BUILTIN_VA_LIST
30748 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
30750 #undef TARGET_FN_ABI_VA_LIST
30751 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
30753 #undef TARGET_CANONICAL_VA_LIST_TYPE
30754 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
30756 #undef TARGET_EXPAND_BUILTIN_VA_START
30757 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
30759 #undef TARGET_MD_ASM_CLOBBERS
30760 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
30762 #undef TARGET_PROMOTE_PROTOTYPES
30763 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
30764 #undef TARGET_STRUCT_VALUE_RTX
30765 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
30766 #undef TARGET_SETUP_INCOMING_VARARGS
30767 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
30768 #undef TARGET_MUST_PASS_IN_STACK
30769 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
30770 #undef TARGET_PASS_BY_REFERENCE
30771 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
30772 #undef TARGET_INTERNAL_ARG_POINTER
30773 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
30774 #undef TARGET_UPDATE_STACK_BOUNDARY
30775 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
30776 #undef TARGET_GET_DRAP_RTX
30777 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
30778 #undef TARGET_STRICT_ARGUMENT_NAMING
30779 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
30780 #undef TARGET_STATIC_CHAIN
30781 #define TARGET_STATIC_CHAIN ix86_static_chain
30782 #undef TARGET_TRAMPOLINE_INIT
30783 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
30785 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
30786 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
30788 #undef TARGET_SCALAR_MODE_SUPPORTED_P
30789 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
30791 #undef TARGET_VECTOR_MODE_SUPPORTED_P
30792 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
30794 #undef TARGET_C_MODE_FOR_SUFFIX
30795 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
30797 #ifdef HAVE_AS_TLS
30798 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
30799 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
30800 #endif
30802 #ifdef SUBTARGET_INSERT_ATTRIBUTES
30803 #undef TARGET_INSERT_ATTRIBUTES
30804 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
30805 #endif
30807 #undef TARGET_MANGLE_TYPE
30808 #define TARGET_MANGLE_TYPE ix86_mangle_type
30810 #undef TARGET_STACK_PROTECT_FAIL
30811 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
30813 #undef TARGET_FUNCTION_VALUE
30814 #define TARGET_FUNCTION_VALUE ix86_function_value
30816 #undef TARGET_FUNCTION_VALUE_REGNO_P
30817 #define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
30819 #undef TARGET_SECONDARY_RELOAD
30820 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
30822 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
30823 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
30824 ix86_builtin_vectorization_cost
30825 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
30826 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
30827 ix86_vectorize_builtin_vec_perm
30828 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
30829 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
30830 ix86_vectorize_builtin_vec_perm_ok
30832 #undef TARGET_SET_CURRENT_FUNCTION
30833 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
30835 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
30836 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
30838 #undef TARGET_OPTION_SAVE
30839 #define TARGET_OPTION_SAVE ix86_function_specific_save
30841 #undef TARGET_OPTION_RESTORE
30842 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
30844 #undef TARGET_OPTION_PRINT
30845 #define TARGET_OPTION_PRINT ix86_function_specific_print
30847 #undef TARGET_CAN_INLINE_P
30848 #define TARGET_CAN_INLINE_P ix86_can_inline_p
30850 #undef TARGET_EXPAND_TO_RTL_HOOK
30851 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
30853 #undef TARGET_LEGITIMATE_ADDRESS_P
30854 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
30856 #undef TARGET_IRA_COVER_CLASSES
30857 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
30859 #undef TARGET_FRAME_POINTER_REQUIRED
30860 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
30862 #undef TARGET_CAN_ELIMINATE
30863 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
30865 #undef TARGET_ASM_CODE_END
30866 #define TARGET_ASM_CODE_END ix86_code_end
30868 struct gcc_target targetm = TARGET_INITIALIZER;
30870 #include "gt-i386.h"