/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "c-common.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "tree-gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
#include "params.h"

static int x86_builtin_vectorization_cost (bool);
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)

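/* For example, MODE_INDEX (SImode) is 2, so a multiply cost lookup is,
   roughly, ix86_cost->mult_init[MODE_INDEX (mode)] (mult_init being the
   multiply table field of struct processor_costs declared in i386.h);
   anything wider than DImode falls into the trailing "other" slot at
   index 4.  */
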
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

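/* Under that assumption the two scales line up: COSTS_N_BYTES (2) == 4 ==
   COSTS_N_INSNS (1), i.e. a two-byte add costs exactly one "average"
   instruction, so the size-based table below stays directly comparable
   with the speed-based tables that follow it.  */
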
#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}

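/* Each stringop descriptor below supplies the algorithm used when the
   block size is unknown at compile time, followed by {max_size, algorithm}
   pairs tried in order, where a max_size of -1 terminates the table and
   covers all larger sizes (see the stringop_algs definition in i386.h for
   the exact fields).  DUMMY_STRINGOP_ALGS simply fills the slot for
   whichever of the 32-bit/64-bit variants is unused.  */
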
static const
struct processor_costs size_cost = {    /* costs for tuning for size */
  COSTS_N_BYTES (2),                    /* cost of an add instruction */
  COSTS_N_BYTES (3),                    /* cost of a lea instruction */
  COSTS_N_BYTES (2),                    /* variable shift costs */
  COSTS_N_BYTES (3),                    /* constant shift costs */
  {COSTS_N_BYTES (3),                   /* cost of starting multiply for QI */
   COSTS_N_BYTES (3),                   /*                               HI */
   COSTS_N_BYTES (3),                   /*                               SI */
   COSTS_N_BYTES (3),                   /*                               DI */
   COSTS_N_BYTES (5)},                  /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),                   /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),                   /*                          HI */
   COSTS_N_BYTES (3),                   /*                          SI */
   COSTS_N_BYTES (3),                   /*                          DI */
   COSTS_N_BYTES (5)},                  /*                       other */
  COSTS_N_BYTES (3),                    /* cost of movsx */
  COSTS_N_BYTES (3),                    /* cost of movzx */
  0,                                    /* "large" insn */
  2,                                    /* MOVE_RATIO */
  2,                                    /* cost for loading QImode using movzbl */
  {2, 2, 2},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 2, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {2, 2, 2},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {2, 2, 2},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  3,                                    /* cost of moving MMX register */
  {3, 3},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {3, 3},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  3,                                    /* cost of moving SSE register */
  {3, 3, 3},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {3, 3, 3},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
  0,                                    /* size of l1 cache */
  0,                                    /* size of l2 cache */
  0,                                    /* size of prefetch block */
  0,                                    /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_BYTES (2),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),                    /* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),                    /* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),                    /* cost of FABS instruction.  */
  COSTS_N_BYTES (2),                    /* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),                    /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  1,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  1,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {    /* 386 specific costs */
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (3),                    /* variable shift costs */
  COSTS_N_INSNS (2),                    /* constant shift costs */
  {COSTS_N_INSNS (6),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (6),                   /*                               HI */
   COSTS_N_INSNS (6),                   /*                               SI */
   COSTS_N_INSNS (6),                   /*                               DI */
   COSTS_N_INSNS (6)},                  /*                            other */
  COSTS_N_INSNS (1),                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),                  /*                          HI */
   COSTS_N_INSNS (23),                  /*                          SI */
   COSTS_N_INSNS (23),                  /*                          DI */
   COSTS_N_INSNS (23)},                 /*                       other */
  COSTS_N_INSNS (3),                    /* cost of movsx */
  COSTS_N_INSNS (2),                    /* cost of movzx */
  15,                                   /* "large" insn */
  3,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {2, 4, 2},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 4, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {8, 8, 8},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {8, 8, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {4, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 8, 16},                           /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 8, 16},                           /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
  0,                                    /* size of l1 cache */
  0,                                    /* size of l2 cache */
  0,                                    /* size of prefetch block */
  0,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_INSNS (23),                   /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),                   /* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),                   /* cost of FABS instruction.  */
  COSTS_N_INSNS (24),                   /* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),                  /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs i486_cost = {    /* 486 specific costs */
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (3),                    /* variable shift costs */
  COSTS_N_INSNS (2),                    /* constant shift costs */
  {COSTS_N_INSNS (12),                  /* cost of starting multiply for QI */
   COSTS_N_INSNS (12),                  /*                               HI */
   COSTS_N_INSNS (12),                  /*                               SI */
   COSTS_N_INSNS (12),                  /*                               DI */
   COSTS_N_INSNS (12)},                 /*                            other */
  1,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),                  /*                          HI */
   COSTS_N_INSNS (40),                  /*                          SI */
   COSTS_N_INSNS (40),                  /*                          DI */
   COSTS_N_INSNS (40)},                 /*                       other */
  COSTS_N_INSNS (3),                    /* cost of movsx */
  COSTS_N_INSNS (2),                    /* cost of movzx */
  15,                                   /* "large" insn */
  3,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {2, 4, 2},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 4, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {8, 8, 8},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {8, 8, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {4, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 8, 16},                           /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 8, 16},                           /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
  4,                                    /* size of l1 cache.  486 has 8kB cache
                                           shared for code and data, so 4kB is
                                           not really precise.  */
  4,                                    /* size of l2 cache */
  0,                                    /* size of prefetch block */
  0,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_INSNS (8),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),                   /* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),                   /* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (4),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (11),                  /* cost of starting multiply for QI */
   COSTS_N_INSNS (11),                  /*                               HI */
   COSTS_N_INSNS (11),                  /*                               SI */
   COSTS_N_INSNS (11),                  /*                               DI */
   COSTS_N_INSNS (11)},                 /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),                  /*                          HI */
   COSTS_N_INSNS (25),                  /*                          SI */
   COSTS_N_INSNS (25),                  /*                          DI */
   COSTS_N_INSNS (25)},                 /*                       other */
  COSTS_N_INSNS (3),                    /* cost of movsx */
  COSTS_N_INSNS (2),                    /* cost of movzx */
  8,                                    /* "large" insn */
  6,                                    /* MOVE_RATIO */
  6,                                    /* cost for loading QImode using movzbl */
  {2, 4, 2},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 4, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {2, 2, 6},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 6},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  8,                                    /* cost of moving MMX register */
  {8, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {8, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 8, 16},                           /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 8, 16},                           /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
  8,                                    /* size of l1 cache.  */
  8,                                    /* size of l2 cache */
  0,                                    /* size of prefetch block */
  0,                                    /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_INSNS (3),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),                   /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (4),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /*                               HI */
   COSTS_N_INSNS (4),                   /*                               SI */
   COSTS_N_INSNS (4),                   /*                               DI */
   COSTS_N_INSNS (4)},                  /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),                  /*                          HI */
   COSTS_N_INSNS (17),                  /*                          SI */
   COSTS_N_INSNS (17),                  /*                          DI */
   COSTS_N_INSNS (17)},                 /*                       other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  6,                                    /* MOVE_RATIO */
  2,                                    /* cost for loading QImode using movzbl */
  {4, 4, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 2, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {2, 2, 6},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 6},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {2, 2},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {2, 2},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {2, 2, 8},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {2, 2, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
  8,                                    /* size of l1 cache.  */
  256,                                  /* size of l2 cache */
  32,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_INSNS (3),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),                   /* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb has apparently a more expensive startup time in the
     CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (2),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /*                               HI */
   COSTS_N_INSNS (7),                   /*                               SI */
   COSTS_N_INSNS (7),                   /*                               DI */
   COSTS_N_INSNS (7)},                  /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),                  /*                          HI */
   COSTS_N_INSNS (39),                  /*                          SI */
   COSTS_N_INSNS (39),                  /*                          DI */
   COSTS_N_INSNS (39)},                 /*                       other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  4,                                    /* MOVE_RATIO */
  1,                                    /* cost for loading QImode using movzbl */
  {1, 1, 1},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {1, 1, 1},                            /* cost of storing integer registers */
  1,                                    /* cost of reg,reg fld/fst */
  {1, 1, 1},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 6, 6},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */

  1,                                    /* cost of moving MMX register */
  {1, 1},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {1, 1},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  1,                                    /* cost of moving SSE register */
  {1, 1, 1},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {1, 1, 1},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  1,                                    /* MMX or SSE register to integer */
  64,                                   /* size of l1 cache.  */
  128,                                  /* size of l2 cache.  */
  32,                                   /* size of prefetch block */
  1,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_INSNS (6),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),                   /* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),                   /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (2),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),                   /*                               HI */
   COSTS_N_INSNS (3),                   /*                               SI */
   COSTS_N_INSNS (3),                   /*                               DI */
   COSTS_N_INSNS (3)},                  /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),                  /*                          HI */
   COSTS_N_INSNS (18),                  /*                          SI */
   COSTS_N_INSNS (18),                  /*                          DI */
   COSTS_N_INSNS (18)},                 /*                       other */
  COSTS_N_INSNS (2),                    /* cost of movsx */
  COSTS_N_INSNS (2),                    /* cost of movzx */
  8,                                    /* "large" insn */
  4,                                    /* MOVE_RATIO */
  3,                                    /* cost for loading QImode using movzbl */
  {4, 5, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 3, 2},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {6, 6, 6},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 4},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {2, 2},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {2, 2},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {2, 2, 8},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {2, 2, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  6,                                    /* MMX or SSE register to integer */
  32,                                   /* size of l1 cache.  */
  32,                                   /* size of l2 cache.  Some models
                                           have integrated l2 cache, but
                                           optimizing for k6 is not important
                                           enough to worry about that.  */
  32,                                   /* size of prefetch block */
  1,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_INSNS (2),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),                   /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (2),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (5),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (5),                   /*                               HI */
   COSTS_N_INSNS (5),                   /*                               SI */
   COSTS_N_INSNS (5),                   /*                               DI */
   COSTS_N_INSNS (5)},                  /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),                  /*                          HI */
   COSTS_N_INSNS (42),                  /*                          SI */
   COSTS_N_INSNS (74),                  /*                          DI */
   COSTS_N_INSNS (74)},                 /*                       other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  9,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {3, 4, 3},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {3, 4, 3},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {4, 4, 12},                           /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {4, 4},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 4},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 4, 6},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 4, 5},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  5,                                    /* MMX or SSE register to integer */
  64,                                   /* size of l1 cache.  */
  256,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  5,                                    /* Branch cost */
  COSTS_N_INSNS (4),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),                   /* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (2),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /*                               HI */
   COSTS_N_INSNS (3),                   /*                               SI */
   COSTS_N_INSNS (4),                   /*                               DI */
   COSTS_N_INSNS (5)},                  /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),                  /*                          HI */
   COSTS_N_INSNS (42),                  /*                          SI */
   COSTS_N_INSNS (74),                  /*                          DI */
   COSTS_N_INSNS (74)},                 /*                       other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  9,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {3, 4, 3},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {3, 4, 3},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {4, 4, 12},                           /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {3, 3},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 4},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 3, 6},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 4, 5},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  5,                                    /* MMX or SSE register to integer */
  64,                                   /* size of l1 cache.  */
  512,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,                                  /* number of parallel prefetches */
  3,                                    /* Branch cost */
  COSTS_N_INSNS (4),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),                   /* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use a loop.  For large blocks, a libcall
     can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                                    /* scalar_stmt_cost.  */
  2,                                    /* scalar_load_cost.  */
  2,                                    /* scalar_store_cost.  */
  5,                                    /* vec_stmt_cost.  */
  0,                                    /* vec_to_scalar_cost.  */
  2,                                    /* scalar_to_vec_cost.  */
  2,                                    /* vec_align_load_cost.  */
  3,                                    /* vec_unalign_load_cost.  */
  3,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  2,                                    /* cond_not_taken_branch_cost.  */
};

struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (2),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /*                               HI */
   COSTS_N_INSNS (3),                   /*                               SI */
   COSTS_N_INSNS (4),                   /*                               DI */
   COSTS_N_INSNS (5)},                  /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),                  /*                          HI */
   COSTS_N_INSNS (51),                  /*                          SI */
   COSTS_N_INSNS (83),                  /*                          DI */
   COSTS_N_INSNS (83)},                 /*                       other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  9,                                    /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {3, 4, 3},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {3, 4, 3},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {4, 4, 12},                           /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {3, 3},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 4},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {4, 4, 3},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 4, 5},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  3,                                    /* MMX or SSE register to integer */
                                        /* On K8:
                                             MOVD reg64, xmmreg  Double  FSTORE 4
                                             MOVD reg32, xmmreg  Double  FSTORE 4
                                           On AMDFAM10:
                                             MOVD reg64, xmmreg  Double  FADD 3
                                                                 1/1     1/1
                                             MOVD reg32, xmmreg  Double  FADD 3
                                                                 1/1     1/1  */
  64,                                   /* size of l1 cache.  */
  512,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it is probably not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,                                  /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_INSNS (4),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),                   /* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instruction for medium sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, a
     libcall can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,                                    /* scalar_stmt_cost.  */
  2,                                    /* scalar_load_cost.  */
  2,                                    /* scalar_store_cost.  */
  6,                                    /* vec_stmt_cost.  */
  0,                                    /* vec_to_scalar_cost.  */
  2,                                    /* scalar_to_vec_cost.  */
  2,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  2,                                    /* vec_store_cost.  */
  2,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (3),                    /* cost of a lea instruction */
  COSTS_N_INSNS (4),                    /* variable shift costs */
  COSTS_N_INSNS (4),                    /* constant shift costs */
  {COSTS_N_INSNS (15),                  /* cost of starting multiply for QI */
   COSTS_N_INSNS (15),                  /*                               HI */
   COSTS_N_INSNS (15),                  /*                               SI */
   COSTS_N_INSNS (15),                  /*                               DI */
   COSTS_N_INSNS (15)},                 /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),                  /*                          HI */
   COSTS_N_INSNS (56),                  /*                          SI */
   COSTS_N_INSNS (56),                  /*                          DI */
   COSTS_N_INSNS (56)},                 /*                       other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  16,                                   /* "large" insn */
  6,                                    /* MOVE_RATIO */
  2,                                    /* cost for loading QImode using movzbl */
  {4, 5, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {2, 3, 2},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {2, 2, 6},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 6},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {2, 2},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {2, 2},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  12,                                   /* cost of moving SSE register */
  {12, 12, 12},                         /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {2, 2, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  10,                                   /* MMX or SSE register to integer */
  8,                                    /* size of l1 cache.  */
  256,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  2,                                    /* Branch cost */
  COSTS_N_INSNS (5),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),                   /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1),                    /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (10),                  /* cost of starting multiply for QI */
   COSTS_N_INSNS (10),                  /*                               HI */
   COSTS_N_INSNS (10),                  /*                               SI */
   COSTS_N_INSNS (10),                  /*                               DI */
   COSTS_N_INSNS (10)},                 /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),                  /*                          HI */
   COSTS_N_INSNS (66),                  /*                          SI */
   COSTS_N_INSNS (66),                  /*                          DI */
   COSTS_N_INSNS (66)},                 /*                       other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  16,                                   /* "large" insn */
  17,                                   /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {4, 4, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {4, 4, 4},                            /* cost of storing integer registers */
  3,                                    /* cost of reg,reg fld/fst */
  {12, 12, 12},                         /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 4},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  6,                                    /* cost of moving MMX register */
  {12, 12},                             /* cost of loading MMX registers
                                           in SImode and DImode */
  {12, 12},                             /* cost of storing MMX registers
                                           in SImode and DImode */
  6,                                    /* cost of moving SSE register */
  {12, 12, 12},                         /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {12, 12, 12},                         /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  8,                                    /* MMX or SSE register to integer */
  8,                                    /* size of l1 cache.  */
  1024,                                 /* size of l2 cache.  */
  128,                                  /* size of prefetch block */
  8,                                    /* number of parallel prefetches */
  1,                                    /* Branch cost */
  COSTS_N_INSNS (6),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),                   /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
              {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,                /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),                   /*                               HI */
   COSTS_N_INSNS (3),                   /*                               SI */
   COSTS_N_INSNS (3),                   /*                               DI */
   COSTS_N_INSNS (3)},                  /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),                  /*                          HI */
   COSTS_N_INSNS (22),                  /*                          SI */
   COSTS_N_INSNS (22),                  /*                          DI */
   COSTS_N_INSNS (22)},                 /*                       other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  16,                                   /* MOVE_RATIO */
  2,                                    /* cost for loading QImode using movzbl */
  {6, 6, 6},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {4, 4, 4},                            /* cost of storing integer registers */
  2,                                    /* cost of reg,reg fld/fst */
  {6, 6, 6},                            /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {4, 4, 4},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {6, 6},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {4, 4},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {6, 6, 6},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {4, 4, 4},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  2,                                    /* MMX or SSE register to integer */
  32,                                   /* size of l1 cache.  */
  2048,                                 /* size of l2 cache.  */
  128,                                  /* size of prefetch block */
  8,                                    /* number of parallel prefetches */
  3,                                    /* Branch cost */
  COSTS_N_INSNS (3),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),                   /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

/* Generic64 should produce code tuned for Nocona and K8.  */
static const
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  /* On all chips taken into consideration, lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results in
     the use of unnecessary temporary registers, causing a regression on
     several SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,                /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /*                               HI */
   COSTS_N_INSNS (3),                   /*                               SI */
   COSTS_N_INSNS (4),                   /*                               DI */
   COSTS_N_INSNS (2)},                  /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),                  /*                          HI */
   COSTS_N_INSNS (42),                  /*                          SI */
   COSTS_N_INSNS (74),                  /*                          DI */
   COSTS_N_INSNS (74)},                 /*                       other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  17,                                   /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {4, 4, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {4, 4, 4},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {12, 12, 12},                         /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {8, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {8, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {8, 8, 8},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {8, 8, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  5,                                    /* MMX or SSE register to integer */
  32,                                   /* size of l1 cache.  */
  512,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to the perhaps more appropriate value of 5.  */
  3,                                    /* Branch cost */
  COSTS_N_INSNS (8),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),                   /* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */
static const
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),                    /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,                /* cost of a lea instruction */
  COSTS_N_INSNS (1),                    /* variable shift costs */
  COSTS_N_INSNS (1),                    /* constant shift costs */
  {COSTS_N_INSNS (3),                   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),                   /*                               HI */
   COSTS_N_INSNS (3),                   /*                               SI */
   COSTS_N_INSNS (4),                   /*                               DI */
   COSTS_N_INSNS (2)},                  /*                            other */
  0,                                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),                  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),                  /*                          HI */
   COSTS_N_INSNS (42),                  /*                          SI */
   COSTS_N_INSNS (74),                  /*                          DI */
   COSTS_N_INSNS (74)},                 /*                       other */
  COSTS_N_INSNS (1),                    /* cost of movsx */
  COSTS_N_INSNS (1),                    /* cost of movzx */
  8,                                    /* "large" insn */
  17,                                   /* MOVE_RATIO */
  4,                                    /* cost for loading QImode using movzbl */
  {4, 4, 4},                            /* cost of loading integer registers
                                           in QImode, HImode and SImode.
                                           Relative to reg-reg move (2).  */
  {4, 4, 4},                            /* cost of storing integer registers */
  4,                                    /* cost of reg,reg fld/fst */
  {12, 12, 12},                         /* cost of loading fp registers
                                           in SFmode, DFmode and XFmode */
  {6, 6, 8},                            /* cost of storing fp registers
                                           in SFmode, DFmode and XFmode */
  2,                                    /* cost of moving MMX register */
  {8, 8},                               /* cost of loading MMX registers
                                           in SImode and DImode */
  {8, 8},                               /* cost of storing MMX registers
                                           in SImode and DImode */
  2,                                    /* cost of moving SSE register */
  {8, 8, 8},                            /* cost of loading SSE registers
                                           in SImode, DImode and TImode */
  {8, 8, 8},                            /* cost of storing SSE registers
                                           in SImode, DImode and TImode */
  5,                                    /* MMX or SSE register to integer */
  32,                                   /* size of l1 cache.  */
  256,                                  /* size of l2 cache.  */
  64,                                   /* size of prefetch block */
  6,                                    /* number of parallel prefetches */
  3,                                    /* Branch cost */
  COSTS_N_INSNS (8),                    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),                    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),                   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),                    /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),                    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),                   /* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,                                    /* scalar_stmt_cost.  */
  1,                                    /* scalar_load_cost.  */
  1,                                    /* scalar_store_cost.  */
  1,                                    /* vec_stmt_cost.  */
  1,                                    /* vec_to_scalar_cost.  */
  1,                                    /* scalar_to_vec_cost.  */
  1,                                    /* vec_align_load_cost.  */
  2,                                    /* vec_unalign_load_cost.  */
  1,                                    /* vec_store_cost.  */
  3,                                    /* cond_taken_branch_cost.  */
  1,                                    /* cond_not_taken_branch_cost.  */
};

const struct processor_costs *ix86_cost = &pentium_cost;

/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)

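/* Conceptually, a tuning test masks one of the entries below with the bit
   for the active -mtune processor, along the lines of (illustrative only;
   the real accessors are the TARGET_* convenience macros in i386.h):

     if (ix86_tune_features[X86_TUNE_USE_LEAVE] & (1 << ix86_tune))
       ...generate an epilogue using leave...  */
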
/* Feature tests against the various tunings.  */
unsigned int ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling for Generic64 seems like a good code size
     tradeoff.  We can't enable it for 32bit generic because it does not
     work well with PPro based chips.  */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,

  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ZERO_EXTEND_WITH_AND */
  m_486 | m_PENT,

  /* X86_TUNE_USE_BIT_TEST */
  m_386,

  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_PPRO | m_AMD_MULTIPLE | m_K6 | m_CORE2 | m_GENERIC,

  /* X86_TUNE_DEEP_BRANCH_PREDICTION */
  m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,

  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
     on simulation result.  But after P4 was made, no performance benefit
     was observed with branch hints.  It also increases the code size.
     As a result, icc never generates branch hints.  */
  0,

  /* X86_TUNE_DOUBLE_WITH_ADD */
  ~m_386,

  /* X86_TUNE_USE_SAHF */
  m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,

  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on Generic32 compilation setting as well.  However,
     in the current implementation the partial register stalls are not
     eliminated very well - they can be introduced via subregs synthesized
     by combine and can happen in caller/callee saving sequences.  Because
     this option pays back little on PPro based chips and is in conflict
     with partial reg dependencies used by Athlon/P4 based chips, it is
     better to leave it off for generic32 for now.  */
  m_PPRO,

  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_MOV0 */
  m_K6,

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
  m_PENT4,

  /* X86_TUNE_SPLIT_LONG_MOVES */
  m_PPRO,

  /* X86_TUNE_READ_MODIFY_WRITE */
  ~m_PENT,

  /* X86_TUNE_READ_MODIFY */
  ~(m_PENT | m_PPRO),

  /* X86_TUNE_PROMOTE_QIMODE */
  m_K6_GEODE | m_PENT | m_386 | m_486 | m_AMD_MULTIPLE | m_CORE2
  | m_GENERIC /* | m_PENT4 ? */,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_PENT | m_486 | m_386),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_PENT4 | m_NOCONA,

  /* X86_TUNE_QIMODE_MATH */
  ~0,

  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls was more effective.  */
  ~m_PPRO,

  /* X86_TUNE_PROMOTE_QI_REGS */
  0,

  /* X86_TUNE_PROMOTE_HI_REGS */
  m_PPRO,

  /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop.  */
  m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ADD_ESP_8 */
  m_AMD_MULTIPLE | m_PPRO | m_K6_GEODE | m_386
  | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_4 */
  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_8 */
  m_AMD_MULTIPLE | m_PPRO | m_386 | m_486
  | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
     for DFmode copies */
  ~(m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC | m_GEODE),

  /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
     conflict here in between PPro/Pentium4 based chips that treat 128bit
     SSE registers as single units versus K8 based chips that divide SSE
     registers to two 64bit halves.  This knob promotes all store destinations
     to be 128bit to allow register renaming on 128bit SSE units, but usually
     results in one extra microop on 64bit SSE units.  Experimental results
     show that disabling this option on P4 brings over 20% SPECfp regression,
     while enabling it on K8 brings roughly 2.4% regression that can be partly
     masked by careful scheduling of moves.  */
  m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_AMDFAM10,

  /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
  m_AMDFAM10,

  /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
     are resolved on SSE register parts instead of whole registers, so we may
     maintain just lower part of scalar values in proper format leaving the
     upper part undefined.  */
  m_ATHLON_K8,

  /* X86_TUNE_SSE_TYPELESS_STORES */
  m_AMD_MULTIPLE,

  /* X86_TUNE_SSE_LOAD0_BY_PXOR */
  m_PPRO | m_PENT4 | m_NOCONA,

  /* X86_TUNE_MEMORY_MISMATCH_STALL */
  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_PROLOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EPILOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHIFT1 */
  ~m_486,

  /* X86_TUNE_USE_FFREEP */
  m_AMD_MULTIPLE,

  /* X86_TUNE_INTER_UNIT_MOVES */
  ~(m_AMD_MULTIPLE | m_GENERIC),

  /* X86_TUNE_INTER_UNIT_CONVERSIONS */
  ~(m_AMDFAM10),

  /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
     than 4 branch instructions in the 16 byte window.  */
  m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SCHEDULE */
  m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_BT */
  m_AMD_MULTIPLE,

  /* X86_TUNE_USE_INCDEC */
  ~(m_PENT4 | m_NOCONA | m_GENERIC),

  /* X86_TUNE_PAD_RETURNS */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EXT_80387_CONSTANTS */
  m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHORTEN_X87_SSE */
  ~m_K8,

  /* X86_TUNE_AVOID_VECTOR_DECODE */
  m_K8 | m_GENERIC64,

  /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode
     and SImode multiply, but 386 and 486 do HImode multiply faster.  */
  ~(m_386 | m_486),

  /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
     vector path on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
     machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
     than a MOV.  */
  m_PENT,

  /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
     but one byte longer.  */
  m_PENT,

  /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
     operand that cannot be represented using a modRM byte.  The XOR
     replacement is long decoded, so this split helps here as well.  */
  m_K6,

  /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
     from integer to FP.  */
  m_AMDFAM10,
};

/* Feature tests against the various architecture variations.  */
unsigned int ix86_arch_features[X86_ARCH_LAST] = {
  /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro.  */
  ~(m_386 | m_486 | m_PENT | m_K6),

  /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486.  */
  ~m_386,

  /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium.  */
  ~(m_386 | m_486),

  /* X86_ARCH_XADD: Exchange and add was added for 80486.  */
  ~m_386,

  /* X86_ARCH_BSWAP: Byteswap was added for 80486.  */
  ~m_386,
};

static const unsigned int x86_accumulate_outgoing_args
  = m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;

static const unsigned int x86_arch_always_fancy_math_387
  = m_PENT | m_PPRO | m_AMD_MULTIPLE | m_PENT4
    | m_NOCONA | m_CORE2 | m_GENERIC;

1465 static enum stringop_alg stringop_alg = no_stringop;
1467 /* If the average insn count for a single function invocation is
1468 lower than this constant, emit fast (but longer) prologue and
1469 epilogue code. */
1470 #define FAST_PROLOGUE_INSN_COUNT 20
1472 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1473 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1474 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1475 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1477 /* Array of the smallest class containing reg number REGNO, indexed by
1478 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1480 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1481 {
1482 /* ax, dx, cx, bx */
1483 AREG, DREG, CREG, BREG,
1484 /* si, di, bp, sp */
1485 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1486 /* FP registers */
1487 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1488 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1489 /* arg pointer */
1490 NON_Q_REGS,
1491 /* flags, fpsr, fpcr, frame */
1492 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1493 /* SSE registers */
1494 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1495 SSE_REGS, SSE_REGS,
1496 /* MMX registers */
1497 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1498 MMX_REGS, MMX_REGS,
1499 /* REX registers */
1500 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1501 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1502 /* SSE REX registers */
1503 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1504 SSE_REGS, SSE_REGS,
1505 };
1507 /* The "default" register map used in 32bit mode. */
1509 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1510 {
1511 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1512 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1513 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1514 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1515 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1516 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1517 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1518 };
1520 static int const x86_64_int_parameter_registers[6] =
1521 {
1522 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
1523 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1524 };
1526 static int const x86_64_ms_abi_int_parameter_registers[4] =
1527 {
1528 2 /*RCX*/, 1 /*RDX*/,
1529 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1530 };
1532 static int const x86_64_int_return_registers[4] =
1533 {
1534 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
1535 };
1537 /* The "default" register map used in 64bit mode. */
1538 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1539 {
1540 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1541 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1542 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1543 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1544 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1545 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
1546 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1547 };
1549 /* Define the register numbers to be used in Dwarf debugging information.
1550 The SVR4 reference port C compiler uses the following register numbers
1551 in its Dwarf output code:
1552 0 for %eax (gcc regno = 0)
1553 1 for %ecx (gcc regno = 2)
1554 2 for %edx (gcc regno = 1)
1555 3 for %ebx (gcc regno = 3)
1556 4 for %esp (gcc regno = 7)
1557 5 for %ebp (gcc regno = 6)
1558 6 for %esi (gcc regno = 4)
1559 7 for %edi (gcc regno = 5)
1560 The following three DWARF register numbers are never generated by
1561 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1562 believes these numbers have these meanings.
1563 8 for %eip (no gcc equivalent)
1564 9 for %eflags (gcc regno = 17)
1565 10 for %trapno (no gcc equivalent)
1566 It is not at all clear how we should number the FP stack registers
1567 for the x86 architecture. If the version of SDB on x86/svr4 were
1568 a bit less brain dead with respect to floating-point then we would
1569 have a precedent to follow with respect to DWARF register numbers
1570 for x86 FP registers, but the SDB on x86/svr4 is so completely
1571 broken with respect to FP registers that it is hardly worth thinking
1572 of it as something to strive for compatibility with.
1573 The version of x86/svr4 SDB I have at the moment does (partially)
1574 seem to believe that DWARF register number 11 is associated with
1575 the x86 register %st(0), but that's about all. Higher DWARF
1576 register numbers don't seem to be associated with anything in
1577 particular, and even for DWARF regno 11, SDB only seems to under-
1578 stand that it should say that a variable lives in %st(0) (when
1579 asked via an `=' command) if we said it was in DWARF regno 11,
1580 but SDB still prints garbage when asked for the value of the
1581 variable in question (via a `/' command).
1582 (Also note that the labels SDB prints for various FP stack regs
1583 when doing an `x' command are all wrong.)
1584 Note that these problems generally don't affect the native SVR4
1585 C compiler because it doesn't allow the use of -O with -g and
1586 because when it is *not* optimizing, it allocates a memory
1587 location for each floating-point variable, and the memory
1588 location is what gets described in the DWARF AT_location
1589 attribute for the variable in question.
1590 Regardless of the severe mental illness of the x86/svr4 SDB, we
1591 do something sensible here and we use the following DWARF
1592 register numbers. Note that these are all stack-top-relative
1593 numbers.
1594 11 for %st(0) (gcc regno = 8)
1595 12 for %st(1) (gcc regno = 9)
1596 13 for %st(2) (gcc regno = 10)
1597 14 for %st(3) (gcc regno = 11)
1598 15 for %st(4) (gcc regno = 12)
1599 16 for %st(5) (gcc regno = 13)
1600 17 for %st(6) (gcc regno = 14)
1601 18 for %st(7) (gcc regno = 15)
1603 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1604 {
1605 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1606 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1607 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1608 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1609 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1610 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1611 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1612 };
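/* Illustrative cross-check (annotation, not part of the original source):
   the map above inverts the comment's table, indexed by gcc regno.  For
   instance, gcc regno 4 is %esi and svr4_dbx_register_map[4] == 6, the
   DWARF number listed for %esi above; likewise %st(0) is gcc regno 8 and
   maps to DWARF regno 11.  */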
1614 /* Test and compare insns in i386.md store the information needed to
1615 generate branch and scc insns here. */
1617 rtx ix86_compare_op0 = NULL_RTX;
1618 rtx ix86_compare_op1 = NULL_RTX;
1619 rtx ix86_compare_emitted = NULL_RTX;
1621 /* Size of the register save area. */
1622 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
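/* Worked example (annotation; assumes the usual 64-bit values
   REGPARM_MAX == 6, SSE_REGPARM_MAX == 8 and UNITS_PER_WORD == 8):
   X86_64_VARARGS_SIZE == 6*8 + 8*16 == 176 bytes, the size of the
   register save area the x86-64 psABI prescribes for varargs.  */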
1624 /* Define the structure for the machine field in struct function. */
1626 struct stack_local_entry GTY(())
1627 {
1628 unsigned short mode;
1629 unsigned short n;
1630 rtx rtl;
1631 struct stack_local_entry *next;
1632 };
1634 /* Structure describing stack frame layout.
1635 Stack grows downward:
1637 [arguments]
1638 <- ARG_POINTER
1639 saved pc
1641 saved frame pointer if frame_pointer_needed
1642 <- HARD_FRAME_POINTER
1643 [saved regs]
1645 [padding1] \
1647 [va_arg registers] (
1648 > to_allocate <- FRAME_POINTER
1649 [frame] (
1651 [padding2] /
1653 struct ix86_frame
1654 {
1655 int nregs;
1656 int padding1;
1657 int va_arg_size;
1658 HOST_WIDE_INT frame;
1659 int padding2;
1660 int outgoing_arguments_size;
1661 int red_zone_size;
1663 HOST_WIDE_INT to_allocate;
1664 /* The offsets relative to ARG_POINTER. */
1665 HOST_WIDE_INT frame_pointer_offset;
1666 HOST_WIDE_INT hard_frame_pointer_offset;
1667 HOST_WIDE_INT stack_pointer_offset;
1669 /* When save_regs_using_mov is set, emit prologue using
1670 move instead of push instructions. */
1671 bool save_regs_using_mov;
1672 };
1674 /* Code model option. */
1675 enum cmodel ix86_cmodel;
1676 /* Asm dialect. */
1677 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1678 /* TLS dialects. */
1679 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1681 /* Which unit we are generating floating point math for. */
1682 enum fpmath_unit ix86_fpmath;
1684 /* Which CPU we are scheduling for. */
1685 enum processor_type ix86_tune;
1687 /* Which instruction set architecture to use. */
1688 enum processor_type ix86_arch;
1690 /* True if the SSE prefetch instruction is not a NOP. */
1691 int x86_prefetch_sse;
1693 /* ix86_regparm_string as a number */
1694 static int ix86_regparm;
1696 /* -mstackrealign option */
1697 extern int ix86_force_align_arg_pointer;
1698 static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";
1700 /* Preferred alignment for stack boundary in bits. */
1701 unsigned int ix86_preferred_stack_boundary;
1703 /* Values 1-5: see jump.c */
1704 int ix86_branch_cost;
1706 /* Variables which are this size or smaller are put in the data/bss
1707 or ldata/lbss sections. */
1709 int ix86_section_threshold = 65536;
1711 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1712 char internal_label_prefix[16];
1713 int internal_label_prefix_len;
1715 /* Fence to use after loop using movnt. */
1716 tree x86_mfence;
1718 /* Register class used for passing a given 64-bit part of the argument.
1719 These represent classes as documented by the psABI, with the exception
1720 of the SSESF and SSEDF classes, which are basically the SSE class: GCC
1721 will just use an SFmode or DFmode move instead of DImode to avoid
1722 reformatting penalties.
1723 Similarly we play games with the INTEGERSI_CLASS to use cheaper SImode
1724 moves whenever possible (the upper half is then just padding). */
1725 enum x86_64_reg_class
1726 {
1727 X86_64_NO_CLASS,
1728 X86_64_INTEGER_CLASS,
1729 X86_64_INTEGERSI_CLASS,
1730 X86_64_SSE_CLASS,
1731 X86_64_SSESF_CLASS,
1732 X86_64_SSEDF_CLASS,
1733 X86_64_SSEUP_CLASS,
1734 X86_64_X87_CLASS,
1735 X86_64_X87UP_CLASS,
1736 X86_64_COMPLEX_X87_CLASS,
1737 X86_64_MEMORY_CLASS
1738 };
1739 static const char * const x86_64_reg_class_name[] =
1740 {
1741 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1742 "sseup", "x87", "x87up", "cplx87", "no"
1745 #define MAX_CLASSES 4
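/* Example (annotation): a lone double is classified X86_64_SSEDF_CLASS,
   so its 64-bit chunk is moved as DFmode rather than DImode, while a
   32-bit int is X86_64_INTEGERSI_CLASS and can use the cheaper SImode
   move described in the comment above.  */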
1747 /* Table of constants used by fldpi, fldln2, etc.... */
1748 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1749 static bool ext_80387_constants_init = 0;
1752 static struct machine_function * ix86_init_machine_status (void);
1753 static rtx ix86_function_value (const_tree, const_tree, bool);
1754 static int ix86_function_regparm (const_tree, const_tree);
1755 static void ix86_compute_frame_layout (struct ix86_frame *);
1756 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1757 rtx, rtx, int);
1760 /* The svr4 ABI for the i386 says that records and unions are returned
1761 in memory. */
1762 #ifndef DEFAULT_PCC_STRUCT_RETURN
1763 #define DEFAULT_PCC_STRUCT_RETURN 1
1764 #endif
1766 /* Bit flags that specify the ISA we are compiling for. */
1767 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1769 /* A mask of ix86_isa_flags that includes bit X if X
1770 was set or cleared on the command line. */
1771 static int ix86_isa_flags_explicit;
1773 /* Define a set of ISAs which are available when a given ISA is
1774 enabled. MMX and SSE ISAs are handled separately. */
1776 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1777 #define OPTION_MASK_ISA_3DNOW_SET \
1778 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1780 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1781 #define OPTION_MASK_ISA_SSE2_SET \
1782 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1783 #define OPTION_MASK_ISA_SSE3_SET \
1784 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1785 #define OPTION_MASK_ISA_SSSE3_SET \
1786 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1787 #define OPTION_MASK_ISA_SSE4_1_SET \
1788 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1789 #define OPTION_MASK_ISA_SSE4_2_SET \
1790 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1792 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1793 as -msse4.2. */
1794 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1796 #define OPTION_MASK_ISA_SSE4A_SET \
1797 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1798 #define OPTION_MASK_ISA_SSE5_SET \
1799 (OPTION_MASK_ISA_SSE5 | OPTION_MASK_ISA_SSE4A_SET)
1801 /* Define a set of ISAs which aren't available when a given ISA is
1802 disabled. MMX and SSE ISAs are handled separately. */
1804 #define OPTION_MASK_ISA_MMX_UNSET \
1805 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1806 #define OPTION_MASK_ISA_3DNOW_UNSET \
1807 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1808 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1810 #define OPTION_MASK_ISA_SSE_UNSET \
1811 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1812 #define OPTION_MASK_ISA_SSE2_UNSET \
1813 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
1814 #define OPTION_MASK_ISA_SSE3_UNSET \
1815 (OPTION_MASK_ISA_SSE3 \
1816 | OPTION_MASK_ISA_SSSE3_UNSET \
1817 | OPTION_MASK_ISA_SSE4A_UNSET )
1818 #define OPTION_MASK_ISA_SSSE3_UNSET \
1819 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
1820 #define OPTION_MASK_ISA_SSE4_1_UNSET \
1821 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
1822 #define OPTION_MASK_ISA_SSE4_2_UNSET OPTION_MASK_ISA_SSE4_2
1824 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
1825 as -mno-sse4.1. */
1826 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
1828 #define OPTION_MASK_ISA_SSE4A_UNSET \
1829 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE5_UNSET)
1831 #define OPTION_MASK_ISA_SSE5_UNSET OPTION_MASK_ISA_SSE5
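/* Dependency-closure illustration (annotation): the SET/UNSET chains
   expand transitively.  For example,

     OPTION_MASK_ISA_SSE4_1_SET
       == SSE4_1 | SSSE3 | SSE3 | SSE2 | SSE   (each an OPTION_MASK_ISA_* bit)

   so -msse4.1 switches on everything it needs, while

     OPTION_MASK_ISA_SSE2_UNSET
       == SSE2 | SSE3 | SSSE3 | SSE4_1 | SSE4_2 | SSE4A | SSE5

   so -mno-sse2 also switches off everything built on top of it.  */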
1833 /* Vectorization library interface and handlers. */
1834 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
1835 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
1836 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
1838 /* Implement TARGET_HANDLE_OPTION. */
1840 static bool
1841 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1843 switch (code)
1845 case OPT_mmmx:
1846 if (value)
1848 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
1849 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
1851 else
1853 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
1854 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
1856 return true;
1858 case OPT_m3dnow:
1859 if (value)
1861 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
1862 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
1864 else
1866 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
1867 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
1869 return true;
1871 case OPT_m3dnowa:
1872 return false;
1874 case OPT_msse:
1875 if (value)
1877 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
1878 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
1880 else
1882 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
1883 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
1885 return true;
1887 case OPT_msse2:
1888 if (value)
1890 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
1891 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
1893 else
1895 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
1896 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
1898 return true;
1900 case OPT_msse3:
1901 if (value)
1903 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
1904 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
1906 else
1908 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
1909 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
1911 return true;
1913 case OPT_mssse3:
1914 if (value)
1916 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
1917 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
1919 else
1921 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
1922 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
1924 return true;
1926 case OPT_msse4_1:
1927 if (value)
1929 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
1930 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
1932 else
1934 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
1935 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
1937 return true;
1939 case OPT_msse4_2:
1940 if (value)
1942 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
1943 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
1945 else
1947 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
1948 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
1950 return true;
1952 case OPT_msse4:
1953 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
1954 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
1955 return true;
1957 case OPT_mno_sse4:
1958 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
1959 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
1960 return true;
1962 case OPT_msse4a:
1963 if (value)
1965 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
1966 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
1968 else
1970 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
1971 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
1973 return true;
1975 case OPT_msse5:
1976 if (value)
1978 ix86_isa_flags |= OPTION_MASK_ISA_SSE5_SET;
1979 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_SET;
1981 else
1983 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE5_UNSET;
1984 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_UNSET;
1986 return true;
1988 default:
1989 return true;
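/* Usage note (annotation): because each case records the exact bits it
   touched in ix86_isa_flags_explicit, a command line such as
   "-msse4 -mno-ssse3" ends up with the SSSE3 and SSE4.x bits cleared
   and marked explicit, so override_options below will not silently
   turn them back on from the -march= defaults.  */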
1993 /* Sometimes certain combinations of command options do not make
1994 sense on a particular target machine. You can define a macro
1995 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1996 defined, is executed once just after all the command options have
1997 been parsed.
1999 Don't use this macro to turn on various extra optimizations for
2000 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2002 void
2003 override_options (void)
2005 int i;
2006 int ix86_tune_defaulted = 0;
2007 int ix86_arch_specified = 0;
2008 unsigned int ix86_arch_mask, ix86_tune_mask;
2010 /* Comes from final.c -- no real reason to change it. */
2011 #define MAX_CODE_ALIGN 16
2013 static struct ptt
2014 {
2015 const struct processor_costs *cost; /* Processor costs */
2016 const int align_loop; /* Default alignments. */
2017 const int align_loop_max_skip;
2018 const int align_jump;
2019 const int align_jump_max_skip;
2020 const int align_func;
2021 }
2022 const processor_target_table[PROCESSOR_max] =
2023 {
2024 {&i386_cost, 4, 3, 4, 3, 4},
2025 {&i486_cost, 16, 15, 16, 15, 16},
2026 {&pentium_cost, 16, 7, 16, 7, 16},
2027 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2028 {&geode_cost, 0, 0, 0, 0, 0},
2029 {&k6_cost, 32, 7, 32, 7, 32},
2030 {&athlon_cost, 16, 7, 16, 7, 16},
2031 {&pentium4_cost, 0, 0, 0, 0, 0},
2032 {&k8_cost, 16, 7, 16, 7, 16},
2033 {&nocona_cost, 0, 0, 0, 0, 0},
2034 {&core2_cost, 16, 10, 16, 10, 16},
2035 {&generic32_cost, 16, 7, 16, 7, 16},
2036 {&generic64_cost, 16, 10, 16, 10, 16},
2037 {&amdfam10_cost, 32, 24, 32, 7, 32}
2038 };
2040 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2041 {
2042 "generic",
2043 "i386",
2044 "i486",
2045 "pentium",
2046 "pentium-mmx",
2047 "pentiumpro",
2048 "pentium2",
2049 "pentium3",
2050 "pentium4",
2051 "pentium-m",
2052 "prescott",
2053 "nocona",
2054 "core2",
2055 "geode",
2056 "k6",
2057 "k6-2",
2058 "k6-3",
2059 "athlon",
2060 "athlon-4",
2061 "k8",
2062 "amdfam10"
2065 enum pta_flags
2066 {
2067 PTA_SSE = 1 << 0,
2068 PTA_SSE2 = 1 << 1,
2069 PTA_SSE3 = 1 << 2,
2070 PTA_MMX = 1 << 3,
2071 PTA_PREFETCH_SSE = 1 << 4,
2072 PTA_3DNOW = 1 << 5,
2073 PTA_3DNOW_A = 1 << 6,
2074 PTA_64BIT = 1 << 7,
2075 PTA_SSSE3 = 1 << 8,
2076 PTA_CX16 = 1 << 9,
2077 PTA_POPCNT = 1 << 10,
2078 PTA_ABM = 1 << 11,
2079 PTA_SSE4A = 1 << 12,
2080 PTA_NO_SAHF = 1 << 13,
2081 PTA_SSE4_1 = 1 << 14,
2082 PTA_SSE4_2 = 1 << 15,
2083 PTA_SSE5 = 1 << 16,
2084 PTA_AES = 1 << 17,
2085 PTA_PCLMUL = 1 << 18
2086 };
2088 static struct pta
2089 {
2090 const char *const name; /* processor name or nickname. */
2091 const enum processor_type processor;
2092 const unsigned /*enum pta_flags*/ flags;
2093 }
2094 const processor_alias_table[] =
2095 {
2096 {"i386", PROCESSOR_I386, 0},
2097 {"i486", PROCESSOR_I486, 0},
2098 {"i586", PROCESSOR_PENTIUM, 0},
2099 {"pentium", PROCESSOR_PENTIUM, 0},
2100 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
2101 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
2102 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
2103 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
2104 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE},
2105 {"i686", PROCESSOR_PENTIUMPRO, 0},
2106 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
2107 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
2108 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE},
2109 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE},
2110 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_SSE2},
2111 {"pentium4", PROCESSOR_PENTIUM4, PTA_MMX |PTA_SSE | PTA_SSE2},
2112 {"pentium4m", PROCESSOR_PENTIUM4, PTA_MMX | PTA_SSE | PTA_SSE2},
2113 {"prescott", PROCESSOR_NOCONA, PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2114 {"nocona", PROCESSOR_NOCONA, (PTA_64BIT
2115 | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2116 | PTA_CX16 | PTA_NO_SAHF)},
2117 {"core2", PROCESSOR_CORE2, (PTA_64BIT
2118 | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2119 | PTA_SSSE3
2120 | PTA_CX16)},
2121 {"geode", PROCESSOR_GEODE, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2122 |PTA_PREFETCH_SSE)},
2123 {"k6", PROCESSOR_K6, PTA_MMX},
2124 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
2125 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
2126 {"athlon", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2127 | PTA_PREFETCH_SSE)},
2128 {"athlon-tbird", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2129 | PTA_PREFETCH_SSE)},
2130 {"athlon-4", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2131 | PTA_SSE)},
2132 {"athlon-xp", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2133 | PTA_SSE)},
2134 {"athlon-mp", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2135 | PTA_SSE)},
2136 {"x86-64", PROCESSOR_K8, (PTA_64BIT
2137 | PTA_MMX | PTA_SSE | PTA_SSE2
2138 | PTA_NO_SAHF)},
2139 {"k8", PROCESSOR_K8, (PTA_64BIT
2140 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2141 | PTA_SSE | PTA_SSE2
2142 | PTA_NO_SAHF)},
2143 {"k8-sse3", PROCESSOR_K8, (PTA_64BIT
2144 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2145 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2146 | PTA_NO_SAHF)},
2147 {"opteron", PROCESSOR_K8, (PTA_64BIT
2148 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2149 | PTA_SSE | PTA_SSE2
2150 | PTA_NO_SAHF)},
2151 {"opteron-sse3", PROCESSOR_K8, (PTA_64BIT
2152 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2153 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2154 | PTA_NO_SAHF)},
2155 {"athlon64", PROCESSOR_K8, (PTA_64BIT
2156 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2157 | PTA_SSE | PTA_SSE2
2158 | PTA_NO_SAHF)},
2159 {"athlon64-sse3", PROCESSOR_K8, (PTA_64BIT
2160 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2161 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2162 | PTA_NO_SAHF)},
2163 {"athlon-fx", PROCESSOR_K8, (PTA_64BIT
2164 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2165 | PTA_SSE | PTA_SSE2
2166 | PTA_NO_SAHF)},
2167 {"amdfam10", PROCESSOR_AMDFAM10, (PTA_64BIT
2168 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2169 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2170 | PTA_SSE4A
2171 | PTA_CX16 | PTA_ABM)},
2172 {"barcelona", PROCESSOR_AMDFAM10, (PTA_64BIT
2173 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2174 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2175 | PTA_SSE4A
2176 | PTA_CX16 | PTA_ABM)},
2177 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
2178 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
2181 int const pta_size = ARRAY_SIZE (processor_alias_table);
2183 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2184 SUBTARGET_OVERRIDE_OPTIONS;
2185 #endif
2187 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2188 SUBSUBTARGET_OVERRIDE_OPTIONS;
2189 #endif
2191 /* -fPIC is the default for x86_64. */
2192 if (TARGET_MACHO && TARGET_64BIT)
2193 flag_pic = 2;
2195 /* Set the default values for switches whose default depends on TARGET_64BIT
2196 in case they weren't overwritten by command line options. */
2197 if (TARGET_64BIT)
2199 /* Mach-O doesn't support omitting the frame pointer for now. */
2200 if (flag_omit_frame_pointer == 2)
2201 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2202 if (flag_asynchronous_unwind_tables == 2)
2203 flag_asynchronous_unwind_tables = 1;
2204 if (flag_pcc_struct_return == 2)
2205 flag_pcc_struct_return = 0;
2207 else
2209 if (flag_omit_frame_pointer == 2)
2210 flag_omit_frame_pointer = 0;
2211 if (flag_asynchronous_unwind_tables == 2)
2212 flag_asynchronous_unwind_tables = 0;
2213 if (flag_pcc_struct_return == 2)
2214 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2217 /* Need to check -mtune=generic first. */
2218 if (ix86_tune_string)
2220 if (!strcmp (ix86_tune_string, "generic")
2221 || !strcmp (ix86_tune_string, "i686")
2222 /* As special support for cross compilers we read -mtune=native
2223 as -mtune=generic. With native compilers we won't see the
2224 -mtune=native, as it was changed by the driver. */
2225 || !strcmp (ix86_tune_string, "native"))
2227 if (TARGET_64BIT)
2228 ix86_tune_string = "generic64";
2229 else
2230 ix86_tune_string = "generic32";
2232 else if (!strncmp (ix86_tune_string, "generic", 7))
2233 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
2235 else
2237 if (ix86_arch_string)
2238 ix86_tune_string = ix86_arch_string;
2239 if (!ix86_tune_string)
2241 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2242 ix86_tune_defaulted = 1;
2245 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2246 need to use a sensible tune option. */
2247 if (!strcmp (ix86_tune_string, "generic")
2248 || !strcmp (ix86_tune_string, "x86-64")
2249 || !strcmp (ix86_tune_string, "i686"))
2251 if (TARGET_64BIT)
2252 ix86_tune_string = "generic64";
2253 else
2254 ix86_tune_string = "generic32";
2257 if (ix86_stringop_string)
2259 if (!strcmp (ix86_stringop_string, "rep_byte"))
2260 stringop_alg = rep_prefix_1_byte;
2261 else if (!strcmp (ix86_stringop_string, "libcall"))
2262 stringop_alg = libcall;
2263 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2264 stringop_alg = rep_prefix_4_byte;
2265 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
2266 stringop_alg = rep_prefix_8_byte;
2267 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2268 stringop_alg = loop_1_byte;
2269 else if (!strcmp (ix86_stringop_string, "loop"))
2270 stringop_alg = loop;
2271 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2272 stringop_alg = unrolled_loop;
2273 else
2274 error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
2276 if (!strcmp (ix86_tune_string, "x86-64"))
2277 warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
2278 "-mtune=generic instead as appropriate.");
2280 if (!ix86_arch_string)
2281 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
2282 else
2283 ix86_arch_specified = 1;
2285 if (!strcmp (ix86_arch_string, "generic"))
2286 error ("generic CPU can be used only for -mtune= switch");
2287 if (!strncmp (ix86_arch_string, "generic", 7))
2288 error ("bad value (%s) for -march= switch", ix86_arch_string);
2290 if (ix86_cmodel_string != 0)
2292 if (!strcmp (ix86_cmodel_string, "small"))
2293 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2294 else if (!strcmp (ix86_cmodel_string, "medium"))
2295 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2296 else if (!strcmp (ix86_cmodel_string, "large"))
2297 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2298 else if (flag_pic)
2299 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2300 else if (!strcmp (ix86_cmodel_string, "32"))
2301 ix86_cmodel = CM_32;
2302 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2303 ix86_cmodel = CM_KERNEL;
2304 else
2305 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
2307 else
2309 /* For TARGET_64BIT_MS_ABI, force pic on, in order to enable the
2310 use of rip-relative addressing. This eliminates fixups that
2311 would otherwise be needed if this object is to be placed in a
2312 DLL, and is essentially just as efficient as direct addressing. */
2313 if (TARGET_64BIT_MS_ABI)
2314 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2315 else if (TARGET_64BIT)
2316 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2317 else
2318 ix86_cmodel = CM_32;
2320 if (ix86_asm_string != 0)
2322 if (! TARGET_MACHO
2323 && !strcmp (ix86_asm_string, "intel"))
2324 ix86_asm_dialect = ASM_INTEL;
2325 else if (!strcmp (ix86_asm_string, "att"))
2326 ix86_asm_dialect = ASM_ATT;
2327 else
2328 error ("bad value (%s) for -masm= switch", ix86_asm_string);
2330 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2331 error ("code model %qs not supported in the %s bit mode",
2332 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2333 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2334 sorry ("%i-bit mode not compiled in",
2335 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2337 for (i = 0; i < pta_size; i++)
2338 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2340 ix86_arch = processor_alias_table[i].processor;
2341 /* Default cpu tuning to the architecture. */
2342 ix86_tune = ix86_arch;
2344 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2345 error ("CPU you selected does not support x86-64 "
2346 "instruction set");
2348 if (processor_alias_table[i].flags & PTA_MMX
2349 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2350 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2351 if (processor_alias_table[i].flags & PTA_3DNOW
2352 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2353 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2354 if (processor_alias_table[i].flags & PTA_3DNOW_A
2355 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2356 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2357 if (processor_alias_table[i].flags & PTA_SSE
2358 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2359 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2360 if (processor_alias_table[i].flags & PTA_SSE2
2361 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2362 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2363 if (processor_alias_table[i].flags & PTA_SSE3
2364 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2365 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2366 if (processor_alias_table[i].flags & PTA_SSSE3
2367 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2368 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2369 if (processor_alias_table[i].flags & PTA_SSE4_1
2370 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2371 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2372 if (processor_alias_table[i].flags & PTA_SSE4_2
2373 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2374 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2375 if (processor_alias_table[i].flags & PTA_SSE4A
2376 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
2377 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
2378 if (processor_alias_table[i].flags & PTA_SSE5
2379 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE5))
2380 ix86_isa_flags |= OPTION_MASK_ISA_SSE5;
2382 if (processor_alias_table[i].flags & PTA_ABM)
2383 x86_abm = true;
2384 if (processor_alias_table[i].flags & PTA_CX16)
2385 x86_cmpxchg16b = true;
2386 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM))
2387 x86_popcnt = true;
2388 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
2389 x86_prefetch_sse = true;
2390 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF)))
2391 x86_sahf = true;
2392 if (processor_alias_table[i].flags & PTA_AES)
2393 x86_aes = true;
2394 if (processor_alias_table[i].flags & PTA_PCLMUL)
2395 x86_pclmul = true;
2397 break;
2400 if (i == pta_size)
2401 error ("bad value (%s) for -march= switch", ix86_arch_string);
2403 ix86_arch_mask = 1u << ix86_arch;
2404 for (i = 0; i < X86_ARCH_LAST; ++i)
2405 ix86_arch_features[i] &= ix86_arch_mask;
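/* Example (annotation): ix86_arch_mask has a single bit set, so each
   X86_ARCH_* entry collapses to zero or nonzero for the chosen -march.
   X86_ARCH_CMOVE is ~(m_386 | m_486 | m_PENT | m_K6), hence for
   -march=i686 (PROCESSOR_PENTIUMPRO) the masked value is nonzero and
   conditional moves are considered available.  */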
2407 for (i = 0; i < pta_size; i++)
2408 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
2410 ix86_tune = processor_alias_table[i].processor;
2411 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2413 if (ix86_tune_defaulted)
2415 ix86_tune_string = "x86-64";
2416 for (i = 0; i < pta_size; i++)
2417 if (! strcmp (ix86_tune_string,
2418 processor_alias_table[i].name))
2419 break;
2420 ix86_tune = processor_alias_table[i].processor;
2422 else
2423 error ("CPU you selected does not support x86-64 "
2424 "instruction set");
2426 /* Intel CPUs have always interpreted SSE prefetch instructions as
2427 NOPs; so, we can enable SSE prefetch instructions even when
2428 -mtune (rather than -march) points us to a processor that has them.
2429 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
2430 higher processors. */
2431 if (TARGET_CMOVE
2432 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
2433 x86_prefetch_sse = true;
2434 break;
2436 if (i == pta_size)
2437 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
2439 /* Enable SSE2 if AES or PCLMUL is enabled. */
2440 if ((x86_aes || x86_pclmul)
2441 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2443 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2444 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2447 ix86_tune_mask = 1u << ix86_tune;
2448 for (i = 0; i < X86_TUNE_LAST; ++i)
2449 ix86_tune_features[i] &= ix86_tune_mask;
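/* Example (annotation): likewise for tuning.  With -mtune=core2,
   ix86_tune_mask == m_CORE2; X86_TUNE_USE_INCDEC is
   ~(m_PENT4 | m_NOCONA | m_GENERIC), which keeps the m_CORE2 bit, so
   inc/dec instructions remain enabled for that tuning.  */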
2451 if (optimize_size)
2452 ix86_cost = &size_cost;
2453 else
2454 ix86_cost = processor_target_table[ix86_tune].cost;
2456 /* Arrange to set up i386_stack_locals for all functions. */
2457 init_machine_status = ix86_init_machine_status;
2459 /* Validate -mregparm= value. */
2460 if (ix86_regparm_string)
2462 if (TARGET_64BIT)
2463 warning (0, "-mregparm is ignored in 64-bit mode");
2464 i = atoi (ix86_regparm_string);
2465 if (i < 0 || i > REGPARM_MAX)
2466 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
2467 else
2468 ix86_regparm = i;
2470 if (TARGET_64BIT)
2471 ix86_regparm = REGPARM_MAX;
2473 /* If the user has provided any of the -malign-* options,
2474 warn and use that value only if -falign-* is not set.
2475 Remove this code in GCC 3.2 or later. */
2476 if (ix86_align_loops_string)
2478 warning (0, "-malign-loops is obsolete, use -falign-loops");
2479 if (align_loops == 0)
2481 i = atoi (ix86_align_loops_string);
2482 if (i < 0 || i > MAX_CODE_ALIGN)
2483 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2484 else
2485 align_loops = 1 << i;
2489 if (ix86_align_jumps_string)
2491 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
2492 if (align_jumps == 0)
2494 i = atoi (ix86_align_jumps_string);
2495 if (i < 0 || i > MAX_CODE_ALIGN)
2496 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2497 else
2498 align_jumps = 1 << i;
2502 if (ix86_align_funcs_string)
2504 warning (0, "-malign-functions is obsolete, use -falign-functions");
2505 if (align_functions == 0)
2507 i = atoi (ix86_align_funcs_string);
2508 if (i < 0 || i > MAX_CODE_ALIGN)
2509 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2510 else
2511 align_functions = 1 << i;
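/* Worked example (annotation): the obsolete -malign-* options take a
   log2 value, so -malign-loops=4 yields align_loops = 1 << 4 = 16,
   i.e. the same effect as the preferred -falign-loops=16.  */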
2515 /* Default align_* from the processor table. */
2516 if (align_loops == 0)
2518 align_loops = processor_target_table[ix86_tune].align_loop;
2519 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2521 if (align_jumps == 0)
2523 align_jumps = processor_target_table[ix86_tune].align_jump;
2524 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2526 if (align_functions == 0)
2528 align_functions = processor_target_table[ix86_tune].align_func;
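/* Example (annotation): with -mtune=k8 and no explicit -falign-*
   options, the k8 row of processor_target_table above gives
   align_loops = 16 with align_loops_max_skip = 7, align_jumps = 16
   with max skip 7, and align_functions = 16.  */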
2531 /* Validate -mbranch-cost= value, or provide default. */
2532 ix86_branch_cost = ix86_cost->branch_cost;
2533 if (ix86_branch_cost_string)
2535 i = atoi (ix86_branch_cost_string);
2536 if (i < 0 || i > 5)
2537 error ("-mbranch-cost=%d is not between 0 and 5", i);
2538 else
2539 ix86_branch_cost = i;
2541 if (ix86_section_threshold_string)
2543 i = atoi (ix86_section_threshold_string);
2544 if (i < 0)
2545 error ("-mlarge-data-threshold=%d is negative", i);
2546 else
2547 ix86_section_threshold = i;
2550 if (ix86_tls_dialect_string)
2552 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2553 ix86_tls_dialect = TLS_DIALECT_GNU;
2554 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2555 ix86_tls_dialect = TLS_DIALECT_GNU2;
2556 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2557 ix86_tls_dialect = TLS_DIALECT_SUN;
2558 else
2559 error ("bad value (%s) for -mtls-dialect= switch",
2560 ix86_tls_dialect_string);
2563 if (ix87_precision_string)
2565 i = atoi (ix87_precision_string);
2566 if (i != 32 && i != 64 && i != 80)
2567 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
2570 if (TARGET_64BIT)
2572 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
2574 /* Enable by default the SSE and MMX builtins. Do allow the user to
2575 explicitly disable any of these. In particular, disabling SSE and
2576 MMX for kernel code is extremely useful. */
2577 if (!ix86_arch_specified)
2578 ix86_isa_flags
2579 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
2580 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
2582 if (TARGET_RTD)
2583 warning (0, "-mrtd is ignored in 64bit mode");
2585 else
2587 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
2589 if (!ix86_arch_specified)
2590 ix86_isa_flags
2591 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
2593 /* The i386 ABI does not specify a red zone. It still makes sense to use it
2594 when the programmer takes care to keep the stack from being destroyed. */
2595 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2596 target_flags |= MASK_NO_RED_ZONE;
2599 /* Keep nonleaf frame pointers. */
2600 if (flag_omit_frame_pointer)
2601 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2602 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2603 flag_omit_frame_pointer = 1;
2605 /* If we're doing fast math, we don't care about comparison order
2606 wrt NaNs. This lets us use a shorter comparison sequence. */
2607 if (flag_finite_math_only)
2608 target_flags &= ~MASK_IEEE_FP;
2610 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2611 since the insns won't need emulation. */
2612 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
2613 target_flags &= ~MASK_NO_FANCY_MATH_387;
2615 /* Likewise, if the target doesn't have a 387, or we've specified
2616 software floating point, don't use 387 inline intrinsics. */
2617 if (!TARGET_80387)
2618 target_flags |= MASK_NO_FANCY_MATH_387;
2620 /* Turn on MMX builtins for -msse. */
2621 if (TARGET_SSE)
2623 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
2624 x86_prefetch_sse = true;
2627 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
2628 if (TARGET_SSE4_2 || TARGET_ABM)
2629 x86_popcnt = true;
2631 /* Validate -mpreferred-stack-boundary= value, or provide default.
2632 The default of 128 bits is for the Pentium III's SSE __m128. We can't
2633 change it because of optimize_size; otherwise object files compiled
2634 with -Os and -On could not be mixed. */
2635 ix86_preferred_stack_boundary = 128;
2636 if (ix86_preferred_stack_boundary_string)
2638 i = atoi (ix86_preferred_stack_boundary_string);
2639 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
2640 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
2641 TARGET_64BIT ? 4 : 2);
2642 else
2643 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
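/* Worked example (annotation): the option value is a log2 of bytes, so
   -mpreferred-stack-boundary=4 gives (1 << 4) * BITS_PER_UNIT
   == 16 * 8 == 128 bits, i.e. the default 16-byte stack alignment.  */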
2646 /* Accept -msseregparm only if at least SSE support is enabled. */
2647 if (TARGET_SSEREGPARM
2648 && ! TARGET_SSE)
2649 error ("-msseregparm used without SSE enabled");
2651 ix86_fpmath = TARGET_FPMATH_DEFAULT;
2652 if (ix86_fpmath_string != 0)
2654 if (! strcmp (ix86_fpmath_string, "387"))
2655 ix86_fpmath = FPMATH_387;
2656 else if (! strcmp (ix86_fpmath_string, "sse"))
2658 if (!TARGET_SSE)
2660 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2661 ix86_fpmath = FPMATH_387;
2663 else
2664 ix86_fpmath = FPMATH_SSE;
2666 else if (! strcmp (ix86_fpmath_string, "387,sse")
2667 || ! strcmp (ix86_fpmath_string, "sse,387"))
2669 if (!TARGET_SSE)
2671 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2672 ix86_fpmath = FPMATH_387;
2674 else if (!TARGET_80387)
2676 warning (0, "387 instruction set disabled, using SSE arithmetics");
2677 ix86_fpmath = FPMATH_SSE;
2679 else
2680 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
2682 else
2683 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
2686 /* If the i387 is disabled, then do not return values in it. */
2687 if (!TARGET_80387)
2688 target_flags &= ~MASK_FLOAT_RETURNS;
2690 /* Use external vectorized library in vectorizing intrinsics. */
2691 if (ix86_veclibabi_string)
2693 if (strcmp (ix86_veclibabi_string, "svml") == 0)
2694 ix86_veclib_handler = ix86_veclibabi_svml;
2695 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
2696 ix86_veclib_handler = ix86_veclibabi_acml;
2697 else
2698 error ("unknown vectorization library ABI type (%s) for "
2699 "-mveclibabi= switch", ix86_veclibabi_string);
2702 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
2703 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2704 && !optimize_size)
2705 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2707 /* ??? Unwind info is not correct around the CFG unless either a frame
2708 pointer is present or M_A_O_A is set. Fixing this requires rewriting
2709 unwind info generation to be aware of the CFG and propagating states
2710 around edges. */
2711 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
2712 || flag_exceptions || flag_non_call_exceptions)
2713 && flag_omit_frame_pointer
2714 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2716 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2717 warning (0, "unwind tables currently require either a frame pointer "
2718 "or -maccumulate-outgoing-args for correctness");
2719 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2722 /* If stack probes are required, the space used for large function
2723 arguments on the stack must also be probed, so enable
2724 -maccumulate-outgoing-args so this happens in the prologue. */
2725 if (TARGET_STACK_PROBE
2726 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2728 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2729 warning (0, "stack probing requires -maccumulate-outgoing-args "
2730 "for correctness");
2731 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2734 /* For sane SSE instruction set generation we need fcomi instruction.
2735 It is safe to enable all CMOVE instructions. */
2736 if (TARGET_SSE)
2737 TARGET_CMOVE = 1;
2739 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
2741 char *p;
2742 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
2743 p = strchr (internal_label_prefix, 'X');
2744 internal_label_prefix_len = p - internal_label_prefix;
2745 *p = '\0';
2748 /* When scheduling description is not available, disable scheduler pass
2749 so it won't slow down the compilation and make x87 code slower. */
2750 if (!TARGET_SCHEDULE)
2751 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
2753 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2754 set_param_value ("simultaneous-prefetches",
2755 ix86_cost->simultaneous_prefetches);
2756 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2757 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
2758 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
2759 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
2760 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
2761 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
2763 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
2764 can be optimized to ap = __builtin_next_arg (0). */
2765 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
2766 targetm.expand_builtin_va_start = NULL;
2769 /* Return true if this goes in large data/bss. */
2771 static bool
2772 ix86_in_large_data_p (tree exp)
2774 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
2775 return false;
2777 /* Functions are never large data. */
2778 if (TREE_CODE (exp) == FUNCTION_DECL)
2779 return false;
2781 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
2783 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
2784 if (strcmp (section, ".ldata") == 0
2785 || strcmp (section, ".lbss") == 0)
2786 return true;
2787 return false;
2789 else
2791 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
2793 /* If this is an incomplete type with size 0, then we can't put it
2794 in data because it might be too big when completed. */
2795 if (!size || size > ix86_section_threshold)
2796 return true;
2799 return false;
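/* Example (annotation, hypothetical variable): with -mcmodel=medium and
   the default -mlarge-data-threshold of 65536, a definition such as
   "static char buf[1 << 20];" counts as large data and is placed in
   .ldata/.lbss, while small scalars stay in the normal sections.  */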
2802 /* Switch to the appropriate section for output of DECL.
2803 DECL is either a `VAR_DECL' node or a constant of some sort.
2804 RELOC indicates whether forming the initial value of DECL requires
2805 link-time relocations. */
2807 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
2808 ATTRIBUTE_UNUSED;
2810 static section *
2811 x86_64_elf_select_section (tree decl, int reloc,
2812 unsigned HOST_WIDE_INT align)
2814 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2815 && ix86_in_large_data_p (decl))
2817 const char *sname = NULL;
2818 unsigned int flags = SECTION_WRITE;
2819 switch (categorize_decl_for_section (decl, reloc))
2821 case SECCAT_DATA:
2822 sname = ".ldata";
2823 break;
2824 case SECCAT_DATA_REL:
2825 sname = ".ldata.rel";
2826 break;
2827 case SECCAT_DATA_REL_LOCAL:
2828 sname = ".ldata.rel.local";
2829 break;
2830 case SECCAT_DATA_REL_RO:
2831 sname = ".ldata.rel.ro";
2832 break;
2833 case SECCAT_DATA_REL_RO_LOCAL:
2834 sname = ".ldata.rel.ro.local";
2835 break;
2836 case SECCAT_BSS:
2837 sname = ".lbss";
2838 flags |= SECTION_BSS;
2839 break;
2840 case SECCAT_RODATA:
2841 case SECCAT_RODATA_MERGE_STR:
2842 case SECCAT_RODATA_MERGE_STR_INIT:
2843 case SECCAT_RODATA_MERGE_CONST:
2844 sname = ".lrodata";
2845 flags = 0;
2846 break;
2847 case SECCAT_SRODATA:
2848 case SECCAT_SDATA:
2849 case SECCAT_SBSS:
2850 gcc_unreachable ();
2851 case SECCAT_TEXT:
2852 case SECCAT_TDATA:
2853 case SECCAT_TBSS:
2854 /* We don't split these for the medium model. Place them into
2855 default sections and hope for the best. */
2856 break;
2857 case SECCAT_EMUTLS_VAR:
2858 case SECCAT_EMUTLS_TMPL:
2859 gcc_unreachable ();
2861 if (sname)
2863 /* We might get called with string constants, but get_named_section
2864 doesn't like them as they are not DECLs. Also, we need to set
2865 flags in that case. */
2866 if (!DECL_P (decl))
2867 return get_section (sname, flags, NULL);
2868 return get_named_section (decl, sname, reloc);
2871 return default_elf_select_section (decl, reloc, align);
2874 /* Build up a unique section name, expressed as a
2875 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
2876 RELOC indicates whether the initial value of EXP requires
2877 link-time relocations. */
2879 static void ATTRIBUTE_UNUSED
2880 x86_64_elf_unique_section (tree decl, int reloc)
2882 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2883 && ix86_in_large_data_p (decl))
2885 const char *prefix = NULL;
2886 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
2887 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
2889 switch (categorize_decl_for_section (decl, reloc))
2891 case SECCAT_DATA:
2892 case SECCAT_DATA_REL:
2893 case SECCAT_DATA_REL_LOCAL:
2894 case SECCAT_DATA_REL_RO:
2895 case SECCAT_DATA_REL_RO_LOCAL:
2896 prefix = one_only ? ".ld" : ".ldata";
2897 break;
2898 case SECCAT_BSS:
2899 prefix = one_only ? ".lb" : ".lbss";
2900 break;
2901 case SECCAT_RODATA:
2902 case SECCAT_RODATA_MERGE_STR:
2903 case SECCAT_RODATA_MERGE_STR_INIT:
2904 case SECCAT_RODATA_MERGE_CONST:
2905 prefix = one_only ? ".lr" : ".lrodata";
2906 break;
2907 case SECCAT_SRODATA:
2908 case SECCAT_SDATA:
2909 case SECCAT_SBSS:
2910 gcc_unreachable ();
2911 case SECCAT_TEXT:
2912 case SECCAT_TDATA:
2913 case SECCAT_TBSS:
2914 /* We don't split these for the medium model. Place them into
2915 default sections and hope for the best. */
2916 break;
2917 case SECCAT_EMUTLS_VAR:
2918 prefix = targetm.emutls.var_section;
2919 break;
2920 case SECCAT_EMUTLS_TMPL:
2921 prefix = targetm.emutls.tmpl_section;
2922 break;
2924 if (prefix)
2926 const char *name, *linkonce;
2927 char *string;
2929 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
2930 name = targetm.strip_name_encoding (name);
2932 /* If we're using one_only, then there needs to be a .gnu.linkonce
2933 prefix to the section name. */
2934 linkonce = one_only ? ".gnu.linkonce" : "";
2936 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
2938 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
2939 return;
2942 default_unique_section (decl, reloc);
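/* Example (annotation, hypothetical variable "counter"): for a one-only
   bss decl the pieces concatenate to ".gnu.linkonce.lb.counter", while
   without one_only the same decl gets ".lbss.counter".  */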
2945 #ifdef COMMON_ASM_OP
2946 /* This says how to output assembler code to declare an
2947 uninitialized external linkage data object.
2949 For medium model x86-64 we need to use the .largecomm directive for
2950 large objects. */
2951 void
2952 x86_elf_aligned_common (FILE *file,
2953 const char *name, unsigned HOST_WIDE_INT size,
2954 int align)
2956 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2957 && size > (unsigned int)ix86_section_threshold)
2958 fprintf (file, ".largecomm\t");
2959 else
2960 fprintf (file, "%s", COMMON_ASM_OP);
2961 assemble_name (file, name);
2962 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
2963 size, align / BITS_PER_UNIT);
2965 #endif
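/* Example (annotation, hypothetical symbol "big_buf"): for a 128 KiB
   common object with 32-byte alignment under -mcmodel=medium the code
   above emits roughly ".largecomm big_buf,131072,32" instead of the
   normal COMMON_ASM_OP line.  */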
2967 /* Utility function for targets to use in implementing
2968 ASM_OUTPUT_ALIGNED_BSS. */
2970 void
2971 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
2972 const char *name, unsigned HOST_WIDE_INT size,
2973 int align)
2975 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2976 && size > (unsigned int)ix86_section_threshold)
2977 switch_to_section (get_named_section (decl, ".lbss", 0));
2978 else
2979 switch_to_section (bss_section);
2980 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
2981 #ifdef ASM_DECLARE_OBJECT_NAME
2982 last_assemble_variable_decl = decl;
2983 ASM_DECLARE_OBJECT_NAME (file, name, decl);
2984 #else
2985 /* The standard thing is to just output a label for the object. */
2986 ASM_OUTPUT_LABEL (file, name);
2987 #endif /* ASM_DECLARE_OBJECT_NAME */
2988 ASM_OUTPUT_SKIP (file, size ? size : 1);
2991 void
2992 optimization_options (int level, int size ATTRIBUTE_UNUSED)
2994 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
2995 make the problem with not enough registers even worse. */
2996 #ifdef INSN_SCHEDULING
2997 if (level > 1)
2998 flag_schedule_insns = 0;
2999 #endif
3001 if (TARGET_MACHO)
3002 /* The Darwin libraries never set errno, so we might as well
3003 avoid calling them when that's the only reason we would. */
3004 flag_errno_math = 0;
3006 /* The default values of these switches depend on TARGET_64BIT,
3007 which is not known at this moment. Mark these values with 2 and
3008 let the user override them. In case there is no command line option
3009 specifying them, we will set the defaults in override_options. */
3010 if (optimize >= 1)
3011 flag_omit_frame_pointer = 2;
3012 flag_pcc_struct_return = 2;
3013 flag_asynchronous_unwind_tables = 2;
3014 flag_vect_cost_model = 1;
3015 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
3016 SUBTARGET_OPTIMIZATION_OPTIONS;
3017 #endif
3020 /* Decide whether we can make a sibling call to a function. DECL is the
3021 declaration of the function being targeted by the call and EXP is the
3022 CALL_EXPR representing the call. */
3024 static bool
3025 ix86_function_ok_for_sibcall (tree decl, tree exp)
3027 tree func;
3028 rtx a, b;
3030 /* If we are generating position-independent code, we cannot sibcall
3031 optimize any indirect call, or a direct call to a global function,
3032 as the PLT requires %ebx be live. */
3033 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
3034 return false;
3036 if (decl)
3037 func = decl;
3038 else
3040 func = TREE_TYPE (CALL_EXPR_FN (exp));
3041 if (POINTER_TYPE_P (func))
3042 func = TREE_TYPE (func);
3045 /* Check that the return value locations are the same. For example,
3046 if we are returning floats on the 80387 register stack, we cannot
3047 make a sibcall from a function that doesn't return a float to a
3048 function that does or, conversely, from a function that does return
3049 a float to a function that doesn't; the necessary stack adjustment
3050 would not be executed. This is also the place we notice
3051 differences in the return value ABI. Note that it is ok for one
3052 of the functions to have void return type as long as the return
3053 value of the other is passed in a register. */
3054 a = ix86_function_value (TREE_TYPE (exp), func, false);
3055 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
3056 cfun->decl, false);
3057 if (STACK_REG_P (a) || STACK_REG_P (b))
3059 if (!rtx_equal_p (a, b))
3060 return false;
3062 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
3064 else if (!rtx_equal_p (a, b))
3065 return false;
3067 /* If this call is indirect, we'll need to be able to use a call-clobbered
3068 register for the address of the target function. Make sure that all
3069 such registers are not used for passing parameters. */
3070 if (!decl && !TARGET_64BIT)
3072 tree type;
3074 /* We're looking at the CALL_EXPR, we need the type of the function. */
3075 type = CALL_EXPR_FN (exp); /* pointer expression */
3076 type = TREE_TYPE (type); /* pointer type */
3077 type = TREE_TYPE (type); /* function type */
3079 if (ix86_function_regparm (type, NULL) >= 3)
3081 /* ??? Need to count the actual number of registers to be used,
3082 not the possible number of registers. Fix later. */
3083 return false;
3087 /* Dllimport'd functions are also called indirectly. */
3088 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
3089 && decl && DECL_DLLIMPORT_P (decl)
3090 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
3091 return false;
3093 /* If we force-aligned the stack, then sibcalling would unalign the
3094 stack, which may break the called function. */
3095 if (cfun->machine->force_align_arg_pointer)
3096 return false;
3098 /* Otherwise okay. That also includes certain types of indirect calls. */
3099 return true;
3102 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
3103 calling convention attributes;
3104 arguments as in struct attribute_spec.handler. */
3106 static tree
3107 ix86_handle_cconv_attribute (tree *node, tree name,
3108 tree args,
3109 int flags ATTRIBUTE_UNUSED,
3110 bool *no_add_attrs)
3112 if (TREE_CODE (*node) != FUNCTION_TYPE
3113 && TREE_CODE (*node) != METHOD_TYPE
3114 && TREE_CODE (*node) != FIELD_DECL
3115 && TREE_CODE (*node) != TYPE_DECL)
3117 warning (OPT_Wattributes, "%qs attribute only applies to functions",
3118 IDENTIFIER_POINTER (name));
3119 *no_add_attrs = true;
3120 return NULL_TREE;
3123 /* Can combine regparm with all attributes but fastcall. */
3124 if (is_attribute_p ("regparm", name))
3126 tree cst;
3128 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
3130 error ("fastcall and regparm attributes are not compatible");
3133 cst = TREE_VALUE (args);
3134 if (TREE_CODE (cst) != INTEGER_CST)
3136 warning (OPT_Wattributes,
3137 "%qs attribute requires an integer constant argument",
3138 IDENTIFIER_POINTER (name));
3139 *no_add_attrs = true;
3141 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
3143 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
3144 IDENTIFIER_POINTER (name), REGPARM_MAX);
3145 *no_add_attrs = true;
3148 if (!TARGET_64BIT
3149 && lookup_attribute (ix86_force_align_arg_pointer_string,
3150 TYPE_ATTRIBUTES (*node))
3151 && compare_tree_int (cst, REGPARM_MAX-1))
3153 error ("%s functions limited to %d register parameters",
3154 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
3157 return NULL_TREE;
3160 if (TARGET_64BIT)
3162 /* Do not warn when emulating the MS ABI. */
3163 if (!TARGET_64BIT_MS_ABI)
3164 warning (OPT_Wattributes, "%qs attribute ignored",
3165 IDENTIFIER_POINTER (name));
3166 *no_add_attrs = true;
3167 return NULL_TREE;
3170 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
3171 if (is_attribute_p ("fastcall", name))
3173 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
3175 error ("fastcall and cdecl attributes are not compatible");
3177 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
3179 error ("fastcall and stdcall attributes are not compatible");
3181 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
3183 error ("fastcall and regparm attributes are not compatible");
3187 /* Can combine stdcall with fastcall (redundant), regparm and
3188 sseregparm. */
3189 else if (is_attribute_p ("stdcall", name))
3191 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
3193 error ("stdcall and cdecl attributes are not compatible");
3195 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
3197 error ("stdcall and fastcall attributes are not compatible");
3201 /* Can combine cdecl with regparm and sseregparm. */
3202 else if (is_attribute_p ("cdecl", name))
3204 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
3206 error ("stdcall and cdecl attributes are not compatible");
3208 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
3210 error ("fastcall and cdecl attributes are not compatible");
3214 /* Can combine sseregparm with all attributes. */
3216 return NULL_TREE;
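/* Illustrative sketch (not part of this file): how the combination
   rules above surface in user code on 32-bit x86.  The function
   names are hypothetical.  */

/* Accepted: stdcall may combine with regparm and sseregparm.  */
void __attribute__ ((stdcall, regparm (2))) ok_fn (int a, int b);

/* Rejected by ix86_handle_cconv_attribute with the diagnostic
   "fastcall and regparm attributes are not compatible":

     void __attribute__ ((fastcall, regparm (3))) bad_fn (int a);  */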
3219 /* Return 0 if the attributes for two types are incompatible, 1 if they
3220 are compatible, and 2 if they are nearly compatible (which causes a
3221 warning to be generated). */
3223 static int
3224 ix86_comp_type_attributes (const_tree type1, const_tree type2)
3226 /* Check for mismatch of non-default calling convention. */
3227 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
3229 if (TREE_CODE (type1) != FUNCTION_TYPE
3230 && TREE_CODE (type1) != METHOD_TYPE)
3231 return 1;
3233 /* Check for mismatched fastcall/regparm types. */
3234 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
3235 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
3236 || (ix86_function_regparm (type1, NULL)
3237 != ix86_function_regparm (type2, NULL)))
3238 return 0;
3240 /* Check for mismatched sseregparm types. */
3241 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
3242 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
3243 return 0;
3245 /* Check for mismatched return types (cdecl vs stdcall). */
3246 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
3247 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
3248 return 0;
3250 return 1;
3253 /* Return the regparm value for a function with the indicated TYPE and DECL.
3254 DECL may be NULL when calling function indirectly
3255 or considering a libcall. */
3257 static int
3258 ix86_function_regparm (const_tree type, const_tree decl)
3260 tree attr;
3261 int regparm = ix86_regparm;
3263 static bool error_issued;
3265 if (TARGET_64BIT)
3266 return regparm;
3268 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
3269 if (attr)
3271 regparm
3272 = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
3274 if (decl && TREE_CODE (decl) == FUNCTION_DECL)
3276 /* We can't use regparm(3) for nested functions because
3277 these pass the static chain pointer in the %ecx register. */
3278 if (!error_issued && regparm == 3
3279 && decl_function_context (decl)
3280 && !DECL_NO_STATIC_CHAIN (decl))
3282 error ("nested functions are limited to 2 register parameters");
3283 error_issued = true;
3284 return 0;
3288 return regparm;
3291 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
3292 return 2;
3294 /* Use register calling convention for local functions when possible. */
3295 if (decl && TREE_CODE (decl) == FUNCTION_DECL
3296 && flag_unit_at_a_time && !profile_flag)
3298 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
3299 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
3300 if (i && i->local)
3302 int local_regparm, globals = 0, regno;
3303 struct function *f;
3305 /* Make sure no regparm register is taken by a
3306 fixed register variable. */
3307 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
3308 if (fixed_regs[local_regparm])
3309 break;
3311 /* We can't use regparm(3) for nested functions as these pass
3312 the static chain pointer in the third argument. */
3313 if (local_regparm == 3
3314 && (decl_function_context (decl)
3315 || ix86_force_align_arg_pointer)
3316 && !DECL_NO_STATIC_CHAIN (decl))
3317 local_regparm = 2;
3319 /* If the function realigns its stack pointer, the prologue will
3320 clobber %ecx. If we've already generated code for the callee,
3321 the callee DECL_STRUCT_FUNCTION is gone, so we fall back to
3322 scanning the attributes for the self-realigning property. */
3323 f = DECL_STRUCT_FUNCTION (decl);
3324 if (local_regparm == 3
3325 && (f ? !!f->machine->force_align_arg_pointer
3326 : !!lookup_attribute (ix86_force_align_arg_pointer_string,
3327 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
3328 local_regparm = 2;
3330 /* Each fixed register usage increases register pressure,
3331 so fewer registers should be used for argument passing.
3332 This functionality can be overridden by an explicit
3333 regparm value. */
3334 for (regno = 0; regno <= DI_REG; regno++)
3335 if (fixed_regs[regno])
3336 globals++;
3338 local_regparm
3339 = globals < local_regparm ? local_regparm - globals : 0;
3341 if (local_regparm > regparm)
3342 regparm = local_regparm;
3346 return regparm;
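/* Illustrative sketch (hypothetical declarations, not part of this
   file): what the regparm logic above means for user code.  */

/* Up to three integer arguments travel in %eax, %edx and %ecx
   instead of on the stack.  */
int __attribute__ ((regparm (3))) add3 (int a, int b, int c);

/* fastcall is reported by this function as a fixed regparm of 2
   (%ecx and %edx).  */
int __attribute__ ((fastcall)) add2 (int a, int b);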
3349 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
3350 DFmode (2) arguments in SSE registers for a function with the
3351 indicated TYPE and DECL. DECL may be NULL when calling function
3352 indirectly or considering a libcall. Otherwise return 0. */
3354 static int
3355 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
3357 gcc_assert (!TARGET_64BIT);
3359 /* Use SSE registers to pass SFmode and DFmode arguments if requested
3360 by the sseregparm attribute. */
3361 if (TARGET_SSEREGPARM
3362 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
3364 if (!TARGET_SSE)
3366 if (warn)
3368 if (decl)
3369 error ("Calling %qD with attribute sseregparm without "
3370 "SSE/SSE2 enabled", decl);
3371 else
3372 error ("Calling %qT with attribute sseregparm without "
3373 "SSE/SSE2 enabled", type);
3375 return 0;
3378 return 2;
3381 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
3382 (and DFmode for SSE2) arguments in SSE registers. */
3383 if (decl && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
3385 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
3386 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
3387 if (i && i->local)
3388 return TARGET_SSE2 ? 2 : 1;
3391 return 0;
3394 /* Return true if EAX is live at the start of the function. Used by
3395 ix86_expand_prologue to determine if we need special help before
3396 calling allocate_stack_worker. */
3398 static bool
3399 ix86_eax_live_at_start_p (void)
3401 /* Cheat. Don't bother working forward from ix86_function_regparm
3402 to the function type to whether an actual argument is located in
3403 eax. Instead just look at cfg info, which is still close enough
3404 to correct at this point. This gives false positives for broken
3405 functions that might use uninitialized data that happens to be
3406 allocated in eax, but who cares? */
3407 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
3410 /* Value is the number of bytes of arguments automatically
3411 popped when returning from a subroutine call.
3412 FUNDECL is the declaration node of the function (as a tree),
3413 FUNTYPE is the data type of the function (as a tree),
3414 or for a library call it is an identifier node for the subroutine name.
3415 SIZE is the number of bytes of arguments passed on the stack.
3417 On the 80386, the RTD insn may be used to pop them if the number
3418 of args is fixed, but if the number is variable then the caller
3419 must pop them all. RTD can't be used for library calls now
3420 because the library is compiled with the Unix compiler.
3421 Use of RTD is a selectable option, since it is incompatible with
3422 standard Unix calling sequences. If the option is not selected,
3423 the caller must always pop the args.
3425 The attribute stdcall is equivalent to RTD on a per module basis. */
3428 ix86_return_pops_args (tree fundecl, tree funtype, int size)
3430 int rtd;
3432 /* None of the 64-bit ABIs pop arguments. */
3433 if (TARGET_64BIT)
3434 return 0;
3436 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
3438 /* Cdecl functions override -mrtd, and never pop the stack. */
3439 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
3441 /* Stdcall and fastcall functions will pop the stack if not
3442 variable args. */
3443 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
3444 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
3445 rtd = 1;
3447 if (rtd && ! stdarg_p (funtype))
3448 return size;
3451 /* Lose any fake structure return argument if it is passed on the stack. */
3452 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
3453 && !KEEP_AGGREGATE_RETURN_POINTER)
3455 int nregs = ix86_function_regparm (funtype, fundecl);
3456 if (nregs == 0)
3457 return GET_MODE_SIZE (Pmode);
3460 return 0;
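/* Illustrative sketch (hypothetical declarations): the effect of the
   logic above on 32-bit code.  */

/* stdcall: the callee pops its 8 bytes of arguments, i.e. it returns
   with "ret $8"; ix86_return_pops_args yields SIZE here.  */
void __attribute__ ((stdcall)) f (int a, int b);

/* Variadic functions never pop, even under -mrtd; the caller always
   cleans up the stack.  */
int printf (const char *fmt, ...);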
3463 /* Argument support functions. */
3465 /* Return true when register may be used to pass function parameters. */
3466 bool
3467 ix86_function_arg_regno_p (int regno)
3469 int i;
3470 const int *parm_regs;
3472 if (!TARGET_64BIT)
3474 if (TARGET_MACHO)
3475 return (regno < REGPARM_MAX
3476 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
3477 else
3478 return (regno < REGPARM_MAX
3479 || (TARGET_MMX && MMX_REGNO_P (regno)
3480 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
3481 || (TARGET_SSE && SSE_REGNO_P (regno)
3482 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
3485 if (TARGET_MACHO)
3487 if (SSE_REGNO_P (regno) && TARGET_SSE)
3488 return true;
3490 else
3492 if (TARGET_SSE && SSE_REGNO_P (regno)
3493 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
3494 return true;
3497 /* RAX is used as hidden argument to va_arg functions. */
3498 if (!TARGET_64BIT_MS_ABI && regno == AX_REG)
3499 return true;
3501 if (TARGET_64BIT_MS_ABI)
3502 parm_regs = x86_64_ms_abi_int_parameter_registers;
3503 else
3504 parm_regs = x86_64_int_parameter_registers;
3505 for (i = 0; i < REGPARM_MAX; i++)
3506 if (regno == parm_regs[i])
3507 return true;
3508 return false;
3511 /* Return true if we do not know how to pass TYPE solely in registers. */
3513 static bool
3514 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
3516 if (must_pass_in_stack_var_size_or_pad (mode, type))
3517 return true;
3519 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
3520 The layout_type routine is crafty and tries to trick us into passing
3521 currently unsupported vector types on the stack by using TImode. */
3522 return (!TARGET_64BIT && mode == TImode
3523 && type && TREE_CODE (type) != VECTOR_TYPE);
3526 /* Initialize a variable CUM of type CUMULATIVE_ARGS
3527 for a call to a function whose data type is FNTYPE.
3528 For a library call, FNTYPE is 0. */
3530 void
3531 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
3532 tree fntype, /* tree ptr for function decl */
3533 rtx libname, /* SYMBOL_REF of library name or 0 */
3534 tree fndecl)
3536 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
3537 memset (cum, 0, sizeof (*cum));
3539 /* Set up the number of registers to use for passing arguments. */
3540 cum->nregs = ix86_regparm;
3541 if (TARGET_SSE)
3542 cum->sse_nregs = SSE_REGPARM_MAX;
3543 if (TARGET_MMX)
3544 cum->mmx_nregs = MMX_REGPARM_MAX;
3545 cum->warn_sse = true;
3546 cum->warn_mmx = true;
3548 /* Because types might mismatch between caller and callee, we need to
3549 use the actual type of the function for local calls.
3550 FIXME: cgraph_analyze can be told to actually record whether a function
3551 uses va_start, so for local functions maybe_vaarg can be made more
3552 aggressive, helping K&R code.
3553 FIXME: once the type system is fixed, we won't need this code anymore. */
3554 if (i && i->local)
3555 fntype = TREE_TYPE (fndecl);
3556 cum->maybe_vaarg = (fntype
3557 ? (!prototype_p (fntype) || stdarg_p (fntype))
3558 : !libname);
3560 if (!TARGET_64BIT)
3562 /* If there are variable arguments, then we won't pass anything
3563 in registers in 32-bit mode. */
3564 if (stdarg_p (fntype))
3566 cum->nregs = 0;
3567 cum->sse_nregs = 0;
3568 cum->mmx_nregs = 0;
3569 cum->warn_sse = 0;
3570 cum->warn_mmx = 0;
3571 return;
3574 /* Use ecx and edx registers if function has fastcall attribute,
3575 else look for regparm information. */
3576 if (fntype)
3578 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
3580 cum->nregs = 2;
3581 cum->fastcall = 1;
3583 else
3584 cum->nregs = ix86_function_regparm (fntype, fndecl);
3587 /* Set up the number of SSE registers used for passing SFmode
3588 and DFmode arguments. Warn for mismatching ABI. */
3589 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
3593 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
3594 But in the case of vector types, it is some vector mode.
3596 When we have only some of our vector isa extensions enabled, then there
3597 are some modes for which vector_mode_supported_p is false. For these
3598 modes, the generic vector support in gcc will choose some non-vector mode
3599 in order to implement the type. By computing the natural mode, we'll
3600 select the proper ABI location for the operand and not depend on whatever
3601 the middle-end decides to do with these vector types. */
3603 static enum machine_mode
3604 type_natural_mode (const_tree type)
3606 enum machine_mode mode = TYPE_MODE (type);
3608 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
3610 HOST_WIDE_INT size = int_size_in_bytes (type);
3611 if ((size == 8 || size == 16)
3612 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
3613 && TYPE_VECTOR_SUBPARTS (type) > 1)
3615 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
3617 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
3618 mode = MIN_MODE_VECTOR_FLOAT;
3619 else
3620 mode = MIN_MODE_VECTOR_INT;
3622 /* Get the mode which has this inner mode and number of units. */
3623 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3624 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
3625 && GET_MODE_INNER (mode) == innermode)
3626 return mode;
3628 gcc_unreachable ();
3632 return mode;
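/* Example of the natural-mode computation (an editorial sketch): even
   when SSE is disabled and the middle-end falls back to TImode for
   the type below, its natural mode is still V4SImode, which is what
   the ABI classification must see.  */
typedef int v4si __attribute__ ((vector_size (16)));  /* natural mode V4SImode */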
3635 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
3636 this may not agree with the mode that the type system has chosen for the
3637 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
3638 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
3640 static rtx
3641 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
3642 unsigned int regno)
3644 rtx tmp;
3646 if (orig_mode != BLKmode)
3647 tmp = gen_rtx_REG (orig_mode, regno);
3648 else
3650 tmp = gen_rtx_REG (mode, regno);
3651 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
3652 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
3655 return tmp;
3658 /* x86-64 register passing implementation. See the x86-64 ABI for details.
3659 The goal of this code is to classify each 8-byte chunk of the incoming
3660 argument by register class and assign registers accordingly. */
3662 /* Return the union class of CLASS1 and CLASS2.
3663 See the x86-64 PS ABI for details. */
3665 static enum x86_64_reg_class
3666 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
3668 /* Rule #1: If both classes are equal, this is the resulting class. */
3669 if (class1 == class2)
3670 return class1;
3672 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
3673 the other class. */
3674 if (class1 == X86_64_NO_CLASS)
3675 return class2;
3676 if (class2 == X86_64_NO_CLASS)
3677 return class1;
3679 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
3680 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
3681 return X86_64_MEMORY_CLASS;
3683 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
3684 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
3685 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
3686 return X86_64_INTEGERSI_CLASS;
3687 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
3688 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
3689 return X86_64_INTEGER_CLASS;
3691 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
3692 MEMORY is used. */
3693 if (class1 == X86_64_X87_CLASS
3694 || class1 == X86_64_X87UP_CLASS
3695 || class1 == X86_64_COMPLEX_X87_CLASS
3696 || class2 == X86_64_X87_CLASS
3697 || class2 == X86_64_X87UP_CLASS
3698 || class2 == X86_64_COMPLEX_X87_CLASS)
3699 return X86_64_MEMORY_CLASS;
3701 /* Rule #6: Otherwise class SSE is used. */
3702 return X86_64_SSE_CLASS;
3705 /* Classify the argument of type TYPE and mode MODE.
3706 CLASSES will be filled by the register class used to pass each word
3707 of the operand. The number of words is returned. In case the parameter
3708 should be passed in memory, 0 is returned. As a special case for zero
3709 sized containers, classes[0] will be NO_CLASS and 1 is returned.
3711 BIT_OFFSET is used internally for handling records and specifies
3712 the offset in bits modulo 256 to avoid overflow cases.
3714 See the x86-64 PS ABI for details.
3717 static int
3718 classify_argument (enum machine_mode mode, const_tree type,
3719 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
3721 HOST_WIDE_INT bytes =
3722 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3723 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3725 /* Variable sized entities are always passed/returned in memory. */
3726 if (bytes < 0)
3727 return 0;
3729 if (mode != VOIDmode
3730 && targetm.calls.must_pass_in_stack (mode, type))
3731 return 0;
3733 if (type && AGGREGATE_TYPE_P (type))
3735 int i;
3736 tree field;
3737 enum x86_64_reg_class subclasses[MAX_CLASSES];
3739 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
3740 if (bytes > 16)
3741 return 0;
3743 for (i = 0; i < words; i++)
3744 classes[i] = X86_64_NO_CLASS;
3746 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
3747 signal the memory class, so handle this as a special case. */
3748 if (!words)
3750 classes[0] = X86_64_NO_CLASS;
3751 return 1;
3754 /* Classify each field of record and merge classes. */
3755 switch (TREE_CODE (type))
3757 case RECORD_TYPE:
3758 /* And now merge the fields of structure. */
3759 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3761 if (TREE_CODE (field) == FIELD_DECL)
3763 int num;
3765 if (TREE_TYPE (field) == error_mark_node)
3766 continue;
3768 /* Bitfields are always classified as integer. Handle them
3769 early, since later code would consider them to be
3770 misaligned integers. */
3771 if (DECL_BIT_FIELD (field))
3773 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3774 i < ((int_bit_position (field) + (bit_offset % 64))
3775 + tree_low_cst (DECL_SIZE (field), 0)
3776 + 63) / 8 / 8; i++)
3777 classes[i] =
3778 merge_classes (X86_64_INTEGER_CLASS,
3779 classes[i]);
3781 else
3783 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3784 TREE_TYPE (field), subclasses,
3785 (int_bit_position (field)
3786 + bit_offset) % 256);
3787 if (!num)
3788 return 0;
3789 for (i = 0; i < num; i++)
3791 int pos =
3792 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3793 classes[i + pos] =
3794 merge_classes (subclasses[i], classes[i + pos]);
3799 break;
3801 case ARRAY_TYPE:
3802 /* Arrays are handled as small records. */
3804 int num;
3805 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
3806 TREE_TYPE (type), subclasses, bit_offset);
3807 if (!num)
3808 return 0;
3810 /* The partial classes are now full classes. */
3811 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
3812 subclasses[0] = X86_64_SSE_CLASS;
3813 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
3814 subclasses[0] = X86_64_INTEGER_CLASS;
3816 for (i = 0; i < words; i++)
3817 classes[i] = subclasses[i % num];
3819 break;
3821 case UNION_TYPE:
3822 case QUAL_UNION_TYPE:
3823 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
3825 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3827 if (TREE_CODE (field) == FIELD_DECL)
3829 int num;
3831 if (TREE_TYPE (field) == error_mark_node)
3832 continue;
3834 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3835 TREE_TYPE (field), subclasses,
3836 bit_offset);
3837 if (!num)
3838 return 0;
3839 for (i = 0; i < num; i++)
3840 classes[i] = merge_classes (subclasses[i], classes[i]);
3843 break;
3845 default:
3846 gcc_unreachable ();
3849 /* Final merger cleanup. */
3850 for (i = 0; i < words; i++)
3852 /* If one class is MEMORY, everything should be passed in
3853 memory. */
3854 if (classes[i] == X86_64_MEMORY_CLASS)
3855 return 0;
3857 /* The X86_64_SSEUP_CLASS should always be preceded by
3858 X86_64_SSE_CLASS. */
3859 if (classes[i] == X86_64_SSEUP_CLASS
3860 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
3861 classes[i] = X86_64_SSE_CLASS;
3863 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
3864 if (classes[i] == X86_64_X87UP_CLASS
3865 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
3866 classes[i] = X86_64_SSE_CLASS;
3868 return words;
3871 /* Compute the alignment needed. We align all types to their natural
3872 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
3873 if (mode != VOIDmode && mode != BLKmode)
3875 int mode_alignment = GET_MODE_BITSIZE (mode);
3877 if (mode == XFmode)
3878 mode_alignment = 128;
3879 else if (mode == XCmode)
3880 mode_alignment = 256;
3881 if (COMPLEX_MODE_P (mode))
3882 mode_alignment /= 2;
3883 /* Misaligned fields are always returned in memory. */
3884 if (bit_offset % mode_alignment)
3885 return 0;
3888 /* For V1xx modes, just use the base mode. */
3889 if (VECTOR_MODE_P (mode) && mode != V1DImode
3890 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
3891 mode = GET_MODE_INNER (mode);
3893 /* Classification of atomic types. */
3894 switch (mode)
3896 case SDmode:
3897 case DDmode:
3898 classes[0] = X86_64_SSE_CLASS;
3899 return 1;
3900 case TDmode:
3901 classes[0] = X86_64_SSE_CLASS;
3902 classes[1] = X86_64_SSEUP_CLASS;
3903 return 2;
3904 case DImode:
3905 case SImode:
3906 case HImode:
3907 case QImode:
3908 case CSImode:
3909 case CHImode:
3910 case CQImode:
3911 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3912 classes[0] = X86_64_INTEGERSI_CLASS;
3913 else
3914 classes[0] = X86_64_INTEGER_CLASS;
3915 return 1;
3916 case CDImode:
3917 case TImode:
3918 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
3919 return 2;
3920 case CTImode:
3921 return 0;
3922 case SFmode:
3923 if (!(bit_offset % 64))
3924 classes[0] = X86_64_SSESF_CLASS;
3925 else
3926 classes[0] = X86_64_SSE_CLASS;
3927 return 1;
3928 case DFmode:
3929 classes[0] = X86_64_SSEDF_CLASS;
3930 return 1;
3931 case XFmode:
3932 classes[0] = X86_64_X87_CLASS;
3933 classes[1] = X86_64_X87UP_CLASS;
3934 return 2;
3935 case TFmode:
3936 classes[0] = X86_64_SSE_CLASS;
3937 classes[1] = X86_64_SSEUP_CLASS;
3938 return 2;
3939 case SCmode:
3940 classes[0] = X86_64_SSE_CLASS;
3941 return 1;
3942 case DCmode:
3943 classes[0] = X86_64_SSEDF_CLASS;
3944 classes[1] = X86_64_SSEDF_CLASS;
3945 return 2;
3946 case XCmode:
3947 classes[0] = X86_64_COMPLEX_X87_CLASS;
3948 return 1;
3949 case TCmode:
3950 /* This mode is larger than 16 bytes. */
3951 return 0;
3952 case V4SFmode:
3953 case V4SImode:
3954 case V16QImode:
3955 case V8HImode:
3956 case V2DFmode:
3957 case V2DImode:
3958 classes[0] = X86_64_SSE_CLASS;
3959 classes[1] = X86_64_SSEUP_CLASS;
3960 return 2;
3961 case V1DImode:
3962 case V2SFmode:
3963 case V2SImode:
3964 case V4HImode:
3965 case V8QImode:
3966 classes[0] = X86_64_SSE_CLASS;
3967 return 1;
3968 case BLKmode:
3969 case VOIDmode:
3970 return 0;
3971 default:
3972 gcc_assert (VECTOR_MODE_P (mode));
3974 if (bytes > 16)
3975 return 0;
3977 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
3979 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3980 classes[0] = X86_64_INTEGERSI_CLASS;
3981 else
3982 classes[0] = X86_64_INTEGER_CLASS;
3983 classes[1] = X86_64_INTEGER_CLASS;
3984 return 1 + (bytes > 8);
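/* Worked example for classify_argument (an editorial sketch, using a
   hypothetical struct): the struct below occupies two eightbytes; the
   first classifies as X86_64_SSEDF_CLASS (the double) and the second
   as X86_64_INTEGER_CLASS (the long), so the value travels in an SSE
   register and an integer register respectively.  A struct larger
   than 16 bytes classifies as memory and goes on the stack.  */
struct s { double d; long l; };  /* eightbyte 0: SSEDF, eightbyte 1: INTEGER */
extern void take (struct s);     /* s.d -> %xmm0, s.l -> %rdi */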
3988 /* Examine the argument and return the number of registers required in
3989 each class. Return 0 iff the parameter should be passed in memory. */
3990 static int
3991 examine_argument (enum machine_mode mode, const_tree type, int in_return,
3992 int *int_nregs, int *sse_nregs)
3994 enum x86_64_reg_class regclass[MAX_CLASSES];
3995 int n = classify_argument (mode, type, regclass, 0);
3997 *int_nregs = 0;
3998 *sse_nregs = 0;
3999 if (!n)
4000 return 0;
4001 for (n--; n >= 0; n--)
4002 switch (regclass[n])
4004 case X86_64_INTEGER_CLASS:
4005 case X86_64_INTEGERSI_CLASS:
4006 (*int_nregs)++;
4007 break;
4008 case X86_64_SSE_CLASS:
4009 case X86_64_SSESF_CLASS:
4010 case X86_64_SSEDF_CLASS:
4011 (*sse_nregs)++;
4012 break;
4013 case X86_64_NO_CLASS:
4014 case X86_64_SSEUP_CLASS:
4015 break;
4016 case X86_64_X87_CLASS:
4017 case X86_64_X87UP_CLASS:
4018 if (!in_return)
4019 return 0;
4020 break;
4021 case X86_64_COMPLEX_X87_CLASS:
4022 return in_return ? 2 : 0;
4023 case X86_64_MEMORY_CLASS:
4024 gcc_unreachable ();
4026 return 1;
4029 /* Construct container for the argument used by GCC interface. See
4030 FUNCTION_ARG for the detailed description. */
4032 static rtx
4033 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
4034 const_tree type, int in_return, int nintregs, int nsseregs,
4035 const int *intreg, int sse_regno)
4037 /* The following variables hold the static issued_error state. */
4038 static bool issued_sse_arg_error;
4039 static bool issued_sse_ret_error;
4040 static bool issued_x87_ret_error;
4042 enum machine_mode tmpmode;
4043 int bytes =
4044 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
4045 enum x86_64_reg_class regclass[MAX_CLASSES];
4046 int n;
4047 int i;
4048 int nexps = 0;
4049 int needed_sseregs, needed_intregs;
4050 rtx exp[MAX_CLASSES];
4051 rtx ret;
4053 n = classify_argument (mode, type, regclass, 0);
4054 if (!n)
4055 return NULL;
4056 if (!examine_argument (mode, type, in_return, &needed_intregs,
4057 &needed_sseregs))
4058 return NULL;
4059 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
4060 return NULL;
4062 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
4063 some less clueful developer tries to use floating-point anyway. */
4064 if (needed_sseregs && !TARGET_SSE)
4066 if (in_return)
4068 if (!issued_sse_ret_error)
4070 error ("SSE register return with SSE disabled");
4071 issued_sse_ret_error = true;
4074 else if (!issued_sse_arg_error)
4076 error ("SSE register argument with SSE disabled");
4077 issued_sse_arg_error = true;
4079 return NULL;
4082 /* Likewise, error if the ABI requires us to return values in the
4083 x87 registers and the user specified -mno-80387. */
4084 if (!TARGET_80387 && in_return)
4085 for (i = 0; i < n; i++)
4086 if (regclass[i] == X86_64_X87_CLASS
4087 || regclass[i] == X86_64_X87UP_CLASS
4088 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
4090 if (!issued_x87_ret_error)
4092 error ("x87 register return with x87 disabled");
4093 issued_x87_ret_error = true;
4095 return NULL;
4098 /* First construct the simple cases. Avoid SCmode, since we want to use
4099 a single register to pass this type. */
4100 if (n == 1 && mode != SCmode)
4101 switch (regclass[0])
4103 case X86_64_INTEGER_CLASS:
4104 case X86_64_INTEGERSI_CLASS:
4105 return gen_rtx_REG (mode, intreg[0]);
4106 case X86_64_SSE_CLASS:
4107 case X86_64_SSESF_CLASS:
4108 case X86_64_SSEDF_CLASS:
4109 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
4110 case X86_64_X87_CLASS:
4111 case X86_64_COMPLEX_X87_CLASS:
4112 return gen_rtx_REG (mode, FIRST_STACK_REG);
4113 case X86_64_NO_CLASS:
4114 /* Zero sized array, struct or class. */
4115 return NULL;
4116 default:
4117 gcc_unreachable ();
4119 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
4120 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
4121 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
4123 if (n == 2
4124 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
4125 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
4126 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
4127 && regclass[1] == X86_64_INTEGER_CLASS
4128 && (mode == CDImode || mode == TImode || mode == TFmode)
4129 && intreg[0] + 1 == intreg[1])
4130 return gen_rtx_REG (mode, intreg[0]);
4132 /* Otherwise figure out the entries of the PARALLEL. */
4133 for (i = 0; i < n; i++)
4135 switch (regclass[i])
4137 case X86_64_NO_CLASS:
4138 break;
4139 case X86_64_INTEGER_CLASS:
4140 case X86_64_INTEGERSI_CLASS:
4141 /* Merge TImodes on aligned occasions here too. */
4142 if (i * 8 + 8 > bytes)
4143 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
4144 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
4145 tmpmode = SImode;
4146 else
4147 tmpmode = DImode;
4148 /* We've requested 24 bytes, which we have no mode for. Use DImode. */
4149 if (tmpmode == BLKmode)
4150 tmpmode = DImode;
4151 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
4152 gen_rtx_REG (tmpmode, *intreg),
4153 GEN_INT (i*8));
4154 intreg++;
4155 break;
4156 case X86_64_SSESF_CLASS:
4157 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
4158 gen_rtx_REG (SFmode,
4159 SSE_REGNO (sse_regno)),
4160 GEN_INT (i*8));
4161 sse_regno++;
4162 break;
4163 case X86_64_SSEDF_CLASS:
4164 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
4165 gen_rtx_REG (DFmode,
4166 SSE_REGNO (sse_regno)),
4167 GEN_INT (i*8));
4168 sse_regno++;
4169 break;
4170 case X86_64_SSE_CLASS:
4171 if (i < n - 1 && regclass[i + 1] == X86_64_SSEUP_CLASS)
4172 tmpmode = TImode;
4173 else
4174 tmpmode = DImode;
4175 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
4176 gen_rtx_REG (tmpmode,
4177 SSE_REGNO (sse_regno)),
4178 GEN_INT (i*8));
4179 if (tmpmode == TImode)
4180 i++;
4181 sse_regno++;
4182 break;
4183 default:
4184 gcc_unreachable ();
4188 /* Empty aligned struct, union or class. */
4189 if (nexps == 0)
4190 return NULL;
4192 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
4193 for (i = 0; i < nexps; i++)
4194 XVECEXP (ret, 0, i) = exp [i];
4195 return ret;
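/* Sketch of the RTL that construct_container produces for the struct
   from the classify_argument example above (hand-written, for
   illustration only):

     (parallel:BLK [(expr_list (reg:DF xmm0) (const_int 0))
                    (expr_list (reg:DI di) (const_int 8))])

   Each EXPR_LIST pairs a hard register with the byte offset of the
   eightbyte it carries.  */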
4198 /* Update the data in CUM to advance over an argument of mode MODE
4199 and data type TYPE. (TYPE is null for libcalls where that information
4200 may not be available.) */
4202 static void
4203 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4204 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
4206 switch (mode)
4208 default:
4209 break;
4211 case BLKmode:
4212 if (bytes < 0)
4213 break;
4214 /* FALLTHRU */
4216 case DImode:
4217 case SImode:
4218 case HImode:
4219 case QImode:
4220 cum->words += words;
4221 cum->nregs -= words;
4222 cum->regno += words;
4224 if (cum->nregs <= 0)
4226 cum->nregs = 0;
4227 cum->regno = 0;
4229 break;
4231 case DFmode:
4232 if (cum->float_in_sse < 2)
4233 break;
4234 case SFmode:
4235 if (cum->float_in_sse < 1)
4236 break;
4237 /* FALLTHRU */
4239 case TImode:
4240 case V16QImode:
4241 case V8HImode:
4242 case V4SImode:
4243 case V2DImode:
4244 case V4SFmode:
4245 case V2DFmode:
4246 if (!type || !AGGREGATE_TYPE_P (type))
4248 cum->sse_words += words;
4249 cum->sse_nregs -= 1;
4250 cum->sse_regno += 1;
4251 if (cum->sse_nregs <= 0)
4253 cum->sse_nregs = 0;
4254 cum->sse_regno = 0;
4257 break;
4259 case V8QImode:
4260 case V4HImode:
4261 case V2SImode:
4262 case V2SFmode:
4263 case V1DImode:
4264 if (!type || !AGGREGATE_TYPE_P (type))
4266 cum->mmx_words += words;
4267 cum->mmx_nregs -= 1;
4268 cum->mmx_regno += 1;
4269 if (cum->mmx_nregs <= 0)
4271 cum->mmx_nregs = 0;
4272 cum->mmx_regno = 0;
4275 break;
4279 static void
4280 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4281 tree type, HOST_WIDE_INT words)
4283 int int_nregs, sse_nregs;
4285 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
4286 cum->words += words;
4287 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
4289 cum->nregs -= int_nregs;
4290 cum->sse_nregs -= sse_nregs;
4291 cum->regno += int_nregs;
4292 cum->sse_regno += sse_nregs;
4294 else
4295 cum->words += words;
4298 static void
4299 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
4300 HOST_WIDE_INT words)
4302 /* Otherwise, this should be passed indirectly. */
4303 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
4305 cum->words += words;
4306 if (cum->nregs > 0)
4308 cum->nregs -= 1;
4309 cum->regno += 1;
4313 void
4314 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4315 tree type, int named ATTRIBUTE_UNUSED)
4317 HOST_WIDE_INT bytes, words;
4319 if (mode == BLKmode)
4320 bytes = int_size_in_bytes (type);
4321 else
4322 bytes = GET_MODE_SIZE (mode);
4323 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4325 if (type)
4326 mode = type_natural_mode (type);
4328 if (TARGET_64BIT_MS_ABI)
4329 function_arg_advance_ms_64 (cum, bytes, words);
4330 else if (TARGET_64BIT)
4331 function_arg_advance_64 (cum, mode, type, words);
4332 else
4333 function_arg_advance_32 (cum, mode, type, bytes, words);
4336 /* Define where to put the arguments to a function.
4337 Value is zero to push the argument on the stack,
4338 or a hard register in which to store the argument.
4340 MODE is the argument's machine mode.
4341 TYPE is the data type of the argument (as a tree).
4342 This is null for libcalls where that information may
4343 not be available.
4344 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4345 the preceding args and about the function being called.
4346 NAMED is nonzero if this argument is a named parameter
4347 (otherwise it is an extra parameter matching an ellipsis). */
4349 static rtx
4350 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4351 enum machine_mode orig_mode, tree type,
4352 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
4354 static bool warnedsse, warnedmmx;
4356 /* Avoid the AL settings for the Unix64 ABI. */
4357 if (mode == VOIDmode)
4358 return constm1_rtx;
4360 switch (mode)
4362 default:
4363 break;
4365 case BLKmode:
4366 if (bytes < 0)
4367 break;
4368 /* FALLTHRU */
4369 case DImode:
4370 case SImode:
4371 case HImode:
4372 case QImode:
4373 if (words <= cum->nregs)
4375 int regno = cum->regno;
4377 /* Fastcall allocates the first two DWORD (SImode) or
4378 smaller arguments to ECX and EDX if the argument isn't
4379 an aggregate type. */
4380 if (cum->fastcall)
4382 if (mode == BLKmode
4383 || mode == DImode
4384 || (type && AGGREGATE_TYPE_P (type)))
4385 break;
4387 /* ECX, not EAX, is the first allocated register. */
4388 if (regno == AX_REG)
4389 regno = CX_REG;
4391 return gen_rtx_REG (mode, regno);
4393 break;
4395 case DFmode:
4396 if (cum->float_in_sse < 2)
4397 break;
4398 case SFmode:
4399 if (cum->float_in_sse < 1)
4400 break;
4401 /* FALLTHRU */
4402 case TImode:
4403 case V16QImode:
4404 case V8HImode:
4405 case V4SImode:
4406 case V2DImode:
4407 case V4SFmode:
4408 case V2DFmode:
4409 if (!type || !AGGREGATE_TYPE_P (type))
4411 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
4413 warnedsse = true;
4414 warning (0, "SSE vector argument without SSE enabled "
4415 "changes the ABI");
4417 if (cum->sse_nregs)
4418 return gen_reg_or_parallel (mode, orig_mode,
4419 cum->sse_regno + FIRST_SSE_REG);
4421 break;
4423 case V8QImode:
4424 case V4HImode:
4425 case V2SImode:
4426 case V2SFmode:
4427 case V1DImode:
4428 if (!type || !AGGREGATE_TYPE_P (type))
4430 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
4432 warnedmmx = true;
4433 warning (0, "MMX vector argument without MMX enabled "
4434 "changes the ABI");
4436 if (cum->mmx_nregs)
4437 return gen_reg_or_parallel (mode, orig_mode,
4438 cum->mmx_regno + FIRST_MMX_REG);
4440 break;
4443 return NULL_RTX;
4446 static rtx
4447 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4448 enum machine_mode orig_mode, tree type)
4450 /* Handle a hidden AL argument containing the number of registers
4451 for varargs x86-64 functions. */
4452 if (mode == VOIDmode)
4453 return GEN_INT (cum->maybe_vaarg
4454 ? (cum->sse_nregs < 0
4455 ? SSE_REGPARM_MAX
4456 : cum->sse_regno)
4457 : -1);
4459 return construct_container (mode, orig_mode, type, 0, cum->nregs,
4460 cum->sse_nregs,
4461 &x86_64_int_parameter_registers [cum->regno],
4462 cum->sse_regno);
4465 static rtx
4466 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4467 enum machine_mode orig_mode, int named,
4468 HOST_WIDE_INT bytes)
4470 unsigned int regno;
4472 /* Avoid the AL settings for the Unix64 ABI. */
4473 if (mode == VOIDmode)
4474 return constm1_rtx;
4476 /* If we've run out of registers, it goes on the stack. */
4477 if (cum->nregs == 0)
4478 return NULL_RTX;
4480 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
4482 /* Only floating point modes are passed in anything but integer regs. */
4483 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
4485 if (named)
4486 regno = cum->regno + FIRST_SSE_REG;
4487 else
4489 rtx t1, t2;
4491 /* Unnamed floating parameters are passed in both the
4492 SSE and integer registers. */
4493 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
4494 t2 = gen_rtx_REG (mode, regno);
4495 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
4496 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
4497 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
4500 /* Handle aggregate types passed in registers. */
4501 if (orig_mode == BLKmode)
4503 if (bytes > 0 && bytes <= 8)
4504 mode = (bytes > 4 ? DImode : SImode);
4505 if (mode == BLKmode)
4506 mode = DImode;
4509 return gen_reg_or_parallel (mode, orig_mode, regno);
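/* Sketch of the MS-ABI rule implemented above: an unnamed double in
   the second argument slot is made available in both %xmm1 and %rdx,
   because a varargs callee will read it back through the integer
   register save area.  Named floating arguments use only the SSE
   register.  */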
4513 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
4514 tree type, int named)
4516 enum machine_mode mode = omode;
4517 HOST_WIDE_INT bytes, words;
4519 if (mode == BLKmode)
4520 bytes = int_size_in_bytes (type);
4521 else
4522 bytes = GET_MODE_SIZE (mode);
4523 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4525 /* To simplify the code below, represent vector types with a vector mode
4526 even if MMX/SSE are not active. */
4527 if (type && TREE_CODE (type) == VECTOR_TYPE)
4528 mode = type_natural_mode (type);
4530 if (TARGET_64BIT_MS_ABI)
4531 return function_arg_ms_64 (cum, mode, omode, named, bytes);
4532 else if (TARGET_64BIT)
4533 return function_arg_64 (cum, mode, omode, type);
4534 else
4535 return function_arg_32 (cum, mode, omode, type, bytes, words);
4538 /* A C expression that indicates when an argument must be passed by
4539 reference. If nonzero for an argument, a copy of that argument is
4540 made in memory and a pointer to the argument is passed instead of
4541 the argument itself. The pointer is passed in whatever way is
4542 appropriate for passing a pointer to that type. */
4544 static bool
4545 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4546 enum machine_mode mode ATTRIBUTE_UNUSED,
4547 const_tree type, bool named ATTRIBUTE_UNUSED)
4549 /* See Windows x64 Software Convention. */
4550 if (TARGET_64BIT_MS_ABI)
4552 int msize = (int) GET_MODE_SIZE (mode);
4553 if (type)
4555 /* Arrays are passed by reference. */
4556 if (TREE_CODE (type) == ARRAY_TYPE)
4557 return true;
4559 if (AGGREGATE_TYPE_P (type))
4561 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
4562 are passed by reference. */
4563 msize = int_size_in_bytes (type);
4567 /* __m128 is passed by reference. */
4568 switch (msize) {
4569 case 1: case 2: case 4: case 8:
4570 break;
4571 default:
4572 return true;
4575 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
4576 return 1;
4578 return 0;
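/* Illustrative sketch (hypothetical types): the Windows x64 rule
   coded above.  */
struct by_ref { int a, b, c; };  /* 12 bytes: not 1/2/4/8, passed by reference */
struct by_val { int a, b; };     /* 8 bytes: passed by value in a register */
extern void g (struct by_ref, struct by_val);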
4581 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
4582 ABI. */
4583 static bool
4584 contains_aligned_value_p (tree type)
4586 enum machine_mode mode = TYPE_MODE (type);
4587 if (((TARGET_SSE && SSE_REG_MODE_P (mode)) || mode == TDmode)
4588 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
4589 return true;
4590 if (TYPE_ALIGN (type) < 128)
4591 return false;
4593 if (AGGREGATE_TYPE_P (type))
4595 /* Walk the aggregates recursively. */
4596 switch (TREE_CODE (type))
4598 case RECORD_TYPE:
4599 case UNION_TYPE:
4600 case QUAL_UNION_TYPE:
4602 tree field;
4604 /* Walk all the structure fields. */
4605 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4607 if (TREE_CODE (field) == FIELD_DECL
4608 && contains_aligned_value_p (TREE_TYPE (field)))
4609 return true;
4611 break;
4614 case ARRAY_TYPE:
4615 /* Just in case some language passes arrays by value. */
4616 if (contains_aligned_value_p (TREE_TYPE (type)))
4617 return true;
4618 break;
4620 default:
4621 gcc_unreachable ();
4624 return false;
4627 /* Gives the alignment boundary, in bits, of an argument with the
4628 specified mode and type. */
4631 ix86_function_arg_boundary (enum machine_mode mode, tree type)
4633 int align;
4634 if (type)
4635 align = TYPE_ALIGN (type);
4636 else
4637 align = GET_MODE_ALIGNMENT (mode);
4638 if (align < PARM_BOUNDARY)
4639 align = PARM_BOUNDARY;
4640 /* In 32bit, only _Decimal128 is aligned to its natural boundary. */
4641 if (!TARGET_64BIT && mode != TDmode)
4643 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
4644 make an exception for SSE modes since these require 128bit
4645 alignment.
4647 The handling here differs from field_alignment. ICC aligns MMX
4648 arguments to 4 byte boundaries, while structure fields are aligned
4649 to 8 byte boundaries. */
4650 if (!type)
4652 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)) && mode != TDmode)
4653 align = PARM_BOUNDARY;
4655 else
4657 if (!contains_aligned_value_p (type))
4658 align = PARM_BOUNDARY;
4661 if (align > BIGGEST_ALIGNMENT)
4662 align = BIGGEST_ALIGNMENT;
4663 return align;
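/* Examples of the 32-bit rule above (an editorial sketch):
   - double: natural alignment is 8, but as an argument it is aligned
     to PARM_BOUNDARY (4 bytes);
   - SSE modes such as __m128: keep their 128-bit alignment;
   - _Decimal128 (TDmode): keeps its natural 128-bit alignment.  */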
4666 /* Return true if N is a possible register number of function value. */
4668 bool
4669 ix86_function_value_regno_p (int regno)
4671 switch (regno)
4673 case 0:
4674 return true;
4676 case FIRST_FLOAT_REG:
4677 if (TARGET_64BIT_MS_ABI)
4678 return false;
4679 return TARGET_FLOAT_RETURNS_IN_80387;
4681 case FIRST_SSE_REG:
4682 return TARGET_SSE;
4684 case FIRST_MMX_REG:
4685 if (TARGET_MACHO || TARGET_64BIT)
4686 return false;
4687 return TARGET_MMX;
4690 return false;
4693 /* Define how to find the value returned by a function.
4694 VALTYPE is the data type of the value (as a tree).
4695 If the precise function being called is known, FUNC is its FUNCTION_DECL;
4696 otherwise, FUNC is 0. */
4698 static rtx
4699 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
4700 const_tree fntype, const_tree fn)
4702 unsigned int regno;
4704 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
4705 we normally prevent this case when mmx is not available. However
4706 some ABIs may require the result to be returned like DImode. */
4707 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4708 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
4710 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4711 we prevent this case when sse is not available. However some ABIs
4712 may require the result to be returned like integer TImode. */
4713 else if (mode == TImode
4714 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4715 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
4717 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
4718 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
4719 regno = FIRST_FLOAT_REG;
4720 else
4721 /* Most things go in %eax. */
4722 regno = AX_REG;
4724 /* Override FP return register with %xmm0 for local functions when
4725 SSE math is enabled or for functions with sseregparm attribute. */
4726 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
4728 int sse_level = ix86_function_sseregparm (fntype, fn, false);
4729 if ((sse_level >= 1 && mode == SFmode)
4730 || (sse_level == 2 && mode == DFmode))
4731 regno = FIRST_SSE_REG;
4734 return gen_rtx_REG (orig_mode, regno);
4737 static rtx
4738 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
4739 const_tree valtype)
4741 rtx ret;
4743 /* Handle libcalls, which don't provide a type node. */
4744 if (valtype == NULL)
4746 switch (mode)
4748 case SFmode:
4749 case SCmode:
4750 case DFmode:
4751 case DCmode:
4752 case TFmode:
4753 case SDmode:
4754 case DDmode:
4755 case TDmode:
4756 return gen_rtx_REG (mode, FIRST_SSE_REG);
4757 case XFmode:
4758 case XCmode:
4759 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
4760 case TCmode:
4761 return NULL;
4762 default:
4763 return gen_rtx_REG (mode, AX_REG);
4767 ret = construct_container (mode, orig_mode, valtype, 1,
4768 REGPARM_MAX, SSE_REGPARM_MAX,
4769 x86_64_int_return_registers, 0);
4771 /* For zero sized structures, construct_container returns NULL, but we
4772 need to keep the rest of the compiler happy by returning a meaningful value. */
4773 if (!ret)
4774 ret = gen_rtx_REG (orig_mode, AX_REG);
4776 return ret;
4779 static rtx
4780 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
4782 unsigned int regno = AX_REG;
4784 if (TARGET_SSE)
4786 switch (GET_MODE_SIZE (mode))
4788 case 16:
4789 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
4790 && !COMPLEX_MODE_P (mode))
4791 regno = FIRST_SSE_REG;
4792 break;
4793 case 8:
4794 case 4:
4795 if (mode == SFmode || mode == DFmode)
4796 regno = FIRST_SSE_REG;
4797 break;
4798 default:
4799 break;
4802 return gen_rtx_REG (orig_mode, regno);
4805 static rtx
4806 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
4807 enum machine_mode orig_mode, enum machine_mode mode)
4809 const_tree fn, fntype;
4811 fn = NULL_TREE;
4812 if (fntype_or_decl && DECL_P (fntype_or_decl))
4813 fn = fntype_or_decl;
4814 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
4816 if (TARGET_64BIT_MS_ABI)
4817 return function_value_ms_64 (orig_mode, mode);
4818 else if (TARGET_64BIT)
4819 return function_value_64 (orig_mode, mode, valtype);
4820 else
4821 return function_value_32 (orig_mode, mode, fntype, fn);
4824 static rtx
4825 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
4826 bool outgoing ATTRIBUTE_UNUSED)
4828 enum machine_mode mode, orig_mode;
4830 orig_mode = TYPE_MODE (valtype);
4831 mode = type_natural_mode (valtype);
4832 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
4836 ix86_libcall_value (enum machine_mode mode)
4838 return ix86_function_value_1 (NULL, NULL, mode, mode);
4841 /* Return true iff type is returned in memory. */
4843 static int ATTRIBUTE_UNUSED
4844 return_in_memory_32 (const_tree type, enum machine_mode mode)
4846 HOST_WIDE_INT size;
4848 if (mode == BLKmode)
4849 return 1;
4851 size = int_size_in_bytes (type);
4853 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
4854 return 0;
4856 if (VECTOR_MODE_P (mode) || mode == TImode)
4858 /* User-created vectors small enough to fit in EAX. */
4859 if (size < 8)
4860 return 0;
4862 /* MMX/3dNow values are returned in MM0,
4863 except when MMX doesn't exist. */
4864 if (size == 8)
4865 return (TARGET_MMX ? 0 : 1);
4867 /* SSE values are returned in XMM0, except when it doesn't exist. */
4868 if (size == 16)
4869 return (TARGET_SSE ? 0 : 1);
4872 if (mode == XFmode)
4873 return 0;
4875 if (mode == TDmode)
4876 return 1;
4878 if (size > 12)
4879 return 1;
4880 return 0;
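/* Illustrative sketch (hypothetical declarations) of the 32-bit
   decisions above.  */
struct pair { int a, b; };  /* 8 bytes: in memory, unless MS_AGGREGATE_RETURN */
long double h (void);       /* XFmode: returned in %st(0), never in memory */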
4883 static int ATTRIBUTE_UNUSED
4884 return_in_memory_64 (const_tree type, enum machine_mode mode)
4886 int needed_intregs, needed_sseregs;
4887 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
4890 static int ATTRIBUTE_UNUSED
4891 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
4893 HOST_WIDE_INT size = int_size_in_bytes (type);
4895 /* __m128 is returned in xmm0. */
4896 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
4897 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
4898 return 0;
4900 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
4901 return (size != 1 && size != 2 && size != 4 && size != 8);
4904 static bool
4905 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
4907 #ifdef SUBTARGET_RETURN_IN_MEMORY
4908 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
4909 #else
4910 const enum machine_mode mode = type_natural_mode (type);
4912 if (TARGET_64BIT_MS_ABI)
4913 return return_in_memory_ms_64 (type, mode);
4914 else if (TARGET_64BIT)
4915 return return_in_memory_64 (type, mode);
4916 else
4917 return return_in_memory_32 (type, mode);
4918 #endif
4921 /* Return false iff TYPE is returned in memory. This version is used
4922 on Solaris 10. It is similar to the generic ix86_return_in_memory,
4923 but differs notably in that when MMX is available, 8-byte vectors
4924 are returned in memory, rather than in MMX registers. */
4926 bool
4927 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
4929 int size;
4930 enum machine_mode mode = type_natural_mode (type);
4932 if (TARGET_64BIT)
4933 return return_in_memory_64 (type, mode);
4935 if (mode == BLKmode)
4936 return 1;
4938 size = int_size_in_bytes (type);
4940 if (VECTOR_MODE_P (mode))
4942 /* Return in memory only if MMX registers *are* available. This
4943 seems backwards, but it is consistent with the existing
4944 Solaris x86 ABI. */
4945 if (size == 8)
4946 return TARGET_MMX;
4947 if (size == 16)
4948 return !TARGET_SSE;
4950 else if (mode == TImode)
4951 return !TARGET_SSE;
4952 else if (mode == XFmode)
4953 return 0;
4955 return size > 12;
4958 /* When returning SSE vector types, we have a choice of either
4959 (1) being abi incompatible with a -march switch, or
4960 (2) generating an error.
4961 Given no good solution, I think the safest thing is one warning.
4962 The user won't be able to use -Werror, but....
4964 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
4965 called in response to actually generating a caller or callee that
4966 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
4967 via aggregate_value_p for general type probing from tree-ssa. */
4969 static rtx
4970 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
4972 static bool warnedsse, warnedmmx;
4974 if (!TARGET_64BIT && type)
4976 /* Look at the return type of the function, not the function type. */
4977 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
4979 if (!TARGET_SSE && !warnedsse)
4981 if (mode == TImode
4982 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4984 warnedsse = true;
4985 warning (0, "SSE vector return without SSE enabled "
4986 "changes the ABI");
4990 if (!TARGET_MMX && !warnedmmx)
4992 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4994 warnedmmx = true;
4995 warning (0, "MMX vector return without MMX enabled "
4996 "changes the ABI");
5001 return NULL;
5005 /* Create the va_list data type. */
5007 static tree
5008 ix86_build_builtin_va_list (void)
5010 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
5012 /* For i386 we use a plain pointer to the argument area. */
5013 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
5014 return build_pointer_type (char_type_node);
5016 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5017 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5019 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
5020 unsigned_type_node);
5021 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
5022 unsigned_type_node);
5023 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
5024 ptr_type_node);
5025 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
5026 ptr_type_node);
5028 va_list_gpr_counter_field = f_gpr;
5029 va_list_fpr_counter_field = f_fpr;
5031 DECL_FIELD_CONTEXT (f_gpr) = record;
5032 DECL_FIELD_CONTEXT (f_fpr) = record;
5033 DECL_FIELD_CONTEXT (f_ovf) = record;
5034 DECL_FIELD_CONTEXT (f_sav) = record;
5036 TREE_CHAIN (record) = type_decl;
5037 TYPE_NAME (record) = type_decl;
5038 TYPE_FIELDS (record) = f_gpr;
5039 TREE_CHAIN (f_gpr) = f_fpr;
5040 TREE_CHAIN (f_fpr) = f_ovf;
5041 TREE_CHAIN (f_ovf) = f_sav;
5043 layout_type (record);
5045 /* The correct type is an array type of one element. */
5046 return build_array_type (record, build_index_type (size_zero_node));
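/* C-level equivalent of the record built above for the 64-bit ABI (a
   sketch; the real type is constructed through the tree machinery):  */
typedef struct sketch_va_list_tag
{
  unsigned int gp_offset;   /* byte offset into reg_save_area for GPRs */
  unsigned int fp_offset;   /* byte offset into reg_save_area for SSE regs */
  void *overflow_arg_area;  /* arguments passed on the stack */
  void *reg_save_area;      /* register save block from the prologue */
} sketch_va_list[1];        /* the correct type is a one-element array */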
5049 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
5051 static void
5052 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
5054 rtx save_area, mem;
5055 rtx label;
5056 rtx label_ref;
5057 rtx tmp_reg;
5058 rtx nsse_reg;
5059 alias_set_type set;
5060 int i;
5062 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
5063 return;
5065 /* Indicate to allocate space on the stack for varargs save area. */
5066 ix86_save_varrargs_registers = 1;
5067 /* We need 16-byte stack alignment to save SSE registers. If the user
5068 asked for a lower preferred_stack_boundary, let's just hope that he
5069 knows what he is doing and won't pass SSE values through varargs.
5071 We may also end up assuming that only 64-bit values are stored in SSE
5072 registers, to let some floating point programs work. */
5073 if (ix86_preferred_stack_boundary >= BIGGEST_ALIGNMENT)
5074 crtl->stack_alignment_needed = BIGGEST_ALIGNMENT;
5076 save_area = frame_pointer_rtx;
5077 set = get_varargs_alias_set ();
5079 for (i = cum->regno;
5080 i < ix86_regparm
5081 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
5082 i++)
5084 mem = gen_rtx_MEM (Pmode,
5085 plus_constant (save_area, i * UNITS_PER_WORD));
5086 MEM_NOTRAP_P (mem) = 1;
5087 set_mem_alias_set (mem, set);
5088 emit_move_insn (mem, gen_rtx_REG (Pmode,
5089 x86_64_int_parameter_registers[i]));
5092 if (cum->sse_nregs && cfun->va_list_fpr_size)
5094 /* Now emit code to save SSE registers. The AX parameter contains the
5095 number of SSE parameter registers used to call this function. We use
5096 the sse_prologue_save insn template, which produces a computed jump
5097 across the SSE saves. We need some preparation work to get this working. */
5099 label = gen_label_rtx ();
5100 label_ref = gen_rtx_LABEL_REF (Pmode, label);
5102 /* Compute the address to jump to:
5103 label - 4*eax + nnamed_sse_arguments*4 (each save insn is 4 bytes). */
5104 tmp_reg = gen_reg_rtx (Pmode);
5105 nsse_reg = gen_reg_rtx (Pmode);
5106 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
5107 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
5108 gen_rtx_MULT (Pmode, nsse_reg,
5109 GEN_INT (4))));
5110 if (cum->sse_regno)
5111 emit_move_insn
5112 (nsse_reg,
5113 gen_rtx_CONST (DImode,
5114 gen_rtx_PLUS (DImode,
5115 label_ref,
5116 GEN_INT (cum->sse_regno * 4))));
5117 else
5118 emit_move_insn (nsse_reg, label_ref);
5119 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
5121 /* Compute the address of the memory block we save into. We always use a
5122 pointer pointing 127 bytes after the first byte to store; this is
5123 needed to keep the instruction size limited to 4 bytes. */
5124 tmp_reg = gen_reg_rtx (Pmode);
5125 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
5126 plus_constant (save_area,
5127 8 * REGPARM_MAX + 127)));
5128 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
5129 MEM_NOTRAP_P (mem) = 1;
5130 set_mem_alias_set (mem, set);
5131 set_mem_align (mem, BITS_PER_WORD);
5133 /* And finally do the dirty job! */
5134 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
5135 GEN_INT (cum->sse_regno), label));
5139 static void
5140 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
5142 alias_set_type set = get_varargs_alias_set ();
5143 int i;
5145 for (i = cum->regno; i < REGPARM_MAX; i++)
5147 rtx reg, mem;
5149 mem = gen_rtx_MEM (Pmode,
5150 plus_constant (virtual_incoming_args_rtx,
5151 i * UNITS_PER_WORD));
5152 MEM_NOTRAP_P (mem) = 1;
5153 set_mem_alias_set (mem, set);
5155 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
5156 emit_move_insn (mem, reg);
5160 static void
5161 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5162 tree type, int *pretend_size ATTRIBUTE_UNUSED,
5163 int no_rtl)
5165 CUMULATIVE_ARGS next_cum;
5166 tree fntype;
5168 /* This argument doesn't appear to be used anymore. Which is good,
5169 because the old code here didn't suppress rtl generation. */
5170 gcc_assert (!no_rtl);
5172 if (!TARGET_64BIT)
5173 return;
5175 fntype = TREE_TYPE (current_function_decl);
5177 /* For varargs, we do not want to skip the dummy va_dcl argument.
5178 For stdargs, we do want to skip the last named argument. */
5179 next_cum = *cum;
5180 if (stdarg_p (fntype))
5181 function_arg_advance (&next_cum, mode, type, 1);
5183 if (TARGET_64BIT_MS_ABI)
5184 setup_incoming_varargs_ms_64 (&next_cum);
5185 else
5186 setup_incoming_varargs_64 (&next_cum);
5189 /* Implement va_start. */
5191 static void
5192 ix86_va_start (tree valist, rtx nextarg)
5194 HOST_WIDE_INT words, n_gpr, n_fpr;
5195 tree f_gpr, f_fpr, f_ovf, f_sav;
5196 tree gpr, fpr, ovf, sav, t;
5197 tree type;
5199 /* Only 64-bit targets need something special. */
5200 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
5202 std_expand_builtin_va_start (valist, nextarg);
5203 return;
5206 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
5207 f_fpr = TREE_CHAIN (f_gpr);
5208 f_ovf = TREE_CHAIN (f_fpr);
5209 f_sav = TREE_CHAIN (f_ovf);
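/* For reference (System V x86-64 ABI; this note is not in the original
   source), the four fields chained above correspond to

	typedef struct {
	  unsigned int gp_offset;	   offset of next GP reg in the save area
	  unsigned int fp_offset;	   offset of next FP reg in the save area
	  void *overflow_arg_area;	   stack-passed arguments
	  void *reg_save_area;		   register save block
	} va_list[1];  */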
5211 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
5212 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
5213 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
5214 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
5215 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
5217 /* Count number of gp and fp argument registers used. */
5218 words = crtl->args.info.words;
5219 n_gpr = crtl->args.info.regno;
5220 n_fpr = crtl->args.info.sse_regno;
5222 if (cfun->va_list_gpr_size)
5224 type = TREE_TYPE (gpr);
5225 t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
5226 build_int_cst (type, n_gpr * 8));
5227 TREE_SIDE_EFFECTS (t) = 1;
5228 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5231 if (cfun->va_list_fpr_size)
5233 type = TREE_TYPE (fpr);
5234 t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
5235 build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
5236 TREE_SIDE_EFFECTS (t) = 1;
5237 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
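/* Worked example (added, not in the original source): if three GP and one
   SSE register were consumed by named arguments, the code above sets
   gp_offset = 3*8 = 24 and fp_offset = 8*REGPARM_MAX + 1*16 = 64, i.e.
   both counters index into the register save area laid out as 48 bytes of
   GP registers followed by 16-byte SSE slots.  */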
5240 /* Find the overflow area. */
5241 type = TREE_TYPE (ovf);
5242 t = make_tree (type, virtual_incoming_args_rtx);
5243 if (words != 0)
5244 t = build2 (POINTER_PLUS_EXPR, type, t,
5245 size_int (words * UNITS_PER_WORD));
5246 t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
5247 TREE_SIDE_EFFECTS (t) = 1;
5248 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5250 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
5252 /* Find the register save area.
5253 The function prologue saves it right above the stack frame. */
5254 type = TREE_TYPE (sav);
5255 t = make_tree (type, frame_pointer_rtx);
5256 t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
5257 TREE_SIDE_EFFECTS (t) = 1;
5258 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5262 /* Implement va_arg. */
5264 static tree
5265 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5267 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
5268 tree f_gpr, f_fpr, f_ovf, f_sav;
5269 tree gpr, fpr, ovf, sav, t;
5270 int size, rsize;
5271 tree lab_false, lab_over = NULL_TREE;
5272 tree addr, t2;
5273 rtx container;
5274 int indirect_p = 0;
5275 tree ptrtype;
5276 enum machine_mode nat_mode;
5278 /* Only the 64-bit target needs something special. */
5279 if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
5280 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5282 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
5283 f_fpr = TREE_CHAIN (f_gpr);
5284 f_ovf = TREE_CHAIN (f_fpr);
5285 f_sav = TREE_CHAIN (f_ovf);
5287 valist = build_va_arg_indirect_ref (valist);
5288 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
5289 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
5290 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
5291 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
5293 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
5294 if (indirect_p)
5295 type = build_pointer_type (type);
5296 size = int_size_in_bytes (type);
5297 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
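/* Added example: for a 12-byte structure, size == 12 and, on x86-64
   (UNITS_PER_WORD == 8), rsize == (12 + 7) / 8 == 2 stack words.  */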
5299 nat_mode = type_natural_mode (type);
5300 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
5301 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
5303 /* Pull the value out of the saved registers. */
5305 addr = create_tmp_var (ptr_type_node, "addr");
5306 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
5308 if (container)
5310 int needed_intregs, needed_sseregs;
5311 bool need_temp;
5312 tree int_addr, sse_addr;
5314 lab_false = create_artificial_label ();
5315 lab_over = create_artificial_label ();
5317 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
5319 need_temp = (!REG_P (container)
5320 && ((needed_intregs && TYPE_ALIGN (type) > 64)
5321 || TYPE_ALIGN (type) > 128));
5323 /* If we are passing a structure, verify that it occupies a consecutive
5324 block of the register save area. If not, we need to do moves. */
5325 if (!need_temp && !REG_P (container))
5327 /* Verify that all registers are strictly consecutive. */
5328 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
5330 int i;
5332 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
5334 rtx slot = XVECEXP (container, 0, i);
5335 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
5336 || INTVAL (XEXP (slot, 1)) != i * 16)
5337 need_temp = 1;
5340 else
5342 int i;
5344 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
5346 rtx slot = XVECEXP (container, 0, i);
5347 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
5348 || INTVAL (XEXP (slot, 1)) != i * 8)
5349 need_temp = 1;
5353 if (!need_temp)
5355 int_addr = addr;
5356 sse_addr = addr;
5358 else
5360 int_addr = create_tmp_var (ptr_type_node, "int_addr");
5361 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
5362 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
5363 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
5366 /* First ensure that we fit completely in registers. */
5367 if (needed_intregs)
5369 t = build_int_cst (TREE_TYPE (gpr),
5370 (REGPARM_MAX - needed_intregs + 1) * 8);
5371 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
5372 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
5373 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
5374 gimplify_and_add (t, pre_p);
5376 if (needed_sseregs)
5378 t = build_int_cst (TREE_TYPE (fpr),
5379 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
5380 + REGPARM_MAX * 8);
5381 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
5382 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
5383 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
5384 gimplify_and_add (t, pre_p);
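/* Added example: with needed_intregs == 2 and REGPARM_MAX == 6, the test
   above branches to lab_false once gpr >= (6 - 2 + 1) * 8 == 40, i.e. as
   soon as fewer than two 8-byte GP slots remain in the register save
   area, so the argument is fetched from the overflow area instead.  */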
5387 /* Compute index to start of area used for integer regs. */
5388 if (needed_intregs)
5390 /* int_addr = gpr + sav; */
5391 t = fold_convert (sizetype, gpr);
5392 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
5393 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
5394 gimplify_and_add (t, pre_p);
5396 if (needed_sseregs)
5398 /* sse_addr = fpr + sav; */
5399 t = fold_convert (sizetype, fpr);
5400 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
5401 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
5402 gimplify_and_add (t, pre_p);
5404 if (need_temp)
5406 int i;
5407 tree temp = create_tmp_var (type, "va_arg_tmp");
5409 /* addr = &temp; */
5410 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
5411 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
5412 gimplify_and_add (t, pre_p);
5414 for (i = 0; i < XVECLEN (container, 0); i++)
5416 rtx slot = XVECEXP (container, 0, i);
5417 rtx reg = XEXP (slot, 0);
5418 enum machine_mode mode = GET_MODE (reg);
5419 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
5420 tree addr_type = build_pointer_type (piece_type);
5421 tree src_addr, src;
5422 int src_offset;
5423 tree dest_addr, dest;
5425 if (SSE_REGNO_P (REGNO (reg)))
5427 src_addr = sse_addr;
5428 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
5430 else
5432 src_addr = int_addr;
5433 src_offset = REGNO (reg) * 8;
5435 src_addr = fold_convert (addr_type, src_addr);
5436 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
5437 size_int (src_offset));
5438 src = build_va_arg_indirect_ref (src_addr);
5440 dest_addr = fold_convert (addr_type, addr);
5441 dest_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, dest_addr,
5442 size_int (INTVAL (XEXP (slot, 1))));
5443 dest = build_va_arg_indirect_ref (dest_addr);
5445 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
5446 gimplify_and_add (t, pre_p);
5450 if (needed_intregs)
5452 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
5453 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
5454 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
5455 gimplify_and_add (t, pre_p);
5457 if (needed_sseregs)
5459 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
5460 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
5461 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
5462 gimplify_and_add (t, pre_p);
5465 t = build1 (GOTO_EXPR, void_type_node, lab_over);
5466 gimplify_and_add (t, pre_p);
5468 t = build1 (LABEL_EXPR, void_type_node, lab_false);
5469 append_to_statement_list (t, pre_p);
5472 /* ... otherwise out of the overflow area. */
5474 /* Care for on-stack alignment if needed. */
5475 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
5476 || integer_zerop (TYPE_SIZE (type)))
5477 t = ovf;
5478 else
5480 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
5481 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
5482 size_int (align - 1));
5483 t = fold_convert (sizetype, t);
5484 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
5485 size_int (-align));
5486 t = fold_convert (TREE_TYPE (ovf), t);
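/* Added example: the code above is the classic round-up-to-alignment,
   t = (ovf + align - 1) & -align; e.g. with align == 16 and ovf == 40 it
   yields (40 + 15) & -16 == 48.  */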
5488 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
5490 t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
5491 gimplify_and_add (t2, pre_p);
5493 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
5494 size_int (rsize * UNITS_PER_WORD));
5495 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
5496 gimplify_and_add (t, pre_p);
5498 if (container)
5500 t = build1 (LABEL_EXPR, void_type_node, lab_over);
5501 append_to_statement_list (t, pre_p);
5504 ptrtype = build_pointer_type (type);
5505 addr = fold_convert (ptrtype, addr);
5507 if (indirect_p)
5508 addr = build_va_arg_indirect_ref (addr);
5509 return build_va_arg_indirect_ref (addr);
5512 /* Return nonzero if OPNUM's MEM should be matched
5513 in movabs* patterns. */
5515 int
5516 ix86_check_movabs (rtx insn, int opnum)
5518 rtx set, mem;
5520 set = PATTERN (insn);
5521 if (GET_CODE (set) == PARALLEL)
5522 set = XVECEXP (set, 0, 0);
5523 gcc_assert (GET_CODE (set) == SET);
5524 mem = XEXP (set, opnum);
5525 while (GET_CODE (mem) == SUBREG)
5526 mem = SUBREG_REG (mem);
5527 gcc_assert (MEM_P (mem));
5528 return (volatile_ok || !MEM_VOLATILE_P (mem));
5531 /* Initialize the table of extra 80387 mathematical constants. */
5533 static void
5534 init_ext_80387_constants (void)
5536 static const char * cst[5] =
5538 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
5539 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
5540 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
5541 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
5542 "3.1415926535897932385128089594061862044", /* 4: fldpi */
5544 int i;
5546 for (i = 0; i < 5; i++)
5548 real_from_string (&ext_80387_constants_table[i], cst[i]);
5549 /* Ensure each constant is rounded to XFmode precision. */
5550 real_convert (&ext_80387_constants_table[i],
5551 XFmode, &ext_80387_constants_table[i]);
5554 ext_80387_constants_init = 1;
5557 /* Return true if the constant is something that can be loaded with
5558 a special instruction. */
5560 int
5561 standard_80387_constant_p (rtx x)
5563 enum machine_mode mode = GET_MODE (x);
5565 REAL_VALUE_TYPE r;
5567 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
5568 return -1;
5570 if (x == CONST0_RTX (mode))
5571 return 1;
5572 if (x == CONST1_RTX (mode))
5573 return 2;
5575 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5577 /* For XFmode constants, try to find a special 80387 instruction when
5578 optimizing for size or on those CPUs that benefit from them. */
5579 if (mode == XFmode
5580 && (optimize_size || TARGET_EXT_80387_CONSTANTS))
5582 int i;
5584 if (! ext_80387_constants_init)
5585 init_ext_80387_constants ();
5587 for (i = 0; i < 5; i++)
5588 if (real_identical (&r, &ext_80387_constants_table[i]))
5589 return i + 3;
5592 /* A load of the constant -0.0 or -1.0 will be split into an
5593 fldz;fchs or fld1;fchs sequence. */
5594 if (real_isnegzero (&r))
5595 return 8;
5596 if (real_identical (&r, &dconstm1))
5597 return 9;
5599 return 0;
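/* Summary of the return values above (note added for clarity): 0 means
   not a special constant; 1 is fldz; 2 is fld1; 3..7 index the
   ext_80387_constants_table (fldlg2, fldln2, fldl2e, fldl2t, fldpi);
   8 and 9 are -0.0 and -1.0, loaded as fldz;fchs and fld1;fchs.  */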
5602 /* Return the opcode of the special instruction to be used to load
5603 the constant X. */
5605 const char *
5606 standard_80387_constant_opcode (rtx x)
5608 switch (standard_80387_constant_p (x))
5610 case 1:
5611 return "fldz";
5612 case 2:
5613 return "fld1";
5614 case 3:
5615 return "fldlg2";
5616 case 4:
5617 return "fldln2";
5618 case 5:
5619 return "fldl2e";
5620 case 6:
5621 return "fldl2t";
5622 case 7:
5623 return "fldpi";
5624 case 8:
5625 case 9:
5626 return "#";
5627 default:
5628 gcc_unreachable ();
5632 /* Return the CONST_DOUBLE representing the 80387 constant that is
5633 loaded by the specified special instruction. The argument IDX
5634 matches the return value from standard_80387_constant_p. */
5636 rtx
5637 standard_80387_constant_rtx (int idx)
5639 int i;
5641 if (! ext_80387_constants_init)
5642 init_ext_80387_constants ();
5644 switch (idx)
5646 case 3:
5647 case 4:
5648 case 5:
5649 case 6:
5650 case 7:
5651 i = idx - 3;
5652 break;
5654 default:
5655 gcc_unreachable ();
5658 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
5659 XFmode);
5662 /* Return 1 if MODE is a valid mode for SSE. */
5663 static int
5664 standard_sse_mode_p (enum machine_mode mode)
5666 switch (mode)
5668 case V16QImode:
5669 case V8HImode:
5670 case V4SImode:
5671 case V2DImode:
5672 case V4SFmode:
5673 case V2DFmode:
5674 return 1;
5676 default:
5677 return 0;
5681 /* Return 1 if X is an FP constant we can load to an SSE register
5682 without using memory. */
5683 int
5684 standard_sse_constant_p (rtx x)
5686 enum machine_mode mode = GET_MODE (x);
5688 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
5689 return 1;
5690 if (vector_all_ones_operand (x, mode)
5691 && standard_sse_mode_p (mode))
5692 return TARGET_SSE2 ? 2 : -1;
5694 return 0;
5697 /* Return the opcode of the special instruction to be used to load
5698 the constant X. */
5700 const char *
5701 standard_sse_constant_opcode (rtx insn, rtx x)
5703 switch (standard_sse_constant_p (x))
5705 case 1:
5706 if (get_attr_mode (insn) == MODE_V4SF)
5707 return "xorps\t%0, %0";
5708 else if (get_attr_mode (insn) == MODE_V2DF)
5709 return "xorpd\t%0, %0";
5710 else
5711 return "pxor\t%0, %0";
5712 case 2:
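/* Added note: comparing a register with itself makes every element
   compare equal, so pcmpeqd sets all bits and materializes the
   all-ones vector without a memory load.  */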
5713 return "pcmpeqd\t%0, %0";
5715 gcc_unreachable ();
5718 /* Returns 1 if OP contains a symbol reference */
5720 int
5721 symbolic_reference_mentioned_p (rtx op)
5723 const char *fmt;
5724 int i;
5726 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
5727 return 1;
5729 fmt = GET_RTX_FORMAT (GET_CODE (op));
5730 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
5732 if (fmt[i] == 'E')
5734 int j;
5736 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
5737 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
5738 return 1;
5741 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
5742 return 1;
5745 return 0;
5748 /* Return 1 if it is appropriate to emit `ret' instructions in the
5749 body of a function. Do this only if the epilogue is simple, needing a
5750 couple of insns. Prior to reloading, we can't tell how many registers
5751 must be saved, so return 0 then. Return 0 if there is no frame
5752 marker to de-allocate. */
5754 int
5755 ix86_can_use_return_insn_p (void)
5757 struct ix86_frame frame;
5759 if (! reload_completed || frame_pointer_needed)
5760 return 0;
5762 /* Don't allow more than 32K bytes of arguments to be popped, since
5763 that's all we can do with one instruction. */
5764 if (crtl->args.pops_args
5765 && crtl->args.size >= 32768)
5766 return 0;
5768 ix86_compute_frame_layout (&frame);
5769 return frame.to_allocate == 0 && frame.nregs == 0;
5772 /* Value should be nonzero if functions must have frame pointers.
5773 Zero means the frame pointer need not be set up (and parms may
5774 be accessed via the stack pointer) in functions that seem suitable. */
5776 int
5777 ix86_frame_pointer_required (void)
5779 /* If we accessed previous frames, then the generated code expects
5780 to be able to access the saved ebp value in our frame. */
5781 if (cfun->machine->accesses_prev_frame)
5782 return 1;
5784 /* Several x86 OSes need a frame pointer for other reasons,
5785 usually pertaining to setjmp. */
5786 if (SUBTARGET_FRAME_POINTER_REQUIRED)
5787 return 1;
5789 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
5790 the frame pointer by default. Turn it back on now if we've not
5791 got a leaf function. */
5792 if (TARGET_OMIT_LEAF_FRAME_POINTER
5793 && (!current_function_is_leaf
5794 || ix86_current_function_calls_tls_descriptor))
5795 return 1;
5797 if (crtl->profile)
5798 return 1;
5800 return 0;
5803 /* Record that the current function accesses previous call frames. */
5805 void
5806 ix86_setup_frame_addresses (void)
5808 cfun->machine->accesses_prev_frame = 1;
5811 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
5812 # define USE_HIDDEN_LINKONCE 1
5813 #else
5814 # define USE_HIDDEN_LINKONCE 0
5815 #endif
5817 static int pic_labels_used;
5819 /* Fills in the label name that should be used for a pc thunk for
5820 the given register. */
5822 static void
5823 get_pc_thunk_name (char name[32], unsigned int regno)
5825 gcc_assert (!TARGET_64BIT);
5827 if (USE_HIDDEN_LINKONCE)
5828 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
5829 else
5830 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
5834 /* This function generates code for -fpic that loads %ebx with
5835 the return address of the caller and then returns. */
5837 void
5838 ix86_file_end (void)
5840 rtx xops[2];
5841 int regno;
5843 for (regno = 0; regno < 8; ++regno)
5845 char name[32];
5847 if (! ((pic_labels_used >> regno) & 1))
5848 continue;
5850 get_pc_thunk_name (name, regno);
5852 #if TARGET_MACHO
5853 if (TARGET_MACHO)
5855 switch_to_section (darwin_sections[text_coal_section]);
5856 fputs ("\t.weak_definition\t", asm_out_file);
5857 assemble_name (asm_out_file, name);
5858 fputs ("\n\t.private_extern\t", asm_out_file);
5859 assemble_name (asm_out_file, name);
5860 fputs ("\n", asm_out_file);
5861 ASM_OUTPUT_LABEL (asm_out_file, name);
5863 else
5864 #endif
5865 if (USE_HIDDEN_LINKONCE)
5867 tree decl;
5869 decl = build_decl (FUNCTION_DECL, get_identifier (name),
5870 error_mark_node);
5871 TREE_PUBLIC (decl) = 1;
5872 TREE_STATIC (decl) = 1;
5873 DECL_ONE_ONLY (decl) = 1;
5875 (*targetm.asm_out.unique_section) (decl, 0);
5876 switch_to_section (get_named_section (decl, NULL, 0));
5878 (*targetm.asm_out.globalize_label) (asm_out_file, name);
5879 fputs ("\t.hidden\t", asm_out_file);
5880 assemble_name (asm_out_file, name);
5881 fputc ('\n', asm_out_file);
5882 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
5884 else
5886 switch_to_section (text_section);
5887 ASM_OUTPUT_LABEL (asm_out_file, name);
5890 xops[0] = gen_rtx_REG (Pmode, regno);
5891 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
5892 if (TARGET_64BIT)
5893 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
5894 else
5895 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
5896 output_asm_insn ("ret", xops);
5899 if (NEED_INDICATE_EXEC_STACK)
5900 file_end_indicate_exec_stack ();
5903 /* Emit code for the SET_GOT patterns. */
5905 const char *
5906 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
5908 rtx xops[3];
5910 xops[0] = dest;
5912 if (TARGET_VXWORKS_RTP && flag_pic)
5914 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
5915 xops[2] = gen_rtx_MEM (Pmode,
5916 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
5917 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5919 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
5920 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
5921 an unadorned address. */
5922 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
5923 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
5924 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
5925 return "";
5928 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
5930 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
5932 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
5934 if (!flag_pic)
5936 if (TARGET_64BIT)
5937 output_asm_insn ("mov{q}\t{%2, %0|%0, %2}", xops);
5938 else
5939 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5941 else
5942 output_asm_insn ("call\t%a2", xops);
5944 #if TARGET_MACHO
5945 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5946 is what will be referenced by the Mach-O PIC subsystem. */
5947 if (!label)
5948 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5949 #endif
5951 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5952 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
5954 if (flag_pic)
5956 if (TARGET_64BIT)
5957 output_asm_insn ("pop{q}\t%0", xops);
5958 else
5959 output_asm_insn ("pop{l}\t%0", xops);
5962 else
5964 char name[32];
5965 get_pc_thunk_name (name, REGNO (dest));
5966 pic_labels_used |= 1 << REGNO (dest);
5968 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5969 xops[2] = gen_rtx_MEM (QImode, xops[2]);
5970 output_asm_insn ("call\t%X2", xops);
5971 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5972 is what will be referenced by the Mach-O PIC subsystem. */
5973 #if TARGET_MACHO
5974 if (!label)
5975 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5976 else
5977 targetm.asm_out.internal_label (asm_out_file, "L",
5978 CODE_LABEL_NUMBER (label));
5979 #endif
5982 if (TARGET_MACHO)
5983 return "";
5985 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
5987 if (TARGET_64BIT)
5988 output_asm_insn ("add{q}\t{%1, %0|%0, %1}", xops);
5989 else
5990 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
5992 else
5994 if (TARGET_64BIT)
5995 output_asm_insn ("add{q}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
5996 else
5997 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
6000 return "";
6003 /* Generate a "push" pattern for input ARG. */
6005 static rtx
6006 gen_push (rtx arg)
6008 return gen_rtx_SET (VOIDmode,
6009 gen_rtx_MEM (Pmode,
6010 gen_rtx_PRE_DEC (Pmode,
6011 stack_pointer_rtx)),
6012 arg);
6015 /* Return the number of an unused call-clobbered register available for
6016 the entire function, or INVALID_REGNUM if there is none. */
6018 static unsigned int
6019 ix86_select_alt_pic_regnum (void)
6021 if (current_function_is_leaf && !crtl->profile
6022 && !ix86_current_function_calls_tls_descriptor)
6024 int i;
6025 for (i = 2; i >= 0; --i)
6026 if (!df_regs_ever_live_p (i))
6027 return i;
6030 return INVALID_REGNUM;
6033 /* Return 1 if we need to save REGNO. */
6034 static int
6035 ix86_save_reg (unsigned int regno, int maybe_eh_return)
6037 if (pic_offset_table_rtx
6038 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
6039 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
6040 || crtl->profile
6041 || crtl->calls_eh_return
6042 || crtl->uses_const_pool))
6044 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
6045 return 0;
6046 return 1;
6049 if (crtl->calls_eh_return && maybe_eh_return)
6051 unsigned i;
6052 for (i = 0; ; i++)
6054 unsigned test = EH_RETURN_DATA_REGNO (i);
6055 if (test == INVALID_REGNUM)
6056 break;
6057 if (test == regno)
6058 return 1;
6062 if (cfun->machine->force_align_arg_pointer
6063 && regno == REGNO (cfun->machine->force_align_arg_pointer))
6064 return 1;
6066 return (df_regs_ever_live_p (regno)
6067 && !call_used_regs[regno]
6068 && !fixed_regs[regno]
6069 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
6072 /* Return number of registers to be saved on the stack. */
6074 static int
6075 ix86_nsaved_regs (void)
6077 int nregs = 0;
6078 int regno;
6080 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
6081 if (ix86_save_reg (regno, true))
6082 nregs++;
6083 return nregs;
6086 /* Return the offset between two registers, one to be eliminated, and the other
6087 its replacement, at the start of a routine. */
6089 HOST_WIDE_INT
6090 ix86_initial_elimination_offset (int from, int to)
6092 struct ix86_frame frame;
6093 ix86_compute_frame_layout (&frame);
6095 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
6096 return frame.hard_frame_pointer_offset;
6097 else if (from == FRAME_POINTER_REGNUM
6098 && to == HARD_FRAME_POINTER_REGNUM)
6099 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
6100 else
6102 gcc_assert (to == STACK_POINTER_REGNUM);
6104 if (from == ARG_POINTER_REGNUM)
6105 return frame.stack_pointer_offset;
6107 gcc_assert (from == FRAME_POINTER_REGNUM);
6108 return frame.stack_pointer_offset - frame.frame_pointer_offset;
6112 /* Fill the ix86_frame structure describing the frame of the currently compiled function. */
6114 static void
6115 ix86_compute_frame_layout (struct ix86_frame *frame)
6117 HOST_WIDE_INT total_size;
6118 unsigned int stack_alignment_needed;
6119 HOST_WIDE_INT offset;
6120 unsigned int preferred_alignment;
6121 HOST_WIDE_INT size = get_frame_size ();
6123 frame->nregs = ix86_nsaved_regs ();
6124 total_size = size;
6126 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
6127 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
6129 /* During reload iteration the number of registers saved can change.
6130 Recompute the value as needed. Do not recompute when the number of
6131 registers didn't change, as reload does multiple calls to the function
6132 and does not expect the decision to change within a single iteration. */
6133 if (!optimize_size
6134 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
6136 int count = frame->nregs;
6138 cfun->machine->use_fast_prologue_epilogue_nregs = count;
6139 /* The fast prologue uses move instead of push to save registers. This
6140 is significantly longer, but also executes faster, as modern hardware
6141 can execute the moves in parallel but can't do that for push/pop.
6143 Be careful about choosing which prologue to emit: when the function
6144 takes many instructions to execute we may use the slow version, as we
6145 may when the function is known to be outside any hot spot (this is
6146 known with feedback only). Weight the size of the function by the
6147 number of registers to save, as it is cheap to use one or two push
6148 instructions but very slow to use many of them. */
6149 if (count)
6150 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
6151 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
6152 || (flag_branch_probabilities
6153 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
6154 cfun->machine->use_fast_prologue_epilogue = false;
6155 else
6156 cfun->machine->use_fast_prologue_epilogue
6157 = !expensive_function_p (count);
6159 if (TARGET_PROLOGUE_USING_MOVE
6160 && cfun->machine->use_fast_prologue_epilogue)
6161 frame->save_regs_using_mov = true;
6162 else
6163 frame->save_regs_using_mov = false;
6166 /* Skip return address and saved base pointer. */
6167 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
6169 frame->hard_frame_pointer_offset = offset;
6171 /* Do some sanity checking of stack_alignment_needed and
6172 preferred_alignment, since the i386 port is the only one using these
6173 features, and they may break easily. */
6175 gcc_assert (!size || stack_alignment_needed);
6176 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
6177 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
6178 gcc_assert (stack_alignment_needed
6179 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
6181 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
6182 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
6184 /* Register save area */
6185 offset += frame->nregs * UNITS_PER_WORD;
6187 /* Va-arg area */
6188 if (ix86_save_varrargs_registers)
6190 offset += X86_64_VARARGS_SIZE;
6191 frame->va_arg_size = X86_64_VARARGS_SIZE;
6193 else
6194 frame->va_arg_size = 0;
6196 /* Align start of frame for local function. */
6197 frame->padding1 = ((offset + stack_alignment_needed - 1)
6198 & -stack_alignment_needed) - offset;
6200 offset += frame->padding1;
6202 /* Frame pointer points here. */
6203 frame->frame_pointer_offset = offset;
6205 offset += size;
6207 /* Add the outgoing arguments area. It can be skipped if we eliminated
6208 all the function calls as dead code.
6209 Skipping is however impossible when the function calls alloca: the
6210 alloca expander assumes that the last crtl->outgoing_args_size bytes
6211 of the stack frame are unused. */
6212 if (ACCUMULATE_OUTGOING_ARGS
6213 && (!current_function_is_leaf || cfun->calls_alloca
6214 || ix86_current_function_calls_tls_descriptor))
6216 offset += crtl->outgoing_args_size;
6217 frame->outgoing_arguments_size = crtl->outgoing_args_size;
6219 else
6220 frame->outgoing_arguments_size = 0;
6222 /* Align stack boundary. Only needed if we're calling another function
6223 or using alloca. */
6224 if (!current_function_is_leaf || cfun->calls_alloca
6225 || ix86_current_function_calls_tls_descriptor)
6226 frame->padding2 = ((offset + preferred_alignment - 1)
6227 & -preferred_alignment) - offset;
6228 else
6229 frame->padding2 = 0;
6231 offset += frame->padding2;
6233 /* We've reached end of stack frame. */
6234 frame->stack_pointer_offset = offset;
6236 /* Size prologue needs to allocate. */
6237 frame->to_allocate =
6238 (size + frame->padding1 + frame->padding2
6239 + frame->outgoing_arguments_size + frame->va_arg_size);
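/* Sketch of the resulting frame layout (added for clarity; offsets as
   computed above, stack growing downward):

	return address			<- entry stack pointer
	saved frame pointer		(if frame_pointer_needed)
	saved registers			(nregs * UNITS_PER_WORD)
	va-arg register save area	(if used)
	padding1
	local variables (size)		<- frame_pointer_offset above these
	outgoing arguments area
	padding2			<- stack_pointer_offset at the end  */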
6241 if ((!frame->to_allocate && frame->nregs <= 1)
6242 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
6243 frame->save_regs_using_mov = false;
6245 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
6246 && current_function_is_leaf
6247 && !ix86_current_function_calls_tls_descriptor)
6249 frame->red_zone_size = frame->to_allocate;
6250 if (frame->save_regs_using_mov)
6251 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
6252 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
6253 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
6255 else
6256 frame->red_zone_size = 0;
6257 frame->to_allocate -= frame->red_zone_size;
6258 frame->stack_pointer_offset -= frame->red_zone_size;
6259 #if 0
6260 fprintf (stderr, "\n");
6261 fprintf (stderr, "nregs: %ld\n", (long)frame->nregs);
6262 fprintf (stderr, "size: %ld\n", (long)size);
6263 fprintf (stderr, "alignment1: %ld\n", (long)stack_alignment_needed);
6264 fprintf (stderr, "padding1: %ld\n", (long)frame->padding1);
6265 fprintf (stderr, "va_arg: %ld\n", (long)frame->va_arg_size);
6266 fprintf (stderr, "padding2: %ld\n", (long)frame->padding2);
6267 fprintf (stderr, "to_allocate: %ld\n", (long)frame->to_allocate);
6268 fprintf (stderr, "red_zone_size: %ld\n", (long)frame->red_zone_size);
6269 fprintf (stderr, "frame_pointer_offset: %ld\n", (long)frame->frame_pointer_offset);
6270 fprintf (stderr, "hard_frame_pointer_offset: %ld\n",
6271 (long)frame->hard_frame_pointer_offset);
6272 fprintf (stderr, "stack_pointer_offset: %ld\n", (long)frame->stack_pointer_offset);
6273 fprintf (stderr, "current_function_is_leaf: %ld\n", (long)current_function_is_leaf);
6274 fprintf (stderr, "cfun->calls_alloca: %ld\n", (long)cfun->calls_alloca);
6275 fprintf (stderr, "x86_current_function_calls_tls_descriptor: %ld\n", (long)ix86_current_function_calls_tls_descriptor);
6276 #endif
6279 /* Emit code to save registers in the prologue. */
6281 static void
6282 ix86_emit_save_regs (void)
6284 unsigned int regno;
6285 rtx insn;
6287 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
6288 if (ix86_save_reg (regno, true))
6290 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
6291 RTX_FRAME_RELATED_P (insn) = 1;
6295 /* Emit code to save registers using MOV insns. The first register
6296 is saved at POINTER + OFFSET. */
6297 static void
6298 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
6300 unsigned int regno;
6301 rtx insn;
6303 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6304 if (ix86_save_reg (regno, true))
6306 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
6307 Pmode, offset),
6308 gen_rtx_REG (Pmode, regno));
6309 RTX_FRAME_RELATED_P (insn) = 1;
6310 offset += UNITS_PER_WORD;
6314 /* Expand a prologue or epilogue stack adjustment.
6315 The pattern exists to put a dependency on all ebp-based memory accesses.
6316 STYLE should be negative if instructions should be marked as frame
6317 related, zero if the %r11 register is live and cannot be freely used,
6318 and positive otherwise. */
6320 static void
6321 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
6323 rtx insn;
6325 if (! TARGET_64BIT)
6326 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
6327 else if (x86_64_immediate_operand (offset, DImode))
6328 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
6329 else
6331 rtx r11;
6332 /* r11 is used by indirect sibcall return as well, set before the
6333 epilogue and used after the epilogue. ATM indirect sibcall
6334 shouldn't be used together with huge frame sizes in one
6335 function because of the frame_size check in sibcall.c. */
6336 gcc_assert (style);
6337 r11 = gen_rtx_REG (DImode, R11_REG);
6338 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
6339 if (style < 0)
6340 RTX_FRAME_RELATED_P (insn) = 1;
6341 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
6342 offset));
6344 if (style < 0)
6345 RTX_FRAME_RELATED_P (insn) = 1;
6348 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
6350 static rtx
6351 ix86_internal_arg_pointer (void)
6353 bool has_force_align_arg_pointer =
6354 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
6355 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
6356 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
6357 && DECL_NAME (current_function_decl)
6358 && MAIN_NAME_P (DECL_NAME (current_function_decl))
6359 && DECL_FILE_SCOPE_P (current_function_decl))
6360 || ix86_force_align_arg_pointer
6361 || has_force_align_arg_pointer)
6363 /* Nested functions can't realign the stack due to a register
6364 conflict. */
6365 if (DECL_CONTEXT (current_function_decl)
6366 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
6368 if (ix86_force_align_arg_pointer)
6369 warning (0, "-mstackrealign ignored for nested functions");
6370 if (has_force_align_arg_pointer)
6371 error ("%s not supported for nested functions",
6372 ix86_force_align_arg_pointer_string);
6373 return virtual_incoming_args_rtx;
6375 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, CX_REG);
6376 return copy_to_reg (cfun->machine->force_align_arg_pointer);
6378 else
6379 return virtual_incoming_args_rtx;
6382 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
6383 This is called from dwarf2out.c to emit call frame instructions
6384 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
6385 static void
6386 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
6388 rtx unspec = SET_SRC (pattern);
6389 gcc_assert (GET_CODE (unspec) == UNSPEC);
6391 switch (index)
6393 case UNSPEC_REG_SAVE:
6394 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
6395 SET_DEST (pattern));
6396 break;
6397 case UNSPEC_DEF_CFA:
6398 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
6399 INTVAL (XVECEXP (unspec, 0, 0)));
6400 break;
6401 default:
6402 gcc_unreachable ();
6406 /* Expand the prologue into a bunch of separate insns. */
6408 void
6409 ix86_expand_prologue (void)
6411 rtx insn;
6412 bool pic_reg_used;
6413 struct ix86_frame frame;
6414 HOST_WIDE_INT allocate;
6416 ix86_compute_frame_layout (&frame);
6418 if (cfun->machine->force_align_arg_pointer)
6420 rtx x, y;
6422 /* Grab the argument pointer. */
6423 x = plus_constant (stack_pointer_rtx, 4);
6424 y = cfun->machine->force_align_arg_pointer;
6425 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
6426 RTX_FRAME_RELATED_P (insn) = 1;
6428 /* The unwind info consists of two parts: install the fafp as the cfa,
6429 and record the fafp as the "save register" of the stack pointer.
6430 The latter is there so that the unwinder can see where it
6431 should restore the stack pointer across the and insn. */
6432 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
6433 x = gen_rtx_SET (VOIDmode, y, x);
6434 RTX_FRAME_RELATED_P (x) = 1;
6435 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
6436 UNSPEC_REG_SAVE);
6437 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
6438 RTX_FRAME_RELATED_P (y) = 1;
6439 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
6440 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
6441 REG_NOTES (insn) = x;
6443 /* Align the stack. */
6444 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
6445 GEN_INT (-16)));
6447 /* And here we cheat like madmen with the unwind info. We force the
6448 cfa register back to sp+4, which is exactly what it was at the
6449 start of the function. Re-pushing the return address results in
6450 the return at the same spot relative to the cfa, and thus is
6451 correct wrt the unwind info. */
6452 x = cfun->machine->force_align_arg_pointer;
6453 x = gen_frame_mem (Pmode, plus_constant (x, -4));
6454 insn = emit_insn (gen_push (x));
6455 RTX_FRAME_RELATED_P (insn) = 1;
6457 x = GEN_INT (4);
6458 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
6459 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
6460 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
6461 REG_NOTES (insn) = x;
6464 /* Note: AT&T enter does NOT have reversed args. Enter is probably
6465 slower on all targets. Also sdb doesn't like it. */
6467 if (frame_pointer_needed)
6469 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
6470 RTX_FRAME_RELATED_P (insn) = 1;
6472 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6473 RTX_FRAME_RELATED_P (insn) = 1;
6476 allocate = frame.to_allocate;
6478 if (!frame.save_regs_using_mov)
6479 ix86_emit_save_regs ();
6480 else
6481 allocate += frame.nregs * UNITS_PER_WORD;
6483 /* When using the red zone we may start saving registers before
6484 allocating the stack frame, saving one cycle of the prologue.
6485 However, avoid doing this if we are going to have to probe the stack,
6486 since at least on x86_64 the stack probe can turn into a call that
6487 clobbers a red zone location. */
6488 if (TARGET_RED_ZONE && frame.save_regs_using_mov
6489 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
6490 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
6491 : stack_pointer_rtx,
6492 -frame.nregs * UNITS_PER_WORD);
6494 if (allocate == 0)
6496 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
6497 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
6498 GEN_INT (-allocate), -1);
6499 else
6501 /* Only valid for Win32. */
6502 rtx eax = gen_rtx_REG (Pmode, AX_REG);
6503 bool eax_live;
6504 rtx t;
6506 gcc_assert (!TARGET_64BIT || TARGET_64BIT_MS_ABI);
6508 if (TARGET_64BIT_MS_ABI)
6509 eax_live = false;
6510 else
6511 eax_live = ix86_eax_live_at_start_p ();
6513 if (eax_live)
6515 emit_insn (gen_push (eax));
6516 allocate -= UNITS_PER_WORD;
6519 emit_move_insn (eax, GEN_INT (allocate));
6521 if (TARGET_64BIT)
6522 insn = gen_allocate_stack_worker_64 (eax);
6523 else
6524 insn = gen_allocate_stack_worker_32 (eax);
6525 insn = emit_insn (insn);
6526 RTX_FRAME_RELATED_P (insn) = 1;
6527 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
6528 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
6529 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6530 t, REG_NOTES (insn));
6532 if (eax_live)
6534 if (frame_pointer_needed)
6535 t = plus_constant (hard_frame_pointer_rtx,
6536 allocate
6537 - frame.to_allocate
6538 - frame.nregs * UNITS_PER_WORD);
6539 else
6540 t = plus_constant (stack_pointer_rtx, allocate);
6541 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
6545 if (frame.save_regs_using_mov
6546 && !(TARGET_RED_ZONE
6547 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
6549 if (!frame_pointer_needed || !frame.to_allocate)
6550 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
6551 else
6552 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
6553 -frame.nregs * UNITS_PER_WORD);
6556 pic_reg_used = false;
6557 if (pic_offset_table_rtx
6558 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
6559 || crtl->profile))
6561 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
6563 if (alt_pic_reg_used != INVALID_REGNUM)
6564 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
6566 pic_reg_used = true;
6569 if (pic_reg_used)
6571 if (TARGET_64BIT)
6573 if (ix86_cmodel == CM_LARGE_PIC)
6575 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
6576 rtx label = gen_label_rtx ();
6577 emit_label (label);
6578 LABEL_PRESERVE_P (label) = 1;
6579 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
6580 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
6581 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
6582 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
6583 pic_offset_table_rtx, tmp_reg));
6585 else
6586 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
6588 else
6589 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
6592 /* Prevent function calls from being scheduled before the call to mcount.
6593 In the pic_reg_used case, make sure that the got load isn't deleted. */
6594 if (crtl->profile)
6596 if (pic_reg_used)
6597 emit_insn (gen_prologue_use (pic_offset_table_rtx));
6598 emit_insn (gen_blockage ());
6602 /* Emit code to restore saved registers using MOV insns. First register
6603 is restored from POINTER + OFFSET. */
6604 static void
6605 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
6606 int maybe_eh_return)
6608 int regno;
6609 rtx base_address = gen_rtx_MEM (Pmode, pointer);
6611 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6612 if (ix86_save_reg (regno, maybe_eh_return))
6614 /* Ensure that adjust_address won't be forced to produce a pointer
6615 outside the range allowed by the x86-64 instruction set. */
6616 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
6618 rtx r11;
6620 r11 = gen_rtx_REG (DImode, R11_REG);
6621 emit_move_insn (r11, GEN_INT (offset));
6622 emit_insn (gen_adddi3 (r11, r11, pointer));
6623 base_address = gen_rtx_MEM (Pmode, r11);
6624 offset = 0;
6626 emit_move_insn (gen_rtx_REG (Pmode, regno),
6627 adjust_address (base_address, Pmode, offset));
6628 offset += UNITS_PER_WORD;
6632 /* Restore function stack, frame, and registers. */
6634 void
6635 ix86_expand_epilogue (int style)
6637 int regno;
6638 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
6639 struct ix86_frame frame;
6640 HOST_WIDE_INT offset;
6642 ix86_compute_frame_layout (&frame);
6644 /* Calculate start of saved registers relative to ebp. Special care
6645 must be taken for the normal return case of a function using
6646 eh_return: the eax and edx registers are marked as saved, but not
6647 restored along this path. */
6648 offset = frame.nregs;
6649 if (crtl->calls_eh_return && style != 2)
6650 offset -= 2;
6651 offset *= -UNITS_PER_WORD;
6653 /* If we're only restoring one register and sp is not valid then
6654 use a move instruction to restore the register, since it's
6655 less work than reloading sp and popping the register.
6657 The default code results in a stack adjustment using an add/lea
6658 instruction, while this code results in a LEAVE instruction (or its
6659 discrete equivalent), so it is profitable in some other cases as
6660 well, especially when there are no registers to restore. We also use
6661 this code when TARGET_USE_LEAVE and there is exactly one register to
6662 pop. This heuristic may need some tuning in the future. */
6663 if ((!sp_valid && frame.nregs <= 1)
6664 || (TARGET_EPILOGUE_USING_MOVE
6665 && cfun->machine->use_fast_prologue_epilogue
6666 && (frame.nregs > 1 || frame.to_allocate))
6667 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
6668 || (frame_pointer_needed && TARGET_USE_LEAVE
6669 && cfun->machine->use_fast_prologue_epilogue
6670 && frame.nregs == 1)
6671 || crtl->calls_eh_return)
6673 /* Restore registers. We can use ebp or esp to address the memory
6674 locations. If both are available, default to ebp, since offsets
6675 are known to be small. The only exception is esp pointing directly
6676 to the end of the block of saved registers, where we may simplify the
6677 addressing mode. */
6679 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
6680 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
6681 frame.to_allocate, style == 2);
6682 else
6683 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
6684 offset, style == 2);
6686 /* eh_return epilogues need %ecx added to the stack pointer. */
6687 if (style == 2)
6689 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
6691 if (frame_pointer_needed)
6693 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
6694 tmp = plus_constant (tmp, UNITS_PER_WORD);
6695 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
6697 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
6698 emit_move_insn (hard_frame_pointer_rtx, tmp);
6700 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
6701 const0_rtx, style);
6703 else
6705 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
6706 tmp = plus_constant (tmp, (frame.to_allocate
6707 + frame.nregs * UNITS_PER_WORD));
6708 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
6711 else if (!frame_pointer_needed)
6712 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
6713 GEN_INT (frame.to_allocate
6714 + frame.nregs * UNITS_PER_WORD),
6715 style);
6716 /* If not an i386, mov & pop is faster than "leave". */
6717 else if (TARGET_USE_LEAVE || optimize_size
6718 || !cfun->machine->use_fast_prologue_epilogue)
6719 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
6720 else
6722 pro_epilogue_adjust_stack (stack_pointer_rtx,
6723 hard_frame_pointer_rtx,
6724 const0_rtx, style);
6725 if (TARGET_64BIT)
6726 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
6727 else
6728 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
6731 else
6733 /* The first step is to deallocate the stack frame so that we can
6734 pop the registers. */
6735 if (!sp_valid)
6737 gcc_assert (frame_pointer_needed);
6738 pro_epilogue_adjust_stack (stack_pointer_rtx,
6739 hard_frame_pointer_rtx,
6740 GEN_INT (offset), style);
6742 else if (frame.to_allocate)
6743 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
6744 GEN_INT (frame.to_allocate), style);
6746 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6747 if (ix86_save_reg (regno, false))
6749 if (TARGET_64BIT)
6750 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
6751 else
6752 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
6754 if (frame_pointer_needed)
6756 /* Leave results in shorter dependency chains on CPUs that are
6757 able to grok it fast. */
6758 if (TARGET_USE_LEAVE)
6759 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
6760 else if (TARGET_64BIT)
6761 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
6762 else
6763 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
6767 if (cfun->machine->force_align_arg_pointer)
6769 emit_insn (gen_addsi3 (stack_pointer_rtx,
6770 cfun->machine->force_align_arg_pointer,
6771 GEN_INT (-4)));
6774 /* Sibcall epilogues don't want a return instruction. */
6775 if (style == 0)
6776 return;
6778 if (crtl->args.pops_args && crtl->args.size)
6780 rtx popc = GEN_INT (crtl->args.pops_args);
6782 /* i386 can only pop 64K bytes. If asked to pop more, pop the
6783 return address, do an explicit add, and jump indirectly to the
6784 caller. */
6786 if (crtl->args.pops_args >= 65536)
6788 rtx ecx = gen_rtx_REG (SImode, CX_REG);
6790 /* There is no "pascal" calling convention in any 64bit ABI. */
6791 gcc_assert (!TARGET_64BIT);
6793 emit_insn (gen_popsi1 (ecx));
6794 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
6795 emit_jump_insn (gen_return_indirect_internal (ecx));
6797 else
6798 emit_jump_insn (gen_return_pop_internal (popc));
6800 else
6801 emit_jump_insn (gen_return_internal ());
6804 /* Reset from the function's potential modifications. */
6806 static void
6807 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6808 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6810 if (pic_offset_table_rtx)
6811 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
6812 #if TARGET_MACHO
6813 /* Mach-O doesn't support labels at the end of objects, so if
6814 it looks like we might want one, insert a NOP. */
6816 rtx insn = get_last_insn ();
6817 while (insn
6818 && NOTE_P (insn)
6819 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
6820 insn = PREV_INSN (insn);
6821 if (insn
6822 && (LABEL_P (insn)
6823 || (NOTE_P (insn)
6824 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
6825 fputs ("\tnop\n", file);
6827 #endif
6831 /* Extract the parts of an RTL expression that is a valid memory address
6832 for an instruction. Return 0 if the structure of the address is
6833 grossly off. Return -1 if the address contains ASHIFT, so it is not
6834 strictly valid but is still used for computing the length of a lea insn. */
6836 int
6837 ix86_decompose_address (rtx addr, struct ix86_address *out)
6839 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
6840 rtx base_reg, index_reg;
6841 HOST_WIDE_INT scale = 1;
6842 rtx scale_rtx = NULL_RTX;
6843 int retval = 1;
6844 enum ix86_address_seg seg = SEG_DEFAULT;
6846 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
6847 base = addr;
6848 else if (GET_CODE (addr) == PLUS)
6850 rtx addends[4], op;
6851 int n = 0, i;
6853 op = addr;
6856 if (n >= 4)
6857 return 0;
6858 addends[n++] = XEXP (op, 1);
6859 op = XEXP (op, 0);
6861 while (GET_CODE (op) == PLUS);
6862 if (n >= 4)
6863 return 0;
6864 addends[n] = op;
6866 for (i = n; i >= 0; --i)
6868 op = addends[i];
6869 switch (GET_CODE (op))
6871 case MULT:
6872 if (index)
6873 return 0;
6874 index = XEXP (op, 0);
6875 scale_rtx = XEXP (op, 1);
6876 break;
6878 case UNSPEC:
6879 if (XINT (op, 1) == UNSPEC_TP
6880 && TARGET_TLS_DIRECT_SEG_REFS
6881 && seg == SEG_DEFAULT)
6882 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
6883 else
6884 return 0;
6885 break;
6887 case REG:
6888 case SUBREG:
6889 if (!base)
6890 base = op;
6891 else if (!index)
6892 index = op;
6893 else
6894 return 0;
6895 break;
6897 case CONST:
6898 case CONST_INT:
6899 case SYMBOL_REF:
6900 case LABEL_REF:
6901 if (disp)
6902 return 0;
6903 disp = op;
6904 break;
6906 default:
6907 return 0;
6911 else if (GET_CODE (addr) == MULT)
6913 index = XEXP (addr, 0); /* index*scale */
6914 scale_rtx = XEXP (addr, 1);
6916 else if (GET_CODE (addr) == ASHIFT)
6918 rtx tmp;
6920 /* We're called for lea too, which implements ashift on occasion. */
6921 index = XEXP (addr, 0);
6922 tmp = XEXP (addr, 1);
6923 if (!CONST_INT_P (tmp))
6924 return 0;
6925 scale = INTVAL (tmp);
6926 if ((unsigned HOST_WIDE_INT) scale > 3)
6927 return 0;
6928 scale = 1 << scale;
6929 retval = -1;
6931 else
6932 disp = addr; /* displacement */
6934 /* Extract the integral value of scale. */
6935 if (scale_rtx)
6937 if (!CONST_INT_P (scale_rtx))
6938 return 0;
6939 scale = INTVAL (scale_rtx);
6942 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
6943 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
6945 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
6946 if (base_reg && index_reg && scale == 1
6947 && (index_reg == arg_pointer_rtx
6948 || index_reg == frame_pointer_rtx
6949 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
6951 rtx tmp;
6952 tmp = base, base = index, index = tmp;
6953 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
6956 /* Special case: %ebp cannot be encoded as a base without a displacement. */
6957 if ((base_reg == hard_frame_pointer_rtx
6958 || base_reg == frame_pointer_rtx
6959 || base_reg == arg_pointer_rtx) && !disp)
6960 disp = const0_rtx;
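/* Added note: in the x86 ModR/M byte, base 101 with mod 00 means
   "disp32, no base", so a plain [%ebp] cannot be encoded; forcing a
   const0_rtx displacement makes it [%ebp+0] with a disp8 instead.  */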
6962 /* Special case: on K6, [%esi] makes the instruction vector decoded.
6963 Avoid this by transforming to [%esi+0]. */
6964 if (TARGET_K6 && !optimize_size
6965 && base_reg && !index_reg && !disp
6966 && REG_P (base_reg)
6967 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
6968 disp = const0_rtx;
6970 /* Special case: encode reg+reg instead of reg*2. */
6971 if (!base && index && scale && scale == 2)
6972 base = index, base_reg = index_reg, scale = 1;
6974 /* Special case: scaling cannot be encoded without base or displacement. */
6975 if (!base && !disp && index && scale != 1)
6976 disp = const0_rtx;
6978 out->base = base;
6979 out->index = index;
6980 out->disp = disp;
6981 out->scale = scale;
6982 out->seg = seg;
6984 return retval;
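/* Added example for ix86_decompose_address above: for the address
     (plus (plus (reg %ebx) (mult (reg %esi) (const_int 4))) (const_int 12))
   it yields base = %ebx, index = %esi, scale = 4, disp = 12,
   i.e. the operand 12(%ebx,%esi,4).  */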
6987 /* Return the cost of the memory address X.
6988 For i386, it is better to use a complex address than let gcc copy
6989 the address into a reg and make a new pseudo. But not if the address
6990 requires two regs - that would mean more pseudos with longer
6991 lifetimes. */
6992 static int
6993 ix86_address_cost (rtx x)
6995 struct ix86_address parts;
6996 int cost = 1;
6997 int ok = ix86_decompose_address (x, &parts);
6999 gcc_assert (ok);
7001 if (parts.base && GET_CODE (parts.base) == SUBREG)
7002 parts.base = SUBREG_REG (parts.base);
7003 if (parts.index && GET_CODE (parts.index) == SUBREG)
7004 parts.index = SUBREG_REG (parts.index);
7006 /* Attempt to minimize number of registers in the address. */
7007 if ((parts.base
7008 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
7009 || (parts.index
7010 && (!REG_P (parts.index)
7011 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
7012 cost++;
7014 if (parts.base
7015 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
7016 && parts.index
7017 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
7018 && parts.base != parts.index)
7019 cost++;
7021 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
7022 since its predecode logic can't detect the length of instructions
7023 and decoding degenerates to the vector decoder. Increase the cost of
7024 such addresses here. The penalty is at least 2 cycles. It may be
7025 worthwhile to split such addresses or even refuse such addresses at all.
7027 The following addressing modes are affected:
7028 [base+scale*index]
7029 [scale*index+disp]
7030 [base+index]
7032 The first and last cases may be avoidable by explicitly coding the zero
7033 in the memory address, but I don't have an AMD-K6 machine handy to
7034 check this theory. */
7036 if (TARGET_K6
7037 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
7038 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
7039 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
7040 cost += 10;
7042 return cost;
7045 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
7046 this is used to form addresses to local data when -fPIC is in
7047 use. */
7049 static bool
7050 darwin_local_data_pic (rtx disp)
7052 if (GET_CODE (disp) == MINUS)
7054 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
7055 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
7056 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
7058 const char *sym_name = XSTR (XEXP (disp, 1), 0);
7059 if (! strcmp (sym_name, "<pic base>"))
7060 return true;
7064 return false;
7067 /* Determine if a given RTX is a valid constant. We already know this
7068 satisfies CONSTANT_P. */
7070 bool
7071 legitimate_constant_p (rtx x)
7073 switch (GET_CODE (x))
7075 case CONST:
7076 x = XEXP (x, 0);
7078 if (GET_CODE (x) == PLUS)
7080 if (!CONST_INT_P (XEXP (x, 1)))
7081 return false;
7082 x = XEXP (x, 0);
7085 if (TARGET_MACHO && darwin_local_data_pic (x))
7086 return true;
7088 /* Only some unspecs are valid as "constants". */
7089 if (GET_CODE (x) == UNSPEC)
7090 switch (XINT (x, 1))
7092 case UNSPEC_GOT:
7093 case UNSPEC_GOTOFF:
7094 case UNSPEC_PLTOFF:
7095 return TARGET_64BIT;
7096 case UNSPEC_TPOFF:
7097 case UNSPEC_NTPOFF:
7098 x = XVECEXP (x, 0, 0);
7099 return (GET_CODE (x) == SYMBOL_REF
7100 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
7101 case UNSPEC_DTPOFF:
7102 x = XVECEXP (x, 0, 0);
7103 return (GET_CODE (x) == SYMBOL_REF
7104 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
7105 default:
7106 return false;
7109 /* We must have drilled down to a symbol. */
7110 if (GET_CODE (x) == LABEL_REF)
7111 return true;
7112 if (GET_CODE (x) != SYMBOL_REF)
7113 return false;
7114 /* FALLTHRU */
7116 case SYMBOL_REF:
7117 /* TLS symbols are never valid. */
7118 if (SYMBOL_REF_TLS_MODEL (x))
7119 return false;
7121 /* DLLIMPORT symbols are never valid. */
7122 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
7123 && SYMBOL_REF_DLLIMPORT_P (x))
7124 return false;
7125 break;
7127 case CONST_DOUBLE:
7128 if (GET_MODE (x) == TImode
7129 && x != CONST0_RTX (TImode)
7130 && !TARGET_64BIT)
7131 return false;
7132 break;
7134 case CONST_VECTOR:
7135 if (x == CONST0_RTX (GET_MODE (x)))
7136 return true;
7137 return false;
7139 default:
7140 break;
7143 /* Otherwise we handle everything else in the move patterns. */
7144 return true;
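/* For illustration (editorial example, hypothetical symbol "x"): under
   the predicate above, (symbol_ref "x") and
   (const (plus (symbol_ref "x") (const_int 8))) are valid constants
   unless "x" is thread-local or dllimport, while
   (const (unspec [(symbol_ref "x")] UNSPEC_TPOFF)) is valid only when
   "x" uses the local-exec TLS model.  */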
7147 /* Determine if it's legal to put X into the constant pool. This
7148 is not possible for the addresses of thread-local symbols, which
7149 are checked above. */
7151 static bool
7152 ix86_cannot_force_const_mem (rtx x)
7154 /* We can always put integral constants and vectors in memory. */
7155 switch (GET_CODE (x))
7157 case CONST_INT:
7158 case CONST_DOUBLE:
7159 case CONST_VECTOR:
7160 return false;
7162 default:
7163 break;
7165 return !legitimate_constant_p (x);
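/* For illustration: (const_int 42) or a constant vector can always be
   spilled to the constant pool (the hook returns false), whereas the
   address of a thread-local variable cannot, because
   legitimate_constant_p rejects TLS symbols above.  */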
7168 /* Determine if a given RTX is a valid constant address. */
7170 bool
7171 constant_address_p (rtx x)
7173 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
7176 /* Nonzero if the constant value X is a legitimate general operand
7177 when generating PIC code. It is given that flag_pic is on and
7178 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
7180 bool
7181 legitimate_pic_operand_p (rtx x)
7183 rtx inner;
7185 switch (GET_CODE (x))
7187 case CONST:
7188 inner = XEXP (x, 0);
7189 if (GET_CODE (inner) == PLUS
7190 && CONST_INT_P (XEXP (inner, 1)))
7191 inner = XEXP (inner, 0);
7193 /* Only some unspecs are valid as "constants". */
7194 if (GET_CODE (inner) == UNSPEC)
7195 switch (XINT (inner, 1))
7197 case UNSPEC_GOT:
7198 case UNSPEC_GOTOFF:
7199 case UNSPEC_PLTOFF:
7200 return TARGET_64BIT;
7201 case UNSPEC_TPOFF:
7202 x = XVECEXP (inner, 0, 0);
7203 return (GET_CODE (x) == SYMBOL_REF
7204 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
7205 default:
7206 return false;
7208 /* FALLTHRU */
7210 case SYMBOL_REF:
7211 case LABEL_REF:
7212 return legitimate_pic_address_disp_p (x);
7214 default:
7215 return true;
7219 /* Determine if a given CONST RTX is a valid memory displacement
7220 in PIC mode. */
7222 int
7223 legitimate_pic_address_disp_p (rtx disp)
7225 bool saw_plus;
7227 /* In 64-bit mode we can allow direct addresses of symbols and labels
7228 when they are not dynamic symbols. */
7229 if (TARGET_64BIT)
7231 rtx op0 = disp, op1;
7233 switch (GET_CODE (disp))
7235 case LABEL_REF:
7236 return true;
7238 case CONST:
7239 if (GET_CODE (XEXP (disp, 0)) != PLUS)
7240 break;
7241 op0 = XEXP (XEXP (disp, 0), 0);
7242 op1 = XEXP (XEXP (disp, 0), 1);
7243 if (!CONST_INT_P (op1)
7244 || INTVAL (op1) >= 16*1024*1024
7245 || INTVAL (op1) < -16*1024*1024)
7246 break;
7247 if (GET_CODE (op0) == LABEL_REF)
7248 return true;
7249 if (GET_CODE (op0) != SYMBOL_REF)
7250 break;
7251 /* FALLTHRU */
7253 case SYMBOL_REF:
7254 /* TLS references should always be enclosed in UNSPEC. */
7255 if (SYMBOL_REF_TLS_MODEL (op0))
7256 return false;
7257 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
7258 && ix86_cmodel != CM_LARGE_PIC)
7259 return true;
7260 break;
7262 default:
7263 break;
7266 if (GET_CODE (disp) != CONST)
7267 return 0;
7268 disp = XEXP (disp, 0);
7270 if (TARGET_64BIT)
7272 /* It is unsafe to allow PLUS expressions here; this limits the allowed
7273 distance of GOT table entries. We should not need these anyway. */
7274 if (GET_CODE (disp) != UNSPEC
7275 || (XINT (disp, 1) != UNSPEC_GOTPCREL
7276 && XINT (disp, 1) != UNSPEC_GOTOFF
7277 && XINT (disp, 1) != UNSPEC_PLTOFF))
7278 return 0;
7280 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
7281 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
7282 return 0;
7283 return 1;
7286 saw_plus = false;
7287 if (GET_CODE (disp) == PLUS)
7289 if (!CONST_INT_P (XEXP (disp, 1)))
7290 return 0;
7291 disp = XEXP (disp, 0);
7292 saw_plus = true;
7295 if (TARGET_MACHO && darwin_local_data_pic (disp))
7296 return 1;
7298 if (GET_CODE (disp) != UNSPEC)
7299 return 0;
7301 switch (XINT (disp, 1))
7303 case UNSPEC_GOT:
7304 if (saw_plus)
7305 return false;
7306 /* We need to check for both symbols and labels because VxWorks loads
7307 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
7308 details. */
7309 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
7310 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
7311 case UNSPEC_GOTOFF:
7312 /* Refuse GOTOFF in 64-bit mode since it is always 64 bits when used.
7313 While the ABI also specifies a 32-bit relocation, we don't produce
7314 it in the small PIC model at all. */
7315 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
7316 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
7317 && !TARGET_64BIT)
7318 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
7319 return false;
7320 case UNSPEC_GOTTPOFF:
7321 case UNSPEC_GOTNTPOFF:
7322 case UNSPEC_INDNTPOFF:
7323 if (saw_plus)
7324 return false;
7325 disp = XVECEXP (disp, 0, 0);
7326 return (GET_CODE (disp) == SYMBOL_REF
7327 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
7328 case UNSPEC_NTPOFF:
7329 disp = XVECEXP (disp, 0, 0);
7330 return (GET_CODE (disp) == SYMBOL_REF
7331 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
7332 case UNSPEC_DTPOFF:
7333 disp = XVECEXP (disp, 0, 0);
7334 return (GET_CODE (disp) == SYMBOL_REF
7335 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
7338 return 0;
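/* For illustration (editorial example, hypothetical symbol "x"): typical
   displacements accepted above are

     (const (unspec [(symbol_ref "x")] UNSPEC_GOTOFF))    32-bit PIC
     (const (unspec [(symbol_ref "x")] UNSPEC_GOTPCREL))  64-bit PIC

   while a bare (symbol_ref "x") is accepted only in 64-bit mode, and
   then only for local, non-TLS, non-far symbols outside the large PIC
   model.  */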
7341 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
7342 memory address for an instruction. The MODE argument is the machine mode
7343 for the MEM expression that wants to use this address.
7345 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
7346 convert common non-canonical forms to canonical form so that they will
7347 be recognized. */
7349 int
7350 legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
7351 rtx addr, int strict)
7353 struct ix86_address parts;
7354 rtx base, index, disp;
7355 HOST_WIDE_INT scale;
7356 const char *reason = NULL;
7357 rtx reason_rtx = NULL_RTX;
7359 if (ix86_decompose_address (addr, &parts) <= 0)
7361 reason = "decomposition failed";
7362 goto report_error;
7365 base = parts.base;
7366 index = parts.index;
7367 disp = parts.disp;
7368 scale = parts.scale;
7370 /* Validate base register.
7372 Don't allow SUBREGs that span more than a word here. They can lead to
7373 spill failures when the base is one word out of a two-word structure,
7374 which is represented internally as a DImode int. */
7376 if (base)
7378 rtx reg;
7379 reason_rtx = base;
7381 if (REG_P (base))
7382 reg = base;
7383 else if (GET_CODE (base) == SUBREG
7384 && REG_P (SUBREG_REG (base))
7385 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
7386 <= UNITS_PER_WORD)
7387 reg = SUBREG_REG (base);
7388 else
7390 reason = "base is not a register";
7391 goto report_error;
7394 if (GET_MODE (base) != Pmode)
7396 reason = "base is not in Pmode";
7397 goto report_error;
7400 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
7401 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
7403 reason = "base is not valid";
7404 goto report_error;
7408 /* Validate index register.
7410 Don't allow SUBREGs that span more than a word here -- same as above. */
7412 if (index)
7414 rtx reg;
7415 reason_rtx = index;
7417 if (REG_P (index))
7418 reg = index;
7419 else if (GET_CODE (index) == SUBREG
7420 && REG_P (SUBREG_REG (index))
7421 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
7422 <= UNITS_PER_WORD)
7423 reg = SUBREG_REG (index);
7424 else
7426 reason = "index is not a register";
7427 goto report_error;
7430 if (GET_MODE (index) != Pmode)
7432 reason = "index is not in Pmode";
7433 goto report_error;
7436 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
7437 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
7439 reason = "index is not valid";
7440 goto report_error;
7444 /* Validate scale factor. */
7445 if (scale != 1)
7447 reason_rtx = GEN_INT (scale);
7448 if (!index)
7450 reason = "scale without index";
7451 goto report_error;
7454 if (scale != 2 && scale != 4 && scale != 8)
7456 reason = "scale is not a valid multiplier";
7457 goto report_error;
7461 /* Validate displacement. */
7462 if (disp)
7464 reason_rtx = disp;
7466 if (GET_CODE (disp) == CONST
7467 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
7468 switch (XINT (XEXP (disp, 0), 1))
7470 /* Refuse GOTOFF and GOT in 64-bit mode since they are always 64 bits
7471 when used. While the ABI also specifies 32-bit relocations, we don't
7472 produce them at all and use IP-relative addressing instead. */
7473 case UNSPEC_GOT:
7474 case UNSPEC_GOTOFF:
7475 gcc_assert (flag_pic);
7476 if (!TARGET_64BIT)
7477 goto is_legitimate_pic;
7478 reason = "64bit address unspec";
7479 goto report_error;
7481 case UNSPEC_GOTPCREL:
7482 gcc_assert (flag_pic);
7483 goto is_legitimate_pic;
7485 case UNSPEC_GOTTPOFF:
7486 case UNSPEC_GOTNTPOFF:
7487 case UNSPEC_INDNTPOFF:
7488 case UNSPEC_NTPOFF:
7489 case UNSPEC_DTPOFF:
7490 break;
7492 default:
7493 reason = "invalid address unspec";
7494 goto report_error;
7497 else if (SYMBOLIC_CONST (disp)
7498 && (flag_pic
7499 || (TARGET_MACHO
7500 #if TARGET_MACHO
7501 && MACHOPIC_INDIRECT
7502 && !machopic_operand_p (disp)
7503 #endif
7507 is_legitimate_pic:
7508 if (TARGET_64BIT && (index || base))
7510 /* foo@dtpoff(%rX) is ok. */
7511 if (GET_CODE (disp) != CONST
7512 || GET_CODE (XEXP (disp, 0)) != PLUS
7513 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
7514 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
7515 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
7516 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
7518 reason = "non-constant pic memory reference";
7519 goto report_error;
7522 else if (! legitimate_pic_address_disp_p (disp))
7524 reason = "displacement is an invalid pic construct";
7525 goto report_error;
7528 /* This code used to verify that a symbolic pic displacement
7529 includes the pic_offset_table_rtx register.
7531 While this is a good idea, unfortunately these constructs may
7532 be created by the "adds using lea" optimization for incorrect
7533 code like:
7535 int a;
7536 int foo(int i)
7537 {
7538 return *(&a+i);
7539 }
7541 This code is nonsensical, but results in addressing the
7542 GOT table with a pic_offset_table_rtx base. We can't
7543 just refuse it easily, since it gets matched by the
7544 "addsi3" pattern, which later gets split to lea in the
7545 case the output register differs from the input. While this
7546 could be handled by a separate addsi pattern for this case
7547 that never results in lea, disabling this test seems to be
7548 the easier and correct fix for the crash. */
7550 else if (GET_CODE (disp) != LABEL_REF
7551 && !CONST_INT_P (disp)
7552 && (GET_CODE (disp) != CONST
7553 || !legitimate_constant_p (disp))
7554 && (GET_CODE (disp) != SYMBOL_REF
7555 || !legitimate_constant_p (disp)))
7557 reason = "displacement is not constant";
7558 goto report_error;
7560 else if (TARGET_64BIT
7561 && !x86_64_immediate_operand (disp, VOIDmode))
7563 reason = "displacement is out of range";
7564 goto report_error;
7568 /* Everything looks valid. */
7569 return TRUE;
7571 report_error:
7572 return FALSE;
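/* For illustration: a canonical address such as

     (plus (plus (mult (reg %eax) (const_int 4)) (reg %ebx))
	   (const_int 16))

   decomposes to base %ebx, index %eax, scale 4, disp 16 and passes all
   of the checks above, whereas a scale of 3, or a scale without an
   index, fails with the corresponding reason string.  */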
7575 /* Return a unique alias set for the GOT. */
7577 static alias_set_type
7578 ix86_GOT_alias_set (void)
7580 static alias_set_type set = -1;
7581 if (set == -1)
7582 set = new_alias_set ();
7583 return set;
7586 /* Return a legitimate reference for ORIG (an address) using the
7587 register REG. If REG is 0, a new pseudo is generated.
7589 There are two types of references that must be handled:
7591 1. Global data references must load the address from the GOT, via
7592 the PIC reg. An insn is emitted to do this load, and the reg is
7593 returned.
7595 2. Static data references, constant pool addresses, and code labels
7596 compute the address as an offset from the GOT, whose base is in
7597 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
7598 differentiate them from global data objects. The returned
7599 address is the PIC reg + an unspec constant.
7601 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
7602 reg also appears in the address. */
7604 static rtx
7605 legitimize_pic_address (rtx orig, rtx reg)
7607 rtx addr = orig;
7608 rtx new_rtx = orig;
7609 rtx base;
7611 #if TARGET_MACHO
7612 if (TARGET_MACHO && !TARGET_64BIT)
7614 if (reg == 0)
7615 reg = gen_reg_rtx (Pmode);
7616 /* Use the generic Mach-O PIC machinery. */
7617 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
7619 #endif
7621 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
7622 new_rtx = addr;
7623 else if (TARGET_64BIT
7624 && ix86_cmodel != CM_SMALL_PIC
7625 && gotoff_operand (addr, Pmode))
7627 rtx tmpreg;
7628 /* This symbol may be referenced via a displacement from the PIC
7629 base address (@GOTOFF). */
7631 if (reload_in_progress)
7632 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
7633 if (GET_CODE (addr) == CONST)
7634 addr = XEXP (addr, 0);
7635 if (GET_CODE (addr) == PLUS)
7637 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
7638 UNSPEC_GOTOFF);
7639 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
7641 else
7642 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
7643 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
7644 if (!reg)
7645 tmpreg = gen_reg_rtx (Pmode);
7646 else
7647 tmpreg = reg;
7648 emit_move_insn (tmpreg, new_rtx);
7650 if (reg != 0)
7652 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
7653 tmpreg, 1, OPTAB_DIRECT);
7654 new_rtx = reg;
7656 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
7658 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
7660 /* This symbol may be referenced via a displacement from the PIC
7661 base address (@GOTOFF). */
7663 if (reload_in_progress)
7664 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
7665 if (GET_CODE (addr) == CONST)
7666 addr = XEXP (addr, 0);
7667 if (GET_CODE (addr) == PLUS)
7669 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
7670 UNSPEC_GOTOFF);
7671 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
7673 else
7674 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
7675 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
7676 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
7678 if (reg != 0)
7680 emit_move_insn (reg, new_rtx);
7681 new_rtx = reg;
7684 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
7685 /* We can't use @GOTOFF for text labels on VxWorks;
7686 see gotoff_operand. */
7687 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
7689 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
7691 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
7692 return legitimize_dllimport_symbol (addr, true);
7693 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
7694 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
7695 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
7697 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
7698 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
7702 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
7704 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
7705 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
7706 new_rtx = gen_const_mem (Pmode, new_rtx);
7707 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
7709 if (reg == 0)
7710 reg = gen_reg_rtx (Pmode);
7711 /* Use gen_movsi directly; otherwise the address is loaded
7712 into a register for CSE. We don't want to CSE these addresses;
7713 instead, we CSE addresses from the GOT table, so skip this. */
7714 emit_insn (gen_movsi (reg, new_rtx));
7715 new_rtx = reg;
7717 else
7719 /* This symbol must be referenced via a load from the
7720 Global Offset Table (@GOT). */
7722 if (reload_in_progress)
7723 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
7724 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
7725 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
7726 if (TARGET_64BIT)
7727 new_rtx = force_reg (Pmode, new_rtx);
7728 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
7729 new_rtx = gen_const_mem (Pmode, new_rtx);
7730 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
7732 if (reg == 0)
7733 reg = gen_reg_rtx (Pmode);
7734 emit_move_insn (reg, new_rtx);
7735 new_rtx = reg;
7738 else
7740 if (CONST_INT_P (addr)
7741 && !x86_64_immediate_operand (addr, VOIDmode))
7743 if (reg)
7745 emit_move_insn (reg, addr);
7746 new_rtx = reg;
7748 else
7749 new_rtx = force_reg (Pmode, addr);
7751 else if (GET_CODE (addr) == CONST)
7753 addr = XEXP (addr, 0);
7755 /* We must match what we generated before. Assume the only
7756 unspecs that can get here are ours. Not that we could do
7757 anything with them anyway.... */
7758 if (GET_CODE (addr) == UNSPEC
7759 || (GET_CODE (addr) == PLUS
7760 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
7761 return orig;
7762 gcc_assert (GET_CODE (addr) == PLUS);
7764 if (GET_CODE (addr) == PLUS)
7766 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
7768 /* Check first to see if this is a constant offset from a @GOTOFF
7769 symbol reference. */
7770 if (gotoff_operand (op0, Pmode)
7771 && CONST_INT_P (op1))
7773 if (!TARGET_64BIT)
7775 if (reload_in_progress)
7776 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
7777 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
7778 UNSPEC_GOTOFF);
7779 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
7780 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
7781 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
7783 if (reg != 0)
7785 emit_move_insn (reg, new_rtx);
7786 new_rtx = reg;
7789 else
7791 if (INTVAL (op1) < -16*1024*1024
7792 || INTVAL (op1) >= 16*1024*1024)
7794 if (!x86_64_immediate_operand (op1, Pmode))
7795 op1 = force_reg (Pmode, op1);
7796 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
7800 else
7802 base = legitimize_pic_address (XEXP (addr, 0), reg);
7803 new_rtx = legitimize_pic_address (XEXP (addr, 1),
7804 base == reg ? NULL_RTX : reg);
7806 if (CONST_INT_P (new_rtx))
7807 new_rtx = plus_constant (base, INTVAL (new_rtx));
7808 else
7810 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
7812 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
7813 new_rtx = XEXP (new_rtx, 1);
7815 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
7820 return new_rtx;
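/* For illustration (editorial example, hypothetical symbols "x" and "y"):
   on 32-bit ELF with -fPIC, a global "x" is rewritten as a load through
   the GOT,

     (mem (plus (reg pic) (const (unspec [(symbol_ref "x")] UNSPEC_GOT))))

   whose value is then copied into a register, while a local "y" becomes
   the GOT-relative sum

     (plus (reg pic) (const (unspec [(symbol_ref "y")] UNSPEC_GOTOFF))).  */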
7823 /* Load the thread pointer. If TO_REG is true, force it into a register. */
7825 static rtx
7826 get_thread_pointer (int to_reg)
7828 rtx tp, reg, insn;
7830 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
7831 if (!to_reg)
7832 return tp;
7834 reg = gen_reg_rtx (Pmode);
7835 insn = gen_rtx_SET (VOIDmode, reg, tp);
7836 insn = emit_insn (insn);
7838 return reg;
7841 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
7842 false if we expect this to be used for a memory address and true if
7843 we expect to load the address into a register. */
7845 static rtx
7846 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
7848 rtx dest, base, off, pic, tp;
7849 int type;
7851 switch (model)
7853 case TLS_MODEL_GLOBAL_DYNAMIC:
7854 dest = gen_reg_rtx (Pmode);
7855 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7857 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7859 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
7861 start_sequence ();
7862 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
7863 insns = get_insns ();
7864 end_sequence ();
7866 RTL_CONST_CALL_P (insns) = 1;
7867 emit_libcall_block (insns, dest, rax, x);
7869 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7870 emit_insn (gen_tls_global_dynamic_64 (dest, x));
7871 else
7872 emit_insn (gen_tls_global_dynamic_32 (dest, x));
7874 if (TARGET_GNU2_TLS)
7876 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
7878 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7880 break;
7882 case TLS_MODEL_LOCAL_DYNAMIC:
7883 base = gen_reg_rtx (Pmode);
7884 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7886 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7888 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
7890 start_sequence ();
7891 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
7892 insns = get_insns ();
7893 end_sequence ();
7895 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
7896 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
7897 RTL_CONST_CALL_P (insns) = 1;
7898 emit_libcall_block (insns, base, rax, note);
7900 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7901 emit_insn (gen_tls_local_dynamic_base_64 (base));
7902 else
7903 emit_insn (gen_tls_local_dynamic_base_32 (base));
7905 if (TARGET_GNU2_TLS)
7907 rtx x = ix86_tls_module_base ();
7909 set_unique_reg_note (get_last_insn (), REG_EQUIV,
7910 gen_rtx_MINUS (Pmode, x, tp));
7913 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
7914 off = gen_rtx_CONST (Pmode, off);
7916 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
7918 if (TARGET_GNU2_TLS)
7920 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
7922 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7925 break;
7927 case TLS_MODEL_INITIAL_EXEC:
7928 if (TARGET_64BIT)
7930 pic = NULL;
7931 type = UNSPEC_GOTNTPOFF;
7933 else if (flag_pic)
7935 if (reload_in_progress)
7936 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
7937 pic = pic_offset_table_rtx;
7938 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
7940 else if (!TARGET_ANY_GNU_TLS)
7942 pic = gen_reg_rtx (Pmode);
7943 emit_insn (gen_set_got (pic));
7944 type = UNSPEC_GOTTPOFF;
7946 else
7948 pic = NULL;
7949 type = UNSPEC_INDNTPOFF;
7952 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
7953 off = gen_rtx_CONST (Pmode, off);
7954 if (pic)
7955 off = gen_rtx_PLUS (Pmode, pic, off);
7956 off = gen_const_mem (Pmode, off);
7957 set_mem_alias_set (off, ix86_GOT_alias_set ());
7959 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7961 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7962 off = force_reg (Pmode, off);
7963 return gen_rtx_PLUS (Pmode, base, off);
7965 else
7967 base = get_thread_pointer (true);
7968 dest = gen_reg_rtx (Pmode);
7969 emit_insn (gen_subsi3 (dest, base, off));
7971 break;
7973 case TLS_MODEL_LOCAL_EXEC:
7974 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
7975 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7976 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
7977 off = gen_rtx_CONST (Pmode, off);
7979 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7981 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7982 return gen_rtx_PLUS (Pmode, base, off);
7984 else
7986 base = get_thread_pointer (true);
7987 dest = gen_reg_rtx (Pmode);
7988 emit_insn (gen_subsi3 (dest, base, off));
7990 break;
7992 default:
7993 gcc_unreachable ();
7996 return dest;
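/* For illustration (editorial sketch): for the local-exec model on
   64-bit or GNU TLS targets the result above is simply

     (plus <tp> (const (unspec [(symbol_ref "x")] UNSPEC_NTPOFF)))

   where <tp> is the UNSPEC_TP thread pointer from get_thread_pointer,
   so the final address is a %fs/%gs-relative constant offset.  */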
7999 /* Create or return the unique __imp_DECL dllimport symbol corresponding
8000 to symbol DECL. */
8002 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
8003 htab_t dllimport_map;
8005 static tree
8006 get_dllimport_decl (tree decl)
8008 struct tree_map *h, in;
8009 void **loc;
8010 const char *name;
8011 const char *prefix;
8012 size_t namelen, prefixlen;
8013 char *imp_name;
8014 tree to;
8015 rtx rtl;
8017 if (!dllimport_map)
8018 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
8020 in.hash = htab_hash_pointer (decl);
8021 in.base.from = decl;
8022 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
8023 h = (struct tree_map *) *loc;
8024 if (h)
8025 return h->to;
8027 *loc = h = GGC_NEW (struct tree_map);
8028 h->hash = in.hash;
8029 h->base.from = decl;
8030 h->to = to = build_decl (VAR_DECL, NULL, ptr_type_node);
8031 DECL_ARTIFICIAL (to) = 1;
8032 DECL_IGNORED_P (to) = 1;
8033 DECL_EXTERNAL (to) = 1;
8034 TREE_READONLY (to) = 1;
8036 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
8037 name = targetm.strip_name_encoding (name);
8038 prefix = name[0] == FASTCALL_PREFIX ? "*__imp_": "*__imp__";
8039 namelen = strlen (name);
8040 prefixlen = strlen (prefix);
8041 imp_name = (char *) alloca (namelen + prefixlen + 1);
8042 memcpy (imp_name, prefix, prefixlen);
8043 memcpy (imp_name + prefixlen, name, namelen + 1);
8045 name = ggc_alloc_string (imp_name, namelen + prefixlen);
8046 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
8047 SET_SYMBOL_REF_DECL (rtl, to);
8048 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
8050 rtl = gen_const_mem (Pmode, rtl);
8051 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
8053 SET_DECL_RTL (to, rtl);
8054 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
8056 return to;
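/* For illustration (editorial example, hypothetical variable "x"): for
   a dllimport declaration "int x;" the function above builds a
   read-only pointer VAR_DECL whose RTL is

     (mem (symbol_ref "*__imp__x"))

   ("*__imp_" for FASTCALL_PREFIX names), i.e. a load through the
   import-table slot that the dynamic loader fills in.  */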
8059 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
8060 true if we require the result be a register. */
8062 static rtx
8063 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
8065 tree imp_decl;
8066 rtx x;
8068 gcc_assert (SYMBOL_REF_DECL (symbol));
8069 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
8071 x = DECL_RTL (imp_decl);
8072 if (want_reg)
8073 x = force_reg (Pmode, x);
8074 return x;
8077 /* Try machine-dependent ways of modifying an illegitimate address
8078 to be legitimate. If we find one, return the new, valid address.
8079 This macro is used in only one place: `memory_address' in explow.c.
8081 OLDX is the address as it was before break_out_memory_refs was called.
8082 In some cases it is useful to look at this to decide what needs to be done.
8084 MODE and WIN are passed so that this macro can use
8085 GO_IF_LEGITIMATE_ADDRESS.
8087 It is always safe for this macro to do nothing. It exists to recognize
8088 opportunities to optimize the output.
8090 For the 80386, we handle X+REG by loading X into a register R and
8091 using R+REG. R will go in a general reg and indexing will be used.
8092 However, if REG is a broken-out memory address or multiplication,
8093 nothing needs to be done because REG can certainly go in a general reg.
8095 When -fpic is used, special handling is needed for symbolic references.
8096 See comments by legitimize_pic_address in i386.c for details. */
8098 rtx
8099 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
8101 int changed = 0;
8102 unsigned log;
8104 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
8105 if (log)
8106 return legitimize_tls_address (x, (enum tls_model) log, false);
8107 if (GET_CODE (x) == CONST
8108 && GET_CODE (XEXP (x, 0)) == PLUS
8109 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8110 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
8112 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
8113 (enum tls_model) log, false);
8114 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
8117 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
8119 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
8120 return legitimize_dllimport_symbol (x, true);
8121 if (GET_CODE (x) == CONST
8122 && GET_CODE (XEXP (x, 0)) == PLUS
8123 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8124 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
8126 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
8127 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
8131 if (flag_pic && SYMBOLIC_CONST (x))
8132 return legitimize_pic_address (x, 0);
8134 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
8135 if (GET_CODE (x) == ASHIFT
8136 && CONST_INT_P (XEXP (x, 1))
8137 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
8139 changed = 1;
8140 log = INTVAL (XEXP (x, 1));
8141 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
8142 GEN_INT (1 << log));
8145 if (GET_CODE (x) == PLUS)
8147 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
8149 if (GET_CODE (XEXP (x, 0)) == ASHIFT
8150 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8151 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
8153 changed = 1;
8154 log = INTVAL (XEXP (XEXP (x, 0), 1));
8155 XEXP (x, 0) = gen_rtx_MULT (Pmode,
8156 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
8157 GEN_INT (1 << log));
8160 if (GET_CODE (XEXP (x, 1)) == ASHIFT
8161 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
8162 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
8164 changed = 1;
8165 log = INTVAL (XEXP (XEXP (x, 1), 1));
8166 XEXP (x, 1) = gen_rtx_MULT (Pmode,
8167 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
8168 GEN_INT (1 << log));
8171 /* Put multiply first if it isn't already. */
8172 if (GET_CODE (XEXP (x, 1)) == MULT)
8174 rtx tmp = XEXP (x, 0);
8175 XEXP (x, 0) = XEXP (x, 1);
8176 XEXP (x, 1) = tmp;
8177 changed = 1;
8180 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
8181 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
8182 created by virtual register instantiation, register elimination, and
8183 similar optimizations. */
8184 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
8186 changed = 1;
8187 x = gen_rtx_PLUS (Pmode,
8188 gen_rtx_PLUS (Pmode, XEXP (x, 0),
8189 XEXP (XEXP (x, 1), 0)),
8190 XEXP (XEXP (x, 1), 1));
8193 /* Canonicalize
8194 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
8195 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
8196 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
8197 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
8198 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
8199 && CONSTANT_P (XEXP (x, 1)))
8201 rtx constant;
8202 rtx other = NULL_RTX;
8204 if (CONST_INT_P (XEXP (x, 1)))
8206 constant = XEXP (x, 1);
8207 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
8209 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
8211 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
8212 other = XEXP (x, 1);
8214 else
8215 constant = 0;
8217 if (constant)
8219 changed = 1;
8220 x = gen_rtx_PLUS (Pmode,
8221 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
8222 XEXP (XEXP (XEXP (x, 0), 1), 0)),
8223 plus_constant (other, INTVAL (constant)));
8227 if (changed && legitimate_address_p (mode, x, FALSE))
8228 return x;
8230 if (GET_CODE (XEXP (x, 0)) == MULT)
8232 changed = 1;
8233 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
8236 if (GET_CODE (XEXP (x, 1)) == MULT)
8238 changed = 1;
8239 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
8242 if (changed
8243 && REG_P (XEXP (x, 1))
8244 && REG_P (XEXP (x, 0)))
8245 return x;
8247 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
8249 changed = 1;
8250 x = legitimize_pic_address (x, 0);
8253 if (changed && legitimate_address_p (mode, x, FALSE))
8254 return x;
8256 if (REG_P (XEXP (x, 0)))
8258 rtx temp = gen_reg_rtx (Pmode);
8259 rtx val = force_operand (XEXP (x, 1), temp);
8260 if (val != temp)
8261 emit_move_insn (temp, val);
8263 XEXP (x, 1) = temp;
8264 return x;
8267 else if (REG_P (XEXP (x, 1)))
8269 rtx temp = gen_reg_rtx (Pmode);
8270 rtx val = force_operand (XEXP (x, 0), temp);
8271 if (val != temp)
8272 emit_move_insn (temp, val);
8274 XEXP (x, 0) = temp;
8275 return x;
8279 return x;
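/* For illustration: the canonicalizations above turn

     (plus (reg %eax) (ashift (reg %ebx) (const_int 2)))

   into

     (plus (mult (reg %ebx) (const_int 4)) (reg %eax))

   so the multiply comes first and later passes see a scaled index.  */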
8282 /* Print an integer constant expression in assembler syntax. Addition
8283 and subtraction are the only arithmetic that may appear in these
8284 expressions. FILE is the stdio stream to write to, X is the rtx, and
8285 CODE is the operand print code from the output string. */
8287 static void
8288 output_pic_addr_const (FILE *file, rtx x, int code)
8290 char buf[256];
8292 switch (GET_CODE (x))
8294 case PC:
8295 gcc_assert (flag_pic);
8296 putc ('.', file);
8297 break;
8299 case SYMBOL_REF:
8300 if (! TARGET_MACHO || TARGET_64BIT)
8301 output_addr_const (file, x);
8302 else
8304 const char *name = XSTR (x, 0);
8306 /* Mark the decl as referenced so that cgraph will
8307 output the function. */
8308 if (SYMBOL_REF_DECL (x))
8309 mark_decl_referenced (SYMBOL_REF_DECL (x));
8311 #if TARGET_MACHO
8312 if (MACHOPIC_INDIRECT
8313 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
8314 name = machopic_indirection_name (x, /*stub_p=*/true);
8315 #endif
8316 assemble_name (file, name);
8318 if (!TARGET_MACHO && !TARGET_64BIT_MS_ABI
8319 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
8320 fputs ("@PLT", file);
8321 break;
8323 case LABEL_REF:
8324 x = XEXP (x, 0);
8325 /* FALLTHRU */
8326 case CODE_LABEL:
8327 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
8328 assemble_name (asm_out_file, buf);
8329 break;
8331 case CONST_INT:
8332 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
8333 break;
8335 case CONST:
8336 /* This used to output parentheses around the expression,
8337 but that does not work on the 386 (either ATT or BSD assembler). */
8338 output_pic_addr_const (file, XEXP (x, 0), code);
8339 break;
8341 case CONST_DOUBLE:
8342 if (GET_MODE (x) == VOIDmode)
8344 /* We can use %d if the number is <32 bits and positive. */
8345 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
8346 fprintf (file, "0x%lx%08lx",
8347 (unsigned long) CONST_DOUBLE_HIGH (x),
8348 (unsigned long) CONST_DOUBLE_LOW (x));
8349 else
8350 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
8352 else
8353 /* We can't handle floating point constants;
8354 PRINT_OPERAND must handle them. */
8355 output_operand_lossage ("floating constant misused");
8356 break;
8358 case PLUS:
8359 /* Some assemblers need integer constants to appear first. */
8360 if (CONST_INT_P (XEXP (x, 0)))
8362 output_pic_addr_const (file, XEXP (x, 0), code);
8363 putc ('+', file);
8364 output_pic_addr_const (file, XEXP (x, 1), code);
8366 else
8368 gcc_assert (CONST_INT_P (XEXP (x, 1)));
8369 output_pic_addr_const (file, XEXP (x, 1), code);
8370 putc ('+', file);
8371 output_pic_addr_const (file, XEXP (x, 0), code);
8373 break;
8375 case MINUS:
8376 if (!TARGET_MACHO)
8377 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
8378 output_pic_addr_const (file, XEXP (x, 0), code);
8379 putc ('-', file);
8380 output_pic_addr_const (file, XEXP (x, 1), code);
8381 if (!TARGET_MACHO)
8382 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
8383 break;
8385 case UNSPEC:
8386 gcc_assert (XVECLEN (x, 0) == 1);
8387 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
8388 switch (XINT (x, 1))
8390 case UNSPEC_GOT:
8391 fputs ("@GOT", file);
8392 break;
8393 case UNSPEC_GOTOFF:
8394 fputs ("@GOTOFF", file);
8395 break;
8396 case UNSPEC_PLTOFF:
8397 fputs ("@PLTOFF", file);
8398 break;
8399 case UNSPEC_GOTPCREL:
8400 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
8401 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
8402 break;
8403 case UNSPEC_GOTTPOFF:
8404 /* FIXME: This might be @TPOFF in Sun ld too. */
8405 fputs ("@GOTTPOFF", file);
8406 break;
8407 case UNSPEC_TPOFF:
8408 fputs ("@TPOFF", file);
8409 break;
8410 case UNSPEC_NTPOFF:
8411 if (TARGET_64BIT)
8412 fputs ("@TPOFF", file);
8413 else
8414 fputs ("@NTPOFF", file);
8415 break;
8416 case UNSPEC_DTPOFF:
8417 fputs ("@DTPOFF", file);
8418 break;
8419 case UNSPEC_GOTNTPOFF:
8420 if (TARGET_64BIT)
8421 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
8422 "@GOTTPOFF(%rip)": "@GOTTPOFF[rip]", file);
8423 else
8424 fputs ("@GOTNTPOFF", file);
8425 break;
8426 case UNSPEC_INDNTPOFF:
8427 fputs ("@INDNTPOFF", file);
8428 break;
8429 default:
8430 output_operand_lossage ("invalid UNSPEC as operand");
8431 break;
8433 break;
8435 default:
8436 output_operand_lossage ("invalid expression as operand");
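/* For illustration (hypothetical symbol "x"): given
   (const (unspec [(symbol_ref "x")] UNSPEC_GOTOFF)) the routine above
   prints "x@GOTOFF", and a non-local symbol printed with code 'P'
   additionally gets an "@PLT" suffix on non-Mach-O, non-MS targets.  */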
8440 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8441 We need to emit DTP-relative relocations. */
8443 static void ATTRIBUTE_UNUSED
8444 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
8446 fputs (ASM_LONG, file);
8447 output_addr_const (file, x);
8448 fputs ("@DTPOFF", file);
8449 switch (size)
8451 case 4:
8452 break;
8453 case 8:
8454 fputs (", 0", file);
8455 break;
8456 default:
8457 gcc_unreachable ();
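/* For illustration (assuming the usual ".long" definition of ASM_LONG):
   for size 4 this emits ".long x@DTPOFF", and for size 8 it emits
   ".long x@DTPOFF, 0", zero-padding the upper half of the 64-bit
   value.  */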
8461 /* In the name of slightly smaller debug output, and to cater to
8462 general assembler lossage, recognize PIC+GOTOFF and turn it back
8463 into a direct symbol reference.
8465 On Darwin, this is necessary to avoid a crash, because Darwin
8466 has a different PIC label for each routine but the DWARF debugging
8467 information is not associated with any particular routine, so it's
8468 necessary to remove references to the PIC label from RTL stored by
8469 the DWARF output code. */
8471 static rtx
8472 ix86_delegitimize_address (rtx orig_x)
8474 rtx x = orig_x;
8475 /* reg_addend is NULL or a multiple of some register. */
8476 rtx reg_addend = NULL_RTX;
8477 /* const_addend is NULL or a const_int. */
8478 rtx const_addend = NULL_RTX;
8479 /* This is the result, or NULL. */
8480 rtx result = NULL_RTX;
8482 if (MEM_P (x))
8483 x = XEXP (x, 0);
8485 if (TARGET_64BIT)
8487 if (GET_CODE (x) != CONST
8488 || GET_CODE (XEXP (x, 0)) != UNSPEC
8489 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
8490 || !MEM_P (orig_x))
8491 return orig_x;
8492 return XVECEXP (XEXP (x, 0), 0, 0);
8495 if (GET_CODE (x) != PLUS
8496 || GET_CODE (XEXP (x, 1)) != CONST)
8497 return orig_x;
8499 if (REG_P (XEXP (x, 0))
8500 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
8501 /* %ebx + GOT/GOTOFF */
8502 ;
8503 else if (GET_CODE (XEXP (x, 0)) == PLUS)
8505 /* %ebx + %reg * scale + GOT/GOTOFF */
8506 reg_addend = XEXP (x, 0);
8507 if (REG_P (XEXP (reg_addend, 0))
8508 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
8509 reg_addend = XEXP (reg_addend, 1);
8510 else if (REG_P (XEXP (reg_addend, 1))
8511 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
8512 reg_addend = XEXP (reg_addend, 0);
8513 else
8514 return orig_x;
8515 if (!REG_P (reg_addend)
8516 && GET_CODE (reg_addend) != MULT
8517 && GET_CODE (reg_addend) != ASHIFT)
8518 return orig_x;
8520 else
8521 return orig_x;
8523 x = XEXP (XEXP (x, 1), 0);
8524 if (GET_CODE (x) == PLUS
8525 && CONST_INT_P (XEXP (x, 1)))
8527 const_addend = XEXP (x, 1);
8528 x = XEXP (x, 0);
8531 if (GET_CODE (x) == UNSPEC
8532 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
8533 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
8534 result = XVECEXP (x, 0, 0);
8536 if (TARGET_MACHO && darwin_local_data_pic (x)
8537 && !MEM_P (orig_x))
8538 result = XEXP (x, 0);
8540 if (! result)
8541 return orig_x;
8543 if (const_addend)
8544 result = gen_rtx_PLUS (Pmode, result, const_addend);
8545 if (reg_addend)
8546 result = gen_rtx_PLUS (Pmode, reg_addend, result);
8547 return result;
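/* For illustration (hypothetical symbol "x"): a 32-bit GOTOFF access
   such as

     (plus (reg %ebx) (const (unspec [(symbol_ref "x")] UNSPEC_GOTOFF)))

   delegitimizes back to the plain (symbol_ref "x"), keeping the DWARF
   output free of references to the PIC register.  */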
8550 /* If X is a machine specific address (i.e. a symbol or label being
8551 referenced as a displacement from the GOT implemented using an
8552 UNSPEC), then return the base term. Otherwise return X. */
8554 rtx
8555 ix86_find_base_term (rtx x)
8557 rtx term;
8559 if (TARGET_64BIT)
8561 if (GET_CODE (x) != CONST)
8562 return x;
8563 term = XEXP (x, 0);
8564 if (GET_CODE (term) == PLUS
8565 && (CONST_INT_P (XEXP (term, 1))
8566 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
8567 term = XEXP (term, 0);
8568 if (GET_CODE (term) != UNSPEC
8569 || XINT (term, 1) != UNSPEC_GOTPCREL)
8570 return x;
8572 term = XVECEXP (term, 0, 0);
8574 if (GET_CODE (term) != SYMBOL_REF
8575 && GET_CODE (term) != LABEL_REF)
8576 return x;
8578 return term;
8581 term = ix86_delegitimize_address (x);
8583 if (GET_CODE (term) != SYMBOL_REF
8584 && GET_CODE (term) != LABEL_REF)
8585 return x;
8587 return term;
8590 static void
8591 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
8592 int fp, FILE *file)
8594 const char *suffix;
8596 if (mode == CCFPmode || mode == CCFPUmode)
8598 enum rtx_code second_code, bypass_code;
8599 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
8600 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
8601 code = ix86_fp_compare_code_to_integer (code);
8602 mode = CCmode;
8604 if (reverse)
8605 code = reverse_condition (code);
8607 switch (code)
8609 case EQ:
8610 switch (mode)
8612 case CCAmode:
8613 suffix = "a";
8614 break;
8616 case CCCmode:
8617 suffix = "c";
8618 break;
8620 case CCOmode:
8621 suffix = "o";
8622 break;
8624 case CCSmode:
8625 suffix = "s";
8626 break;
8628 default:
8629 suffix = "e";
8631 break;
8632 case NE:
8633 switch (mode)
8635 case CCAmode:
8636 suffix = "na";
8637 break;
8639 case CCCmode:
8640 suffix = "nc";
8641 break;
8643 case CCOmode:
8644 suffix = "no";
8645 break;
8647 case CCSmode:
8648 suffix = "ns";
8649 break;
8651 default:
8652 suffix = "ne";
8654 break;
8655 case GT:
8656 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
8657 suffix = "g";
8658 break;
8659 case GTU:
8660 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
8661 Those same assemblers have the same but opposite lossage on cmov. */
8662 if (mode == CCmode)
8663 suffix = fp ? "nbe" : "a";
8664 else if (mode == CCCmode)
8665 suffix = "b";
8666 else
8667 gcc_unreachable ();
8668 break;
8669 case LT:
8670 switch (mode)
8672 case CCNOmode:
8673 case CCGOCmode:
8674 suffix = "s";
8675 break;
8677 case CCmode:
8678 case CCGCmode:
8679 suffix = "l";
8680 break;
8682 default:
8683 gcc_unreachable ();
8685 break;
8686 case LTU:
8687 gcc_assert (mode == CCmode || mode == CCCmode);
8688 suffix = "b";
8689 break;
8690 case GE:
8691 switch (mode)
8693 case CCNOmode:
8694 case CCGOCmode:
8695 suffix = "ns";
8696 break;
8698 case CCmode:
8699 case CCGCmode:
8700 suffix = "ge";
8701 break;
8703 default:
8704 gcc_unreachable ();
8706 break;
8707 case GEU:
8708 /* ??? As above. */
8709 gcc_assert (mode == CCmode || mode == CCCmode);
8710 suffix = fp ? "nb" : "ae";
8711 break;
8712 case LE:
8713 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
8714 suffix = "le";
8715 break;
8716 case LEU:
8717 /* ??? As above. */
8718 if (mode == CCmode)
8719 suffix = "be";
8720 else if (mode == CCCmode)
8721 suffix = fp ? "nb" : "ae";
8722 else
8723 gcc_unreachable ();
8724 break;
8725 case UNORDERED:
8726 suffix = fp ? "u" : "p";
8727 break;
8728 case ORDERED:
8729 suffix = fp ? "nu" : "np";
8730 break;
8731 default:
8732 gcc_unreachable ();
8734 fputs (suffix, file);
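/* For illustration: (gtu ...) in plain CCmode prints "a" for the
   integer forms ("ja"/"seta") but "nbe" when FP is set for fcmov,
   while in CCCmode, where only the carry flag is valid, it prints
   "b".  */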
8737 /* Print the name of register X to FILE based on its machine mode and number.
8738 If CODE is 'w', pretend the mode is HImode.
8739 If CODE is 'b', pretend the mode is QImode.
8740 If CODE is 'k', pretend the mode is SImode.
8741 If CODE is 'q', pretend the mode is DImode.
8742 If CODE is 'h', pretend the reg is the 'high' byte register.
8743 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op. */
8745 void
8746 print_reg (rtx x, int code, FILE *file)
8748 gcc_assert (x == pc_rtx
8749 || (REGNO (x) != ARG_POINTER_REGNUM
8750 && REGNO (x) != FRAME_POINTER_REGNUM
8751 && REGNO (x) != FLAGS_REG
8752 && REGNO (x) != FPSR_REG
8753 && REGNO (x) != FPCR_REG));
8755 if (ASSEMBLER_DIALECT == ASM_ATT)
8756 putc ('%', file);
8758 if (x == pc_rtx)
8760 gcc_assert (TARGET_64BIT);
8761 fputs ("rip", file);
8762 return;
8765 if (code == 'w' || MMX_REG_P (x))
8766 code = 2;
8767 else if (code == 'b')
8768 code = 1;
8769 else if (code == 'k')
8770 code = 4;
8771 else if (code == 'q')
8772 code = 8;
8773 else if (code == 'y')
8774 code = 3;
8775 else if (code == 'h')
8776 code = 0;
8777 else
8778 code = GET_MODE_SIZE (GET_MODE (x));
8780 /* Irritatingly, AMD extended registers use a different naming convention
8781 from the normal registers. */
8782 if (REX_INT_REG_P (x))
8784 gcc_assert (TARGET_64BIT);
8785 switch (code)
8787 case 0:
8788 error ("extended registers have no high halves");
8789 break;
8790 case 1:
8791 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
8792 break;
8793 case 2:
8794 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
8795 break;
8796 case 4:
8797 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
8798 break;
8799 case 8:
8800 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
8801 break;
8802 default:
8803 error ("unsupported operand size for extended register");
8804 break;
8806 return;
8808 switch (code)
8810 case 3:
8811 if (STACK_TOP_P (x))
8813 fputs ("st(0)", file);
8814 break;
8816 /* FALLTHRU */
8817 case 8:
8818 case 4:
8819 case 12:
8820 if (! ANY_FP_REG_P (x))
8821 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
8822 /* FALLTHRU */
8823 case 16:
8824 case 2:
8825 normal:
8826 fputs (hi_reg_name[REGNO (x)], file);
8827 break;
8828 case 1:
8829 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
8830 goto normal;
8831 fputs (qi_reg_name[REGNO (x)], file);
8832 break;
8833 case 0:
8834 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
8835 goto normal;
8836 fputs (qi_high_reg_name[REGNO (x)], file);
8837 break;
8838 default:
8839 gcc_unreachable ();
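/* For illustration: for (reg:SI ax), code 'b' prints "%al" and code 'w'
   prints "%ax" in the AT&T dialect, while an extended register such as
   r8 printed with code 'k' comes out as "%r8d".  */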
8843 /* Locate some local-dynamic symbol still in use by this function
8844 so that we can print its name in some tls_local_dynamic_base
8845 pattern. */
8847 static int
8848 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8850 rtx x = *px;
8852 if (GET_CODE (x) == SYMBOL_REF
8853 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8855 cfun->machine->some_ld_name = XSTR (x, 0);
8856 return 1;
8859 return 0;
8862 static const char *
8863 get_some_local_dynamic_name (void)
8865 rtx insn;
8867 if (cfun->machine->some_ld_name)
8868 return cfun->machine->some_ld_name;
8870 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8871 if (INSN_P (insn)
8872 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8873 return cfun->machine->some_ld_name;
8875 gcc_unreachable ();
8878 /* Meaning of CODE:
8879 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
8880 C -- print opcode suffix for set/cmov insn.
8881 c -- like C, but print reversed condition
8882 F,f -- likewise, but for floating-point.
8883 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
8884 otherwise nothing
8885 R -- print the prefix for register names.
8886 z -- print the opcode suffix for the size of the current operand.
8887 * -- print a star (in certain assembler syntax)
8888 A -- print an absolute memory reference.
8889 w -- print the operand as if it's a "word" (HImode) even if it isn't.
8890 s -- print a shift double count, followed by the assembler's argument
8891 delimiter.
8892 b -- print the QImode name of the register for the indicated operand.
8893 %b0 would print %al if operands[0] is reg 0.
8894 w -- likewise, print the HImode name of the register.
8895 k -- likewise, print the SImode name of the register.
8896 q -- likewise, print the DImode name of the register.
8897 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
8898 y -- print "st(0)" instead of "st" as a register.
8899 D -- print condition for SSE cmp instruction.
8900 P -- if PIC, print an @PLT suffix.
8901 X -- don't print any sort of PIC '@' suffix for a symbol.
8902 & -- print some in-use local-dynamic symbol name.
8903 H -- print a memory address offset by 8; used for sse high-parts
8904 Y -- print condition for SSE5 com* instruction.
8905 + -- print a branch hint as 'cs' or 'ds' prefix
8906 ; -- print a semicolon (after prefixes due to a bug in older gas).
8909 void
8910 print_operand (FILE *file, rtx x, int code)
8912 if (code)
8914 switch (code)
8916 case '*':
8917 if (ASSEMBLER_DIALECT == ASM_ATT)
8918 putc ('*', file);
8919 return;
8921 case '&':
8922 assemble_name (file, get_some_local_dynamic_name ());
8923 return;
8925 case 'A':
8926 switch (ASSEMBLER_DIALECT)
8928 case ASM_ATT:
8929 putc ('*', file);
8930 break;
8932 case ASM_INTEL:
8933 /* Intel syntax. For absolute addresses, registers should not
8934 be surrounded by brackets. */
8935 if (!REG_P (x))
8937 putc ('[', file);
8938 PRINT_OPERAND (file, x, 0);
8939 putc (']', file);
8940 return;
8942 break;
8944 default:
8945 gcc_unreachable ();
8948 PRINT_OPERAND (file, x, 0);
8949 return;
8952 case 'L':
8953 if (ASSEMBLER_DIALECT == ASM_ATT)
8954 putc ('l', file);
8955 return;
8957 case 'W':
8958 if (ASSEMBLER_DIALECT == ASM_ATT)
8959 putc ('w', file);
8960 return;
8962 case 'B':
8963 if (ASSEMBLER_DIALECT == ASM_ATT)
8964 putc ('b', file);
8965 return;
8967 case 'Q':
8968 if (ASSEMBLER_DIALECT == ASM_ATT)
8969 putc ('l', file);
8970 return;
8972 case 'S':
8973 if (ASSEMBLER_DIALECT == ASM_ATT)
8974 putc ('s', file);
8975 return;
8977 case 'T':
8978 if (ASSEMBLER_DIALECT == ASM_ATT)
8979 putc ('t', file);
8980 return;
8982 case 'z':
8983 /* 387 opcodes don't get size suffixes if the operands are
8984 registers. */
8985 if (STACK_REG_P (x))
8986 return;
8988 /* Likewise if using Intel opcodes. */
8989 if (ASSEMBLER_DIALECT == ASM_INTEL)
8990 return;
8992 /* Derive the size of the op from the size of the operand. */
8993 switch (GET_MODE_SIZE (GET_MODE (x)))
8995 case 1:
8996 putc ('b', file);
8997 return;
8999 case 2:
9000 if (MEM_P (x))
9002 #ifdef HAVE_GAS_FILDS_FISTS
9003 putc ('s', file);
9004 #endif
9005 return;
9007 else
9008 putc ('w', file);
9009 return;
9011 case 4:
9012 if (GET_MODE (x) == SFmode)
9014 putc ('s', file);
9015 return;
9017 else
9018 putc ('l', file);
9019 return;
9021 case 12:
9022 case 16:
9023 putc ('t', file);
9024 return;
9026 case 8:
9027 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
9029 #ifdef GAS_MNEMONICS
9030 putc ('q', file);
9031 #else
9032 putc ('l', file);
9033 putc ('l', file);
9034 #endif
9036 else
9037 putc ('l', file);
9038 return;
9040 default:
9041 gcc_unreachable ();
9044 case 'b':
9045 case 'w':
9046 case 'k':
9047 case 'q':
9048 case 'h':
9049 case 'y':
9050 case 'X':
9051 case 'P':
9052 break;
9054 case 's':
9055 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
9057 PRINT_OPERAND (file, x, 0);
9058 putc (',', file);
9060 return;
9062 case 'D':
9063 /* A little bit of brain damage here. The SSE compare instructions
9064 use completely different names for the comparisons than the
9065 fp conditional moves do. */
9066 switch (GET_CODE (x))
9068 case EQ:
9069 case UNEQ:
9070 fputs ("eq", file);
9071 break;
9072 case LT:
9073 case UNLT:
9074 fputs ("lt", file);
9075 break;
9076 case LE:
9077 case UNLE:
9078 fputs ("le", file);
9079 break;
9080 case UNORDERED:
9081 fputs ("unord", file);
9082 break;
9083 case NE:
9084 case LTGT:
9085 fputs ("neq", file);
9086 break;
9087 case UNGE:
9088 case GE:
9089 fputs ("nlt", file);
9090 break;
9091 case UNGT:
9092 case GT:
9093 fputs ("nle", file);
9094 break;
9095 case ORDERED:
9096 fputs ("ord", file);
9097 break;
9098 default:
9099 gcc_unreachable ();
9101 return;
9102 case 'O':
9103 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
9104 if (ASSEMBLER_DIALECT == ASM_ATT)
9106 switch (GET_MODE (x))
9108 case HImode: putc ('w', file); break;
9109 case SImode:
9110 case SFmode: putc ('l', file); break;
9111 case DImode:
9112 case DFmode: putc ('q', file); break;
9113 default: gcc_unreachable ();
9115 putc ('.', file);
9117 #endif
9118 return;
9119 case 'C':
9120 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
9121 return;
9122 case 'F':
9123 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
9124 if (ASSEMBLER_DIALECT == ASM_ATT)
9125 putc ('.', file);
9126 #endif
9127 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
9128 return;
9130 /* Like above, but reverse condition */
9131 case 'c':
9132 /* Check to see if argument to %c is really a constant
9133 and not a condition code which needs to be reversed. */
9134 if (!COMPARISON_P (x))
9136 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
9137 return;
9139 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
9140 return;
9141 case 'f':
9142 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
9143 if (ASSEMBLER_DIALECT == ASM_ATT)
9144 putc ('.', file);
9145 #endif
9146 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
9147 return;
9149 case 'H':
9150 /* It doesn't actually matter what mode we use here, as we're
9151 only going to use this for printing. */
9152 x = adjust_address_nv (x, DImode, 8);
9153 break;
9155 case '+':
9157 rtx x;
9159 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
9160 return;
9162 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
9163 if (x)
9165 int pred_val = INTVAL (XEXP (x, 0));
9167 if (pred_val < REG_BR_PROB_BASE * 45 / 100
9168 || pred_val > REG_BR_PROB_BASE * 55 / 100)
9170 int taken = pred_val > REG_BR_PROB_BASE / 2;
9171 int cputaken = final_forward_branch_p (current_output_insn) == 0;
9173 /* Emit hints only where the default branch prediction
9174 heuristics would fail. */
9175 if (taken != cputaken)
9177 /* We use 3e (DS) prefix for taken branches and
9178 2e (CS) prefix for not taken branches. */
9179 if (taken)
9180 fputs ("ds ; ", file);
9181 else
9182 fputs ("cs ; ", file);
9186 return;
9189 case 'Y':
9190 switch (GET_CODE (x))
9192 case NE:
9193 fputs ("neq", file);
9194 break;
9195 case EQ:
9196 fputs ("eq", file);
9197 break;
9198 case GE:
9199 case GEU:
9200 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
9201 break;
9202 case GT:
9203 case GTU:
9204 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
9205 break;
9206 case LE:
9207 case LEU:
9208 fputs ("le", file);
9209 break;
9210 case LT:
9211 case LTU:
9212 fputs ("lt", file);
9213 break;
9214 case UNORDERED:
9215 fputs ("unord", file);
9216 break;
9217 case ORDERED:
9218 fputs ("ord", file);
9219 break;
9220 case UNEQ:
9221 fputs ("ueq", file);
9222 break;
9223 case UNGE:
9224 fputs ("nlt", file);
9225 break;
9226 case UNGT:
9227 fputs ("nle", file);
9228 break;
9229 case UNLE:
9230 fputs ("ule", file);
9231 break;
9232 case UNLT:
9233 fputs ("ult", file);
9234 break;
9235 case LTGT:
9236 fputs ("une", file);
9237 break;
9238 default:
9239 gcc_unreachable ();
9241 return;
9243 case ';':
9244 #if TARGET_MACHO
9245 fputs (" ; ", file);
9246 #else
9247 fputc (' ', file);
9248 #endif
9249 return;
9251 default:
9252 output_operand_lossage ("invalid operand code '%c'", code);
9256 if (REG_P (x))
9257 print_reg (x, code, file);
9259 else if (MEM_P (x))
9261 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
9262 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
9263 && GET_MODE (x) != BLKmode)
9265 const char * size;
9266 switch (GET_MODE_SIZE (GET_MODE (x)))
9268 case 1: size = "BYTE"; break;
9269 case 2: size = "WORD"; break;
9270 case 4: size = "DWORD"; break;
9271 case 8: size = "QWORD"; break;
9272 case 12: size = "XWORD"; break;
9273 case 16:
9274 if (GET_MODE (x) == XFmode)
9275 size = "XWORD";
9276 else
9277 size = "XMMWORD";
9278 break;
9279 default:
9280 gcc_unreachable ();
9283 /* Check for explicit size override (codes 'b', 'w' and 'k') */
9284 if (code == 'b')
9285 size = "BYTE";
9286 else if (code == 'w')
9287 size = "WORD";
9288 else if (code == 'k')
9289 size = "DWORD";
9291 fputs (size, file);
9292 fputs (" PTR ", file);
9295 x = XEXP (x, 0);
9296 /* Avoid (%rip) for call operands. */
9297 if (CONSTANT_ADDRESS_P (x) && code == 'P'
9298 && !CONST_INT_P (x))
9299 output_addr_const (file, x);
9300 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
9301 output_operand_lossage ("invalid constraints for operand");
9302 else
9303 output_address (x);
9306 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
9308 REAL_VALUE_TYPE r;
9309 long l;
9311 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
9312 REAL_VALUE_TO_TARGET_SINGLE (r, l);
9314 if (ASSEMBLER_DIALECT == ASM_ATT)
9315 putc ('$', file);
9316 fprintf (file, "0x%08lx", (long unsigned int) l);
9319 /* These float cases don't actually occur as immediate operands. */
9320 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
9322 char dstr[30];
9324 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
9325 fprintf (file, "%s", dstr);
9328 else if (GET_CODE (x) == CONST_DOUBLE
9329 && GET_MODE (x) == XFmode)
9331 char dstr[30];
9333 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
9334 fprintf (file, "%s", dstr);
9337 else
9339 /* We have patterns that allow zero sets of memory, for instance.
9340 In 64-bit mode, we should probably support all 8-byte vectors,
9341 since we can in fact encode that into an immediate. */
9342 if (GET_CODE (x) == CONST_VECTOR)
9344 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
9345 x = const0_rtx;
9348 if (code != 'P')
9350 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
9352 if (ASSEMBLER_DIALECT == ASM_ATT)
9353 putc ('$', file);
9355 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
9356 || GET_CODE (x) == LABEL_REF)
9358 if (ASSEMBLER_DIALECT == ASM_ATT)
9359 putc ('$', file);
9360 else
9361 fputs ("OFFSET FLAT:", file);
9364 if (CONST_INT_P (x))
9365 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
9366 else if (flag_pic)
9367 output_pic_addr_const (file, x, code);
9368 else
9369 output_addr_const (file, x);
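/* For illustration (hypothetical output template): in a pattern such as
   "add%z0 %2, %0", the 'z' code appends the AT&T size suffix of
   operand 0 -- "l" for an SImode operand -- and "%h1" would print a
   high byte register name such as "%ah" for operand 1.  */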
9373 /* Print a memory operand whose address is ADDR. */
9375 void
9376 print_operand_address (FILE *file, rtx addr)
9378 struct ix86_address parts;
9379 rtx base, index, disp;
9380 int scale;
9381 int ok = ix86_decompose_address (addr, &parts);
9383 gcc_assert (ok);
9385 base = parts.base;
9386 index = parts.index;
9387 disp = parts.disp;
9388 scale = parts.scale;
9390 switch (parts.seg)
9392 case SEG_DEFAULT:
9393 break;
9394 case SEG_FS:
9395 case SEG_GS:
9396 if (ASSEMBLER_DIALECT == ASM_ATT)
9397 putc ('%', file);
9398 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
9399 break;
9400 default:
9401 gcc_unreachable ();
9404 /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode. */
9405 if (TARGET_64BIT && !base && !index)
9407 rtx symbol = disp;
9409 if (GET_CODE (disp) == CONST
9410 && GET_CODE (XEXP (disp, 0)) == PLUS
9411 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
9412 symbol = XEXP (XEXP (disp, 0), 0);
9414 if (GET_CODE (symbol) == LABEL_REF
9415 || (GET_CODE (symbol) == SYMBOL_REF
9416 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
9417 base = pc_rtx;
9419 if (!base && !index)
9421 /* A displacement-only address requires special attention. */
9423 if (CONST_INT_P (disp))
9425 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
9426 fputs ("ds:", file);
9427 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
9429 else if (flag_pic)
9430 output_pic_addr_const (file, disp, 0);
9431 else
9432 output_addr_const (file, disp);
9434 else
9436 if (ASSEMBLER_DIALECT == ASM_ATT)
9438 if (disp)
9440 if (flag_pic)
9441 output_pic_addr_const (file, disp, 0);
9442 else if (GET_CODE (disp) == LABEL_REF)
9443 output_asm_label (disp);
9444 else
9445 output_addr_const (file, disp);
9448 putc ('(', file);
9449 if (base)
9450 print_reg (base, 0, file);
9451 if (index)
9453 putc (',', file);
9454 print_reg (index, 0, file);
9455 if (scale != 1)
9456 fprintf (file, ",%d", scale);
9458 putc (')', file);
9460 else
9462 rtx offset = NULL_RTX;
9464 if (disp)
9466 /* Pull out the offset of a symbol; print any symbol itself. */
9467 if (GET_CODE (disp) == CONST
9468 && GET_CODE (XEXP (disp, 0)) == PLUS
9469 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
9471 offset = XEXP (XEXP (disp, 0), 1);
9472 disp = gen_rtx_CONST (VOIDmode,
9473 XEXP (XEXP (disp, 0), 0));
9476 if (flag_pic)
9477 output_pic_addr_const (file, disp, 0);
9478 else if (GET_CODE (disp) == LABEL_REF)
9479 output_asm_label (disp);
9480 else if (CONST_INT_P (disp))
9481 offset = disp;
9482 else
9483 output_addr_const (file, disp);
9486 putc ('[', file);
9487 if (base)
9489 print_reg (base, 0, file);
9490 if (offset)
9492 if (INTVAL (offset) >= 0)
9493 putc ('+', file);
9494 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
9497 else if (offset)
9498 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
9499 else
9500 putc ('0', file);
9502 if (index)
9504 putc ('+', file);
9505 print_reg (index, 0, file);
9506 if (scale != 1)
9507 fprintf (file, "*%d", scale);
9509 putc (']', file);
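/* As an illustration of the two dialect branches above, the same
   address prints as "disp(base,index,scale)" in AT&T syntax and as
   "[base+index*scale+disp]" in Intel syntax; with hypothetical
   registers and displacement:

	AT&T (ASM_ATT):		movl	16(%ebx,%esi,4), %eax
	Intel (ASM_INTEL):	mov	eax, DWORD PTR [ebx+esi*4+16]  */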
9514 bool
9515 output_addr_const_extra (FILE *file, rtx x)
9517 rtx op;
9519 if (GET_CODE (x) != UNSPEC)
9520 return false;
9522 op = XVECEXP (x, 0, 0);
9523 switch (XINT (x, 1))
9525 case UNSPEC_GOTTPOFF:
9526 output_addr_const (file, op);
9527 /* FIXME: This might be @TPOFF in Sun ld. */
9528 fputs ("@GOTTPOFF", file);
9529 break;
9530 case UNSPEC_TPOFF:
9531 output_addr_const (file, op);
9532 fputs ("@TPOFF", file);
9533 break;
9534 case UNSPEC_NTPOFF:
9535 output_addr_const (file, op);
9536 if (TARGET_64BIT)
9537 fputs ("@TPOFF", file);
9538 else
9539 fputs ("@NTPOFF", file);
9540 break;
9541 case UNSPEC_DTPOFF:
9542 output_addr_const (file, op);
9543 fputs ("@DTPOFF", file);
9544 break;
9545 case UNSPEC_GOTNTPOFF:
9546 output_addr_const (file, op);
9547 if (TARGET_64BIT)
9548 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
9549 "@GOTTPOFF(%rip)" : "@GOTTPOFF[rip]", file);
9550 else
9551 fputs ("@GOTNTPOFF", file);
9552 break;
9553 case UNSPEC_INDNTPOFF:
9554 output_addr_const (file, op);
9555 fputs ("@INDNTPOFF", file);
9556 break;
9558 default:
9559 return false;
9562 return true;
9565 /* Split one or more DImode RTL references into pairs of SImode
9566 references. The RTL can be REG, offsettable MEM, integer constant, or
9567 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
9568 split and "num" is its length. lo_half and hi_half are output arrays
9569 that parallel "operands". */
9571 void
9572 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
9574 while (num--)
9576 rtx op = operands[num];
9578 /* simplify_subreg refuses to split volatile memory addresses,
9579 but we still have to handle them. */
9580 if (MEM_P (op))
9582 lo_half[num] = adjust_address (op, SImode, 0);
9583 hi_half[num] = adjust_address (op, SImode, 4);
9585 else
9587 lo_half[num] = simplify_gen_subreg (SImode, op,
9588 GET_MODE (op) == VOIDmode
9589 ? DImode : GET_MODE (op), 0);
9590 hi_half[num] = simplify_gen_subreg (SImode, op,
9591 GET_MODE (op) == VOIDmode
9592 ? DImode : GET_MODE (op), 4);
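/* A C-level sketch of the non-MEM case above, for a 64-bit value on a
   little-endian target (illustrative only; split_di operates on RTL,
   and the helper below is not a GCC interface):  */

#include <stdint.h>

static void
split_u64 (uint64_t v, uint32_t *lo, uint32_t *hi)
{
  *lo = (uint32_t) v;		/* the subword at byte offset 0 */
  *hi = (uint32_t) (v >> 32);	/* the subword at byte offset 4 */
}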
9596 /* Split one or more TImode RTL references into pairs of DImode
9597 references. The RTL can be REG, offsettable MEM, integer constant, or
9598 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
9599 split and "num" is its length. lo_half and hi_half are output arrays
9600 that parallel "operands". */
9602 void
9603 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
9605 while (num--)
9607 rtx op = operands[num];
9609 /* simplify_subreg refuses to split volatile memory addresses, but we
9610 still have to handle them. */
9611 if (MEM_P (op))
9613 lo_half[num] = adjust_address (op, DImode, 0);
9614 hi_half[num] = adjust_address (op, DImode, 8);
9616 else
9618 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
9619 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
9624 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
9625 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
9626 is the expression of the binary operation. The output may either be
9627 emitted here, or returned to the caller, like all output_* functions.
9629 There is no guarantee that the operands are the same mode, as they
9630 might be within FLOAT or FLOAT_EXTEND expressions. */
9632 #ifndef SYSV386_COMPAT
9633 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
9634 wants to fix the assemblers because that causes incompatibility
9635 with gcc. No-one wants to fix gcc because that causes
9636 incompatibility with assemblers... You can use the option of
9637 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
9638 #define SYSV386_COMPAT 1
9639 #endif
9641 const char *
9642 output_387_binary_op (rtx insn, rtx *operands)
9644 static char buf[30];
9645 const char *p;
9646 const char *ssep;
9647 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
9649 #ifdef ENABLE_CHECKING
9650 /* Even if we do not want to check the inputs, this documents the input
9651 constraints, which helps in understanding the following code. */
9652 if (STACK_REG_P (operands[0])
9653 && ((REG_P (operands[1])
9654 && REGNO (operands[0]) == REGNO (operands[1])
9655 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
9656 || (REG_P (operands[2])
9657 && REGNO (operands[0]) == REGNO (operands[2])
9658 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
9659 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
9660 ; /* ok */
9661 else
9662 gcc_assert (is_sse);
9663 #endif
9665 switch (GET_CODE (operands[3]))
9667 case PLUS:
9668 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
9669 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
9670 p = "fiadd";
9671 else
9672 p = "fadd";
9673 ssep = "add";
9674 break;
9676 case MINUS:
9677 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
9678 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
9679 p = "fisub";
9680 else
9681 p = "fsub";
9682 ssep = "sub";
9683 break;
9685 case MULT:
9686 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
9687 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
9688 p = "fimul";
9689 else
9690 p = "fmul";
9691 ssep = "mul";
9692 break;
9694 case DIV:
9695 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
9696 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
9697 p = "fidiv";
9698 else
9699 p = "fdiv";
9700 ssep = "div";
9701 break;
9703 default:
9704 gcc_unreachable ();
9707 if (is_sse)
9709 strcpy (buf, ssep);
9710 if (GET_MODE (operands[0]) == SFmode)
9711 strcat (buf, "ss\t{%2, %0|%0, %2}");
9712 else
9713 strcat (buf, "sd\t{%2, %0|%0, %2}");
9714 return buf;
9716 strcpy (buf, p);
9718 switch (GET_CODE (operands[3]))
9720 case MULT:
9721 case PLUS:
9722 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
9724 rtx temp = operands[2];
9725 operands[2] = operands[1];
9726 operands[1] = temp;
9729 /* We now know operands[0] == operands[1]. */
9731 if (MEM_P (operands[2]))
9733 p = "%z2\t%2";
9734 break;
9737 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
9739 if (STACK_TOP_P (operands[0]))
9740 /* How is it that we are storing to a dead operand[2]?
9741 Well, presumably operands[1] is dead too. We can't
9742 store the result to st(0) as st(0) gets popped on this
9743 instruction. Instead store to operands[2] (which I
9744 think has to be st(1)). st(1) will be popped later.
9745 gcc <= 2.8.1 didn't have this check and generated
9746 assembly code that the Unixware assembler rejected. */
9747 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
9748 else
9749 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
9750 break;
9753 if (STACK_TOP_P (operands[0]))
9754 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
9755 else
9756 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
9757 break;
9759 case MINUS:
9760 case DIV:
9761 if (MEM_P (operands[1]))
9763 p = "r%z1\t%1";
9764 break;
9767 if (MEM_P (operands[2]))
9769 p = "%z2\t%2";
9770 break;
9773 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
9775 #if SYSV386_COMPAT
9776 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
9777 derived assemblers, confusingly reverse the direction of
9778 the operation for fsub{r} and fdiv{r} when the
9779 destination register is not st(0). The Intel assembler
9780 doesn't have this brain damage. Read !SYSV386_COMPAT to
9781 figure out what the hardware really does. */
9782 if (STACK_TOP_P (operands[0]))
9783 p = "{p\t%0, %2|rp\t%2, %0}";
9784 else
9785 p = "{rp\t%2, %0|p\t%0, %2}";
9786 #else
9787 if (STACK_TOP_P (operands[0]))
9788 /* As above for fmul/fadd, we can't store to st(0). */
9789 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
9790 else
9791 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
9792 #endif
9793 break;
9796 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
9798 #if SYSV386_COMPAT
9799 if (STACK_TOP_P (operands[0]))
9800 p = "{rp\t%0, %1|p\t%1, %0}";
9801 else
9802 p = "{p\t%1, %0|rp\t%0, %1}";
9803 #else
9804 if (STACK_TOP_P (operands[0]))
9805 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
9806 else
9807 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
9808 #endif
9809 break;
9812 if (STACK_TOP_P (operands[0]))
9814 if (STACK_TOP_P (operands[1]))
9815 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
9816 else
9817 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
9818 break;
9820 else if (STACK_TOP_P (operands[1]))
9822 #if SYSV386_COMPAT
9823 p = "{\t%1, %0|r\t%0, %1}";
9824 #else
9825 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
9826 #endif
9828 else
9830 #if SYSV386_COMPAT
9831 p = "{r\t%2, %0|\t%0, %2}";
9832 #else
9833 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
9834 #endif
9836 break;
9838 default:
9839 gcc_unreachable ();
9842 strcat (buf, p);
9843 return buf;
9846 /* Return needed mode for entity in optimize_mode_switching pass. */
9849 ix86_mode_needed (int entity, rtx insn)
9851 enum attr_i387_cw mode;
9853 /* The mode UNINITIALIZED is used to store the control word after a
9854 function call or an ASM pattern. The mode ANY specifies that the
9855 function has no requirements on the control word and makes no
9856 changes to the bits we are interested in. */
9858 if (CALL_P (insn)
9859 || (NONJUMP_INSN_P (insn)
9860 && (asm_noperands (PATTERN (insn)) >= 0
9861 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
9862 return I387_CW_UNINITIALIZED;
9864 if (recog_memoized (insn) < 0)
9865 return I387_CW_ANY;
9867 mode = get_attr_i387_cw (insn);
9869 switch (entity)
9871 case I387_TRUNC:
9872 if (mode == I387_CW_TRUNC)
9873 return mode;
9874 break;
9876 case I387_FLOOR:
9877 if (mode == I387_CW_FLOOR)
9878 return mode;
9879 break;
9881 case I387_CEIL:
9882 if (mode == I387_CW_CEIL)
9883 return mode;
9884 break;
9886 case I387_MASK_PM:
9887 if (mode == I387_CW_MASK_PM)
9888 return mode;
9889 break;
9891 default:
9892 gcc_unreachable ();
9895 return I387_CW_ANY;
9898 /* Output code to initialize control word copies used by trunc?f?i and
9899 rounding patterns. CURRENT_MODE is set to the current control word,
9900 while NEW_MODE is set to the new one. */
9902 void
9903 emit_i387_cw_initialization (int mode)
9905 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
9906 rtx new_mode;
9908 enum ix86_stack_slot slot;
9910 rtx reg = gen_reg_rtx (HImode);
9912 emit_insn (gen_x86_fnstcw_1 (stored_mode));
9913 emit_move_insn (reg, copy_rtx (stored_mode));
9915 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
9917 switch (mode)
9919 case I387_CW_TRUNC:
9920 /* round toward zero (truncate) */
9921 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
9922 slot = SLOT_CW_TRUNC;
9923 break;
9925 case I387_CW_FLOOR:
9926 /* round down toward -oo */
9927 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
9928 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
9929 slot = SLOT_CW_FLOOR;
9930 break;
9932 case I387_CW_CEIL:
9933 /* round up toward +oo */
9934 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
9935 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
9936 slot = SLOT_CW_CEIL;
9937 break;
9939 case I387_CW_MASK_PM:
9940 /* mask precision exception for nearbyint() */
9941 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
9942 slot = SLOT_CW_MASK_PM;
9943 break;
9945 default:
9946 gcc_unreachable ();
9949 else
9951 switch (mode)
9953 case I387_CW_TRUNC:
9954 /* round toward zero (truncate) */
9955 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
9956 slot = SLOT_CW_TRUNC;
9957 break;
9959 case I387_CW_FLOOR:
9960 /* round down toward -oo */
9961 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
9962 slot = SLOT_CW_FLOOR;
9963 break;
9965 case I387_CW_CEIL:
9966 /* round up toward +oo */
9967 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
9968 slot = SLOT_CW_CEIL;
9969 break;
9971 case I387_CW_MASK_PM:
9972 /* mask precision exception for nearbyint() */
9973 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
9974 slot = SLOT_CW_MASK_PM;
9975 break;
9977 default:
9978 gcc_unreachable ();
9982 gcc_assert (slot < MAX_386_STACK_LOCALS);
9984 new_mode = assign_386_stack_local (HImode, slot);
9985 emit_move_insn (new_mode, reg);
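/* For reference, the i387 control word fields manipulated above.  The
   encodings are architectural; the names below are illustrative, not
   GCC interfaces:  */

#define X87_CW_RC_MASK		0x0c00	/* rounding control, bits 10-11 */
#define X87_CW_RC_NEAREST	0x0000	/* round to nearest (default) */
#define X87_CW_RC_DOWN		0x0400	/* toward -oo: floor */
#define X87_CW_RC_UP		0x0800	/* toward +oo: ceil */
#define X87_CW_RC_TRUNC		0x0c00	/* toward zero: trunc */
#define X87_CW_PM		0x0020	/* precision mask: nearbyint */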
9988 /* Output code for INSN to convert a float to a signed int. OPERANDS
9989 are the insn operands. The output may be [HSD]Imode and the input
9990 operand may be [SDX]Fmode. */
9992 const char *
9993 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
9995 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
9996 int dimode_p = GET_MODE (operands[0]) == DImode;
9997 int round_mode = get_attr_i387_cw (insn);
9999 /* Jump through a hoop or two for DImode, since the hardware has no
10000 non-popping instruction. We used to do this a different way, but
10001 that was somewhat fragile and broke with post-reload splitters. */
10002 if ((dimode_p || fisttp) && !stack_top_dies)
10003 output_asm_insn ("fld\t%y1", operands);
10005 gcc_assert (STACK_TOP_P (operands[1]));
10006 gcc_assert (MEM_P (operands[0]));
10007 gcc_assert (GET_MODE (operands[1]) != TFmode);
10009 if (fisttp)
10010 output_asm_insn ("fisttp%z0\t%0", operands);
10011 else
10013 if (round_mode != I387_CW_ANY)
10014 output_asm_insn ("fldcw\t%3", operands);
10015 if (stack_top_dies || dimode_p)
10016 output_asm_insn ("fistp%z0\t%0", operands);
10017 else
10018 output_asm_insn ("fist%z0\t%0", operands);
10019 if (round_mode != I387_CW_ANY)
10020 output_asm_insn ("fldcw\t%2", operands);
10023 return "";
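/* For a DImode result, the non-fisttp path above therefore emits a
   sequence along these lines, using the control word slots set up by
   emit_i387_cw_initialization:

	fldcw	%3	# switch the rounding mode to truncate
	fistp%z0 %0	# store the integer and pop st(0)
	fldcw	%2	# restore the caller's control word  */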
10026 /* Output code for x87 ffreep insn. The OPNO argument, which may only
10027 have the values zero or one, indicates the ffreep insn's operand
10028 from the OPERANDS array. */
10030 static const char *
10031 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
10033 if (TARGET_USE_FFREEP)
10034 #if HAVE_AS_IX86_FFREEP
10035 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
10036 #else
10038 static char retval[] = ".word\t0xc_df";
10039 int regno = REGNO (operands[opno]);
10041 gcc_assert (FP_REGNO_P (regno));
10043 retval[9] = '0' + (regno - FIRST_STACK_REG);
10044 return retval;
10046 #endif
10048 return opno ? "fstp\t%y1" : "fstp\t%y0";
10052 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
10053 should be used. UNORDERED_P is true when fucom should be used. */
10055 const char *
10056 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
10058 int stack_top_dies;
10059 rtx cmp_op0, cmp_op1;
10060 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
10062 if (eflags_p)
10064 cmp_op0 = operands[0];
10065 cmp_op1 = operands[1];
10067 else
10069 cmp_op0 = operands[1];
10070 cmp_op1 = operands[2];
10073 if (is_sse)
10075 if (GET_MODE (operands[0]) == SFmode)
10076 if (unordered_p)
10077 return "ucomiss\t{%1, %0|%0, %1}";
10078 else
10079 return "comiss\t{%1, %0|%0, %1}";
10080 else
10081 if (unordered_p)
10082 return "ucomisd\t{%1, %0|%0, %1}";
10083 else
10084 return "comisd\t{%1, %0|%0, %1}";
10087 gcc_assert (STACK_TOP_P (cmp_op0));
10089 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
10091 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
10093 if (stack_top_dies)
10095 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
10096 return output_387_ffreep (operands, 1);
10098 else
10099 return "ftst\n\tfnstsw\t%0";
10102 if (STACK_REG_P (cmp_op1)
10103 && stack_top_dies
10104 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
10105 && REGNO (cmp_op1) != FIRST_STACK_REG)
10107 /* If the top of the 387 stack dies, and the other operand is
10108 also a stack register that dies, then this must be an
10109 `fcompp' float compare. */
10111 if (eflags_p)
10113 /* There is no double popping fcomi variant. Fortunately,
10114 eflags is immune from the fstp's cc clobbering. */
10115 if (unordered_p)
10116 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
10117 else
10118 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
10119 return output_387_ffreep (operands, 0);
10121 else
10123 if (unordered_p)
10124 return "fucompp\n\tfnstsw\t%0";
10125 else
10126 return "fcompp\n\tfnstsw\t%0";
10129 else
10131 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
10133 static const char * const alt[16] =
10135 "fcom%z2\t%y2\n\tfnstsw\t%0",
10136 "fcomp%z2\t%y2\n\tfnstsw\t%0",
10137 "fucom%z2\t%y2\n\tfnstsw\t%0",
10138 "fucomp%z2\t%y2\n\tfnstsw\t%0",
10140 "ficom%z2\t%y2\n\tfnstsw\t%0",
10141 "ficomp%z2\t%y2\n\tfnstsw\t%0",
10142 NULL,
10143 NULL,
10145 "fcomi\t{%y1, %0|%0, %y1}",
10146 "fcomip\t{%y1, %0|%0, %y1}",
10147 "fucomi\t{%y1, %0|%0, %y1}",
10148 "fucomip\t{%y1, %0|%0, %y1}",
10150 NULL,
10151 NULL,
10152 NULL,
10153 NULL
10156 int mask;
10157 const char *ret;
10159 mask = eflags_p << 3;
10160 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
10161 mask |= unordered_p << 1;
10162 mask |= stack_top_dies;
10164 gcc_assert (mask < 16);
10165 ret = alt[mask];
10166 gcc_assert (ret);
10168 return ret;
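/* A worked index into the table above: an fcomi-style unordered
   compare (eflags_p = 1, unordered_p = 1) on FP operands (intmode = 0)
   where st(0) dies (stack_top_dies = 1) yields
   mask = (1 << 3) | (0 << 2) | (1 << 1) | 1 = 11, selecting
   "fucomip\t{%y1, %0|%0, %y1}".  */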
10172 void
10173 ix86_output_addr_vec_elt (FILE *file, int value)
10175 const char *directive = ASM_LONG;
10177 #ifdef ASM_QUAD
10178 if (TARGET_64BIT)
10179 directive = ASM_QUAD;
10180 #else
10181 gcc_assert (!TARGET_64BIT);
10182 #endif
10184 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
10187 void
10188 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
10190 const char *directive = ASM_LONG;
10192 #ifdef ASM_QUAD
10193 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
10194 directive = ASM_QUAD;
10195 #else
10196 gcc_assert (!TARGET_64BIT);
10197 #endif
10198 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
10199 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
10200 fprintf (file, "%s%s%d-%s%d\n",
10201 directive, LPREFIX, value, LPREFIX, rel);
10202 else if (HAVE_AS_GOTOFF_IN_DATA)
10203 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
10204 #if TARGET_MACHO
10205 else if (TARGET_MACHO)
10207 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
10208 machopic_output_function_base_name (file);
10209 fprintf(file, "\n");
10211 #endif
10212 else
10213 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
10214 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
10217 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
10218 for the target. */
10220 void
10221 ix86_expand_clear (rtx dest)
10223 rtx tmp;
10225 /* We play register width games, which are only valid after reload. */
10226 gcc_assert (reload_completed);
10228 /* Avoid HImode and its attendant prefix byte. */
10229 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
10230 dest = gen_rtx_REG (SImode, REGNO (dest));
10231 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
10233 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
10234 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
10236 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
10237 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
10240 emit_insn (tmp);
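/* The two forms this chooses between, for example with %eax:

	movl	$0, %eax	# 5 bytes, leaves EFLAGS intact
	xorl	%eax, %eax	# 2 bytes, clobbers EFLAGS; hence the
				# CLOBBER attached above  */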
10243 /* X is an unchanging MEM. If it is a constant pool reference, return
10244 the constant pool rtx, else NULL. */
10247 maybe_get_pool_constant (rtx x)
10249 x = ix86_delegitimize_address (XEXP (x, 0));
10251 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
10252 return get_pool_constant (x);
10254 return NULL_RTX;
10257 void
10258 ix86_expand_move (enum machine_mode mode, rtx operands[])
10260 rtx op0, op1;
10261 enum tls_model model;
10263 op0 = operands[0];
10264 op1 = operands[1];
10266 if (GET_CODE (op1) == SYMBOL_REF)
10268 model = SYMBOL_REF_TLS_MODEL (op1);
10269 if (model)
10271 op1 = legitimize_tls_address (op1, model, true);
10272 op1 = force_operand (op1, op0);
10273 if (op1 == op0)
10274 return;
10276 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
10277 && SYMBOL_REF_DLLIMPORT_P (op1))
10278 op1 = legitimize_dllimport_symbol (op1, false);
10280 else if (GET_CODE (op1) == CONST
10281 && GET_CODE (XEXP (op1, 0)) == PLUS
10282 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
10284 rtx addend = XEXP (XEXP (op1, 0), 1);
10285 rtx symbol = XEXP (XEXP (op1, 0), 0);
10286 rtx tmp = NULL;
10288 model = SYMBOL_REF_TLS_MODEL (symbol);
10289 if (model)
10290 tmp = legitimize_tls_address (symbol, model, true);
10291 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
10292 && SYMBOL_REF_DLLIMPORT_P (symbol))
10293 tmp = legitimize_dllimport_symbol (symbol, true);
10295 if (tmp)
10297 tmp = force_operand (tmp, NULL);
10298 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
10299 op0, 1, OPTAB_DIRECT);
10300 if (tmp == op0)
10301 return;
10305 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
10307 if (TARGET_MACHO && !TARGET_64BIT)
10309 #if TARGET_MACHO
10310 if (MACHOPIC_PURE)
10312 rtx temp = ((reload_in_progress
10313 || ((op0 && REG_P (op0))
10314 && mode == Pmode))
10315 ? op0 : gen_reg_rtx (Pmode));
10316 op1 = machopic_indirect_data_reference (op1, temp);
10317 op1 = machopic_legitimize_pic_address (op1, mode,
10318 temp == op1 ? 0 : temp);
10320 else if (MACHOPIC_INDIRECT)
10321 op1 = machopic_indirect_data_reference (op1, 0);
10322 if (op0 == op1)
10323 return;
10324 #endif
10326 else
10328 if (MEM_P (op0))
10329 op1 = force_reg (Pmode, op1);
10330 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
10332 rtx reg = !can_create_pseudo_p () ? op0 : NULL_RTX;
10333 op1 = legitimize_pic_address (op1, reg);
10334 if (op0 == op1)
10335 return;
10339 else
10341 if (MEM_P (op0)
10342 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
10343 || !push_operand (op0, mode))
10344 && MEM_P (op1))
10345 op1 = force_reg (mode, op1);
10347 if (push_operand (op0, mode)
10348 && ! general_no_elim_operand (op1, mode))
10349 op1 = copy_to_mode_reg (mode, op1);
10351 /* Force large constants in 64-bit compilation into registers
10352 to get them CSEed. */
10353 if (can_create_pseudo_p ()
10354 && (mode == DImode) && TARGET_64BIT
10355 && immediate_operand (op1, mode)
10356 && !x86_64_zext_immediate_operand (op1, VOIDmode)
10357 && !register_operand (op0, mode)
10358 && optimize)
10359 op1 = copy_to_mode_reg (mode, op1);
10361 if (can_create_pseudo_p ()
10362 && FLOAT_MODE_P (mode)
10363 && GET_CODE (op1) == CONST_DOUBLE)
10365 /* If we are loading a floating point constant to a register,
10366 force the value to memory now, since we'll get better code
10367 out of the back end. */
10369 op1 = validize_mem (force_const_mem (mode, op1));
10370 if (!register_operand (op0, mode))
10372 rtx temp = gen_reg_rtx (mode);
10373 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
10374 emit_move_insn (op0, temp);
10375 return;
10380 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
10383 void
10384 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
10386 rtx op0 = operands[0], op1 = operands[1];
10387 unsigned int align = GET_MODE_ALIGNMENT (mode);
10389 /* Force constants other than zero into memory. We do not know how
10390 the instructions used to build constants modify the upper 64 bits
10391 of the register; once we have that information, we may be able
10392 to handle some of them more efficiently. */
10393 if (can_create_pseudo_p ()
10394 && register_operand (op0, mode)
10395 && (CONSTANT_P (op1)
10396 || (GET_CODE (op1) == SUBREG
10397 && CONSTANT_P (SUBREG_REG (op1))))
10398 && standard_sse_constant_p (op1) <= 0)
10399 op1 = validize_mem (force_const_mem (mode, op1));
10401 /* TDmode values are passed as TImode on the stack. TImode values
10402 are moved via xmm registers, and moving them to the stack can result
10403 in unaligned memory access. Use ix86_expand_vector_move_misalign()
10404 if the memory operand is not aligned correctly. */
10405 if (can_create_pseudo_p ()
10406 && (mode == TImode) && !TARGET_64BIT
10407 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
10408 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
10410 rtx tmp[2];
10412 /* ix86_expand_vector_move_misalign() does not like constants ... */
10413 if (CONSTANT_P (op1)
10414 || (GET_CODE (op1) == SUBREG
10415 && CONSTANT_P (SUBREG_REG (op1))))
10416 op1 = validize_mem (force_const_mem (mode, op1));
10418 /* ... nor both arguments in memory. */
10419 if (!register_operand (op0, mode)
10420 && !register_operand (op1, mode))
10421 op1 = force_reg (mode, op1);
10423 tmp[0] = op0; tmp[1] = op1;
10424 ix86_expand_vector_move_misalign (mode, tmp);
10425 return;
10428 /* If neither operand is a register, copy operand 1 through a register. */
10429 if (can_create_pseudo_p ()
10430 && !register_operand (op0, mode)
10431 && !register_operand (op1, mode))
10433 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
10434 return;
10437 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
10440 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
10441 straight to ix86_expand_vector_move. */
10442 /* Code generation for scalar reg-reg moves of single and double precision data:
10443 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
10444 movaps reg, reg
10445 else
10446 movss reg, reg
10447 if (x86_sse_partial_reg_dependency == true)
10448 movapd reg, reg
10449 else
10450 movsd reg, reg
10452 Code generation for scalar loads of double precision data:
10453 if (x86_sse_split_regs == true)
10454 movlpd mem, reg (gas syntax)
10455 else
10456 movsd mem, reg
10458 Code generation for unaligned packed loads of single precision data
10459 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
10460 if (x86_sse_unaligned_move_optimal)
10461 movups mem, reg
10463 if (x86_sse_partial_reg_dependency == true)
10465 xorps reg, reg
10466 movlps mem, reg
10467 movhps mem+8, reg
10469 else
10471 movlps mem, reg
10472 movhps mem+8, reg
10475 Code generation for unaligned packed loads of double precision data
10476 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
10477 if (x86_sse_unaligned_move_optimal)
10478 movupd mem, reg
10480 if (x86_sse_split_regs == true)
10482 movlpd mem, reg
10483 movhpd mem+8, reg
10485 else
10487 movsd mem, reg
10488 movhpd mem+8, reg
10492 void
10493 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
10495 rtx op0, op1, m;
10497 op0 = operands[0];
10498 op1 = operands[1];
10500 if (MEM_P (op1))
10502 /* If we're optimizing for size, movups is the smallest. */
10503 if (optimize_size)
10505 op0 = gen_lowpart (V4SFmode, op0);
10506 op1 = gen_lowpart (V4SFmode, op1);
10507 emit_insn (gen_sse_movups (op0, op1));
10508 return;
10511 /* ??? If we have typed data, then it would appear that using
10512 movdqu is the only way to get unaligned data loaded with
10513 integer type. */
10514 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
10516 op0 = gen_lowpart (V16QImode, op0);
10517 op1 = gen_lowpart (V16QImode, op1);
10518 emit_insn (gen_sse2_movdqu (op0, op1));
10519 return;
10522 if (TARGET_SSE2 && mode == V2DFmode)
10524 rtx zero;
10526 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
10528 op0 = gen_lowpart (V2DFmode, op0);
10529 op1 = gen_lowpart (V2DFmode, op1);
10530 emit_insn (gen_sse2_movupd (op0, op1));
10531 return;
10534 /* When SSE registers are split into halves, we can avoid
10535 writing to the top half twice. */
10536 if (TARGET_SSE_SPLIT_REGS)
10538 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
10539 zero = op0;
10541 else
10543 /* ??? Not sure about the best option for the Intel chips.
10544 The following would seem to satisfy; the register is
10545 entirely cleared, breaking the dependency chain. We
10546 then store to the upper half, with a dependency depth
10547 of one. A rumor has it that Intel recommends two movsd
10548 followed by an unpacklpd, but this is unconfirmed. And
10549 given that the dependency depth of the unpacklpd would
10550 still be one, I'm not sure why this would be better. */
10551 zero = CONST0_RTX (V2DFmode);
10554 m = adjust_address (op1, DFmode, 0);
10555 emit_insn (gen_sse2_loadlpd (op0, zero, m));
10556 m = adjust_address (op1, DFmode, 8);
10557 emit_insn (gen_sse2_loadhpd (op0, op0, m));
10559 else
10561 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
10563 op0 = gen_lowpart (V4SFmode, op0);
10564 op1 = gen_lowpart (V4SFmode, op1);
10565 emit_insn (gen_sse_movups (op0, op1));
10566 return;
10569 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
10570 emit_move_insn (op0, CONST0_RTX (mode));
10571 else
10572 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
10574 if (mode != V4SFmode)
10575 op0 = gen_lowpart (V4SFmode, op0);
10576 m = adjust_address (op1, V2SFmode, 0);
10577 emit_insn (gen_sse_loadlps (op0, op0, m));
10578 m = adjust_address (op1, V2SFmode, 8);
10579 emit_insn (gen_sse_loadhps (op0, op0, m));
10582 else if (MEM_P (op0))
10584 /* If we're optimizing for size, movups is the smallest. */
10585 if (optimize_size)
10587 op0 = gen_lowpart (V4SFmode, op0);
10588 op1 = gen_lowpart (V4SFmode, op1);
10589 emit_insn (gen_sse_movups (op0, op1));
10590 return;
10593 /* ??? Similar to above, only less clear because of quote
10594 typeless stores unquote. */
10595 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
10596 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
10598 op0 = gen_lowpart (V16QImode, op0);
10599 op1 = gen_lowpart (V16QImode, op1);
10600 emit_insn (gen_sse2_movdqu (op0, op1));
10601 return;
10604 if (TARGET_SSE2 && mode == V2DFmode)
10606 m = adjust_address (op0, DFmode, 0);
10607 emit_insn (gen_sse2_storelpd (m, op1));
10608 m = adjust_address (op0, DFmode, 8);
10609 emit_insn (gen_sse2_storehpd (m, op1));
10611 else
10613 if (mode != V4SFmode)
10614 op1 = gen_lowpart (V4SFmode, op1);
10615 m = adjust_address (op0, V2SFmode, 0);
10616 emit_insn (gen_sse_storelps (m, op1));
10617 m = adjust_address (op0, V2SFmode, 8);
10618 emit_insn (gen_sse_storehps (m, op1));
10621 else
10622 gcc_unreachable ();
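/* For a misaligned V2DFmode load, the paths above come down to either
   (address register hypothetical):

	movupd	(%eax), %xmm0	# TARGET_SSE_UNALIGNED_MOVE_OPTIMAL

   or the split form, after zeroing or clobbering the destination:

	movsd	(%eax), %xmm0	# fill the low half
	movhpd	8(%eax), %xmm0	# fill the high half  */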
10625 /* Expand a push in MODE. This is some mode for which we do not support
10626 proper push instructions, at least from the registers that we expect
10627 the value to live in. */
10629 void
10630 ix86_expand_push (enum machine_mode mode, rtx x)
10632 rtx tmp;
10634 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
10635 GEN_INT (-GET_MODE_SIZE (mode)),
10636 stack_pointer_rtx, 1, OPTAB_DIRECT);
10637 if (tmp != stack_pointer_rtx)
10638 emit_move_insn (stack_pointer_rtx, tmp);
10640 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
10641 emit_move_insn (tmp, x);
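/* E.g. an XFmode push on x86_32, for which no single push instruction
   exists, becomes something like (size and register illustrative):

	subl	$12, %esp	# the expand_simple_binop above
	fstpt	(%esp)		# the emit_move_insn above  */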
10644 /* Helper function of ix86_fixup_binary_operands to canonicalize
10645 operand order. Returns true if the operands should be swapped. */
10647 static bool
10648 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
10649 rtx operands[])
10651 rtx dst = operands[0];
10652 rtx src1 = operands[1];
10653 rtx src2 = operands[2];
10655 /* If the operation is not commutative, we can't do anything. */
10656 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
10657 return false;
10659 /* Highest priority is that src1 should match dst. */
10660 if (rtx_equal_p (dst, src1))
10661 return false;
10662 if (rtx_equal_p (dst, src2))
10663 return true;
10665 /* Next highest priority is that immediate constants come second. */
10666 if (immediate_operand (src2, mode))
10667 return false;
10668 if (immediate_operand (src1, mode))
10669 return true;
10671 /* Lowest priority is that memory references should come second. */
10672 if (MEM_P (src2))
10673 return false;
10674 if (MEM_P (src1))
10675 return true;
10677 return false;
10681 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
10682 destination to use for the operation. If different from the true
10683 destination in operands[0], a copy operation will be required. */
10686 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
10687 rtx operands[])
10689 rtx dst = operands[0];
10690 rtx src1 = operands[1];
10691 rtx src2 = operands[2];
10693 /* Canonicalize operand order. */
10694 if (ix86_swap_binary_operands_p (code, mode, operands))
10696 rtx temp;
10698 /* It is invalid to swap operands of different modes. */
10699 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
10701 temp = src1;
10702 src1 = src2;
10703 src2 = temp;
10706 /* Both source operands cannot be in memory. */
10707 if (MEM_P (src1) && MEM_P (src2))
10709 /* Optimization: Only read from memory once. */
10710 if (rtx_equal_p (src1, src2))
10712 src2 = force_reg (mode, src2);
10713 src1 = src2;
10715 else
10716 src2 = force_reg (mode, src2);
10719 /* If the destination is memory, and we do not have matching source
10720 operands, do things in registers. */
10721 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
10722 dst = gen_reg_rtx (mode);
10724 /* Source 1 cannot be a constant. */
10725 if (CONSTANT_P (src1))
10726 src1 = force_reg (mode, src1);
10728 /* Source 1 cannot be a non-matching memory. */
10729 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
10730 src1 = force_reg (mode, src1);
10732 operands[1] = src1;
10733 operands[2] = src2;
10734 return dst;
10737 /* Similarly, but assume that the destination has already been
10738 set up properly. */
10740 void
10741 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
10742 enum machine_mode mode, rtx operands[])
10744 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
10745 gcc_assert (dst == operands[0]);
10748 /* Attempt to expand a binary operator. Make the expansion closer to the
10749 actual machine than just general_operand, which would allow 3 separate
10750 memory references (one output, two input) in a single insn. */
10752 void
10753 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
10754 rtx operands[])
10756 rtx src1, src2, dst, op, clob;
10758 dst = ix86_fixup_binary_operands (code, mode, operands);
10759 src1 = operands[1];
10760 src2 = operands[2];
10762 /* Emit the instruction. */
10764 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
10765 if (reload_in_progress)
10767 /* Reload doesn't know about the flags register, and doesn't know that
10768 it doesn't want to clobber it. We can only do this with PLUS. */
10769 gcc_assert (code == PLUS);
10770 emit_insn (op);
10772 else
10774 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
10775 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
10778 /* Fix up the destination if needed. */
10779 if (dst != operands[0])
10780 emit_move_insn (operands[0], dst);
10783 /* Return TRUE or FALSE depending on whether the binary operator meets the
10784 appropriate constraints. */
10787 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
10788 rtx operands[3])
10790 rtx dst = operands[0];
10791 rtx src1 = operands[1];
10792 rtx src2 = operands[2];
10794 /* Both source operands cannot be in memory. */
10795 if (MEM_P (src1) && MEM_P (src2))
10796 return 0;
10798 /* Canonicalize operand order for commutative operators. */
10799 if (ix86_swap_binary_operands_p (code, mode, operands))
10801 rtx temp = src1;
10802 src1 = src2;
10803 src2 = temp;
10806 /* If the destination is memory, we must have a matching source operand. */
10807 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
10808 return 0;
10810 /* Source 1 cannot be a constant. */
10811 if (CONSTANT_P (src1))
10812 return 0;
10814 /* Source 1 cannot be a non-matching memory. */
10815 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
10816 return 0;
10818 return 1;
10821 /* Attempt to expand a unary operator. Make the expansion closer to the
10822 actual machine than just general_operand, which would allow 2 separate
10823 memory references (one output, one input) in a single insn. */
10825 void
10826 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
10827 rtx operands[])
10829 int matching_memory;
10830 rtx src, dst, op, clob;
10832 dst = operands[0];
10833 src = operands[1];
10835 /* If the destination is memory, and we do not have matching source
10836 operands, do things in registers. */
10837 matching_memory = 0;
10838 if (MEM_P (dst))
10840 if (rtx_equal_p (dst, src))
10841 matching_memory = 1;
10842 else
10843 dst = gen_reg_rtx (mode);
10846 /* When source operand is memory, destination must match. */
10847 if (MEM_P (src) && !matching_memory)
10848 src = force_reg (mode, src);
10850 /* Emit the instruction. */
10852 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
10853 if (reload_in_progress || code == NOT)
10855 /* Reload doesn't know about the flags register, and doesn't know that
10856 it doesn't want to clobber it. */
10857 gcc_assert (code == NOT);
10858 emit_insn (op);
10860 else
10862 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
10863 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
10866 /* Fix up the destination if needed. */
10867 if (dst != operands[0])
10868 emit_move_insn (operands[0], dst);
10871 /* Return TRUE or FALSE depending on whether the unary operator meets the
10872 appropriate constraints. */
10875 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
10876 enum machine_mode mode ATTRIBUTE_UNUSED,
10877 rtx operands[2] ATTRIBUTE_UNUSED)
10879 /* If one of operands is memory, source and destination must match. */
10880 if ((MEM_P (operands[0])
10881 || MEM_P (operands[1]))
10882 && ! rtx_equal_p (operands[0], operands[1]))
10883 return FALSE;
10884 return TRUE;
10887 /* Post-reload splitter for converting an SF or DFmode value in an
10888 SSE register into an unsigned SImode. */
10890 void
10891 ix86_split_convert_uns_si_sse (rtx operands[])
10893 enum machine_mode vecmode;
10894 rtx value, large, zero_or_two31, input, two31, x;
10896 large = operands[1];
10897 zero_or_two31 = operands[2];
10898 input = operands[3];
10899 two31 = operands[4];
10900 vecmode = GET_MODE (large);
10901 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
10903 /* Load up the value into the low element. We must ensure that the other
10904 elements are valid floats -- zero is the easiest such value. */
10905 if (MEM_P (input))
10907 if (vecmode == V4SFmode)
10908 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
10909 else
10910 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
10912 else
10914 input = gen_rtx_REG (vecmode, REGNO (input));
10915 emit_move_insn (value, CONST0_RTX (vecmode));
10916 if (vecmode == V4SFmode)
10917 emit_insn (gen_sse_movss (value, value, input));
10918 else
10919 emit_insn (gen_sse2_movsd (value, value, input));
10922 emit_move_insn (large, two31);
10923 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
10925 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
10926 emit_insn (gen_rtx_SET (VOIDmode, large, x));
10928 x = gen_rtx_AND (vecmode, zero_or_two31, large);
10929 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
10931 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
10932 emit_insn (gen_rtx_SET (VOIDmode, value, x));
10934 large = gen_rtx_REG (V4SImode, REGNO (large));
10935 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
10937 x = gen_rtx_REG (V4SImode, REGNO (value));
10938 if (vecmode == V4SFmode)
10939 emit_insn (gen_sse2_cvttps2dq (x, value));
10940 else
10941 emit_insn (gen_sse2_cvttpd2dq (x, value));
10942 value = x;
10944 emit_insn (gen_xorv4si3 (value, value, large));
10947 /* Convert an unsigned DImode value into a DFmode, using only SSE.
10948 Expects the 64-bit DImode to be supplied in a pair of integral
10949 registers. Requires SSE2; will use SSE3 if available. For x86_32,
10950 -mfpmath=sse, !optimize_size only. */
10952 void
10953 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
10955 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
10956 rtx int_xmm, fp_xmm;
10957 rtx biases, exponents;
10958 rtx x;
10960 int_xmm = gen_reg_rtx (V4SImode);
10961 if (TARGET_INTER_UNIT_MOVES)
10962 emit_insn (gen_movdi_to_sse (int_xmm, input));
10963 else if (TARGET_SSE_SPLIT_REGS)
10965 emit_insn (gen_rtx_CLOBBER (VOIDmode, int_xmm));
10966 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
10968 else
10970 x = gen_reg_rtx (V2DImode);
10971 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
10972 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
10975 x = gen_rtx_CONST_VECTOR (V4SImode,
10976 gen_rtvec (4, GEN_INT (0x43300000UL),
10977 GEN_INT (0x45300000UL),
10978 const0_rtx, const0_rtx));
10979 exponents = validize_mem (force_const_mem (V4SImode, x));
10981 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
10982 emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, exponents));
10984 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
10985 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
10986 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
10987 (0x1.0p84 + double(fp_value_hi_xmm)).
10988 Note these exponents differ by 32. */
10990 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
10992 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
10993 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
10994 real_ldexp (&bias_lo_rvt, &dconst1, 52);
10995 real_ldexp (&bias_hi_rvt, &dconst1, 84);
10996 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
10997 x = const_double_from_real_value (bias_hi_rvt, DFmode);
10998 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
10999 biases = validize_mem (force_const_mem (V2DFmode, biases));
11000 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
11002 /* Add the upper and lower DFmode values together. */
11003 if (TARGET_SSE3)
11004 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
11005 else
11007 x = copy_to_mode_reg (V2DFmode, fp_xmm);
11008 emit_insn (gen_sse2_unpckhpd (fp_xmm, fp_xmm, fp_xmm));
11009 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
11012 ix86_expand_vector_extract (false, target, fp_xmm, 0);
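/* A standalone scalar sketch of the exponent-bias trick used above
   (assumes little-endian IEEE doubles; the helper name is illustrative,
   not a GCC interface):  */

#include <stdint.h>
#include <string.h>

static double
uns64_to_double_via_biases (uint64_t x)
{
  /* 0x433 and 0x453 are the biased exponents of 0x1.0p52 and 0x1.0p84. */
  uint64_t lo_bits = 0x4330000000000000ULL | (x & 0xffffffffULL);
  uint64_t hi_bits = 0x4530000000000000ULL | (x >> 32);
  double lo, hi;

  memcpy (&lo, &lo_bits, sizeof lo);	/* 0x1.0p52 + low word */
  memcpy (&hi, &hi_bits, sizeof hi);	/* 0x1.0p84 + high word * 2^32 */

  /* Both bias subtractions are exact; only the final add rounds.  */
  return (hi - 0x1.0p84) + (lo - 0x1.0p52);
}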
11015 /* Not used, but eases macroization of patterns. */
11016 void
11017 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
11018 rtx input ATTRIBUTE_UNUSED)
11020 gcc_unreachable ();
11023 /* Convert an unsigned SImode value into a DFmode. Only currently used
11024 for SSE, but applicable anywhere. */
11026 void
11027 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
11029 REAL_VALUE_TYPE TWO31r;
11030 rtx x, fp;
11032 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
11033 NULL, 1, OPTAB_DIRECT);
11035 fp = gen_reg_rtx (DFmode);
11036 emit_insn (gen_floatsidf2 (fp, x));
11038 real_ldexp (&TWO31r, &dconst1, 31);
11039 x = const_double_from_real_value (TWO31r, DFmode);
11041 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
11042 if (x != target)
11043 emit_move_insn (target, x);
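/* A scalar equivalent of the expansion above (illustrative; relies on
   two's complement wraparound, as on x86):  */

#include <stdint.h>

static double
uns32_to_double (uint32_t x)
{
  /* Shift into the signed range, convert, then add the 2^31 back.  */
  return (double) (int32_t) (x - 0x80000000u) + 0x1.0p31;
}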
11046 /* Convert a signed DImode value into a DFmode. Only used for SSE in
11047 32-bit mode; otherwise we have a direct convert instruction. */
11049 void
11050 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
11052 REAL_VALUE_TYPE TWO32r;
11053 rtx fp_lo, fp_hi, x;
11055 fp_lo = gen_reg_rtx (DFmode);
11056 fp_hi = gen_reg_rtx (DFmode);
11058 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
11060 real_ldexp (&TWO32r, &dconst1, 32);
11061 x = const_double_from_real_value (TWO32r, DFmode);
11062 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
11064 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
11066 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
11067 0, OPTAB_DIRECT);
11068 if (x != target)
11069 emit_move_insn (target, x);
11072 /* Convert an unsigned SImode value into a SFmode, using only SSE.
11073 For x86_32, -mfpmath=sse, !optimize_size only. */
11074 void
11075 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
11077 REAL_VALUE_TYPE ONE16r;
11078 rtx fp_hi, fp_lo, int_hi, int_lo, x;
11080 real_ldexp (&ONE16r, &dconst1, 16);
11081 x = const_double_from_real_value (ONE16r, SFmode);
11082 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
11083 NULL, 0, OPTAB_DIRECT);
11084 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
11085 NULL, 0, OPTAB_DIRECT);
11086 fp_hi = gen_reg_rtx (SFmode);
11087 fp_lo = gen_reg_rtx (SFmode);
11088 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
11089 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
11090 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
11091 0, OPTAB_DIRECT);
11092 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
11093 0, OPTAB_DIRECT);
11094 if (!rtx_equal_p (target, fp_hi))
11095 emit_move_insn (target, fp_hi);
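/* A scalar equivalent of the expansion above.  Each half fits in 16
   bits, so both conversions and the scaling are exact, and only the
   final addition rounds (illustrative sketch):  */

#include <stdint.h>

static float
uns32_to_float (uint32_t x)
{
  return (float) (x >> 16) * 0x1.0p16f + (float) (x & 0xffff);
}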
11098 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
11099 then replicate the value for all elements of the vector
11100 register. */
11103 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
11105 rtvec v;
11106 switch (mode)
11108 case SImode:
11109 gcc_assert (vect);
11110 v = gen_rtvec (4, value, value, value, value);
11111 return gen_rtx_CONST_VECTOR (V4SImode, v);
11113 case DImode:
11114 gcc_assert (vect);
11115 v = gen_rtvec (2, value, value);
11116 return gen_rtx_CONST_VECTOR (V2DImode, v);
11118 case SFmode:
11119 if (vect)
11120 v = gen_rtvec (4, value, value, value, value);
11121 else
11122 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
11123 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
11124 return gen_rtx_CONST_VECTOR (V4SFmode, v);
11126 case DFmode:
11127 if (vect)
11128 v = gen_rtvec (2, value, value);
11129 else
11130 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
11131 return gen_rtx_CONST_VECTOR (V2DFmode, v);
11133 default:
11134 gcc_unreachable ();
11138 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
11139 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
11140 for an SSE register. If VECT is true, then replicate the mask for
11141 all elements of the vector register. If INVERT is true, then create
11142 a mask excluding the sign bit. */
11145 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
11147 enum machine_mode vec_mode, imode;
11148 HOST_WIDE_INT hi, lo;
11149 int shift = 63;
11150 rtx v;
11151 rtx mask;
11153 /* Find the sign bit, sign extended to 2*HWI. */
11154 switch (mode)
11156 case SImode:
11157 case SFmode:
11158 imode = SImode;
11159 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
11160 lo = 0x80000000, hi = lo < 0;
11161 break;
11163 case DImode:
11164 case DFmode:
11165 imode = DImode;
11166 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
11167 if (HOST_BITS_PER_WIDE_INT >= 64)
11168 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
11169 else
11170 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
11171 break;
11173 case TImode:
11174 case TFmode:
11175 imode = TImode;
11176 vec_mode = VOIDmode;
11177 gcc_assert (HOST_BITS_PER_WIDE_INT >= 64);
11178 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
11179 break;
11181 default:
11182 gcc_unreachable ();
11185 if (invert)
11186 lo = ~lo, hi = ~hi;
11188 /* Force this value into the low part of a fp vector constant. */
11189 mask = immed_double_const (lo, hi, imode);
11190 mask = gen_lowpart (mode, mask);
11192 if (vec_mode == VOIDmode)
11193 return force_reg (mode, mask);
11195 v = ix86_build_const_vector (mode, vect, mask);
11196 return force_reg (vec_mode, v);
11199 /* Generate code for floating point ABS or NEG. */
11201 void
11202 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
11203 rtx operands[])
11205 rtx mask, set, use, clob, dst, src;
11206 bool use_sse = false;
11207 bool vector_mode = VECTOR_MODE_P (mode);
11208 enum machine_mode elt_mode = mode;
11210 if (vector_mode)
11212 elt_mode = GET_MODE_INNER (mode);
11213 use_sse = true;
11215 else if (mode == TFmode)
11216 use_sse = true;
11217 else if (TARGET_SSE_MATH)
11218 use_sse = SSE_FLOAT_MODE_P (mode);
11220 /* NEG and ABS performed with SSE use bitwise mask operations.
11221 Create the appropriate mask now. */
11222 if (use_sse)
11223 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
11224 else
11225 mask = NULL_RTX;
11227 dst = operands[0];
11228 src = operands[1];
11230 if (vector_mode)
11232 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
11233 set = gen_rtx_SET (VOIDmode, dst, set);
11234 emit_insn (set);
11236 else
11238 set = gen_rtx_fmt_e (code, mode, src);
11239 set = gen_rtx_SET (VOIDmode, dst, set);
11240 if (mask)
11242 use = gen_rtx_USE (VOIDmode, mask);
11243 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
11244 emit_insn (gen_rtx_PARALLEL (VOIDmode,
11245 gen_rtvec (3, set, use, clob)));
11247 else
11248 emit_insn (set);
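/* A scalar picture of what the SSE mask operations above compute for
   SFmode (illustrative helper, assuming IEEE single format):  */

#include <stdint.h>
#include <string.h>

static float
absneg_sf (float x, int is_abs)
{
  uint32_t bits;
  const uint32_t mask = 0x80000000u;	/* the sign bit */

  memcpy (&bits, &x, sizeof bits);
  if (is_abs)
    bits &= ~mask;	/* ABS: andps with the inverted mask */
  else
    bits ^= mask;	/* NEG: xorps with the mask */
  memcpy (&x, &bits, sizeof x);
  return x;
}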
11252 /* Expand a copysign operation. Special case operand 0 being a constant. */
11254 void
11255 ix86_expand_copysign (rtx operands[])
11257 enum machine_mode mode;
11258 rtx dest, op0, op1, mask, nmask;
11260 dest = operands[0];
11261 op0 = operands[1];
11262 op1 = operands[2];
11264 mode = GET_MODE (dest);
11266 if (GET_CODE (op0) == CONST_DOUBLE)
11268 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
11270 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
11271 op0 = simplify_unary_operation (ABS, mode, op0, mode);
11273 if (mode == SFmode || mode == DFmode)
11275 enum machine_mode vmode;
11277 vmode = mode == SFmode ? V4SFmode : V2DFmode;
11279 if (op0 == CONST0_RTX (mode))
11280 op0 = CONST0_RTX (vmode);
11281 else
11283 rtvec v;
11285 if (mode == SFmode)
11286 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
11287 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
11288 else
11289 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
11291 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
11294 else if (op0 != CONST0_RTX (mode))
11295 op0 = force_reg (mode, op0);
11297 mask = ix86_build_signbit_mask (mode, 0, 0);
11299 if (mode == SFmode)
11300 copysign_insn = gen_copysignsf3_const;
11301 else if (mode == DFmode)
11302 copysign_insn = gen_copysigndf3_const;
11303 else
11304 copysign_insn = gen_copysigntf3_const;
11306 emit_insn (copysign_insn (dest, op0, op1, mask));
11308 else
11310 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
11312 nmask = ix86_build_signbit_mask (mode, 0, 1);
11313 mask = ix86_build_signbit_mask (mode, 0, 0);
11315 if (mode == SFmode)
11316 copysign_insn = gen_copysignsf3_var;
11317 else if (mode == DFmode)
11318 copysign_insn = gen_copysigndf3_var;
11319 else
11320 copysign_insn = gen_copysigntf3_var;
11322 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
11326 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
11327 be a constant, and so has already been expanded into a vector constant. */
11329 void
11330 ix86_split_copysign_const (rtx operands[])
11332 enum machine_mode mode, vmode;
11333 rtx dest, op0, op1, mask, x;
11335 dest = operands[0];
11336 op0 = operands[1];
11337 op1 = operands[2];
11338 mask = operands[3];
11340 mode = GET_MODE (dest);
11341 vmode = GET_MODE (mask);
11343 dest = simplify_gen_subreg (vmode, dest, mode, 0);
11344 x = gen_rtx_AND (vmode, dest, mask);
11345 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11347 if (op0 != CONST0_RTX (vmode))
11349 x = gen_rtx_IOR (vmode, dest, op0);
11350 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11354 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
11355 so we have to do two masks. */
11357 void
11358 ix86_split_copysign_var (rtx operands[])
11360 enum machine_mode mode, vmode;
11361 rtx dest, scratch, op0, op1, mask, nmask, x;
11363 dest = operands[0];
11364 scratch = operands[1];
11365 op0 = operands[2];
11366 op1 = operands[3];
11367 nmask = operands[4];
11368 mask = operands[5];
11370 mode = GET_MODE (dest);
11371 vmode = GET_MODE (mask);
11373 if (rtx_equal_p (op0, op1))
11375 /* Shouldn't happen often (it's useless, obviously), but when it does
11376 we'd generate incorrect code if we continue below. */
11377 emit_move_insn (dest, op0);
11378 return;
11381 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
11383 gcc_assert (REGNO (op1) == REGNO (scratch));
11385 x = gen_rtx_AND (vmode, scratch, mask);
11386 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
11388 dest = mask;
11389 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
11390 x = gen_rtx_NOT (vmode, dest);
11391 x = gen_rtx_AND (vmode, x, op0);
11392 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11394 else
11396 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
11398 x = gen_rtx_AND (vmode, scratch, mask);
11400 else /* alternative 2,4 */
11402 gcc_assert (REGNO (mask) == REGNO (scratch));
11403 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
11404 x = gen_rtx_AND (vmode, scratch, op1);
11406 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
11408 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
11410 dest = simplify_gen_subreg (vmode, op0, mode, 0);
11411 x = gen_rtx_AND (vmode, dest, nmask);
11413 else /* alternative 3,4 */
11415 gcc_assert (REGNO (nmask) == REGNO (dest));
11416 dest = nmask;
11417 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
11418 x = gen_rtx_AND (vmode, dest, op0);
11420 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11423 x = gen_rtx_IOR (vmode, dest, scratch);
11424 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
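/* The and/andnot/or dance above reduces to this scalar operation
   (illustrative DFmode version; not a GCC interface):  */

#include <stdint.h>
#include <string.h>

static double
bitwise_copysign (double mag, double sgn)
{
  const uint64_t signbit = 0x8000000000000000ULL;
  uint64_t m, s;

  memcpy (&m, &mag, sizeof m);
  memcpy (&s, &sgn, sizeof s);
  m = (m & ~signbit) | (s & signbit);	/* the nmask and mask operands */
  memcpy (&mag, &m, sizeof mag);
  return mag;
}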
11427 /* Return TRUE or FALSE depending on whether the first SET in INSN
11428 has source and destination with matching CC modes, and whether the
11429 CC mode is at least as constrained as REQ_MODE. */
11432 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
11434 rtx set;
11435 enum machine_mode set_mode;
11437 set = PATTERN (insn);
11438 if (GET_CODE (set) == PARALLEL)
11439 set = XVECEXP (set, 0, 0);
11440 gcc_assert (GET_CODE (set) == SET);
11441 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
11443 set_mode = GET_MODE (SET_DEST (set));
11444 switch (set_mode)
11446 case CCNOmode:
11447 if (req_mode != CCNOmode
11448 && (req_mode != CCmode
11449 || XEXP (SET_SRC (set), 1) != const0_rtx))
11450 return 0;
11451 break;
11452 case CCmode:
11453 if (req_mode == CCGCmode)
11454 return 0;
11455 /* FALLTHRU */
11456 case CCGCmode:
11457 if (req_mode == CCGOCmode || req_mode == CCNOmode)
11458 return 0;
11459 /* FALLTHRU */
11460 case CCGOCmode:
11461 if (req_mode == CCZmode)
11462 return 0;
11463 /* FALLTHRU */
11464 case CCZmode:
11465 break;
11467 default:
11468 gcc_unreachable ();
11471 return (GET_MODE (SET_SRC (set)) == set_mode);
11474 /* Generate insn patterns to do an integer compare of OPERANDS. */
11476 static rtx
11477 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
11479 enum machine_mode cmpmode;
11480 rtx tmp, flags;
11482 cmpmode = SELECT_CC_MODE (code, op0, op1);
11483 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
11485 /* This is very simple, but making the interface the same as in the
11486 FP case makes the rest of the code easier. */
11487 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
11488 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
11490 /* Return the test that should be put into the flags user, i.e.
11491 the bcc, scc, or cmov instruction. */
11492 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
11495 /* Figure out whether to use ordered or unordered fp comparisons.
11496 Return the appropriate mode to use. */
11498 enum machine_mode
11499 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
11501 /* ??? In order to make all comparisons reversible, we do all comparisons
11502 non-trapping when compiling for IEEE. Once gcc is able to distinguish
11503 all forms of trapping and nontrapping comparisons, we can make inequality
11504 comparisons trapping again, since that results in better code when using
11505 FCOM based compares. */
11506 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
11509 enum machine_mode
11510 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
11512 enum machine_mode mode = GET_MODE (op0);
11514 if (SCALAR_FLOAT_MODE_P (mode))
11516 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
11517 return ix86_fp_compare_mode (code);
11520 switch (code)
11522 /* Only zero flag is needed. */
11523 case EQ: /* ZF=0 */
11524 case NE: /* ZF!=0 */
11525 return CCZmode;
11526 /* Codes needing carry flag. */
11527 case GEU: /* CF=0 */
11528 case LTU: /* CF=1 */
11529 /* Detect overflow checks. They need just the carry flag. */
11530 if (GET_CODE (op0) == PLUS
11531 && rtx_equal_p (op1, XEXP (op0, 0)))
11532 return CCCmode;
11533 else
11534 return CCmode;
11535 case GTU: /* CF=0 & ZF=0 */
11536 case LEU: /* CF=1 | ZF=1 */
11537 /* Detect overflow checks. They need just the carry flag. */
11538 if (GET_CODE (op0) == MINUS
11539 && rtx_equal_p (op1, XEXP (op0, 0)))
11540 return CCCmode;
11541 else
11542 return CCmode;
11543 /* Codes possibly doable only with sign flag when
11544 comparing against zero. */
11545 case GE: /* SF=OF or SF=0 */
11546 case LT: /* SF<>OF or SF=1 */
11547 if (op1 == const0_rtx)
11548 return CCGOCmode;
11549 else
11550 /* For other cases Carry flag is not required. */
11551 return CCGCmode;
11552 /* Codes doable only with sign flag when comparing
11553 against zero, but we miss jump instruction for it
11554 so we need to use relational tests against overflow
11555 that thus needs to be zero. */
11556 case GT: /* ZF=0 & SF=OF */
11557 case LE: /* ZF=1 | SF<>OF */
11558 if (op1 == const0_rtx)
11559 return CCNOmode;
11560 else
11561 return CCGCmode;
11562 /* The strcmp pattern does (use flags), and combine may ask us for the
11563 proper mode. */
11564 case USE:
11565 return CCmode;
11566 default:
11567 gcc_unreachable ();
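/* Illustrative sketch (editor-added): the CCCmode cases above match the
   canonical unsigned-overflow idiom, where an addition's result is compared
   against one of the addition's own operands; the carry flag alone then
   decides the branch (add; jc).  */
#include <stdbool.h>

static bool
add_overflows (unsigned int a, unsigned int b)
{
  return a + b < a;   /* LTU of (plus a b) against a: needs only CF.  */
}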
11571 /* Return the fixed registers used for condition codes. */
11573 static bool
11574 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
11576 *p1 = FLAGS_REG;
11577 *p2 = FPSR_REG;
11578 return true;
11581 /* If two condition code modes are compatible, return a condition code
11582 mode which is compatible with both. Otherwise, return
11583 VOIDmode. */
11585 static enum machine_mode
11586 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
11588 if (m1 == m2)
11589 return m1;
11591 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
11592 return VOIDmode;
11594 if ((m1 == CCGCmode && m2 == CCGOCmode)
11595 || (m1 == CCGOCmode && m2 == CCGCmode))
11596 return CCGCmode;
11598 switch (m1)
11600 default:
11601 gcc_unreachable ();
11603 case CCmode:
11604 case CCGCmode:
11605 case CCGOCmode:
11606 case CCNOmode:
11607 case CCAmode:
11608 case CCCmode:
11609 case CCOmode:
11610 case CCSmode:
11611 case CCZmode:
11612 switch (m2)
11614 default:
11615 return VOIDmode;
11617 case CCmode:
11618 case CCGCmode:
11619 case CCGOCmode:
11620 case CCNOmode:
11621 case CCAmode:
11622 case CCCmode:
11623 case CCOmode:
11624 case CCSmode:
11625 case CCZmode:
11626 return CCmode;
11629 case CCFPmode:
11630 case CCFPUmode:
11631 /* These are only compatible with themselves, which we already
11632 checked above. */
11633 return VOIDmode;
11637 /* Split comparison code CODE into comparisons we can do using branch
11638 instructions. BYPASS_CODE is the comparison code for the branch that
11639 will branch around FIRST_CODE and SECOND_CODE. If one of the branches
11640 is not required, its value is set to UNKNOWN.
11641 We never require more than two branches. */
11643 void
11644 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
11645 enum rtx_code *first_code,
11646 enum rtx_code *second_code)
11648 *first_code = code;
11649 *bypass_code = UNKNOWN;
11650 *second_code = UNKNOWN;
11652 /* The fcomi comparison sets flags as follows:
11654 cmp ZF PF CF
11655 > 0 0 0
11656 < 0 0 1
11657 = 1 0 0
11658 un 1 1 1 */
11660 switch (code)
11662 case GT: /* GTU - CF=0 & ZF=0 */
11663 case GE: /* GEU - CF=0 */
11664 case ORDERED: /* PF=0 */
11665 case UNORDERED: /* PF=1 */
11666 case UNEQ: /* EQ - ZF=1 */
11667 case UNLT: /* LTU - CF=1 */
11668 case UNLE: /* LEU - CF=1 | ZF=1 */
11669 case LTGT: /* EQ - ZF=0 */
11670 break;
11671 case LT: /* LTU - CF=1 - fails on unordered */
11672 *first_code = UNLT;
11673 *bypass_code = UNORDERED;
11674 break;
11675 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
11676 *first_code = UNLE;
11677 *bypass_code = UNORDERED;
11678 break;
11679 case EQ: /* EQ - ZF=1 - fails on unordered */
11680 *first_code = UNEQ;
11681 *bypass_code = UNORDERED;
11682 break;
11683 case NE: /* NE - ZF=0 - fails on unordered */
11684 *first_code = LTGT;
11685 *second_code = UNORDERED;
11686 break;
11687 case UNGE: /* GEU - CF=0 - fails on unordered */
11688 *first_code = GE;
11689 *second_code = UNORDERED;
11690 break;
11691 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
11692 *first_code = GT;
11693 *second_code = UNORDERED;
11694 break;
11695 default:
11696 gcc_unreachable ();
11698 if (!TARGET_IEEE_FP)
11700 *second_code = UNKNOWN;
11701 *bypass_code = UNKNOWN;
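/* Illustrative sketch (editor-added): what the bypass/second codes mean for
   an IEEE branch.  fcomi only offers CF/ZF/PF tests that are "taken" on
   unordered operands, so e.g. LT is rewritten as UNLT guarded by an
   UNORDERED bypass branch; that control flow, on scalars:  */
#include <math.h>
#include <stdbool.h>

static bool
ieee_less_than (double a, double b)
{
  if (isunordered (a, b))   /* bypass_code == UNORDERED: jump around */
    return false;
  return a < b;             /* first_code == UNLT: a plain CF=1 test
                               once NaNs have been excluded */
}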
11705 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
11706 All the following functions use the number of instructions as the cost metric.
11707 In the future this should be tweaked to compute bytes for optimize_size and
11708 take into account the performance of various instructions on various CPUs. */
11709 static int
11710 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
11712 if (!TARGET_IEEE_FP)
11713 return 4;
11714 /* The cost of code output by ix86_expand_fp_compare. */
11715 switch (code)
11717 case UNLE:
11718 case UNLT:
11719 case LTGT:
11720 case GT:
11721 case GE:
11722 case UNORDERED:
11723 case ORDERED:
11724 case UNEQ:
11725 return 4;
11726 break;
11727 case LT:
11728 case NE:
11729 case EQ:
11730 case UNGE:
11731 return 5;
11732 break;
11733 case LE:
11734 case UNGT:
11735 return 6;
11736 break;
11737 default:
11738 gcc_unreachable ();
11742 /* Return cost of comparison done using fcomi operation.
11743 See ix86_fp_comparison_arithmetics_cost for the metrics. */
11744 static int
11745 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
11747 enum rtx_code bypass_code, first_code, second_code;
11748 /* Return an arbitrarily high cost when the instruction is not supported - this
11749 prevents gcc from using it. */
11750 if (!TARGET_CMOVE)
11751 return 1024;
11752 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
11753 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
11756 /* Return cost of comparison done using sahf operation.
11757 See ix86_fp_comparison_arithmetics_cost for the metrics. */
11758 static int
11759 ix86_fp_comparison_sahf_cost (enum rtx_code code)
11761 enum rtx_code bypass_code, first_code, second_code;
11762 /* Return an arbitrarily high cost when the instruction is not preferred - this
11763 keeps gcc from using it. */
11764 if (!(TARGET_SAHF && (TARGET_USE_SAHF || optimize_size)))
11765 return 1024;
11766 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
11767 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
11770 /* Compute cost of the comparison done using any method.
11771 See ix86_fp_comparison_arithmetics_cost for the metrics. */
11772 static int
11773 ix86_fp_comparison_cost (enum rtx_code code)
11775 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
11776 int min;
11778 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
11779 sahf_cost = ix86_fp_comparison_sahf_cost (code);
11781 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
11782 if (min > sahf_cost)
11783 min = sahf_cost;
11784 if (min > fcomi_cost)
11785 min = fcomi_cost;
11786 return min;
11789 /* Return true if we should use an FCOMI instruction for this
11790 fp comparison. */
11792 int
11793 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
11795 enum rtx_code swapped_code = swap_condition (code);
11797 return ((ix86_fp_comparison_cost (code)
11798 == ix86_fp_comparison_fcomi_cost (code))
11799 || (ix86_fp_comparison_cost (swapped_code)
11800 == ix86_fp_comparison_fcomi_cost (swapped_code)));
11803 /* Swap, force into registers, or otherwise massage the two operands
11804 to a fp comparison. The operands are updated in place; the new
11805 comparison code is returned. */
11807 static enum rtx_code
11808 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
11810 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
11811 rtx op0 = *pop0, op1 = *pop1;
11812 enum machine_mode op_mode = GET_MODE (op0);
11813 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
11815 /* All of the unordered compare instructions only work on registers.
11816 The same is true of the fcomi compare instructions. The XFmode
11817 compare instructions require registers except when comparing
11818 against zero or when converting operand 1 from fixed point to
11819 floating point. */
11821 if (!is_sse
11822 && (fpcmp_mode == CCFPUmode
11823 || (op_mode == XFmode
11824 && ! (standard_80387_constant_p (op0) == 1
11825 || standard_80387_constant_p (op1) == 1)
11826 && GET_CODE (op1) != FLOAT)
11827 || ix86_use_fcomi_compare (code)))
11829 op0 = force_reg (op_mode, op0);
11830 op1 = force_reg (op_mode, op1);
11832 else
11834 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
11835 things around if they appear profitable, otherwise force op0
11836 into a register. */
11838 if (standard_80387_constant_p (op0) == 0
11839 || (MEM_P (op0)
11840 && ! (standard_80387_constant_p (op1) == 0
11841 || MEM_P (op1))))
11843 rtx tmp;
11844 tmp = op0, op0 = op1, op1 = tmp;
11845 code = swap_condition (code);
11848 if (!REG_P (op0))
11849 op0 = force_reg (op_mode, op0);
11851 if (CONSTANT_P (op1))
11853 int tmp = standard_80387_constant_p (op1);
11854 if (tmp == 0)
11855 op1 = validize_mem (force_const_mem (op_mode, op1));
11856 else if (tmp == 1)
11858 if (TARGET_CMOVE)
11859 op1 = force_reg (op_mode, op1);
11861 else
11862 op1 = force_reg (op_mode, op1);
11866 /* Try to rearrange the comparison to make it cheaper. */
11867 if (ix86_fp_comparison_cost (code)
11868 > ix86_fp_comparison_cost (swap_condition (code))
11869 && (REG_P (op1) || can_create_pseudo_p ()))
11871 rtx tmp;
11872 tmp = op0, op0 = op1, op1 = tmp;
11873 code = swap_condition (code);
11874 if (!REG_P (op0))
11875 op0 = force_reg (op_mode, op0);
11878 *pop0 = op0;
11879 *pop1 = op1;
11880 return code;
11883 /* Convert comparison codes we use to represent FP comparison to integer
11884 code that will result in proper branch. Return UNKNOWN if no such code
11885 is available. */
11887 enum rtx_code
11888 ix86_fp_compare_code_to_integer (enum rtx_code code)
11890 switch (code)
11892 case GT:
11893 return GTU;
11894 case GE:
11895 return GEU;
11896 case ORDERED:
11897 case UNORDERED:
11898 return code;
11899 break;
11900 case UNEQ:
11901 return EQ;
11902 break;
11903 case UNLT:
11904 return LTU;
11905 break;
11906 case UNLE:
11907 return LEU;
11908 break;
11909 case LTGT:
11910 return NE;
11911 break;
11912 default:
11913 return UNKNOWN;
11917 /* Generate insn patterns to do a floating point compare of OPERANDS. */
11919 static rtx
11920 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
11921 rtx *second_test, rtx *bypass_test)
11923 enum machine_mode fpcmp_mode, intcmp_mode;
11924 rtx tmp, tmp2;
11925 int cost = ix86_fp_comparison_cost (code);
11926 enum rtx_code bypass_code, first_code, second_code;
11928 fpcmp_mode = ix86_fp_compare_mode (code);
11929 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
11931 if (second_test)
11932 *second_test = NULL_RTX;
11933 if (bypass_test)
11934 *bypass_test = NULL_RTX;
11936 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
11938 /* Do fcomi/sahf based test when profitable. */
11939 if (ix86_fp_comparison_arithmetics_cost (code) > cost
11940 && (bypass_code == UNKNOWN || bypass_test)
11941 && (second_code == UNKNOWN || second_test))
11943 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
11944 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
11945 tmp);
11946 if (TARGET_CMOVE)
11947 emit_insn (tmp);
11948 else
11950 gcc_assert (TARGET_SAHF);
11952 if (!scratch)
11953 scratch = gen_reg_rtx (HImode);
11954 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
11956 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
11959 /* The FP codes work out to act like unsigned. */
11960 intcmp_mode = fpcmp_mode;
11961 code = first_code;
11962 if (bypass_code != UNKNOWN)
11963 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
11964 gen_rtx_REG (intcmp_mode, FLAGS_REG),
11965 const0_rtx);
11966 if (second_code != UNKNOWN)
11967 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
11968 gen_rtx_REG (intcmp_mode, FLAGS_REG),
11969 const0_rtx);
11971 else
11973 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
11974 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
11975 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
11976 if (!scratch)
11977 scratch = gen_reg_rtx (HImode);
11978 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
11980 /* In the unordered case, we have to check C2 for NaNs, which
11981 doesn't happen to work out to anything nice combination-wise.
11982 So do some bit twiddling on the value we've got in AH to come
11983 up with an appropriate set of condition codes. */
11985 intcmp_mode = CCNOmode;
11986 switch (code)
11988 case GT:
11989 case UNGT:
11990 if (code == GT || !TARGET_IEEE_FP)
11992 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
11993 code = EQ;
11995 else
11997 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
11998 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
11999 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
12000 intcmp_mode = CCmode;
12001 code = GEU;
12003 break;
12004 case LT:
12005 case UNLT:
12006 if (code == LT && TARGET_IEEE_FP)
12008 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
12009 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
12010 intcmp_mode = CCmode;
12011 code = EQ;
12013 else
12015 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
12016 code = NE;
12018 break;
12019 case GE:
12020 case UNGE:
12021 if (code == GE || !TARGET_IEEE_FP)
12023 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
12024 code = EQ;
12026 else
12028 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
12029 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
12030 GEN_INT (0x01)));
12031 code = NE;
12033 break;
12034 case LE:
12035 case UNLE:
12036 if (code == LE && TARGET_IEEE_FP)
12038 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
12039 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
12040 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
12041 intcmp_mode = CCmode;
12042 code = LTU;
12044 else
12046 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
12047 code = NE;
12049 break;
12050 case EQ:
12051 case UNEQ:
12052 if (code == EQ && TARGET_IEEE_FP)
12054 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
12055 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
12056 intcmp_mode = CCmode;
12057 code = EQ;
12059 else
12061 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
12062 code = NE;
12063 break;
12065 break;
12066 case NE:
12067 case LTGT:
12068 if (code == NE && TARGET_IEEE_FP)
12070 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
12071 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
12072 GEN_INT (0x40)));
12073 code = NE;
12075 else
12077 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
12078 code = EQ;
12080 break;
12082 case UNORDERED:
12083 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
12084 code = NE;
12085 break;
12086 case ORDERED:
12087 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
12088 code = EQ;
12089 break;
12091 default:
12092 gcc_unreachable ();
12096 /* Return the test that should be put into the flags user, i.e.
12097 the bcc, scc, or cmov instruction. */
12098 return gen_rtx_fmt_ee (code, VOIDmode,
12099 gen_rtx_REG (intcmp_mode, FLAGS_REG),
12100 const0_rtx);
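/* Illustrative sketch (editor-added): the masks used above (0x01, 0x04,
   0x05, 0x40, 0x44, 0x45) pick out the x87 condition bits once FNSTSW has
   put the status word's high byte into AH: C0 is 0x01, C2 is 0x04 and C3
   is 0x40.  fcom sets C3 C2 C0 to 0 0 0 for ">", 0 0 1 for "<", 1 0 0 for
   "=" and 1 1 1 for unordered, so for example:  */
#include <stdbool.h>

static bool
fcom_gt (unsigned char ah)        /* GT: test ah, 0x45 ; je  */
{
  return (ah & 0x45) == 0;        /* neither "<", "=", nor unordered */
}

static bool
fcom_unordered (unsigned char ah) /* UNORDERED: test ah, 0x04 ; jne */
{
  return (ah & 0x04) != 0;        /* C2 is set only on unordered */
}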
12103 rtx
12104 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
12106 rtx op0, op1, ret;
12107 op0 = ix86_compare_op0;
12108 op1 = ix86_compare_op1;
12110 if (second_test)
12111 *second_test = NULL_RTX;
12112 if (bypass_test)
12113 *bypass_test = NULL_RTX;
12115 if (ix86_compare_emitted)
12117 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
12118 ix86_compare_emitted = NULL_RTX;
12120 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
12122 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
12123 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
12124 second_test, bypass_test);
12126 else
12127 ret = ix86_expand_int_compare (code, op0, op1);
12129 return ret;
12132 /* Return true if the CODE will result in a nontrivial jump sequence. */
12133 bool
12134 ix86_fp_jump_nontrivial_p (enum rtx_code code)
12136 enum rtx_code bypass_code, first_code, second_code;
12137 if (!TARGET_CMOVE)
12138 return true;
12139 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
12140 return bypass_code != UNKNOWN || second_code != UNKNOWN;
12143 void
12144 ix86_expand_branch (enum rtx_code code, rtx label)
12146 rtx tmp;
12148 /* If we have emitted a compare insn, go straight to simple.
12149 ix86_expand_compare won't emit anything if ix86_compare_emitted
12150 is non-NULL. */
12151 if (ix86_compare_emitted)
12152 goto simple;
12154 switch (GET_MODE (ix86_compare_op0))
12156 case QImode:
12157 case HImode:
12158 case SImode:
12159 simple:
12160 tmp = ix86_expand_compare (code, NULL, NULL);
12161 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12162 gen_rtx_LABEL_REF (VOIDmode, label),
12163 pc_rtx);
12164 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
12165 return;
12167 case SFmode:
12168 case DFmode:
12169 case XFmode:
12171 rtvec vec;
12172 int use_fcomi;
12173 enum rtx_code bypass_code, first_code, second_code;
12175 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
12176 &ix86_compare_op1);
12178 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
12180 /* Check whether we will use the natural sequence with one jump. If
12181 so, we can expand the jump early. Otherwise delay expansion by
12182 creating a compound insn so as not to confuse the optimizers. */
12183 if (bypass_code == UNKNOWN && second_code == UNKNOWN)
12185 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
12186 gen_rtx_LABEL_REF (VOIDmode, label),
12187 pc_rtx, NULL_RTX, NULL_RTX);
12189 else
12191 tmp = gen_rtx_fmt_ee (code, VOIDmode,
12192 ix86_compare_op0, ix86_compare_op1);
12193 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12194 gen_rtx_LABEL_REF (VOIDmode, label),
12195 pc_rtx);
12196 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
12198 use_fcomi = ix86_use_fcomi_compare (code);
12199 vec = rtvec_alloc (3 + !use_fcomi);
12200 RTVEC_ELT (vec, 0) = tmp;
12201 RTVEC_ELT (vec, 1)
12202 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FPSR_REG));
12203 RTVEC_ELT (vec, 2)
12204 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FLAGS_REG));
12205 if (! use_fcomi)
12206 RTVEC_ELT (vec, 3)
12207 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
12209 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
12211 return;
12214 case DImode:
12215 if (TARGET_64BIT)
12216 goto simple;
12217 case TImode:
12218 /* Expand DImode branch into multiple compare+branch. */
12220 rtx lo[2], hi[2], label2;
12221 enum rtx_code code1, code2, code3;
12222 enum machine_mode submode;
12224 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
12226 tmp = ix86_compare_op0;
12227 ix86_compare_op0 = ix86_compare_op1;
12228 ix86_compare_op1 = tmp;
12229 code = swap_condition (code);
12231 if (GET_MODE (ix86_compare_op0) == DImode)
12233 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
12234 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
12235 submode = SImode;
12237 else
12239 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
12240 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
12241 submode = DImode;
12244 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
12245 avoid two branches. This costs one extra insn, so disable when
12246 optimizing for size. */
12248 if ((code == EQ || code == NE)
12249 && (!optimize_size
12250 || hi[1] == const0_rtx || lo[1] == const0_rtx))
12252 rtx xor0, xor1;
12254 xor1 = hi[0];
12255 if (hi[1] != const0_rtx)
12256 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
12257 NULL_RTX, 0, OPTAB_WIDEN);
12259 xor0 = lo[0];
12260 if (lo[1] != const0_rtx)
12261 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
12262 NULL_RTX, 0, OPTAB_WIDEN);
12264 tmp = expand_binop (submode, ior_optab, xor1, xor0,
12265 NULL_RTX, 0, OPTAB_WIDEN);
12267 ix86_compare_op0 = tmp;
12268 ix86_compare_op1 = const0_rtx;
12269 ix86_expand_branch (code, label);
12270 return;
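/* Illustrative sketch (editor-added): the (hi0^hi1)|(lo0^lo1) trick above
   in plain C.  The OR of the XORed halves is zero exactly when both halves
   match, so a single compare against zero decides 64-bit equality without
   branching on each half separately.  */
#include <stdbool.h>
#include <stdint.h>

static bool
di_equal (uint32_t lo0, uint32_t hi0, uint32_t lo1, uint32_t hi1)
{
  return ((hi0 ^ hi1) | (lo0 ^ lo1)) == 0;
}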
12273 /* Otherwise, if we are doing a less-than or greater-or-equal
12274 comparison, op1 is a constant and its low word is zero, then we can
12275 just examine the high word. Similarly for a low word of -1 and
12276 less-or-equal or greater-than. */
12278 if (CONST_INT_P (hi[1]))
12279 switch (code)
12281 case LT: case LTU: case GE: case GEU:
12282 if (lo[1] == const0_rtx)
12284 ix86_compare_op0 = hi[0];
12285 ix86_compare_op1 = hi[1];
12286 ix86_expand_branch (code, label);
12287 return;
12289 break;
12290 case LE: case LEU: case GT: case GTU:
12291 if (lo[1] == constm1_rtx)
12293 ix86_compare_op0 = hi[0];
12294 ix86_compare_op1 = hi[1];
12295 ix86_expand_branch (code, label);
12296 return;
12298 break;
12299 default:
12300 break;
12303 /* Otherwise, we need two or three jumps. */
12305 label2 = gen_label_rtx ();
12307 code1 = code;
12308 code2 = swap_condition (code);
12309 code3 = unsigned_condition (code);
12311 switch (code)
12313 case LT: case GT: case LTU: case GTU:
12314 break;
12316 case LE: code1 = LT; code2 = GT; break;
12317 case GE: code1 = GT; code2 = LT; break;
12318 case LEU: code1 = LTU; code2 = GTU; break;
12319 case GEU: code1 = GTU; code2 = LTU; break;
12321 case EQ: code1 = UNKNOWN; code2 = NE; break;
12322 case NE: code2 = UNKNOWN; break;
12324 default:
12325 gcc_unreachable ();
12329 * a < b =>
12330 * if (hi(a) < hi(b)) goto true;
12331 * if (hi(a) > hi(b)) goto false;
12332 * if (lo(a) < lo(b)) goto true;
12333 * false:
12336 ix86_compare_op0 = hi[0];
12337 ix86_compare_op1 = hi[1];
12339 if (code1 != UNKNOWN)
12340 ix86_expand_branch (code1, label);
12341 if (code2 != UNKNOWN)
12342 ix86_expand_branch (code2, label2);
12344 ix86_compare_op0 = lo[0];
12345 ix86_compare_op1 = lo[1];
12346 ix86_expand_branch (code3, label);
12348 if (code2 != UNKNOWN)
12349 emit_label (label2);
12350 return;
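/* Illustrative sketch (editor-added): the two/three-jump sequence emitted
   above, written out for a signed 64-bit "<" on a 32-bit target.  The high
   words decide unless they are equal, in which case the low words are
   compared unsigned (code3 = unsigned_condition (code)).  */
#include <stdbool.h>
#include <stdint.h>

static bool
di_less (int32_t hi0, uint32_t lo0, int32_t hi1, uint32_t lo1)
{
  if (hi0 < hi1)      /* code1: signed test on the high words  */
    return true;
  if (hi0 > hi1)      /* code2: branch to label2 ("false")     */
    return false;
  return lo0 < lo1;   /* code3: unsigned test on the low words */
}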
12353 default:
12354 gcc_unreachable ();
12358 /* Split branch based on floating point condition. */
12359 void
12360 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
12361 rtx target1, rtx target2, rtx tmp, rtx pushed)
12363 rtx second, bypass;
12364 rtx label = NULL_RTX;
12365 rtx condition;
12366 int bypass_probability = -1, second_probability = -1, probability = -1;
12367 rtx i;
12369 if (target2 != pc_rtx)
12371 rtx tmp = target2;
12372 code = reverse_condition_maybe_unordered (code);
12373 target2 = target1;
12374 target1 = tmp;
12377 condition = ix86_expand_fp_compare (code, op1, op2,
12378 tmp, &second, &bypass);
12380 /* Remove pushed operand from stack. */
12381 if (pushed)
12382 ix86_free_from_memory (GET_MODE (pushed));
12384 if (split_branch_probability >= 0)
12386 /* Distribute the probabilities across the jumps.
12387 Assume that BYPASS and SECOND always test
12388 for UNORDERED. */
12389 probability = split_branch_probability;
12391 /* A value of 1 is low enough that the probability need not
12392 be updated. Later we may run some experiments and see
12393 if unordered values are more frequent in practice. */
12394 if (bypass)
12395 bypass_probability = 1;
12396 if (second)
12397 second_probability = 1;
12399 if (bypass != NULL_RTX)
12401 label = gen_label_rtx ();
12402 i = emit_jump_insn (gen_rtx_SET
12403 (VOIDmode, pc_rtx,
12404 gen_rtx_IF_THEN_ELSE (VOIDmode,
12405 bypass,
12406 gen_rtx_LABEL_REF (VOIDmode,
12407 label),
12408 pc_rtx)));
12409 if (bypass_probability >= 0)
12410 REG_NOTES (i)
12411 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12412 GEN_INT (bypass_probability),
12413 REG_NOTES (i));
12415 i = emit_jump_insn (gen_rtx_SET
12416 (VOIDmode, pc_rtx,
12417 gen_rtx_IF_THEN_ELSE (VOIDmode,
12418 condition, target1, target2)));
12419 if (probability >= 0)
12420 REG_NOTES (i)
12421 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12422 GEN_INT (probability),
12423 REG_NOTES (i));
12424 if (second != NULL_RTX)
12426 i = emit_jump_insn (gen_rtx_SET
12427 (VOIDmode, pc_rtx,
12428 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
12429 target2)));
12430 if (second_probability >= 0)
12431 REG_NOTES (i)
12432 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12433 GEN_INT (second_probability),
12434 REG_NOTES (i));
12436 if (label != NULL_RTX)
12437 emit_label (label);
12440 int
12441 ix86_expand_setcc (enum rtx_code code, rtx dest)
12443 rtx ret, tmp, tmpreg, equiv;
12444 rtx second_test, bypass_test;
12446 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
12447 return 0; /* FAIL */
12449 gcc_assert (GET_MODE (dest) == QImode);
12451 ret = ix86_expand_compare (code, &second_test, &bypass_test);
12452 PUT_MODE (ret, QImode);
12454 tmp = dest;
12455 tmpreg = dest;
12457 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
12458 if (bypass_test || second_test)
12460 rtx test = second_test;
12461 int bypass = 0;
12462 rtx tmp2 = gen_reg_rtx (QImode);
12463 if (bypass_test)
12465 gcc_assert (!second_test);
12466 test = bypass_test;
12467 bypass = 1;
12468 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
12470 PUT_MODE (test, QImode);
12471 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
12473 if (bypass)
12474 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
12475 else
12476 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
12479 /* Attach a REG_EQUAL note describing the comparison result. */
12480 if (ix86_compare_op0 && ix86_compare_op1)
12482 equiv = simplify_gen_relational (code, QImode,
12483 GET_MODE (ix86_compare_op0),
12484 ix86_compare_op0, ix86_compare_op1);
12485 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
12488 return 1; /* DONE */
12491 /* Expand a comparison setting or clearing the carry flag. Return true
12492 when successful, and set POP to the comparison operation. */
12493 static bool
12494 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
12496 enum machine_mode mode =
12497 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
12499 /* Do not handle DImode compares that go through the special path. */
12500 if (mode == (TARGET_64BIT ? TImode : DImode))
12501 return false;
12503 if (SCALAR_FLOAT_MODE_P (mode))
12505 rtx second_test = NULL, bypass_test = NULL;
12506 rtx compare_op, compare_seq;
12508 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
12510 /* Shortcut: the following common codes never translate
12511 into carry flag compares. */
12512 if (code == EQ || code == NE || code == UNEQ || code == LTGT
12513 || code == ORDERED || code == UNORDERED)
12514 return false;
12516 /* These comparisons require zero flag; swap operands so they won't. */
12517 if ((code == GT || code == UNLE || code == LE || code == UNGT)
12518 && !TARGET_IEEE_FP)
12520 rtx tmp = op0;
12521 op0 = op1;
12522 op1 = tmp;
12523 code = swap_condition (code);
12526 /* Try to expand the comparison and verify that we end up with
12527 a carry flag based comparison. This fails only when we decide
12528 to expand the comparison using arithmetic, which is not a
12529 common scenario. */
12530 start_sequence ();
12531 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
12532 &second_test, &bypass_test);
12533 compare_seq = get_insns ();
12534 end_sequence ();
12536 if (second_test || bypass_test)
12537 return false;
12539 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
12540 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
12541 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
12542 else
12543 code = GET_CODE (compare_op);
12545 if (code != LTU && code != GEU)
12546 return false;
12548 emit_insn (compare_seq);
12549 *pop = compare_op;
12550 return true;
12553 if (!INTEGRAL_MODE_P (mode))
12554 return false;
12556 switch (code)
12558 case LTU:
12559 case GEU:
12560 break;
12562 /* Convert a==0 into (unsigned)a<1. */
12563 case EQ:
12564 case NE:
12565 if (op1 != const0_rtx)
12566 return false;
12567 op1 = const1_rtx;
12568 code = (code == EQ ? LTU : GEU);
12569 break;
12571 /* Convert a>b into b<a or a>=b-1. */
12572 case GTU:
12573 case LEU:
12574 if (CONST_INT_P (op1))
12576 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
12577 /* Bail out on overflow. We could still swap the operands, but that
12578 would force loading the constant into a register. */
12579 if (op1 == const0_rtx
12580 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
12581 return false;
12582 code = (code == GTU ? GEU : LTU);
12584 else
12586 rtx tmp = op1;
12587 op1 = op0;
12588 op0 = tmp;
12589 code = (code == GTU ? LTU : GEU);
12591 break;
12593 /* Convert a>=0 into (unsigned)a<0x80000000. */
12594 case LT:
12595 case GE:
12596 if (mode == DImode || op1 != const0_rtx)
12597 return false;
12598 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
12599 code = (code == LT ? GEU : LTU);
12600 break;
12601 case LE:
12602 case GT:
12603 if (mode == DImode || op1 != constm1_rtx)
12604 return false;
12605 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
12606 code = (code == LE ? GEU : LTU);
12607 break;
12609 default:
12610 return false;
12612 /* Swapping operands may cause the constant to appear as the first operand. */
12613 if (!nonimmediate_operand (op0, VOIDmode))
12615 if (!can_create_pseudo_p ())
12616 return false;
12617 op0 = force_reg (mode, op0);
12619 ix86_compare_op0 = op0;
12620 ix86_compare_op1 = op1;
12621 *pop = ix86_expand_compare (code, NULL, NULL);
12622 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
12623 return true;
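/* Illustrative sketch (editor-added): the integer rewrites above as plain C
   identities.  Each turns a comparison into LTU or GEU so that a later
   sbb/adc can consume the carry flag directly.  */
#include <stdbool.h>
#include <stdint.h>

static bool eq0_as_ltu (uint32_t a)             /* a == 0  ->  a <u 1 */
{ return a < 1u; }

static bool gtu_as_geu (uint32_t a, uint32_t c) /* a >u c  ->  a >=u c+1 */
{ return a >= c + 1u; }   /* valid only when c+1 does not wrap; the code
                             above bails out on that overflow */

static bool ge0_as_ltu (int32_t a)    /* a >= 0  ->  (unsigned) a <u 2^31 */
{ return (uint32_t) a < 0x80000000u; }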
12626 int
12627 ix86_expand_int_movcc (rtx operands[])
12629 enum rtx_code code = GET_CODE (operands[1]), compare_code;
12630 rtx compare_seq, compare_op;
12631 rtx second_test, bypass_test;
12632 enum machine_mode mode = GET_MODE (operands[0]);
12633 bool sign_bit_compare_p = false;
12635 start_sequence ();
12636 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
12637 compare_seq = get_insns ();
12638 end_sequence ();
12640 compare_code = GET_CODE (compare_op);
12642 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
12643 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
12644 sign_bit_compare_p = true;
12646 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
12647 HImode insns, we'd be swallowed in word prefix ops. */
12649 if ((mode != HImode || TARGET_FAST_PREFIX)
12650 && (mode != (TARGET_64BIT ? TImode : DImode))
12651 && CONST_INT_P (operands[2])
12652 && CONST_INT_P (operands[3]))
12654 rtx out = operands[0];
12655 HOST_WIDE_INT ct = INTVAL (operands[2]);
12656 HOST_WIDE_INT cf = INTVAL (operands[3]);
12657 HOST_WIDE_INT diff;
12659 diff = ct - cf;
12660 /* Sign bit compares are better done using shifts than by using
12661 sbb. */
12662 if (sign_bit_compare_p
12663 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
12664 ix86_compare_op1, &compare_op))
12666 /* Detect overlap between destination and compare sources. */
12667 rtx tmp = out;
12669 if (!sign_bit_compare_p)
12671 bool fpcmp = false;
12673 compare_code = GET_CODE (compare_op);
12675 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
12676 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
12678 fpcmp = true;
12679 compare_code = ix86_fp_compare_code_to_integer (compare_code);
12682 /* To simplify the rest of the code, restrict to the GEU case. */
12683 if (compare_code == LTU)
12685 HOST_WIDE_INT tmp = ct;
12686 ct = cf;
12687 cf = tmp;
12688 compare_code = reverse_condition (compare_code);
12689 code = reverse_condition (code);
12691 else
12693 if (fpcmp)
12694 PUT_CODE (compare_op,
12695 reverse_condition_maybe_unordered
12696 (GET_CODE (compare_op)));
12697 else
12698 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
12700 diff = ct - cf;
12702 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
12703 || reg_overlap_mentioned_p (out, ix86_compare_op1))
12704 tmp = gen_reg_rtx (mode);
12706 if (mode == DImode)
12707 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
12708 else
12709 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
12711 else
12713 if (code == GT || code == GE)
12714 code = reverse_condition (code);
12715 else
12717 HOST_WIDE_INT tmp = ct;
12718 ct = cf;
12719 cf = tmp;
12720 diff = ct - cf;
12722 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
12723 ix86_compare_op1, VOIDmode, 0, -1);
12726 if (diff == 1)
12729 * cmpl op0,op1
12730 * sbbl dest,dest
12731 * [addl dest, ct]
12733 * Size 5 - 8.
12735 if (ct)
12736 tmp = expand_simple_binop (mode, PLUS,
12737 tmp, GEN_INT (ct),
12738 copy_rtx (tmp), 1, OPTAB_DIRECT);
12740 else if (cf == -1)
12743 * cmpl op0,op1
12744 * sbbl dest,dest
12745 * orl $ct, dest
12747 * Size 8.
12749 tmp = expand_simple_binop (mode, IOR,
12750 tmp, GEN_INT (ct),
12751 copy_rtx (tmp), 1, OPTAB_DIRECT);
12753 else if (diff == -1 && ct)
12756 * cmpl op0,op1
12757 * sbbl dest,dest
12758 * notl dest
12759 * [addl dest, cf]
12761 * Size 8 - 11.
12763 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
12764 if (cf)
12765 tmp = expand_simple_binop (mode, PLUS,
12766 copy_rtx (tmp), GEN_INT (cf),
12767 copy_rtx (tmp), 1, OPTAB_DIRECT);
12769 else
12772 * cmpl op0,op1
12773 * sbbl dest,dest
12774 * [notl dest]
12775 * andl cf - ct, dest
12776 * [addl dest, ct]
12778 * Size 8 - 11.
12781 if (cf == 0)
12783 cf = ct;
12784 ct = 0;
12785 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
12788 tmp = expand_simple_binop (mode, AND,
12789 copy_rtx (tmp),
12790 gen_int_mode (cf - ct, mode),
12791 copy_rtx (tmp), 1, OPTAB_DIRECT);
12792 if (ct)
12793 tmp = expand_simple_binop (mode, PLUS,
12794 copy_rtx (tmp), GEN_INT (ct),
12795 copy_rtx (tmp), 1, OPTAB_DIRECT);
12798 if (!rtx_equal_p (tmp, out))
12799 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
12801 return 1; /* DONE */
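/* Illustrative sketch (editor-added): what the cmp/sbb sequences above
   compute, shown for the LTU direction.  sbb dest,dest after a compare
   leaves an all-ones mask when the carry was set and zero otherwise; the
   add/or/and/not variants then bend the mask into {ct, cf} branchlessly. */
#include <stdint.h>

static uint32_t
select_via_sbb_mask (uint32_t a, uint32_t b, uint32_t ct, uint32_t cf)
{
  uint32_t mask = (a < b) ? 0xffffffffu : 0u;  /* cmp a,b ; sbb mask,mask */
  return (mask & (ct - cf)) + cf;              /* and (ct-cf) ; add cf    */
}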
12804 if (diff < 0)
12806 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
12808 HOST_WIDE_INT tmp;
12809 tmp = ct, ct = cf, cf = tmp;
12810 diff = -diff;
12812 if (SCALAR_FLOAT_MODE_P (cmp_mode))
12814 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
12816 /* We may be reversing an unordered compare to a normal compare, which
12817 is not valid in general (we may convert a non-trapping condition
12818 to a trapping one); however, on i386 we currently emit all
12819 comparisons unordered. */
12820 compare_code = reverse_condition_maybe_unordered (compare_code);
12821 code = reverse_condition_maybe_unordered (code);
12823 else
12825 compare_code = reverse_condition (compare_code);
12826 code = reverse_condition (code);
12830 compare_code = UNKNOWN;
12831 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
12832 && CONST_INT_P (ix86_compare_op1))
12834 if (ix86_compare_op1 == const0_rtx
12835 && (code == LT || code == GE))
12836 compare_code = code;
12837 else if (ix86_compare_op1 == constm1_rtx)
12839 if (code == LE)
12840 compare_code = LT;
12841 else if (code == GT)
12842 compare_code = GE;
12846 /* Optimize dest = (op0 < 0) ? -1 : cf. */
12847 if (compare_code != UNKNOWN
12848 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
12849 && (cf == -1 || ct == -1))
12851 /* If lea code below could be used, only optimize
12852 if it results in a 2 insn sequence. */
12854 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
12855 || diff == 3 || diff == 5 || diff == 9)
12856 || (compare_code == LT && ct == -1)
12857 || (compare_code == GE && cf == -1))
12860 * notl op1 (if necessary)
12861 * sarl $31, op1
12862 * orl cf, op1
12864 if (ct != -1)
12866 cf = ct;
12867 ct = -1;
12868 code = reverse_condition (code);
12871 out = emit_store_flag (out, code, ix86_compare_op0,
12872 ix86_compare_op1, VOIDmode, 0, -1);
12874 out = expand_simple_binop (mode, IOR,
12875 out, GEN_INT (cf),
12876 out, 1, OPTAB_DIRECT);
12877 if (out != operands[0])
12878 emit_move_insn (operands[0], out);
12880 return 1; /* DONE */
12885 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
12886 || diff == 3 || diff == 5 || diff == 9)
12887 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
12888 && (mode != DImode
12889 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
12892 * xorl dest,dest
12893 * cmpl op1,op2
12894 * setcc dest
12895 * lea cf(dest*(ct-cf)),dest
12897 * Size 14.
12899 * This also catches the degenerate setcc-only case.
12902 rtx tmp;
12903 int nops;
12905 out = emit_store_flag (out, code, ix86_compare_op0,
12906 ix86_compare_op1, VOIDmode, 0, 1);
12908 nops = 0;
12909 /* On x86_64 the lea instruction operates on Pmode, so we need
12910 to get the arithmetic done in the proper mode to match. */
12911 if (diff == 1)
12912 tmp = copy_rtx (out);
12913 else
12915 rtx out1;
12916 out1 = copy_rtx (out);
12917 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
12918 nops++;
12919 if (diff & 1)
12921 tmp = gen_rtx_PLUS (mode, tmp, out1);
12922 nops++;
12925 if (cf != 0)
12927 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
12928 nops++;
12930 if (!rtx_equal_p (tmp, out))
12932 if (nops == 1)
12933 out = force_operand (tmp, copy_rtx (out));
12934 else
12935 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
12937 if (!rtx_equal_p (out, operands[0]))
12938 emit_move_insn (operands[0], copy_rtx (out));
12940 return 1; /* DONE */
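/* Illustrative sketch (editor-added): the setcc/lea shape above.  The
   comparison result is materialized as 0 or 1, then scaled and offset in a
   single lea whenever ct - cf is an lea-addressable factor
   (1, 2, 3, 4, 5, 8 or 9).  */
#include <stdint.h>

static uint32_t
select_via_lea (uint32_t a, uint32_t b, uint32_t ct, uint32_t cf)
{
  uint32_t flag = (a < b);         /* xor dest,dest ; cmp ; setb dest */
  return cf + flag * (ct - cf);    /* lea cf(flag*(ct-cf)), dest      */
}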
12944 * General case: Jumpful:
12945 * xorl dest,dest cmpl op1, op2
12946 * cmpl op1, op2 movl ct, dest
12947 * setcc dest jcc 1f
12948 * decl dest movl cf, dest
12949 * andl (cf-ct),dest 1:
12950 * addl ct,dest
12952 * Size 20. Size 14.
12954 * This is reasonably steep, but branch mispredict costs are
12955 * high on modern CPUs, so consider failing only if optimizing
12956 * for space.
12959 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
12960 && BRANCH_COST >= 2)
12962 if (cf == 0)
12964 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
12966 cf = ct;
12967 ct = 0;
12969 if (SCALAR_FLOAT_MODE_P (cmp_mode))
12971 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
12973 /* We may be reversing an unordered compare to a normal compare,
12974 which is not valid in general (we may convert a non-trapping
12975 condition to a trapping one); however, on i386 we currently
12976 emit all comparisons unordered. */
12977 code = reverse_condition_maybe_unordered (code);
12979 else
12981 code = reverse_condition (code);
12982 if (compare_code != UNKNOWN)
12983 compare_code = reverse_condition (compare_code);
12987 if (compare_code != UNKNOWN)
12989 /* notl op1 (if needed)
12990 sarl $31, op1
12991 andl (cf-ct), op1
12992 addl ct, op1
12994 For x < 0 (resp. x <= -1) there will be no notl,
12995 so if possible swap the constants to get rid of the
12996 complement.
12997 True/false will be -1/0 while code below (store flag
12998 followed by decrement) is 0/-1, so the constants need
12999 to be exchanged once more. */
13001 if (compare_code == GE || !cf)
13003 code = reverse_condition (code);
13004 compare_code = LT;
13006 else
13008 HOST_WIDE_INT tmp = cf;
13009 cf = ct;
13010 ct = tmp;
13013 out = emit_store_flag (out, code, ix86_compare_op0,
13014 ix86_compare_op1, VOIDmode, 0, -1);
13016 else
13018 out = emit_store_flag (out, code, ix86_compare_op0,
13019 ix86_compare_op1, VOIDmode, 0, 1);
13021 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
13022 copy_rtx (out), 1, OPTAB_DIRECT);
13025 out = expand_simple_binop (mode, AND, copy_rtx (out),
13026 gen_int_mode (cf - ct, mode),
13027 copy_rtx (out), 1, OPTAB_DIRECT);
13028 if (ct)
13029 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
13030 copy_rtx (out), 1, OPTAB_DIRECT);
13031 if (!rtx_equal_p (out, operands[0]))
13032 emit_move_insn (operands[0], copy_rtx (out));
13034 return 1; /* DONE */
13038 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
13040 /* Try a few more things with specific constants and a variable. */
13042 optab op;
13043 rtx var, orig_out, out, tmp;
13045 if (BRANCH_COST <= 2)
13046 return 0; /* FAIL */
13048 /* If one of the two operands is an interesting constant, load a
13049 constant with the above and mask it in with a logical operation. */
13051 if (CONST_INT_P (operands[2]))
13053 var = operands[3];
13054 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
13055 operands[3] = constm1_rtx, op = and_optab;
13056 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
13057 operands[3] = const0_rtx, op = ior_optab;
13058 else
13059 return 0; /* FAIL */
13061 else if (CONST_INT_P (operands[3]))
13063 var = operands[2];
13064 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
13065 operands[2] = constm1_rtx, op = and_optab;
13066 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
13067 operands[2] = const0_rtx, op = ior_optab;
13068 else
13069 return 0; /* FAIL */
13071 else
13072 return 0; /* FAIL */
13074 orig_out = operands[0];
13075 tmp = gen_reg_rtx (mode);
13076 operands[0] = tmp;
13078 /* Recurse to get the constant loaded. */
13079 if (ix86_expand_int_movcc (operands) == 0)
13080 return 0; /* FAIL */
13082 /* Mask in the interesting variable. */
13083 out = expand_binop (mode, op, var, tmp, orig_out, 0,
13084 OPTAB_WIDEN);
13085 if (!rtx_equal_p (out, orig_out))
13086 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
13088 return 1; /* DONE */
13092 * For comparison with above,
13094 * movl cf,dest
13095 * movl ct,tmp
13096 * cmpl op1,op2
13097 * cmovcc tmp,dest
13099 * Size 15.
13102 if (! nonimmediate_operand (operands[2], mode))
13103 operands[2] = force_reg (mode, operands[2]);
13104 if (! nonimmediate_operand (operands[3], mode))
13105 operands[3] = force_reg (mode, operands[3]);
13107 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
13109 rtx tmp = gen_reg_rtx (mode);
13110 emit_move_insn (tmp, operands[3]);
13111 operands[3] = tmp;
13113 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
13115 rtx tmp = gen_reg_rtx (mode);
13116 emit_move_insn (tmp, operands[2]);
13117 operands[2] = tmp;
13120 if (! register_operand (operands[2], VOIDmode)
13121 && (mode == QImode
13122 || ! register_operand (operands[3], VOIDmode)))
13123 operands[2] = force_reg (mode, operands[2]);
13125 if (mode == QImode
13126 && ! register_operand (operands[3], VOIDmode))
13127 operands[3] = force_reg (mode, operands[3]);
13129 emit_insn (compare_seq);
13130 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
13131 gen_rtx_IF_THEN_ELSE (mode,
13132 compare_op, operands[2],
13133 operands[3])));
13134 if (bypass_test)
13135 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
13136 gen_rtx_IF_THEN_ELSE (mode,
13137 bypass_test,
13138 copy_rtx (operands[3]),
13139 copy_rtx (operands[0]))));
13140 if (second_test)
13141 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
13142 gen_rtx_IF_THEN_ELSE (mode,
13143 second_test,
13144 copy_rtx (operands[2]),
13145 copy_rtx (operands[0]))));
13147 return 1; /* DONE */
13150 /* Swap, force into registers, or otherwise massage the two operands
13151 to an sse comparison with a mask result. Thus we differ a bit from
13152 ix86_prepare_fp_compare_args which expects to produce a flags result.
13154 The DEST operand exists to help determine whether to commute commutative
13155 operators. The POP0/POP1 operands are updated in place. The new
13156 comparison code is returned, or UNKNOWN if not implementable. */
13158 static enum rtx_code
13159 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
13160 rtx *pop0, rtx *pop1)
13162 rtx tmp;
13164 switch (code)
13166 case LTGT:
13167 case UNEQ:
13168 /* We have no LTGT as an operator. We could implement it with
13169 NE & ORDERED, but this requires an extra temporary. It's
13170 not clear that it's worth it. */
13171 return UNKNOWN;
13173 case LT:
13174 case LE:
13175 case UNGT:
13176 case UNGE:
13177 /* These are supported directly. */
13178 break;
13180 case EQ:
13181 case NE:
13182 case UNORDERED:
13183 case ORDERED:
13184 /* For commutative operators, try to canonicalize the destination
13185 operand to be first in the comparison - this helps reload to
13186 avoid extra moves. */
13187 if (!dest || !rtx_equal_p (dest, *pop1))
13188 break;
13189 /* FALLTHRU */
13191 case GE:
13192 case GT:
13193 case UNLE:
13194 case UNLT:
13195 /* These are not supported directly. Swap the comparison operands
13196 to transform into something that is supported. */
13197 tmp = *pop0;
13198 *pop0 = *pop1;
13199 *pop1 = tmp;
13200 code = swap_condition (code);
13201 break;
13203 default:
13204 gcc_unreachable ();
13207 return code;
13210 /* Detect conditional moves that exactly match min/max operational
13211 semantics. Note that this is IEEE safe, as long as we don't
13212 interchange the operands.
13214 Returns FALSE if this conditional move doesn't match a MIN/MAX,
13215 and TRUE if the operation is successful and instructions are emitted. */
13217 static bool
13218 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
13219 rtx cmp_op1, rtx if_true, rtx if_false)
13221 enum machine_mode mode;
13222 bool is_min;
13223 rtx tmp;
13225 if (code == LT)
13227 else if (code == UNGE)
13229 tmp = if_true;
13230 if_true = if_false;
13231 if_false = tmp;
13233 else
13234 return false;
13236 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
13237 is_min = true;
13238 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
13239 is_min = false;
13240 else
13241 return false;
13243 mode = GET_MODE (dest);
13245 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
13246 but MODE may be a vector mode and thus not appropriate. */
13247 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
13249 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
13250 rtvec v;
13252 if_true = force_reg (mode, if_true);
13253 v = gen_rtvec (2, if_true, if_false);
13254 tmp = gen_rtx_UNSPEC (mode, v, u);
13256 else
13258 code = is_min ? SMIN : SMAX;
13259 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
13262 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
13263 return true;
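/* Illustrative sketch (editor-added): why operand order matters above.
   SSE min/max return the *second* operand on an unordered compare (and on
   a +0/-0 tie), so "a < b ? a : b" maps onto minss/minsd exactly, and the
   transformation stays IEEE safe only while the operands keep their
   places.  */
static double
sse_min_semantics (double a, double b)
{
  return a < b ? a : b;  /* a NaN in either operand yields b, as minsd does */
}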
13266 /* Expand an sse vector comparison. Return the register with the result. */
13268 static rtx
13269 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
13270 rtx op_true, rtx op_false)
13272 enum machine_mode mode = GET_MODE (dest);
13273 rtx x;
13275 cmp_op0 = force_reg (mode, cmp_op0);
13276 if (!nonimmediate_operand (cmp_op1, mode))
13277 cmp_op1 = force_reg (mode, cmp_op1);
13279 if (optimize
13280 || reg_overlap_mentioned_p (dest, op_true)
13281 || reg_overlap_mentioned_p (dest, op_false))
13282 dest = gen_reg_rtx (mode);
13284 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
13285 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13287 return dest;
13290 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
13291 operations. This is used for both scalar and vector conditional moves. */
13293 static void
13294 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
13296 enum machine_mode mode = GET_MODE (dest);
13297 rtx t2, t3, x;
13299 if (op_false == CONST0_RTX (mode))
13301 op_true = force_reg (mode, op_true);
13302 x = gen_rtx_AND (mode, cmp, op_true);
13303 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13305 else if (op_true == CONST0_RTX (mode))
13307 op_false = force_reg (mode, op_false);
13308 x = gen_rtx_NOT (mode, cmp);
13309 x = gen_rtx_AND (mode, x, op_false);
13310 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
13312 else if (TARGET_SSE5)
13314 rtx pcmov = gen_rtx_SET (mode, dest,
13315 gen_rtx_IF_THEN_ELSE (mode, cmp,
13316 op_true,
13317 op_false));
13318 emit_insn (pcmov);
13320 else
13322 op_true = force_reg (mode, op_true);
13323 op_false = force_reg (mode, op_false);
13325 t2 = gen_reg_rtx (mode);
13326 if (optimize)
13327 t3 = gen_reg_rtx (mode);
13328 else
13329 t3 = dest;
13331 x = gen_rtx_AND (mode, op_true, cmp);
13332 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
13334 x = gen_rtx_NOT (mode, cmp);
13335 x = gen_rtx_AND (mode, x, op_false);
13336 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
13338 x = gen_rtx_IOR (mode, t3, t2);
13339 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
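/* Illustrative sketch (editor-added): the and/andnot/or blend emitted
   above, modeled on one 32-bit lane.  The comparison result CMP is
   all-ones or all-zero per lane, so selection is pure bit arithmetic.  */
#include <stdint.h>

static uint32_t
bitwise_select (uint32_t cmp, uint32_t t, uint32_t f)
{
  return (cmp & t) | (~cmp & f);   /* pand ; pandn ; por */
}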
13343 /* Expand a floating-point conditional move. Return true if successful. */
13345 int
13346 ix86_expand_fp_movcc (rtx operands[])
13348 enum machine_mode mode = GET_MODE (operands[0]);
13349 enum rtx_code code = GET_CODE (operands[1]);
13350 rtx tmp, compare_op, second_test, bypass_test;
13352 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
13354 enum machine_mode cmode;
13356 /* Since we've no cmove for sse registers, don't force bad register
13357 allocation just to gain access to it. Deny movcc when the
13358 comparison mode doesn't match the move mode. */
13359 cmode = GET_MODE (ix86_compare_op0);
13360 if (cmode == VOIDmode)
13361 cmode = GET_MODE (ix86_compare_op1);
13362 if (cmode != mode)
13363 return 0;
13365 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
13366 &ix86_compare_op0,
13367 &ix86_compare_op1);
13368 if (code == UNKNOWN)
13369 return 0;
13371 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
13372 ix86_compare_op1, operands[2],
13373 operands[3]))
13374 return 1;
13376 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
13377 ix86_compare_op1, operands[2], operands[3]);
13378 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
13379 return 1;
13382 /* The floating point conditional move instructions don't directly
13383 support conditions resulting from a signed integer comparison. */
13385 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
13390 if (!fcmov_comparison_operator (compare_op, VOIDmode))
13392 gcc_assert (!second_test && !bypass_test);
13393 tmp = gen_reg_rtx (QImode);
13394 ix86_expand_setcc (code, tmp);
13395 code = NE;
13396 ix86_compare_op0 = tmp;
13397 ix86_compare_op1 = const0_rtx;
13398 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
13400 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
13402 tmp = gen_reg_rtx (mode);
13403 emit_move_insn (tmp, operands[3]);
13404 operands[3] = tmp;
13406 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
13408 tmp = gen_reg_rtx (mode);
13409 emit_move_insn (tmp, operands[2]);
13410 operands[2] = tmp;
13413 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
13414 gen_rtx_IF_THEN_ELSE (mode, compare_op,
13415 operands[2], operands[3])));
13416 if (bypass_test)
13417 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
13418 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
13419 operands[3], operands[0])));
13420 if (second_test)
13421 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
13422 gen_rtx_IF_THEN_ELSE (mode, second_test,
13423 operands[2], operands[0])));
13425 return 1;
13428 /* Expand a floating-point vector conditional move; a vcond operation
13429 rather than a movcc operation. */
13431 bool
13432 ix86_expand_fp_vcond (rtx operands[])
13434 enum rtx_code code = GET_CODE (operands[3]);
13435 rtx cmp;
13437 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
13438 &operands[4], &operands[5]);
13439 if (code == UNKNOWN)
13440 return false;
13442 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
13443 operands[5], operands[1], operands[2]))
13444 return true;
13446 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
13447 operands[1], operands[2]);
13448 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
13449 return true;
13452 /* Expand a signed/unsigned integral vector conditional move. */
13454 bool
13455 ix86_expand_int_vcond (rtx operands[])
13457 enum machine_mode mode = GET_MODE (operands[0]);
13458 enum rtx_code code = GET_CODE (operands[3]);
13459 bool negate = false;
13460 rtx x, cop0, cop1;
13462 cop0 = operands[4];
13463 cop1 = operands[5];
13465 /* SSE5 supports all of the comparisons on all vector int types. */
13466 if (!TARGET_SSE5)
13468 /* Canonicalize the comparison to EQ, GT, GTU. */
13469 switch (code)
13471 case EQ:
13472 case GT:
13473 case GTU:
13474 break;
13476 case NE:
13477 case LE:
13478 case LEU:
13479 code = reverse_condition (code);
13480 negate = true;
13481 break;
13483 case GE:
13484 case GEU:
13485 code = reverse_condition (code);
13486 negate = true;
13487 /* FALLTHRU */
13489 case LT:
13490 case LTU:
13491 code = swap_condition (code);
13492 x = cop0, cop0 = cop1, cop1 = x;
13493 break;
13495 default:
13496 gcc_unreachable ();
13499 /* Only SSE4.1/SSE4.2 supports V2DImode. */
13500 if (mode == V2DImode)
13502 switch (code)
13504 case EQ:
13505 /* SSE4.1 supports EQ. */
13506 if (!TARGET_SSE4_1)
13507 return false;
13508 break;
13510 case GT:
13511 case GTU:
13512 /* SSE4.2 supports GT/GTU. */
13513 if (!TARGET_SSE4_2)
13514 return false;
13515 break;
13517 default:
13518 gcc_unreachable ();
13522 /* Unsigned parallel compare is not supported by the hardware. Play some
13523 tricks to turn this into a signed comparison. */
13524 if (code == GTU)
13526 cop0 = force_reg (mode, cop0);
13528 switch (mode)
13530 case V4SImode:
13531 case V2DImode:
13533 rtx t1, t2, mask;
13535 /* Subtract (-(INT MAX) - 1) from both operands to make
13536 them signed; flipping the sign bit this way turns the
13537 unsigned GTU into a signed GT with the same result. */
13538 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
13539 true, false);
13541 t1 = gen_reg_rtx (mode);
13542 emit_insn ((mode == V4SImode
13543 ? gen_subv4si3
13544 : gen_subv2di3) (t1, cop0, mask));
13546 t2 = gen_reg_rtx (mode);
13547 emit_insn ((mode == V4SImode
13548 ? gen_subv4si3
13549 : gen_subv2di3) (t2, cop1, mask));
13551 cop0 = t1;
13552 cop1 = t2;
13553 code = GT;
13555 break;
13560 case V16QImode:
13561 case V8HImode:
13562 /* Perform a parallel unsigned saturating subtraction. */
13563 x = gen_reg_rtx (mode);
13564 emit_insn (gen_rtx_SET (VOIDmode, x,
13565 gen_rtx_US_MINUS (mode, cop0, cop1)));
13567 cop0 = x;
13568 cop1 = CONST0_RTX (mode);
13569 code = EQ;
13570 negate = !negate;
13571 break;
13573 default:
13574 gcc_unreachable ();
13580 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
13581 operands[1+negate], operands[2-negate]);
13583 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
13584 operands[2-negate]);
13585 return true;
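/* Illustrative sketch (editor-added): the two GTU workarounds above on
   scalars.  Biasing both operands by the sign-bit constant (subtracting
   the mask and XOR-ing with it agree modulo 2^N) turns an unsigned compare
   into a signed one; for 8/16-bit lanes, a saturating subtraction is zero
   exactly when a <=u b, so EQ-against-0 negated gives GTU.  */
#include <stdbool.h>
#include <stdint.h>

static bool
gtu_via_bias (uint32_t a, uint32_t b)   /* psub(signmask) ; pcmpgt */
{
  return (int32_t) (a ^ 0x80000000u) > (int32_t) (b ^ 0x80000000u);
}

static bool
gtu_via_satsub (uint8_t a, uint8_t b)   /* psubusb ; pcmpeqb ; negate */
{
  uint8_t sat = (a > b) ? (uint8_t) (a - b) : 0;  /* saturating a - b */
  return sat != 0;
}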
13588 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
13589 true if we should do zero extension, else sign extension. HIGH_P is
13590 true if we want the N/2 high elements, else the low elements. */
13592 void
13593 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
13595 enum machine_mode imode = GET_MODE (operands[1]);
13596 rtx (*unpack)(rtx, rtx, rtx);
13597 rtx se, dest;
13599 switch (imode)
13601 case V16QImode:
13602 if (high_p)
13603 unpack = gen_vec_interleave_highv16qi;
13604 else
13605 unpack = gen_vec_interleave_lowv16qi;
13606 break;
13607 case V8HImode:
13608 if (high_p)
13609 unpack = gen_vec_interleave_highv8hi;
13610 else
13611 unpack = gen_vec_interleave_lowv8hi;
13612 break;
13613 case V4SImode:
13614 if (high_p)
13615 unpack = gen_vec_interleave_highv4si;
13616 else
13617 unpack = gen_vec_interleave_lowv4si;
13618 break;
13619 default:
13620 gcc_unreachable ();
13623 dest = gen_lowpart (imode, operands[0]);
13625 if (unsigned_p)
13626 se = force_reg (imode, CONST0_RTX (imode));
13627 else
13628 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
13629 operands[1], pc_rtx, pc_rtx);
13631 emit_insn (unpack (dest, operands[1], se));
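/* Illustrative sketch: what one widened lane looks like.  The unpack
   interleaves OPERANDS[1] with either a zero vector (zero extension) or
   with the mask computed by the 0 > x comparison above, whose lanes are
   all-ones exactly for negative elements (sign extension).  Scalar
   model for a byte lane widened to 16 bits:  */

static unsigned short
widen_lane_model (signed char x, int unsigned_p)
{
  unsigned char high = unsigned_p ? 0 : (0 > x ? 0xff : 0);
  return ((unsigned short) high << 8) | (unsigned char) x;
}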
13634 /* This function performs the same task as ix86_expand_sse_unpack,
13635 but with SSE4.1 instructions. */
13637 void
13638 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
13640 enum machine_mode imode = GET_MODE (operands[1]);
13641 rtx (*unpack)(rtx, rtx);
13642 rtx src, dest;
13644 switch (imode)
13646 case V16QImode:
13647 if (unsigned_p)
13648 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
13649 else
13650 unpack = gen_sse4_1_extendv8qiv8hi2;
13651 break;
13652 case V8HImode:
13653 if (unsigned_p)
13654 unpack = gen_sse4_1_zero_extendv4hiv4si2;
13655 else
13656 unpack = gen_sse4_1_extendv4hiv4si2;
13657 break;
13658 case V4SImode:
13659 if (unsigned_p)
13660 unpack = gen_sse4_1_zero_extendv2siv2di2;
13661 else
13662 unpack = gen_sse4_1_extendv2siv2di2;
13663 break;
13664 default:
13665 gcc_unreachable ();
13668 dest = operands[0];
13669 if (high_p)
13671 /* Shift higher 8 bytes to lower 8 bytes. */
13672 src = gen_reg_rtx (imode);
13673 emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, src),
13674 gen_lowpart (TImode, operands[1]),
13675 GEN_INT (64)));
13677 else
13678 src = operands[1];
13680 emit_insn (unpack (dest, src));
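/* Illustrative sketch: for the high elements, the code above first
   shifts the whole 128-bit register right by 8 bytes (64 bits), so that
   PMOVSX/PMOVZX, which read only the low half of their source, see the
   upper elements.  Byte-level model of that shift:  */

static unsigned char
high_half_byte_model (const unsigned char src[16], int i)
{
  return i < 8 ? src[i + 8] : 0;        /* logical right shift by 64 */
}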
13683 /* This function performs the same task as ix86_expand_sse_unpack,
13684 but with SSE5 instructions. */
13686 void
13687 ix86_expand_sse5_unpack (rtx operands[2], bool unsigned_p, bool high_p)
13689 enum machine_mode imode = GET_MODE (operands[1]);
13690 int pperm_bytes[16];
13691 int i;
13692 int h = (high_p) ? 8 : 0;
13693 int h2;
13694 int sign_extend;
13695 rtvec v = rtvec_alloc (16);
13696 rtvec vs;
13697 rtx x, p;
13698 rtx op0 = operands[0], op1 = operands[1];
13700 switch (imode)
13702 case V16QImode:
13703 vs = rtvec_alloc (8);
13704 h2 = (high_p) ? 8 : 0;
13705 for (i = 0; i < 8; i++)
13707 pperm_bytes[2*i+0] = PPERM_SRC | PPERM_SRC2 | i | h;
13708 pperm_bytes[2*i+1] = ((unsigned_p)
13709 ? PPERM_ZERO
13710 : PPERM_SIGN | PPERM_SRC2 | i | h);
13713 for (i = 0; i < 16; i++)
13714 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13716 for (i = 0; i < 8; i++)
13717 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
13719 p = gen_rtx_PARALLEL (VOIDmode, vs);
13720 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13721 if (unsigned_p)
13722 emit_insn (gen_sse5_pperm_zero_v16qi_v8hi (op0, op1, p, x));
13723 else
13724 emit_insn (gen_sse5_pperm_sign_v16qi_v8hi (op0, op1, p, x));
13725 break;
13727 case V8HImode:
13728 vs = rtvec_alloc (4);
13729 h2 = (high_p) ? 4 : 0;
13730 for (i = 0; i < 4; i++)
13732 sign_extend = ((unsigned_p)
13733 ? PPERM_ZERO
13734 : PPERM_SIGN | PPERM_SRC2 | ((2*i) + 1 + h));
13735 pperm_bytes[4*i+0] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 0 + h);
13736 pperm_bytes[4*i+1] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 1 + h);
13737 pperm_bytes[4*i+2] = sign_extend;
13738 pperm_bytes[4*i+3] = sign_extend;
13741 for (i = 0; i < 16; i++)
13742 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13744 for (i = 0; i < 4; i++)
13745 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
13747 p = gen_rtx_PARALLEL (VOIDmode, vs);
13748 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13749 if (unsigned_p)
13750 emit_insn (gen_sse5_pperm_zero_v8hi_v4si (op0, op1, p, x));
13751 else
13752 emit_insn (gen_sse5_pperm_sign_v8hi_v4si (op0, op1, p, x));
13753 break;
13755 case V4SImode:
13756 vs = rtvec_alloc (2);
13757 h2 = (high_p) ? 2 : 0;
13758 for (i = 0; i < 2; i++)
13760 sign_extend = ((unsigned_p)
13761 ? PPERM_ZERO
13762 : PPERM_SIGN | PPERM_SRC2 | ((4*i) + 3 + h));
13763 pperm_bytes[8*i+0] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 0 + h);
13764 pperm_bytes[8*i+1] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 1 + h);
13765 pperm_bytes[8*i+2] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 2 + h);
13766 pperm_bytes[8*i+3] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 3 + h);
13767 pperm_bytes[8*i+4] = sign_extend;
13768 pperm_bytes[8*i+5] = sign_extend;
13769 pperm_bytes[8*i+6] = sign_extend;
13770 pperm_bytes[8*i+7] = sign_extend;
13773 for (i = 0; i < 16; i++)
13774 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13776 for (i = 0; i < 2; i++)
13777 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
13779 p = gen_rtx_PARALLEL (VOIDmode, vs);
13780 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13781 if (unsigned_p)
13782 emit_insn (gen_sse5_pperm_zero_v4si_v2di (op0, op1, p, x));
13783 else
13784 emit_insn (gen_sse5_pperm_sign_v4si_v2di (op0, op1, p, x));
13785 break;
13787 default:
13788 gcc_unreachable ();
13791 return;
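/* Illustrative sketch: element-level result of the V8HImode -> V4SImode
   case above, abstracting away the PPERM selector encoding.  Each
   destination dword holds one source word either zero- or sign-
   extended; HIGH_P selects which four source words are used.  */

static void
v8hi_unpack_model (const unsigned short src[8], unsigned int dst[4],
                   int unsigned_p, int high_p)
{
  int i, h = high_p ? 4 : 0;
  for (i = 0; i < 4; i++)
    dst[i] = unsigned_p
             ? (unsigned int) src[i + h]
             : (unsigned int) (int) (short) src[i + h];
}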
13794 /* Pack the low bits of each element of OPERANDS[1] and OPERANDS[2] into the
13795 next narrower integer vector type; OPERANDS[1] gives the low half of the result. */
13796 void
13797 ix86_expand_sse5_pack (rtx operands[3])
13799 enum machine_mode imode = GET_MODE (operands[0]);
13800 int pperm_bytes[16];
13801 int i;
13802 rtvec v = rtvec_alloc (16);
13803 rtx x;
13804 rtx op0 = operands[0];
13805 rtx op1 = operands[1];
13806 rtx op2 = operands[2];
13808 switch (imode)
13810 case V16QImode:
13811 for (i = 0; i < 8; i++)
13813 pperm_bytes[i+0] = PPERM_SRC | PPERM_SRC1 | (i*2);
13814 pperm_bytes[i+8] = PPERM_SRC | PPERM_SRC2 | (i*2);
13817 for (i = 0; i < 16; i++)
13818 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13820 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13821 emit_insn (gen_sse5_pperm_pack_v8hi_v16qi (op0, op1, op2, x));
13822 break;
13824 case V8HImode:
13825 for (i = 0; i < 4; i++)
13827 pperm_bytes[(2*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 0);
13828 pperm_bytes[(2*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 1);
13829 pperm_bytes[(2*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 0);
13830 pperm_bytes[(2*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 1);
13833 for (i = 0; i < 16; i++)
13834 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13836 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13837 emit_insn (gen_sse5_pperm_pack_v4si_v8hi (op0, op1, op2, x));
13838 break;
13840 case V4SImode:
13841 for (i = 0; i < 2; i++)
13843 pperm_bytes[(4*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 0);
13844 pperm_bytes[(4*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 1);
13845 pperm_bytes[(4*i)+2] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 2);
13846 pperm_bytes[(4*i)+3] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 3);
13847 pperm_bytes[(4*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 0);
13848 pperm_bytes[(4*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 1);
13849 pperm_bytes[(4*i)+10] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 2);
13850 pperm_bytes[(4*i)+11] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 3);
13853 for (i = 0; i < 16; i++)
13854 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
13856 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
13857 emit_insn (gen_sse5_pperm_pack_v2di_v4si (op0, op1, op2, x));
13858 break;
13860 default:
13861 gcc_unreachable ();
13864 return;
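/* Illustrative sketch: element-level result of the V8HImode case above
   (two V4SI vectors packed into one V8HI), again abstracting away the
   selector encoding.  Each wider element is truncated; OPERANDS[1]
   fills the low half of the result and OPERANDS[2] the high half.  */

static void
v4si_pack_model (const unsigned int op1[4], const unsigned int op2[4],
                 unsigned short dst[8])
{
  int i;
  for (i = 0; i < 4; i++)
    {
      dst[i]     = (unsigned short) op1[i];     /* low 16 bits only */
      dst[i + 4] = (unsigned short) op2[i];
    }
}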
13867 /* Expand conditional increment or decrement using adc/sbb instructions.
13868 The default case using setcc followed by the conditional move can be
13869 done by generic code. */
13870 int
13871 ix86_expand_int_addcc (rtx operands[])
13873 enum rtx_code code = GET_CODE (operands[1]);
13874 rtx compare_op;
13875 rtx val = const0_rtx;
13876 bool fpcmp = false;
13877 enum machine_mode mode = GET_MODE (operands[0]);
13879 if (operands[3] != const1_rtx
13880 && operands[3] != constm1_rtx)
13881 return 0;
13882 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
13883 ix86_compare_op1, &compare_op))
13884 return 0;
13885 code = GET_CODE (compare_op);
13887 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
13888 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
13890 fpcmp = true;
13891 code = ix86_fp_compare_code_to_integer (code);
13894 if (code != LTU)
13896 val = constm1_rtx;
13897 if (fpcmp)
13898 PUT_CODE (compare_op,
13899 reverse_condition_maybe_unordered
13900 (GET_CODE (compare_op)));
13901 else
13902 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
13904 PUT_MODE (compare_op, mode);
13906 /* Construct either adc or sbb insn. */
13907 if ((code == LTU) == (operands[3] == constm1_rtx))
13909 switch (GET_MODE (operands[0]))
13911 case QImode:
13912 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
13913 break;
13914 case HImode:
13915 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
13916 break;
13917 case SImode:
13918 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
13919 break;
13920 case DImode:
13921 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
13922 break;
13923 default:
13924 gcc_unreachable ();
13927 else
13929 switch (GET_MODE (operands[0]))
13931 case QImode:
13932 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
13933 break;
13934 case HImode:
13935 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
13936 break;
13937 case SImode:
13938 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
13939 break;
13940 case DImode:
13941 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
13942 break;
13943 default:
13944 gcc_unreachable ();
13947 return 1; /* DONE */
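/* Illustrative sketch: the transformation above in scalar C.  A
   comparison expressible as a carry (here a <u b) turns a conditional
   increment into a flag-based add, e.g.
       if (a < b) y++;    =>    cmpl %b, %a
                                adcl $0, %y
   (assumed-typical output; exact operands and registers vary).  */

static unsigned int
addcc_model (unsigned int a, unsigned int b, unsigned int y)
{
  return y + (a < b);   /* adc adds the borrow produced by a - b */
}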
13951 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
13952 works for floating point parameters and non-offsettable memories.
13953 For pushes, it returns just stack offsets; the values will be saved
13954 in the right order. At most three parts are generated. */
13956 static int
13957 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
13959 int size;
13961 if (!TARGET_64BIT)
13962 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
13963 else
13964 size = (GET_MODE_SIZE (mode) + 4) / 8;
13966 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
13967 gcc_assert (size >= 2 && size <= 3);
13969 /* Optimize constant pool reference to immediates. This is used by fp
13970 moves that force all constants to memory to allow combining. */
13971 if (MEM_P (operand) && MEM_READONLY_P (operand))
13973 rtx tmp = maybe_get_pool_constant (operand);
13974 if (tmp)
13975 operand = tmp;
13978 if (MEM_P (operand) && !offsettable_memref_p (operand))
13980 /* The only non-offsettable memories we handle are pushes. */
13981 int ok = push_operand (operand, VOIDmode);
13983 gcc_assert (ok);
13985 operand = copy_rtx (operand);
13986 PUT_MODE (operand, Pmode);
13987 parts[0] = parts[1] = parts[2] = operand;
13988 return size;
13991 if (GET_CODE (operand) == CONST_VECTOR)
13993 enum machine_mode imode = int_mode_for_mode (mode);
13994 /* Caution: if we looked through a constant pool memory above,
13995 the operand may actually have a different mode now. That's
13996 ok, since we want to pun this all the way back to an integer. */
13997 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
13998 gcc_assert (operand != NULL);
13999 mode = imode;
14002 if (!TARGET_64BIT)
14004 if (mode == DImode)
14005 split_di (&operand, 1, &parts[0], &parts[1]);
14006 else
14008 if (REG_P (operand))
14010 gcc_assert (reload_completed);
14011 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
14012 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
14013 if (size == 3)
14014 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
14016 else if (offsettable_memref_p (operand))
14018 operand = adjust_address (operand, SImode, 0);
14019 parts[0] = operand;
14020 parts[1] = adjust_address (operand, SImode, 4);
14021 if (size == 3)
14022 parts[2] = adjust_address (operand, SImode, 8);
14024 else if (GET_CODE (operand) == CONST_DOUBLE)
14026 REAL_VALUE_TYPE r;
14027 long l[4];
14029 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
14030 switch (mode)
14032 case XFmode:
14033 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
14034 parts[2] = gen_int_mode (l[2], SImode);
14035 break;
14036 case DFmode:
14037 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
14038 break;
14039 default:
14040 gcc_unreachable ();
14042 parts[1] = gen_int_mode (l[1], SImode);
14043 parts[0] = gen_int_mode (l[0], SImode);
14045 else
14046 gcc_unreachable ();
14049 else
14051 if (mode == TImode)
14052 split_ti (&operand, 1, &parts[0], &parts[1]);
14053 if (mode == XFmode || mode == TFmode)
14055 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
14056 if (REG_P (operand))
14058 gcc_assert (reload_completed);
14059 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
14060 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
14062 else if (offsettable_memref_p (operand))
14064 operand = adjust_address (operand, DImode, 0);
14065 parts[0] = operand;
14066 parts[1] = adjust_address (operand, upper_mode, 8);
14068 else if (GET_CODE (operand) == CONST_DOUBLE)
14070 REAL_VALUE_TYPE r;
14071 long l[4];
14073 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
14074 real_to_target (l, &r, mode);
14076 /* Do not use shift by 32 to avoid warning on 32bit systems. */
14077 if (HOST_BITS_PER_WIDE_INT >= 64)
14078 parts[0]
14079 = gen_int_mode
14080 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
14081 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
14082 DImode);
14083 else
14084 parts[0] = immed_double_const (l[0], l[1], DImode);
14086 if (upper_mode == SImode)
14087 parts[1] = gen_int_mode (l[2], SImode);
14088 else if (HOST_BITS_PER_WIDE_INT >= 64)
14089 parts[1]
14090 = gen_int_mode
14091 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
14092 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
14093 DImode);
14094 else
14095 parts[1] = immed_double_const (l[2], l[3], DImode);
14097 else
14098 gcc_unreachable ();
14102 return size;
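/* Illustrative worked example for the size computation above: on a
   32-bit target DFmode (8 bytes) yields 8 / 4 = 2 SImode parts and
   XFmode always yields 3, while on a 64-bit target a 16-byte XFmode or
   TFmode value yields (16 + 4) / 8 = 2 parts.  Minimal model:  */

static int
split_parts_model (int mode_size, int xf_p, int target_64bit)
{
  if (!target_64bit)
    return xf_p ? 3 : mode_size / 4;
  return (mode_size + 4) / 8;
}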
14105 /* Emit insns to perform a move or push of DI, DF, and XF values.
14106 Operands 2-4 are filled with the destination parts in the correct
14107 order; operands 5-7 are filled with the corresponding source parts. */
14110 void
14111 ix86_split_long_move (rtx operands[])
14113 rtx part[2][3];
14114 int nparts;
14115 int push = 0;
14116 int collisions = 0;
14117 enum machine_mode mode = GET_MODE (operands[0]);
14119 /* The DFmode expanders may ask us to move a double.
14120 For a 64-bit target this is a single move. By hiding the fact
14121 here we simplify the i386.md splitters. */
14122 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
14124 /* Optimize constant pool reference to immediates. This is used by
14125 fp moves that force all constants to memory to allow combining. */
14127 if (MEM_P (operands[1])
14128 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
14129 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
14130 operands[1] = get_pool_constant (XEXP (operands[1], 0));
14131 if (push_operand (operands[0], VOIDmode))
14133 operands[0] = copy_rtx (operands[0]);
14134 PUT_MODE (operands[0], Pmode);
14136 else
14137 operands[0] = gen_lowpart (DImode, operands[0]);
14138 operands[1] = gen_lowpart (DImode, operands[1]);
14139 emit_move_insn (operands[0], operands[1]);
14140 return;
14143 /* The only non-offsettable memory we handle is push. */
14144 if (push_operand (operands[0], VOIDmode))
14145 push = 1;
14146 else
14147 gcc_assert (!MEM_P (operands[0])
14148 || offsettable_memref_p (operands[0]));
14150 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
14151 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
14153 /* When emitting a push, take care with source operands on the stack. */
14154 if (push && MEM_P (operands[1])
14155 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
14157 if (nparts == 3)
14158 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
14159 XEXP (part[1][2], 0));
14160 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
14161 XEXP (part[1][1], 0));
14164 /* We need to do the copy in the right order in case an address register
14165 of the source overlaps the destination. */
14166 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
14168 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
14169 collisions++;
14170 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
14171 collisions++;
14172 if (nparts == 3
14173 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
14174 collisions++;
14176 /* Collision in the middle part can be handled by reordering. */
14177 if (collisions == 1 && nparts == 3
14178 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
14180 rtx tmp;
14181 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
14182 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
14185 /* If there are more collisions, we can't handle them by reordering.
14186 Do an lea to the last part and use only one colliding move. */
14187 else if (collisions > 1)
14189 rtx base;
14191 collisions = 1;
14193 base = part[0][nparts - 1];
14195 /* Handle the case when the last part isn't valid for lea.
14196 This happens in 64-bit mode when storing the 12-byte XFmode value. */
14197 if (GET_MODE (base) != Pmode)
14198 base = gen_rtx_REG (Pmode, REGNO (base));
14200 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
14201 part[1][0] = replace_equiv_address (part[1][0], base);
14202 part[1][1] = replace_equiv_address (part[1][1],
14203 plus_constant (base, UNITS_PER_WORD));
14204 if (nparts == 3)
14205 part[1][2] = replace_equiv_address (part[1][2],
14206 plus_constant (base, 8));
14210 if (push)
14212 if (!TARGET_64BIT)
14214 if (nparts == 3)
14216 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
14217 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
14218 emit_move_insn (part[0][2], part[1][2]);
14221 else
14223 /* In 64-bit mode we don't have a 32-bit push available. In case this is
14224 a register, it is OK - we will just use the larger counterpart. We also
14225 retype the memory - these come from an attempt to avoid a REX prefix
14226 when moving the second half of a TFmode value. */
14227 if (GET_MODE (part[1][1]) == SImode)
14229 switch (GET_CODE (part[1][1]))
14231 case MEM:
14232 part[1][1] = adjust_address (part[1][1], DImode, 0);
14233 break;
14235 case REG:
14236 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
14237 break;
14239 default:
14240 gcc_unreachable ();
14243 if (GET_MODE (part[1][0]) == SImode)
14244 part[1][0] = part[1][1];
14247 emit_move_insn (part[0][1], part[1][1]);
14248 emit_move_insn (part[0][0], part[1][0]);
14249 return;
14252 /* Choose the correct order so we do not overwrite the source before it is copied. */
14253 if ((REG_P (part[0][0])
14254 && REG_P (part[1][1])
14255 && (REGNO (part[0][0]) == REGNO (part[1][1])
14256 || (nparts == 3
14257 && REGNO (part[0][0]) == REGNO (part[1][2]))))
14258 || (collisions > 0
14259 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
14261 if (nparts == 3)
14263 operands[2] = part[0][2];
14264 operands[3] = part[0][1];
14265 operands[4] = part[0][0];
14266 operands[5] = part[1][2];
14267 operands[6] = part[1][1];
14268 operands[7] = part[1][0];
14270 else
14272 operands[2] = part[0][1];
14273 operands[3] = part[0][0];
14274 operands[5] = part[1][1];
14275 operands[6] = part[1][0];
14278 else
14280 if (nparts == 3)
14282 operands[2] = part[0][0];
14283 operands[3] = part[0][1];
14284 operands[4] = part[0][2];
14285 operands[5] = part[1][0];
14286 operands[6] = part[1][1];
14287 operands[7] = part[1][2];
14289 else
14291 operands[2] = part[0][0];
14292 operands[3] = part[0][1];
14293 operands[5] = part[1][0];
14294 operands[6] = part[1][1];
14298 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
14299 if (optimize_size)
14301 if (CONST_INT_P (operands[5])
14302 && operands[5] != const0_rtx
14303 && REG_P (operands[2]))
14305 if (CONST_INT_P (operands[6])
14306 && INTVAL (operands[6]) == INTVAL (operands[5]))
14307 operands[6] = operands[2];
14309 if (nparts == 3
14310 && CONST_INT_P (operands[7])
14311 && INTVAL (operands[7]) == INTVAL (operands[5]))
14312 operands[7] = operands[2];
14315 if (nparts == 3
14316 && CONST_INT_P (operands[6])
14317 && operands[6] != const0_rtx
14318 && REG_P (operands[3])
14319 && CONST_INT_P (operands[7])
14320 && INTVAL (operands[7]) == INTVAL (operands[6]))
14321 operands[7] = operands[3];
14324 emit_move_insn (operands[2], operands[5]);
14325 emit_move_insn (operands[3], operands[6]);
14326 if (nparts == 3)
14327 emit_move_insn (operands[4], operands[7]);
14329 return;
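/* Illustrative sketch of the collision handling above.  For a 32-bit
   DImode load whose address register is also the low destination,
   (eax:edx) <- mem[eax], the safe order writes the overlapping
   register last:
       movl 4(%eax), %edx
       movl (%eax), %eax
   With more than one collision, an LEA of the address into the last
   destination part is emitted first, leaving one colliding move.
   Scalar model of the ordering rule:  */

static void
ordered_copy_model (int low_collides_p, unsigned int dst[2],
                    const unsigned int src[2])
{
  /* Write the part that overlaps the source address last.  */
  if (low_collides_p)
    {
      dst[1] = src[1];
      dst[0] = src[0];
    }
  else
    {
      dst[0] = src[0];
      dst[1] = src[1];
    }
}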
14332 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
14333 left shift by a constant, either using a single shift or
14334 a sequence of add instructions. */
14336 static void
14337 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
14339 if (count == 1)
14341 emit_insn ((mode == DImode
14342 ? gen_addsi3
14343 : gen_adddi3) (operand, operand, operand));
14345 else if (!optimize_size
14346 && count * ix86_cost->add <= ix86_cost->shift_const)
14348 int i;
14349 for (i = 0; i < count; i++)
14351 emit_insn ((mode == DImode
14352 ? gen_addsi3
14353 : gen_adddi3) (operand, operand, operand));
14356 else
14357 emit_insn ((mode == DImode
14358 ? gen_ashlsi3
14359 : gen_ashldi3) (operand, operand, GEN_INT (count)));
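/* Illustrative sketch: the choice above in scalar form.  A shift by one
   is emitted as an add (shorter and often faster); small counts become
   a chain of adds when count * cost(add) does not exceed the cost of a
   single shift by a constant.  */

static unsigned int
ashl_const_model (unsigned int x, int count)
{
  while (count-- > 0)
    x += x;             /* each add doubles, i.e. shifts left by 1 */
  return x;
}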
14362 void
14363 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
14365 rtx low[2], high[2];
14366 int count;
14367 const int single_width = mode == DImode ? 32 : 64;
14369 if (CONST_INT_P (operands[2]))
14371 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
14372 count = INTVAL (operands[2]) & (single_width * 2 - 1);
14374 if (count >= single_width)
14376 emit_move_insn (high[0], low[1]);
14377 emit_move_insn (low[0], const0_rtx);
14379 if (count > single_width)
14380 ix86_expand_ashl_const (high[0], count - single_width, mode);
14382 else
14384 if (!rtx_equal_p (operands[0], operands[1]))
14385 emit_move_insn (operands[0], operands[1]);
14386 emit_insn ((mode == DImode
14387 ? gen_x86_shld_1
14388 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
14389 ix86_expand_ashl_const (low[0], count, mode);
14391 return;
14394 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
14396 if (operands[1] == const1_rtx)
14398 /* Assuming we've chosen QImode-capable registers, then 1 << N
14399 can be done with two 32/64-bit shifts, no branches, no cmoves. */
14400 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
14402 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
14404 ix86_expand_clear (low[0]);
14405 ix86_expand_clear (high[0]);
14406 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
14408 d = gen_lowpart (QImode, low[0]);
14409 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
14410 s = gen_rtx_EQ (QImode, flags, const0_rtx);
14411 emit_insn (gen_rtx_SET (VOIDmode, d, s));
14413 d = gen_lowpart (QImode, high[0]);
14414 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
14415 s = gen_rtx_NE (QImode, flags, const0_rtx);
14416 emit_insn (gen_rtx_SET (VOIDmode, d, s));
14419 /* Otherwise, we can get the same results by manually performing
14420 a bit extract operation on bit 5/6, and then performing the two
14421 shifts. The two methods of getting 0/1 into low/high are exactly
14422 the same size. Avoiding the shift in the bit extract case helps
14423 pentium4 a bit; no one else seems to care much either way. */
14424 else
14426 rtx x;
14428 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
14429 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
14430 else
14431 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
14432 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
14434 emit_insn ((mode == DImode
14435 ? gen_lshrsi3
14436 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
14437 emit_insn ((mode == DImode
14438 ? gen_andsi3
14439 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
14440 emit_move_insn (low[0], high[0]);
14441 emit_insn ((mode == DImode
14442 ? gen_xorsi3
14443 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
14446 emit_insn ((mode == DImode
14447 ? gen_ashlsi3
14448 : gen_ashldi3) (low[0], low[0], operands[2]));
14449 emit_insn ((mode == DImode
14450 ? gen_ashlsi3
14451 : gen_ashldi3) (high[0], high[0], operands[2]));
14452 return;
14455 if (operands[1] == constm1_rtx)
14457 /* For -1 << N, we can avoid the shld instruction, because we
14458 know that we're shifting 0...31/63 ones into a -1. */
14459 emit_move_insn (low[0], constm1_rtx);
14460 if (optimize_size)
14461 emit_move_insn (high[0], low[0]);
14462 else
14463 emit_move_insn (high[0], constm1_rtx);
14465 else
14467 if (!rtx_equal_p (operands[0], operands[1]))
14468 emit_move_insn (operands[0], operands[1]);
14470 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
14471 emit_insn ((mode == DImode
14472 ? gen_x86_shld_1
14473 : gen_x86_64_shld) (high[0], low[0], operands[2]));
14476 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
14478 if (TARGET_CMOVE && scratch)
14480 ix86_expand_clear (scratch);
14481 emit_insn ((mode == DImode
14482 ? gen_x86_shift_adj_1
14483 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
14485 else
14486 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
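/* Illustrative sketch: the net effect of the double-word left shift
   above, modeled for DImode split into two 32-bit halves.  The constant
   path emits this directly; the variable path reaches the same result
   via SHLD/SHL plus the shift_adj fixup for counts of 32 or more.  */

static void
dshl_model (unsigned int *lo, unsigned int *hi, unsigned int count)
{
  count &= 63;
  if (count >= 32)
    {
      *hi = *lo << (count - 32);
      *lo = 0;
    }
  else if (count > 0)
    {
      *hi = (*hi << count) | (*lo >> (32 - count));     /* shld */
      *lo <<= count;
    }
}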
14489 void
14490 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
14492 rtx low[2], high[2];
14493 int count;
14494 const int single_width = mode == DImode ? 32 : 64;
14496 if (CONST_INT_P (operands[2]))
14498 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
14499 count = INTVAL (operands[2]) & (single_width * 2 - 1);
14501 if (count == single_width * 2 - 1)
14503 emit_move_insn (high[0], high[1]);
14504 emit_insn ((mode == DImode
14505 ? gen_ashrsi3
14506 : gen_ashrdi3) (high[0], high[0],
14507 GEN_INT (single_width - 1)));
14508 emit_move_insn (low[0], high[0]);
14511 else if (count >= single_width)
14513 emit_move_insn (low[0], high[1]);
14514 emit_move_insn (high[0], low[0]);
14515 emit_insn ((mode == DImode
14516 ? gen_ashrsi3
14517 : gen_ashrdi3) (high[0], high[0],
14518 GEN_INT (single_width - 1)));
14519 if (count > single_width)
14520 emit_insn ((mode == DImode
14521 ? gen_ashrsi3
14522 : gen_ashrdi3) (low[0], low[0],
14523 GEN_INT (count - single_width)));
14525 else
14527 if (!rtx_equal_p (operands[0], operands[1]))
14528 emit_move_insn (operands[0], operands[1]);
14529 emit_insn ((mode == DImode
14530 ? gen_x86_shrd_1
14531 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
14532 emit_insn ((mode == DImode
14533 ? gen_ashrsi3
14534 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
14537 else
14539 if (!rtx_equal_p (operands[0], operands[1]))
14540 emit_move_insn (operands[0], operands[1]);
14542 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
14544 emit_insn ((mode == DImode
14545 ? gen_x86_shrd_1
14546 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
14547 emit_insn ((mode == DImode
14548 ? gen_ashrsi3
14549 : gen_ashrdi3) (high[0], high[0], operands[2]));
14551 if (TARGET_CMOVE && scratch)
14553 emit_move_insn (scratch, high[0]);
14554 emit_insn ((mode == DImode
14555 ? gen_ashrsi3
14556 : gen_ashrdi3) (scratch, scratch,
14557 GEN_INT (single_width - 1)));
14558 emit_insn ((mode == DImode
14559 ? gen_x86_shift_adj_1
14560 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
14561 scratch));
14563 else
14564 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
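/* Illustrative sketch: the arithmetic right shift above for DImode in
   two 32-bit halves.  Note the count == 63 fast path: every result bit
   is a copy of the sign, so both halves are just HIGH >> 31 (assuming
   the usual arithmetic shift of signed values).  */

static void
dashr_model (int *lo, int *hi, unsigned int count)
{
  count &= 63;
  if (count == 63)
    *lo = *hi = *hi >> 31;      /* sign replicated everywhere */
  else if (count >= 32)
    {
      *lo = *hi >> (count - 32);
      *hi = *hi >> 31;          /* high half becomes pure sign bits */
    }
  else if (count > 0)
    {
      *lo = (int) (((unsigned int) *lo >> count)
                   | ((unsigned int) *hi << (32 - count)));     /* shrd */
      *hi >>= count;
    }
}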
14568 void
14569 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
14571 rtx low[2], high[2];
14572 int count;
14573 const int single_width = mode == DImode ? 32 : 64;
14575 if (CONST_INT_P (operands[2]))
14577 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
14578 count = INTVAL (operands[2]) & (single_width * 2 - 1);
14580 if (count >= single_width)
14582 emit_move_insn (low[0], high[1]);
14583 ix86_expand_clear (high[0]);
14585 if (count > single_width)
14586 emit_insn ((mode == DImode
14587 ? gen_lshrsi3
14588 : gen_lshrdi3) (low[0], low[0],
14589 GEN_INT (count - single_width)));
14591 else
14593 if (!rtx_equal_p (operands[0], operands[1]))
14594 emit_move_insn (operands[0], operands[1]);
14595 emit_insn ((mode == DImode
14596 ? gen_x86_shrd_1
14597 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
14598 emit_insn ((mode == DImode
14599 ? gen_lshrsi3
14600 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
14603 else
14605 if (!rtx_equal_p (operands[0], operands[1]))
14606 emit_move_insn (operands[0], operands[1]);
14608 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
14610 emit_insn ((mode == DImode
14611 ? gen_x86_shrd_1
14612 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
14613 emit_insn ((mode == DImode
14614 ? gen_lshrsi3
14615 : gen_lshrdi3) (high[0], high[0], operands[2]));
14617 /* Heh. By reversing the arguments, we can reuse this pattern. */
14618 if (TARGET_CMOVE && scratch)
14620 ix86_expand_clear (scratch);
14621 emit_insn ((mode == DImode
14622 ? gen_x86_shift_adj_1
14623 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
14624 scratch));
14626 else
14627 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
14631 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
14632 static void
14633 predict_jump (int prob)
14635 rtx insn = get_last_insn ();
14636 gcc_assert (JUMP_P (insn));
14637 REG_NOTES (insn)
14638 = gen_rtx_EXPR_LIST (REG_BR_PROB,
14639 GEN_INT (prob),
14640 REG_NOTES (insn));
14643 /* Helper function for the string operations below. Test VARIABLE for
14644 whether it is aligned to VALUE bytes. If so, jump to the label. */
14645 static rtx
14646 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
14648 rtx label = gen_label_rtx ();
14649 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
14650 if (GET_MODE (variable) == DImode)
14651 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
14652 else
14653 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
14654 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
14655 1, label);
14656 if (epilogue)
14657 predict_jump (REG_BR_PROB_BASE * 50 / 100);
14658 else
14659 predict_jump (REG_BR_PROB_BASE * 90 / 100);
14660 return label;
14663 /* Decrease COUNTREG by VALUE. */
14664 static void
14665 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
14667 if (GET_MODE (countreg) == DImode)
14668 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
14669 else
14670 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
14673 /* Zero extend the possibly SImode EXP to a Pmode register. */
14674 rtx
14675 ix86_zero_extend_to_Pmode (rtx exp)
14677 rtx r;
14678 if (GET_MODE (exp) == VOIDmode)
14679 return force_reg (Pmode, exp);
14680 if (GET_MODE (exp) == Pmode)
14681 return copy_to_mode_reg (Pmode, exp);
14682 r = gen_reg_rtx (Pmode);
14683 emit_insn (gen_zero_extendsidi2 (r, exp));
14684 return r;
14687 /* Divide COUNTREG by SCALE. */
14688 static rtx
14689 scale_counter (rtx countreg, int scale)
14691 rtx sc;
14692 rtx piece_size_mask;
14694 if (scale == 1)
14695 return countreg;
14696 if (CONST_INT_P (countreg))
14697 return GEN_INT (INTVAL (countreg) / scale);
14698 gcc_assert (REG_P (countreg));
14700 piece_size_mask = GEN_INT (scale - 1);
14701 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
14702 GEN_INT (exact_log2 (scale)),
14703 NULL, 1, OPTAB_DIRECT);
14704 return sc;
14707 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
14708 DImode for constant loop counts. */
14710 static enum machine_mode
14711 counter_mode (rtx count_exp)
14713 if (GET_MODE (count_exp) != VOIDmode)
14714 return GET_MODE (count_exp);
14715 if (GET_CODE (count_exp) != CONST_INT)
14716 return Pmode;
14717 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
14718 return DImode;
14719 return SImode;
14722 /* When SRCPTR is non-NULL, output a simple loop to copy the memory
14723 pointed to by SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL
14724 times; the overall size is COUNT, specified in bytes. When SRCPTR is
14725 NULL, output the equivalent loop to set memory by VALUE (supposed to
14727 be in MODE). The size is rounded down to a whole number of chunks.
14728 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
14731 static void
14732 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
14733 rtx destptr, rtx srcptr, rtx value,
14734 rtx count, enum machine_mode mode, int unroll,
14735 int expected_size)
14737 rtx out_label, top_label, iter, tmp;
14738 enum machine_mode iter_mode = counter_mode (count);
14739 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
14740 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
14741 rtx size;
14742 rtx x_addr;
14743 rtx y_addr;
14744 int i;
14746 top_label = gen_label_rtx ();
14747 out_label = gen_label_rtx ();
14748 iter = gen_reg_rtx (iter_mode);
14750 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
14751 NULL, 1, OPTAB_DIRECT);
14752 /* Those two should combine. */
14753 if (piece_size == const1_rtx)
14755 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
14756 true, out_label);
14757 predict_jump (REG_BR_PROB_BASE * 10 / 100);
14759 emit_move_insn (iter, const0_rtx);
14761 emit_label (top_label);
14763 tmp = convert_modes (Pmode, iter_mode, iter, true);
14764 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
14765 destmem = change_address (destmem, mode, x_addr);
14767 if (srcmem)
14769 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
14770 srcmem = change_address (srcmem, mode, y_addr);
14772 /* When unrolling for chips that reorder memory reads and writes,
14773 we can save registers by using a single temporary.
14774 Also, using 4 temporaries is overkill in 32-bit mode. */
14775 if (!TARGET_64BIT && 0)
14777 for (i = 0; i < unroll; i++)
14779 if (i)
14781 destmem =
14782 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
14783 srcmem =
14784 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
14786 emit_move_insn (destmem, srcmem);
14789 else
14791 rtx tmpreg[4];
14792 gcc_assert (unroll <= 4);
14793 for (i = 0; i < unroll; i++)
14795 tmpreg[i] = gen_reg_rtx (mode);
14796 if (i)
14798 srcmem =
14799 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
14801 emit_move_insn (tmpreg[i], srcmem);
14803 for (i = 0; i < unroll; i++)
14805 if (i)
14807 destmem =
14808 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
14810 emit_move_insn (destmem, tmpreg[i]);
14814 else
14815 for (i = 0; i < unroll; i++)
14817 if (i)
14818 destmem =
14819 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
14820 emit_move_insn (destmem, value);
14823 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
14824 true, OPTAB_LIB_WIDEN);
14825 if (tmp != iter)
14826 emit_move_insn (iter, tmp);
14828 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
14829 true, top_label);
14830 if (expected_size != -1)
14832 expected_size /= GET_MODE_SIZE (mode) * unroll;
14833 if (expected_size == 0)
14834 predict_jump (0);
14835 else if (expected_size > REG_BR_PROB_BASE)
14836 predict_jump (REG_BR_PROB_BASE - 1);
14837 else
14838 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
14840 else
14841 predict_jump (REG_BR_PROB_BASE * 80 / 100);
14842 iter = ix86_zero_extend_to_Pmode (iter);
14843 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
14844 true, OPTAB_LIB_WIDEN);
14845 if (tmp != destptr)
14846 emit_move_insn (destptr, tmp);
14847 if (srcptr)
14849 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
14850 true, OPTAB_LIB_WIDEN);
14851 if (tmp != srcptr)
14852 emit_move_insn (srcptr, tmp);
14854 emit_label (out_label);
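/* Illustrative sketch of the loop laid out above, in scalar C.  CHUNK
   stands for GET_MODE_SIZE (MODE) * UNROLL and is a power of two.  The
   byte count is rounded down to whole chunks, both pointers are
   advanced at the end, and the caller's epilogue handles the tail.  */

static void
copy_loop_model (char **destptr, const char **srcptr,
                 unsigned long count, unsigned long chunk)
{
  unsigned long size = count & ~(chunk - 1);
  unsigned long iter, i;
  for (iter = 0; iter < size; iter += chunk)
    for (i = 0; i < chunk; i++)         /* the unrolled body */
      (*destptr)[iter + i] = (*srcptr)[iter + i];
  *destptr += size;
  *srcptr += size;
}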
14857 /* Output a "rep; mov" instruction.
14858 Arguments have the same meaning as for the previous function. */
14859 static void
14860 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
14861 rtx destptr, rtx srcptr,
14862 rtx count,
14863 enum machine_mode mode)
14865 rtx destexp;
14866 rtx srcexp;
14867 rtx countreg;
14869 /* If the size is known and a multiple of 4, it is shorter to use rep movsl. */
14870 if (mode == QImode && CONST_INT_P (count)
14871 && !(INTVAL (count) & 3))
14872 mode = SImode;
14874 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
14875 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
14876 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
14877 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
14878 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
14879 if (mode != QImode)
14881 destexp = gen_rtx_ASHIFT (Pmode, countreg,
14882 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
14883 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
14884 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
14885 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
14886 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
14888 else
14890 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
14891 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
14893 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
14894 destexp, srcexp));
14897 /* Output a "rep; stos" instruction.
14898 Arguments have the same meaning as for the previous function. */
14899 static void
14900 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
14901 rtx count,
14902 enum machine_mode mode)
14904 rtx destexp;
14905 rtx countreg;
14907 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
14908 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
14909 value = force_reg (mode, gen_lowpart (mode, value));
14910 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
14911 if (mode != QImode)
14913 destexp = gen_rtx_ASHIFT (Pmode, countreg,
14914 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
14915 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
14917 else
14918 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
14919 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
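/* Illustrative sketch: the count scaling done above, in scalar C.  For
   an SImode rep; stos the byte count is divided by 4 by scale_counter
   and VALUE is stored one word per iteration (stosl also advances the
   destination pointer, which destexp expresses to the RTL).  */

static void
rep_stos_model (unsigned int *dst, unsigned long count_bytes,
                unsigned int value)
{
  unsigned long n = count_bytes / 4;    /* scale_counter */
  while (n-- > 0)
    *dst++ = value;                     /* one stosl iteration */
}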
14922 static void
14923 emit_strmov (rtx destmem, rtx srcmem,
14924 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
14926 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
14927 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
14928 emit_insn (gen_strmov (destptr, dest, srcptr, src));
14931 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
14932 static void
14933 expand_movmem_epilogue (rtx destmem, rtx srcmem,
14934 rtx destptr, rtx srcptr, rtx count, int max_size)
14936 rtx src, dest;
14937 if (CONST_INT_P (count))
14939 HOST_WIDE_INT countval = INTVAL (count);
14940 int offset = 0;
14942 if ((countval & 0x10) && max_size > 16)
14944 if (TARGET_64BIT)
14946 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
14947 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
14949 else
14950 gcc_unreachable ();
14951 offset += 16;
14953 if ((countval & 0x08) && max_size > 8)
14955 if (TARGET_64BIT)
14956 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
14957 else
14959 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
14960 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
14962 offset += 8;
14964 if ((countval & 0x04) && max_size > 4)
14966 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
14967 offset += 4;
14969 if ((countval & 0x02) && max_size > 2)
14971 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
14972 offset += 2;
14974 if ((countval & 0x01) && max_size > 1)
14976 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
14977 offset += 1;
14979 return;
14981 if (max_size > 8)
14983 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
14984 count, 1, OPTAB_DIRECT);
14985 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
14986 count, QImode, 1, 4);
14987 return;
14990 /* When single stringop instructions are available, we can cheaply advance
14991 the dest and src pointers. Otherwise we save code size by maintaining an
14992 offset (zero is readily available from the preceding rep operation) and
14993 using x86 addressing modes. */
14994 if (TARGET_SINGLE_STRINGOP)
14996 if (max_size > 4)
14998 rtx label = ix86_expand_aligntest (count, 4, true);
14999 src = change_address (srcmem, SImode, srcptr);
15000 dest = change_address (destmem, SImode, destptr);
15001 emit_insn (gen_strmov (destptr, dest, srcptr, src));
15002 emit_label (label);
15003 LABEL_NUSES (label) = 1;
15005 if (max_size > 2)
15007 rtx label = ix86_expand_aligntest (count, 2, true);
15008 src = change_address (srcmem, HImode, srcptr);
15009 dest = change_address (destmem, HImode, destptr);
15010 emit_insn (gen_strmov (destptr, dest, srcptr, src));
15011 emit_label (label);
15012 LABEL_NUSES (label) = 1;
15014 if (max_size > 1)
15016 rtx label = ix86_expand_aligntest (count, 1, true);
15017 src = change_address (srcmem, QImode, srcptr);
15018 dest = change_address (destmem, QImode, destptr);
15019 emit_insn (gen_strmov (destptr, dest, srcptr, src));
15020 emit_label (label);
15021 LABEL_NUSES (label) = 1;
15024 else
15026 rtx offset = force_reg (Pmode, const0_rtx);
15027 rtx tmp;
15029 if (max_size > 4)
15031 rtx label = ix86_expand_aligntest (count, 4, true);
15032 src = change_address (srcmem, SImode, srcptr);
15033 dest = change_address (destmem, SImode, destptr);
15034 emit_move_insn (dest, src);
15035 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
15036 true, OPTAB_LIB_WIDEN);
15037 if (tmp != offset)
15038 emit_move_insn (offset, tmp);
15039 emit_label (label);
15040 LABEL_NUSES (label) = 1;
15042 if (max_size > 2)
15044 rtx label = ix86_expand_aligntest (count, 2, true);
15045 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
15046 src = change_address (srcmem, HImode, tmp);
15047 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
15048 dest = change_address (destmem, HImode, tmp);
15049 emit_move_insn (dest, src);
15050 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
15051 true, OPTAB_LIB_WIDEN);
15052 if (tmp != offset)
15053 emit_move_insn (offset, tmp);
15054 emit_label (label);
15055 LABEL_NUSES (label) = 1;
15057 if (max_size > 1)
15059 rtx label = ix86_expand_aligntest (count, 1, true);
15060 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
15061 src = change_address (srcmem, QImode, tmp);
15062 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
15063 dest = change_address (destmem, QImode, tmp);
15064 emit_move_insn (dest, src);
15065 emit_label (label);
15066 LABEL_NUSES (label) = 1;
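/* Illustrative sketch: the constant-count branch above, in scalar C.
   The tail is smaller than MAX_SIZE, so one move per set bit of the
   count suffices; __builtin_memcpy stands in for the strmov moves, and
   the 64-bit path also has a 16-byte step.  */

static void
movmem_epilogue_model (char *dst, const char *src, unsigned long tail)
{
  if (tail & 8) { __builtin_memcpy (dst, src, 8); dst += 8; src += 8; }
  if (tail & 4) { __builtin_memcpy (dst, src, 4); dst += 4; src += 4; }
  if (tail & 2) { __builtin_memcpy (dst, src, 2); dst += 2; src += 2; }
  if (tail & 1) *dst = *src;
}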
15071 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
15072 static void
15073 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
15074 rtx count, int max_size)
15076 count =
15077 expand_simple_binop (counter_mode (count), AND, count,
15078 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
15079 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
15080 gen_lowpart (QImode, value), count, QImode,
15081 1, max_size / 2);
15084 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
15085 static void
15086 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
15088 rtx dest;
15090 if (CONST_INT_P (count))
15092 HOST_WIDE_INT countval = INTVAL (count);
15093 int offset = 0;
15095 if ((countval & 0x10) && max_size > 16)
15097 if (TARGET_64BIT)
15099 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
15100 emit_insn (gen_strset (destptr, dest, value));
15101 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
15102 emit_insn (gen_strset (destptr, dest, value));
15104 else
15105 gcc_unreachable ();
15106 offset += 16;
15108 if ((countval & 0x08) && max_size > 8)
15110 if (TARGET_64BIT)
15112 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
15113 emit_insn (gen_strset (destptr, dest, value));
15115 else
15117 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
15118 emit_insn (gen_strset (destptr, dest, value));
15119 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
15120 emit_insn (gen_strset (destptr, dest, value));
15122 offset += 8;
15124 if ((countval & 0x04) && max_size > 4)
15126 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
15127 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
15128 offset += 4;
15130 if ((countval & 0x02) && max_size > 2)
15132 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
15133 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
15134 offset += 2;
15136 if ((countval & 0x01) && max_size > 1)
15138 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
15139 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
15140 offset += 1;
15142 return;
15144 if (max_size > 32)
15146 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
15147 return;
15149 if (max_size > 16)
15151 rtx label = ix86_expand_aligntest (count, 16, true);
15152 if (TARGET_64BIT)
15154 dest = change_address (destmem, DImode, destptr);
15155 emit_insn (gen_strset (destptr, dest, value));
15156 emit_insn (gen_strset (destptr, dest, value));
15158 else
15160 dest = change_address (destmem, SImode, destptr);
15161 emit_insn (gen_strset (destptr, dest, value));
15162 emit_insn (gen_strset (destptr, dest, value));
15163 emit_insn (gen_strset (destptr, dest, value));
15164 emit_insn (gen_strset (destptr, dest, value));
15166 emit_label (label);
15167 LABEL_NUSES (label) = 1;
15169 if (max_size > 8)
15171 rtx label = ix86_expand_aligntest (count, 8, true);
15172 if (TARGET_64BIT)
15174 dest = change_address (destmem, DImode, destptr);
15175 emit_insn (gen_strset (destptr, dest, value));
15177 else
15179 dest = change_address (destmem, SImode, destptr);
15180 emit_insn (gen_strset (destptr, dest, value));
15181 emit_insn (gen_strset (destptr, dest, value));
15183 emit_label (label);
15184 LABEL_NUSES (label) = 1;
15186 if (max_size > 4)
15188 rtx label = ix86_expand_aligntest (count, 4, true);
15189 dest = change_address (destmem, SImode, destptr);
15190 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
15191 emit_label (label);
15192 LABEL_NUSES (label) = 1;
15194 if (max_size > 2)
15196 rtx label = ix86_expand_aligntest (count, 2, true);
15197 dest = change_address (destmem, HImode, destptr);
15198 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
15199 emit_label (label);
15200 LABEL_NUSES (label) = 1;
15202 if (max_size > 1)
15204 rtx label = ix86_expand_aligntest (count, 1, true);
15205 dest = change_address (destmem, QImode, destptr);
15206 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
15207 emit_label (label);
15208 LABEL_NUSES (label) = 1;
15212 /* Copy enough from SRC to DEST to align DEST, known to be aligned to ALIGN,
15213 to DESIRED_ALIGNMENT. */
15214 static void
15215 expand_movmem_prologue (rtx destmem, rtx srcmem,
15216 rtx destptr, rtx srcptr, rtx count,
15217 int align, int desired_alignment)
15219 if (align <= 1 && desired_alignment > 1)
15221 rtx label = ix86_expand_aligntest (destptr, 1, false);
15222 srcmem = change_address (srcmem, QImode, srcptr);
15223 destmem = change_address (destmem, QImode, destptr);
15224 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
15225 ix86_adjust_counter (count, 1);
15226 emit_label (label);
15227 LABEL_NUSES (label) = 1;
15229 if (align <= 2 && desired_alignment > 2)
15231 rtx label = ix86_expand_aligntest (destptr, 2, false);
15232 srcmem = change_address (srcmem, HImode, srcptr);
15233 destmem = change_address (destmem, HImode, destptr);
15234 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
15235 ix86_adjust_counter (count, 2);
15236 emit_label (label);
15237 LABEL_NUSES (label) = 1;
15239 if (align <= 4 && desired_alignment > 4)
15241 rtx label = ix86_expand_aligntest (destptr, 4, false);
15242 srcmem = change_address (srcmem, SImode, srcptr);
15243 destmem = change_address (destmem, SImode, destptr);
15244 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
15245 ix86_adjust_counter (count, 4);
15246 emit_label (label);
15247 LABEL_NUSES (label) = 1;
15249 gcc_assert (desired_alignment <= 8);
15252 /* Store enough at DEST to align it, known to be aligned to ALIGN,
15253 to DESIRED_ALIGNMENT. */
15254 static void
15255 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
15256 int align, int desired_alignment)
15258 if (align <= 1 && desired_alignment > 1)
15260 rtx label = ix86_expand_aligntest (destptr, 1, false);
15261 destmem = change_address (destmem, QImode, destptr);
15262 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
15263 ix86_adjust_counter (count, 1);
15264 emit_label (label);
15265 LABEL_NUSES (label) = 1;
15267 if (align <= 2 && desired_alignment > 2)
15269 rtx label = ix86_expand_aligntest (destptr, 2, false);
15270 destmem = change_address (destmem, HImode, destptr);
15271 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
15272 ix86_adjust_counter (count, 2);
15273 emit_label (label);
15274 LABEL_NUSES (label) = 1;
15276 if (align <= 4 && desired_alignment > 4)
15278 rtx label = ix86_expand_aligntest (destptr, 4, false);
15279 destmem = change_address (destmem, SImode, destptr);
15280 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
15281 ix86_adjust_counter (count, 4);
15282 emit_label (label);
15283 LABEL_NUSES (label) = 1;
15285 gcc_assert (desired_alignment <= 8);
15288 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
15289 static enum stringop_alg
15290 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
15291 int *dynamic_check)
15293 const struct stringop_algs * algs;
15294 /* Algorithms using the rep prefix want at least edi and ecx;
15295 additionally, memset wants eax and memcpy wants esi. Don't
15296 consider such algorithms if the user has appropriated those
15297 registers for their own purposes. */
15298 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
15299 || (memset
15300 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
15302 #define ALG_USABLE_P(alg) (rep_prefix_usable \
15303 || (alg != rep_prefix_1_byte \
15304 && alg != rep_prefix_4_byte \
15305 && alg != rep_prefix_8_byte))
15307 *dynamic_check = -1;
15308 if (memset)
15309 algs = &ix86_cost->memset[TARGET_64BIT != 0];
15310 else
15311 algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
15312 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
15313 return stringop_alg;
15314 /* rep; movq or rep; movl is the smallest variant. */
15315 else if (optimize_size)
15317 if (!count || (count & 3))
15318 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
15319 else
15320 return rep_prefix_usable ? rep_prefix_4_byte : loop;
15322 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
15324 else if (expected_size != -1 && expected_size < 4)
15325 return loop_1_byte;
15326 else if (expected_size != -1)
15328 unsigned int i;
15329 enum stringop_alg alg = libcall;
15330 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
15332 /* We get here if the algorithms that were not libcall-based
15333 were rep-prefix based and we are unable to use rep prefixes
15334 based on global register usage. Break out of the loop and
15335 use the heuristic below. */
15336 if (algs->size[i].max == 0)
15337 break;
15338 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
15340 enum stringop_alg candidate = algs->size[i].alg;
15342 if (candidate != libcall && ALG_USABLE_P (candidate))
15343 alg = candidate;
15344 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
15345 last non-libcall inline algorithm. */
15346 if (TARGET_INLINE_ALL_STRINGOPS)
15348 /* When the current size is best copied by a libcall, but we
15349 are still forced to inline, run the heuristic below that
15350 will pick code for medium-sized blocks. */
15351 if (alg != libcall)
15352 return alg;
15353 break;
15355 else if (ALG_USABLE_P (candidate))
15356 return candidate;
15359 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
15361 /* When asked to inline the call anyway, try to pick a meaningful choice.
15362 We look for the maximal size of block that is faster to copy by hand
15363 and take blocks of at most that size, guessing that the average size
15364 will be roughly half of the block.
15366 If this turns out to be bad, we might simply specify the preferred
15367 choice in ix86_costs. */
15368 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
15369 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
15371 int max = -1;
15372 enum stringop_alg alg;
15373 int i;
15374 bool any_alg_usable_p = true;
15376 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
15378 enum stringop_alg candidate = algs->size[i].alg;
15379 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
15381 if (candidate != libcall && candidate
15382 && ALG_USABLE_P (candidate))
15383 max = algs->size[i].max;
15385 /* If there aren't any usable algorithms, then recursing on
15386 smaller sizes isn't going to find anything. Just return the
15387 simple byte-at-a-time copy loop. */
15388 if (!any_alg_usable_p)
15390 /* Pick something reasonable. */
15391 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
15392 *dynamic_check = 128;
15393 return loop_1_byte;
15395 if (max == -1)
15396 max = 4096;
15397 alg = decide_alg (count, max / 2, memset, dynamic_check);
15398 gcc_assert (*dynamic_check == -1);
15399 gcc_assert (alg != libcall);
15400 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
15401 *dynamic_check = max;
15402 return alg;
15404 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
15405 #undef ALG_USABLE_P
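/* Illustrative sketch of the table walk above, over a simplified
   descriptor (the real one is the port's struct stringop_algs).  Given
   entries like {{256, loop}, {-1, rep_prefix_4_byte}}, an expected size
   of 512 falls through the first entry and selects the second; a max of
   -1 terminates the table and matches any size.  */

struct alg_entry_model { int max; int alg; };

static int
pick_alg_model (const struct alg_entry_model *e, int expected_size,
                int nelems)
{
  int i;
  for (i = 0; i < nelems; i++)
    if (e[i].max == -1 || e[i].max >= expected_size)
      return e[i].alg;
  return -1;    /* no inline algorithm; caller falls back to a libcall */
}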
15408 /* Decide on alignment. We know that the operand is already aligned to ALIGN
15409 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
15410 static int
15411 decide_alignment (int align,
15412 enum stringop_alg alg,
15413 int expected_size)
15415 int desired_align = 0;
15416 switch (alg)
15418 case no_stringop:
15419 gcc_unreachable ();
15420 case loop:
15421 case unrolled_loop:
15422 desired_align = GET_MODE_SIZE (Pmode);
15423 break;
15424 case rep_prefix_8_byte:
15425 desired_align = 8;
15426 break;
15427 case rep_prefix_4_byte:
15428 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
15429 copying a whole cache line at once. */
15430 if (TARGET_PENTIUMPRO)
15431 desired_align = 8;
15432 else
15433 desired_align = 4;
15434 break;
15435 case rep_prefix_1_byte:
15436 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
15437 copying a whole cache line at once. */
15438 if (TARGET_PENTIUMPRO)
15439 desired_align = 8;
15440 else
15441 desired_align = 1;
15442 break;
15443 case loop_1_byte:
15444 desired_align = 1;
15445 break;
15446 case libcall:
15447 return 0;
15450 if (optimize_size)
15451 desired_align = 1;
15452 if (desired_align < align)
15453 desired_align = align;
15454 if (expected_size != -1 && expected_size < 4)
15455 desired_align = align;
15456 return desired_align;
15459 /* Return the smallest power of 2 greater than VAL. */
15460 static int
15461 smallest_pow2_greater_than (int val)
15463 int ret = 1;
15464 while (ret <= val)
15465 ret <<= 1;
15466 return ret;
15469 /* Expand string move (memcpy) operation. Use i386 string operations when
15470 profitable. expand_setmem contains similar code. The code depends upon
15471 architecture, block size and alignment, but always has the same
15472 overall structure:
15474 1) Prologue guard: Conditional that jumps up to the epilogue for small
15475 blocks that can be handled by the epilogue alone. This is faster but
15476 also needed for correctness, since the prologue assumes the block is
15477 larger than the desired alignment.
15479 Optional dynamic check for size and libcall for large
15480 blocks is emitted here too, with -minline-stringops-dynamically.
15482 2) Prologue: copy first few bytes in order to get destination aligned
15483 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
15484 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
15485 We emit either a jump tree for power-of-two sized blocks, or a byte loop.
15487 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
15488 with specified algorithm.
15490 4) Epilogue: code copying tail of the block that is too small to be
15491 handled by main body (or up to size guarded by prologue guard). */
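/* Illustrative sketch of the four-part structure described above, as
   scalar C; CHUNK stands for SIZE_NEEDED, DESIRED_ALIGN is a power of
   two, and plain byte copies stand in for the chosen algorithm.  The
   guard guarantees the prologue has enough bytes to consume.  */

static void
movmem_shape_model (char *dst, const char *src, unsigned long count,
                    unsigned long desired_align, unsigned long chunk)
{
  unsigned long i;
  if (count >= chunk)                           /* 1) prologue guard */
    {
      while ((unsigned long) dst & (desired_align - 1))
        { *dst++ = *src++; count--; }           /* 2) prologue */
      for (; count >= chunk; count -= chunk)    /* 3) main body */
        {
          for (i = 0; i < chunk; i++)
            dst[i] = src[i];
          dst += chunk; src += chunk;
        }
    }
  while (count--)                               /* 4) epilogue */
    *dst++ = *src++;
}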
15493 int
15494 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
15495 rtx expected_align_exp, rtx expected_size_exp)
15497 rtx destreg;
15498 rtx srcreg;
15499 rtx label = NULL;
15500 rtx tmp;
15501 rtx jump_around_label = NULL;
15502 HOST_WIDE_INT align = 1;
15503 unsigned HOST_WIDE_INT count = 0;
15504 HOST_WIDE_INT expected_size = -1;
15505 int size_needed = 0, epilogue_size_needed;
15506 int desired_align = 0;
15507 enum stringop_alg alg;
15508 int dynamic_check;
15510 if (CONST_INT_P (align_exp))
15511 align = INTVAL (align_exp);
15512 /* i386 can do misaligned access at a reasonably increased cost. */
15513 if (CONST_INT_P (expected_align_exp)
15514 && INTVAL (expected_align_exp) > align)
15515 align = INTVAL (expected_align_exp);
15516 if (CONST_INT_P (count_exp))
15517 count = expected_size = INTVAL (count_exp);
15518 if (CONST_INT_P (expected_size_exp) && count == 0)
15519 expected_size = INTVAL (expected_size_exp);
15521 /* Make sure we don't need to care about overflow later on. */
15522 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
15523 return 0;
15525 /* Step 0: Decide on preferred algorithm, desired alignment and
15526 size of chunks to be copied by main loop. */
15528 alg = decide_alg (count, expected_size, false, &dynamic_check);
15529 desired_align = decide_alignment (align, alg, expected_size);
15531 if (!TARGET_ALIGN_STRINGOPS)
15532 align = desired_align;
15534 if (alg == libcall)
15535 return 0;
15536 gcc_assert (alg != no_stringop);
15537 if (!count)
15538 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
15539 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
15540 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
15541 switch (alg)
15543 case libcall:
15544 case no_stringop:
15545 gcc_unreachable ();
15546 case loop:
15547 size_needed = GET_MODE_SIZE (Pmode);
15548 break;
15549 case unrolled_loop:
15550 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
15551 break;
15552 case rep_prefix_8_byte:
15553 size_needed = 8;
15554 break;
15555 case rep_prefix_4_byte:
15556 size_needed = 4;
15557 break;
15558 case rep_prefix_1_byte:
15559 case loop_1_byte:
15560 size_needed = 1;
15561 break;
15564 epilogue_size_needed = size_needed;
15566 /* Step 1: Prologue guard. */
15568 /* Alignment code needs count to be in register. */
15569 if (CONST_INT_P (count_exp) && desired_align > align)
15570 count_exp = force_reg (counter_mode (count_exp), count_exp);
15571 gcc_assert (desired_align >= 1 && align >= 1);
15573 /* Ensure that alignment prologue won't copy past end of block. */
15574 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
15576 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
15577 /* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
15578 bytes. Make sure it is a power of 2. */
15579 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
15581 if (CONST_INT_P (count_exp))
15583 if (UINTVAL (count_exp) < (unsigned HOST_WIDE_INT)epilogue_size_needed)
15584 goto epilogue;
15586 else
15588 label = gen_label_rtx ();
15589 emit_cmp_and_jump_insns (count_exp,
15590 GEN_INT (epilogue_size_needed),
15591 LTU, 0, counter_mode (count_exp), 1, label);
15592 if (expected_size == -1 || expected_size < epilogue_size_needed)
15593 predict_jump (REG_BR_PROB_BASE * 60 / 100);
15594 else
15595 predict_jump (REG_BR_PROB_BASE * 20 / 100);
15599 /* Emit code to decide on runtime whether library call or inline should be
15600 used. */
15601 if (dynamic_check != -1)
15603 if (CONST_INT_P (count_exp))
15605 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
15607 emit_block_move_via_libcall (dst, src, count_exp, false);
15608 count_exp = const0_rtx;
15609 goto epilogue;
15612 else
15614 rtx hot_label = gen_label_rtx ();
15615 jump_around_label = gen_label_rtx ();
15616 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
15617 LEU, 0, GET_MODE (count_exp), 1, hot_label);
15618 predict_jump (REG_BR_PROB_BASE * 90 / 100);
15619 emit_block_move_via_libcall (dst, src, count_exp, false);
15620 emit_jump (jump_around_label);
15621 emit_label (hot_label);
15625 /* Step 2: Alignment prologue. */
15627 if (desired_align > align)
15629 /* Except for the first move in the epilogue, we no longer know
15630 the constant offset in the aliasing info. It does not seem worth
15631 the pain to maintain it for the first move, so throw away
15632 the info early. */
15633 src = change_address (src, BLKmode, srcreg);
15634 dst = change_address (dst, BLKmode, destreg);
15635 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
15636 desired_align);
15638 if (label && size_needed == 1)
15640 emit_label (label);
15641 LABEL_NUSES (label) = 1;
15642 label = NULL;
15645 /* Step 3: Main loop. */
15647 switch (alg)
15649 case libcall:
15650 case no_stringop:
15651 gcc_unreachable ();
15652 case loop_1_byte:
15653 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
15654 count_exp, QImode, 1, expected_size);
15655 break;
15656 case loop:
15657 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
15658 count_exp, Pmode, 1, expected_size);
15659 break;
15660 case unrolled_loop:
15661 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have
15662 enough registers for 4 temporaries anyway. */
15663 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
15664 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
15665 expected_size);
15666 break;
15667 case rep_prefix_8_byte:
15668 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
15669 DImode);
15670 break;
15671 case rep_prefix_4_byte:
15672 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
15673 SImode);
15674 break;
15675 case rep_prefix_1_byte:
15676 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
15677 QImode);
15678 break;
15680 /* Properly adjust the offsets of the src and dest memory for aliasing. */
15681 if (CONST_INT_P (count_exp))
15683 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
15684 (count / size_needed) * size_needed);
15685 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
15686 (count / size_needed) * size_needed);
15688 else
15690 src = change_address (src, BLKmode, srcreg);
15691 dst = change_address (dst, BLKmode, destreg);
15694 /* Step 4: Epilogue to copy the remaining bytes. */
15695 epilogue:
15696 if (label)
15698 /* When the main loop is done, COUNT_EXP might hold the original count,
15699 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
15700 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
15701 bytes. Compensate if needed. */
15703 if (size_needed < epilogue_size_needed)
15705 tmp =
15706 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
15707 GEN_INT (size_needed - 1), count_exp, 1,
15708 OPTAB_DIRECT);
15709 if (tmp != count_exp)
15710 emit_move_insn (count_exp, tmp);
15712 emit_label (label);
15713 LABEL_NUSES (label) = 1;
15716 if (count_exp != const0_rtx && epilogue_size_needed > 1)
15717 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
15718 epilogue_size_needed);
15719 if (jump_around_label)
15720 emit_label (jump_around_label);
15721 return 1;
15724 /* Helper function for memset. For QImode value 0xXY produce
15725 0xXYXYXYXY of the width specified by MODE. This is essentially
15726 a * 0x01010101, but we can do slightly better than
15727 synth_mult by unwinding the sequence by hand on CPUs with
15728 slow multiply. */
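/* A hedged scalar sketch of the two non-constant strategies below
   (illustrative only): for a byte B, the multiply path computes
   B * 0x01010101 (B * 0x0101010101010101 for DImode) with a single
   multiply, while the unwound shift/or path does

     x = B;
     x |= x << 8;      0x00XY   -> 0xXYXY
     x |= x << 16;     0xXYXY   -> 0xXYXYXYXY
     x |= x << 32;     DImode only

   Either way the byte ends up replicated across the whole register.  */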
15729 static rtx
15730 promote_duplicated_reg (enum machine_mode mode, rtx val)
15732 enum machine_mode valmode = GET_MODE (val);
15733 rtx tmp;
15734 int nops = mode == DImode ? 3 : 2;
15736 gcc_assert (mode == SImode || mode == DImode);
15737 if (val == const0_rtx)
15738 return copy_to_mode_reg (mode, const0_rtx);
15739 if (CONST_INT_P (val))
15741 HOST_WIDE_INT v = INTVAL (val) & 255;
15743 v |= v << 8;
15744 v |= v << 16;
15745 if (mode == DImode)
15746 v |= (v << 16) << 16;
15747 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
15750 if (valmode == VOIDmode)
15751 valmode = QImode;
15752 if (valmode != QImode)
15753 val = gen_lowpart (QImode, val);
15754 if (mode == QImode)
15755 return val;
15756 if (!TARGET_PARTIAL_REG_STALL)
15757 nops--;
15758 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
15759 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
15760 <= (ix86_cost->shift_const + ix86_cost->add) * nops
15761 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
15763 rtx reg = convert_modes (mode, QImode, val, true);
15764 tmp = promote_duplicated_reg (mode, const1_rtx);
15765 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
15766 OPTAB_DIRECT);
15768 else
15770 rtx reg = convert_modes (mode, QImode, val, true);
15772 if (!TARGET_PARTIAL_REG_STALL)
15773 if (mode == SImode)
15774 emit_insn (gen_movsi_insv_1 (reg, reg));
15775 else
15776 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
15777 else
15779 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
15780 NULL, 1, OPTAB_DIRECT);
15781 reg =
15782 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
15784 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
15785 NULL, 1, OPTAB_DIRECT);
15786 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
15787 if (mode == SImode)
15788 return reg;
15789 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
15790 NULL, 1, OPTAB_DIRECT);
15791 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
15792 return reg;
15796 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
15797 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
15798 alignment from ALIGN to DESIRED_ALIGN. */
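/* E.g. (hedged): on 64-bit targets SIZE_NEEDED == 8 promotes VAL to
   DImode, while SIZE_NEEDED == 2 with ALIGN == DESIRED_ALIGN promotes
   only to HImode, matching the widest store the main loop or the
   alignment prologue will emit.  */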
15799 static rtx
15800 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
15802 rtx promoted_val;
15804 if (TARGET_64BIT
15805 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
15806 promoted_val = promote_duplicated_reg (DImode, val);
15807 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
15808 promoted_val = promote_duplicated_reg (SImode, val);
15809 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
15810 promoted_val = promote_duplicated_reg (HImode, val);
15811 else
15812 promoted_val = val;
15814 return promoted_val;
15817 /* Expand string set operation (memset). Use i386 string operations
15818 when profitable. See the expand_movmem comment for an explanation of
15819 the individual steps performed. */
15821 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
15822 rtx expected_align_exp, rtx expected_size_exp)
15824 rtx destreg;
15825 rtx label = NULL;
15826 rtx tmp;
15827 rtx jump_around_label = NULL;
15828 HOST_WIDE_INT align = 1;
15829 unsigned HOST_WIDE_INT count = 0;
15830 HOST_WIDE_INT expected_size = -1;
15831 int size_needed = 0, epilogue_size_needed;
15832 int desired_align = 0;
15833 enum stringop_alg alg;
15834 rtx promoted_val = NULL;
15835 bool force_loopy_epilogue = false;
15836 int dynamic_check;
15838 if (CONST_INT_P (align_exp))
15839 align = INTVAL (align_exp);
15840 /* i386 can do misaligned access at a reasonably increased cost. */
15841 if (CONST_INT_P (expected_align_exp)
15842 && INTVAL (expected_align_exp) > align)
15843 align = INTVAL (expected_align_exp);
15844 if (CONST_INT_P (count_exp))
15845 count = expected_size = INTVAL (count_exp);
15846 if (CONST_INT_P (expected_size_exp) && count == 0)
15847 expected_size = INTVAL (expected_size_exp);
15849 /* Make sure we don't need to care about overflow later on. */
15850 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
15851 return 0;
15853 /* Step 0: Decide on preferred algorithm, desired alignment and
15854 size of chunks to be copied by main loop. */
15856 alg = decide_alg (count, expected_size, true, &dynamic_check);
15857 desired_align = decide_alignment (align, alg, expected_size);
15859 if (!TARGET_ALIGN_STRINGOPS)
15860 align = desired_align;
15862 if (alg == libcall)
15863 return 0;
15864 gcc_assert (alg != no_stringop);
15865 if (!count)
15866 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
15867 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
15868 switch (alg)
15870 case libcall:
15871 case no_stringop:
15872 gcc_unreachable ();
15873 case loop:
15874 size_needed = GET_MODE_SIZE (Pmode);
15875 break;
15876 case unrolled_loop:
15877 size_needed = GET_MODE_SIZE (Pmode) * 4;
15878 break;
15879 case rep_prefix_8_byte:
15880 size_needed = 8;
15881 break;
15882 case rep_prefix_4_byte:
15883 size_needed = 4;
15884 break;
15885 case rep_prefix_1_byte:
15886 case loop_1_byte:
15887 size_needed = 1;
15888 break;
15890 epilogue_size_needed = size_needed;
15892 /* Step 1: Prologue guard. */
15894 /* Alignment code needs count to be in register. */
15895 if (CONST_INT_P (count_exp) && desired_align > align)
15897 enum machine_mode mode = SImode;
15898 if (TARGET_64BIT && (count & ~0xffffffff))
15899 mode = DImode;
15900 count_exp = force_reg (mode, count_exp);
15902 /* Do the cheap promotion to allow better CSE across the
15903 main loop and epilogue (i.e., one load of the big constant in
15904 front of all the code). */
15905 if (CONST_INT_P (val_exp))
15906 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
15907 desired_align, align);
15908 /* Ensure that alignment prologue won't copy past end of block. */
15909 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
15911 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
15912 /* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
15913 bytes. Make sure it is a power of 2. */
15914 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
15916 /* To improve performance of small blocks, we jump around the VAL
15917 promoting code. This means that if the promoted VAL is not constant,
15918 we might not use it in the epilogue and have to use the byte
15919 loop variant. */
15920 if (epilogue_size_needed > 2 && !promoted_val)
15921 force_loopy_epilogue = true;
15922 label = gen_label_rtx ();
15923 emit_cmp_and_jump_insns (count_exp,
15924 GEN_INT (epilogue_size_needed),
15925 LTU, 0, counter_mode (count_exp), 1, label);
15926 if (CONST_INT_P (count_exp))
15928 else if (expected_size == -1 || expected_size <= epilogue_size_needed)
15929 predict_jump (REG_BR_PROB_BASE * 60 / 100);
15930 else
15931 predict_jump (REG_BR_PROB_BASE * 20 / 100);
15933 if (dynamic_check != -1)
15935 rtx hot_label = gen_label_rtx ();
15936 jump_around_label = gen_label_rtx ();
15937 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
15938 LEU, 0, counter_mode (count_exp), 1, hot_label);
15939 predict_jump (REG_BR_PROB_BASE * 90 / 100);
15940 set_storage_via_libcall (dst, count_exp, val_exp, false);
15941 emit_jump (jump_around_label);
15942 emit_label (hot_label);
15945 /* Step 2: Alignment prologue. */
15947 /* Do the expensive promotion once we branched off the small blocks. */
15948 if (!promoted_val)
15949 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
15950 desired_align, align);
15951 gcc_assert (desired_align >= 1 && align >= 1);
15953 if (desired_align > align)
15955 /* Except for the first move in the epilogue, we no longer know
15956 the constant offset in the aliasing info. It does not seem worth
15957 the pain to maintain it for the first move, so throw away
15958 the info early. */
15959 dst = change_address (dst, BLKmode, destreg);
15960 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
15961 desired_align);
15963 if (label && size_needed == 1)
15965 emit_label (label);
15966 LABEL_NUSES (label) = 1;
15967 label = NULL;
15970 /* Step 3: Main loop. */
15972 switch (alg)
15974 case libcall:
15975 case no_stringop:
15976 gcc_unreachable ();
15977 case loop_1_byte:
15978 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
15979 count_exp, QImode, 1, expected_size);
15980 break;
15981 case loop:
15982 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
15983 count_exp, Pmode, 1, expected_size);
15984 break;
15985 case unrolled_loop:
15986 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
15987 count_exp, Pmode, 4, expected_size);
15988 break;
15989 case rep_prefix_8_byte:
15990 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
15991 DImode);
15992 break;
15993 case rep_prefix_4_byte:
15994 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
15995 SImode);
15996 break;
15997 case rep_prefix_1_byte:
15998 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
15999 QImode);
16000 break;
16002 /* Properly adjust the offset of the dest memory for aliasing. */
16003 if (CONST_INT_P (count_exp))
16004 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
16005 (count / size_needed) * size_needed);
16006 else
16007 dst = change_address (dst, BLKmode, destreg);
16009 /* Step 4: Epilogue to copy the remaining bytes. */
16011 if (label)
16013 /* When the main loop is done, COUNT_EXP might hold the original count,
16014 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
16015 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
16016 bytes. Compensate if needed. */
16018 if (size_needed < desired_align - align)
16020 tmp =
16021 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
16022 GEN_INT (size_needed - 1), count_exp, 1,
16023 OPTAB_DIRECT);
16024 size_needed = desired_align - align + 1;
16025 if (tmp != count_exp)
16026 emit_move_insn (count_exp, tmp);
16028 emit_label (label);
16029 LABEL_NUSES (label) = 1;
16031 if (count_exp != const0_rtx && epilogue_size_needed > 1)
16033 if (force_loopy_epilogue)
16034 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
16035 size_needed);
16036 else
16037 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
16038 size_needed);
16040 if (jump_around_label)
16041 emit_label (jump_around_label);
16042 return 1;
16045 /* Expand the appropriate insns for doing strlen if not just doing
16046 repnz; scasb
16048 out = result, initialized with the start address
16049 align_rtx = alignment of the address.
16050 scratch = scratch register, initialized with the start address when
16051 not aligned, otherwise undefined
16053 This is just the body. It needs the initializations mentioned above and
16054 some address computing at the end. These things are done in i386.md. */
16056 static void
16057 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
16059 int align;
16060 rtx tmp;
16061 rtx align_2_label = NULL_RTX;
16062 rtx align_3_label = NULL_RTX;
16063 rtx align_4_label = gen_label_rtx ();
16064 rtx end_0_label = gen_label_rtx ();
16065 rtx mem;
16066 rtx tmpreg = gen_reg_rtx (SImode);
16067 rtx scratch = gen_reg_rtx (SImode);
16068 rtx cmp;
16070 align = 0;
16071 if (CONST_INT_P (align_rtx))
16072 align = INTVAL (align_rtx);
16074 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
16076 /* Is there a known alignment and is it less than 4? */
16077 if (align < 4)
16079 rtx scratch1 = gen_reg_rtx (Pmode);
16080 emit_move_insn (scratch1, out);
16081 /* Is there a known alignment and is it not 2? */
16082 if (align != 2)
16084 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
16085 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
16087 /* Leave just the two lower bits. */
16088 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
16089 NULL_RTX, 0, OPTAB_WIDEN);
16091 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
16092 Pmode, 1, align_4_label);
16093 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
16094 Pmode, 1, align_2_label);
16095 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
16096 Pmode, 1, align_3_label);
16098 else
16100 /* Since the alignment is 2, we have to check 2 or 0 bytes;
16101 check whether it is aligned to a 4-byte boundary. */
16103 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
16104 NULL_RTX, 0, OPTAB_WIDEN);
16106 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
16107 Pmode, 1, align_4_label);
16110 mem = change_address (src, QImode, out);
16112 /* Now compare the bytes. */
16114 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
16115 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
16116 QImode, 1, end_0_label);
16118 /* Increment the address. */
16119 if (TARGET_64BIT)
16120 emit_insn (gen_adddi3 (out, out, const1_rtx));
16121 else
16122 emit_insn (gen_addsi3 (out, out, const1_rtx));
16124 /* Not needed with an alignment of 2 */
16125 if (align != 2)
16127 emit_label (align_2_label);
16129 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
16130 end_0_label);
16132 if (TARGET_64BIT)
16133 emit_insn (gen_adddi3 (out, out, const1_rtx));
16134 else
16135 emit_insn (gen_addsi3 (out, out, const1_rtx));
16137 emit_label (align_3_label);
16140 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
16141 end_0_label);
16143 if (TARGET_64BIT)
16144 emit_insn (gen_adddi3 (out, out, const1_rtx));
16145 else
16146 emit_insn (gen_addsi3 (out, out, const1_rtx));
16149 /* Generate a loop to check 4 bytes at a time. It is not a good idea
16150 to align this loop; it only makes programs larger and does not
16151 speed them up. */
16152 emit_label (align_4_label);
16154 mem = change_address (src, SImode, out);
16155 emit_move_insn (scratch, mem);
16156 if (TARGET_64BIT)
16157 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
16158 else
16159 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
16161 /* This formula yields a nonzero result iff one of the bytes is zero.
16162 This saves three branches inside the loop and many cycles. */
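/* In scalar terms this is the classic zero-byte test (a hedged
   restatement, not compiler code): for a 32-bit word X,

     ((X - 0x01010101) & ~X & 0x80808080) != 0

   exactly when some byte of X is zero; subtracting 1 from a zero byte
   borrows into its top bit, and the ~X factor rejects bytes whose top
   bit was already set.  */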
16164 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
16165 emit_insn (gen_one_cmplsi2 (scratch, scratch));
16166 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
16167 emit_insn (gen_andsi3 (tmpreg, tmpreg,
16168 gen_int_mode (0x80808080, SImode)));
16169 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
16170 align_4_label);
16172 if (TARGET_CMOVE)
16174 rtx reg = gen_reg_rtx (SImode);
16175 rtx reg2 = gen_reg_rtx (Pmode);
16176 emit_move_insn (reg, tmpreg);
16177 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
16179 /* If zero is not in the first two bytes, move two bytes forward. */
16180 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
16181 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
16182 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
16183 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
16184 gen_rtx_IF_THEN_ELSE (SImode, tmp,
16185 reg,
16186 tmpreg)));
16187 /* Emit lea manually to avoid clobbering of flags. */
16188 emit_insn (gen_rtx_SET (SImode, reg2,
16189 gen_rtx_PLUS (Pmode, out, const2_rtx)));
16191 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
16192 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
16193 emit_insn (gen_rtx_SET (VOIDmode, out,
16194 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
16195 reg2,
16196 out)));
16199 else
16201 rtx end_2_label = gen_label_rtx ();
16202 /* Is zero in the first two bytes? */
16204 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
16205 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
16206 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
16207 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
16208 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
16209 pc_rtx);
16210 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
16211 JUMP_LABEL (tmp) = end_2_label;
16213 /* Not in the first two. Move two bytes forward. */
16214 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
16215 if (TARGET_64BIT)
16216 emit_insn (gen_adddi3 (out, out, const2_rtx));
16217 else
16218 emit_insn (gen_addsi3 (out, out, const2_rtx));
16220 emit_label (end_2_label);
16224 /* Avoid branch in fixing the byte. */
16225 tmpreg = gen_lowpart (QImode, tmpreg);
16226 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
16227 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
16228 if (TARGET_64BIT)
16229 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
16230 else
16231 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
16233 emit_label (end_0_label);
16236 /* Expand strlen. */
16239 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
16241 rtx addr, scratch1, scratch2, scratch3, scratch4;
16243 /* The generic case of the strlen expander is long. Avoid expanding
16244 it unless TARGET_INLINE_ALL_STRINGOPS. */
16246 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
16247 && !TARGET_INLINE_ALL_STRINGOPS
16248 && !optimize_size
16249 && (!CONST_INT_P (align) || INTVAL (align) < 4))
16250 return 0;
16252 addr = force_reg (Pmode, XEXP (src, 0));
16253 scratch1 = gen_reg_rtx (Pmode);
16255 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
16256 && !optimize_size)
16258 /* Well, it seems that some optimizers do not combine a call like
16259 foo(strlen(bar), strlen(bar));
16260 when the move and the subtraction are done here. The length is
16261 calculated just once when these instructions are done inside of
16262 output_strlen_unroll(). But since &bar[strlen(bar)] is often used
16263 and one fewer register is live for the lifetime of
16264 output_strlen_unroll(), this is better. */
16266 emit_move_insn (out, addr);
16268 ix86_expand_strlensi_unroll_1 (out, src, align);
16270 /* strlensi_unroll_1 returns the address of the zero at the end of
16271 the string, like memchr(), so compute the length by subtracting
16272 the start address. */
16273 if (TARGET_64BIT)
16274 emit_insn (gen_subdi3 (out, out, addr));
16275 else
16276 emit_insn (gen_subsi3 (out, out, addr));
16278 else
16280 rtx unspec;
16282 /* Can't use this if the user has appropriated eax, ecx, or edi. */
16283 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
16284 return false;
16286 scratch2 = gen_reg_rtx (Pmode);
16287 scratch3 = gen_reg_rtx (Pmode);
16288 scratch4 = force_reg (Pmode, constm1_rtx);
16290 emit_move_insn (scratch3, addr);
16291 eoschar = force_reg (QImode, eoschar);
16293 src = replace_equiv_address_nv (src, scratch3);
16295 /* If .md starts supporting :P, this can be done in .md. */
16296 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
16297 scratch4), UNSPEC_SCAS);
16298 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
16299 if (TARGET_64BIT)
16301 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
16302 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
16304 else
16306 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
16307 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
16310 return 1;
16313 /* For a given symbol (function), construct code to compute the address
16314 of its PLT entry in the large x86-64 PIC model. */
16316 construct_plt_address (rtx symbol)
16318 rtx tmp = gen_reg_rtx (Pmode);
16319 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
16321 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
16322 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
16324 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
16325 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
16326 return tmp;
16329 void
16330 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
16331 rtx callarg2 ATTRIBUTE_UNUSED,
16332 rtx pop, int sibcall)
16334 rtx use = NULL, call;
16336 if (pop == const0_rtx)
16337 pop = NULL;
16338 gcc_assert (!TARGET_64BIT || !pop);
16340 if (TARGET_MACHO && !TARGET_64BIT)
16342 #if TARGET_MACHO
16343 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
16344 fnaddr = machopic_indirect_call_target (fnaddr);
16345 #endif
16347 else
16349 /* Static functions and indirect calls don't need the pic register. */
16350 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
16351 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
16352 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
16353 use_reg (&use, pic_offset_table_rtx);
16356 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
16358 rtx al = gen_rtx_REG (QImode, AX_REG);
16359 emit_move_insn (al, callarg2);
16360 use_reg (&use, al);
16363 if (ix86_cmodel == CM_LARGE_PIC
16364 && GET_CODE (fnaddr) == MEM
16365 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
16366 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
16367 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
16368 else if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
16370 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
16371 fnaddr = gen_rtx_MEM (QImode, fnaddr);
16373 if (sibcall && TARGET_64BIT
16374 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
16376 rtx addr;
16377 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
16378 fnaddr = gen_rtx_REG (Pmode, R11_REG);
16379 emit_move_insn (fnaddr, addr);
16380 fnaddr = gen_rtx_MEM (QImode, fnaddr);
16383 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
16384 if (retval)
16385 call = gen_rtx_SET (VOIDmode, retval, call);
16386 if (pop)
16388 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
16389 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
16390 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
16393 call = emit_call_insn (call);
16394 if (use)
16395 CALL_INSN_FUNCTION_USAGE (call) = use;
16399 /* Clear stack slot assignments remembered from previous functions.
16400 This is called from INIT_EXPANDERS once before RTL is emitted for each
16401 function. */
16403 static struct machine_function *
16404 ix86_init_machine_status (void)
16406 struct machine_function *f;
16408 f = GGC_CNEW (struct machine_function);
16409 f->use_fast_prologue_epilogue_nregs = -1;
16410 f->tls_descriptor_call_expanded_p = 0;
16412 return f;
16415 /* Return a MEM corresponding to a stack slot with mode MODE.
16416 Allocate a new slot if necessary.
16418 The RTL for a function can have several slots available: N is
16419 which slot to use. */
16422 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
16424 struct stack_local_entry *s;
16426 gcc_assert (n < MAX_386_STACK_LOCALS);
16428 /* Virtual slot is valid only before vregs are instantiated. */
16429 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
16431 for (s = ix86_stack_locals; s; s = s->next)
16432 if (s->mode == mode && s->n == n)
16433 return copy_rtx (s->rtl);
16435 s = (struct stack_local_entry *)
16436 ggc_alloc (sizeof (struct stack_local_entry));
16437 s->n = n;
16438 s->mode = mode;
16439 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
16441 s->next = ix86_stack_locals;
16442 ix86_stack_locals = s;
16443 return s->rtl;
16446 /* Construct the SYMBOL_REF for the tls_get_addr function. */
16448 static GTY(()) rtx ix86_tls_symbol;
16450 ix86_tls_get_addr (void)
16453 if (!ix86_tls_symbol)
16455 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
16456 (TARGET_ANY_GNU_TLS
16457 && !TARGET_64BIT)
16458 ? "___tls_get_addr"
16459 : "__tls_get_addr");
16462 return ix86_tls_symbol;
16465 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
16467 static GTY(()) rtx ix86_tls_module_base_symbol;
16469 ix86_tls_module_base (void)
16472 if (!ix86_tls_module_base_symbol)
16474 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
16475 "_TLS_MODULE_BASE_");
16476 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
16477 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
16480 return ix86_tls_module_base_symbol;
16483 /* Calculate the length of the memory address in the instruction
16484 encoding. Does not include the one-byte modrm, opcode, or prefix. */
16487 memory_address_length (rtx addr)
16489 struct ix86_address parts;
16490 rtx base, index, disp;
16491 int len;
16492 int ok;
16494 if (GET_CODE (addr) == PRE_DEC
16495 || GET_CODE (addr) == POST_INC
16496 || GET_CODE (addr) == PRE_MODIFY
16497 || GET_CODE (addr) == POST_MODIFY)
16498 return 0;
16500 ok = ix86_decompose_address (addr, &parts);
16501 gcc_assert (ok);
16503 if (parts.base && GET_CODE (parts.base) == SUBREG)
16504 parts.base = SUBREG_REG (parts.base);
16505 if (parts.index && GET_CODE (parts.index) == SUBREG)
16506 parts.index = SUBREG_REG (parts.index);
16508 base = parts.base;
16509 index = parts.index;
16510 disp = parts.disp;
16511 len = 0;
16513 /* Rule of thumb:
16514 - esp as the base always wants an index,
16515 - ebp as the base always wants a displacement. */
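/* Hedged AT&T-syntax examples of the cases handled below (lengths count
   bytes beyond the one-byte modrm):
     (%eax)           -> 0   plain register indirect
     (%esp)           -> 1   esp needs a SIB byte
     8(%ebp)          -> 1   ebp needs at least a disp8
     foo              -> 4   direct addressing, disp32
     16(%eax,%ebx,4)  -> 2   SIB byte plus disp8  */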
16517 /* Register Indirect. */
16518 if (base && !index && !disp)
16520 /* esp (for its index) and ebp (for its displacement) need
16521 the two-byte modrm form. */
16522 if (addr == stack_pointer_rtx
16523 || addr == arg_pointer_rtx
16524 || addr == frame_pointer_rtx
16525 || addr == hard_frame_pointer_rtx)
16526 len = 1;
16529 /* Direct Addressing. */
16530 else if (disp && !base && !index)
16531 len = 4;
16533 else
16535 /* Find the length of the displacement constant. */
16536 if (disp)
16538 if (base && satisfies_constraint_K (disp))
16539 len = 1;
16540 else
16541 len = 4;
16543 /* ebp always wants a displacement. */
16544 else if (base == hard_frame_pointer_rtx)
16545 len = 1;
16547 /* An index requires the two-byte modrm form.... */
16548 if (index
16549 /* ...like esp, which always wants an index. */
16550 || base == stack_pointer_rtx
16551 || base == arg_pointer_rtx
16552 || base == frame_pointer_rtx)
16553 len += 1;
16556 return len;
16559 /* Compute default value for "length_immediate" attribute. When SHORTFORM
16560 is set, expect that the insn has an 8-bit immediate alternative. */
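/* Hedged examples: "addl $1000, %eax" carries a 4-byte immediate
   (length 4), while with SHORTFORM "addl $4, %eax" can use the
   sign-extended 8-bit form (length 1); DImode immediates are still at
   most 4 bytes, sign-extended to 64 bits by the hardware.  */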
16562 ix86_attr_length_immediate_default (rtx insn, int shortform)
16564 int len = 0;
16565 int i;
16566 extract_insn_cached (insn);
16567 for (i = recog_data.n_operands - 1; i >= 0; --i)
16568 if (CONSTANT_P (recog_data.operand[i]))
16570 gcc_assert (!len);
16571 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
16572 len = 1;
16573 else
16575 switch (get_attr_mode (insn))
16577 case MODE_QI:
16578 len += 1;
16579 break;
16580 case MODE_HI:
16581 len += 2;
16582 break;
16583 case MODE_SI:
16584 len += 4;
16585 break;
16586 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
16587 case MODE_DI:
16588 len += 4;
16589 break;
16590 default:
16591 fatal_insn ("unknown insn mode", insn);
16595 return len;
16597 /* Compute default value for "length_address" attribute. */
16599 ix86_attr_length_address_default (rtx insn)
16601 int i;
16603 if (get_attr_type (insn) == TYPE_LEA)
16605 rtx set = PATTERN (insn);
16607 if (GET_CODE (set) == PARALLEL)
16608 set = XVECEXP (set, 0, 0);
16610 gcc_assert (GET_CODE (set) == SET);
16612 return memory_address_length (SET_SRC (set));
16615 extract_insn_cached (insn);
16616 for (i = recog_data.n_operands - 1; i >= 0; --i)
16617 if (MEM_P (recog_data.operand[i]))
16619 return memory_address_length (XEXP (recog_data.operand[i], 0));
16620 break;
16622 return 0;
16625 /* Return the maximum number of instructions a cpu can issue. */
16627 static int
16628 ix86_issue_rate (void)
16630 switch (ix86_tune)
16632 case PROCESSOR_PENTIUM:
16633 case PROCESSOR_K6:
16634 return 2;
16636 case PROCESSOR_PENTIUMPRO:
16637 case PROCESSOR_PENTIUM4:
16638 case PROCESSOR_ATHLON:
16639 case PROCESSOR_K8:
16640 case PROCESSOR_AMDFAM10:
16641 case PROCESSOR_NOCONA:
16642 case PROCESSOR_GENERIC32:
16643 case PROCESSOR_GENERIC64:
16644 return 3;
16646 case PROCESSOR_CORE2:
16647 return 4;
16649 default:
16650 return 1;
16654 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
16655 by DEP_INSN and nothing else set by DEP_INSN. */
16657 static int
16658 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
16660 rtx set, set2;
16662 /* Simplify the test for uninteresting insns. */
16663 if (insn_type != TYPE_SETCC
16664 && insn_type != TYPE_ICMOV
16665 && insn_type != TYPE_FCMOV
16666 && insn_type != TYPE_IBR)
16667 return 0;
16669 if ((set = single_set (dep_insn)) != 0)
16671 set = SET_DEST (set);
16672 set2 = NULL_RTX;
16674 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
16675 && XVECLEN (PATTERN (dep_insn), 0) == 2
16676 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
16677 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
16679 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
16680 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
16682 else
16683 return 0;
16685 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
16686 return 0;
16688 /* This test is true if the dependent insn reads the flags but
16689 not any other potentially set register. */
16690 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
16691 return 0;
16693 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
16694 return 0;
16696 return 1;
16699 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
16700 address with operands set by DEP_INSN. */
16702 static int
16703 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
16705 rtx addr;
16707 if (insn_type == TYPE_LEA
16708 && TARGET_PENTIUM)
16710 addr = PATTERN (insn);
16712 if (GET_CODE (addr) == PARALLEL)
16713 addr = XVECEXP (addr, 0, 0);
16715 gcc_assert (GET_CODE (addr) == SET);
16717 addr = SET_SRC (addr);
16719 else
16721 int i;
16722 extract_insn_cached (insn);
16723 for (i = recog_data.n_operands - 1; i >= 0; --i)
16724 if (MEM_P (recog_data.operand[i]))
16726 addr = XEXP (recog_data.operand[i], 0);
16727 goto found;
16729 return 0;
16730 found:;
16733 return modified_in_p (addr, dep_insn);
16736 static int
16737 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
16739 enum attr_type insn_type, dep_insn_type;
16740 enum attr_memory memory;
16741 rtx set, set2;
16742 int dep_insn_code_number;
16744 /* Anti and output dependencies have zero cost on all CPUs. */
16745 if (REG_NOTE_KIND (link) != 0)
16746 return 0;
16748 dep_insn_code_number = recog_memoized (dep_insn);
16750 /* If we can't recognize the insns, we can't really do anything. */
16751 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
16752 return cost;
16754 insn_type = get_attr_type (insn);
16755 dep_insn_type = get_attr_type (dep_insn);
16757 switch (ix86_tune)
16759 case PROCESSOR_PENTIUM:
16760 /* Address Generation Interlock adds a cycle of latency. */
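/* Hedged example: "addl $4, %ebx" immediately followed by
   "movl (%ebx), %eax" pays the extra cycle, because the load must
   generate its address from the %ebx value computed just before.  */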
16761 if (ix86_agi_dependent (insn, dep_insn, insn_type))
16762 cost += 1;
16764 /* ??? Compares pair with jump/setcc. */
16765 if (ix86_flags_dependent (insn, dep_insn, insn_type))
16766 cost = 0;
16768 /* Floating point stores require value to be ready one cycle earlier. */
16769 if (insn_type == TYPE_FMOV
16770 && get_attr_memory (insn) == MEMORY_STORE
16771 && !ix86_agi_dependent (insn, dep_insn, insn_type))
16772 cost += 1;
16773 break;
16775 case PROCESSOR_PENTIUMPRO:
16776 memory = get_attr_memory (insn);
16778 /* INT->FP conversion is expensive. */
16779 if (get_attr_fp_int_src (dep_insn))
16780 cost += 5;
16782 /* There is one cycle extra latency between an FP op and a store. */
16783 if (insn_type == TYPE_FMOV
16784 && (set = single_set (dep_insn)) != NULL_RTX
16785 && (set2 = single_set (insn)) != NULL_RTX
16786 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
16787 && MEM_P (SET_DEST (set2)))
16788 cost += 1;
16790 /* Show the ability of the reorder buffer to hide the latency of a load
16791 by executing it in parallel with the previous instruction when the
16792 previous instruction is not needed to compute the address. */
16793 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
16794 && !ix86_agi_dependent (insn, dep_insn, insn_type))
16796 /* Claim moves to take one cycle, as the core can issue one load
16797 at a time and the next load can start a cycle later. */
16798 if (dep_insn_type == TYPE_IMOV
16799 || dep_insn_type == TYPE_FMOV)
16800 cost = 1;
16801 else if (cost > 1)
16802 cost--;
16804 break;
16806 case PROCESSOR_K6:
16807 memory = get_attr_memory (insn);
16809 /* The esp dependency is resolved before the instruction is really
16810 finished. */
16811 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
16812 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
16813 return 1;
16815 /* INT->FP conversion is expensive. */
16816 if (get_attr_fp_int_src (dep_insn))
16817 cost += 5;
16819 /* Show the ability of the reorder buffer to hide the latency of a load
16820 by executing it in parallel with the previous instruction when the
16821 previous instruction is not needed to compute the address. */
16822 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
16823 && !ix86_agi_dependent (insn, dep_insn, insn_type))
16825 /* Claim moves to take one cycle, as the core can issue one load
16826 at a time and the next load can start a cycle later. */
16827 if (dep_insn_type == TYPE_IMOV
16828 || dep_insn_type == TYPE_FMOV)
16829 cost = 1;
16830 else if (cost > 2)
16831 cost -= 2;
16832 else
16833 cost = 1;
16835 break;
16837 case PROCESSOR_ATHLON:
16838 case PROCESSOR_K8:
16839 case PROCESSOR_AMDFAM10:
16840 case PROCESSOR_GENERIC32:
16841 case PROCESSOR_GENERIC64:
16842 memory = get_attr_memory (insn);
16844 /* Show the ability of the reorder buffer to hide the latency of a load
16845 by executing it in parallel with the previous instruction when the
16846 previous instruction is not needed to compute the address. */
16847 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
16848 && !ix86_agi_dependent (insn, dep_insn, insn_type))
16850 enum attr_unit unit = get_attr_unit (insn);
16851 int loadcost = 3;
16853 /* Because of the difference between the length of integer and
16854 floating unit pipeline preparation stages, the memory operands
16855 for floating point are cheaper.
16857 ??? For Athlon the difference is most probably 2. */
16858 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
16859 loadcost = 3;
16860 else
16861 loadcost = TARGET_ATHLON ? 2 : 0;
16863 if (cost >= loadcost)
16864 cost -= loadcost;
16865 else
16866 cost = 0;
16869 default:
16870 break;
16873 return cost;
16876 /* How many alternative schedules to try. This should be as wide as the
16877 scheduling freedom in the DFA, but no wider. Making this value too
16878 large results in extra work for the scheduler. */
16880 static int
16881 ia32_multipass_dfa_lookahead (void)
16883 switch (ix86_tune)
16885 case PROCESSOR_PENTIUM:
16886 return 2;
16888 case PROCESSOR_PENTIUMPRO:
16889 case PROCESSOR_K6:
16890 return 1;
16892 default:
16893 return 0;
16898 /* Compute the alignment given to a constant that is being placed in memory.
16899 EXP is the constant and ALIGN is the alignment that the object would
16900 ordinarily have.
16901 The value of this function is used instead of that alignment to align
16902 the object. */
16905 ix86_constant_alignment (tree exp, int align)
16907 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
16908 || TREE_CODE (exp) == INTEGER_CST)
16910 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
16911 return 64;
16912 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
16913 return 128;
16915 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
16916 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
16917 return BITS_PER_WORD;
16919 return align;
16922 /* Compute the alignment for a static variable.
16923 TYPE is the data type, and ALIGN is the alignment that
16924 the object would ordinarily have. The value of this function is used
16925 instead of that alignment to align the object. */
16928 ix86_data_alignment (tree type, int align)
16930 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
16932 if (AGGREGATE_TYPE_P (type)
16933 && TYPE_SIZE (type)
16934 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
16935 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
16936 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
16937 && align < max_align)
16938 align = max_align;
16940 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
16941 to a 16-byte boundary. */
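/* E.g. (hedged): "static char buf[32];" is given 128-bit alignment in
   64-bit mode by the check below, while an 8-byte array keeps its
   natural alignment.  */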
16942 if (TARGET_64BIT)
16944 if (AGGREGATE_TYPE_P (type)
16945 && TYPE_SIZE (type)
16946 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
16947 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
16948 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
16949 return 128;
16952 if (TREE_CODE (type) == ARRAY_TYPE)
16954 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
16955 return 64;
16956 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
16957 return 128;
16959 else if (TREE_CODE (type) == COMPLEX_TYPE)
16962 if (TYPE_MODE (type) == DCmode && align < 64)
16963 return 64;
16964 if (TYPE_MODE (type) == XCmode && align < 128)
16965 return 128;
16967 else if ((TREE_CODE (type) == RECORD_TYPE
16968 || TREE_CODE (type) == UNION_TYPE
16969 || TREE_CODE (type) == QUAL_UNION_TYPE)
16970 && TYPE_FIELDS (type))
16972 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
16973 return 64;
16974 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
16975 return 128;
16977 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
16978 || TREE_CODE (type) == INTEGER_TYPE)
16980 if (TYPE_MODE (type) == DFmode && align < 64)
16981 return 64;
16982 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
16983 return 128;
16986 return align;
16989 /* Compute the alignment for a local variable.
16990 TYPE is the data type, and ALIGN is the alignment that
16991 the object would ordinarily have. The value of this macro is used
16992 instead of that alignment to align the object. */
16995 ix86_local_alignment (tree type, int align)
16997 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
16998 to a 16-byte boundary. */
16999 if (TARGET_64BIT)
17001 if (AGGREGATE_TYPE_P (type)
17002 && TYPE_SIZE (type)
17003 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
17004 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
17005 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
17006 return 128;
17008 if (TREE_CODE (type) == ARRAY_TYPE)
17010 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
17011 return 64;
17012 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
17013 return 128;
17015 else if (TREE_CODE (type) == COMPLEX_TYPE)
17017 if (TYPE_MODE (type) == DCmode && align < 64)
17018 return 64;
17019 if (TYPE_MODE (type) == XCmode && align < 128)
17020 return 128;
17022 else if ((TREE_CODE (type) == RECORD_TYPE
17023 || TREE_CODE (type) == UNION_TYPE
17024 || TREE_CODE (type) == QUAL_UNION_TYPE)
17025 && TYPE_FIELDS (type))
17027 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
17028 return 64;
17029 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
17030 return 128;
17032 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
17033 || TREE_CODE (type) == INTEGER_TYPE)
17036 if (TYPE_MODE (type) == DFmode && align < 64)
17037 return 64;
17038 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
17039 return 128;
17041 return align;
17044 /* Emit RTL insns to initialize the variable parts of a trampoline.
17045 FNADDR is an RTX for the address of the function's pure code.
17046 CXT is an RTX for the static chain value for the function. */
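/* Hedged sketch of the 32-bit trampoline emitted below (10 bytes):

     offset 0:  b9 <cxt:4>    movl $cxt, %ecx   (static chain)
     offset 5:  e9 <disp:4>   jmp  fnaddr       (disp relative to tramp+10)

   The 64-bit variant instead materializes FNADDR in r11 and the static
   chain in r10 with mov immediates, ending with a jmp *%r11 (49 ff e3).  */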
17047 void
17048 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
17050 if (!TARGET_64BIT)
17052 /* Compute offset from the end of the jmp to the target function. */
17053 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
17054 plus_constant (tramp, 10),
17055 NULL_RTX, 1, OPTAB_DIRECT);
17056 emit_move_insn (gen_rtx_MEM (QImode, tramp),
17057 gen_int_mode (0xb9, QImode));
17058 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
17059 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
17060 gen_int_mode (0xe9, QImode));
17061 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
17063 else
17065 int offset = 0;
17066 /* Try to load the address using the shorter movl instead of movabs.
17067 We may want to support movq for kernel mode, but the kernel does not
17068 use trampolines at the moment. */
17069 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
17071 fnaddr = copy_to_mode_reg (DImode, fnaddr);
17072 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
17073 gen_int_mode (0xbb41, HImode));
17074 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
17075 gen_lowpart (SImode, fnaddr));
17076 offset += 6;
17078 else
17080 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
17081 gen_int_mode (0xbb49, HImode));
17082 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
17083 fnaddr);
17084 offset += 10;
17086 /* Load static chain using movabs to r10. */
17087 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
17088 gen_int_mode (0xba49, HImode));
17089 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
17090 cxt);
17091 offset += 10;
17092 /* Jump to r11. */
17093 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
17094 gen_int_mode (0xff49, HImode));
17095 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
17096 gen_int_mode (0xe3, QImode));
17097 offset += 3;
17098 gcc_assert (offset <= TRAMPOLINE_SIZE);
17101 #ifdef ENABLE_EXECUTE_STACK
17102 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
17103 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
17104 #endif
17107 /* Codes for all the SSE/MMX builtins. */
17108 enum ix86_builtins
17110 IX86_BUILTIN_ADDPS,
17111 IX86_BUILTIN_ADDSS,
17112 IX86_BUILTIN_DIVPS,
17113 IX86_BUILTIN_DIVSS,
17114 IX86_BUILTIN_MULPS,
17115 IX86_BUILTIN_MULSS,
17116 IX86_BUILTIN_SUBPS,
17117 IX86_BUILTIN_SUBSS,
17119 IX86_BUILTIN_CMPEQPS,
17120 IX86_BUILTIN_CMPLTPS,
17121 IX86_BUILTIN_CMPLEPS,
17122 IX86_BUILTIN_CMPGTPS,
17123 IX86_BUILTIN_CMPGEPS,
17124 IX86_BUILTIN_CMPNEQPS,
17125 IX86_BUILTIN_CMPNLTPS,
17126 IX86_BUILTIN_CMPNLEPS,
17127 IX86_BUILTIN_CMPNGTPS,
17128 IX86_BUILTIN_CMPNGEPS,
17129 IX86_BUILTIN_CMPORDPS,
17130 IX86_BUILTIN_CMPUNORDPS,
17131 IX86_BUILTIN_CMPEQSS,
17132 IX86_BUILTIN_CMPLTSS,
17133 IX86_BUILTIN_CMPLESS,
17134 IX86_BUILTIN_CMPNEQSS,
17135 IX86_BUILTIN_CMPNLTSS,
17136 IX86_BUILTIN_CMPNLESS,
17137 IX86_BUILTIN_CMPNGTSS,
17138 IX86_BUILTIN_CMPNGESS,
17139 IX86_BUILTIN_CMPORDSS,
17140 IX86_BUILTIN_CMPUNORDSS,
17142 IX86_BUILTIN_COMIEQSS,
17143 IX86_BUILTIN_COMILTSS,
17144 IX86_BUILTIN_COMILESS,
17145 IX86_BUILTIN_COMIGTSS,
17146 IX86_BUILTIN_COMIGESS,
17147 IX86_BUILTIN_COMINEQSS,
17148 IX86_BUILTIN_UCOMIEQSS,
17149 IX86_BUILTIN_UCOMILTSS,
17150 IX86_BUILTIN_UCOMILESS,
17151 IX86_BUILTIN_UCOMIGTSS,
17152 IX86_BUILTIN_UCOMIGESS,
17153 IX86_BUILTIN_UCOMINEQSS,
17155 IX86_BUILTIN_CVTPI2PS,
17156 IX86_BUILTIN_CVTPS2PI,
17157 IX86_BUILTIN_CVTSI2SS,
17158 IX86_BUILTIN_CVTSI642SS,
17159 IX86_BUILTIN_CVTSS2SI,
17160 IX86_BUILTIN_CVTSS2SI64,
17161 IX86_BUILTIN_CVTTPS2PI,
17162 IX86_BUILTIN_CVTTSS2SI,
17163 IX86_BUILTIN_CVTTSS2SI64,
17165 IX86_BUILTIN_MAXPS,
17166 IX86_BUILTIN_MAXSS,
17167 IX86_BUILTIN_MINPS,
17168 IX86_BUILTIN_MINSS,
17170 IX86_BUILTIN_LOADUPS,
17171 IX86_BUILTIN_STOREUPS,
17172 IX86_BUILTIN_MOVSS,
17174 IX86_BUILTIN_MOVHLPS,
17175 IX86_BUILTIN_MOVLHPS,
17176 IX86_BUILTIN_LOADHPS,
17177 IX86_BUILTIN_LOADLPS,
17178 IX86_BUILTIN_STOREHPS,
17179 IX86_BUILTIN_STORELPS,
17181 IX86_BUILTIN_MASKMOVQ,
17182 IX86_BUILTIN_MOVMSKPS,
17183 IX86_BUILTIN_PMOVMSKB,
17185 IX86_BUILTIN_MOVNTPS,
17186 IX86_BUILTIN_MOVNTQ,
17188 IX86_BUILTIN_LOADDQU,
17189 IX86_BUILTIN_STOREDQU,
17191 IX86_BUILTIN_PACKSSWB,
17192 IX86_BUILTIN_PACKSSDW,
17193 IX86_BUILTIN_PACKUSWB,
17195 IX86_BUILTIN_PADDB,
17196 IX86_BUILTIN_PADDW,
17197 IX86_BUILTIN_PADDD,
17198 IX86_BUILTIN_PADDQ,
17199 IX86_BUILTIN_PADDSB,
17200 IX86_BUILTIN_PADDSW,
17201 IX86_BUILTIN_PADDUSB,
17202 IX86_BUILTIN_PADDUSW,
17203 IX86_BUILTIN_PSUBB,
17204 IX86_BUILTIN_PSUBW,
17205 IX86_BUILTIN_PSUBD,
17206 IX86_BUILTIN_PSUBQ,
17207 IX86_BUILTIN_PSUBSB,
17208 IX86_BUILTIN_PSUBSW,
17209 IX86_BUILTIN_PSUBUSB,
17210 IX86_BUILTIN_PSUBUSW,
17212 IX86_BUILTIN_PAND,
17213 IX86_BUILTIN_PANDN,
17214 IX86_BUILTIN_POR,
17215 IX86_BUILTIN_PXOR,
17217 IX86_BUILTIN_PAVGB,
17218 IX86_BUILTIN_PAVGW,
17220 IX86_BUILTIN_PCMPEQB,
17221 IX86_BUILTIN_PCMPEQW,
17222 IX86_BUILTIN_PCMPEQD,
17223 IX86_BUILTIN_PCMPGTB,
17224 IX86_BUILTIN_PCMPGTW,
17225 IX86_BUILTIN_PCMPGTD,
17227 IX86_BUILTIN_PMADDWD,
17229 IX86_BUILTIN_PMAXSW,
17230 IX86_BUILTIN_PMAXUB,
17231 IX86_BUILTIN_PMINSW,
17232 IX86_BUILTIN_PMINUB,
17234 IX86_BUILTIN_PMULHUW,
17235 IX86_BUILTIN_PMULHW,
17236 IX86_BUILTIN_PMULLW,
17238 IX86_BUILTIN_PSADBW,
17239 IX86_BUILTIN_PSHUFW,
17241 IX86_BUILTIN_PSLLW,
17242 IX86_BUILTIN_PSLLD,
17243 IX86_BUILTIN_PSLLQ,
17244 IX86_BUILTIN_PSRAW,
17245 IX86_BUILTIN_PSRAD,
17246 IX86_BUILTIN_PSRLW,
17247 IX86_BUILTIN_PSRLD,
17248 IX86_BUILTIN_PSRLQ,
17249 IX86_BUILTIN_PSLLWI,
17250 IX86_BUILTIN_PSLLDI,
17251 IX86_BUILTIN_PSLLQI,
17252 IX86_BUILTIN_PSRAWI,
17253 IX86_BUILTIN_PSRADI,
17254 IX86_BUILTIN_PSRLWI,
17255 IX86_BUILTIN_PSRLDI,
17256 IX86_BUILTIN_PSRLQI,
17258 IX86_BUILTIN_PUNPCKHBW,
17259 IX86_BUILTIN_PUNPCKHWD,
17260 IX86_BUILTIN_PUNPCKHDQ,
17261 IX86_BUILTIN_PUNPCKLBW,
17262 IX86_BUILTIN_PUNPCKLWD,
17263 IX86_BUILTIN_PUNPCKLDQ,
17265 IX86_BUILTIN_SHUFPS,
17267 IX86_BUILTIN_RCPPS,
17268 IX86_BUILTIN_RCPSS,
17269 IX86_BUILTIN_RSQRTPS,
17270 IX86_BUILTIN_RSQRTPS_NR,
17271 IX86_BUILTIN_RSQRTSS,
17272 IX86_BUILTIN_RSQRTF,
17273 IX86_BUILTIN_SQRTPS,
17274 IX86_BUILTIN_SQRTPS_NR,
17275 IX86_BUILTIN_SQRTSS,
17277 IX86_BUILTIN_UNPCKHPS,
17278 IX86_BUILTIN_UNPCKLPS,
17280 IX86_BUILTIN_ANDPS,
17281 IX86_BUILTIN_ANDNPS,
17282 IX86_BUILTIN_ORPS,
17283 IX86_BUILTIN_XORPS,
17285 IX86_BUILTIN_EMMS,
17286 IX86_BUILTIN_LDMXCSR,
17287 IX86_BUILTIN_STMXCSR,
17288 IX86_BUILTIN_SFENCE,
17290 /* 3DNow! Original */
17291 IX86_BUILTIN_FEMMS,
17292 IX86_BUILTIN_PAVGUSB,
17293 IX86_BUILTIN_PF2ID,
17294 IX86_BUILTIN_PFACC,
17295 IX86_BUILTIN_PFADD,
17296 IX86_BUILTIN_PFCMPEQ,
17297 IX86_BUILTIN_PFCMPGE,
17298 IX86_BUILTIN_PFCMPGT,
17299 IX86_BUILTIN_PFMAX,
17300 IX86_BUILTIN_PFMIN,
17301 IX86_BUILTIN_PFMUL,
17302 IX86_BUILTIN_PFRCP,
17303 IX86_BUILTIN_PFRCPIT1,
17304 IX86_BUILTIN_PFRCPIT2,
17305 IX86_BUILTIN_PFRSQIT1,
17306 IX86_BUILTIN_PFRSQRT,
17307 IX86_BUILTIN_PFSUB,
17308 IX86_BUILTIN_PFSUBR,
17309 IX86_BUILTIN_PI2FD,
17310 IX86_BUILTIN_PMULHRW,
17312 /* 3DNow! Athlon Extensions */
17313 IX86_BUILTIN_PF2IW,
17314 IX86_BUILTIN_PFNACC,
17315 IX86_BUILTIN_PFPNACC,
17316 IX86_BUILTIN_PI2FW,
17317 IX86_BUILTIN_PSWAPDSI,
17318 IX86_BUILTIN_PSWAPDSF,
17320 /* SSE2 */
17321 IX86_BUILTIN_ADDPD,
17322 IX86_BUILTIN_ADDSD,
17323 IX86_BUILTIN_DIVPD,
17324 IX86_BUILTIN_DIVSD,
17325 IX86_BUILTIN_MULPD,
17326 IX86_BUILTIN_MULSD,
17327 IX86_BUILTIN_SUBPD,
17328 IX86_BUILTIN_SUBSD,
17330 IX86_BUILTIN_CMPEQPD,
17331 IX86_BUILTIN_CMPLTPD,
17332 IX86_BUILTIN_CMPLEPD,
17333 IX86_BUILTIN_CMPGTPD,
17334 IX86_BUILTIN_CMPGEPD,
17335 IX86_BUILTIN_CMPNEQPD,
17336 IX86_BUILTIN_CMPNLTPD,
17337 IX86_BUILTIN_CMPNLEPD,
17338 IX86_BUILTIN_CMPNGTPD,
17339 IX86_BUILTIN_CMPNGEPD,
17340 IX86_BUILTIN_CMPORDPD,
17341 IX86_BUILTIN_CMPUNORDPD,
17342 IX86_BUILTIN_CMPEQSD,
17343 IX86_BUILTIN_CMPLTSD,
17344 IX86_BUILTIN_CMPLESD,
17345 IX86_BUILTIN_CMPNEQSD,
17346 IX86_BUILTIN_CMPNLTSD,
17347 IX86_BUILTIN_CMPNLESD,
17348 IX86_BUILTIN_CMPORDSD,
17349 IX86_BUILTIN_CMPUNORDSD,
17351 IX86_BUILTIN_COMIEQSD,
17352 IX86_BUILTIN_COMILTSD,
17353 IX86_BUILTIN_COMILESD,
17354 IX86_BUILTIN_COMIGTSD,
17355 IX86_BUILTIN_COMIGESD,
17356 IX86_BUILTIN_COMINEQSD,
17357 IX86_BUILTIN_UCOMIEQSD,
17358 IX86_BUILTIN_UCOMILTSD,
17359 IX86_BUILTIN_UCOMILESD,
17360 IX86_BUILTIN_UCOMIGTSD,
17361 IX86_BUILTIN_UCOMIGESD,
17362 IX86_BUILTIN_UCOMINEQSD,
17364 IX86_BUILTIN_MAXPD,
17365 IX86_BUILTIN_MAXSD,
17366 IX86_BUILTIN_MINPD,
17367 IX86_BUILTIN_MINSD,
17369 IX86_BUILTIN_ANDPD,
17370 IX86_BUILTIN_ANDNPD,
17371 IX86_BUILTIN_ORPD,
17372 IX86_BUILTIN_XORPD,
17374 IX86_BUILTIN_SQRTPD,
17375 IX86_BUILTIN_SQRTSD,
17377 IX86_BUILTIN_UNPCKHPD,
17378 IX86_BUILTIN_UNPCKLPD,
17380 IX86_BUILTIN_SHUFPD,
17382 IX86_BUILTIN_LOADUPD,
17383 IX86_BUILTIN_STOREUPD,
17384 IX86_BUILTIN_MOVSD,
17386 IX86_BUILTIN_LOADHPD,
17387 IX86_BUILTIN_LOADLPD,
17389 IX86_BUILTIN_CVTDQ2PD,
17390 IX86_BUILTIN_CVTDQ2PS,
17392 IX86_BUILTIN_CVTPD2DQ,
17393 IX86_BUILTIN_CVTPD2PI,
17394 IX86_BUILTIN_CVTPD2PS,
17395 IX86_BUILTIN_CVTTPD2DQ,
17396 IX86_BUILTIN_CVTTPD2PI,
17398 IX86_BUILTIN_CVTPI2PD,
17399 IX86_BUILTIN_CVTSI2SD,
17400 IX86_BUILTIN_CVTSI642SD,
17402 IX86_BUILTIN_CVTSD2SI,
17403 IX86_BUILTIN_CVTSD2SI64,
17404 IX86_BUILTIN_CVTSD2SS,
17405 IX86_BUILTIN_CVTSS2SD,
17406 IX86_BUILTIN_CVTTSD2SI,
17407 IX86_BUILTIN_CVTTSD2SI64,
17409 IX86_BUILTIN_CVTPS2DQ,
17410 IX86_BUILTIN_CVTPS2PD,
17411 IX86_BUILTIN_CVTTPS2DQ,
17413 IX86_BUILTIN_MOVNTI,
17414 IX86_BUILTIN_MOVNTPD,
17415 IX86_BUILTIN_MOVNTDQ,
17417 /* SSE2 MMX */
17418 IX86_BUILTIN_MASKMOVDQU,
17419 IX86_BUILTIN_MOVMSKPD,
17420 IX86_BUILTIN_PMOVMSKB128,
17422 IX86_BUILTIN_PACKSSWB128,
17423 IX86_BUILTIN_PACKSSDW128,
17424 IX86_BUILTIN_PACKUSWB128,
17426 IX86_BUILTIN_PADDB128,
17427 IX86_BUILTIN_PADDW128,
17428 IX86_BUILTIN_PADDD128,
17429 IX86_BUILTIN_PADDQ128,
17430 IX86_BUILTIN_PADDSB128,
17431 IX86_BUILTIN_PADDSW128,
17432 IX86_BUILTIN_PADDUSB128,
17433 IX86_BUILTIN_PADDUSW128,
17434 IX86_BUILTIN_PSUBB128,
17435 IX86_BUILTIN_PSUBW128,
17436 IX86_BUILTIN_PSUBD128,
17437 IX86_BUILTIN_PSUBQ128,
17438 IX86_BUILTIN_PSUBSB128,
17439 IX86_BUILTIN_PSUBSW128,
17440 IX86_BUILTIN_PSUBUSB128,
17441 IX86_BUILTIN_PSUBUSW128,
17443 IX86_BUILTIN_PAND128,
17444 IX86_BUILTIN_PANDN128,
17445 IX86_BUILTIN_POR128,
17446 IX86_BUILTIN_PXOR128,
17448 IX86_BUILTIN_PAVGB128,
17449 IX86_BUILTIN_PAVGW128,
17451 IX86_BUILTIN_PCMPEQB128,
17452 IX86_BUILTIN_PCMPEQW128,
17453 IX86_BUILTIN_PCMPEQD128,
17454 IX86_BUILTIN_PCMPGTB128,
17455 IX86_BUILTIN_PCMPGTW128,
17456 IX86_BUILTIN_PCMPGTD128,
17458 IX86_BUILTIN_PMADDWD128,
17460 IX86_BUILTIN_PMAXSW128,
17461 IX86_BUILTIN_PMAXUB128,
17462 IX86_BUILTIN_PMINSW128,
17463 IX86_BUILTIN_PMINUB128,
17465 IX86_BUILTIN_PMULUDQ,
17466 IX86_BUILTIN_PMULUDQ128,
17467 IX86_BUILTIN_PMULHUW128,
17468 IX86_BUILTIN_PMULHW128,
17469 IX86_BUILTIN_PMULLW128,
17471 IX86_BUILTIN_PSADBW128,
17472 IX86_BUILTIN_PSHUFHW,
17473 IX86_BUILTIN_PSHUFLW,
17474 IX86_BUILTIN_PSHUFD,
17476 IX86_BUILTIN_PSLLDQI128,
17477 IX86_BUILTIN_PSLLWI128,
17478 IX86_BUILTIN_PSLLDI128,
17479 IX86_BUILTIN_PSLLQI128,
17480 IX86_BUILTIN_PSRAWI128,
17481 IX86_BUILTIN_PSRADI128,
17482 IX86_BUILTIN_PSRLDQI128,
17483 IX86_BUILTIN_PSRLWI128,
17484 IX86_BUILTIN_PSRLDI128,
17485 IX86_BUILTIN_PSRLQI128,
17487 IX86_BUILTIN_PSLLDQ128,
17488 IX86_BUILTIN_PSLLW128,
17489 IX86_BUILTIN_PSLLD128,
17490 IX86_BUILTIN_PSLLQ128,
17491 IX86_BUILTIN_PSRAW128,
17492 IX86_BUILTIN_PSRAD128,
17493 IX86_BUILTIN_PSRLW128,
17494 IX86_BUILTIN_PSRLD128,
17495 IX86_BUILTIN_PSRLQ128,
17497 IX86_BUILTIN_PUNPCKHBW128,
17498 IX86_BUILTIN_PUNPCKHWD128,
17499 IX86_BUILTIN_PUNPCKHDQ128,
17500 IX86_BUILTIN_PUNPCKHQDQ128,
17501 IX86_BUILTIN_PUNPCKLBW128,
17502 IX86_BUILTIN_PUNPCKLWD128,
17503 IX86_BUILTIN_PUNPCKLDQ128,
17504 IX86_BUILTIN_PUNPCKLQDQ128,
17506 IX86_BUILTIN_CLFLUSH,
17507 IX86_BUILTIN_MFENCE,
17508 IX86_BUILTIN_LFENCE,
17510 /* SSE3. */
17511 IX86_BUILTIN_ADDSUBPS,
17512 IX86_BUILTIN_HADDPS,
17513 IX86_BUILTIN_HSUBPS,
17514 IX86_BUILTIN_MOVSHDUP,
17515 IX86_BUILTIN_MOVSLDUP,
17516 IX86_BUILTIN_ADDSUBPD,
17517 IX86_BUILTIN_HADDPD,
17518 IX86_BUILTIN_HSUBPD,
17519 IX86_BUILTIN_LDDQU,
17521 IX86_BUILTIN_MONITOR,
17522 IX86_BUILTIN_MWAIT,
17524 /* SSSE3. */
17525 IX86_BUILTIN_PHADDW,
17526 IX86_BUILTIN_PHADDD,
17527 IX86_BUILTIN_PHADDSW,
17528 IX86_BUILTIN_PHSUBW,
17529 IX86_BUILTIN_PHSUBD,
17530 IX86_BUILTIN_PHSUBSW,
17531 IX86_BUILTIN_PMADDUBSW,
17532 IX86_BUILTIN_PMULHRSW,
17533 IX86_BUILTIN_PSHUFB,
17534 IX86_BUILTIN_PSIGNB,
17535 IX86_BUILTIN_PSIGNW,
17536 IX86_BUILTIN_PSIGND,
17537 IX86_BUILTIN_PALIGNR,
17538 IX86_BUILTIN_PABSB,
17539 IX86_BUILTIN_PABSW,
17540 IX86_BUILTIN_PABSD,
17542 IX86_BUILTIN_PHADDW128,
17543 IX86_BUILTIN_PHADDD128,
17544 IX86_BUILTIN_PHADDSW128,
17545 IX86_BUILTIN_PHSUBW128,
17546 IX86_BUILTIN_PHSUBD128,
17547 IX86_BUILTIN_PHSUBSW128,
17548 IX86_BUILTIN_PMADDUBSW128,
17549 IX86_BUILTIN_PMULHRSW128,
17550 IX86_BUILTIN_PSHUFB128,
17551 IX86_BUILTIN_PSIGNB128,
17552 IX86_BUILTIN_PSIGNW128,
17553 IX86_BUILTIN_PSIGND128,
17554 IX86_BUILTIN_PALIGNR128,
17555 IX86_BUILTIN_PABSB128,
17556 IX86_BUILTIN_PABSW128,
17557 IX86_BUILTIN_PABSD128,
17559 /* AMDFAM10 - SSE4A New Instructions. */
17560 IX86_BUILTIN_MOVNTSD,
17561 IX86_BUILTIN_MOVNTSS,
17562 IX86_BUILTIN_EXTRQI,
17563 IX86_BUILTIN_EXTRQ,
17564 IX86_BUILTIN_INSERTQI,
17565 IX86_BUILTIN_INSERTQ,
17567 /* SSE4.1. */
17568 IX86_BUILTIN_BLENDPD,
17569 IX86_BUILTIN_BLENDPS,
17570 IX86_BUILTIN_BLENDVPD,
17571 IX86_BUILTIN_BLENDVPS,
17572 IX86_BUILTIN_PBLENDVB128,
17573 IX86_BUILTIN_PBLENDW128,
17575 IX86_BUILTIN_DPPD,
17576 IX86_BUILTIN_DPPS,
17578 IX86_BUILTIN_INSERTPS128,
17580 IX86_BUILTIN_MOVNTDQA,
17581 IX86_BUILTIN_MPSADBW128,
17582 IX86_BUILTIN_PACKUSDW128,
17583 IX86_BUILTIN_PCMPEQQ,
17584 IX86_BUILTIN_PHMINPOSUW128,
17586 IX86_BUILTIN_PMAXSB128,
17587 IX86_BUILTIN_PMAXSD128,
17588 IX86_BUILTIN_PMAXUD128,
17589 IX86_BUILTIN_PMAXUW128,
17591 IX86_BUILTIN_PMINSB128,
17592 IX86_BUILTIN_PMINSD128,
17593 IX86_BUILTIN_PMINUD128,
17594 IX86_BUILTIN_PMINUW128,
17596 IX86_BUILTIN_PMOVSXBW128,
17597 IX86_BUILTIN_PMOVSXBD128,
17598 IX86_BUILTIN_PMOVSXBQ128,
17599 IX86_BUILTIN_PMOVSXWD128,
17600 IX86_BUILTIN_PMOVSXWQ128,
17601 IX86_BUILTIN_PMOVSXDQ128,
17603 IX86_BUILTIN_PMOVZXBW128,
17604 IX86_BUILTIN_PMOVZXBD128,
17605 IX86_BUILTIN_PMOVZXBQ128,
17606 IX86_BUILTIN_PMOVZXWD128,
17607 IX86_BUILTIN_PMOVZXWQ128,
17608 IX86_BUILTIN_PMOVZXDQ128,
17610 IX86_BUILTIN_PMULDQ128,
17611 IX86_BUILTIN_PMULLD128,
17613 IX86_BUILTIN_ROUNDPD,
17614 IX86_BUILTIN_ROUNDPS,
17615 IX86_BUILTIN_ROUNDSD,
17616 IX86_BUILTIN_ROUNDSS,
17618 IX86_BUILTIN_PTESTZ,
17619 IX86_BUILTIN_PTESTC,
17620 IX86_BUILTIN_PTESTNZC,
17622 IX86_BUILTIN_VEC_INIT_V2SI,
17623 IX86_BUILTIN_VEC_INIT_V4HI,
17624 IX86_BUILTIN_VEC_INIT_V8QI,
17625 IX86_BUILTIN_VEC_EXT_V2DF,
17626 IX86_BUILTIN_VEC_EXT_V2DI,
17627 IX86_BUILTIN_VEC_EXT_V4SF,
17628 IX86_BUILTIN_VEC_EXT_V4SI,
17629 IX86_BUILTIN_VEC_EXT_V8HI,
17630 IX86_BUILTIN_VEC_EXT_V2SI,
17631 IX86_BUILTIN_VEC_EXT_V4HI,
17632 IX86_BUILTIN_VEC_EXT_V16QI,
17633 IX86_BUILTIN_VEC_SET_V2DI,
17634 IX86_BUILTIN_VEC_SET_V4SF,
17635 IX86_BUILTIN_VEC_SET_V4SI,
17636 IX86_BUILTIN_VEC_SET_V8HI,
17637 IX86_BUILTIN_VEC_SET_V4HI,
17638 IX86_BUILTIN_VEC_SET_V16QI,
17640 IX86_BUILTIN_VEC_PACK_SFIX,
17642 /* SSE4.2. */
17643 IX86_BUILTIN_CRC32QI,
17644 IX86_BUILTIN_CRC32HI,
17645 IX86_BUILTIN_CRC32SI,
17646 IX86_BUILTIN_CRC32DI,
17648 IX86_BUILTIN_PCMPESTRI128,
17649 IX86_BUILTIN_PCMPESTRM128,
17650 IX86_BUILTIN_PCMPESTRA128,
17651 IX86_BUILTIN_PCMPESTRC128,
17652 IX86_BUILTIN_PCMPESTRO128,
17653 IX86_BUILTIN_PCMPESTRS128,
17654 IX86_BUILTIN_PCMPESTRZ128,
17655 IX86_BUILTIN_PCMPISTRI128,
17656 IX86_BUILTIN_PCMPISTRM128,
17657 IX86_BUILTIN_PCMPISTRA128,
17658 IX86_BUILTIN_PCMPISTRC128,
17659 IX86_BUILTIN_PCMPISTRO128,
17660 IX86_BUILTIN_PCMPISTRS128,
17661 IX86_BUILTIN_PCMPISTRZ128,
17663 IX86_BUILTIN_PCMPGTQ,
17665 /* AES instructions */
17666 IX86_BUILTIN_AESENC128,
17667 IX86_BUILTIN_AESENCLAST128,
17668 IX86_BUILTIN_AESDEC128,
17669 IX86_BUILTIN_AESDECLAST128,
17670 IX86_BUILTIN_AESIMC128,
17671 IX86_BUILTIN_AESKEYGENASSIST128,
17673 /* PCLMUL instruction */
17674 IX86_BUILTIN_PCLMULQDQ128,
17676 /* TFmode support builtins. */
17677 IX86_BUILTIN_INFQ,
17678 IX86_BUILTIN_FABSQ,
17679 IX86_BUILTIN_COPYSIGNQ,
17681 /* SSE5 instructions */
17682 IX86_BUILTIN_FMADDSS,
17683 IX86_BUILTIN_FMADDSD,
17684 IX86_BUILTIN_FMADDPS,
17685 IX86_BUILTIN_FMADDPD,
17686 IX86_BUILTIN_FMSUBSS,
17687 IX86_BUILTIN_FMSUBSD,
17688 IX86_BUILTIN_FMSUBPS,
17689 IX86_BUILTIN_FMSUBPD,
17690 IX86_BUILTIN_FNMADDSS,
17691 IX86_BUILTIN_FNMADDSD,
17692 IX86_BUILTIN_FNMADDPS,
17693 IX86_BUILTIN_FNMADDPD,
17694 IX86_BUILTIN_FNMSUBSS,
17695 IX86_BUILTIN_FNMSUBSD,
17696 IX86_BUILTIN_FNMSUBPS,
17697 IX86_BUILTIN_FNMSUBPD,
17698 IX86_BUILTIN_PCMOV_V2DI,
17699 IX86_BUILTIN_PCMOV_V4SI,
17700 IX86_BUILTIN_PCMOV_V8HI,
17701 IX86_BUILTIN_PCMOV_V16QI,
17702 IX86_BUILTIN_PCMOV_V4SF,
17703 IX86_BUILTIN_PCMOV_V2DF,
17704 IX86_BUILTIN_PPERM,
17705 IX86_BUILTIN_PERMPS,
17706 IX86_BUILTIN_PERMPD,
17707 IX86_BUILTIN_PMACSSWW,
17708 IX86_BUILTIN_PMACSWW,
17709 IX86_BUILTIN_PMACSSWD,
17710 IX86_BUILTIN_PMACSWD,
17711 IX86_BUILTIN_PMACSSDD,
17712 IX86_BUILTIN_PMACSDD,
17713 IX86_BUILTIN_PMACSSDQL,
17714 IX86_BUILTIN_PMACSSDQH,
17715 IX86_BUILTIN_PMACSDQL,
17716 IX86_BUILTIN_PMACSDQH,
17717 IX86_BUILTIN_PMADCSSWD,
17718 IX86_BUILTIN_PMADCSWD,
17719 IX86_BUILTIN_PHADDBW,
17720 IX86_BUILTIN_PHADDBD,
17721 IX86_BUILTIN_PHADDBQ,
17722 IX86_BUILTIN_PHADDWD,
17723 IX86_BUILTIN_PHADDWQ,
17724 IX86_BUILTIN_PHADDDQ,
17725 IX86_BUILTIN_PHADDUBW,
17726 IX86_BUILTIN_PHADDUBD,
17727 IX86_BUILTIN_PHADDUBQ,
17728 IX86_BUILTIN_PHADDUWD,
17729 IX86_BUILTIN_PHADDUWQ,
17730 IX86_BUILTIN_PHADDUDQ,
17731 IX86_BUILTIN_PHSUBBW,
17732 IX86_BUILTIN_PHSUBWD,
17733 IX86_BUILTIN_PHSUBDQ,
17734 IX86_BUILTIN_PROTB,
17735 IX86_BUILTIN_PROTW,
17736 IX86_BUILTIN_PROTD,
17737 IX86_BUILTIN_PROTQ,
17738 IX86_BUILTIN_PROTB_IMM,
17739 IX86_BUILTIN_PROTW_IMM,
17740 IX86_BUILTIN_PROTD_IMM,
17741 IX86_BUILTIN_PROTQ_IMM,
17742 IX86_BUILTIN_PSHLB,
17743 IX86_BUILTIN_PSHLW,
17744 IX86_BUILTIN_PSHLD,
17745 IX86_BUILTIN_PSHLQ,
17746 IX86_BUILTIN_PSHAB,
17747 IX86_BUILTIN_PSHAW,
17748 IX86_BUILTIN_PSHAD,
17749 IX86_BUILTIN_PSHAQ,
17750 IX86_BUILTIN_FRCZSS,
17751 IX86_BUILTIN_FRCZSD,
17752 IX86_BUILTIN_FRCZPS,
17753 IX86_BUILTIN_FRCZPD,
17754 IX86_BUILTIN_CVTPH2PS,
17755 IX86_BUILTIN_CVTPS2PH,
17757 IX86_BUILTIN_COMEQSS,
17758 IX86_BUILTIN_COMNESS,
17759 IX86_BUILTIN_COMLTSS,
17760 IX86_BUILTIN_COMLESS,
17761 IX86_BUILTIN_COMGTSS,
17762 IX86_BUILTIN_COMGESS,
17763 IX86_BUILTIN_COMUEQSS,
17764 IX86_BUILTIN_COMUNESS,
17765 IX86_BUILTIN_COMULTSS,
17766 IX86_BUILTIN_COMULESS,
17767 IX86_BUILTIN_COMUGTSS,
17768 IX86_BUILTIN_COMUGESS,
17769 IX86_BUILTIN_COMORDSS,
17770 IX86_BUILTIN_COMUNORDSS,
17771 IX86_BUILTIN_COMFALSESS,
17772 IX86_BUILTIN_COMTRUESS,
17774 IX86_BUILTIN_COMEQSD,
17775 IX86_BUILTIN_COMNESD,
17776 IX86_BUILTIN_COMLTSD,
17777 IX86_BUILTIN_COMLESD,
17778 IX86_BUILTIN_COMGTSD,
17779 IX86_BUILTIN_COMGESD,
17780 IX86_BUILTIN_COMUEQSD,
17781 IX86_BUILTIN_COMUNESD,
17782 IX86_BUILTIN_COMULTSD,
17783 IX86_BUILTIN_COMULESD,
17784 IX86_BUILTIN_COMUGTSD,
17785 IX86_BUILTIN_COMUGESD,
17786 IX86_BUILTIN_COMORDSD,
17787 IX86_BUILTIN_COMUNORDSD,
17788 IX86_BUILTIN_COMFALSESD,
17789 IX86_BUILTIN_COMTRUESD,
17791 IX86_BUILTIN_COMEQPS,
17792 IX86_BUILTIN_COMNEPS,
17793 IX86_BUILTIN_COMLTPS,
17794 IX86_BUILTIN_COMLEPS,
17795 IX86_BUILTIN_COMGTPS,
17796 IX86_BUILTIN_COMGEPS,
17797 IX86_BUILTIN_COMUEQPS,
17798 IX86_BUILTIN_COMUNEPS,
17799 IX86_BUILTIN_COMULTPS,
17800 IX86_BUILTIN_COMULEPS,
17801 IX86_BUILTIN_COMUGTPS,
17802 IX86_BUILTIN_COMUGEPS,
17803 IX86_BUILTIN_COMORDPS,
17804 IX86_BUILTIN_COMUNORDPS,
17805 IX86_BUILTIN_COMFALSEPS,
17806 IX86_BUILTIN_COMTRUEPS,
17808 IX86_BUILTIN_COMEQPD,
17809 IX86_BUILTIN_COMNEPD,
17810 IX86_BUILTIN_COMLTPD,
17811 IX86_BUILTIN_COMLEPD,
17812 IX86_BUILTIN_COMGTPD,
17813 IX86_BUILTIN_COMGEPD,
17814 IX86_BUILTIN_COMUEQPD,
17815 IX86_BUILTIN_COMUNEPD,
17816 IX86_BUILTIN_COMULTPD,
17817 IX86_BUILTIN_COMULEPD,
17818 IX86_BUILTIN_COMUGTPD,
17819 IX86_BUILTIN_COMUGEPD,
17820 IX86_BUILTIN_COMORDPD,
17821 IX86_BUILTIN_COMUNORDPD,
17822 IX86_BUILTIN_COMFALSEPD,
17823 IX86_BUILTIN_COMTRUEPD,
17825 IX86_BUILTIN_PCOMEQUB,
17826 IX86_BUILTIN_PCOMNEUB,
17827 IX86_BUILTIN_PCOMLTUB,
17828 IX86_BUILTIN_PCOMLEUB,
17829 IX86_BUILTIN_PCOMGTUB,
17830 IX86_BUILTIN_PCOMGEUB,
17831 IX86_BUILTIN_PCOMFALSEUB,
17832 IX86_BUILTIN_PCOMTRUEUB,
17833 IX86_BUILTIN_PCOMEQUW,
17834 IX86_BUILTIN_PCOMNEUW,
17835 IX86_BUILTIN_PCOMLTUW,
17836 IX86_BUILTIN_PCOMLEUW,
17837 IX86_BUILTIN_PCOMGTUW,
17838 IX86_BUILTIN_PCOMGEUW,
17839 IX86_BUILTIN_PCOMFALSEUW,
17840 IX86_BUILTIN_PCOMTRUEUW,
17841 IX86_BUILTIN_PCOMEQUD,
17842 IX86_BUILTIN_PCOMNEUD,
17843 IX86_BUILTIN_PCOMLTUD,
17844 IX86_BUILTIN_PCOMLEUD,
17845 IX86_BUILTIN_PCOMGTUD,
17846 IX86_BUILTIN_PCOMGEUD,
17847 IX86_BUILTIN_PCOMFALSEUD,
17848 IX86_BUILTIN_PCOMTRUEUD,
17849 IX86_BUILTIN_PCOMEQUQ,
17850 IX86_BUILTIN_PCOMNEUQ,
17851 IX86_BUILTIN_PCOMLTUQ,
17852 IX86_BUILTIN_PCOMLEUQ,
17853 IX86_BUILTIN_PCOMGTUQ,
17854 IX86_BUILTIN_PCOMGEUQ,
17855 IX86_BUILTIN_PCOMFALSEUQ,
17856 IX86_BUILTIN_PCOMTRUEUQ,
17858 IX86_BUILTIN_PCOMEQB,
17859 IX86_BUILTIN_PCOMNEB,
17860 IX86_BUILTIN_PCOMLTB,
17861 IX86_BUILTIN_PCOMLEB,
17862 IX86_BUILTIN_PCOMGTB,
17863 IX86_BUILTIN_PCOMGEB,
17864 IX86_BUILTIN_PCOMFALSEB,
17865 IX86_BUILTIN_PCOMTRUEB,
17866 IX86_BUILTIN_PCOMEQW,
17867 IX86_BUILTIN_PCOMNEW,
17868 IX86_BUILTIN_PCOMLTW,
17869 IX86_BUILTIN_PCOMLEW,
17870 IX86_BUILTIN_PCOMGTW,
17871 IX86_BUILTIN_PCOMGEW,
17872 IX86_BUILTIN_PCOMFALSEW,
17873 IX86_BUILTIN_PCOMTRUEW,
17874 IX86_BUILTIN_PCOMEQD,
17875 IX86_BUILTIN_PCOMNED,
17876 IX86_BUILTIN_PCOMLTD,
17877 IX86_BUILTIN_PCOMLED,
17878 IX86_BUILTIN_PCOMGTD,
17879 IX86_BUILTIN_PCOMGED,
17880 IX86_BUILTIN_PCOMFALSED,
17881 IX86_BUILTIN_PCOMTRUED,
17882 IX86_BUILTIN_PCOMEQQ,
17883 IX86_BUILTIN_PCOMNEQ,
17884 IX86_BUILTIN_PCOMLTQ,
17885 IX86_BUILTIN_PCOMLEQ,
17886 IX86_BUILTIN_PCOMGTQ,
17887 IX86_BUILTIN_PCOMGEQ,
17888 IX86_BUILTIN_PCOMFALSEQ,
17889 IX86_BUILTIN_PCOMTRUEQ,
17891 IX86_BUILTIN_MAX
17892 };
17894 /* Table for the ix86 builtin decls. */
17895 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
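/* For example, once the registrations below have run,
   ix86_builtins[(int) IX86_BUILTIN_ADDPS] yields the FUNCTION_DECL
   for __builtin_ia32_addps.  */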
17897 /* Add an ix86 target builtin function with CODE, NAME and TYPE.  Do so
17898    only if ix86_isa_flags includes one of the ISA bits in MASK.  Stores
17899    the function decl in the ix86_builtins array.
17900    Returns the function decl, or NULL_TREE if the builtin was not added.  */
17902 static inline tree
17903 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
17904 {
17905 tree decl = NULL_TREE;
17907 if (mask & ix86_isa_flags
17908 && (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT))
17909 {
17910 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
17911 NULL, NULL_TREE);
17912 ix86_builtins[(int) code] = decl;
17913 }
17915 return decl;
17916 }
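/* A sketch of what one registration amounts to (the actual calls are
   made by the table-driven init code later in this file; the type node
   names below are illustrative shorthand):

     tree v4sf_ftype_v4sf_v4sf
       = build_function_type_list (V4SF_type_node, V4SF_type_node,
                                   V4SF_type_node, NULL_TREE);
     def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_addps",
                  v4sf_ftype_v4sf_v4sf, IX86_BUILTIN_ADDPS);  */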
17918 /* Like def_builtin, but also marks the function decl "const". */
17920 static inline tree
17921 def_builtin_const (int mask, const char *name, tree type,
17922 enum ix86_builtins code)
17923 {
17924 tree decl = def_builtin (mask, name, type, code);
17925 if (decl)
17926 TREE_READONLY (decl) = 1;
17927 return decl;
17928 }
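/* Setting TREE_READONLY corresponds to __attribute__((const)): the
   builtin reads nothing but its arguments and has no side effects, so
   the optimizers may CSE or hoist calls to it.  Pure value computations
   such as __builtin_ia32_addps go through this wrapper; builtins with
   memory effects (loads, stores, fences) must use plain def_builtin.  */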
17930 /* Bits for builtin_description.flag. */
17932 /* Set when we don't support the comparison natively, and should
17933    swap the comparison operands in order to support it.  */
17934 #define BUILTIN_DESC_SWAP_OPERANDS 1
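/* E.g. a comparison such as x > y can be rewritten as y < x; an entry
   carrying this flag asks the expander to perform that operand swap.
   The ..._SWAP function-type variants in bdesc_args below encode the
   same trick, e.g. __builtin_ia32_cmpgtps is expanded as cmpltps with
   its two operands exchanged.  */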
17936 struct builtin_description
17937 {
17938 const unsigned int mask;
17939 const enum insn_code icode;
17940 const char *const name;
17941 const enum ix86_builtins code;
17942 const enum rtx_code comparison;
17943 const int flag;
17944 };
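/* Reading one entry of the tables below: the first bdesc_comi entry,

     { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq",
       IX86_BUILTIN_COMIEQSS, UNEQ, 0 },

   says: when SSE is enabled, register __builtin_ia32_comieq under the
   code IX86_BUILTIN_COMIEQSS and expand it through the sse_comi insn
   pattern using the UNEQ comparison.  */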
17946 static const struct builtin_description bdesc_comi[] =
17947 {
17948 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
17949 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
17950 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
17951 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
17952 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
17953 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
17954 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
17955 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
17956 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
17957 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
17958 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
17959 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
17960 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
17961 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
17962 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
17963 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
17964 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
17965 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
17966 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
17967 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
17968 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
17969 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
17970 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
17971 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
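/* The UNEQ/UNLT/UNLE codes above reflect that comiss/comisd signal
   "unordered" through the flags (ZF, PF and CF all set), so e.g. the
   ZF test performed for comieq is really "equal or unordered", i.e.
   UNEQ.  The comi expander in this file turns each entry's comparison
   code into the matching flags test.  */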
17974 static const struct builtin_description bdesc_pcmpestr[] =
17975 {
17976 /* SSE4.2 */
17977 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
17978 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
17979 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
17980 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
17981 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
17982 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
17983 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
17984 };
17986 static const struct builtin_description bdesc_pcmpistr[] =
17987 {
17988 /* SSE4.2 */
17989 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
17990 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
17991 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
17992 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
17993 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
17994 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
17995 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
17996 };
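/* In the pcmpestr/pcmpistr tables above, the flag field is not a
   BUILTIN_DESC_* bit but the CC mode naming which EFLAGS bit the
   variant reads: e.g. (int) CCZmode for ...pcmpestriz128, which tests
   the Z flag.  The plain ...i128/...m128 variants return the index or
   mask result instead and leave the field 0.  */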
17998 /* Special builtin types */
17999 enum ix86_special_builtin_type
18000 {
18001 SPECIAL_FTYPE_UNKNOWN,
18002 VOID_FTYPE_VOID,
18003 V16QI_FTYPE_PCCHAR,
18004 V4SF_FTYPE_PCFLOAT,
18005 V2DF_FTYPE_PCDOUBLE,
18006 V4SF_FTYPE_V4SF_PCV2SF,
18007 V2DF_FTYPE_V2DF_PCDOUBLE,
18008 V2DI_FTYPE_PV2DI,
18009 VOID_FTYPE_PV2SF_V4SF,
18010 VOID_FTYPE_PV2DI_V2DI,
18011 VOID_FTYPE_PCHAR_V16QI,
18012 VOID_FTYPE_PFLOAT_V4SF,
18013 VOID_FTYPE_PDOUBLE_V2DF,
18014 VOID_FTYPE_PDI_DI,
18015 VOID_FTYPE_PINT_INT
18016 };
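/* The names above encode RETURN_FTYPE_ARGS, with P meaning pointer and
   PC pointer-to-const.  E.g. V4SF_FTYPE_PCFLOAT describes a function
   taking a const float * and returning a V4SF vector, as used for
   __builtin_ia32_loadups below.  */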
18018 /* Builtin types */
18019 enum ix86_builtin_type
18020 {
18021 FTYPE_UNKNOWN,
18022 FLOAT128_FTYPE_FLOAT128,
18023 FLOAT_FTYPE_FLOAT,
18024 FLOAT128_FTYPE_FLOAT128_FLOAT128,
18025 INT_FTYPE_V2DI_V2DI_PTEST,
18026 INT64_FTYPE_V4SF,
18027 INT64_FTYPE_V2DF,
18028 INT_FTYPE_V16QI,
18029 INT_FTYPE_V8QI,
18030 INT_FTYPE_V4SF,
18031 INT_FTYPE_V2DF,
18032 V16QI_FTYPE_V16QI,
18033 V8HI_FTYPE_V8HI,
18034 V8HI_FTYPE_V16QI,
18035 V8QI_FTYPE_V8QI,
18036 V4SI_FTYPE_V4SI,
18037 V4SI_FTYPE_V16QI,
18038 V4SI_FTYPE_V8HI,
18039 V4SI_FTYPE_V4SF,
18040 V4SI_FTYPE_V2DF,
18041 V4HI_FTYPE_V4HI,
18042 V4SF_FTYPE_V4SF,
18043 V4SF_FTYPE_V4SF_VEC_MERGE,
18044 V4SF_FTYPE_V4SI,
18045 V4SF_FTYPE_V2DF,
18046 V2DI_FTYPE_V2DI,
18047 V2DI_FTYPE_V16QI,
18048 V2DI_FTYPE_V8HI,
18049 V2DI_FTYPE_V4SI,
18050 V2DF_FTYPE_V2DF,
18051 V2DF_FTYPE_V2DF_VEC_MERGE,
18052 V2DF_FTYPE_V4SI,
18053 V2DF_FTYPE_V4SF,
18054 V2DF_FTYPE_V2SI,
18055 V2SI_FTYPE_V2SI,
18056 V2SI_FTYPE_V4SF,
18057 V2SI_FTYPE_V2SF,
18058 V2SI_FTYPE_V2DF,
18059 V2SF_FTYPE_V2SF,
18060 V2SF_FTYPE_V2SI,
18061 V16QI_FTYPE_V16QI_V16QI,
18062 V16QI_FTYPE_V8HI_V8HI,
18063 V8QI_FTYPE_V8QI_V8QI,
18064 V8QI_FTYPE_V4HI_V4HI,
18065 V8HI_FTYPE_V8HI_V8HI,
18066 V8HI_FTYPE_V8HI_V8HI_COUNT,
18067 V8HI_FTYPE_V16QI_V16QI,
18068 V8HI_FTYPE_V4SI_V4SI,
18069 V8HI_FTYPE_V8HI_SI_COUNT,
18070 V4SI_FTYPE_V4SI_V4SI,
18071 V4SI_FTYPE_V4SI_V4SI_COUNT,
18072 V4SI_FTYPE_V8HI_V8HI,
18073 V4SI_FTYPE_V4SF_V4SF,
18074 V4SI_FTYPE_V2DF_V2DF,
18075 V4SI_FTYPE_V4SI_SI_COUNT,
18076 V4HI_FTYPE_V4HI_V4HI,
18077 V4HI_FTYPE_V4HI_V4HI_COUNT,
18078 V4HI_FTYPE_V8QI_V8QI,
18079 V4HI_FTYPE_V2SI_V2SI,
18080 V4HI_FTYPE_V4HI_SI_COUNT,
18081 V4SF_FTYPE_V4SF_V4SF,
18082 V4SF_FTYPE_V4SF_V4SF_SWAP,
18083 V4SF_FTYPE_V4SF_V2SI,
18084 V4SF_FTYPE_V4SF_V2DF,
18085 V4SF_FTYPE_V4SF_DI,
18086 V4SF_FTYPE_V4SF_SI,
18087 V2DI_FTYPE_V2DI_V2DI,
18088 V2DI_FTYPE_V2DI_V2DI_COUNT,
18089 V2DI_FTYPE_V16QI_V16QI,
18090 V2DI_FTYPE_V4SI_V4SI,
18091 V2DI_FTYPE_V2DI_V16QI,
18092 V2DI_FTYPE_V2DF_V2DF,
18093 V2DI_FTYPE_V2DI_SI_COUNT,
18094 V2SI_FTYPE_V2SI_V2SI,
18095 V2SI_FTYPE_V2SI_V2SI_COUNT,
18096 V2SI_FTYPE_V4HI_V4HI,
18097 V2SI_FTYPE_V2SF_V2SF,
18098 V2SI_FTYPE_V2SI_SI_COUNT,
18099 V2DF_FTYPE_V2DF_V2DF,
18100 V2DF_FTYPE_V2DF_V2DF_SWAP,
18101 V2DF_FTYPE_V2DF_V4SF,
18102 V2DF_FTYPE_V2DF_DI,
18103 V2DF_FTYPE_V2DF_SI,
18104 V2SF_FTYPE_V2SF_V2SF,
18105 V1DI_FTYPE_V1DI_V1DI,
18106 V1DI_FTYPE_V1DI_V1DI_COUNT,
18107 V1DI_FTYPE_V8QI_V8QI,
18108 V1DI_FTYPE_V2SI_V2SI,
18109 V1DI_FTYPE_V1DI_SI_COUNT,
18110 UINT64_FTYPE_UINT64_UINT64,
18111 UINT_FTYPE_UINT_UINT,
18112 UINT_FTYPE_UINT_USHORT,
18113 UINT_FTYPE_UINT_UCHAR,
18114 V8HI_FTYPE_V8HI_INT,
18115 V4SI_FTYPE_V4SI_INT,
18116 V4HI_FTYPE_V4HI_INT,
18117 V4SF_FTYPE_V4SF_INT,
18118 V2DI_FTYPE_V2DI_INT,
18119 V2DI2TI_FTYPE_V2DI_INT,
18120 V2DF_FTYPE_V2DF_INT,
18121 V16QI_FTYPE_V16QI_V16QI_V16QI,
18122 V4SF_FTYPE_V4SF_V4SF_V4SF,
18123 V2DF_FTYPE_V2DF_V2DF_V2DF,
18124 V16QI_FTYPE_V16QI_V16QI_INT,
18125 V8HI_FTYPE_V8HI_V8HI_INT,
18126 V4SI_FTYPE_V4SI_V4SI_INT,
18127 V4SF_FTYPE_V4SF_V4SF_INT,
18128 V2DI_FTYPE_V2DI_V2DI_INT,
18129 V2DI2TI_FTYPE_V2DI_V2DI_INT,
18130 V1DI2DI_FTYPE_V1DI_V1DI_INT,
18131 V2DF_FTYPE_V2DF_V2DF_INT,
18132 V2DI_FTYPE_V2DI_UINT_UINT,
18133 V2DI_FTYPE_V2DI_V2DI_UINT_UINT
18134 };
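/* Suffix conventions used above (roughly): _COUNT marks the final
   operand as a shift count, _SWAP asks the expander to exchange the
   two input operands before emitting the insn, and _VEC_MERGE marks
   scalar ss/sd operations whose result is merged into the upper
   elements of the destination (e.g. sqrtss/rsqrtss/rcpss below).  */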
18136 /* Special builtins with variable number of arguments. */
18137 static const struct builtin_description bdesc_special_args[] =
18138 {
18139 /* MMX */
18140 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
18142 /* 3DNow! */
18143 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
18145 /* SSE */
18146 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
18147 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
18148 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
18150 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
18151 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
18152 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
18153 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
18155 /* SSE or 3DNow!A */
18156 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
18157 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PDI_DI },
18159 /* SSE2 */
18160 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
18161 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
18162 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
18163 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
18164 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
18165 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
18166 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
18167 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
18168 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
18170 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
18171 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
18173 /* SSE3 */
18174 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
18176 /* SSE4.1 */
18177 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
18179 /* SSE4A */
18180 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
18181 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
18182 };
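/* The bdesc_special_args entries above touch memory (loads, stores,
   fences), so they are expanded with MEM operands and are registered
   without the "const" marking; contrast bdesc_args below, whose
   entries are pure value computations.  */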
18184 /* Builtins with variable number of arguments. */
18185 static const struct builtin_description bdesc_args[] =
18186 {
18187 /* MMX */
18188 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18189 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18190 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18191 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18192 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18193 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18195 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18196 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18197 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18198 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18199 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18200 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18201 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18202 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18204 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18205 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18207 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18208 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18209 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18210 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18212 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18213 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18214 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18215 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18216 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18217 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18219 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18220 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18221 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18222 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18223 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18224 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18226 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
18227 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
18228 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
18230 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
18232 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
18233 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
18234 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
18235 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
18236 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
18237 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
18239 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
18240 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
18241 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
18242 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
18243 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
18244 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
18246 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
18247 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
18248 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
18249 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
18251 /* 3DNow! */
18252 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
18253 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
18254 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
18255 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
18257 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18258 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18259 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18260 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
18261 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
18262 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
18263 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18264 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18265 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18266 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18267 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18268 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18269 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18270 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18271 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18273 /* 3DNow!A */
18274 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
18275 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
18276 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
18277 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
18278 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18279 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
18281 /* SSE */
18282 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
18283 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
18284 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
18285 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
18286 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
18287 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
18288 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
18289 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
18290 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
18291 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
18292 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
18293 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
18295 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
18297 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18298 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18299 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18300 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18301 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18302 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18303 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18304 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18306 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
18307 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
18308 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
18309 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
18310 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
18311 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
18312 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
18313 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
18314 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
18315 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
18316 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP},
18317 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
18318 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
18319 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
18320 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
18321 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
18322 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
18323 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
18324 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
18325 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
18326 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
18327 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
18329 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18330 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18331 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18332 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18334 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18335 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18336 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18337 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18339 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18340 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18341 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18342 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18343 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18345 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
18346 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
18347 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
18349 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
18351 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
18352 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
18353 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
18355 /* SSE MMX or 3DNow!A */
18356 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18357 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18358 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18360 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18361 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18362 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18363 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18365 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
18366 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
18368 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
18370 /* SSE2 */
18371 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
18373 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
18374 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
18375 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
18376 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
18377 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
18379 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
18380 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
18381 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
18382 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
18383 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
18385 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
18387 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
18388 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
18389 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
18390 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
18392 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
18393 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
18394 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
18396 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18397 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18398 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18399 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18400 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18401 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18402 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18403 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18405 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
18406 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
18407 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
18408 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
18409 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP},
18410 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
18411 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
18412 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
18413 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
18414 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
18415 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
18416 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
18417 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
18418 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
18419 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
18420 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
18421 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
18422 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
18423 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
18424 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
18426 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18427 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18428 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18429 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18431 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18432 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18433 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18434 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18436 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18437 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpckhpd_exp, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18438 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpcklpd_exp, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18440 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
18442 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18443 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18444 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18445 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18446 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18447 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18448 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18449 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18451 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18452 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18453 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18454 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18455 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18456 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18457 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18458 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18460 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18461 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN,(int) V8HI_FTYPE_V8HI_V8HI },
18463 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18464 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18465 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18466 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18468 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18469 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18471 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18472 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18473 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18474 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18475 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18476 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18478 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18479 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18480 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18481 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18483 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18484 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18485 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18486 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18487 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18488 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18489 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18490 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18492 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
18493 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
18494 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
18496 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18497 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
18499 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
18500 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
18502 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
18504 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
18505 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
18506 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
18507 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
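/* Shift builtins.  The *_COUNT layouts below mark a final shift-count
   operand (an int for the immediate forms, a vector for the register
   forms) that is handled specially at expansion time, while the
   V2DI2TI types wrap the whole-register byte shifts, whose insn
   patterns work in TImode although the builtin is typed on V2DI.  */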
18509 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
18510 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
18511 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
18512 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
18513 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
18514 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
18515 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
18517 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
18518 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
18519 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
18520 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
18521 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
18522 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
18523 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
18525 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
18526 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
18527 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
18528 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
18530 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
18531 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
18532 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
18534 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
18536 /* SSE2 MMX */
18537 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
18538 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
18540 /* SSE3 */
18541 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF},
18542 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
18544 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18545 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18546 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18547 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18548 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
18549 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
18551 /* SSSE3 */
18552 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
18553 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
18554 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
18555 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
18556 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
18557 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
18559 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18560 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18561 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18562 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18563 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18564 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18565 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18566 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18567 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18568 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18569 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18570 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18571 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
18572 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
18573 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18574 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18575 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18576 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18577 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18578 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
18579 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18580 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
18581 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18582 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
18584 /* SSSE3. */
18585 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_V2DI_INT },
18586 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI2DI_FTYPE_V1DI_V1DI_INT },
18588 /* SSE4.1 */
18589 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
18590 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
18591 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
18592 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
18593 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
18594 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
18595 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
18596 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
18597 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
18598 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
18600 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
18601 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
18602 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
18603 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
18604 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
18605 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
18606 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
18607 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
18608 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
18609 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
18610 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
18611 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
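/* The pmovsx/pmovzx builtins above are typed on the full 128-bit
   source vector, but only its low elements are sign- or zero-extended
   into the wider result.  */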
18612 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
18614 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
18615 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18616 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18617 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18618 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18619 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18620 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
18621 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18622 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18623 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
18624 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
18625 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
18627 /* SSE4.1 and SSE5 */
18628 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
18629 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
18630 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
18631 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
18633 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
18634 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
18635 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
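/* For the ptest builtins the comparison column selects which condition
   the expander tests: EQ for ZF (ptestz), LTU for CF (ptestc), and GTU
   for the "neither ZF nor CF" combination (ptestnzc).  */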
18637 /* SSE4.2 */
18638 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18639 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
18640 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
18641 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
18642 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
18644 /* SSE4A */
18645 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
18646 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
18647 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
18648 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18650 /* AES */
18651 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
18652 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
18654 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18655 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18656 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18657 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
18659 /* PCLMUL */
18660 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
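/* The AES and PCLMUL entries above carry a null name field, so the
   generic registration loop skips them; they appear to be registered
   separately with their "__builtin_ia32_*" names elsewhere in this
   file, guarded by the corresponding -maes/-mpclmul options.  */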
18662 /* 64bit */
18663 { OPTION_MASK_ISA_64BIT, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
18664 { OPTION_MASK_ISA_64BIT, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
18665 };
18667 /* SSE5 */
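/* Operand-layout codes for the SSE5 builtins in bdesc_multi_arg below.
   The names encode operand count and element mode:
   MULTI_ARG_<n>_<mode> is an <n>-operand operation on vectors of
   <mode> elements; two-mode names such as MULTI_ARG_3_SI_DI take the
   first mode as input and produce the second; and the suffixes _IMM,
   _CMP and _TF mark an immediate count, a comparison, and a true/false
   test form respectively.  */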
18668 enum multi_arg_type {
18669 MULTI_ARG_UNKNOWN,
18670 MULTI_ARG_3_SF,
18671 MULTI_ARG_3_DF,
18672 MULTI_ARG_3_DI,
18673 MULTI_ARG_3_SI,
18674 MULTI_ARG_3_SI_DI,
18675 MULTI_ARG_3_HI,
18676 MULTI_ARG_3_HI_SI,
18677 MULTI_ARG_3_QI,
18678 MULTI_ARG_3_PERMPS,
18679 MULTI_ARG_3_PERMPD,
18680 MULTI_ARG_2_SF,
18681 MULTI_ARG_2_DF,
18682 MULTI_ARG_2_DI,
18683 MULTI_ARG_2_SI,
18684 MULTI_ARG_2_HI,
18685 MULTI_ARG_2_QI,
18686 MULTI_ARG_2_DI_IMM,
18687 MULTI_ARG_2_SI_IMM,
18688 MULTI_ARG_2_HI_IMM,
18689 MULTI_ARG_2_QI_IMM,
18690 MULTI_ARG_2_SF_CMP,
18691 MULTI_ARG_2_DF_CMP,
18692 MULTI_ARG_2_DI_CMP,
18693 MULTI_ARG_2_SI_CMP,
18694 MULTI_ARG_2_HI_CMP,
18695 MULTI_ARG_2_QI_CMP,
18696 MULTI_ARG_2_DI_TF,
18697 MULTI_ARG_2_SI_TF,
18698 MULTI_ARG_2_HI_TF,
18699 MULTI_ARG_2_QI_TF,
18700 MULTI_ARG_2_SF_TF,
18701 MULTI_ARG_2_DF_TF,
18702 MULTI_ARG_1_SF,
18703 MULTI_ARG_1_DF,
18704 MULTI_ARG_1_DI,
18705 MULTI_ARG_1_SI,
18706 MULTI_ARG_1_HI,
18707 MULTI_ARG_1_QI,
18708 MULTI_ARG_1_SI_DI,
18709 MULTI_ARG_1_HI_DI,
18710 MULTI_ARG_1_HI_SI,
18711 MULTI_ARG_1_QI_DI,
18712 MULTI_ARG_1_QI_SI,
18713 MULTI_ARG_1_QI_HI,
18714 MULTI_ARG_1_PH2PS,
18715 MULTI_ARG_1_PS2PH
18716 };
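/* Each entry lists the ISA mask required, the insn code used for
   expansion, the user-visible builtin name, the IX86_BUILTIN_* code,
   an rtx comparison (or test) code where one applies, and the
   MULTI_ARG_* layout from the enum above.  */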
18718 static const struct builtin_description bdesc_multi_arg[] =
18719 {
18720 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv4sf4, "__builtin_ia32_fmaddss", IX86_BUILTIN_FMADDSS, 0, (int)MULTI_ARG_3_SF },
18721 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv2df4, "__builtin_ia32_fmaddsd", IX86_BUILTIN_FMADDSD, 0, (int)MULTI_ARG_3_DF },
18722 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv4sf4, "__builtin_ia32_fmaddps", IX86_BUILTIN_FMADDPS, 0, (int)MULTI_ARG_3_SF },
18723 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv2df4, "__builtin_ia32_fmaddpd", IX86_BUILTIN_FMADDPD, 0, (int)MULTI_ARG_3_DF },
18724 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv4sf4, "__builtin_ia32_fmsubss", IX86_BUILTIN_FMSUBSS, 0, (int)MULTI_ARG_3_SF },
18725 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv2df4, "__builtin_ia32_fmsubsd", IX86_BUILTIN_FMSUBSD, 0, (int)MULTI_ARG_3_DF },
18726 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv4sf4, "__builtin_ia32_fmsubps", IX86_BUILTIN_FMSUBPS, 0, (int)MULTI_ARG_3_SF },
18727 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv2df4, "__builtin_ia32_fmsubpd", IX86_BUILTIN_FMSUBPD, 0, (int)MULTI_ARG_3_DF },
18728 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv4sf4, "__builtin_ia32_fnmaddss", IX86_BUILTIN_FNMADDSS, 0, (int)MULTI_ARG_3_SF },
18729 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv2df4, "__builtin_ia32_fnmaddsd", IX86_BUILTIN_FNMADDSD, 0, (int)MULTI_ARG_3_DF },
18730 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv4sf4, "__builtin_ia32_fnmaddps", IX86_BUILTIN_FNMADDPS, 0, (int)MULTI_ARG_3_SF },
18731 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv2df4, "__builtin_ia32_fnmaddpd", IX86_BUILTIN_FNMADDPD, 0, (int)MULTI_ARG_3_DF },
18732 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv4sf4, "__builtin_ia32_fnmsubss", IX86_BUILTIN_FNMSUBSS, 0, (int)MULTI_ARG_3_SF },
18733 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv2df4, "__builtin_ia32_fnmsubsd", IX86_BUILTIN_FNMSUBSD, 0, (int)MULTI_ARG_3_DF },
18734 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv4sf4, "__builtin_ia32_fnmsubps", IX86_BUILTIN_FNMSUBPS, 0, (int)MULTI_ARG_3_SF },
18735 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv2df4, "__builtin_ia32_fnmsubpd", IX86_BUILTIN_FNMSUBPD, 0, (int)MULTI_ARG_3_DF },
18736 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov", IX86_BUILTIN_PCMOV_V2DI, 0, (int)MULTI_ARG_3_DI },
18737 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov_v2di", IX86_BUILTIN_PCMOV_V2DI, 0, (int)MULTI_ARG_3_DI },
18738 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4si, "__builtin_ia32_pcmov_v4si", IX86_BUILTIN_PCMOV_V4SI, 0, (int)MULTI_ARG_3_SI },
18739 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v8hi, "__builtin_ia32_pcmov_v8hi", IX86_BUILTIN_PCMOV_V8HI, 0, (int)MULTI_ARG_3_HI },
18740 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v16qi, "__builtin_ia32_pcmov_v16qi",IX86_BUILTIN_PCMOV_V16QI,0, (int)MULTI_ARG_3_QI },
18741 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2df, "__builtin_ia32_pcmov_v2df", IX86_BUILTIN_PCMOV_V2DF, 0, (int)MULTI_ARG_3_DF },
18742 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4sf, "__builtin_ia32_pcmov_v4sf", IX86_BUILTIN_PCMOV_V4SF, 0, (int)MULTI_ARG_3_SF },
18743 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pperm, "__builtin_ia32_pperm", IX86_BUILTIN_PPERM, 0, (int)MULTI_ARG_3_QI },
18744 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv4sf, "__builtin_ia32_permps", IX86_BUILTIN_PERMPS, 0, (int)MULTI_ARG_3_PERMPS },
18745 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv2df, "__builtin_ia32_permpd", IX86_BUILTIN_PERMPD, 0, (int)MULTI_ARG_3_PERMPD },
18746 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssww, "__builtin_ia32_pmacssww", IX86_BUILTIN_PMACSSWW, 0, (int)MULTI_ARG_3_HI },
18747 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsww, "__builtin_ia32_pmacsww", IX86_BUILTIN_PMACSWW, 0, (int)MULTI_ARG_3_HI },
18748 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsswd, "__builtin_ia32_pmacsswd", IX86_BUILTIN_PMACSSWD, 0, (int)MULTI_ARG_3_HI_SI },
18749 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacswd, "__builtin_ia32_pmacswd", IX86_BUILTIN_PMACSWD, 0, (int)MULTI_ARG_3_HI_SI },
18750 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdd, "__builtin_ia32_pmacssdd", IX86_BUILTIN_PMACSSDD, 0, (int)MULTI_ARG_3_SI },
18751 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdd, "__builtin_ia32_pmacsdd", IX86_BUILTIN_PMACSDD, 0, (int)MULTI_ARG_3_SI },
18752 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdql, "__builtin_ia32_pmacssdql", IX86_BUILTIN_PMACSSDQL, 0, (int)MULTI_ARG_3_SI_DI },
18753 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdqh, "__builtin_ia32_pmacssdqh", IX86_BUILTIN_PMACSSDQH, 0, (int)MULTI_ARG_3_SI_DI },
18754 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdql, "__builtin_ia32_pmacsdql", IX86_BUILTIN_PMACSDQL, 0, (int)MULTI_ARG_3_SI_DI },
18755 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdqh, "__builtin_ia32_pmacsdqh", IX86_BUILTIN_PMACSDQH, 0, (int)MULTI_ARG_3_SI_DI },
18756 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcsswd, "__builtin_ia32_pmadcsswd", IX86_BUILTIN_PMADCSSWD, 0, (int)MULTI_ARG_3_HI_SI },
18757 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcswd, "__builtin_ia32_pmadcswd", IX86_BUILTIN_PMADCSWD, 0, (int)MULTI_ARG_3_HI_SI },
18758 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv2di3, "__builtin_ia32_protq", IX86_BUILTIN_PROTQ, 0, (int)MULTI_ARG_2_DI },
18759 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv4si3, "__builtin_ia32_protd", IX86_BUILTIN_PROTD, 0, (int)MULTI_ARG_2_SI },
18760 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv8hi3, "__builtin_ia32_protw", IX86_BUILTIN_PROTW, 0, (int)MULTI_ARG_2_HI },
18761 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv16qi3, "__builtin_ia32_protb", IX86_BUILTIN_PROTB, 0, (int)MULTI_ARG_2_QI },
18762 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv2di3, "__builtin_ia32_protqi", IX86_BUILTIN_PROTQ_IMM, 0, (int)MULTI_ARG_2_DI_IMM },
18763 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv4si3, "__builtin_ia32_protdi", IX86_BUILTIN_PROTD_IMM, 0, (int)MULTI_ARG_2_SI_IMM },
18764 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv8hi3, "__builtin_ia32_protwi", IX86_BUILTIN_PROTW_IMM, 0, (int)MULTI_ARG_2_HI_IMM },
18765 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv16qi3, "__builtin_ia32_protbi", IX86_BUILTIN_PROTB_IMM, 0, (int)MULTI_ARG_2_QI_IMM },
18766 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv2di3, "__builtin_ia32_pshaq", IX86_BUILTIN_PSHAQ, 0, (int)MULTI_ARG_2_DI },
18767 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv4si3, "__builtin_ia32_pshad", IX86_BUILTIN_PSHAD, 0, (int)MULTI_ARG_2_SI },
18768 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv8hi3, "__builtin_ia32_pshaw", IX86_BUILTIN_PSHAW, 0, (int)MULTI_ARG_2_HI },
18769 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv16qi3, "__builtin_ia32_pshab", IX86_BUILTIN_PSHAB, 0, (int)MULTI_ARG_2_QI },
18770 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv2di3, "__builtin_ia32_pshlq", IX86_BUILTIN_PSHLQ, 0, (int)MULTI_ARG_2_DI },
18771 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv4si3, "__builtin_ia32_pshld", IX86_BUILTIN_PSHLD, 0, (int)MULTI_ARG_2_SI },
18772 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv8hi3, "__builtin_ia32_pshlw", IX86_BUILTIN_PSHLW, 0, (int)MULTI_ARG_2_HI },
18773 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv16qi3, "__builtin_ia32_pshlb", IX86_BUILTIN_PSHLB, 0, (int)MULTI_ARG_2_QI },
18774 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv4sf2, "__builtin_ia32_frczss", IX86_BUILTIN_FRCZSS, 0, (int)MULTI_ARG_2_SF },
18775 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv2df2, "__builtin_ia32_frczsd", IX86_BUILTIN_FRCZSD, 0, (int)MULTI_ARG_2_DF },
18776 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv4sf2, "__builtin_ia32_frczps", IX86_BUILTIN_FRCZPS, 0, (int)MULTI_ARG_1_SF },
18777 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv2df2, "__builtin_ia32_frczpd", IX86_BUILTIN_FRCZPD, 0, (int)MULTI_ARG_1_DF },
18778 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtph2ps, "__builtin_ia32_cvtph2ps", IX86_BUILTIN_CVTPH2PS, 0, (int)MULTI_ARG_1_PH2PS },
18779 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtps2ph, "__builtin_ia32_cvtps2ph", IX86_BUILTIN_CVTPS2PH, 0, (int)MULTI_ARG_1_PS2PH },
18780 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbw, "__builtin_ia32_phaddbw", IX86_BUILTIN_PHADDBW, 0, (int)MULTI_ARG_1_QI_HI },
18781 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbd, "__builtin_ia32_phaddbd", IX86_BUILTIN_PHADDBD, 0, (int)MULTI_ARG_1_QI_SI },
18782 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbq, "__builtin_ia32_phaddbq", IX86_BUILTIN_PHADDBQ, 0, (int)MULTI_ARG_1_QI_DI },
18783 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwd, "__builtin_ia32_phaddwd", IX86_BUILTIN_PHADDWD, 0, (int)MULTI_ARG_1_HI_SI },
18784 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwq, "__builtin_ia32_phaddwq", IX86_BUILTIN_PHADDWQ, 0, (int)MULTI_ARG_1_HI_DI },
18785 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadddq, "__builtin_ia32_phadddq", IX86_BUILTIN_PHADDDQ, 0, (int)MULTI_ARG_1_SI_DI },
18786 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubw, "__builtin_ia32_phaddubw", IX86_BUILTIN_PHADDUBW, 0, (int)MULTI_ARG_1_QI_HI },
18787 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubd, "__builtin_ia32_phaddubd", IX86_BUILTIN_PHADDUBD, 0, (int)MULTI_ARG_1_QI_SI },
18788 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubq, "__builtin_ia32_phaddubq", IX86_BUILTIN_PHADDUBQ, 0, (int)MULTI_ARG_1_QI_DI },
18789 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwd, "__builtin_ia32_phadduwd", IX86_BUILTIN_PHADDUWD, 0, (int)MULTI_ARG_1_HI_SI },
18790 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwq, "__builtin_ia32_phadduwq", IX86_BUILTIN_PHADDUWQ, 0, (int)MULTI_ARG_1_HI_DI },
18791 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddudq, "__builtin_ia32_phaddudq", IX86_BUILTIN_PHADDUDQ, 0, (int)MULTI_ARG_1_SI_DI },
18792 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubbw, "__builtin_ia32_phsubbw", IX86_BUILTIN_PHSUBBW, 0, (int)MULTI_ARG_1_QI_HI },
18793 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubwd, "__builtin_ia32_phsubwd", IX86_BUILTIN_PHSUBWD, 0, (int)MULTI_ARG_1_HI_SI },
18794 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubdq, "__builtin_ia32_phsubdq", IX86_BUILTIN_PHSUBDQ, 0, (int)MULTI_ARG_1_SI_DI },
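/* Each SSE5 comparison below is entered twice, under two accepted
   spellings (e.g. __builtin_ia32_comness and __builtin_ia32_comneqss)
   that map to the same IX86_BUILTIN_* code and expand identically.  */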
18796 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comeqss", IX86_BUILTIN_COMEQSS, EQ, (int)MULTI_ARG_2_SF_CMP },
18797 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comness", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
18798 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comneqss", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
18799 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comltss", IX86_BUILTIN_COMLTSS, LT, (int)MULTI_ARG_2_SF_CMP },
18800 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comless", IX86_BUILTIN_COMLESS, LE, (int)MULTI_ARG_2_SF_CMP },
18801 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgtss", IX86_BUILTIN_COMGTSS, GT, (int)MULTI_ARG_2_SF_CMP },
18802 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgess", IX86_BUILTIN_COMGESS, GE, (int)MULTI_ARG_2_SF_CMP },
18803 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comueqss", IX86_BUILTIN_COMUEQSS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
18804 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuness", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
18805 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuneqss", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
18806 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunltss", IX86_BUILTIN_COMULTSS, UNLT, (int)MULTI_ARG_2_SF_CMP },
18807 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunless", IX86_BUILTIN_COMULESS, UNLE, (int)MULTI_ARG_2_SF_CMP },
18808 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungtss", IX86_BUILTIN_COMUGTSS, UNGT, (int)MULTI_ARG_2_SF_CMP },
18809 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungess", IX86_BUILTIN_COMUGESS, UNGE, (int)MULTI_ARG_2_SF_CMP },
18810 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comordss", IX86_BUILTIN_COMORDSS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
18811 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunordss", IX86_BUILTIN_COMUNORDSS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
18813 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comeqsd", IX86_BUILTIN_COMEQSD, EQ, (int)MULTI_ARG_2_DF_CMP },
18814 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comnesd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
18815 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comneqsd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
18816 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comltsd", IX86_BUILTIN_COMLTSD, LT, (int)MULTI_ARG_2_DF_CMP },
18817 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comlesd", IX86_BUILTIN_COMLESD, LE, (int)MULTI_ARG_2_DF_CMP },
18818 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgtsd", IX86_BUILTIN_COMGTSD, GT, (int)MULTI_ARG_2_DF_CMP },
18819 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgesd", IX86_BUILTIN_COMGESD, GE, (int)MULTI_ARG_2_DF_CMP },
18820 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comueqsd", IX86_BUILTIN_COMUEQSD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
18821 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunesd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
18822 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comuneqsd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
18823 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunltsd", IX86_BUILTIN_COMULTSD, UNLT, (int)MULTI_ARG_2_DF_CMP },
18824 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunlesd", IX86_BUILTIN_COMULESD, UNLE, (int)MULTI_ARG_2_DF_CMP },
18825 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungtsd", IX86_BUILTIN_COMUGTSD, UNGT, (int)MULTI_ARG_2_DF_CMP },
18826 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungesd", IX86_BUILTIN_COMUGESD, UNGE, (int)MULTI_ARG_2_DF_CMP },
18827 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comordsd", IX86_BUILTIN_COMORDSD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
18828 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunordsd", IX86_BUILTIN_COMUNORDSD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
18830 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comeqps", IX86_BUILTIN_COMEQPS, EQ, (int)MULTI_ARG_2_SF_CMP },
18831 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
18832 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneqps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
18833 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comltps", IX86_BUILTIN_COMLTPS, LT, (int)MULTI_ARG_2_SF_CMP },
18834 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comleps", IX86_BUILTIN_COMLEPS, LE, (int)MULTI_ARG_2_SF_CMP },
18835 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgtps", IX86_BUILTIN_COMGTPS, GT, (int)MULTI_ARG_2_SF_CMP },
18836 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgeps", IX86_BUILTIN_COMGEPS, GE, (int)MULTI_ARG_2_SF_CMP },
18837 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comueqps", IX86_BUILTIN_COMUEQPS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
18838 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
18839 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneqps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
18840 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunltps", IX86_BUILTIN_COMULTPS, UNLT, (int)MULTI_ARG_2_SF_CMP },
18841 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunleps", IX86_BUILTIN_COMULEPS, UNLE, (int)MULTI_ARG_2_SF_CMP },
18842 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungtps", IX86_BUILTIN_COMUGTPS, UNGT, (int)MULTI_ARG_2_SF_CMP },
18843 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungeps", IX86_BUILTIN_COMUGEPS, UNGE, (int)MULTI_ARG_2_SF_CMP },
18844 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comordps", IX86_BUILTIN_COMORDPS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
18845 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunordps", IX86_BUILTIN_COMUNORDPS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
18847 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comeqpd", IX86_BUILTIN_COMEQPD, EQ, (int)MULTI_ARG_2_DF_CMP },
18848 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comnepd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
18849 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comneqpd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
18850 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comltpd", IX86_BUILTIN_COMLTPD, LT, (int)MULTI_ARG_2_DF_CMP },
18851 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comlepd", IX86_BUILTIN_COMLEPD, LE, (int)MULTI_ARG_2_DF_CMP },
18852 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgtpd", IX86_BUILTIN_COMGTPD, GT, (int)MULTI_ARG_2_DF_CMP },
18853 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgepd", IX86_BUILTIN_COMGEPD, GE, (int)MULTI_ARG_2_DF_CMP },
18854 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comueqpd", IX86_BUILTIN_COMUEQPD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
18855 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunepd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
18856 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comuneqpd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
18857 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunltpd", IX86_BUILTIN_COMULTPD, UNLT, (int)MULTI_ARG_2_DF_CMP },
18858 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunlepd", IX86_BUILTIN_COMULEPD, UNLE, (int)MULTI_ARG_2_DF_CMP },
18859 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungtpd", IX86_BUILTIN_COMUGTPD, UNGT, (int)MULTI_ARG_2_DF_CMP },
18860 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungepd", IX86_BUILTIN_COMUGEPD, UNGE, (int)MULTI_ARG_2_DF_CMP },
18861 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comordpd", IX86_BUILTIN_COMORDPD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
18862 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunordpd", IX86_BUILTIN_COMUNORDPD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
18864 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomeqb", IX86_BUILTIN_PCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
18865 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
18866 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneqb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
18867 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomltb", IX86_BUILTIN_PCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
18868 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomleb", IX86_BUILTIN_PCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
18869 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgtb", IX86_BUILTIN_PCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
18870 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgeb", IX86_BUILTIN_PCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
18872 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomeqw", IX86_BUILTIN_PCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
18873 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomnew", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
18874 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomneqw", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
18875 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomltw", IX86_BUILTIN_PCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
18876 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomlew", IX86_BUILTIN_PCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
18877 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgtw", IX86_BUILTIN_PCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
18878 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgew", IX86_BUILTIN_PCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
18880 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomeqd", IX86_BUILTIN_PCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
18881 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomned", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
18882 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomneqd", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
18883 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomltd", IX86_BUILTIN_PCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
18884 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomled", IX86_BUILTIN_PCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
18885 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomgtd", IX86_BUILTIN_PCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
18886 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomged", IX86_BUILTIN_PCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
18888 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomeqq", IX86_BUILTIN_PCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
18889 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
18890 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneqq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
18891 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomltq", IX86_BUILTIN_PCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
18892 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomleq", IX86_BUILTIN_PCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
18893 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgtq", IX86_BUILTIN_PCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
18894 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgeq", IX86_BUILTIN_PCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
18896 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomequb", IX86_BUILTIN_PCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
18897 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomneub", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
18898 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomnequb", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
18899 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomltub", IX86_BUILTIN_PCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
18900 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomleub", IX86_BUILTIN_PCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
18901 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgtub", IX86_BUILTIN_PCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
18902 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgeub", IX86_BUILTIN_PCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
18904 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomequw", IX86_BUILTIN_PCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
18905 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomneuw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
18906 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomnequw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
18907 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomltuw", IX86_BUILTIN_PCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
18908 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomleuw", IX86_BUILTIN_PCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
18909 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgtuw", IX86_BUILTIN_PCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
18910 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgeuw", IX86_BUILTIN_PCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
18912 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomequd", IX86_BUILTIN_PCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
18913 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomneud", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
18914 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomnequd", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
18915 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomltud", IX86_BUILTIN_PCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
18916 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomleud", IX86_BUILTIN_PCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
18917 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgtud", IX86_BUILTIN_PCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
18918 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgeud", IX86_BUILTIN_PCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
18920 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomequq", IX86_BUILTIN_PCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
18921 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomneuq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
18922 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomnequq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
18923 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomltuq", IX86_BUILTIN_PCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
18924 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomleuq", IX86_BUILTIN_PCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
18925 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgtuq", IX86_BUILTIN_PCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
18926 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgeuq", IX86_BUILTIN_PCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
18928 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalsess", IX86_BUILTIN_COMFALSESS, COM_FALSE_S, (int)MULTI_ARG_2_SF_TF },
18929 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtruess", IX86_BUILTIN_COMTRUESS, COM_TRUE_S, (int)MULTI_ARG_2_SF_TF },
18930 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalseps", IX86_BUILTIN_COMFALSEPS, COM_FALSE_P, (int)MULTI_ARG_2_SF_TF },
18931 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtrueps", IX86_BUILTIN_COMTRUEPS, COM_TRUE_P, (int)MULTI_ARG_2_SF_TF },
18932 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsesd", IX86_BUILTIN_COMFALSESD, COM_FALSE_S, (int)MULTI_ARG_2_DF_TF },
18933 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruesd", IX86_BUILTIN_COMTRUESD, COM_TRUE_S, (int)MULTI_ARG_2_DF_TF },
18934 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsepd", IX86_BUILTIN_COMFALSEPD, COM_FALSE_P, (int)MULTI_ARG_2_DF_TF },
18935 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruepd", IX86_BUILTIN_COMTRUEPD, COM_TRUE_P, (int)MULTI_ARG_2_DF_TF },
18937 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseb", IX86_BUILTIN_PCOMFALSEB, PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
18938 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalsew", IX86_BUILTIN_PCOMFALSEW, PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
18939 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalsed", IX86_BUILTIN_PCOMFALSED, PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
18940 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseq", IX86_BUILTIN_PCOMFALSEQ, PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
18941 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseub",IX86_BUILTIN_PCOMFALSEUB,PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
18942 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalseuw",IX86_BUILTIN_PCOMFALSEUW,PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
18943 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalseud",IX86_BUILTIN_PCOMFALSEUD,PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
18944 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseuq",IX86_BUILTIN_PCOMFALSEUQ,PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
18946 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueb", IX86_BUILTIN_PCOMTRUEB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
18947 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtruew", IX86_BUILTIN_PCOMTRUEW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
18948 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrued", IX86_BUILTIN_PCOMTRUED, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
18949 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueq", IX86_BUILTIN_PCOMTRUEQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
18950 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueub", IX86_BUILTIN_PCOMTRUEUB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
18951 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtrueuw", IX86_BUILTIN_PCOMTRUEUW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
18952 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrueud", IX86_BUILTIN_PCOMTRUEUD, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
18953 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueuq", IX86_BUILTIN_PCOMTRUEUQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
18954 };
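/* Illustration (not part of this file): through the MULTI_ARG_3_SF
   entry for "__builtin_ia32_fmaddps", a call such as

       typedef float v4sf __attribute__ ((vector_size (16)));
       v4sf fma4 (v4sf a, v4sf b, v4sf c)
       { return __builtin_ia32_fmaddps (a, b, c); }

   is given a V4SF (*) (V4SF, V4SF, V4SF) prototype and expanded
   through CODE_FOR_sse5i_fmaddv4sf4 when -msse5 is in effect.  */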
18956 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
18957 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
18958 builtins. */
18959 static void
18960 ix86_init_mmx_sse_builtins (void)
18961 {
18962 const struct builtin_description * d;
18963 size_t i;
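/* Build tree type nodes for each vector machine mode referenced by
   the builtin prototypes constructed below.  */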
18965 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
18966 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
18967 tree V1DI_type_node
18968 = build_vector_type_for_mode (long_long_integer_type_node, V1DImode);
18969 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
18970 tree V2DI_type_node
18971 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
18972 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
18973 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
18974 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
18975 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
18976 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
18977 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
18979 tree pchar_type_node = build_pointer_type (char_type_node);
18980 tree pcchar_type_node
18981 = build_pointer_type (build_type_variant (char_type_node, 1, 0));
18982 tree pfloat_type_node = build_pointer_type (float_type_node);
18983 tree pcfloat_type_node
18984 = build_pointer_type (build_type_variant (float_type_node, 1, 0));
18985 tree pv2sf_type_node = build_pointer_type (V2SF_type_node);
18986 tree pcv2sf_type_node
18987 = build_pointer_type (build_type_variant (V2SF_type_node, 1, 0));
18988 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
18989 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
18991 /* Comparisons. */
18992 tree int_ftype_v4sf_v4sf
18993 = build_function_type_list (integer_type_node,
18994 V4SF_type_node, V4SF_type_node, NULL_TREE);
18995 tree v4si_ftype_v4sf_v4sf
18996 = build_function_type_list (V4SI_type_node,
18997 V4SF_type_node, V4SF_type_node, NULL_TREE);
18998 /* MMX/SSE/integer conversions. */
18999 tree int_ftype_v4sf
19000 = build_function_type_list (integer_type_node,
19001 V4SF_type_node, NULL_TREE);
19002 tree int64_ftype_v4sf
19003 = build_function_type_list (long_long_integer_type_node,
19004 V4SF_type_node, NULL_TREE);
19005 tree int_ftype_v8qi
19006 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
19007 tree v4sf_ftype_v4sf_int
19008 = build_function_type_list (V4SF_type_node,
19009 V4SF_type_node, integer_type_node, NULL_TREE);
19010 tree v4sf_ftype_v4sf_int64
19011 = build_function_type_list (V4SF_type_node,
19012 V4SF_type_node, long_long_integer_type_node,
19013 NULL_TREE);
19014 tree v4sf_ftype_v4sf_v2si
19015 = build_function_type_list (V4SF_type_node,
19016 V4SF_type_node, V2SI_type_node, NULL_TREE);
19018 /* Miscellaneous. */
19019 tree v8qi_ftype_v4hi_v4hi
19020 = build_function_type_list (V8QI_type_node,
19021 V4HI_type_node, V4HI_type_node, NULL_TREE);
19022 tree v4hi_ftype_v2si_v2si
19023 = build_function_type_list (V4HI_type_node,
19024 V2SI_type_node, V2SI_type_node, NULL_TREE);
19025 tree v4sf_ftype_v4sf_v4sf_int
19026 = build_function_type_list (V4SF_type_node,
19027 V4SF_type_node, V4SF_type_node,
19028 integer_type_node, NULL_TREE);
19029 tree v2si_ftype_v4hi_v4hi
19030 = build_function_type_list (V2SI_type_node,
19031 V4HI_type_node, V4HI_type_node, NULL_TREE);
19032 tree v4hi_ftype_v4hi_int
19033 = build_function_type_list (V4HI_type_node,
19034 V4HI_type_node, integer_type_node, NULL_TREE);
19035 tree v2si_ftype_v2si_int
19036 = build_function_type_list (V2SI_type_node,
19037 V2SI_type_node, integer_type_node, NULL_TREE);
19038 tree v1di_ftype_v1di_int
19039 = build_function_type_list (V1DI_type_node,
19040 V1DI_type_node, integer_type_node, NULL_TREE);
19042 tree void_ftype_void
19043 = build_function_type (void_type_node, void_list_node);
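/* Note: build_function_type takes a TREE_LIST of argument types
   (void_list_node means "no arguments"), while the
   build_function_type_list calls take the argument types directly,
   terminated by NULL_TREE; both spellings build ordinary prototyped
   function types.  */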
19044 tree void_ftype_unsigned
19045 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
19046 tree void_ftype_unsigned_unsigned
19047 = build_function_type_list (void_type_node, unsigned_type_node,
19048 unsigned_type_node, NULL_TREE);
19049 tree void_ftype_pcvoid_unsigned_unsigned
19050 = build_function_type_list (void_type_node, const_ptr_type_node,
19051 unsigned_type_node, unsigned_type_node,
19052 NULL_TREE);
19053 tree unsigned_ftype_void
19054 = build_function_type (unsigned_type_node, void_list_node);
19055 tree v2si_ftype_v4sf
19056 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
19057 /* Loads/stores. */
19058 tree void_ftype_v8qi_v8qi_pchar
19059 = build_function_type_list (void_type_node,
19060 V8QI_type_node, V8QI_type_node,
19061 pchar_type_node, NULL_TREE);
19062 tree v4sf_ftype_pcfloat
19063 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
19064 tree v4sf_ftype_v4sf_pcv2sf
19065 = build_function_type_list (V4SF_type_node,
19066 V4SF_type_node, pcv2sf_type_node, NULL_TREE);
19067 tree void_ftype_pv2sf_v4sf
19068 = build_function_type_list (void_type_node,
19069 pv2sf_type_node, V4SF_type_node, NULL_TREE);
19070 tree void_ftype_pfloat_v4sf
19071 = build_function_type_list (void_type_node,
19072 pfloat_type_node, V4SF_type_node, NULL_TREE);
19073 tree void_ftype_pdi_di
19074 = build_function_type_list (void_type_node,
19075 pdi_type_node, long_long_unsigned_type_node,
19076 NULL_TREE);
19077 tree void_ftype_pv2di_v2di
19078 = build_function_type_list (void_type_node,
19079 pv2di_type_node, V2DI_type_node, NULL_TREE);
19080 /* Normal vector unops. */
19081 tree v4sf_ftype_v4sf
19082 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
19083 tree v16qi_ftype_v16qi
19084 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
19085 tree v8hi_ftype_v8hi
19086 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
19087 tree v4si_ftype_v4si
19088 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
19089 tree v8qi_ftype_v8qi
19090 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
19091 tree v4hi_ftype_v4hi
19092 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
19094 /* Normal vector binops. */
19095 tree v4sf_ftype_v4sf_v4sf
19096 = build_function_type_list (V4SF_type_node,
19097 V4SF_type_node, V4SF_type_node, NULL_TREE);
19098 tree v8qi_ftype_v8qi_v8qi
19099 = build_function_type_list (V8QI_type_node,
19100 V8QI_type_node, V8QI_type_node, NULL_TREE);
19101 tree v4hi_ftype_v4hi_v4hi
19102 = build_function_type_list (V4HI_type_node,
19103 V4HI_type_node, V4HI_type_node, NULL_TREE);
19104 tree v2si_ftype_v2si_v2si
19105 = build_function_type_list (V2SI_type_node,
19106 V2SI_type_node, V2SI_type_node, NULL_TREE);
19107 tree v1di_ftype_v1di_v1di
19108 = build_function_type_list (V1DI_type_node,
19109 V1DI_type_node, V1DI_type_node, NULL_TREE);
19110 tree v1di_ftype_v1di_v1di_int
19111 = build_function_type_list (V1DI_type_node,
19112 V1DI_type_node, V1DI_type_node,
19113 integer_type_node, NULL_TREE);
19114 tree v2si_ftype_v2sf
19115 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
19116 tree v2sf_ftype_v2si
19117 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
19118 tree v2si_ftype_v2si
19119 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
19120 tree v2sf_ftype_v2sf
19121 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
19122 tree v2sf_ftype_v2sf_v2sf
19123 = build_function_type_list (V2SF_type_node,
19124 V2SF_type_node, V2SF_type_node, NULL_TREE);
19125 tree v2si_ftype_v2sf_v2sf
19126 = build_function_type_list (V2SI_type_node,
19127 V2SF_type_node, V2SF_type_node, NULL_TREE);
19128 tree pint_type_node = build_pointer_type (integer_type_node);
19129 tree pdouble_type_node = build_pointer_type (double_type_node);
19130 tree pcdouble_type_node = build_pointer_type (
19131 build_type_variant (double_type_node, 1, 0));
19132 tree int_ftype_v2df_v2df
19133 = build_function_type_list (integer_type_node,
19134 V2DF_type_node, V2DF_type_node, NULL_TREE);
19136 tree void_ftype_pcvoid
19137 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
19138 tree v4sf_ftype_v4si
19139 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
19140 tree v4si_ftype_v4sf
19141 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
19142 tree v2df_ftype_v4si
19143 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
19144 tree v4si_ftype_v2df
19145 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
19146 tree v4si_ftype_v2df_v2df
19147 = build_function_type_list (V4SI_type_node,
19148 V2DF_type_node, V2DF_type_node, NULL_TREE);
19149 tree v2si_ftype_v2df
19150 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
19151 tree v4sf_ftype_v2df
19152 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
19153 tree v2df_ftype_v2si
19154 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
19155 tree v2df_ftype_v4sf
19156 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
19157 tree int_ftype_v2df
19158 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
19159 tree int64_ftype_v2df
19160 = build_function_type_list (long_long_integer_type_node,
19161 V2DF_type_node, NULL_TREE);
19162 tree v2df_ftype_v2df_int
19163 = build_function_type_list (V2DF_type_node,
19164 V2DF_type_node, integer_type_node, NULL_TREE);
19165 tree v2df_ftype_v2df_int64
19166 = build_function_type_list (V2DF_type_node,
19167 V2DF_type_node, long_long_integer_type_node,
19168 NULL_TREE);
19169 tree v4sf_ftype_v4sf_v2df
19170 = build_function_type_list (V4SF_type_node,
19171 V4SF_type_node, V2DF_type_node, NULL_TREE);
19172 tree v2df_ftype_v2df_v4sf
19173 = build_function_type_list (V2DF_type_node,
19174 V2DF_type_node, V4SF_type_node, NULL_TREE);
19175 tree v2df_ftype_v2df_v2df_int
19176 = build_function_type_list (V2DF_type_node,
19177 V2DF_type_node, V2DF_type_node,
19178 integer_type_node,
19179 NULL_TREE);
19180 tree v2df_ftype_v2df_pcdouble
19181 = build_function_type_list (V2DF_type_node,
19182 V2DF_type_node, pcdouble_type_node, NULL_TREE);
19183 tree void_ftype_pdouble_v2df
19184 = build_function_type_list (void_type_node,
19185 pdouble_type_node, V2DF_type_node, NULL_TREE);
19186 tree void_ftype_pint_int
19187 = build_function_type_list (void_type_node,
19188 pint_type_node, integer_type_node, NULL_TREE);
19189 tree void_ftype_v16qi_v16qi_pchar
19190 = build_function_type_list (void_type_node,
19191 V16QI_type_node, V16QI_type_node,
19192 pchar_type_node, NULL_TREE);
19193 tree v2df_ftype_pcdouble
19194 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
19195 tree v2df_ftype_v2df_v2df
19196 = build_function_type_list (V2DF_type_node,
19197 V2DF_type_node, V2DF_type_node, NULL_TREE);
19198 tree v16qi_ftype_v16qi_v16qi
19199 = build_function_type_list (V16QI_type_node,
19200 V16QI_type_node, V16QI_type_node, NULL_TREE);
19201 tree v8hi_ftype_v8hi_v8hi
19202 = build_function_type_list (V8HI_type_node,
19203 V8HI_type_node, V8HI_type_node, NULL_TREE);
19204 tree v4si_ftype_v4si_v4si
19205 = build_function_type_list (V4SI_type_node,
19206 V4SI_type_node, V4SI_type_node, NULL_TREE);
19207 tree v2di_ftype_v2di_v2di
19208 = build_function_type_list (V2DI_type_node,
19209 V2DI_type_node, V2DI_type_node, NULL_TREE);
19210 tree v2di_ftype_v2df_v2df
19211 = build_function_type_list (V2DI_type_node,
19212 V2DF_type_node, V2DF_type_node, NULL_TREE);
19213 tree v2df_ftype_v2df
19214 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
19215 tree v2di_ftype_v2di_int
19216 = build_function_type_list (V2DI_type_node,
19217 V2DI_type_node, integer_type_node, NULL_TREE);
19218 tree v2di_ftype_v2di_v2di_int
19219 = build_function_type_list (V2DI_type_node, V2DI_type_node,
19220 V2DI_type_node, integer_type_node, NULL_TREE);
19221 tree v4si_ftype_v4si_int
19222 = build_function_type_list (V4SI_type_node,
19223 V4SI_type_node, integer_type_node, NULL_TREE);
19224 tree v8hi_ftype_v8hi_int
19225 = build_function_type_list (V8HI_type_node,
19226 V8HI_type_node, integer_type_node, NULL_TREE);
19227 tree v4si_ftype_v8hi_v8hi
19228 = build_function_type_list (V4SI_type_node,
19229 V8HI_type_node, V8HI_type_node, NULL_TREE);
19230 tree v1di_ftype_v8qi_v8qi
19231 = build_function_type_list (V1DI_type_node,
19232 V8QI_type_node, V8QI_type_node, NULL_TREE);
19233 tree v1di_ftype_v2si_v2si
19234 = build_function_type_list (V1DI_type_node,
19235 V2SI_type_node, V2SI_type_node, NULL_TREE);
19236 tree v2di_ftype_v16qi_v16qi
19237 = build_function_type_list (V2DI_type_node,
19238 V16QI_type_node, V16QI_type_node, NULL_TREE);
19239 tree v2di_ftype_v4si_v4si
19240 = build_function_type_list (V2DI_type_node,
19241 V4SI_type_node, V4SI_type_node, NULL_TREE);
19242 tree int_ftype_v16qi
19243 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
19244 tree v16qi_ftype_pcchar
19245 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
19246 tree void_ftype_pchar_v16qi
19247 = build_function_type_list (void_type_node,
19248 pchar_type_node, V16QI_type_node, NULL_TREE);
19250 tree v2di_ftype_v2di_unsigned_unsigned
19251 = build_function_type_list (V2DI_type_node, V2DI_type_node,
19252 unsigned_type_node, unsigned_type_node,
19253 NULL_TREE);
19254 tree v2di_ftype_v2di_v2di_unsigned_unsigned
19255 = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
19256 unsigned_type_node, unsigned_type_node,
19257 NULL_TREE);
19258 tree v2di_ftype_v2di_v16qi
19259 = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
19260 NULL_TREE);
19261 tree v2df_ftype_v2df_v2df_v2df
19262 = build_function_type_list (V2DF_type_node,
19263 V2DF_type_node, V2DF_type_node,
19264 V2DF_type_node, NULL_TREE);
19265 tree v4sf_ftype_v4sf_v4sf_v4sf
19266 = build_function_type_list (V4SF_type_node,
19267 V4SF_type_node, V4SF_type_node,
19268 V4SF_type_node, NULL_TREE);
19269 tree v8hi_ftype_v16qi
19270 = build_function_type_list (V8HI_type_node, V16QI_type_node,
19271 NULL_TREE);
19272 tree v4si_ftype_v16qi
19273 = build_function_type_list (V4SI_type_node, V16QI_type_node,
19274 NULL_TREE);
19275 tree v2di_ftype_v16qi
19276 = build_function_type_list (V2DI_type_node, V16QI_type_node,
19277 NULL_TREE);
19278 tree v4si_ftype_v8hi
19279 = build_function_type_list (V4SI_type_node, V8HI_type_node,
19280 NULL_TREE);
19281 tree v2di_ftype_v8hi
19282 = build_function_type_list (V2DI_type_node, V8HI_type_node,
19283 NULL_TREE);
19284 tree v2di_ftype_v4si
19285 = build_function_type_list (V2DI_type_node, V4SI_type_node,
19286 NULL_TREE);
19287 tree v2di_ftype_pv2di
19288 = build_function_type_list (V2DI_type_node, pv2di_type_node,
19289 NULL_TREE);
19290 tree v16qi_ftype_v16qi_v16qi_int
19291 = build_function_type_list (V16QI_type_node, V16QI_type_node,
19292 V16QI_type_node, integer_type_node,
19293 NULL_TREE);
19294 tree v16qi_ftype_v16qi_v16qi_v16qi
19295 = build_function_type_list (V16QI_type_node, V16QI_type_node,
19296 V16QI_type_node, V16QI_type_node,
19297 NULL_TREE);
19298 tree v8hi_ftype_v8hi_v8hi_int
19299 = build_function_type_list (V8HI_type_node, V8HI_type_node,
19300 V8HI_type_node, integer_type_node,
19301 NULL_TREE);
19302 tree v4si_ftype_v4si_v4si_int
19303 = build_function_type_list (V4SI_type_node, V4SI_type_node,
19304 V4SI_type_node, integer_type_node,
19305 NULL_TREE);
19306 tree int_ftype_v2di_v2di
19307 = build_function_type_list (integer_type_node,
19308 V2DI_type_node, V2DI_type_node,
19309 NULL_TREE);
19310 tree int_ftype_v16qi_int_v16qi_int_int
19311 = build_function_type_list (integer_type_node,
19312 V16QI_type_node,
19313 integer_type_node,
19314 V16QI_type_node,
19315 integer_type_node,
19316 integer_type_node,
19317 NULL_TREE);
19318 tree v16qi_ftype_v16qi_int_v16qi_int_int
19319 = build_function_type_list (V16QI_type_node,
19320 V16QI_type_node,
19321 integer_type_node,
19322 V16QI_type_node,
19323 integer_type_node,
19324 integer_type_node,
19325 NULL_TREE);
19326 tree int_ftype_v16qi_v16qi_int
19327 = build_function_type_list (integer_type_node,
19328 V16QI_type_node,
19329 V16QI_type_node,
19330 integer_type_node,
19331 NULL_TREE);
19333 /* SSE5 instructions.  */
19334 tree v2di_ftype_v2di_v2di_v2di
19335 = build_function_type_list (V2DI_type_node,
19336 V2DI_type_node,
19337 V2DI_type_node,
19338 V2DI_type_node,
19339 NULL_TREE);
19341 tree v4si_ftype_v4si_v4si_v4si
19342 = build_function_type_list (V4SI_type_node,
19343 V4SI_type_node,
19344 V4SI_type_node,
19345 V4SI_type_node,
19346 NULL_TREE);
19348 tree v4si_ftype_v4si_v4si_v2di
19349 = build_function_type_list (V4SI_type_node,
19350 V4SI_type_node,
19351 V4SI_type_node,
19352 V2DI_type_node,
19353 NULL_TREE);
19355 tree v8hi_ftype_v8hi_v8hi_v8hi
19356 = build_function_type_list (V8HI_type_node,
19357 V8HI_type_node,
19358 V8HI_type_node,
19359 V8HI_type_node,
19360 NULL_TREE);
19362 tree v8hi_ftype_v8hi_v8hi_v4si
19363 = build_function_type_list (V8HI_type_node,
19364 V8HI_type_node,
19365 V8HI_type_node,
19366 V4SI_type_node,
19367 NULL_TREE);
19369 tree v2df_ftype_v2df_v2df_v16qi
19370 = build_function_type_list (V2DF_type_node,
19371 V2DF_type_node,
19372 V2DF_type_node,
19373 V16QI_type_node,
19374 NULL_TREE);
19376 tree v4sf_ftype_v4sf_v4sf_v16qi
19377 = build_function_type_list (V4SF_type_node,
19378 V4SF_type_node,
19379 V4SF_type_node,
19380 V16QI_type_node,
19381 NULL_TREE);
19383 tree v2di_ftype_v2di_si
19384 = build_function_type_list (V2DI_type_node,
19385 V2DI_type_node,
19386 integer_type_node,
19387 NULL_TREE);
19389 tree v4si_ftype_v4si_si
19390 = build_function_type_list (V4SI_type_node,
19391 V4SI_type_node,
19392 integer_type_node,
19393 NULL_TREE);
19395 tree v8hi_ftype_v8hi_si
19396 = build_function_type_list (V8HI_type_node,
19397 V8HI_type_node,
19398 integer_type_node,
19399 NULL_TREE);
19401 tree v16qi_ftype_v16qi_si
19402 = build_function_type_list (V16QI_type_node,
19403 V16QI_type_node,
19404 integer_type_node,
19405 NULL_TREE);
19406 tree v4sf_ftype_v4hi
19407 = build_function_type_list (V4SF_type_node,
19408 V4HI_type_node,
19409 NULL_TREE);
19411 tree v4hi_ftype_v4sf
19412 = build_function_type_list (V4HI_type_node,
19413 V4SF_type_node,
19414 NULL_TREE);
19416 tree v2di_ftype_v2di
19417 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
19419 tree v16qi_ftype_v8hi_v8hi
19420 = build_function_type_list (V16QI_type_node,
19421 V8HI_type_node, V8HI_type_node,
19422 NULL_TREE);
19423 tree v8hi_ftype_v4si_v4si
19424 = build_function_type_list (V8HI_type_node,
19425 V4SI_type_node, V4SI_type_node,
19426 NULL_TREE);
19427 tree v8hi_ftype_v16qi_v16qi
19428 = build_function_type_list (V8HI_type_node,
19429 V16QI_type_node, V16QI_type_node,
19430 NULL_TREE);
19431 tree v4hi_ftype_v8qi_v8qi
19432 = build_function_type_list (V4HI_type_node,
19433 V8QI_type_node, V8QI_type_node,
19434 NULL_TREE);
19435 tree unsigned_ftype_unsigned_uchar
19436 = build_function_type_list (unsigned_type_node,
19437 unsigned_type_node,
19438 unsigned_char_type_node,
19439 NULL_TREE);
19440 tree unsigned_ftype_unsigned_ushort
19441 = build_function_type_list (unsigned_type_node,
19442 unsigned_type_node,
19443 short_unsigned_type_node,
19444 NULL_TREE);
19445 tree unsigned_ftype_unsigned_unsigned
19446 = build_function_type_list (unsigned_type_node,
19447 unsigned_type_node,
19448 unsigned_type_node,
19449 NULL_TREE);
19450 tree uint64_ftype_uint64_uint64
19451 = build_function_type_list (long_long_unsigned_type_node,
19452 long_long_unsigned_type_node,
19453 long_long_unsigned_type_node,
19454 NULL_TREE);
19455 tree float_ftype_float
19456 = build_function_type_list (float_type_node,
19457 float_type_node,
19458 NULL_TREE);
19460 tree ftype;
19462 /* The __float80 type. */
19463 if (TYPE_MODE (long_double_type_node) == XFmode)
19464 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
19465 "__float80");
19466 else
19468 /* long double is not XFmode here, so create __float80 as a distinct 80-bit type. */
19469 tree float80_type_node = make_node (REAL_TYPE);
19471 TYPE_PRECISION (float80_type_node) = 80;
19472 layout_type (float80_type_node);
19473 (*lang_hooks.types.register_builtin_type) (float80_type_node,
19474 "__float80");
19477 if (TARGET_64BIT)
19479 tree float128_type_node = make_node (REAL_TYPE);
19481 TYPE_PRECISION (float128_type_node) = 128;
19482 layout_type (float128_type_node);
19483 (*lang_hooks.types.register_builtin_type) (float128_type_node,
19484 "__float128");
19486 /* TFmode support builtins. */
19487 ftype = build_function_type (float128_type_node,
19488 void_list_node);
19489 def_builtin (OPTION_MASK_ISA_64BIT, "__builtin_infq", ftype, IX86_BUILTIN_INFQ);
19491 ftype = build_function_type_list (float128_type_node,
19492 float128_type_node,
19493 NULL_TREE);
19494 def_builtin_const (OPTION_MASK_ISA_64BIT, "__builtin_fabsq", ftype, IX86_BUILTIN_FABSQ);
19496 ftype = build_function_type_list (float128_type_node,
19497 float128_type_node,
19498 float128_type_node,
19499 NULL_TREE);
19500 def_builtin_const (OPTION_MASK_ISA_64BIT, "__builtin_copysignq", ftype, IX86_BUILTIN_COPYSIGNQ);
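/* Sketch of user-level usage, with hypothetical variables x and y:

     __float128 i = __builtin_infq ();
     __float128 a = __builtin_fabsq (x);
     __float128 c = __builtin_copysignq (x, y);

   matching the signatures just built.  */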
19503 /* Add all special builtins with a variable number of operands. */
19504 for (i = 0, d = bdesc_special_args;
19505 i < ARRAY_SIZE (bdesc_special_args);
19506 i++, d++)
19508 tree type;
19510 if (d->name == 0)
19511 continue;
19513 switch ((enum ix86_special_builtin_type) d->flag)
19515 case VOID_FTYPE_VOID:
19516 type = void_ftype_void;
19517 break;
19518 case V16QI_FTYPE_PCCHAR:
19519 type = v16qi_ftype_pcchar;
19520 break;
19521 case V4SF_FTYPE_PCFLOAT:
19522 type = v4sf_ftype_pcfloat;
19523 break;
19524 case V2DI_FTYPE_PV2DI:
19525 type = v2di_ftype_pv2di;
19526 break;
19527 case V2DF_FTYPE_PCDOUBLE:
19528 type = v2df_ftype_pcdouble;
19529 break;
19530 case V4SF_FTYPE_V4SF_PCV2SF:
19531 type = v4sf_ftype_v4sf_pcv2sf;
19532 break;
19533 case V2DF_FTYPE_V2DF_PCDOUBLE:
19534 type = v2df_ftype_v2df_pcdouble;
19535 break;
19536 case VOID_FTYPE_PV2SF_V4SF:
19537 type = void_ftype_pv2sf_v4sf;
19538 break;
19539 case VOID_FTYPE_PV2DI_V2DI:
19540 type = void_ftype_pv2di_v2di;
19541 break;
19542 case VOID_FTYPE_PCHAR_V16QI:
19543 type = void_ftype_pchar_v16qi;
19544 break;
19545 case VOID_FTYPE_PFLOAT_V4SF:
19546 type = void_ftype_pfloat_v4sf;
19547 break;
19548 case VOID_FTYPE_PDOUBLE_V2DF:
19549 type = void_ftype_pdouble_v2df;
19550 break;
19551 case VOID_FTYPE_PDI_DI:
19552 type = void_ftype_pdi_di;
19553 break;
19554 case VOID_FTYPE_PINT_INT:
19555 type = void_ftype_pint_int;
19556 break;
19557 default:
19558 gcc_unreachable ();
19561 def_builtin (d->mask, d->name, type, d->code);
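/* For illustration: a row flagged V4SF_FTYPE_PCFLOAT is registered
   with signature "v4sf (const float *)", the shape an unaligned
   load such as __builtin_ia32_loadups would use.  */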
19564 /* Add all builtins with a variable number of operands. */
19565 for (i = 0, d = bdesc_args;
19566 i < ARRAY_SIZE (bdesc_args);
19567 i++, d++)
19569 tree type;
19571 if (d->name == 0)
19572 continue;
19574 switch ((enum ix86_builtin_type) d->flag)
19576 case FLOAT_FTYPE_FLOAT:
19577 type = float_ftype_float;
19578 break;
19579 case INT_FTYPE_V2DI_V2DI_PTEST:
19580 type = int_ftype_v2di_v2di;
19581 break;
19582 case INT64_FTYPE_V4SF:
19583 type = int64_ftype_v4sf;
19584 break;
19585 case INT64_FTYPE_V2DF:
19586 type = int64_ftype_v2df;
19587 break;
19588 case INT_FTYPE_V16QI:
19589 type = int_ftype_v16qi;
19590 break;
19591 case INT_FTYPE_V8QI:
19592 type = int_ftype_v8qi;
19593 break;
19594 case INT_FTYPE_V4SF:
19595 type = int_ftype_v4sf;
19596 break;
19597 case INT_FTYPE_V2DF:
19598 type = int_ftype_v2df;
19599 break;
19600 case V16QI_FTYPE_V16QI:
19601 type = v16qi_ftype_v16qi;
19602 break;
19603 case V8HI_FTYPE_V8HI:
19604 type = v8hi_ftype_v8hi;
19605 break;
19606 case V8HI_FTYPE_V16QI:
19607 type = v8hi_ftype_v16qi;
19608 break;
19609 case V8QI_FTYPE_V8QI:
19610 type = v8qi_ftype_v8qi;
19611 break;
19612 case V4SI_FTYPE_V4SI:
19613 type = v4si_ftype_v4si;
19614 break;
19615 case V4SI_FTYPE_V16QI:
19616 type = v4si_ftype_v16qi;
19617 break;
19618 case V4SI_FTYPE_V8HI:
19619 type = v4si_ftype_v8hi;
19620 break;
19621 case V4SI_FTYPE_V4SF:
19622 type = v4si_ftype_v4sf;
19623 break;
19624 case V4SI_FTYPE_V2DF:
19625 type = v4si_ftype_v2df;
19626 break;
19627 case V4HI_FTYPE_V4HI:
19628 type = v4hi_ftype_v4hi;
19629 break;
19630 case V4SF_FTYPE_V4SF:
19631 case V4SF_FTYPE_V4SF_VEC_MERGE:
19632 type = v4sf_ftype_v4sf;
19633 break;
19634 case V4SF_FTYPE_V4SI:
19635 type = v4sf_ftype_v4si;
19636 break;
19637 case V4SF_FTYPE_V2DF:
19638 type = v4sf_ftype_v2df;
19639 break;
19640 case V2DI_FTYPE_V2DI:
19641 type = v2di_ftype_v2di;
19642 break;
19643 case V2DI_FTYPE_V16QI:
19644 type = v2di_ftype_v16qi;
19645 break;
19646 case V2DI_FTYPE_V8HI:
19647 type = v2di_ftype_v8hi;
19648 break;
19649 case V2DI_FTYPE_V4SI:
19650 type = v2di_ftype_v4si;
19651 break;
19652 case V2SI_FTYPE_V2SI:
19653 type = v2si_ftype_v2si;
19654 break;
19655 case V2SI_FTYPE_V4SF:
19656 type = v2si_ftype_v4sf;
19657 break;
19658 case V2SI_FTYPE_V2DF:
19659 type = v2si_ftype_v2df;
19660 break;
19661 case V2SI_FTYPE_V2SF:
19662 type = v2si_ftype_v2sf;
19663 break;
19664 case V2DF_FTYPE_V4SF:
19665 type = v2df_ftype_v4sf;
19666 break;
19667 case V2DF_FTYPE_V2DF:
19668 case V2DF_FTYPE_V2DF_VEC_MERGE:
19669 type = v2df_ftype_v2df;
19670 break;
19671 case V2DF_FTYPE_V2SI:
19672 type = v2df_ftype_v2si;
19673 break;
19674 case V2DF_FTYPE_V4SI:
19675 type = v2df_ftype_v4si;
19676 break;
19677 case V2SF_FTYPE_V2SF:
19678 type = v2sf_ftype_v2sf;
19679 break;
19680 case V2SF_FTYPE_V2SI:
19681 type = v2sf_ftype_v2si;
19682 break;
19683 case V16QI_FTYPE_V16QI_V16QI:
19684 type = v16qi_ftype_v16qi_v16qi;
19685 break;
19686 case V16QI_FTYPE_V8HI_V8HI:
19687 type = v16qi_ftype_v8hi_v8hi;
19688 break;
19689 case V8QI_FTYPE_V8QI_V8QI:
19690 type = v8qi_ftype_v8qi_v8qi;
19691 break;
19692 case V8QI_FTYPE_V4HI_V4HI:
19693 type = v8qi_ftype_v4hi_v4hi;
19694 break;
19695 case V8HI_FTYPE_V8HI_V8HI:
19696 case V8HI_FTYPE_V8HI_V8HI_COUNT:
19697 type = v8hi_ftype_v8hi_v8hi;
19698 break;
19699 case V8HI_FTYPE_V16QI_V16QI:
19700 type = v8hi_ftype_v16qi_v16qi;
19701 break;
19702 case V8HI_FTYPE_V4SI_V4SI:
19703 type = v8hi_ftype_v4si_v4si;
19704 break;
19705 case V8HI_FTYPE_V8HI_SI_COUNT:
19706 type = v8hi_ftype_v8hi_int;
19707 break;
19708 case V4SI_FTYPE_V4SI_V4SI:
19709 case V4SI_FTYPE_V4SI_V4SI_COUNT:
19710 type = v4si_ftype_v4si_v4si;
19711 break;
19712 case V4SI_FTYPE_V8HI_V8HI:
19713 type = v4si_ftype_v8hi_v8hi;
19714 break;
19715 case V4SI_FTYPE_V4SF_V4SF:
19716 type = v4si_ftype_v4sf_v4sf;
19717 break;
19718 case V4SI_FTYPE_V2DF_V2DF:
19719 type = v4si_ftype_v2df_v2df;
19720 break;
19721 case V4SI_FTYPE_V4SI_SI_COUNT:
19722 type = v4si_ftype_v4si_int;
19723 break;
19724 case V4HI_FTYPE_V4HI_V4HI:
19725 case V4HI_FTYPE_V4HI_V4HI_COUNT:
19726 type = v4hi_ftype_v4hi_v4hi;
19727 break;
19728 case V4HI_FTYPE_V8QI_V8QI:
19729 type = v4hi_ftype_v8qi_v8qi;
19730 break;
19731 case V4HI_FTYPE_V2SI_V2SI:
19732 type = v4hi_ftype_v2si_v2si;
19733 break;
19734 case V4HI_FTYPE_V4HI_SI_COUNT:
19735 type = v4hi_ftype_v4hi_int;
19736 break;
19737 case V4SF_FTYPE_V4SF_V4SF:
19738 case V4SF_FTYPE_V4SF_V4SF_SWAP:
19739 type = v4sf_ftype_v4sf_v4sf;
19740 break;
19741 case V4SF_FTYPE_V4SF_V2SI:
19742 type = v4sf_ftype_v4sf_v2si;
19743 break;
19744 case V4SF_FTYPE_V4SF_V2DF:
19745 type = v4sf_ftype_v4sf_v2df;
19746 break;
19747 case V4SF_FTYPE_V4SF_DI:
19748 type = v4sf_ftype_v4sf_int64;
19749 break;
19750 case V4SF_FTYPE_V4SF_SI:
19751 type = v4sf_ftype_v4sf_int;
19752 break;
19753 case V2DI_FTYPE_V2DI_V2DI:
19754 case V2DI_FTYPE_V2DI_V2DI_COUNT:
19755 type = v2di_ftype_v2di_v2di;
19756 break;
19757 case V2DI_FTYPE_V16QI_V16QI:
19758 type = v2di_ftype_v16qi_v16qi;
19759 break;
19760 case V2DI_FTYPE_V4SI_V4SI:
19761 type = v2di_ftype_v4si_v4si;
19762 break;
19763 case V2DI_FTYPE_V2DI_V16QI:
19764 type = v2di_ftype_v2di_v16qi;
19765 break;
19766 case V2DI_FTYPE_V2DF_V2DF:
19767 type = v2di_ftype_v2df_v2df;
19768 break;
19769 case V2DI_FTYPE_V2DI_SI_COUNT:
19770 type = v2di_ftype_v2di_int;
19771 break;
19772 case V2SI_FTYPE_V2SI_V2SI:
19773 case V2SI_FTYPE_V2SI_V2SI_COUNT:
19774 type = v2si_ftype_v2si_v2si;
19775 break;
19776 case V2SI_FTYPE_V4HI_V4HI:
19777 type = v2si_ftype_v4hi_v4hi;
19778 break;
19779 case V2SI_FTYPE_V2SF_V2SF:
19780 type = v2si_ftype_v2sf_v2sf;
19781 break;
19782 case V2SI_FTYPE_V2SI_SI_COUNT:
19783 type = v2si_ftype_v2si_int;
19784 break;
19785 case V2DF_FTYPE_V2DF_V2DF:
19786 case V2DF_FTYPE_V2DF_V2DF_SWAP:
19787 type = v2df_ftype_v2df_v2df;
19788 break;
19789 case V2DF_FTYPE_V2DF_V4SF:
19790 type = v2df_ftype_v2df_v4sf;
19791 break;
19792 case V2DF_FTYPE_V2DF_DI:
19793 type = v2df_ftype_v2df_int64;
19794 break;
19795 case V2DF_FTYPE_V2DF_SI:
19796 type = v2df_ftype_v2df_int;
19797 break;
19798 case V2SF_FTYPE_V2SF_V2SF:
19799 type = v2sf_ftype_v2sf_v2sf;
19800 break;
19801 case V1DI_FTYPE_V1DI_V1DI:
19802 case V1DI_FTYPE_V1DI_V1DI_COUNT:
19803 type = v1di_ftype_v1di_v1di;
19804 break;
19805 case V1DI_FTYPE_V8QI_V8QI:
19806 type = v1di_ftype_v8qi_v8qi;
19807 break;
19808 case V1DI_FTYPE_V2SI_V2SI:
19809 type = v1di_ftype_v2si_v2si;
19810 break;
19811 case V1DI_FTYPE_V1DI_SI_COUNT:
19812 type = v1di_ftype_v1di_int;
19813 break;
19814 case UINT64_FTYPE_UINT64_UINT64:
19815 type = uint64_ftype_uint64_uint64;
19816 break;
19817 case UINT_FTYPE_UINT_UINT:
19818 type = unsigned_ftype_unsigned_unsigned;
19819 break;
19820 case UINT_FTYPE_UINT_USHORT:
19821 type = unsigned_ftype_unsigned_ushort;
19822 break;
19823 case UINT_FTYPE_UINT_UCHAR:
19824 type = unsigned_ftype_unsigned_uchar;
19825 break;
19826 case V8HI_FTYPE_V8HI_INT:
19827 type = v8hi_ftype_v8hi_int;
19828 break;
19829 case V4SI_FTYPE_V4SI_INT:
19830 type = v4si_ftype_v4si_int;
19831 break;
19832 case V4HI_FTYPE_V4HI_INT:
19833 type = v4hi_ftype_v4hi_int;
19834 break;
19835 case V4SF_FTYPE_V4SF_INT:
19836 type = v4sf_ftype_v4sf_int;
19837 break;
19838 case V2DI_FTYPE_V2DI_INT:
19839 case V2DI2TI_FTYPE_V2DI_INT:
19840 type = v2di_ftype_v2di_int;
19841 break;
19842 case V2DF_FTYPE_V2DF_INT:
19843 type = v2df_ftype_v2df_int;
19844 break;
19845 case V16QI_FTYPE_V16QI_V16QI_V16QI:
19846 type = v16qi_ftype_v16qi_v16qi_v16qi;
19847 break;
19848 case V4SF_FTYPE_V4SF_V4SF_V4SF:
19849 type = v4sf_ftype_v4sf_v4sf_v4sf;
19850 break;
19851 case V2DF_FTYPE_V2DF_V2DF_V2DF:
19852 type = v2df_ftype_v2df_v2df_v2df;
19853 break;
19854 case V16QI_FTYPE_V16QI_V16QI_INT:
19855 type = v16qi_ftype_v16qi_v16qi_int;
19856 break;
19857 case V8HI_FTYPE_V8HI_V8HI_INT:
19858 type = v8hi_ftype_v8hi_v8hi_int;
19859 break;
19860 case V4SI_FTYPE_V4SI_V4SI_INT:
19861 type = v4si_ftype_v4si_v4si_int;
19862 break;
19863 case V4SF_FTYPE_V4SF_V4SF_INT:
19864 type = v4sf_ftype_v4sf_v4sf_int;
19865 break;
19866 case V2DI_FTYPE_V2DI_V2DI_INT:
19867 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
19868 type = v2di_ftype_v2di_v2di_int;
19869 break;
19870 case V2DF_FTYPE_V2DF_V2DF_INT:
19871 type = v2df_ftype_v2df_v2df_int;
19872 break;
19873 case V2DI_FTYPE_V2DI_UINT_UINT:
19874 type = v2di_ftype_v2di_unsigned_unsigned;
19875 break;
19876 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
19877 type = v2di_ftype_v2di_v2di_unsigned_unsigned;
19878 break;
19879 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
19880 type = v1di_ftype_v1di_v1di_int;
19881 break;
19882 default:
19883 gcc_unreachable ();
19886 def_builtin_const (d->mask, d->name, type, d->code);
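/* Everything registered through this loop is free of side effects,
   so def_builtin_const is used; it marks the decl TREE_READONLY,
   which lets the optimizers CSE and hoist calls to these builtins.  */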
19889 /* pcmpestr[im] insns. */
19890 for (i = 0, d = bdesc_pcmpestr;
19891 i < ARRAY_SIZE (bdesc_pcmpestr);
19892 i++, d++)
19894 if (d->code == IX86_BUILTIN_PCMPESTRM128)
19895 ftype = v16qi_ftype_v16qi_int_v16qi_int_int;
19896 else
19897 ftype = int_ftype_v16qi_int_v16qi_int_int;
19898 def_builtin_const (d->mask, d->name, ftype, d->code);
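/* pcmpestrm returns its result mask as a V16QI value; the index and
   flag-testing forms return an int, hence the two signatures above.  */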
19901 /* pcmpistr[im] insns. */
19902 for (i = 0, d = bdesc_pcmpistr;
19903 i < ARRAY_SIZE (bdesc_pcmpistr);
19904 i++, d++)
19906 if (d->code == IX86_BUILTIN_PCMPISTRM128)
19907 ftype = v16qi_ftype_v16qi_v16qi_int;
19908 else
19909 ftype = int_ftype_v16qi_v16qi_int;
19910 def_builtin_const (d->mask, d->name, ftype, d->code);
19913 /* comi/ucomi insns. */
19914 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
19915 if (d->mask == OPTION_MASK_ISA_SSE2)
19916 def_builtin_const (d->mask, d->name, int_ftype_v2df_v2df, d->code);
19917 else
19918 def_builtin_const (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
19920 /* SSE */
19921 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
19922 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
19924 /* SSE or 3DNow!A */
19925 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
19927 /* SSE2 */
19928 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
19930 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
19931 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
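/* The decl is also remembered in the global x86_mfence, presumably
   so that other parts of the backend can emit a call to the fence
   builtin directly.  */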
19933 /* SSE3. */
19934 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor", void_ftype_pcvoid_unsigned_unsigned, IX86_BUILTIN_MONITOR);
19935 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait", void_ftype_unsigned_unsigned, IX86_BUILTIN_MWAIT);
19937 /* AES */
19938 if (TARGET_AES)
19940 /* Define AES built-in functions only if AES is enabled. */
19941 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesenc128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENC128);
19942 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesenclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENCLAST128);
19943 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesdec128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDEC128);
19944 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesdeclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDECLAST128);
19945 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesimc128", v2di_ftype_v2di, IX86_BUILTIN_AESIMC128);
19946 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aeskeygenassist128", v2di_ftype_v2di_int, IX86_BUILTIN_AESKEYGENASSIST128);
19949 /* PCLMUL */
19950 if (TARGET_PCLMUL)
19952 /* Define PCLMUL built-in function only if PCLMUL is enabled. */
19953 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pclmulqdq128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PCLMULQDQ128);
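/* Hypothetical use: __builtin_ia32_pclmulqdq128 (a, b, 0) performs
   a carry-less multiply of the low quadwords of A and B; the
   immediate selects which 64-bit half of each operand takes part.  */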
19956 /* Access to the vec_init patterns. */
19957 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
19958 integer_type_node, NULL_TREE);
19959 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si", ftype, IX86_BUILTIN_VEC_INIT_V2SI);
19961 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
19962 short_integer_type_node,
19963 short_integer_type_node,
19964 short_integer_type_node, NULL_TREE);
19965 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi", ftype, IX86_BUILTIN_VEC_INIT_V4HI);
19967 ftype = build_function_type_list (V8QI_type_node, char_type_node,
19968 char_type_node, char_type_node,
19969 char_type_node, char_type_node,
19970 char_type_node, char_type_node,
19971 char_type_node, NULL_TREE);
19972 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi", ftype, IX86_BUILTIN_VEC_INIT_V8QI);
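/* E.g. __builtin_ia32_vec_init_v2si (1, 2) (hypothetical operands)
   assembles a V2SI vector from the two scalar arguments via the
   vec_init expander.  */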
19974 /* Access to the vec_extract patterns. */
19975 ftype = build_function_type_list (double_type_node, V2DF_type_node,
19976 integer_type_node, NULL_TREE);
19977 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df", ftype, IX86_BUILTIN_VEC_EXT_V2DF);
19979 ftype = build_function_type_list (long_long_integer_type_node,
19980 V2DI_type_node, integer_type_node,
19981 NULL_TREE);
19982 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di", ftype, IX86_BUILTIN_VEC_EXT_V2DI);
19984 ftype = build_function_type_list (float_type_node, V4SF_type_node,
19985 integer_type_node, NULL_TREE);
19986 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf", ftype, IX86_BUILTIN_VEC_EXT_V4SF);
19988 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
19989 integer_type_node, NULL_TREE);
19990 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si", ftype, IX86_BUILTIN_VEC_EXT_V4SI);
19992 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
19993 integer_type_node, NULL_TREE);
19994 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi", ftype, IX86_BUILTIN_VEC_EXT_V8HI);
19996 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
19997 integer_type_node, NULL_TREE);
19998 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_ext_v4hi", ftype, IX86_BUILTIN_VEC_EXT_V4HI);
20000 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
20001 integer_type_node, NULL_TREE);
20002 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si", ftype, IX86_BUILTIN_VEC_EXT_V2SI);
20004 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
20005 integer_type_node, NULL_TREE);
20006 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi", ftype, IX86_BUILTIN_VEC_EXT_V16QI);
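/* E.g. __builtin_ia32_vec_ext_v4sf (v, 2), with a hypothetical
   vector V, extracts element 2 as a float through the vec_extract
   expander.  */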
20008 /* Access to the vec_set patterns. */
20009 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
20010 intDI_type_node,
20011 integer_type_node, NULL_TREE);
20012 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_vec_set_v2di", ftype, IX86_BUILTIN_VEC_SET_V2DI);
20014 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
20015 float_type_node,
20016 integer_type_node, NULL_TREE);
20017 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf", ftype, IX86_BUILTIN_VEC_SET_V4SF);
20019 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
20020 intSI_type_node,
20021 integer_type_node, NULL_TREE);
20022 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si", ftype, IX86_BUILTIN_VEC_SET_V4SI);
20024 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
20025 intHI_type_node,
20026 integer_type_node, NULL_TREE);
20027 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi", ftype, IX86_BUILTIN_VEC_SET_V8HI);
20029 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
20030 intHI_type_node,
20031 integer_type_node, NULL_TREE);
20032 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_set_v4hi", ftype, IX86_BUILTIN_VEC_SET_V4HI);
20034 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
20035 intQI_type_node,
20036 integer_type_node, NULL_TREE);
20037 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi", ftype, IX86_BUILTIN_VEC_SET_V16QI);
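/* E.g. __builtin_ia32_vec_set_v8hi (v, x, 3), with hypothetical
   operands, returns a copy of V whose element 3 has been replaced
   by X.  */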
20039 /* Add the SSE5 multi-argument instructions. */
20040 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
20042 tree mtype = NULL_TREE;
20044 if (d->name == 0)
20045 continue;
20047 switch ((enum multi_arg_type)d->flag)
20049 case MULTI_ARG_3_SF: mtype = v4sf_ftype_v4sf_v4sf_v4sf; break;
20050 case MULTI_ARG_3_DF: mtype = v2df_ftype_v2df_v2df_v2df; break;
20051 case MULTI_ARG_3_DI: mtype = v2di_ftype_v2di_v2di_v2di; break;
20052 case MULTI_ARG_3_SI: mtype = v4si_ftype_v4si_v4si_v4si; break;
20053 case MULTI_ARG_3_SI_DI: mtype = v4si_ftype_v4si_v4si_v2di; break;
20054 case MULTI_ARG_3_HI: mtype = v8hi_ftype_v8hi_v8hi_v8hi; break;
20055 case MULTI_ARG_3_HI_SI: mtype = v8hi_ftype_v8hi_v8hi_v4si; break;
20056 case MULTI_ARG_3_QI: mtype = v16qi_ftype_v16qi_v16qi_v16qi; break;
20057 case MULTI_ARG_3_PERMPS: mtype = v4sf_ftype_v4sf_v4sf_v16qi; break;
20058 case MULTI_ARG_3_PERMPD: mtype = v2df_ftype_v2df_v2df_v16qi; break;
20059 case MULTI_ARG_2_SF: mtype = v4sf_ftype_v4sf_v4sf; break;
20060 case MULTI_ARG_2_DF: mtype = v2df_ftype_v2df_v2df; break;
20061 case MULTI_ARG_2_DI: mtype = v2di_ftype_v2di_v2di; break;
20062 case MULTI_ARG_2_SI: mtype = v4si_ftype_v4si_v4si; break;
20063 case MULTI_ARG_2_HI: mtype = v8hi_ftype_v8hi_v8hi; break;
20064 case MULTI_ARG_2_QI: mtype = v16qi_ftype_v16qi_v16qi; break;
20065 case MULTI_ARG_2_DI_IMM: mtype = v2di_ftype_v2di_si; break;
20066 case MULTI_ARG_2_SI_IMM: mtype = v4si_ftype_v4si_si; break;
20067 case MULTI_ARG_2_HI_IMM: mtype = v8hi_ftype_v8hi_si; break;
20068 case MULTI_ARG_2_QI_IMM: mtype = v16qi_ftype_v16qi_si; break;
20069 case MULTI_ARG_2_SF_CMP: mtype = v4sf_ftype_v4sf_v4sf; break;
20070 case MULTI_ARG_2_DF_CMP: mtype = v2df_ftype_v2df_v2df; break;
20071 case MULTI_ARG_2_DI_CMP: mtype = v2di_ftype_v2di_v2di; break;
20072 case MULTI_ARG_2_SI_CMP: mtype = v4si_ftype_v4si_v4si; break;
20073 case MULTI_ARG_2_HI_CMP: mtype = v8hi_ftype_v8hi_v8hi; break;
20074 case MULTI_ARG_2_QI_CMP: mtype = v16qi_ftype_v16qi_v16qi; break;
20075 case MULTI_ARG_2_SF_TF: mtype = v4sf_ftype_v4sf_v4sf; break;
20076 case MULTI_ARG_2_DF_TF: mtype = v2df_ftype_v2df_v2df; break;
20077 case MULTI_ARG_2_DI_TF: mtype = v2di_ftype_v2di_v2di; break;
20078 case MULTI_ARG_2_SI_TF: mtype = v4si_ftype_v4si_v4si; break;
20079 case MULTI_ARG_2_HI_TF: mtype = v8hi_ftype_v8hi_v8hi; break;
20080 case MULTI_ARG_2_QI_TF: mtype = v16qi_ftype_v16qi_v16qi; break;
20081 case MULTI_ARG_1_SF: mtype = v4sf_ftype_v4sf; break;
20082 case MULTI_ARG_1_DF: mtype = v2df_ftype_v2df; break;
20083 case MULTI_ARG_1_DI: mtype = v2di_ftype_v2di; break;
20084 case MULTI_ARG_1_SI: mtype = v4si_ftype_v4si; break;
20085 case MULTI_ARG_1_HI: mtype = v8hi_ftype_v8hi; break;
20086 case MULTI_ARG_1_QI: mtype = v16qi_ftype_v16qi; break;
20087 case MULTI_ARG_1_SI_DI: mtype = v2di_ftype_v4si; break;
20088 case MULTI_ARG_1_HI_DI: mtype = v2di_ftype_v8hi; break;
20089 case MULTI_ARG_1_HI_SI: mtype = v4si_ftype_v8hi; break;
20090 case MULTI_ARG_1_QI_DI: mtype = v2di_ftype_v16qi; break;
20091 case MULTI_ARG_1_QI_SI: mtype = v4si_ftype_v16qi; break;
20092 case MULTI_ARG_1_QI_HI: mtype = v8hi_ftype_v16qi; break;
20093 case MULTI_ARG_1_PH2PS: mtype = v4sf_ftype_v4hi; break;
20094 case MULTI_ARG_1_PS2PH: mtype = v4hi_ftype_v4sf; break;
20095 case MULTI_ARG_UNKNOWN:
20096 default:
20097 gcc_unreachable ();
20100 if (mtype)
20101 def_builtin_const (d->mask, d->name, mtype, d->code);
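/* A MULTI_ARG_3_SF row, for example, is registered as
   "v4sf (v4sf, v4sf, v4sf)"; the SSE5 fused multiply-add builtins
   are the main users of these three-operand shapes.  */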
20105 static void
20106 ix86_init_builtins (void)
20108 if (TARGET_MMX)
20109 ix86_init_mmx_sse_builtins ();
20112 /* Errors in the source file can cause expand_expr to return const0_rtx
20113 where we expect a vector. To avoid crashing, use one of the vector
20114 clear instructions. */
20115 static rtx
20116 safe_vector_operand (rtx x, enum machine_mode mode)
20118 if (x == const0_rtx)
20119 x = CONST0_RTX (mode);
20120 return x;
20123 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
20125 static rtx
20126 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
20128 rtx pat;
20129 tree arg0 = CALL_EXPR_ARG (exp, 0);
20130 tree arg1 = CALL_EXPR_ARG (exp, 1);
20131 rtx op0 = expand_normal (arg0);
20132 rtx op1 = expand_normal (arg1);
20133 enum machine_mode tmode = insn_data[icode].operand[0].mode;
20134 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
20135 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
20137 if (VECTOR_MODE_P (mode0))
20138 op0 = safe_vector_operand (op0, mode0);
20139 if (VECTOR_MODE_P (mode1))
20140 op1 = safe_vector_operand (op1, mode1);
20142 if (optimize || !target
20143 || GET_MODE (target) != tmode
20144 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
20145 target = gen_reg_rtx (tmode);
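/* Some insn patterns want this operand in TImode even though the
   builtin passes an int (the SSE2 vector shift counts are the likely
   case); widen it through a V4SI load and take the TImode lowpart.  */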
20147 if (GET_MODE (op1) == SImode && mode1 == TImode)
20149 rtx x = gen_reg_rtx (V4SImode);
20150 emit_insn (gen_sse2_loadd (x, op1));
20151 op1 = gen_lowpart (TImode, x);
20154 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
20155 op0 = copy_to_mode_reg (mode0, op0);
20156 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
20157 op1 = copy_to_mode_reg (mode1, op1);
20159 pat = GEN_FCN (icode) (target, op0, op1);
20160 if (! pat)
20161 return 0;
20163 emit_insn (pat);
20165 return target;
20168 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
20170 static rtx
20171 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
20172 enum multi_arg_type m_type,
20173 enum insn_code sub_code)
20175 rtx pat;
20176 int i;
20177 int nargs;
20178 bool comparison_p = false;
20179 bool tf_p = false;
20180 bool last_arg_constant = false;
20181 int num_memory = 0;
20182 struct {
20183 rtx op;
20184 enum machine_mode mode;
20185 } args[4];
20187 enum machine_mode tmode = insn_data[icode].operand[0].mode;
20189 switch (m_type)
20191 case MULTI_ARG_3_SF:
20192 case MULTI_ARG_3_DF:
20193 case MULTI_ARG_3_DI:
20194 case MULTI_ARG_3_SI:
20195 case MULTI_ARG_3_SI_DI:
20196 case MULTI_ARG_3_HI:
20197 case MULTI_ARG_3_HI_SI:
20198 case MULTI_ARG_3_QI:
20199 case MULTI_ARG_3_PERMPS:
20200 case MULTI_ARG_3_PERMPD:
20201 nargs = 3;
20202 break;
20204 case MULTI_ARG_2_SF:
20205 case MULTI_ARG_2_DF:
20206 case MULTI_ARG_2_DI:
20207 case MULTI_ARG_2_SI:
20208 case MULTI_ARG_2_HI:
20209 case MULTI_ARG_2_QI:
20210 nargs = 2;
20211 break;
20213 case MULTI_ARG_2_DI_IMM:
20214 case MULTI_ARG_2_SI_IMM:
20215 case MULTI_ARG_2_HI_IMM:
20216 case MULTI_ARG_2_QI_IMM:
20217 nargs = 2;
20218 last_arg_constant = true;
20219 break;
20221 case MULTI_ARG_1_SF:
20222 case MULTI_ARG_1_DF:
20223 case MULTI_ARG_1_DI:
20224 case MULTI_ARG_1_SI:
20225 case MULTI_ARG_1_HI:
20226 case MULTI_ARG_1_QI:
20227 case MULTI_ARG_1_SI_DI:
20228 case MULTI_ARG_1_HI_DI:
20229 case MULTI_ARG_1_HI_SI:
20230 case MULTI_ARG_1_QI_DI:
20231 case MULTI_ARG_1_QI_SI:
20232 case MULTI_ARG_1_QI_HI:
20233 case MULTI_ARG_1_PH2PS:
20234 case MULTI_ARG_1_PS2PH:
20235 nargs = 1;
20236 break;
20238 case MULTI_ARG_2_SF_CMP:
20239 case MULTI_ARG_2_DF_CMP:
20240 case MULTI_ARG_2_DI_CMP:
20241 case MULTI_ARG_2_SI_CMP:
20242 case MULTI_ARG_2_HI_CMP:
20243 case MULTI_ARG_2_QI_CMP:
20244 nargs = 2;
20245 comparison_p = true;
20246 break;
20248 case MULTI_ARG_2_SF_TF:
20249 case MULTI_ARG_2_DF_TF:
20250 case MULTI_ARG_2_DI_TF:
20251 case MULTI_ARG_2_SI_TF:
20252 case MULTI_ARG_2_HI_TF:
20253 case MULTI_ARG_2_QI_TF:
20254 nargs = 2;
20255 tf_p = true;
20256 break;
20258 case MULTI_ARG_UNKNOWN:
20259 default:
20260 gcc_unreachable ();
20263 if (optimize || !target
20264 || GET_MODE (target) != tmode
20265 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
20266 target = gen_reg_rtx (tmode);
20268 gcc_assert (nargs <= 4);
20270 for (i = 0; i < nargs; i++)
20272 tree arg = CALL_EXPR_ARG (exp, i);
20273 rtx op = expand_normal (arg);
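/* For the comparison forms, operand 1 of the insn is the comparison
   rtx itself, so every argument operand is shifted along by one;
   ADJUST accounts for that below.  */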
20274 int adjust = (comparison_p) ? 1 : 0;
20275 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
20277 if (last_arg_constant && i == nargs-1)
20279 if (GET_CODE (op) != CONST_INT)
20281 error ("last argument must be an immediate");
20282 return gen_reg_rtx (tmode);
20285 else
20287 if (VECTOR_MODE_P (mode))
20288 op = safe_vector_operand (op, mode);
20290 /* If we aren't optimizing, only allow one memory operand to be
20291 generated. */
20292 if (memory_operand (op, mode))
20293 num_memory++;
20295 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
20297 if (optimize
20298 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
20299 || num_memory > 1)
20300 op = force_reg (mode, op);
20303 args[i].op = op;
20304 args[i].mode = mode;
20307 switch (nargs)
20309 case 1:
20310 pat = GEN_FCN (icode) (target, args[0].op);
20311 break;
20313 case 2:
20314 if (tf_p)
20315 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
20316 GEN_INT ((int)sub_code));
20317 else if (! comparison_p)
20318 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
20319 else
20321 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
20322 args[0].op,
20323 args[1].op);
20325 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
20327 break;
20329 case 3:
20330 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
20331 break;
20333 default:
20334 gcc_unreachable ();
20337 if (! pat)
20338 return 0;
20340 emit_insn (pat);
20341 return target;
20344 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
20345 insns with vec_merge. */
20347 static rtx
20348 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
20349 rtx target)
20351 rtx pat;
20352 tree arg0 = CALL_EXPR_ARG (exp, 0);
20353 rtx op1, op0 = expand_normal (arg0);
20354 enum machine_mode tmode = insn_data[icode].operand[0].mode;
20355 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
20357 if (optimize || !target
20358 || GET_MODE (target) != tmode
20359 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
20360 target = gen_reg_rtx (tmode);
20362 if (VECTOR_MODE_P (mode0))
20363 op0 = safe_vector_operand (op0, mode0);
20365 if ((optimize && !register_operand (op0, mode0))
20366 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
20367 op0 = copy_to_mode_reg (mode0, op0);
20369 op1 = op0;
20370 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
20371 op1 = copy_to_mode_reg (mode0, op1);
20373 pat = GEN_FCN (icode) (target, op0, op1);
20374 if (! pat)
20375 return 0;
20376 emit_insn (pat);
20377 return target;
20380 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
20382 static rtx
20383 ix86_expand_sse_compare (const struct builtin_description *d,
20384 tree exp, rtx target, bool swap)
20386 rtx pat;
20387 tree arg0 = CALL_EXPR_ARG (exp, 0);
20388 tree arg1 = CALL_EXPR_ARG (exp, 1);
20389 rtx op0 = expand_normal (arg0);
20390 rtx op1 = expand_normal (arg1);
20391 rtx op2;
20392 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
20393 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
20394 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
20395 enum rtx_code comparison = d->comparison;
20397 if (VECTOR_MODE_P (mode0))
20398 op0 = safe_vector_operand (op0, mode0);
20399 if (VECTOR_MODE_P (mode1))
20400 op1 = safe_vector_operand (op1, mode1);
20402 /* Swap operands if we have a comparison that isn't available in
20403 hardware. */
20404 if (swap)
20406 rtx tmp = gen_reg_rtx (mode1);
20407 emit_move_insn (tmp, op1);
20408 op1 = op0;
20409 op0 = tmp;
20412 if (optimize || !target
20413 || GET_MODE (target) != tmode
20414 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
20415 target = gen_reg_rtx (tmode);
20417 if ((optimize && !register_operand (op0, mode0))
20418 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
20419 op0 = copy_to_mode_reg (mode0, op0);
20420 if ((optimize && !register_operand (op1, mode1))
20421 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
20422 op1 = copy_to_mode_reg (mode1, op1);
20424 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
20425 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
20426 if (! pat)
20427 return 0;
20428 emit_insn (pat);
20429 return target;
20432 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
20434 static rtx
20435 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
20436 rtx target)
20438 rtx pat;
20439 tree arg0 = CALL_EXPR_ARG (exp, 0);
20440 tree arg1 = CALL_EXPR_ARG (exp, 1);
20441 rtx op0 = expand_normal (arg0);
20442 rtx op1 = expand_normal (arg1);
20443 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
20444 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
20445 enum rtx_code comparison = d->comparison;
20447 if (VECTOR_MODE_P (mode0))
20448 op0 = safe_vector_operand (op0, mode0);
20449 if (VECTOR_MODE_P (mode1))
20450 op1 = safe_vector_operand (op1, mode1);
20452 /* Swap operands if we have a comparison that isn't available in
20453 hardware. */
20454 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
20456 rtx tmp = op1;
20457 op1 = op0;
20458 op0 = tmp;
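/* Materialize the flags result as a zero-extended byte: clear an
   SImode pseudo, then write the setcc result into its low byte via
   a strict_low_part of the QImode subreg.  */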
20461 target = gen_reg_rtx (SImode);
20462 emit_move_insn (target, const0_rtx);
20463 target = gen_rtx_SUBREG (QImode, target, 0);
20465 if ((optimize && !register_operand (op0, mode0))
20466 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
20467 op0 = copy_to_mode_reg (mode0, op0);
20468 if ((optimize && !register_operand (op1, mode1))
20469 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
20470 op1 = copy_to_mode_reg (mode1, op1);
20472 pat = GEN_FCN (d->icode) (op0, op1);
20473 if (! pat)
20474 return 0;
20475 emit_insn (pat);
20476 emit_insn (gen_rtx_SET (VOIDmode,
20477 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
20478 gen_rtx_fmt_ee (comparison, QImode,
20479 SET_DEST (pat),
20480 const0_rtx)));
20482 return SUBREG_REG (target);
20485 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
20487 static rtx
20488 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
20489 rtx target)
20491 rtx pat;
20492 tree arg0 = CALL_EXPR_ARG (exp, 0);
20493 tree arg1 = CALL_EXPR_ARG (exp, 1);
20494 rtx op0 = expand_normal (arg0);
20495 rtx op1 = expand_normal (arg1);
20496 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
20497 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
20498 enum rtx_code comparison = d->comparison;
20500 if (VECTOR_MODE_P (mode0))
20501 op0 = safe_vector_operand (op0, mode0);
20502 if (VECTOR_MODE_P (mode1))
20503 op1 = safe_vector_operand (op1, mode1);
20505 target = gen_reg_rtx (SImode);
20506 emit_move_insn (target, const0_rtx);
20507 target = gen_rtx_SUBREG (QImode, target, 0);
20509 if ((optimize && !register_operand (op0, mode0))
20510 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
20511 op0 = copy_to_mode_reg (mode0, op0);
20512 if ((optimize && !register_operand (op1, mode1))
20513 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
20514 op1 = copy_to_mode_reg (mode1, op1);
20516 pat = GEN_FCN (d->icode) (op0, op1);
20517 if (! pat)
20518 return 0;
20519 emit_insn (pat);
20520 emit_insn (gen_rtx_SET (VOIDmode,
20521 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
20522 gen_rtx_fmt_ee (comparison, QImode,
20523 SET_DEST (pat),
20524 const0_rtx)));
20526 return SUBREG_REG (target);
20529 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
20531 static rtx
20532 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
20533 tree exp, rtx target)
20535 rtx pat;
20536 tree arg0 = CALL_EXPR_ARG (exp, 0);
20537 tree arg1 = CALL_EXPR_ARG (exp, 1);
20538 tree arg2 = CALL_EXPR_ARG (exp, 2);
20539 tree arg3 = CALL_EXPR_ARG (exp, 3);
20540 tree arg4 = CALL_EXPR_ARG (exp, 4);
20541 rtx scratch0, scratch1;
20542 rtx op0 = expand_normal (arg0);
20543 rtx op1 = expand_normal (arg1);
20544 rtx op2 = expand_normal (arg2);
20545 rtx op3 = expand_normal (arg3);
20546 rtx op4 = expand_normal (arg4);
20547 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
20549 tmode0 = insn_data[d->icode].operand[0].mode;
20550 tmode1 = insn_data[d->icode].operand[1].mode;
20551 modev2 = insn_data[d->icode].operand[2].mode;
20552 modei3 = insn_data[d->icode].operand[3].mode;
20553 modev4 = insn_data[d->icode].operand[4].mode;
20554 modei5 = insn_data[d->icode].operand[5].mode;
20555 modeimm = insn_data[d->icode].operand[6].mode;
20557 if (VECTOR_MODE_P (modev2))
20558 op0 = safe_vector_operand (op0, modev2);
20559 if (VECTOR_MODE_P (modev4))
20560 op2 = safe_vector_operand (op2, modev4);
20562 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
20563 op0 = copy_to_mode_reg (modev2, op0);
20564 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
20565 op1 = copy_to_mode_reg (modei3, op1);
20566 if ((optimize && !register_operand (op2, modev4))
20567 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
20568 op2 = copy_to_mode_reg (modev4, op2);
20569 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
20570 op3 = copy_to_mode_reg (modei5, op3);
20572 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
20574 error ("the fifth argument must be an 8-bit immediate");
20575 return const0_rtx;
20578 if (d->code == IX86_BUILTIN_PCMPESTRI128)
20580 if (optimize || !target
20581 || GET_MODE (target) != tmode0
20582 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
20583 target = gen_reg_rtx (tmode0);
20585 scratch1 = gen_reg_rtx (tmode1);
20587 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
20589 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
20591 if (optimize || !target
20592 || GET_MODE (target) != tmode1
20593 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
20594 target = gen_reg_rtx (tmode1);
20596 scratch0 = gen_reg_rtx (tmode0);
20598 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
20600 else
20602 gcc_assert (d->flag);
20604 scratch0 = gen_reg_rtx (tmode0);
20605 scratch1 = gen_reg_rtx (tmode1);
20607 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
20610 if (! pat)
20611 return 0;
20613 emit_insn (pat);
20615 if (d->flag)
20617 target = gen_reg_rtx (SImode);
20618 emit_move_insn (target, const0_rtx);
20619 target = gen_rtx_SUBREG (QImode, target, 0);
20621 emit_insn
20622 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
20623 gen_rtx_fmt_ee (EQ, QImode,
20624 gen_rtx_REG ((enum machine_mode) d->flag,
20625 FLAGS_REG),
20626 const0_rtx)));
20627 return SUBREG_REG (target);
20629 else
20630 return target;
20634 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
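/* This mirrors ix86_expand_sse_pcmpestr above; the implicit-length
forms take no explicit length operands, so only two data operands and
the mode immediate remain. */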
20636 static rtx
20637 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
20638 tree exp, rtx target)
20640 rtx pat;
20641 tree arg0 = CALL_EXPR_ARG (exp, 0);
20642 tree arg1 = CALL_EXPR_ARG (exp, 1);
20643 tree arg2 = CALL_EXPR_ARG (exp, 2);
20644 rtx scratch0, scratch1;
20645 rtx op0 = expand_normal (arg0);
20646 rtx op1 = expand_normal (arg1);
20647 rtx op2 = expand_normal (arg2);
20648 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
20650 tmode0 = insn_data[d->icode].operand[0].mode;
20651 tmode1 = insn_data[d->icode].operand[1].mode;
20652 modev2 = insn_data[d->icode].operand[2].mode;
20653 modev3 = insn_data[d->icode].operand[3].mode;
20654 modeimm = insn_data[d->icode].operand[4].mode;
20656 if (VECTOR_MODE_P (modev2))
20657 op0 = safe_vector_operand (op0, modev2);
20658 if (VECTOR_MODE_P (modev3))
20659 op1 = safe_vector_operand (op1, modev3);
20661 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
20662 op0 = copy_to_mode_reg (modev2, op0);
20663 if ((optimize && !register_operand (op1, modev3))
20664 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
20665 op1 = copy_to_mode_reg (modev3, op1);
20667 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
20669 error ("the third argument must be an 8-bit immediate");
20670 return const0_rtx;
20673 if (d->code == IX86_BUILTIN_PCMPISTRI128)
20675 if (optimize || !target
20676 || GET_MODE (target) != tmode0
20677 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
20678 target = gen_reg_rtx (tmode0);
20680 scratch1 = gen_reg_rtx (tmode1);
20682 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
20684 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
20686 if (optimize || !target
20687 || GET_MODE (target) != tmode1
20688 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
20689 target = gen_reg_rtx (tmode1);
20691 scratch0 = gen_reg_rtx (tmode0);
20693 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
20695 else
20697 gcc_assert (d->flag);
20699 scratch0 = gen_reg_rtx (tmode0);
20700 scratch1 = gen_reg_rtx (tmode1);
20702 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
20705 if (! pat)
20706 return 0;
20708 emit_insn (pat);
20710 if (d->flag)
20712 target = gen_reg_rtx (SImode);
20713 emit_move_insn (target, const0_rtx);
20714 target = gen_rtx_SUBREG (QImode, target, 0);
20716 emit_insn
20717 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
20718 gen_rtx_fmt_ee (EQ, QImode,
20719 gen_rtx_REG ((enum machine_mode) d->flag,
20720 FLAGS_REG),
20721 const0_rtx)));
20722 return SUBREG_REG (target);
20724 else
20725 return target;
20728 /* Subroutine of ix86_expand_builtin to take care of insns with a
20729 variable number of operands. */
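/* The switch below classifies each builtin signature: NARGS is the
argument count, NARGS_CONSTANT how many trailing arguments must be
immediates, RMODE a result mode that differs from the insn's operand 0
mode (accessed through a subreg), SWAP whether the operands of a
comparison must be swapped, and LAST_ARG_COUNT whether the last
argument is a shift count. */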
20731 static rtx
20732 ix86_expand_args_builtin (const struct builtin_description *d,
20733 tree exp, rtx target)
20735 rtx pat, real_target;
20736 unsigned int i, nargs;
20737 unsigned int nargs_constant = 0;
20738 int num_memory = 0;
20739 struct
20741 rtx op;
20742 enum machine_mode mode;
20743 } args[4];
20744 bool last_arg_count = false;
20745 enum insn_code icode = d->icode;
20746 const struct insn_data *insn_p = &insn_data[icode];
20747 enum machine_mode tmode = insn_p->operand[0].mode;
20748 enum machine_mode rmode = VOIDmode;
20749 bool swap = false;
20750 enum rtx_code comparison = d->comparison;
20752 switch ((enum ix86_builtin_type) d->flag)
20754 case INT_FTYPE_V2DI_V2DI_PTEST:
20755 return ix86_expand_sse_ptest (d, exp, target);
20756 case FLOAT128_FTYPE_FLOAT128:
20757 case FLOAT_FTYPE_FLOAT:
20758 case INT64_FTYPE_V4SF:
20759 case INT64_FTYPE_V2DF:
20760 case INT_FTYPE_V16QI:
20761 case INT_FTYPE_V8QI:
20762 case INT_FTYPE_V4SF:
20763 case INT_FTYPE_V2DF:
20764 case V16QI_FTYPE_V16QI:
20765 case V8HI_FTYPE_V8HI:
20766 case V8HI_FTYPE_V16QI:
20767 case V8QI_FTYPE_V8QI:
20768 case V4SI_FTYPE_V4SI:
20769 case V4SI_FTYPE_V16QI:
20770 case V4SI_FTYPE_V4SF:
20771 case V4SI_FTYPE_V8HI:
20772 case V4SI_FTYPE_V2DF:
20773 case V4HI_FTYPE_V4HI:
20774 case V4SF_FTYPE_V4SF:
20775 case V4SF_FTYPE_V4SI:
20776 case V4SF_FTYPE_V2DF:
20777 case V2DI_FTYPE_V2DI:
20778 case V2DI_FTYPE_V16QI:
20779 case V2DI_FTYPE_V8HI:
20780 case V2DI_FTYPE_V4SI:
20781 case V2DF_FTYPE_V2DF:
20782 case V2DF_FTYPE_V4SI:
20783 case V2DF_FTYPE_V4SF:
20784 case V2DF_FTYPE_V2SI:
20785 case V2SI_FTYPE_V2SI:
20786 case V2SI_FTYPE_V4SF:
20787 case V2SI_FTYPE_V2SF:
20788 case V2SI_FTYPE_V2DF:
20789 case V2SF_FTYPE_V2SF:
20790 case V2SF_FTYPE_V2SI:
20791 nargs = 1;
20792 break;
20793 case V4SF_FTYPE_V4SF_VEC_MERGE:
20794 case V2DF_FTYPE_V2DF_VEC_MERGE:
20795 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
20796 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
20797 case V16QI_FTYPE_V16QI_V16QI:
20798 case V16QI_FTYPE_V8HI_V8HI:
20799 case V8QI_FTYPE_V8QI_V8QI:
20800 case V8QI_FTYPE_V4HI_V4HI:
20801 case V8HI_FTYPE_V8HI_V8HI:
20802 case V8HI_FTYPE_V16QI_V16QI:
20803 case V8HI_FTYPE_V4SI_V4SI:
20804 case V4SI_FTYPE_V4SI_V4SI:
20805 case V4SI_FTYPE_V8HI_V8HI:
20806 case V4SI_FTYPE_V4SF_V4SF:
20807 case V4SI_FTYPE_V2DF_V2DF:
20808 case V4HI_FTYPE_V4HI_V4HI:
20809 case V4HI_FTYPE_V8QI_V8QI:
20810 case V4HI_FTYPE_V2SI_V2SI:
20811 case V4SF_FTYPE_V4SF_V4SF:
20812 case V4SF_FTYPE_V4SF_V2SI:
20813 case V4SF_FTYPE_V4SF_V2DF:
20814 case V4SF_FTYPE_V4SF_DI:
20815 case V4SF_FTYPE_V4SF_SI:
20816 case V2DI_FTYPE_V2DI_V2DI:
20817 case V2DI_FTYPE_V16QI_V16QI:
20818 case V2DI_FTYPE_V4SI_V4SI:
20819 case V2DI_FTYPE_V2DI_V16QI:
20820 case V2DI_FTYPE_V2DF_V2DF:
20821 case V2SI_FTYPE_V2SI_V2SI:
20822 case V2SI_FTYPE_V4HI_V4HI:
20823 case V2SI_FTYPE_V2SF_V2SF:
20824 case V2DF_FTYPE_V2DF_V2DF:
20825 case V2DF_FTYPE_V2DF_V4SF:
20826 case V2DF_FTYPE_V2DF_DI:
20827 case V2DF_FTYPE_V2DF_SI:
20828 case V2SF_FTYPE_V2SF_V2SF:
20829 case V1DI_FTYPE_V1DI_V1DI:
20830 case V1DI_FTYPE_V8QI_V8QI:
20831 case V1DI_FTYPE_V2SI_V2SI:
20832 if (comparison == UNKNOWN)
20833 return ix86_expand_binop_builtin (icode, exp, target);
20834 nargs = 2;
20835 break;
20836 case V4SF_FTYPE_V4SF_V4SF_SWAP:
20837 case V2DF_FTYPE_V2DF_V2DF_SWAP:
20838 gcc_assert (comparison != UNKNOWN);
20839 nargs = 2;
20840 swap = true;
20841 break;
20842 case V8HI_FTYPE_V8HI_V8HI_COUNT:
20843 case V8HI_FTYPE_V8HI_SI_COUNT:
20844 case V4SI_FTYPE_V4SI_V4SI_COUNT:
20845 case V4SI_FTYPE_V4SI_SI_COUNT:
20846 case V4HI_FTYPE_V4HI_V4HI_COUNT:
20847 case V4HI_FTYPE_V4HI_SI_COUNT:
20848 case V2DI_FTYPE_V2DI_V2DI_COUNT:
20849 case V2DI_FTYPE_V2DI_SI_COUNT:
20850 case V2SI_FTYPE_V2SI_V2SI_COUNT:
20851 case V2SI_FTYPE_V2SI_SI_COUNT:
20852 case V1DI_FTYPE_V1DI_V1DI_COUNT:
20853 case V1DI_FTYPE_V1DI_SI_COUNT:
20854 nargs = 2;
20855 last_arg_count = true;
20856 break;
20857 case UINT64_FTYPE_UINT64_UINT64:
20858 case UINT_FTYPE_UINT_UINT:
20859 case UINT_FTYPE_UINT_USHORT:
20860 case UINT_FTYPE_UINT_UCHAR:
20861 nargs = 2;
20862 break;
20863 case V2DI2TI_FTYPE_V2DI_INT:
20864 nargs = 2;
20865 rmode = V2DImode;
20866 nargs_constant = 1;
20867 break;
20868 case V8HI_FTYPE_V8HI_INT:
20869 case V4SI_FTYPE_V4SI_INT:
20870 case V4HI_FTYPE_V4HI_INT:
20871 case V4SF_FTYPE_V4SF_INT:
20872 case V2DI_FTYPE_V2DI_INT:
20873 case V2DF_FTYPE_V2DF_INT:
20874 nargs = 2;
20875 nargs_constant = 1;
20876 break;
20877 case V16QI_FTYPE_V16QI_V16QI_V16QI:
20878 case V4SF_FTYPE_V4SF_V4SF_V4SF:
20879 case V2DF_FTYPE_V2DF_V2DF_V2DF:
20880 nargs = 3;
20881 break;
20882 case V16QI_FTYPE_V16QI_V16QI_INT:
20883 case V8HI_FTYPE_V8HI_V8HI_INT:
20884 case V4SI_FTYPE_V4SI_V4SI_INT:
20885 case V4SF_FTYPE_V4SF_V4SF_INT:
20886 case V2DI_FTYPE_V2DI_V2DI_INT:
20887 case V2DF_FTYPE_V2DF_V2DF_INT:
20888 nargs = 3;
20889 nargs_constant = 1;
20890 break;
20891 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
20892 nargs = 3;
20893 rmode = V2DImode;
20894 nargs_constant = 1;
20895 break;
20896 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
20897 nargs = 3;
20898 rmode = DImode;
20899 nargs_constant = 1;
20900 break;
20901 case V2DI_FTYPE_V2DI_UINT_UINT:
20902 nargs = 3;
20903 nargs_constant = 2;
20904 break;
20905 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
20906 nargs = 4;
20907 nargs_constant = 2;
20908 break;
20909 default:
20910 gcc_unreachable ();
20913 gcc_assert (nargs <= ARRAY_SIZE (args));
20915 if (comparison != UNKNOWN)
20917 gcc_assert (nargs == 2);
20918 return ix86_expand_sse_compare (d, exp, target, swap);
20921 if (rmode == VOIDmode || rmode == tmode)
20923 if (optimize
20924 || target == 0
20925 || GET_MODE (target) != tmode
20926 || ! (*insn_p->operand[0].predicate) (target, tmode))
20927 target = gen_reg_rtx (tmode);
20928 real_target = target;
20930 else
20932 target = gen_reg_rtx (rmode);
20933 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
20936 for (i = 0; i < nargs; i++)
20938 tree arg = CALL_EXPR_ARG (exp, i);
20939 rtx op = expand_normal (arg);
20940 enum machine_mode mode = insn_p->operand[i + 1].mode;
20941 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
20943 if (last_arg_count && (i + 1) == nargs)
20945 /* SIMD shift insns take either an 8-bit immediate or
20946 register as count. But builtin functions take int as
20947 count. If count doesn't match, we put it in register. */
20948 if (!match)
20950 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
20951 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
20952 op = copy_to_reg (op);
20955 else if ((nargs - i) <= nargs_constant)
20957 if (!match)
20958 switch (icode)
20960 case CODE_FOR_sse4_1_roundpd:
20961 case CODE_FOR_sse4_1_roundps:
20962 case CODE_FOR_sse4_1_roundsd:
20963 case CODE_FOR_sse4_1_roundss:
20964 case CODE_FOR_sse4_1_blendps:
20965 error ("the last argument must be a 4-bit immediate");
20966 return const0_rtx;
20968 case CODE_FOR_sse4_1_blendpd:
20969 error ("the last argument must be a 2-bit immediate");
20970 return const0_rtx;
20972 default:
20973 switch (nargs_constant)
20975 case 2:
20976 if ((nargs - i) == nargs_constant)
20978 error ("the next to last argument must be an 8-bit immediate");
20979 break;
20981 case 1:
20982 error ("the last argument must be an 8-bit immediate");
20983 break;
20984 default:
20985 gcc_unreachable ();
20987 return const0_rtx;
20990 else
20992 if (VECTOR_MODE_P (mode))
20993 op = safe_vector_operand (op, mode);
20995 /* If we aren't optimizing, only allow one memory operand to
20996 be generated. */
20997 if (memory_operand (op, mode))
20998 num_memory++;
21000 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
21002 if (optimize || !match || num_memory > 1)
21003 op = copy_to_mode_reg (mode, op);
21005 else
21007 op = copy_to_reg (op);
21008 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
21012 args[i].op = op;
21013 args[i].mode = mode;
21016 switch (nargs)
21018 case 1:
21019 pat = GEN_FCN (icode) (real_target, args[0].op);
21020 break;
21021 case 2:
21022 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
21023 break;
21024 case 3:
21025 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
21026 args[2].op);
21027 break;
21028 case 4:
21029 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
21030 args[2].op, args[3].op);
21031 break;
21032 default:
21033 gcc_unreachable ();
21036 if (! pat)
21037 return 0;
21039 emit_insn (pat);
21040 return target;
21043 /* Subroutine of ix86_expand_builtin to take care of special insns
21044 with a variable number of operands. */
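/* Each special builtin is classified as a load or a store. MEMORY is
the index in ARGS of the memory operand; for stores the destination
itself is the memory operand, so MEMORY is set past the end of ARGS
and the target MEM is built from the first argument instead. */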
21046 static rtx
21047 ix86_expand_special_args_builtin (const struct builtin_description *d,
21048 tree exp, rtx target)
21050 tree arg;
21051 rtx pat, op;
21052 unsigned int i, nargs, arg_adjust, memory;
21053 struct
21055 rtx op;
21056 enum machine_mode mode;
21057 } args[2];
21058 enum insn_code icode = d->icode;
21059 bool last_arg_constant = false;
21060 const struct insn_data *insn_p = &insn_data[icode];
21061 enum machine_mode tmode = insn_p->operand[0].mode;
21062 enum { load, store } class;
21064 switch ((enum ix86_special_builtin_type) d->flag)
21066 case VOID_FTYPE_VOID:
21067 emit_insn (GEN_FCN (icode) (target));
21068 return 0;
21069 case V2DI_FTYPE_PV2DI:
21070 case V16QI_FTYPE_PCCHAR:
21071 case V4SF_FTYPE_PCFLOAT:
21072 case V2DF_FTYPE_PCDOUBLE:
21073 nargs = 1;
21074 class = load;
21075 memory = 0;
21076 break;
21077 case VOID_FTYPE_PV2SF_V4SF:
21078 case VOID_FTYPE_PV2DI_V2DI:
21079 case VOID_FTYPE_PCHAR_V16QI:
21080 case VOID_FTYPE_PFLOAT_V4SF:
21081 case VOID_FTYPE_PDOUBLE_V2DF:
21082 case VOID_FTYPE_PDI_DI:
21083 case VOID_FTYPE_PINT_INT:
21084 nargs = 1;
21085 class = store;
21086 /* Reserve memory operand for target. */
21087 memory = ARRAY_SIZE (args);
21088 break;
21089 case V4SF_FTYPE_V4SF_PCV2SF:
21090 case V2DF_FTYPE_V2DF_PCDOUBLE:
21091 nargs = 2;
21092 class = load;
21093 memory = 1;
21094 break;
21095 default:
21096 gcc_unreachable ();
21099 gcc_assert (nargs <= ARRAY_SIZE (args));
21101 if (class == store)
21103 arg = CALL_EXPR_ARG (exp, 0);
21104 op = expand_normal (arg);
21105 gcc_assert (target == 0);
21106 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
21107 arg_adjust = 1;
21109 else
21111 arg_adjust = 0;
21112 if (optimize
21113 || target == 0
21114 || GET_MODE (target) != tmode
21115 || ! (*insn_p->operand[0].predicate) (target, tmode))
21116 target = gen_reg_rtx (tmode);
21119 for (i = 0; i < nargs; i++)
21121 enum machine_mode mode = insn_p->operand[i + 1].mode;
21122 bool match;
21124 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
21125 op = expand_normal (arg);
21126 match = (*insn_p->operand[i + 1].predicate) (op, mode);
21128 if (last_arg_constant && (i + 1) == nargs)
21130 if (!match)
21131 switch (icode)
21133 default:
21134 error ("the last argument must be an 8-bit immediate");
21135 return const0_rtx;
21138 else
21140 if (i == memory)
21142 /* This must be the memory operand. */
21143 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
21144 gcc_assert (GET_MODE (op) == mode
21145 || GET_MODE (op) == VOIDmode);
21147 else
21149 /* This must be register. */
21150 if (VECTOR_MODE_P (mode))
21151 op = safe_vector_operand (op, mode);
21153 gcc_assert (GET_MODE (op) == mode
21154 || GET_MODE (op) == VOIDmode);
21155 op = copy_to_mode_reg (mode, op);
21159 args[i].op = op;
21160 args[i].mode = mode;
21163 switch (nargs)
21165 case 1:
21166 pat = GEN_FCN (icode) (target, args[0].op);
21167 break;
21168 case 2:
21169 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
21170 break;
21171 default:
21172 gcc_unreachable ();
21175 if (! pat)
21176 return 0;
21177 emit_insn (pat);
21178 return class == store ? 0 : target;
21181 /* Return the integer constant in ARG. Constrain it to be in the range
21182 of the subparts of VEC_TYPE; issue an error if not. */
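/* For example, a V4SF vector has four subparts, so the selector passed
to a vec_ext/vec_set builtin on that type must be a constant in the
range 0..3. */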
21184 static int
21185 get_element_number (tree vec_type, tree arg)
21187 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
21189 if (!host_integerp (arg, 1)
21190 || (elt = tree_low_cst (arg, 1), elt > max))
21192 error ("selector must be an integer constant in the range 0..%wi", max);
21193 return 0;
21196 return elt;
21199 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
21200 ix86_expand_vector_init. We DO have language-level syntax for this, in
21201 the form of (type){ init-list }. Except that since we can't place emms
21202 instructions from inside the compiler, we can't allow the use of MMX
21203 registers unless the user explicitly asks for it. So we do *not* define
21204 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
21205 we have builtins invoked by mmintrin.h that give us license to emit
21206 these sorts of instructions. */
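/* For instance, the mmintrin.h wrapper _mm_set_pi16 is expected to
expand through __builtin_ia32_vec_init_v4hi
(IX86_BUILTIN_VEC_INIT_V4HI) and end up here. */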
21208 static rtx
21209 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
21211 enum machine_mode tmode = TYPE_MODE (type);
21212 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
21213 int i, n_elt = GET_MODE_NUNITS (tmode);
21214 rtvec v = rtvec_alloc (n_elt);
21216 gcc_assert (VECTOR_MODE_P (tmode));
21217 gcc_assert (call_expr_nargs (exp) == n_elt);
21219 for (i = 0; i < n_elt; ++i)
21221 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
21222 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
21225 if (!target || !register_operand (target, tmode))
21226 target = gen_reg_rtx (tmode);
21228 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
21229 return target;
21232 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
21233 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
21234 had a language-level syntax for referencing vector elements. */
21236 static rtx
21237 ix86_expand_vec_ext_builtin (tree exp, rtx target)
21239 enum machine_mode tmode, mode0;
21240 tree arg0, arg1;
21241 int elt;
21242 rtx op0;
21244 arg0 = CALL_EXPR_ARG (exp, 0);
21245 arg1 = CALL_EXPR_ARG (exp, 1);
21247 op0 = expand_normal (arg0);
21248 elt = get_element_number (TREE_TYPE (arg0), arg1);
21250 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
21251 mode0 = TYPE_MODE (TREE_TYPE (arg0));
21252 gcc_assert (VECTOR_MODE_P (mode0));
21254 op0 = force_reg (mode0, op0);
21256 if (optimize || !target || !register_operand (target, tmode))
21257 target = gen_reg_rtx (tmode);
21259 ix86_expand_vector_extract (true, target, op0, elt);
21261 return target;
21264 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
21265 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
21266 a language-level syntax for referencing vector elements. */
21268 static rtx
21269 ix86_expand_vec_set_builtin (tree exp)
21271 enum machine_mode tmode, mode1;
21272 tree arg0, arg1, arg2;
21273 int elt;
21274 rtx op0, op1, target;
21276 arg0 = CALL_EXPR_ARG (exp, 0);
21277 arg1 = CALL_EXPR_ARG (exp, 1);
21278 arg2 = CALL_EXPR_ARG (exp, 2);
21280 tmode = TYPE_MODE (TREE_TYPE (arg0));
21281 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
21282 gcc_assert (VECTOR_MODE_P (tmode));
21284 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
21285 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
21286 elt = get_element_number (TREE_TYPE (arg0), arg2);
21288 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
21289 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
21291 op0 = force_reg (tmode, op0);
21292 op1 = force_reg (mode1, op1);
21294 /* OP0 is the source of these builtin functions and shouldn't be
21295 modified. Create a copy, use it and return it as target. */
21296 target = gen_reg_rtx (tmode);
21297 emit_move_insn (target, op0);
21298 ix86_expand_vector_set (true, target, op1, elt);
21300 return target;
21303 /* Expand an expression EXP that calls a built-in function,
21304 with result going to TARGET if that's convenient
21305 (and in mode MODE if that's convenient).
21306 SUBTARGET may be used as the target for computing one of EXP's operands.
21307 IGNORE is nonzero if the value is to be ignored. */
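/* A few builtins need hand-written expanders and are handled in the
switch below; everything else is located by scanning the descriptor
tables (bdesc_special_args, bdesc_args, bdesc_comi, bdesc_pcmpestr,
bdesc_pcmpistr and bdesc_multi_arg) for a matching function code. */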
21309 static rtx
21310 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
21311 enum machine_mode mode ATTRIBUTE_UNUSED,
21312 int ignore ATTRIBUTE_UNUSED)
21314 const struct builtin_description *d;
21315 size_t i;
21316 enum insn_code icode;
21317 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
21318 tree arg0, arg1, arg2;
21319 rtx op0, op1, op2, pat;
21320 enum machine_mode mode0, mode1, mode2;
21321 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
21323 switch (fcode)
21325 case IX86_BUILTIN_MASKMOVQ:
21326 case IX86_BUILTIN_MASKMOVDQU:
21327 icode = (fcode == IX86_BUILTIN_MASKMOVQ
21328 ? CODE_FOR_mmx_maskmovq
21329 : CODE_FOR_sse2_maskmovdqu);
21330 /* Note the arg order is different from the operand order. */
21331 arg1 = CALL_EXPR_ARG (exp, 0);
21332 arg2 = CALL_EXPR_ARG (exp, 1);
21333 arg0 = CALL_EXPR_ARG (exp, 2);
21334 op0 = expand_normal (arg0);
21335 op1 = expand_normal (arg1);
21336 op2 = expand_normal (arg2);
21337 mode0 = insn_data[icode].operand[0].mode;
21338 mode1 = insn_data[icode].operand[1].mode;
21339 mode2 = insn_data[icode].operand[2].mode;
21341 op0 = force_reg (Pmode, op0);
21342 op0 = gen_rtx_MEM (mode1, op0);
21344 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
21345 op0 = copy_to_mode_reg (mode0, op0);
21346 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
21347 op1 = copy_to_mode_reg (mode1, op1);
21348 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
21349 op2 = copy_to_mode_reg (mode2, op2);
21350 pat = GEN_FCN (icode) (op0, op1, op2);
21351 if (! pat)
21352 return 0;
21353 emit_insn (pat);
21354 return 0;
21356 case IX86_BUILTIN_LDMXCSR:
21357 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
21358 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
21359 emit_move_insn (target, op0);
21360 emit_insn (gen_sse_ldmxcsr (target));
21361 return 0;
21363 case IX86_BUILTIN_STMXCSR:
21364 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
21365 emit_insn (gen_sse_stmxcsr (target));
21366 return copy_to_mode_reg (SImode, target);
21368 case IX86_BUILTIN_CLFLUSH:
21369 arg0 = CALL_EXPR_ARG (exp, 0);
21370 op0 = expand_normal (arg0);
21371 icode = CODE_FOR_sse2_clflush;
21372 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
21373 op0 = copy_to_mode_reg (Pmode, op0);
21375 emit_insn (gen_sse2_clflush (op0));
21376 return 0;
21378 case IX86_BUILTIN_MONITOR:
21379 arg0 = CALL_EXPR_ARG (exp, 0);
21380 arg1 = CALL_EXPR_ARG (exp, 1);
21381 arg2 = CALL_EXPR_ARG (exp, 2);
21382 op0 = expand_normal (arg0);
21383 op1 = expand_normal (arg1);
21384 op2 = expand_normal (arg2);
21385 if (!REG_P (op0))
21386 op0 = copy_to_mode_reg (Pmode, op0);
21387 if (!REG_P (op1))
21388 op1 = copy_to_mode_reg (SImode, op1);
21389 if (!REG_P (op2))
21390 op2 = copy_to_mode_reg (SImode, op2);
21391 if (!TARGET_64BIT)
21392 emit_insn (gen_sse3_monitor (op0, op1, op2));
21393 else
21394 emit_insn (gen_sse3_monitor64 (op0, op1, op2));
21395 return 0;
21397 case IX86_BUILTIN_MWAIT:
21398 arg0 = CALL_EXPR_ARG (exp, 0);
21399 arg1 = CALL_EXPR_ARG (exp, 1);
21400 op0 = expand_normal (arg0);
21401 op1 = expand_normal (arg1);
21402 if (!REG_P (op0))
21403 op0 = copy_to_mode_reg (SImode, op0);
21404 if (!REG_P (op1))
21405 op1 = copy_to_mode_reg (SImode, op1);
21406 emit_insn (gen_sse3_mwait (op0, op1));
21407 return 0;
21409 case IX86_BUILTIN_VEC_INIT_V2SI:
21410 case IX86_BUILTIN_VEC_INIT_V4HI:
21411 case IX86_BUILTIN_VEC_INIT_V8QI:
21412 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
21414 case IX86_BUILTIN_VEC_EXT_V2DF:
21415 case IX86_BUILTIN_VEC_EXT_V2DI:
21416 case IX86_BUILTIN_VEC_EXT_V4SF:
21417 case IX86_BUILTIN_VEC_EXT_V4SI:
21418 case IX86_BUILTIN_VEC_EXT_V8HI:
21419 case IX86_BUILTIN_VEC_EXT_V2SI:
21420 case IX86_BUILTIN_VEC_EXT_V4HI:
21421 case IX86_BUILTIN_VEC_EXT_V16QI:
21422 return ix86_expand_vec_ext_builtin (exp, target);
21424 case IX86_BUILTIN_VEC_SET_V2DI:
21425 case IX86_BUILTIN_VEC_SET_V4SF:
21426 case IX86_BUILTIN_VEC_SET_V4SI:
21427 case IX86_BUILTIN_VEC_SET_V8HI:
21428 case IX86_BUILTIN_VEC_SET_V4HI:
21429 case IX86_BUILTIN_VEC_SET_V16QI:
21430 return ix86_expand_vec_set_builtin (exp);
21432 case IX86_BUILTIN_INFQ:
21434 REAL_VALUE_TYPE inf;
21435 rtx tmp;
21437 real_inf (&inf);
21438 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
21440 tmp = validize_mem (force_const_mem (mode, tmp));
21442 if (target == 0)
21443 target = gen_reg_rtx (mode);
21445 emit_move_insn (target, tmp);
21446 return target;
21449 default:
21450 break;
21453 for (i = 0, d = bdesc_special_args;
21454 i < ARRAY_SIZE (bdesc_special_args);
21455 i++, d++)
21456 if (d->code == fcode)
21457 return ix86_expand_special_args_builtin (d, exp, target);
21459 for (i = 0, d = bdesc_args;
21460 i < ARRAY_SIZE (bdesc_args);
21461 i++, d++)
21462 if (d->code == fcode)
21463 return ix86_expand_args_builtin (d, exp, target);
21465 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
21466 if (d->code == fcode)
21467 return ix86_expand_sse_comi (d, exp, target);
21469 for (i = 0, d = bdesc_pcmpestr;
21470 i < ARRAY_SIZE (bdesc_pcmpestr);
21471 i++, d++)
21472 if (d->code == fcode)
21473 return ix86_expand_sse_pcmpestr (d, exp, target);
21475 for (i = 0, d = bdesc_pcmpistr;
21476 i < ARRAY_SIZE (bdesc_pcmpistr);
21477 i++, d++)
21478 if (d->code == fcode)
21479 return ix86_expand_sse_pcmpistr (d, exp, target);
21481 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
21482 if (d->code == fcode)
21483 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
21484 (enum multi_arg_type)d->flag,
21485 d->comparison);
21487 gcc_unreachable ();
21490 /* Returns a function decl for a vectorized version of the builtin function
21491 with builtin function code FN and the result vector type TYPE, or NULL_TREE
21492 if it is not available. */
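/* E.g., a loop applying sqrt to a double array can be vectorized two
elements at a time through IX86_BUILTIN_SQRTPD (V2DF -> V2DF). */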
21494 static tree
21495 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
21496 tree type_in)
21498 enum machine_mode in_mode, out_mode;
21499 int in_n, out_n;
21501 if (TREE_CODE (type_out) != VECTOR_TYPE
21502 || TREE_CODE (type_in) != VECTOR_TYPE)
21503 return NULL_TREE;
21505 out_mode = TYPE_MODE (TREE_TYPE (type_out));
21506 out_n = TYPE_VECTOR_SUBPARTS (type_out);
21507 in_mode = TYPE_MODE (TREE_TYPE (type_in));
21508 in_n = TYPE_VECTOR_SUBPARTS (type_in);
21510 switch (fn)
21512 case BUILT_IN_SQRT:
21513 if (out_mode == DFmode && out_n == 2
21514 && in_mode == DFmode && in_n == 2)
21515 return ix86_builtins[IX86_BUILTIN_SQRTPD];
21516 break;
21518 case BUILT_IN_SQRTF:
21519 if (out_mode == SFmode && out_n == 4
21520 && in_mode == SFmode && in_n == 4)
21521 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
21522 break;
21524 case BUILT_IN_LRINT:
21525 if (out_mode == SImode && out_n == 4
21526 && in_mode == DFmode && in_n == 2)
21527 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
21528 break;
21530 case BUILT_IN_LRINTF:
21531 if (out_mode == SImode && out_n == 4
21532 && in_mode == SFmode && in_n == 4)
21533 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
21534 break;
21536 default:
21540 /* Dispatch to a handler for a vectorization library. */
21541 if (ix86_veclib_handler)
21542 return (*ix86_veclib_handler)(fn, type_out, type_in);
21544 return NULL_TREE;
21547 /* Handler for an SVML-style interface to
21548 a library with vectorized intrinsics. */
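/* SVML names are derived from the scalar builtin name: a "vmls" or
"vmld" prefix for the 4-wide float and 2-wide double variants, the
first letter of the function name uppercased and the vector width
appended, e.g. sinf -> vmlsSin4 and sin -> vmldSin2. The log functions
are special-cased as vmlsLn4 and vmldLn2. */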
21550 static tree
21551 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
21553 char name[20];
21554 tree fntype, new_fndecl, args;
21555 unsigned arity;
21556 const char *bname;
21557 enum machine_mode el_mode, in_mode;
21558 int n, in_n;
21560 /* The SVML is suitable for unsafe math only. */
21561 if (!flag_unsafe_math_optimizations)
21562 return NULL_TREE;
21564 el_mode = TYPE_MODE (TREE_TYPE (type_out));
21565 n = TYPE_VECTOR_SUBPARTS (type_out);
21566 in_mode = TYPE_MODE (TREE_TYPE (type_in));
21567 in_n = TYPE_VECTOR_SUBPARTS (type_in);
21568 if (el_mode != in_mode
21569 || n != in_n)
21570 return NULL_TREE;
21572 switch (fn)
21574 case BUILT_IN_EXP:
21575 case BUILT_IN_LOG:
21576 case BUILT_IN_LOG10:
21577 case BUILT_IN_POW:
21578 case BUILT_IN_TANH:
21579 case BUILT_IN_TAN:
21580 case BUILT_IN_ATAN:
21581 case BUILT_IN_ATAN2:
21582 case BUILT_IN_ATANH:
21583 case BUILT_IN_CBRT:
21584 case BUILT_IN_SINH:
21585 case BUILT_IN_SIN:
21586 case BUILT_IN_ASINH:
21587 case BUILT_IN_ASIN:
21588 case BUILT_IN_COSH:
21589 case BUILT_IN_COS:
21590 case BUILT_IN_ACOSH:
21591 case BUILT_IN_ACOS:
21592 if (el_mode != DFmode || n != 2)
21593 return NULL_TREE;
21594 break;
21596 case BUILT_IN_EXPF:
21597 case BUILT_IN_LOGF:
21598 case BUILT_IN_LOG10F:
21599 case BUILT_IN_POWF:
21600 case BUILT_IN_TANHF:
21601 case BUILT_IN_TANF:
21602 case BUILT_IN_ATANF:
21603 case BUILT_IN_ATAN2F:
21604 case BUILT_IN_ATANHF:
21605 case BUILT_IN_CBRTF:
21606 case BUILT_IN_SINHF:
21607 case BUILT_IN_SINF:
21608 case BUILT_IN_ASINHF:
21609 case BUILT_IN_ASINF:
21610 case BUILT_IN_COSHF:
21611 case BUILT_IN_COSF:
21612 case BUILT_IN_ACOSHF:
21613 case BUILT_IN_ACOSF:
21614 if (el_mode != SFmode || n != 4)
21615 return NULL_TREE;
21616 break;
21618 default:
21619 return NULL_TREE;
21622 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
21624 if (fn == BUILT_IN_LOGF)
21625 strcpy (name, "vmlsLn4");
21626 else if (fn == BUILT_IN_LOG)
21627 strcpy (name, "vmldLn2");
21628 else if (n == 4)
21630 sprintf (name, "vmls%s", bname+10);
21631 name[strlen (name)-1] = '4';
21633 else
21634 sprintf (name, "vmld%s2", bname+10);
21636 /* Convert to uppercase. */
21637 name[4] &= ~0x20;
21639 arity = 0;
21640 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
21641 args = TREE_CHAIN (args))
21642 arity++;
21644 if (arity == 1)
21645 fntype = build_function_type_list (type_out, type_in, NULL);
21646 else
21647 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
21649 /* Build a function declaration for the vectorized function. */
21650 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
21651 TREE_PUBLIC (new_fndecl) = 1;
21652 DECL_EXTERNAL (new_fndecl) = 1;
21653 DECL_IS_NOVOPS (new_fndecl) = 1;
21654 TREE_READONLY (new_fndecl) = 1;
21656 return new_fndecl;
21659 /* Handler for an ACML-style interface to
21660 a library with vectorized intrinsics. */
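/* ACML names follow the "__vr<t><n>_<func>" scheme built below,
e.g. sin -> __vrd2_sin and sinf -> __vrs4_sinf. */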
21662 static tree
21663 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
21665 char name[20] = "__vr.._";
21666 tree fntype, new_fndecl, args;
21667 unsigned arity;
21668 const char *bname;
21669 enum machine_mode el_mode, in_mode;
21670 int n, in_n;
21672 /* The ACML is 64bit only and suitable for unsafe math only, as
21673 it does not correctly support parts of IEEE (such as denormals)
21674 with the required precision. */
21675 if (!TARGET_64BIT
21676 || !flag_unsafe_math_optimizations)
21677 return NULL_TREE;
21679 el_mode = TYPE_MODE (TREE_TYPE (type_out));
21680 n = TYPE_VECTOR_SUBPARTS (type_out);
21681 in_mode = TYPE_MODE (TREE_TYPE (type_in));
21682 in_n = TYPE_VECTOR_SUBPARTS (type_in);
21683 if (el_mode != in_mode
21684 || n != in_n)
21685 return NULL_TREE;
21687 switch (fn)
21689 case BUILT_IN_SIN:
21690 case BUILT_IN_COS:
21691 case BUILT_IN_EXP:
21692 case BUILT_IN_LOG:
21693 case BUILT_IN_LOG2:
21694 case BUILT_IN_LOG10:
21695 name[4] = 'd';
21696 name[5] = '2';
21697 if (el_mode != DFmode
21698 || n != 2)
21699 return NULL_TREE;
21700 break;
21702 case BUILT_IN_SINF:
21703 case BUILT_IN_COSF:
21704 case BUILT_IN_EXPF:
21705 case BUILT_IN_POWF:
21706 case BUILT_IN_LOGF:
21707 case BUILT_IN_LOG2F:
21708 case BUILT_IN_LOG10F:
21709 name[4] = 's';
21710 name[5] = '4';
21711 if (el_mode != SFmode
21712 || n != 4)
21713 return NULL_TREE;
21714 break;
21716 default:
21717 return NULL_TREE;
21720 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
21721 sprintf (name + 7, "%s", bname+10);
21723 arity = 0;
21724 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
21725 args = TREE_CHAIN (args))
21726 arity++;
21728 if (arity == 1)
21729 fntype = build_function_type_list (type_out, type_in, NULL);
21730 else
21731 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
21733 /* Build a function declaration for the vectorized function. */
21734 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
21735 TREE_PUBLIC (new_fndecl) = 1;
21736 DECL_EXTERNAL (new_fndecl) = 1;
21737 DECL_IS_NOVOPS (new_fndecl) = 1;
21738 TREE_READONLY (new_fndecl) = 1;
21740 return new_fndecl;
21744 /* Returns a decl of a function that implements conversion of the
21745 input vector of type TYPE, or NULL_TREE if it is not available. */
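/* E.g., FLOAT_EXPR on a V4SI input maps to CVTDQ2PS (int -> float)
and FIX_TRUNC_EXPR on a V4SF input maps to CVTTPS2DQ. */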
21747 static tree
21748 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
21750 if (TREE_CODE (type) != VECTOR_TYPE)
21751 return NULL_TREE;
21753 switch (code)
21755 case FLOAT_EXPR:
21756 switch (TYPE_MODE (type))
21758 case V4SImode:
21759 return ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
21760 default:
21761 return NULL_TREE;
21764 case FIX_TRUNC_EXPR:
21765 switch (TYPE_MODE (type))
21767 case V4SFmode:
21768 return ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
21769 default:
21770 return NULL_TREE;
21772 default:
21773 return NULL_TREE;
21778 /* Returns a code for a target-specific builtin that implements
21779 reciprocal of the function, or NULL_TREE if not available. */
21781 static tree
21782 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
21783 bool sqrt ATTRIBUTE_UNUSED)
21785 if (! (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
21786 && flag_finite_math_only && !flag_trapping_math
21787 && flag_unsafe_math_optimizations))
21788 return NULL_TREE;
21790 if (md_fn)
21791 /* Machine dependent builtins. */
21792 switch (fn)
21794 /* Vectorized version of sqrt to rsqrt conversion. */
21795 case IX86_BUILTIN_SQRTPS_NR:
21796 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
21798 default:
21799 return NULL_TREE;
21801 else
21802 /* Normal builtins. */
21803 switch (fn)
21805 /* Sqrt to rsqrt conversion. */
21806 case BUILT_IN_SQRTF:
21807 return ix86_builtins[IX86_BUILTIN_RSQRTF];
21809 default:
21810 return NULL_TREE;
21814 /* Store OPERAND to the memory after reload is completed. This means
21815 that we can't easily use assign_stack_local. */
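/* Three strategies are used below: store into the red zone below the
stack pointer when one is available, push via PRE_DEC in DImode on
64-bit targets without a red zone, and push in SImode pieces on
32-bit targets. */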
21816 rtx
21817 ix86_force_to_memory (enum machine_mode mode, rtx operand)
21819 rtx result;
21821 gcc_assert (reload_completed);
21822 if (TARGET_RED_ZONE)
21824 result = gen_rtx_MEM (mode,
21825 gen_rtx_PLUS (Pmode,
21826 stack_pointer_rtx,
21827 GEN_INT (-RED_ZONE_SIZE)));
21828 emit_move_insn (result, operand);
21830 else if (!TARGET_RED_ZONE && TARGET_64BIT)
21832 switch (mode)
21834 case HImode:
21835 case SImode:
21836 operand = gen_lowpart (DImode, operand);
21837 /* FALLTHRU */
21838 case DImode:
21839 emit_insn (
21840 gen_rtx_SET (VOIDmode,
21841 gen_rtx_MEM (DImode,
21842 gen_rtx_PRE_DEC (DImode,
21843 stack_pointer_rtx)),
21844 operand));
21845 break;
21846 default:
21847 gcc_unreachable ();
21849 result = gen_rtx_MEM (mode, stack_pointer_rtx);
21851 else
21853 switch (mode)
21855 case DImode:
21857 rtx operands[2];
21858 split_di (&operand, 1, operands, operands + 1);
21859 emit_insn (
21860 gen_rtx_SET (VOIDmode,
21861 gen_rtx_MEM (SImode,
21862 gen_rtx_PRE_DEC (Pmode,
21863 stack_pointer_rtx)),
21864 operands[1]));
21865 emit_insn (
21866 gen_rtx_SET (VOIDmode,
21867 gen_rtx_MEM (SImode,
21868 gen_rtx_PRE_DEC (Pmode,
21869 stack_pointer_rtx)),
21870 operands[0]));
21872 break;
21873 case HImode:
21874 /* Store HImodes as SImodes. */
21875 operand = gen_lowpart (SImode, operand);
21876 /* FALLTHRU */
21877 case SImode:
21878 emit_insn (
21879 gen_rtx_SET (VOIDmode,
21880 gen_rtx_MEM (GET_MODE (operand),
21881 gen_rtx_PRE_DEC (SImode,
21882 stack_pointer_rtx)),
21883 operand));
21884 break;
21885 default:
21886 gcc_unreachable ();
21888 result = gen_rtx_MEM (mode, stack_pointer_rtx);
21890 return result;
21893 /* Free operand from the memory. */
21894 void
21895 ix86_free_from_memory (enum machine_mode mode)
21897 if (!TARGET_RED_ZONE)
21899 int size;
21901 if (mode == DImode || TARGET_64BIT)
21902 size = 8;
21903 else
21904 size = 4;
21905 /* Use LEA to deallocate stack space. In peephole2 it will be converted
21906 to a pop or add instruction if registers are available. */
21907 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
21908 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
21909 GEN_INT (size))));
21913 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
21914 QImode must go into class Q_REGS.
21915 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
21916 movdf to do mem-to-mem moves through integer regs. */
21917 enum reg_class
21918 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
21920 enum machine_mode mode = GET_MODE (x);
21922 /* We're only allowed to return a subclass of CLASS. Many of the
21923 following checks fail for NO_REGS, so eliminate that early. */
21924 if (regclass == NO_REGS)
21925 return NO_REGS;
21927 /* All classes can load zeros. */
21928 if (x == CONST0_RTX (mode))
21929 return regclass;
21931 /* Force constants into memory if we are loading a (nonzero) constant into
21932 an MMX or SSE register. This is because there are no MMX/SSE instructions
21933 to load from a constant. */
21934 if (CONSTANT_P (x)
21935 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
21936 return NO_REGS;
21938 /* Prefer SSE regs only, if we can use them for math. */
21939 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
21940 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
21942 /* Floating-point constants need more complex checks. */
21943 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
21945 /* General regs can load everything. */
21946 if (reg_class_subset_p (regclass, GENERAL_REGS))
21947 return regclass;
21949 /* Floats can load 0 and 1 plus some others. Note that we eliminated
21950 zero above. We only want to wind up preferring 80387 registers if
21951 we plan on doing computation with them. */
21952 if (TARGET_80387
21953 && standard_80387_constant_p (x))
21955 /* Limit class to non-sse. */
21956 if (regclass == FLOAT_SSE_REGS)
21957 return FLOAT_REGS;
21958 if (regclass == FP_TOP_SSE_REGS)
21959 return FP_TOP_REG;
21960 if (regclass == FP_SECOND_SSE_REGS)
21961 return FP_SECOND_REG;
21962 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
21963 return regclass;
21966 return NO_REGS;
21969 /* Generally when we see PLUS here, it's the function invariant
21970 (plus soft-fp const_int), which can only be computed into general
21971 regs. */
21972 if (GET_CODE (x) == PLUS)
21973 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
21975 /* QImode constants are easy to load, but non-constant QImode data
21976 must go into Q_REGS. */
21977 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
21979 if (reg_class_subset_p (regclass, Q_REGS))
21980 return regclass;
21981 if (reg_class_subset_p (Q_REGS, regclass))
21982 return Q_REGS;
21983 return NO_REGS;
21986 return regclass;
21989 /* Discourage putting floating-point values in SSE registers unless
21990 SSE math is being used, and likewise for the 387 registers. */
21991 enum reg_class
21992 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
21994 enum machine_mode mode = GET_MODE (x);
21996 /* Restrict the output reload class to the register bank that we are doing
21997 math on. If we would like not to return a subset of CLASS, reject this
21998 alternative: if reload cannot do this, it will still use its choice. */
21999 mode = GET_MODE (x);
22000 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
22001 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
22003 if (X87_FLOAT_MODE_P (mode))
22005 if (regclass == FP_TOP_SSE_REGS)
22006 return FP_TOP_REG;
22007 else if (regclass == FP_SECOND_SSE_REGS)
22008 return FP_SECOND_REG;
22009 else
22010 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
22013 return regclass;
22016 static enum reg_class
22017 ix86_secondary_reload (bool in_p, rtx x, enum reg_class class,
22018 enum machine_mode mode,
22019 secondary_reload_info *sri ATTRIBUTE_UNUSED)
22021 /* QImode spills from non-QI registers require an
22022 intermediate register on 32bit targets. */
22023 if (!in_p && mode == QImode && !TARGET_64BIT
22024 && (class == GENERAL_REGS
22025 || class == LEGACY_REGS
22026 || class == INDEX_REGS))
22028 int regno;
22030 if (REG_P (x))
22031 regno = REGNO (x);
22032 else
22033 regno = -1;
22035 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
22036 regno = true_regnum (x);
22038 /* Return Q_REGS if the operand is in memory. */
22039 if (regno == -1)
22040 return Q_REGS;
22043 return NO_REGS;
22046 /* If we are copying between general and FP registers, we need a memory
22047 location. The same is true for SSE and MMX registers.
22049 To optimize register_move_cost performance, allow inline variant.
22051 The macro can't work reliably when one of the CLASSES is class containing
22052 registers from multiple units (SSE, MMX, integer). We avoid this by never
22053 combining those units in single alternative in the machine description.
22054 Ensure that this constraint holds to avoid unexpected surprises.
22056 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
22057 enforce these sanity checks. */
22059 static inline int
22060 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
22061 enum machine_mode mode, int strict)
22063 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
22064 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
22065 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
22066 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
22067 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
22068 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
22070 gcc_assert (!strict);
22071 return true;
22074 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
22075 return true;
22077 /* ??? This is a lie. We do have moves between mmx/general and
22078 between mmx/sse2. But by saying we need secondary memory we discourage
22079 the register allocator from using the mmx registers unless needed. */
22080 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
22081 return true;
22083 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
22085 /* SSE1 doesn't have any direct moves from other classes. */
22086 if (!TARGET_SSE2)
22087 return true;
22089 /* If the target says that inter-unit moves are more expensive
22090 than moving through memory, then don't generate them. */
22091 if (!TARGET_INTER_UNIT_MOVES)
22092 return true;
22094 /* Between SSE and general, we have moves no larger than word size. */
22095 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
22096 return true;
22099 return false;
22102 int
22103 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
22104 enum machine_mode mode, int strict)
22106 return inline_secondary_memory_needed (class1, class2, mode, strict);
22109 /* Return true if the registers in CLASS cannot represent the change from
22110 modes FROM to TO. */
22112 bool
22113 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
22114 enum reg_class regclass)
22116 if (from == to)
22117 return false;
22119 /* x87 registers can't do subreg at all, as all values are reformatted
22120 to extended precision. */
22121 if (MAYBE_FLOAT_CLASS_P (regclass))
22122 return true;
22124 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
22126 /* Vector registers do not support QI or HImode loads. If we don't
22127 disallow a change to these modes, reload will assume it's ok to
22128 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
22129 the vec_dupv4hi pattern. */
22130 if (GET_MODE_SIZE (from) < 4)
22131 return true;
22133 /* Vector registers do not support subreg with nonzero offsets, which
22134 are otherwise valid for integer registers. Since we can't see
22135 whether we have a nonzero offset from here, prohibit all
22136 nonparadoxical subregs changing size. */
22137 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
22138 return true;
22141 return false;
22144 /* Return the cost of moving data of mode M between a
22145 register and memory. A value of 2 is the default; this cost is
22146 relative to those in `REGISTER_MOVE_COST'.
22148 This function is used extensively by register_move_cost, which is used
22149 to build tables at startup, so make it inline in this case.
22150 When IN is 2, return maximum of in and out move cost.
22152 If moving between registers and memory is more expensive than
22153 between two registers, you should define this macro to express the
22154 relative cost.
22156 Also model the increased cost of moving QImode registers in non
22157 Q_REGS classes.
22159 static inline int
22160 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
22161 int in)
22163 int cost;
22164 if (FLOAT_CLASS_P (regclass))
22166 int index;
22167 switch (mode)
22169 case SFmode:
22170 index = 0;
22171 break;
22172 case DFmode:
22173 index = 1;
22174 break;
22175 case XFmode:
22176 index = 2;
22177 break;
22178 default:
22179 return 100;
22181 if (in == 2)
22182 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
22183 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
22185 if (SSE_CLASS_P (regclass))
22187 int index;
22188 switch (GET_MODE_SIZE (mode))
22190 case 4:
22191 index = 0;
22192 break;
22193 case 8:
22194 index = 1;
22195 break;
22196 case 16:
22197 index = 2;
22198 break;
22199 default:
22200 return 100;
22202 if (in == 2)
22203 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
22204 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
22206 if (MMX_CLASS_P (regclass))
22208 int index;
22209 switch (GET_MODE_SIZE (mode))
22211 case 4:
22212 index = 0;
22213 break;
22214 case 8:
22215 index = 1;
22216 break;
22217 default:
22218 return 100;
22220 if (in == 2)
22221 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
22222 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
22224 switch (GET_MODE_SIZE (mode))
22226 case 1:
22227 if (Q_CLASS_P (regclass) || TARGET_64BIT)
22229 if (!in)
22230 return ix86_cost->int_store[0];
22231 if (TARGET_PARTIAL_REG_DEPENDENCY && !optimize_size)
22232 cost = ix86_cost->movzbl_load;
22233 else
22234 cost = ix86_cost->int_load[0];
22235 if (in == 2)
22236 return MAX (cost, ix86_cost->int_store[0]);
22237 return cost;
22239 else
22241 if (in == 2)
22242 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
22243 if (in)
22244 return ix86_cost->movzbl_load;
22245 else
22246 return ix86_cost->int_store[0] + 4;
22248 break;
22249 case 2:
22250 if (in == 2)
22251 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
22252 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
22253 default:
22254 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
22255 if (mode == TFmode)
22256 mode = XFmode;
22257 if (in == 2)
22258 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
22259 else if (in)
22260 cost = ix86_cost->int_load[2];
22261 else
22262 cost = ix86_cost->int_store[2];
22263 return (cost * (((int) GET_MODE_SIZE (mode)
22264 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
22268 int
22269 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
22271 return inline_memory_move_cost (mode, regclass, in);
22275 /* Return the cost of moving data from a register in class CLASS1 to
22276 one in class CLASS2.
22278 It is not required that the cost always equal 2 when FROM is the same as TO;
22279 on some machines it is expensive to move between registers if they are not
22280 general registers. */
22282 int
22283 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
22284 enum reg_class class2)
22286 /* In case we require secondary memory, compute cost of the store followed
22287 by load. In order to avoid bad register allocation choices, we need
22288 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
22290 if (inline_secondary_memory_needed (class1, class2, mode, 0))
22292 int cost = 1;
22294 cost += inline_memory_move_cost (mode, class1, 2);
22295 cost += inline_memory_move_cost (mode, class2, 2);
22297 /* In case of copying from general_purpose_register we may emit multiple
22298 stores followed by single load causing memory size mismatch stall.
22299 Count this as arbitrarily high cost of 20. */
22300 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
22301 cost += 20;
22303 /* In the case of FP/MMX moves, the registers actually overlap, and we
22304 have to switch modes in order to treat them differently. */
22305 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
22306 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
22307 cost += 20;
22309 return cost;
22312 /* Moves between SSE/MMX and integer unit are expensive. */
22313 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
22314 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
22316 /* ??? By keeping returned value relatively high, we limit the number
22317 of moves between integer and MMX/SSE registers for all targets.
22318 Additionally, high value prevents problem with x86_modes_tieable_p(),
22319 where integer modes in MMX/SSE registers are not tieable
22320 because of missing QImode and HImode moves to, from or between
22321 MMX/SSE registers. */
22322 return MAX (8, ix86_cost->mmxsse_to_integer);
22324 if (MAYBE_FLOAT_CLASS_P (class1))
22325 return ix86_cost->fp_move;
22326 if (MAYBE_SSE_CLASS_P (class1))
22327 return ix86_cost->sse_move;
22328 if (MAYBE_MMX_CLASS_P (class1))
22329 return ix86_cost->mmx_move;
22330 return 2;
22333 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
22335 bool
22336 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
22338 /* Flags can hold only CCmode values, and only flags can hold them. */
22339 if (CC_REGNO_P (regno))
22340 return GET_MODE_CLASS (mode) == MODE_CC;
22341 if (GET_MODE_CLASS (mode) == MODE_CC
22342 || GET_MODE_CLASS (mode) == MODE_RANDOM
22343 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
22344 return 0;
22345 if (FP_REGNO_P (regno))
22346 return VALID_FP_MODE_P (mode);
22347 if (SSE_REGNO_P (regno))
22349 /* We implement the move patterns for all vector modes into and
22350 out of SSE registers, even when no operation instructions
22351 are available. */
22352 return (VALID_SSE_REG_MODE (mode)
22353 || VALID_SSE2_REG_MODE (mode)
22354 || VALID_MMX_REG_MODE (mode)
22355 || VALID_MMX_REG_MODE_3DNOW (mode));
22357 if (MMX_REGNO_P (regno))
22359 /* We implement the move patterns for 3DNOW modes even in MMX mode,
22360 so if the register is available at all, then we can move data of
22361 the given mode into or out of it. */
22362 return (VALID_MMX_REG_MODE (mode)
22363 || VALID_MMX_REG_MODE_3DNOW (mode));
22366 if (mode == QImode)
22368 /* Take care for QImode values - they can be in non-QI regs,
22369 but then they do cause partial register stalls. */
22370 if (regno < 4 || TARGET_64BIT)
22371 return 1;
22372 if (!TARGET_PARTIAL_REG_STALL)
22373 return 1;
22374 return reload_in_progress || reload_completed;
22376 /* We handle both integer and floats in the general purpose registers. */
22377 else if (VALID_INT_MODE_P (mode))
22378 return 1;
22379 else if (VALID_FP_MODE_P (mode))
22380 return 1;
22381 else if (VALID_DFP_MODE_P (mode))
22382 return 1;
22383 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
22384 on to use that value in smaller contexts, this can easily force a
22385 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
22386 supporting DImode, allow it. */
22387 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
22388 return 1;
22390 return 0;
22393 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
22394 tieable integer mode. */
22396 static bool
22397 ix86_tieable_integer_mode_p (enum machine_mode mode)
22399 switch (mode)
22401 case HImode:
22402 case SImode:
22403 return true;
22405 case QImode:
22406 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
22408 case DImode:
22409 return TARGET_64BIT;
22411 default:
22412 return false;
22416 /* Return true if MODE1 is accessible in a register that can hold MODE2
22417 without copying. That is, all register classes that can hold MODE2
22418 can also hold MODE1. */
22420 bool
22421 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
22423 if (mode1 == mode2)
22424 return true;
22426 if (ix86_tieable_integer_mode_p (mode1)
22427 && ix86_tieable_integer_mode_p (mode2))
22428 return true;
22430 /* MODE2 being XFmode implies fp stack or general regs, which means we
22431 can tie any smaller floating point modes to it. Note that we do not
22432 tie this with TFmode. */
22433 if (mode2 == XFmode)
22434 return mode1 == SFmode || mode1 == DFmode;
22436 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
22437 that we can tie it with SFmode. */
22438 if (mode2 == DFmode)
22439 return mode1 == SFmode;
22441 /* If MODE2 is only appropriate for an SSE register, then tie with
22442 any other mode acceptable to SSE registers. */
22443 if (GET_MODE_SIZE (mode2) == 16
22444 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
22445 return (GET_MODE_SIZE (mode1) == 16
22446 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
22448 /* If MODE2 is appropriate for an MMX register, then tie
22449 with any other mode acceptable to MMX registers. */
22450 if (GET_MODE_SIZE (mode2) == 8
22451 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
22452 return (GET_MODE_SIZE (mode1) == 8
22453 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
22455 return false;
22458 /* Compute a (partial) cost for rtx X. Return true if the complete
22459 cost has been computed, and false if subexpressions should be
22460 scanned. In either case, *TOTAL contains the cost result. */
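/* Note that cheap address-unit tricks are preferred below: a shift by
one is costed as an add, and shifts by 2 or 3 may be costed as an lea
when that is no more expensive than a constant shift. */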
22462 static bool
22463 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total)
22465 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
22466 enum machine_mode mode = GET_MODE (x);
22468 switch (code)
22470 case CONST_INT:
22471 case CONST:
22472 case LABEL_REF:
22473 case SYMBOL_REF:
22474 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
22475 *total = 3;
22476 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
22477 *total = 2;
22478 else if (flag_pic && SYMBOLIC_CONST (x)
22479 && (!TARGET_64BIT
22480 || (GET_CODE (x) != LABEL_REF
22481 && (GET_CODE (x) != SYMBOL_REF
22482 || !SYMBOL_REF_LOCAL_P (x)))))
22483 *total = 1;
22484 else
22485 *total = 0;
22486 return true;
22488 case CONST_DOUBLE:
22489 if (mode == VOIDmode)
22490 *total = 0;
22491 else
22492 switch (standard_80387_constant_p (x))
22494 case 1: /* 0.0 */
22495 *total = 1;
22496 break;
22497 default: /* Other constants */
22498 *total = 2;
22499 break;
22500 case 0:
22501 case -1:
22502 /* Start with (MEM (SYMBOL_REF)), since that's where
22503 it'll probably end up. Add a penalty for size. */
22504 *total = (COSTS_N_INSNS (1)
22505 + (flag_pic != 0 && !TARGET_64BIT)
22506 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
22507 break;
22509 return true;
22511 case ZERO_EXTEND:
22512 /* The zero extension is often completely free on x86_64, so make
22513 it as cheap as possible. */
22514 if (TARGET_64BIT && mode == DImode
22515 && GET_MODE (XEXP (x, 0)) == SImode)
22516 *total = 1;
22517 else if (TARGET_ZERO_EXTEND_WITH_AND)
22518 *total = ix86_cost->add;
22519 else
22520 *total = ix86_cost->movzx;
22521 return false;
22523 case SIGN_EXTEND:
22524 *total = ix86_cost->movsx;
22525 return false;
22527 case ASHIFT:
22528 if (CONST_INT_P (XEXP (x, 1))
22529 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
22531 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
22532 if (value == 1)
22534 *total = ix86_cost->add;
22535 return false;
22537 if ((value == 2 || value == 3)
22538 && ix86_cost->lea <= ix86_cost->shift_const)
22540 *total = ix86_cost->lea;
22541 return false;
22544 /* FALLTHRU */
22546 case ROTATE:
22547 case ASHIFTRT:
22548 case LSHIFTRT:
22549 case ROTATERT:
22550 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
22552 if (CONST_INT_P (XEXP (x, 1)))
22554 if (INTVAL (XEXP (x, 1)) > 32)
22555 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
22556 else
22557 *total = ix86_cost->shift_const * 2;
22559 else
22561 if (GET_CODE (XEXP (x, 1)) == AND)
22562 *total = ix86_cost->shift_var * 2;
22563 else
22564 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
22567 else
22569 if (CONST_INT_P (XEXP (x, 1)))
22570 *total = ix86_cost->shift_const;
22571 else
22572 *total = ix86_cost->shift_var;
22574 return false;
22576 case MULT:
22577 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22579 /* ??? SSE scalar cost should be used here. */
22580 *total = ix86_cost->fmul;
22581 return false;
22583 else if (X87_FLOAT_MODE_P (mode))
22585 *total = ix86_cost->fmul;
22586 return false;
22588 else if (FLOAT_MODE_P (mode))
22590 /* ??? SSE vector cost should be used here. */
22591 *total = ix86_cost->fmul;
22592 return false;
22594 else
22596 rtx op0 = XEXP (x, 0);
22597 rtx op1 = XEXP (x, 1);
22598 int nbits;
22599 if (CONST_INT_P (XEXP (x, 1)))
22601 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
22602 for (nbits = 0; value != 0; value &= value - 1)
22603 nbits++;
22605 else
22606 /* This is arbitrary. */
22607 nbits = 7;
22609 /* Compute costs correctly for widening multiplication. */
22610 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
22611 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
22612 == GET_MODE_SIZE (mode))
22614 int is_mulwiden = 0;
22615 enum machine_mode inner_mode = GET_MODE (op0);
22617 if (GET_CODE (op0) == GET_CODE (op1))
22618 is_mulwiden = 1, op1 = XEXP (op1, 0);
22619 else if (CONST_INT_P (op1))
22621 if (GET_CODE (op0) == SIGN_EXTEND)
22622 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
22623 == INTVAL (op1);
22624 else
22625 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
22628 if (is_mulwiden)
22629 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
22632 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
22633 + nbits * ix86_cost->mult_bit
22634 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
22636 return true;
22639 case DIV:
22640 case UDIV:
22641 case MOD:
22642 case UMOD:
22643 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22644 /* ??? SSE cost should be used here. */
22645 *total = ix86_cost->fdiv;
22646 else if (X87_FLOAT_MODE_P (mode))
22647 *total = ix86_cost->fdiv;
22648 else if (FLOAT_MODE_P (mode))
22649 /* ??? SSE vector cost should be used here. */
22650 *total = ix86_cost->fdiv;
22651 else
22652 *total = ix86_cost->divide[MODE_INDEX (mode)];
22653 return false;
22655 case PLUS:
22656 if (GET_MODE_CLASS (mode) == MODE_INT
22657 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
22659 if (GET_CODE (XEXP (x, 0)) == PLUS
22660 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
22661 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
22662 && CONSTANT_P (XEXP (x, 1)))
22664 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
22665 if (val == 2 || val == 4 || val == 8)
22667 *total = ix86_cost->lea;
22668 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
22669 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
22670 outer_code);
22671 *total += rtx_cost (XEXP (x, 1), outer_code);
22672 return true;
22675 else if (GET_CODE (XEXP (x, 0)) == MULT
22676 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
22678 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
22679 if (val == 2 || val == 4 || val == 8)
22681 *total = ix86_cost->lea;
22682 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
22683 *total += rtx_cost (XEXP (x, 1), outer_code);
22684 return true;
22687 else if (GET_CODE (XEXP (x, 0)) == PLUS)
22689 *total = ix86_cost->lea;
22690 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
22691 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
22692 *total += rtx_cost (XEXP (x, 1), outer_code);
22693 return true;
22696 /* FALLTHRU */
22698 case MINUS:
22699 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22701 /* ??? SSE cost should be used here. */
22702 *total = ix86_cost->fadd;
22703 return false;
22705 else if (X87_FLOAT_MODE_P (mode))
22707 *total = ix86_cost->fadd;
22708 return false;
22710 else if (FLOAT_MODE_P (mode))
22712 /* ??? SSE vector cost should be used here. */
22713 *total = ix86_cost->fadd;
22714 return false;
22716 /* FALLTHRU */
22718 case AND:
22719 case IOR:
22720 case XOR:
22721 if (!TARGET_64BIT && mode == DImode)
22723 *total = (ix86_cost->add * 2
22724 + (rtx_cost (XEXP (x, 0), outer_code)
22725 << (GET_MODE (XEXP (x, 0)) != DImode))
22726 + (rtx_cost (XEXP (x, 1), outer_code)
22727 << (GET_MODE (XEXP (x, 1)) != DImode)));
22728 return true;
22730 /* FALLTHRU */
22732 case NEG:
22733 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22735 /* ??? SSE cost should be used here. */
22736 *total = ix86_cost->fchs;
22737 return false;
22739 else if (X87_FLOAT_MODE_P (mode))
22741 *total = ix86_cost->fchs;
22742 return false;
22744 else if (FLOAT_MODE_P (mode))
22746 /* ??? SSE vector cost should be used here. */
22747 *total = ix86_cost->fchs;
22748 return false;
22750 /* FALLTHRU */
22752 case NOT:
22753 if (!TARGET_64BIT && mode == DImode)
22754 *total = ix86_cost->add * 2;
22755 else
22756 *total = ix86_cost->add;
22757 return false;
22759 case COMPARE:
22760 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
22761 && XEXP (XEXP (x, 0), 1) == const1_rtx
22762 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
22763 && XEXP (x, 1) == const0_rtx)
22765 /* This kind of construct is implemented using test[bwl].
22766 Treat it as if we had an AND. */
22767 *total = (ix86_cost->add
22768 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
22769 + rtx_cost (const1_rtx, outer_code));
22770 return true;
22772 return false;
22774 case FLOAT_EXTEND:
22775 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
22776 *total = 0;
22777 return false;
22779 case ABS:
22780 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22781 /* ??? SSE cost should be used here. */
22782 *total = ix86_cost->fabs;
22783 else if (X87_FLOAT_MODE_P (mode))
22784 *total = ix86_cost->fabs;
22785 else if (FLOAT_MODE_P (mode))
22786 /* ??? SSE vector cost should be used here. */
22787 *total = ix86_cost->fabs;
22788 return false;
22790 case SQRT:
22791 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
22792 /* ??? SSE cost should be used here. */
22793 *total = ix86_cost->fsqrt;
22794 else if (X87_FLOAT_MODE_P (mode))
22795 *total = ix86_cost->fsqrt;
22796 else if (FLOAT_MODE_P (mode))
22797 /* ??? SSE vector cost should be used here. */
22798 *total = ix86_cost->fsqrt;
22799 return false;
22801 case UNSPEC:
22802 if (XINT (x, 1) == UNSPEC_TP)
22803 *total = 0;
22804 return false;
22806 default:
22807 return false;
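/* Illustrative note, not from the original source: these cost cases are
   what steer the middle-end toward LEA.  For a hypothetical address
   computation such as

	(plus (plus (mult (reg) (const_int 4)) (reg)) (const_int 8))

   i.e. the RTL behind "leal 8(%ebx,%ecx,4), %eax", the PLUS case charges
   a single ix86_cost->lea plus the operand costs, and small constant
   shifts (by 1, 2 or 3) are likewise costed as an add or an lea when
   that is cheaper than shift_const.  */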
22811 #if TARGET_MACHO
22813 static int current_machopic_label_num;
22815 /* Given a symbol name and its associated stub, write out the
22816 definition of the stub. */
22818 void
22819 machopic_output_stub (FILE *file, const char *symb, const char *stub)
22821 unsigned int length;
22822 char *binder_name, *symbol_name, lazy_ptr_name[32];
22823 int label = ++current_machopic_label_num;
22825 /* For 64-bit we shouldn't get here. */
22826 gcc_assert (!TARGET_64BIT);
22828 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
22829 symb = (*targetm.strip_name_encoding) (symb);
22831 length = strlen (stub);
22832 binder_name = alloca (length + 32);
22833 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
22835 length = strlen (symb);
22836 symbol_name = alloca (length + 32);
22837 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
22839 sprintf (lazy_ptr_name, "L%d$lz", label);
22841 if (MACHOPIC_PURE)
22842 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
22843 else
22844 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
22846 fprintf (file, "%s:\n", stub);
22847 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
22849 if (MACHOPIC_PURE)
22851 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
22852 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
22853 fprintf (file, "\tjmp\t*%%edx\n");
22855 else
22856 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
22858 fprintf (file, "%s:\n", binder_name);
22860 if (MACHOPIC_PURE)
22862 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
22863 fprintf (file, "\tpushl\t%%eax\n");
22865 else
22866 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
22868 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
22870 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
22871 fprintf (file, "%s:\n", lazy_ptr_name);
22872 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
22873 fprintf (file, "\t.long %s\n", binder_name);
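/* Illustrative sketch of the output, with assumed names: for a symbol
   _foo whose stub is "L_foo$stub", binder "L_foo$stub_binder" and label
   number 1, the MACHOPIC_PURE branch above emits roughly:

	L_foo$stub:
		.indirect_symbol _foo
		call	LPC$1
	LPC$1:	popl	%eax
		movl	L1$lz-LPC$1(%eax),%edx
		jmp	*%edx
	L_foo$stub_binder:
		lea	L1$lz-LPC$1(%eax),%eax
		pushl	%eax
		jmp	dyld_stub_binding_helper
	L1$lz:
		.indirect_symbol _foo
		.long	L_foo$stub_binder

   The exact names come from GEN_BINDER_NAME_FOR_STUB and
   GEN_SYMBOL_NAME_FOR_SYMBOL; the ones above are only examples.  */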
22876 void
22877 darwin_x86_file_end (void)
22879 darwin_file_end ();
22880 ix86_file_end ();
22882 #endif /* TARGET_MACHO */
22884 /* Order the registers for register allocator. */
22886 void
22887 x86_order_regs_for_local_alloc (void)
22889 int pos = 0;
22890 int i;
22892 /* First allocate the local general purpose registers. */
22893 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
22894 if (GENERAL_REGNO_P (i) && call_used_regs[i])
22895 reg_alloc_order [pos++] = i;
22897 /* Global general purpose registers. */
22898 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
22899 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
22900 reg_alloc_order [pos++] = i;
22902 /* x87 registers come first in case we are doing FP math
22903 using them. */
22904 if (!TARGET_SSE_MATH)
22905 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
22906 reg_alloc_order [pos++] = i;
22908 /* SSE registers. */
22909 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
22910 reg_alloc_order [pos++] = i;
22911 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
22912 reg_alloc_order [pos++] = i;
22914 /* x87 registers. */
22915 if (TARGET_SSE_MATH)
22916 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
22917 reg_alloc_order [pos++] = i;
22919 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
22920 reg_alloc_order [pos++] = i;
22922 /* Initialize the rest of the array, as we do not allocate some registers
22923 at all. */
22924 while (pos < FIRST_PSEUDO_REGISTER)
22925 reg_alloc_order [pos++] = 0;
22928 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
22929 struct attribute_spec.handler. */
22930 static tree
22931 ix86_handle_struct_attribute (tree *node, tree name,
22932 tree args ATTRIBUTE_UNUSED,
22933 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
22935 tree *type = NULL;
22936 if (DECL_P (*node))
22938 if (TREE_CODE (*node) == TYPE_DECL)
22939 type = &TREE_TYPE (*node);
22941 else
22942 type = node;
22944 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
22945 || TREE_CODE (*type) == UNION_TYPE)))
22947 warning (OPT_Wattributes, "%qs attribute ignored",
22948 IDENTIFIER_POINTER (name));
22949 *no_add_attrs = true;
22952 else if ((is_attribute_p ("ms_struct", name)
22953 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
22954 || ((is_attribute_p ("gcc_struct", name)
22955 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
22957 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
22958 IDENTIFIER_POINTER (name));
22959 *no_add_attrs = true;
22962 return NULL_TREE;
22965 static bool
22966 ix86_ms_bitfield_layout_p (const_tree record_type)
22968 return (TARGET_MS_BITFIELD_LAYOUT &&
22969 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
22970 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
22973 /* Returns an expression indicating where the this parameter is
22974 located on entry to the FUNCTION. */
22976 static rtx
22977 x86_this_parameter (tree function)
22979 tree type = TREE_TYPE (function);
22980 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
22981 int nregs;
22983 if (TARGET_64BIT)
22985 const int *parm_regs;
22987 if (TARGET_64BIT_MS_ABI)
22988 parm_regs = x86_64_ms_abi_int_parameter_registers;
22989 else
22990 parm_regs = x86_64_int_parameter_registers;
22991 return gen_rtx_REG (DImode, parm_regs[aggr]);
22994 nregs = ix86_function_regparm (type, function);
22996 if (nregs > 0 && !stdarg_p (type))
22998 int regno;
23000 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
23001 regno = aggr ? DX_REG : CX_REG;
23002 else
23004 regno = AX_REG;
23005 if (aggr)
23007 regno = DX_REG;
23008 if (nregs == 1)
23009 return gen_rtx_MEM (SImode,
23010 plus_constant (stack_pointer_rtx, 4));
23013 return gen_rtx_REG (SImode, regno);
23016 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
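/* Examples of what the code above returns (a sketch derived from the
   register tables; the ABI names are informal):

     64-bit SysV:	%rdi, or %rsi when a hidden aggregate-return
			pointer occupies the first slot;
     64-bit MS ABI:	%rcx, or %rdx with a hidden return pointer;
     32-bit fastcall:	%ecx, or %edx with a hidden return pointer;
     32-bit regparm:	%eax, or %edx with a hidden return pointer,
			falling back to 4(%esp) when only one register
			is available;
     plain 32-bit:	the stack slot 4(%esp), or 8(%esp) when the
			hidden return pointer comes first.  */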
23019 /* Determine whether x86_output_mi_thunk can succeed. */
23021 static bool
23022 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
23023 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
23024 HOST_WIDE_INT vcall_offset, const_tree function)
23026 /* 64-bit can handle anything. */
23027 if (TARGET_64BIT)
23028 return true;
23030 /* For 32-bit, everything's fine if we have one free register. */
23031 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
23032 return true;
23034 /* Need a free register for vcall_offset. */
23035 if (vcall_offset)
23036 return false;
23038 /* Need a free register for GOT references. */
23039 if (flag_pic && !(*targetm.binds_local_p) (function))
23040 return false;
23042 /* Otherwise ok. */
23043 return true;
23046 /* Output the assembler code for a thunk function. THUNK_DECL is the
23047 declaration for the thunk function itself, FUNCTION is the decl for
23048 the target function. DELTA is an immediate constant offset to be
23049 added to THIS. If VCALL_OFFSET is nonzero, the word at
23050 *(*this + vcall_offset) should be added to THIS. */
23052 static void
23053 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
23054 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
23055 HOST_WIDE_INT vcall_offset, tree function)
23057 rtx xops[3];
23058 rtx this_param = x86_this_parameter (function);
23059 rtx this_reg, tmp;
23061 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
23062 pull it in now and let DELTA benefit. */
23063 if (REG_P (this_param))
23064 this_reg = this_param;
23065 else if (vcall_offset)
23067 /* Put the this parameter into %eax. */
23068 xops[0] = this_param;
23069 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
23070 if (TARGET_64BIT)
23071 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
23072 else
23073 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
23075 else
23076 this_reg = NULL_RTX;
23078 /* Adjust the this parameter by a fixed constant. */
23079 if (delta)
23081 xops[0] = GEN_INT (delta);
23082 xops[1] = this_reg ? this_reg : this_param;
23083 if (TARGET_64BIT)
23085 if (!x86_64_general_operand (xops[0], DImode))
23087 tmp = gen_rtx_REG (DImode, R10_REG);
23088 xops[1] = tmp;
23089 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
23090 xops[0] = tmp;
23091 xops[1] = this_param;
23093 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
23095 else
23096 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
23099 /* Adjust the this parameter by a value stored in the vtable. */
23100 if (vcall_offset)
23102 if (TARGET_64BIT)
23103 tmp = gen_rtx_REG (DImode, R10_REG);
23104 else
23106 int tmp_regno = CX_REG;
23107 if (lookup_attribute ("fastcall",
23108 TYPE_ATTRIBUTES (TREE_TYPE (function))))
23109 tmp_regno = AX_REG;
23110 tmp = gen_rtx_REG (SImode, tmp_regno);
23113 xops[0] = gen_rtx_MEM (Pmode, this_reg);
23114 xops[1] = tmp;
23115 if (TARGET_64BIT)
23116 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
23117 else
23118 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
23120 /* Adjust the this parameter. */
23121 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
23122 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
23124 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
23125 xops[0] = GEN_INT (vcall_offset);
23126 xops[1] = tmp2;
23127 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
23128 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
23130 xops[1] = this_reg;
23131 if (TARGET_64BIT)
23132 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
23133 else
23134 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
23137 /* If necessary, drop THIS back to its stack slot. */
23138 if (this_reg && this_reg != this_param)
23140 xops[0] = this_reg;
23141 xops[1] = this_param;
23142 if (TARGET_64BIT)
23143 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
23144 else
23145 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
23148 xops[0] = XEXP (DECL_RTL (function), 0);
23149 if (TARGET_64BIT)
23151 if (!flag_pic || (*targetm.binds_local_p) (function))
23152 output_asm_insn ("jmp\t%P0", xops);
23153 /* All thunks should be in the same object as their target,
23154 and thus binds_local_p should be true. */
23155 else if (TARGET_64BIT_MS_ABI)
23156 gcc_unreachable ();
23157 else
23159 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
23160 tmp = gen_rtx_CONST (Pmode, tmp);
23161 tmp = gen_rtx_MEM (QImode, tmp);
23162 xops[0] = tmp;
23163 output_asm_insn ("jmp\t%A0", xops);
23166 else
23168 if (!flag_pic || (*targetm.binds_local_p) (function))
23169 output_asm_insn ("jmp\t%P0", xops);
23170 else
23171 #if TARGET_MACHO
23172 if (TARGET_MACHO)
23174 rtx sym_ref = XEXP (DECL_RTL (function), 0);
23175 tmp = (gen_rtx_SYMBOL_REF
23176 (Pmode,
23177 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
23178 tmp = gen_rtx_MEM (QImode, tmp);
23179 xops[0] = tmp;
23180 output_asm_insn ("jmp\t%0", xops);
23182 else
23183 #endif /* TARGET_MACHO */
23185 tmp = gen_rtx_REG (SImode, CX_REG);
23186 output_set_got (tmp, NULL_RTX);
23188 xops[1] = tmp;
23189 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
23190 output_asm_insn ("jmp\t{*}%1", xops);
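/* Illustrative sketch, assuming a plain 32-bit thunk with DELTA == -4,
   no vcall offset, and a locally bound target "_fn": the code above
   reduces to

	addl	$-4, 4(%esp)
	jmp	_fn

   i.e. adjust "this" in place and tail-jump to the real function.  With
   a nonzero VCALL_OFFSET, "this" is first pulled into a register so the
   word at *(*this + vcall_offset) can be added as well.  */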
23195 static void
23196 x86_file_start (void)
23198 default_file_start ();
23199 #if TARGET_MACHO
23200 darwin_file_start ();
23201 #endif
23202 if (X86_FILE_START_VERSION_DIRECTIVE)
23203 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
23204 if (X86_FILE_START_FLTUSED)
23205 fputs ("\t.global\t__fltused\n", asm_out_file);
23206 if (ix86_asm_dialect == ASM_INTEL)
23207 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
23210 int
23211 x86_field_alignment (tree field, int computed)
23213 enum machine_mode mode;
23214 tree type = TREE_TYPE (field);
23216 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
23217 return computed;
23218 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
23219 ? get_inner_array_type (type) : type);
23220 if (mode == DFmode || mode == DCmode
23221 || GET_MODE_CLASS (mode) == MODE_INT
23222 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
23223 return MIN (32, computed);
23224 return computed;
23227 /* Output assembler code to FILE to increment profiler label # LABELNO
23228 for profiling a function entry. */
23229 void
23230 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
23232 if (TARGET_64BIT)
23234 #ifndef NO_PROFILE_COUNTERS
23235 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
23236 #endif
23238 if (!TARGET_64BIT_MS_ABI && flag_pic)
23239 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
23240 else
23241 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
23243 else if (flag_pic)
23245 #ifndef NO_PROFILE_COUNTERS
23246 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
23247 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
23248 #endif
23249 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
23251 else
23253 #ifndef NO_PROFILE_COUNTERS
23254 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
23255 PROFILE_COUNT_REGISTER);
23256 #endif
23257 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
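/* Illustrative sketch, assuming MCOUNT_NAME "mcount", LPREFIX "L" and
   PROFILE_COUNT_REGISTER "edx": the 32-bit PIC branch above emits

	leal	LP0@GOTOFF(%ebx),%edx
	call	*mcount@GOT(%ebx)

   and the non-PIC branch

	movl	$LP0,%edx
	call	mcount

   where the leal/movl appears only when profile counters are enabled.  */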
23261 /* We don't have exact information about the insn sizes, but we may assume
23262 quite safely that we are informed about all 1-byte insns and memory
23263 address sizes. This is enough to eliminate unnecessary padding in
23264 99% of cases. */
23266 static int
23267 min_insn_size (rtx insn)
23269 int l = 0;
23271 if (!INSN_P (insn) || !active_insn_p (insn))
23272 return 0;
23274 /* Discard alignments we've emitted, and jump table instructions. */
23275 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
23276 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
23277 return 0;
23278 if (JUMP_P (insn)
23279 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
23280 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
23281 return 0;
23283 /* Important case - calls are always 5 bytes.
23284 It is common to have many calls in a row. */
23285 if (CALL_P (insn)
23286 && symbolic_reference_mentioned_p (PATTERN (insn))
23287 && !SIBLING_CALL_P (insn))
23288 return 5;
23289 if (get_attr_length (insn) <= 1)
23290 return 1;
23292 /* For normal instructions we may rely on the sizes of addresses
23293 and the presence of a symbol to require 4 bytes of encoding. This
23294 This is not the case for jumps where references are PC relative. */
23295 if (!JUMP_P (insn))
23297 l = get_attr_length_address (insn);
23298 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
23299 l = 4;
23301 if (l)
23302 return 1+l;
23303 else
23304 return 2;
23307 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16-byte
23308 window. */
23310 static void
23311 ix86_avoid_jump_misspredicts (void)
23313 rtx insn, start = get_insns ();
23314 int nbytes = 0, njumps = 0;
23315 int isjump = 0;
23317 /* Look for all minimal intervals of instructions containing 4 jumps.
23318 The intervals are bounded by START and INSN. NBYTES is the total
23319 size of instructions in the interval including INSN and not including
23320 START. When NBYTES is smaller than 16 bytes, it is possible
23321 that the end of START and INSN end up in the same 16-byte page.
23323 The smallest offset in the page at which INSN can start is the case where
23324 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
23325 We add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN). */
23327 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
23330 nbytes += min_insn_size (insn);
23331 if (dump_file)
23332 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
23333 INSN_UID (insn), min_insn_size (insn));
23334 if ((JUMP_P (insn)
23335 && GET_CODE (PATTERN (insn)) != ADDR_VEC
23336 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
23337 || CALL_P (insn))
23338 njumps++;
23339 else
23340 continue;
23342 while (njumps > 3)
23344 start = NEXT_INSN (start);
23345 if ((JUMP_P (start)
23346 && GET_CODE (PATTERN (start)) != ADDR_VEC
23347 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
23348 || CALL_P (start))
23349 njumps--, isjump = 1;
23350 else
23351 isjump = 0;
23352 nbytes -= min_insn_size (start);
23354 gcc_assert (njumps >= 0);
23355 if (dump_file)
23356 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
23357 INSN_UID (start), INSN_UID (insn), nbytes);
23359 if (njumps == 3 && isjump && nbytes < 16)
23361 int padsize = 15 - nbytes + min_insn_size (insn);
23363 if (dump_file)
23364 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
23365 INSN_UID (insn), padsize);
23366 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
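/* Worked example: if the interval up to INSN already holds three jumps
   in nbytes == 12 and INSN itself is a 2-byte jump, then padsize is
   15 - 12 + 2 == 5, and the align emitted above keeps INSN from
   starting inside the 16-byte window that could otherwise hold all
   four jumps.  */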
23371 /* AMD Athlon works faster
23372 when RET is not the destination of a conditional jump or directly preceded
23373 by another jump instruction. We avoid the penalty by inserting a NOP just
23374 before the RET instruction in such cases. */
23375 static void
23376 ix86_pad_returns (void)
23378 edge e;
23379 edge_iterator ei;
23381 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
23383 basic_block bb = e->src;
23384 rtx ret = BB_END (bb);
23385 rtx prev;
23386 bool replace = false;
23388 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
23389 || !maybe_hot_bb_p (bb))
23390 continue;
23391 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
23392 if (active_insn_p (prev) || LABEL_P (prev))
23393 break;
23394 if (prev && LABEL_P (prev))
23396 edge e;
23397 edge_iterator ei;
23399 FOR_EACH_EDGE (e, ei, bb->preds)
23400 if (EDGE_FREQUENCY (e) && e->src->index >= 0
23401 && !(e->flags & EDGE_FALLTHRU))
23402 replace = true;
23404 if (!replace)
23406 prev = prev_active_insn (ret);
23407 if (prev
23408 && ((JUMP_P (prev) && any_condjump_p (prev))
23409 || CALL_P (prev)))
23410 replace = true;
23411 /* Empty functions get a branch mispredict even when the jump destination
23412 is not visible to us. */
23413 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
23414 replace = true;
23416 if (replace)
23418 emit_insn_before (gen_return_internal_long (), ret);
23419 delete_insn (ret);
23424 /* Implement machine specific optimizations. We implement padding of returns
23425 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
23426 static void
23427 ix86_reorg (void)
23429 if (TARGET_PAD_RETURNS && optimize && !optimize_size)
23430 ix86_pad_returns ();
23431 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
23432 ix86_avoid_jump_misspredicts ();
23435 /* Return nonzero when a QImode register that must be represented via a REX
23436 prefix is used. */
23437 bool
23438 x86_extended_QIreg_mentioned_p (rtx insn)
23440 int i;
23441 extract_insn_cached (insn);
23442 for (i = 0; i < recog_data.n_operands; i++)
23443 if (REG_P (recog_data.operand[i])
23444 && REGNO (recog_data.operand[i]) >= 4)
23445 return true;
23446 return false;
23449 /* Return nonzero when P points to a register encoded via a REX prefix.
23450 Called via for_each_rtx. */
23451 static int
23452 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
23454 unsigned int regno;
23455 if (!REG_P (*p))
23456 return 0;
23457 regno = REGNO (*p);
23458 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
23461 /* Return true when INSN mentions a register that must be encoded using a REX
23462 prefix. */
23463 bool
23464 x86_extended_reg_mentioned_p (rtx insn)
23466 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
23469 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
23470 optabs would emit if we didn't have TFmode patterns. */
23472 void
23473 x86_emit_floatuns (rtx operands[2])
23475 rtx neglab, donelab, i0, i1, f0, in, out;
23476 enum machine_mode mode, inmode;
23478 inmode = GET_MODE (operands[1]);
23479 gcc_assert (inmode == SImode || inmode == DImode);
23481 out = operands[0];
23482 in = force_reg (inmode, operands[1]);
23483 mode = GET_MODE (out);
23484 neglab = gen_label_rtx ();
23485 donelab = gen_label_rtx ();
23486 f0 = gen_reg_rtx (mode);
23488 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
23490 expand_float (out, in, 0);
23492 emit_jump_insn (gen_jump (donelab));
23493 emit_barrier ();
23495 emit_label (neglab);
23497 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
23498 1, OPTAB_DIRECT);
23499 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
23500 1, OPTAB_DIRECT);
23501 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
23503 expand_float (f0, i0, 0);
23505 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
23507 emit_label (donelab);
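/* A rough C equivalent of the RTL emitted above, assuming a 64-bit
   unsigned input "u" converted to float "f":

	if ((long long) u >= 0)
	  f = (float) (long long) u;
	else
	  {
	    unsigned long long h = (u >> 1) | (u & 1);
	    f = (float) (long long) h;
	    f = f + f;
	  }

   Halving keeps the value in signed range; OR-ing in the shifted-out
   bit preserves correct rounding when the result is doubled back.  */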
23510 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
23511 with all elements equal to VAR. Return true if successful. */
23513 static bool
23514 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
23515 rtx target, rtx val)
23517 enum machine_mode smode, wsmode, wvmode;
23518 rtx x;
23520 switch (mode)
23522 case V2SImode:
23523 case V2SFmode:
23524 if (!mmx_ok)
23525 return false;
23526 /* FALLTHRU */
23528 case V2DFmode:
23529 case V2DImode:
23530 case V4SFmode:
23531 case V4SImode:
23532 val = force_reg (GET_MODE_INNER (mode), val);
23533 x = gen_rtx_VEC_DUPLICATE (mode, val);
23534 emit_insn (gen_rtx_SET (VOIDmode, target, x));
23535 return true;
23537 case V4HImode:
23538 if (!mmx_ok)
23539 return false;
23540 if (TARGET_SSE || TARGET_3DNOW_A)
23542 val = gen_lowpart (SImode, val);
23543 x = gen_rtx_TRUNCATE (HImode, val);
23544 x = gen_rtx_VEC_DUPLICATE (mode, x);
23545 emit_insn (gen_rtx_SET (VOIDmode, target, x));
23546 return true;
23548 else
23550 smode = HImode;
23551 wsmode = SImode;
23552 wvmode = V2SImode;
23553 goto widen;
23556 case V8QImode:
23557 if (!mmx_ok)
23558 return false;
23559 smode = QImode;
23560 wsmode = HImode;
23561 wvmode = V4HImode;
23562 goto widen;
23563 case V8HImode:
23564 if (TARGET_SSE2)
23566 rtx tmp1, tmp2;
23567 /* Extend HImode to SImode using a paradoxical SUBREG. */
23568 tmp1 = gen_reg_rtx (SImode);
23569 emit_move_insn (tmp1, gen_lowpart (SImode, val));
23570 /* Insert the SImode value as low element of V4SImode vector. */
23571 tmp2 = gen_reg_rtx (V4SImode);
23572 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
23573 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
23574 CONST0_RTX (V4SImode),
23575 const1_rtx);
23576 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
23577 /* Cast the V4SImode vector back to a V8HImode vector. */
23578 tmp1 = gen_reg_rtx (V8HImode);
23579 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
23580 /* Duplicate the low short through the whole low SImode word. */
23581 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
23582 /* Cast the V8HImode vector back to a V4SImode vector. */
23583 tmp2 = gen_reg_rtx (V4SImode);
23584 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
23585 /* Replicate the low element of the V4SImode vector. */
23586 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
23588 /* Cast the V4SImode vector back to V8HImode, and store in target. */
23588 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
23589 return true;
23591 smode = HImode;
23592 wsmode = SImode;
23593 wvmode = V4SImode;
23594 goto widen;
23595 case V16QImode:
23596 if (TARGET_SSE2)
23598 rtx tmp1, tmp2;
23599 /* Extend QImode to SImode using a paradoxical SUBREG. */
23600 tmp1 = gen_reg_rtx (SImode);
23601 emit_move_insn (tmp1, gen_lowpart (SImode, val));
23602 /* Insert the SImode value as low element of V4SImode vector. */
23603 tmp2 = gen_reg_rtx (V4SImode);
23604 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
23605 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
23606 CONST0_RTX (V4SImode),
23607 const1_rtx);
23608 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
23609 /* Cast the V4SImode vector back to a V16QImode vector. */
23610 tmp1 = gen_reg_rtx (V16QImode);
23611 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
23612 /* Duplicate the low byte through the whole low SImode word. */
23613 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
23614 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
23615 /* Cast the V16QImode vector back to a V4SImode vector. */
23616 tmp2 = gen_reg_rtx (V4SImode);
23617 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
23618 /* Replicate the low element of the V4SImode vector. */
23619 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
23621 /* Cast the V4SImode vector back to V16QImode, and store in target. */
23621 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
23622 return true;
23624 smode = QImode;
23625 wsmode = HImode;
23626 wvmode = V8HImode;
23627 goto widen;
23628 widen:
23629 /* Replicate the value once into the next wider mode and recurse. */
23630 val = convert_modes (wsmode, smode, val, true);
23631 x = expand_simple_binop (wsmode, ASHIFT, val,
23632 GEN_INT (GET_MODE_BITSIZE (smode)),
23633 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23634 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
23636 x = gen_reg_rtx (wvmode);
23637 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
23638 gcc_unreachable ();
23639 emit_move_insn (target, gen_lowpart (mode, x));
23640 return true;
23642 default:
23643 return false;
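/* Illustrative sketch of the V8HImode SSE2 sequence built above,
   broadcasting a 16-bit value already in %eax (register names assumed):

	movd	%eax, %xmm0
	punpcklwd %xmm0, %xmm0
	pshufd	$0, %xmm0, %xmm0

   The V16QImode case replaces the punpcklwd with two punpcklbw
   instructions before the pshufd.  */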
23647 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
23648 whose ONE_VAR element is VAR, and other elements are zero. Return true
23649 if successful. */
23651 static bool
23652 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
23653 rtx target, rtx var, int one_var)
23655 enum machine_mode vsimode;
23656 rtx new_target;
23657 rtx x, tmp;
23658 bool use_vector_set = false;
23660 switch (mode)
23662 case V2DImode:
23663 use_vector_set = TARGET_64BIT && TARGET_SSE4_1;
23664 break;
23665 case V16QImode:
23666 case V4SImode:
23667 case V4SFmode:
23668 use_vector_set = TARGET_SSE4_1;
23669 break;
23670 case V8HImode:
23671 use_vector_set = TARGET_SSE2;
23672 break;
23673 case V4HImode:
23674 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
23675 break;
23676 default:
23677 break;
23680 if (use_vector_set)
23682 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
23683 var = force_reg (GET_MODE_INNER (mode), var);
23684 ix86_expand_vector_set (mmx_ok, target, var, one_var);
23685 return true;
23688 switch (mode)
23690 case V2SFmode:
23691 case V2SImode:
23692 if (!mmx_ok)
23693 return false;
23694 /* FALLTHRU */
23696 case V2DFmode:
23697 case V2DImode:
23698 if (one_var != 0)
23699 return false;
23700 var = force_reg (GET_MODE_INNER (mode), var);
23701 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
23702 emit_insn (gen_rtx_SET (VOIDmode, target, x));
23703 return true;
23705 case V4SFmode:
23706 case V4SImode:
23707 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
23708 new_target = gen_reg_rtx (mode);
23709 else
23710 new_target = target;
23711 var = force_reg (GET_MODE_INNER (mode), var);
23712 x = gen_rtx_VEC_DUPLICATE (mode, var);
23713 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
23714 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
23715 if (one_var != 0)
23717 /* We need to shuffle the value to the correct position, so
23718 create a new pseudo to store the intermediate result. */
23720 /* With SSE2, we can use the integer shuffle insns. */
23721 if (mode != V4SFmode && TARGET_SSE2)
23723 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
23724 GEN_INT (1),
23725 GEN_INT (one_var == 1 ? 0 : 1),
23726 GEN_INT (one_var == 2 ? 0 : 1),
23727 GEN_INT (one_var == 3 ? 0 : 1)));
23728 if (target != new_target)
23729 emit_move_insn (target, new_target);
23730 return true;
23733 /* Otherwise convert the intermediate result to V4SFmode and
23734 use the SSE1 shuffle instructions. */
23735 if (mode != V4SFmode)
23737 tmp = gen_reg_rtx (V4SFmode);
23738 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
23740 else
23741 tmp = new_target;
23743 emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
23744 GEN_INT (1),
23745 GEN_INT (one_var == 1 ? 0 : 1),
23746 GEN_INT (one_var == 2 ? 0+4 : 1+4),
23747 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
23749 if (mode != V4SFmode)
23750 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
23751 else if (tmp != target)
23752 emit_move_insn (target, tmp);
23754 else if (target != new_target)
23755 emit_move_insn (target, new_target);
23756 return true;
23758 case V8HImode:
23759 case V16QImode:
23760 vsimode = V4SImode;
23761 goto widen;
23762 case V4HImode:
23763 case V8QImode:
23764 if (!mmx_ok)
23765 return false;
23766 vsimode = V2SImode;
23767 goto widen;
23768 widen:
23769 if (one_var != 0)
23770 return false;
23772 /* Zero extend the variable element to SImode and recurse. */
23773 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
23775 x = gen_reg_rtx (vsimode);
23776 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
23777 var, one_var))
23778 gcc_unreachable ();
23780 emit_move_insn (target, gen_lowpart (mode, x));
23781 return true;
23783 default:
23784 return false;
23788 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
23789 consisting of the values in VALS. It is known that all elements
23790 except ONE_VAR are constants. Return true if successful. */
23792 static bool
23793 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
23794 rtx target, rtx vals, int one_var)
23796 rtx var = XVECEXP (vals, 0, one_var);
23797 enum machine_mode wmode;
23798 rtx const_vec, x;
23800 const_vec = copy_rtx (vals);
23801 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
23802 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
23804 switch (mode)
23806 case V2DFmode:
23807 case V2DImode:
23808 case V2SFmode:
23809 case V2SImode:
23810 /* For the two element vectors, it's just as easy to use
23811 the general case. */
23812 return false;
23814 case V4SFmode:
23815 case V4SImode:
23816 case V8HImode:
23817 case V4HImode:
23818 break;
23820 case V16QImode:
23821 if (TARGET_SSE4_1)
23822 break;
23823 wmode = V8HImode;
23824 goto widen;
23825 case V8QImode:
23826 wmode = V4HImode;
23827 goto widen;
23828 widen:
23829 /* There's no way to set one QImode entry easily. Combine
23830 the variable value with its adjacent constant value, and
23831 promote to an HImode set. */
23832 x = XVECEXP (vals, 0, one_var ^ 1);
23833 if (one_var & 1)
23835 var = convert_modes (HImode, QImode, var, true);
23836 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
23837 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23838 x = GEN_INT (INTVAL (x) & 0xff);
23840 else
23842 var = convert_modes (HImode, QImode, var, true);
23843 x = gen_int_mode (INTVAL (x) << 8, HImode);
23845 if (x != const0_rtx)
23846 var = expand_simple_binop (HImode, IOR, var, x, var,
23847 1, OPTAB_LIB_WIDEN);
23849 x = gen_reg_rtx (wmode);
23850 emit_move_insn (x, gen_lowpart (wmode, const_vec));
23851 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
23853 emit_move_insn (target, gen_lowpart (mode, x));
23854 return true;
23856 default:
23857 return false;
23860 emit_move_insn (target, const_vec);
23861 ix86_expand_vector_set (mmx_ok, target, var, one_var);
23862 return true;
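/* Worked example, with assumed contents: for a V8QImode vector
   {1, 2, 3, x, 5, 6, 7, 8} and one_var == 3, the widening path above
   zero-extends x to HImode, shifts it left by 8 (one_var is odd), IORs
   in the adjacent constant 3, and then does a V4HImode vec_set of the
   combined halfword at element one_var >> 1 == 1 over the constant
   vector loaded from the pool.  */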
23865 /* A subroutine of ix86_expand_vector_init_general. Use vector
23866 concatenate to handle the most general case: all values variable,
23867 and none identical. */
23869 static void
23870 ix86_expand_vector_init_concat (enum machine_mode mode,
23871 rtx target, rtx *ops, int n)
23873 enum machine_mode cmode, hmode = VOIDmode;
23874 rtx first[4], second[2];
23875 rtvec v;
23876 int i, j;
23878 switch (n)
23880 case 2:
23881 switch (mode)
23883 case V4SImode:
23884 cmode = V2SImode;
23885 break;
23886 case V4SFmode:
23887 cmode = V2SFmode;
23888 break;
23889 case V2DImode:
23890 cmode = DImode;
23891 break;
23892 case V2SImode:
23893 cmode = SImode;
23894 break;
23895 case V2DFmode:
23896 cmode = DFmode;
23897 break;
23898 case V2SFmode:
23899 cmode = SFmode;
23900 break;
23901 default:
23902 gcc_unreachable ();
23905 if (!register_operand (ops[1], cmode))
23906 ops[1] = force_reg (cmode, ops[1]);
23907 if (!register_operand (ops[0], cmode))
23908 ops[0] = force_reg (cmode, ops[0]);
23909 emit_insn (gen_rtx_SET (VOIDmode, target,
23910 gen_rtx_VEC_CONCAT (mode, ops[0],
23911 ops[1])));
23912 break;
23914 case 4:
23915 switch (mode)
23917 case V4SImode:
23918 cmode = V2SImode;
23919 break;
23920 case V4SFmode:
23921 cmode = V2SFmode;
23922 break;
23923 default:
23924 gcc_unreachable ();
23926 goto half;
23928 half:
23929 /* FIXME: We process inputs backward to help RA. PR 36222. */
23930 i = n - 1;
23931 j = (n >> 1) - 1;
23932 for (; i > 0; i -= 2, j--)
23934 first[j] = gen_reg_rtx (cmode);
23935 v = gen_rtvec (2, ops[i - 1], ops[i]);
23936 ix86_expand_vector_init (false, first[j],
23937 gen_rtx_PARALLEL (cmode, v));
23940 n >>= 1;
23941 if (n > 2)
23943 gcc_assert (hmode != VOIDmode);
23944 for (i = j = 0; i < n; i += 2, j++)
23946 second[j] = gen_reg_rtx (hmode);
23947 ix86_expand_vector_init_concat (hmode, second [j],
23948 &first [i], 2);
23950 n >>= 1;
23951 ix86_expand_vector_init_concat (mode, target, second, n);
23953 else
23954 ix86_expand_vector_init_concat (mode, target, first, n);
23955 break;
23957 default:
23958 gcc_unreachable ();
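/* Illustrative example: building a V4SFmode vector from four variable
   floats {a, b, c, d} takes the n == 4 path above, which first forms
   the V2SFmode halves {c, d} and then {a, b} (backward, to help the
   register allocator) and finally emits a single V4SFmode VEC_CONCAT
   of the two halves into TARGET.  */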
23962 /* A subroutine of ix86_expand_vector_init_general. Use vector
23963 interleave to handle the most general case: all values variable,
23964 and none identical. */
23966 static void
23967 ix86_expand_vector_init_interleave (enum machine_mode mode,
23968 rtx target, rtx *ops, int n)
23970 enum machine_mode first_imode, second_imode, third_imode;
23971 int i, j;
23972 rtx op0, op1;
23973 rtx (*gen_load_even) (rtx, rtx, rtx);
23974 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
23975 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
23977 switch (mode)
23979 case V8HImode:
23980 gen_load_even = gen_vec_setv8hi;
23981 gen_interleave_first_low = gen_vec_interleave_lowv4si;
23982 gen_interleave_second_low = gen_vec_interleave_lowv2di;
23983 first_imode = V4SImode;
23984 second_imode = V2DImode;
23985 third_imode = VOIDmode;
23986 break;
23987 case V16QImode:
23988 gen_load_even = gen_vec_setv16qi;
23989 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
23990 gen_interleave_second_low = gen_vec_interleave_lowv4si;
23991 first_imode = V8HImode;
23992 second_imode = V4SImode;
23993 third_imode = V2DImode;
23994 break;
23995 default:
23996 gcc_unreachable ();
23999 for (i = 0; i < n; i++)
24001 /* Extend the odd element to SImode using a paradoxical SUBREG. */
24002 op0 = gen_reg_rtx (SImode);
24003 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
24005 /* Insert the SImode value as low element of V4SImode vector. */
24006 op1 = gen_reg_rtx (V4SImode);
24007 op0 = gen_rtx_VEC_MERGE (V4SImode,
24008 gen_rtx_VEC_DUPLICATE (V4SImode,
24009 op0),
24010 CONST0_RTX (V4SImode),
24011 const1_rtx);
24012 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
24014 /* Cast the V4SImode vector back to a vector in the original mode. */
24015 op0 = gen_reg_rtx (mode);
24016 emit_move_insn (op0, gen_lowpart (mode, op1));
24018 /* Load even elements into the second position. */
24019 emit_insn ((*gen_load_even) (op0, ops [i + i + 1],
24020 const1_rtx));
24022 /* Cast vector to FIRST_IMODE vector. */
24023 ops[i] = gen_reg_rtx (first_imode);
24024 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
24027 /* Interleave low FIRST_IMODE vectors. */
24028 for (i = j = 0; i < n; i += 2, j++)
24030 op0 = gen_reg_rtx (first_imode);
24031 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
24033 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
24034 ops[j] = gen_reg_rtx (second_imode);
24035 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
24038 /* Interleave low SECOND_IMODE vectors. */
24039 switch (second_imode)
24041 case V4SImode:
24042 for (i = j = 0; i < n / 2; i += 2, j++)
24044 op0 = gen_reg_rtx (second_imode);
24045 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
24046 ops[i + 1]));
24048 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
24049 vector. */
24050 ops[j] = gen_reg_rtx (third_imode);
24051 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
24053 second_imode = V2DImode;
24054 gen_interleave_second_low = gen_vec_interleave_lowv2di;
24055 /* FALLTHRU */
24057 case V2DImode:
24058 op0 = gen_reg_rtx (second_imode);
24059 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
24060 ops[1]));
24062 /* Cast the SECOND_IMODE vector back to a vector in the original
24063 mode. */
24064 emit_insn (gen_rtx_SET (VOIDmode, target,
24065 gen_lowpart (mode, op0)));
24066 break;
24068 default:
24069 gcc_unreachable ();
24073 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
24074 all values variable, and none identical. */
24076 static void
24077 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
24078 rtx target, rtx vals)
24080 rtx ops[16];
24081 int n, i;
24083 switch (mode)
24085 case V2SFmode:
24086 case V2SImode:
24087 if (!mmx_ok && !TARGET_SSE)
24088 break;
24089 /* FALLTHRU */
24091 case V4SFmode:
24092 case V4SImode:
24093 case V2DFmode:
24094 case V2DImode:
24095 n = GET_MODE_NUNITS (mode);
24096 for (i = 0; i < n; i++)
24097 ops[i] = XVECEXP (vals, 0, i);
24098 ix86_expand_vector_init_concat (mode, target, ops, n);
24099 return;
24101 case V16QImode:
24102 if (!TARGET_SSE4_1)
24103 break;
24104 /* FALLTHRU */
24106 case V8HImode:
24107 if (!TARGET_SSE2)
24108 break;
24110 n = GET_MODE_NUNITS (mode);
24111 for (i = 0; i < n; i++)
24112 ops[i] = XVECEXP (vals, 0, i);
24113 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
24114 return;
24116 case V4HImode:
24117 case V8QImode:
24118 break;
24120 default:
24121 gcc_unreachable ();
24125 int i, j, n_elts, n_words, n_elt_per_word;
24126 enum machine_mode inner_mode;
24127 rtx words[4], shift;
24129 inner_mode = GET_MODE_INNER (mode);
24130 n_elts = GET_MODE_NUNITS (mode);
24131 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
24132 n_elt_per_word = n_elts / n_words;
24133 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
24135 for (i = 0; i < n_words; ++i)
24137 rtx word = NULL_RTX;
24139 for (j = 0; j < n_elt_per_word; ++j)
24141 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
24142 elt = convert_modes (word_mode, inner_mode, elt, true);
24144 if (j == 0)
24145 word = elt;
24146 else
24148 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
24149 word, 1, OPTAB_LIB_WIDEN);
24150 word = expand_simple_binop (word_mode, IOR, word, elt,
24151 word, 1, OPTAB_LIB_WIDEN);
24155 words[i] = word;
24158 if (n_words == 1)
24159 emit_move_insn (target, gen_lowpart (mode, words[0]));
24160 else if (n_words == 2)
24162 rtx tmp = gen_reg_rtx (mode);
24163 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
24164 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
24165 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
24166 emit_move_insn (target, tmp);
24168 else if (n_words == 4)
24170 rtx tmp = gen_reg_rtx (V4SImode);
24171 gcc_assert (word_mode == SImode);
24172 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
24173 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
24174 emit_move_insn (target, gen_lowpart (mode, tmp));
24176 else
24177 gcc_unreachable ();
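/* Worked example of the word-building fallback above: a V4HImode
   vector {a, b, c, d} on a 32-bit target has n_words == 2 and
   n_elt_per_word == 2, so the SImode words are assembled roughly as

	word0 = (b << 16) | a;
	word1 = (d << 16) | c;

   and then moved into the low and high halves of a clobbered
   temporary of the vector mode.  */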
24181 /* Initialize vector TARGET via VALS. Suppress the use of MMX
24182 instructions unless MMX_OK is true. */
24184 void
24185 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
24187 enum machine_mode mode = GET_MODE (target);
24188 enum machine_mode inner_mode = GET_MODE_INNER (mode);
24189 int n_elts = GET_MODE_NUNITS (mode);
24190 int n_var = 0, one_var = -1;
24191 bool all_same = true, all_const_zero = true;
24192 int i;
24193 rtx x;
24195 for (i = 0; i < n_elts; ++i)
24197 x = XVECEXP (vals, 0, i);
24198 if (!(CONST_INT_P (x)
24199 || GET_CODE (x) == CONST_DOUBLE
24200 || GET_CODE (x) == CONST_FIXED))
24201 n_var++, one_var = i;
24202 else if (x != CONST0_RTX (inner_mode))
24203 all_const_zero = false;
24204 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
24205 all_same = false;
24208 /* Constants are best loaded from the constant pool. */
24209 if (n_var == 0)
24211 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
24212 return;
24215 /* If all values are identical, broadcast the value. */
24216 if (all_same
24217 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
24218 XVECEXP (vals, 0, 0)))
24219 return;
24221 /* Values where only one field is non-constant are best loaded from
24222 the pool and overwritten via move later. */
24223 if (n_var == 1)
24225 if (all_const_zero
24226 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
24227 XVECEXP (vals, 0, one_var),
24228 one_var))
24229 return;
24231 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
24232 return;
24235 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
24238 void
24239 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
24241 enum machine_mode mode = GET_MODE (target);
24242 enum machine_mode inner_mode = GET_MODE_INNER (mode);
24243 bool use_vec_merge = false;
24244 rtx tmp;
24246 switch (mode)
24248 case V2SFmode:
24249 case V2SImode:
24250 if (mmx_ok)
24252 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
24253 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
24254 if (elt == 0)
24255 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
24256 else
24257 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
24258 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
24259 return;
24261 break;
24263 case V2DImode:
24264 use_vec_merge = TARGET_SSE4_1;
24265 if (use_vec_merge)
24266 break;
24268 case V2DFmode:
24270 rtx op0, op1;
24272 /* For the two element vectors, we implement a VEC_CONCAT with
24273 the extraction of the other element. */
24275 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
24276 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
24278 if (elt == 0)
24279 op0 = val, op1 = tmp;
24280 else
24281 op0 = tmp, op1 = val;
24283 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
24284 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
24286 return;
24288 case V4SFmode:
24289 use_vec_merge = TARGET_SSE4_1;
24290 if (use_vec_merge)
24291 break;
24293 switch (elt)
24295 case 0:
24296 use_vec_merge = true;
24297 break;
24299 case 1:
24300 /* tmp = target = A B C D */
24301 tmp = copy_to_reg (target);
24302 /* target = A A B B */
24303 emit_insn (gen_sse_unpcklps (target, target, target));
24304 /* target = X A B B */
24305 ix86_expand_vector_set (false, target, val, 0);
24306 /* target = A X C D */
24307 emit_insn (gen_sse_shufps_1 (target, target, tmp,
24308 GEN_INT (1), GEN_INT (0),
24309 GEN_INT (2+4), GEN_INT (3+4)));
24310 return;
24312 case 2:
24313 /* tmp = target = A B C D */
24314 tmp = copy_to_reg (target);
24315 /* tmp = X B C D */
24316 ix86_expand_vector_set (false, tmp, val, 0);
24317 /* target = A B X D */
24318 emit_insn (gen_sse_shufps_1 (target, target, tmp,
24319 GEN_INT (0), GEN_INT (1),
24320 GEN_INT (0+4), GEN_INT (3+4)));
24321 return;
24323 case 3:
24324 /* tmp = target = A B C D */
24325 tmp = copy_to_reg (target);
24326 /* tmp = X B C D */
24327 ix86_expand_vector_set (false, tmp, val, 0);
24328 /* target = A B C X */
24329 emit_insn (gen_sse_shufps_1 (target, target, tmp,
24330 GEN_INT (0), GEN_INT (1),
24331 GEN_INT (2+4), GEN_INT (0+4)));
24332 return;
24334 default:
24335 gcc_unreachable ();
24337 break;
24339 case V4SImode:
24340 use_vec_merge = TARGET_SSE4_1;
24341 if (use_vec_merge)
24342 break;
24344 /* Element 0 handled by vec_merge below. */
24345 if (elt == 0)
24347 use_vec_merge = true;
24348 break;
24351 if (TARGET_SSE2)
24353 /* With SSE2, use integer shuffles to swap element 0 and ELT,
24354 store into element 0, then shuffle them back. */
24356 rtx order[4];
24358 order[0] = GEN_INT (elt);
24359 order[1] = const1_rtx;
24360 order[2] = const2_rtx;
24361 order[3] = GEN_INT (3);
24362 order[elt] = const0_rtx;
24364 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
24365 order[1], order[2], order[3]));
24367 ix86_expand_vector_set (false, target, val, 0);
24369 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
24370 order[1], order[2], order[3]));
24372 else
24374 /* For SSE1, we have to reuse the V4SF code. */
24375 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
24376 gen_lowpart (SFmode, val), elt);
24378 return;
24380 case V8HImode:
24381 use_vec_merge = TARGET_SSE2;
24382 break;
24383 case V4HImode:
24384 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
24385 break;
24387 case V16QImode:
24388 use_vec_merge = TARGET_SSE4_1;
24389 break;
24391 case V8QImode:
24392 default:
24393 break;
24396 if (use_vec_merge)
24398 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
24399 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
24400 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
24402 else
24404 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
24406 emit_move_insn (mem, target);
24408 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
24409 emit_move_insn (tmp, val);
24411 emit_move_insn (target, mem);
24415 void
24416 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
24418 enum machine_mode mode = GET_MODE (vec);
24419 enum machine_mode inner_mode = GET_MODE_INNER (mode);
24420 bool use_vec_extr = false;
24421 rtx tmp;
24423 switch (mode)
24425 case V2SImode:
24426 case V2SFmode:
24427 if (!mmx_ok)
24428 break;
24429 /* FALLTHRU */
24431 case V2DFmode:
24432 case V2DImode:
24433 use_vec_extr = true;
24434 break;
24436 case V4SFmode:
24437 use_vec_extr = TARGET_SSE4_1;
24438 if (use_vec_extr)
24439 break;
24441 switch (elt)
24443 case 0:
24444 tmp = vec;
24445 break;
24447 case 1:
24448 case 3:
24449 tmp = gen_reg_rtx (mode);
24450 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
24451 GEN_INT (elt), GEN_INT (elt),
24452 GEN_INT (elt+4), GEN_INT (elt+4)));
24453 break;
24455 case 2:
24456 tmp = gen_reg_rtx (mode);
24457 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
24458 break;
24460 default:
24461 gcc_unreachable ();
24463 vec = tmp;
24464 use_vec_extr = true;
24465 elt = 0;
24466 break;
24468 case V4SImode:
24469 use_vec_extr = TARGET_SSE4_1;
24470 if (use_vec_extr)
24471 break;
24473 if (TARGET_SSE2)
24475 switch (elt)
24477 case 0:
24478 tmp = vec;
24479 break;
24481 case 1:
24482 case 3:
24483 tmp = gen_reg_rtx (mode);
24484 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
24485 GEN_INT (elt), GEN_INT (elt),
24486 GEN_INT (elt), GEN_INT (elt)));
24487 break;
24489 case 2:
24490 tmp = gen_reg_rtx (mode);
24491 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
24492 break;
24494 default:
24495 gcc_unreachable ();
24497 vec = tmp;
24498 use_vec_extr = true;
24499 elt = 0;
24501 else
24503 /* For SSE1, we have to reuse the V4SF code. */
24504 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
24505 gen_lowpart (V4SFmode, vec), elt);
24506 return;
24508 break;
24510 case V8HImode:
24511 use_vec_extr = TARGET_SSE2;
24512 break;
24513 case V4HImode:
24514 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
24515 break;
24517 case V16QImode:
24518 use_vec_extr = TARGET_SSE4_1;
24519 break;
24521 case V8QImode:
24522 /* ??? Could extract the appropriate HImode element and shift. */
24523 default:
24524 break;
24527 if (use_vec_extr)
24529 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
24530 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
24532 /* Let the rtl optimizers know about the zero extension performed. */
24533 if (inner_mode == QImode || inner_mode == HImode)
24535 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
24536 target = gen_lowpart (SImode, target);
24539 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
24541 else
24543 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
24545 emit_move_insn (mem, vec);
24547 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
24548 emit_move_insn (target, tmp);
24552 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
24553 pattern to reduce; DEST is the destination; IN is the input vector. */
24555 void
24556 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
24558 rtx tmp1, tmp2, tmp3;
24560 tmp1 = gen_reg_rtx (V4SFmode);
24561 tmp2 = gen_reg_rtx (V4SFmode);
24562 tmp3 = gen_reg_rtx (V4SFmode);
24564 emit_insn (gen_sse_movhlps (tmp1, in, in));
24565 emit_insn (fn (tmp2, tmp1, in));
24567 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
24568 GEN_INT (1), GEN_INT (1),
24569 GEN_INT (1+4), GEN_INT (1+4)));
24570 emit_insn (fn (dest, tmp2, tmp3));
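/* Illustrative dataflow for the reduction above, with IN = {a,b,c,d}
   and FN addition:

	tmp1 = movhlps (in, in)		     = {c, d, c, d}
	tmp2 = tmp1 + in		     = {a+c, b+d, ...}
	tmp3 = shufps (tmp2, tmp2, 1,1,1,1)  = {b+d, b+d, b+d, b+d}
	dest = tmp2 + tmp3

   so element 0 of DEST holds a+b+c+d; the other elements are not
   meaningful.  */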
24573 /* Target hook for scalar_mode_supported_p. */
24574 static bool
24575 ix86_scalar_mode_supported_p (enum machine_mode mode)
24577 if (DECIMAL_FLOAT_MODE_P (mode))
24578 return true;
24579 else if (mode == TFmode)
24580 return TARGET_64BIT;
24581 else
24582 return default_scalar_mode_supported_p (mode);
24585 /* Implements target hook vector_mode_supported_p. */
24586 static bool
24587 ix86_vector_mode_supported_p (enum machine_mode mode)
24589 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
24590 return true;
24591 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
24592 return true;
24593 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
24594 return true;
24595 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
24596 return true;
24597 return false;
24600 /* Target hook for c_mode_for_suffix. */
24601 static enum machine_mode
24602 ix86_c_mode_for_suffix (char suffix)
24604 if (TARGET_64BIT && suffix == 'q')
24605 return TFmode;
24606 if (TARGET_MMX && suffix == 'w')
24607 return XFmode;
24609 return VOIDmode;
24612 /* Worker function for TARGET_MD_ASM_CLOBBERS.
24614 We do this in the new i386 backend to maintain source compatibility
24615 with the old cc0-based compiler. */
24617 static tree
24618 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
24619 tree inputs ATTRIBUTE_UNUSED,
24620 tree clobbers)
24622 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
24623 clobbers);
24624 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
24625 clobbers);
24626 return clobbers;
24629 /* Implements the target vector entry targetm.asm.encode_section_info. This
24630 is not used by NetWare. */
24632 static void ATTRIBUTE_UNUSED
24633 ix86_encode_section_info (tree decl, rtx rtl, int first)
24635 default_encode_section_info (decl, rtl, first);
24637 if (TREE_CODE (decl) == VAR_DECL
24638 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
24639 && ix86_in_large_data_p (decl))
24640 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
24643 /* Worker function for REVERSE_CONDITION. */
24645 enum rtx_code
24646 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
24648 return (mode != CCFPmode && mode != CCFPUmode
24649 ? reverse_condition (code)
24650 : reverse_condition_maybe_unordered (code));
24653 /* Output code to perform an x87 FP register move, from OPERANDS[1]
24654 to OPERANDS[0]. */
24656 const char *
24657 output_387_reg_move (rtx insn, rtx *operands)
24659 if (REG_P (operands[0]))
24661 if (REG_P (operands[1])
24662 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
24664 if (REGNO (operands[0]) == FIRST_STACK_REG)
24665 return output_387_ffreep (operands, 0);
24666 return "fstp\t%y0";
24668 if (STACK_TOP_P (operands[0]))
24669 return "fld%z1\t%y1";
24670 return "fst\t%y0";
24672 else if (MEM_P (operands[0]))
24674 gcc_assert (REG_P (operands[1]));
24675 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
24676 return "fstp%z0\t%y0";
24677 else
24679 /* There is no non-popping store to memory for XFmode.
24680 So if we need one, follow the store with a load. */
24681 if (GET_MODE (operands[0]) == XFmode)
24682 return "fstp%z0\t%y0\n\tfld%z0\t%y0";
24683 else
24684 return "fst%z0\t%y0";
24687 else
24688 gcc_unreachable();
24691 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
24692 the FP status register is set. */
24694 void
24695 ix86_emit_fp_unordered_jump (rtx label)
24697 rtx reg = gen_reg_rtx (HImode);
24698 rtx temp;
24700 emit_insn (gen_x86_fnstsw_1 (reg));
24702 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_size))
24704 emit_insn (gen_x86_sahf_1 (reg));
24706 temp = gen_rtx_REG (CCmode, FLAGS_REG);
24707 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
24709 else
24711 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
24713 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
24714 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
24717 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
24718 gen_rtx_LABEL_REF (VOIDmode, label),
24719 pc_rtx);
24720 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
24722 emit_jump_insn (temp);
24723 predict_jump (REG_BR_PROB_BASE * 10 / 100);
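/* A minimal sketch (an assumption, not GCC code) of the flag being
   tested: the non-SAHF path checks the x87 C2 bit, bit 10 of the FPU
   status word, which is bit 2 of %ah after fnstsw - hence
   GEN_INT (0x04); the SAHF path reaches the same bit through PF.  */
static int model_fpsw_c2_set (unsigned short fpsw)
{
  return (fpsw & 0x0400) != 0;	/* C2 is bit 10 of the status word.  */
}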
24726 /* Output code to perform a log1p XFmode calculation. */
24728 void ix86_emit_i387_log1p (rtx op0, rtx op1)
24730 rtx label1 = gen_label_rtx ();
24731 rtx label2 = gen_label_rtx ();
24733 rtx tmp = gen_reg_rtx (XFmode);
24734 rtx tmp2 = gen_reg_rtx (XFmode);
24736 emit_insn (gen_absxf2 (tmp, op1));
24737 emit_insn (gen_cmpxf (tmp,
24738 CONST_DOUBLE_FROM_REAL_VALUE (
24739 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
24740 XFmode)));
24741 emit_jump_insn (gen_bge (label1));
24743 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
24744 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
24745 emit_jump (label2);
24747 emit_label (label1);
24748 emit_move_insn (tmp, CONST1_RTX (XFmode));
24749 emit_insn (gen_addxf3 (tmp, op1, tmp));
24750 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
24751 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
24753 emit_label (label2);
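/* A scalar model of the sequence above (a sketch under assumptions;
   the model_* names are illustrative, not GCC functions).  fyl2xp1
   computes y * log2 (x + 1) but is only accurate for
   |x| < 1 - sqrt(2)/2, which is the 0.2928932... threshold tested
   above; outside that range, fyl2x is used on 1 + x instead.  The
   fldln2 constant ln(2) turns the base-2 log into a natural log.  */
#include <math.h>
static double model_log1p (double x)
{
  const double ln2 = 0.69314718055994530942;	/* fldln2 */
  if (fabs (x) < 0.29289321881345247561)
    return ln2 * log2 (x + 1.0);	/* models fyl2xp1 (op0, op1, ln2) */
  else
    return ln2 * log2 (1.0 + x);	/* models fyl2x on tmp = 1 + x */
}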
24756 /* Output code to perform a Newton-Raphson approximation of a single precision
24757 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
24759 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
24761 rtx x0, x1, e0, e1, two;
24763 x0 = gen_reg_rtx (mode);
24764 e0 = gen_reg_rtx (mode);
24765 e1 = gen_reg_rtx (mode);
24766 x1 = gen_reg_rtx (mode);
24768 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
24770 if (VECTOR_MODE_P (mode))
24771 two = ix86_build_const_vector (SFmode, true, two);
24773 two = force_reg (mode, two);
24775 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
24777 /* x0 = rcp(b) estimate */
24778 emit_insn (gen_rtx_SET (VOIDmode, x0,
24779 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
24780 UNSPEC_RCP)));
24781 /* e0 = x0 * b */
24782 emit_insn (gen_rtx_SET (VOIDmode, e0,
24783 gen_rtx_MULT (mode, x0, b)));
24784 /* e1 = 2. - e0 */
24785 emit_insn (gen_rtx_SET (VOIDmode, e1,
24786 gen_rtx_MINUS (mode, two, e0)));
24787 /* x1 = x0 * e1 */
24788 emit_insn (gen_rtx_SET (VOIDmode, x1,
24789 gen_rtx_MULT (mode, x0, e1)));
24790 /* res = a * x1 */
24791 emit_insn (gen_rtx_SET (VOIDmode, res,
24792 gen_rtx_MULT (mode, a, x1)));
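/* A scalar sketch (an assumption; not a GCC function) of the refinement
   emitted above.  rcpss returns an ~12-bit estimate of 1/b; one
   Newton-Raphson step x1 = x0 * (2 - b * x0) roughly doubles its
   precision, and the quotient is then a * x1.  */
static float model_swdiv (float a, float b, float rcp_b /* rcpss (b) */)
{
  float x0 = rcp_b;		/* x0 = rcp(b) estimate */
  float e0 = x0 * b;		/* e0 = x0 * b */
  float e1 = 2.0f - e0;		/* e1 = 2. - e0 */
  float x1 = x0 * e1;		/* x1 = x0 * e1 */
  return a * x1;		/* res = a * x1 */
}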
24795 /* Output code to perform a Newton-Raphson approximation of a
24796 single precision floating point [reciprocal] square root. */
24798 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
24799 bool recip)
24801 rtx x0, e0, e1, e2, e3, mthree, mhalf;
24802 REAL_VALUE_TYPE r;
24804 x0 = gen_reg_rtx (mode);
24805 e0 = gen_reg_rtx (mode);
24806 e1 = gen_reg_rtx (mode);
24807 e2 = gen_reg_rtx (mode);
24808 e3 = gen_reg_rtx (mode);
24810 real_from_integer (&r, VOIDmode, -3, -1, 0);
24811 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
24813 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
24814 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
24816 if (VECTOR_MODE_P (mode))
24818 mthree = ix86_build_const_vector (SFmode, true, mthree);
24819 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
24822 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
24823 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
24825 /* x0 = rsqrt(a) estimate */
24826 emit_insn (gen_rtx_SET (VOIDmode, x0,
24827 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
24828 UNSPEC_RSQRT)));
24830 /* If a == 0.0, mask the infinite rsqrt estimate to zero so that sqrt (0.0) does not produce a NaN. */
24831 if (!recip)
24833 rtx zero, mask;
24836 mask = gen_reg_rtx (mode);
24838 zero = force_reg (mode, CONST0_RTX (mode));
24839 emit_insn (gen_rtx_SET (VOIDmode, mask,
24840 gen_rtx_NE (mode, zero, a)));
24842 emit_insn (gen_rtx_SET (VOIDmode, x0,
24843 gen_rtx_AND (mode, x0, mask)));
24846 /* e0 = x0 * a */
24847 emit_insn (gen_rtx_SET (VOIDmode, e0,
24848 gen_rtx_MULT (mode, x0, a)));
24849 /* e1 = e0 * x0 */
24850 emit_insn (gen_rtx_SET (VOIDmode, e1,
24851 gen_rtx_MULT (mode, e0, x0)));
24853 /* e2 = e1 - 3. */
24854 mthree = force_reg (mode, mthree);
24855 emit_insn (gen_rtx_SET (VOIDmode, e2,
24856 gen_rtx_PLUS (mode, e1, mthree)));
24858 mhalf = force_reg (mode, mhalf);
24859 if (recip)
24860 /* e3 = -.5 * x0 */
24861 emit_insn (gen_rtx_SET (VOIDmode, e3,
24862 gen_rtx_MULT (mode, x0, mhalf)));
24863 else
24864 /* e3 = -.5 * e0 */
24865 emit_insn (gen_rtx_SET (VOIDmode, e3,
24866 gen_rtx_MULT (mode, e0, mhalf)));
24867 /* ret = e2 * e3 */
24868 emit_insn (gen_rtx_SET (VOIDmode, res,
24869 gen_rtx_MULT (mode, e2, e3)));
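/* A scalar sketch (an assumption; not a GCC function) of the refinement
   emitted above, following the two formulas in the comment: one
   Newton-Raphson step on the ~12-bit rsqrtss estimate.  */
static float model_swsqrt (float a, float x0 /* rsqrtss (a) */, int recip)
{
  float e0 = x0 * a;
  float e1 = e0 * x0;			/* a * x0 * x0 */
  float e2 = e1 - 3.0f;			/* e1 + (-3.0) */
  float e3 = (recip ? x0 : e0) * -0.5f;
  return e2 * e3;			/* sqrt (a) or 1 / sqrt (a) */
}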
24872 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
24874 static void ATTRIBUTE_UNUSED
24875 i386_solaris_elf_named_section (const char *name, unsigned int flags,
24876 tree decl)
24878 /* With Binutils 2.15, the "@unwind" marker must be specified on
24879 every occurrence of the ".eh_frame" section, not just the first
24880 one. */
24881 if (TARGET_64BIT
24882 && strcmp (name, ".eh_frame") == 0)
24884 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
24885 flags & SECTION_WRITE ? "aw" : "a");
24886 return;
24888 default_elf_asm_named_section (name, flags, decl);
24891 /* Return the mangling of TYPE if it is an extended fundamental type. */
24893 static const char *
24894 ix86_mangle_type (const_tree type)
24896 type = TYPE_MAIN_VARIANT (type);
24898 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
24899 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
24900 return NULL;
24902 switch (TYPE_MODE (type))
24904 case TFmode:
24905 /* __float128 is "g". */
24906 return "g";
24907 case XFmode:
24908 /* "long double" or __float80 is "e". */
24909 return "e";
24910 default:
24911 return NULL;
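/* Illustrative note (an assumption, not part of this file): these codes
   are Itanium C++ ABI <builtin-type> letters, so e.g.
       void f (__float128);   mangles to  _Z1fg
       void f (long double);  mangles to  _Z1fe  */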
24915 /* For 32-bit code we can save PIC register setup by using
24916 __stack_chk_fail_local hidden function instead of calling
24917 __stack_chk_fail directly. 64-bit code doesn't need to setup any PIC
24918 register, so it is better to call __stack_chk_fail directly. */
24920 static tree
24921 ix86_stack_protect_fail (void)
24923 return TARGET_64BIT
24924 ? default_external_stack_protect_fail ()
24925 : default_hidden_stack_protect_fail ();
24928 /* Select a format to encode pointers in exception handling data. CODE
24929 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
24930 true if the symbol may be affected by dynamic relocations.
24932 ??? All x86 object file formats are capable of representing this.
24933 After all, the relocation needed is the same as for the call insn.
24934 Whether or not a particular assembler allows us to enter such, I
24935 guess we'll have to see. */
24936 int
24937 asm_preferred_eh_data_format (int code, int global)
24939 if (flag_pic)
24941 int type = DW_EH_PE_sdata8;
24942 if (!TARGET_64BIT
24943 || ix86_cmodel == CM_SMALL_PIC
24944 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
24945 type = DW_EH_PE_sdata4;
24946 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
24948 if (ix86_cmodel == CM_SMALL
24949 || (ix86_cmodel == CM_MEDIUM && code))
24950 return DW_EH_PE_udata4;
24951 return DW_EH_PE_absptr;
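/* Illustrative note (an assumption, not part of this file): for 32-bit
   PIC code this selects DW_EH_PE_pcrel | DW_EH_PE_sdata4, adding
   DW_EH_PE_indirect when GLOBAL; non-PIC small-model code gets plain
   DW_EH_PE_udata4 and everything else DW_EH_PE_absptr.  */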
24954 /* Expand copysign: combine the sign of SIGN with the positive value
24955 ABS_VALUE, storing the result in RESULT. If MASK is non-null, it is
24956 the fabs mask (all bits set except the sign bit); its complement is
used to extract the sign. */
24957 static void
24958 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
24960 enum machine_mode mode = GET_MODE (sign);
24961 rtx sgn = gen_reg_rtx (mode);
24962 if (mask == NULL_RTX)
24964 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
24965 if (!VECTOR_MODE_P (mode))
24967 /* We need to generate a scalar mode mask in this case. */
24968 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
24969 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
24970 mask = gen_reg_rtx (mode);
24971 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
24974 else
24975 mask = gen_rtx_NOT (mode, mask);
24976 emit_insn (gen_rtx_SET (VOIDmode, sgn,
24977 gen_rtx_AND (mode, mask, sign)));
24978 emit_insn (gen_rtx_SET (VOIDmode, result,
24979 gen_rtx_IOR (mode, abs_value, sgn)));
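/* A scalar sketch (an assumption; not a GCC function) of the bit-level
   effect: the AND isolates the sign bit of SIGN, and the IOR plants it
   on the already-positive ABS_VALUE.  The fabs expansion below uses the
   complementary mask to clear the sign bit instead.  */
#include <stdint.h>
#include <string.h>
static float model_copysign_to_positive (float abs_value, float sign)
{
  uint32_t a, s;
  memcpy (&a, &abs_value, sizeof a);
  memcpy (&s, &sign, sizeof s);
  a |= s & 0x80000000u;		/* sgn = mask & sign;  res = abs | sgn */
  memcpy (&abs_value, &a, sizeof a);
  return abs_value;
}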
24982 /* Expand fabs (OP0) and return a new rtx that holds the result. The
24983 mask for masking out the sign-bit is stored in *SMASK, if that is
24984 non-null. */
24985 static rtx
24986 ix86_expand_sse_fabs (rtx op0, rtx *smask)
24988 enum machine_mode mode = GET_MODE (op0);
24989 rtx xa, mask;
24991 xa = gen_reg_rtx (mode);
24992 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
24993 if (!VECTOR_MODE_P (mode))
24995 /* We need to generate a scalar mode mask in this case. */
24996 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
24997 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
24998 mask = gen_reg_rtx (mode);
24999 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
25001 emit_insn (gen_rtx_SET (VOIDmode, xa,
25002 gen_rtx_AND (mode, op0, mask)));
25004 if (smask)
25005 *smask = mask;
25007 return xa;
25010 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
25011 swapping the operands if SWAP_OPERANDS is true. The expanded
25012 code is a forward jump to a newly created label in case the
25013 comparison is true. The generated label rtx is returned. */
25014 static rtx
25015 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
25016 bool swap_operands)
25018 rtx label, tmp;
25020 if (swap_operands)
25022 tmp = op0;
25023 op0 = op1;
25024 op1 = tmp;
25027 label = gen_label_rtx ();
25028 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
25029 emit_insn (gen_rtx_SET (VOIDmode, tmp,
25030 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
25031 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
25032 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
25033 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
25034 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
25035 JUMP_LABEL (tmp) = label;
25037 return label;
25040 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
25041 using comparison code CODE. Operands are swapped for the comparison if
25042 SWAP_OPERANDS is true. Returns an rtx for the generated mask. */
25043 static rtx
25044 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
25045 bool swap_operands)
25047 enum machine_mode mode = GET_MODE (op0);
25048 rtx mask = gen_reg_rtx (mode);
25050 if (swap_operands)
25052 rtx tmp = op0;
25053 op0 = op1;
25054 op1 = tmp;
25057 if (mode == DFmode)
25058 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
25059 gen_rtx_fmt_ee (code, mode, op0, op1)));
25060 else
25061 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
25062 gen_rtx_fmt_ee (code, mode, op0, op1)));
25064 return mask;
25067 /* Generate and return an rtx of mode MODE for 2**n where n is the number
25068 of bits in the mantissa of MODE, which must be one of DFmode or SFmode. */
25069 static rtx
25070 ix86_gen_TWO52 (enum machine_mode mode)
25072 REAL_VALUE_TYPE TWO52r;
25073 rtx TWO52;
25075 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
25076 TWO52 = const_double_from_real_value (TWO52r, mode);
25077 TWO52 = force_reg (mode, TWO52);
25079 return TWO52;
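/* A scalar sketch (an assumption; not a GCC function) of why this
   constant is useful: for 0 <= x < 2**52 (DFmode), adding and then
   subtracting 2**52 pushes the fraction bits out of the significand,
   so the result is x rounded to an integer in the current rounding
   mode.  */
static double model_round_via_two52 (double x)
{
  const double two52 = 4503599627370496.0;	/* 2**52 */
  volatile double t = x + two52;	/* volatile: keep the rounding step */
  return t - two52;
}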
25082 /* Expand SSE sequence for computing lround from OP1 storing
25083 into OP0. */
25084 void
25085 ix86_expand_lround (rtx op0, rtx op1)
25087 /* C code for the stuff we're doing below:
25088 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
25089 return (long)tmp;
25091 enum machine_mode mode = GET_MODE (op1);
25092 const struct real_format *fmt;
25093 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
25094 rtx adj;
25096 /* load nextafter (0.5, 0.0) */
25097 fmt = REAL_MODE_FORMAT (mode);
25098 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
25099 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
25101 /* adj = copysign (0.5, op1) */
25102 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
25103 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
25105 /* adj = op1 + adj */
25106 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
25108 /* op0 = (imode)adj */
25109 expand_fix (op0, adj, 0);
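/* A scalar sketch (an assumption; not a GCC function) of the sequence
   above, and of why nextafter (0.5, 0.0) is loaded instead of 0.5: for
   the largest double below 0.5, adding exactly 0.5 rounds up to 1.0,
   so lround would wrongly return 1.  */
#include <math.h>
static long model_lround (double x)
{
  double adj = copysign (nextafter (0.5, 0.0), x);
  return (long) (x + adj);	/* truncation after the adjustment */
}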
25112 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1
25113 storing into OPERAND0. */
25114 void
25115 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
25117 /* C code for the stuff we're doing below (for do_floor):
25118 xi = (long)op1;
25119 xi -= (double)xi > op1 ? 1 : 0;
25120 return xi;
25122 enum machine_mode fmode = GET_MODE (op1);
25123 enum machine_mode imode = GET_MODE (op0);
25124 rtx ireg, freg, label, tmp;
25126 /* reg = (long)op1 */
25127 ireg = gen_reg_rtx (imode);
25128 expand_fix (ireg, op1, 0);
25130 /* freg = (double)reg */
25131 freg = gen_reg_rtx (fmode);
25132 expand_float (freg, ireg, 0);
25134 /* ireg = (freg > op1) ? ireg - 1 : ireg */
25135 label = ix86_expand_sse_compare_and_jump (UNLE,
25136 freg, op1, !do_floor);
25137 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
25138 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
25139 emit_move_insn (ireg, tmp);
25141 emit_label (label);
25142 LABEL_NUSES (label) = 1;
25144 emit_move_insn (op0, ireg);
25147 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
25148 result in OPERAND0. */
25149 void
25150 ix86_expand_rint (rtx operand0, rtx operand1)
25152 /* C code for the stuff we're doing below:
25153 xa = fabs (operand1);
25154 if (!isless (xa, 2**52))
25155 return operand1;
25156 xa = xa + 2**52 - 2**52;
25157 return copysign (xa, operand1);
25159 enum machine_mode mode = GET_MODE (operand0);
25160 rtx res, xa, label, TWO52, mask;
25162 res = gen_reg_rtx (mode);
25163 emit_move_insn (res, operand1);
25165 /* xa = abs (operand1) */
25166 xa = ix86_expand_sse_fabs (res, &mask);
25168 /* if (!isless (xa, TWO52)) goto label; */
25169 TWO52 = ix86_gen_TWO52 (mode);
25170 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25172 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
25173 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
25175 ix86_sse_copysign_to_positive (res, xa, res, mask);
25177 emit_label (label);
25178 LABEL_NUSES (label) = 1;
25180 emit_move_insn (operand0, res);
25183 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
25184 into OPERAND0. */
25185 void
25186 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
25188 /* C code for the stuff we expand below.
25189 double xa = fabs (x), x2;
25190 if (!isless (xa, TWO52))
25191 return x;
25192 xa = xa + TWO52 - TWO52;
25193 x2 = copysign (xa, x);
25194 Compensate. Floor:
25195 if (x2 > x)
25196 x2 -= 1;
25197 Compensate. Ceil:
25198 if (x2 < x)
25199 x2 -= -1;
25200 return x2;
25202 enum machine_mode mode = GET_MODE (operand0);
25203 rtx xa, TWO52, tmp, label, one, res, mask;
25205 TWO52 = ix86_gen_TWO52 (mode);
25207 /* Temporary for holding the result, initialized to the input
25208 operand to ease control flow. */
25209 res = gen_reg_rtx (mode);
25210 emit_move_insn (res, operand1);
25212 /* xa = abs (operand1) */
25213 xa = ix86_expand_sse_fabs (res, &mask);
25215 /* if (!isless (xa, TWO52)) goto label; */
25216 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25218 /* xa = xa + TWO52 - TWO52; */
25219 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
25220 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
25222 /* xa = copysign (xa, operand1) */
25223 ix86_sse_copysign_to_positive (xa, xa, res, mask);
25225 /* generate 1.0 or -1.0 */
25226 one = force_reg (mode,
25227 const_double_from_real_value (do_floor
25228 ? dconst1 : dconstm1, mode));
25230 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
25231 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
25232 emit_insn (gen_rtx_SET (VOIDmode, tmp,
25233 gen_rtx_AND (mode, one, tmp)));
25234 /* We always need to subtract here to preserve signed zero. */
25235 tmp = expand_simple_binop (mode, MINUS,
25236 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
25237 emit_move_insn (res, tmp);
25239 emit_label (label);
25240 LABEL_NUSES (label) = 1;
25242 emit_move_insn (operand0, res);
25245 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
25246 into OPERAND0. */
25247 void
25248 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
25250 /* C code for the stuff we expand below.
25251 double xa = fabs (x), x2;
25252 if (!isless (xa, TWO52))
25253 return x;
25254 x2 = (double)(long)x;
25255 Compensate. Floor:
25256 if (x2 > x)
25257 x2 -= 1;
25258 Compensate. Ceil:
25259 if (x2 < x)
25260 x2 += 1;
25261 if (HONOR_SIGNED_ZEROS (mode))
25262 return copysign (x2, x);
25263 return x2;
25265 enum machine_mode mode = GET_MODE (operand0);
25266 rtx xa, xi, TWO52, tmp, label, one, res, mask;
25268 TWO52 = ix86_gen_TWO52 (mode);
25270 /* Temporary for holding the result, initialized to the input
25271 operand to ease control flow. */
25272 res = gen_reg_rtx (mode);
25273 emit_move_insn (res, operand1);
25275 /* xa = abs (operand1) */
25276 xa = ix86_expand_sse_fabs (res, &mask);
25278 /* if (!isless (xa, TWO52)) goto label; */
25279 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25281 /* xa = (double)(long)x */
25282 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
25283 expand_fix (xi, res, 0);
25284 expand_float (xa, xi, 0);
25286 /* generate 1.0 */
25287 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
25289 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
25290 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
25291 emit_insn (gen_rtx_SET (VOIDmode, tmp,
25292 gen_rtx_AND (mode, one, tmp)));
25293 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
25294 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
25295 emit_move_insn (res, tmp);
25297 if (HONOR_SIGNED_ZEROS (mode))
25298 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
25300 emit_label (label);
25301 LABEL_NUSES (label) = 1;
25303 emit_move_insn (operand0, res);
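/* A scalar sketch (an assumption; not a GCC function) of the branchless
   compensation above: the UNGT compare produces an all-ones mask where
   truncation went the wrong way, ANDing it with 1.0 yields the
   correction, which is then subtracted (floor) or added (ceil).  */
static double model_floor (double x)	/* valid for |x| < 2**52 */
{
  double x2 = (double) (long long) x;	/* truncate toward zero */
  return x2 - (x2 > x ? 1.0 : 0.0);	/* (mask & 1.0) subtracted */
}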
25306 /* Expand SSE sequence for computing round from OPERAND1 storing
25307 into OPERAND0. This sequence works without relying on DImode truncation
25308 via cvttsd2siq, which is only available on 64-bit targets. */
25309 void
25310 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
25312 /* C code for the stuff we expand below.
25313 double xa = fabs (x), xa2, x2;
25314 if (!isless (xa, TWO52))
25315 return x;
25316 Using the absolute value and copying back sign makes
25317 -0.0 -> -0.0 correct.
25318 xa2 = xa + TWO52 - TWO52;
25319 Compensate.
25320 dxa = xa2 - xa;
25321 if (dxa <= -0.5)
25322 xa2 += 1;
25323 else if (dxa > 0.5)
25324 xa2 -= 1;
25325 x2 = copysign (xa2, x);
25326 return x2;
25328 enum machine_mode mode = GET_MODE (operand0);
25329 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
25331 TWO52 = ix86_gen_TWO52 (mode);
25333 /* Temporary for holding the result, initialized to the input
25334 operand to ease control flow. */
25335 res = gen_reg_rtx (mode);
25336 emit_move_insn (res, operand1);
25338 /* xa = abs (operand1) */
25339 xa = ix86_expand_sse_fabs (res, &mask);
25341 /* if (!isless (xa, TWO52)) goto label; */
25342 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25344 /* xa2 = xa + TWO52 - TWO52; */
25345 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
25346 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
25348 /* dxa = xa2 - xa; */
25349 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
25351 /* generate 0.5, 1.0 and -0.5 */
25352 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
25353 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
25354 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
25355 0, OPTAB_DIRECT);
25357 /* Compensate. */
25358 tmp = gen_reg_rtx (mode);
25359 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
25360 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
25361 emit_insn (gen_rtx_SET (VOIDmode, tmp,
25362 gen_rtx_AND (mode, one, tmp)));
25363 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
25364 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
25365 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
25366 emit_insn (gen_rtx_SET (VOIDmode, tmp,
25367 gen_rtx_AND (mode, one, tmp)));
25368 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
25370 /* res = copysign (xa2, operand1) */
25371 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
25373 emit_label (label);
25374 LABEL_NUSES (label) = 1;
25376 emit_move_insn (operand0, res);
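/* A scalar sketch (an assumption; not a GCC function) of the
   compensation above.  The 2**52 trick rounds to nearest-even;
   comparing the rounding error dxa against +/-0.5 converts that into
   round-half-away-from-zero, which is what round () requires.  */
#include <math.h>
static double model_rounddf_32 (double x)	/* valid for |x| < 2**52 */
{
  double xa = fabs (x);
  volatile double t = xa + 4503599627370496.0;	/* xa + 2**52 */
  double xa2 = t - 4503599627370496.0;		/* ... - 2**52 */
  double dxa = xa2 - xa;
  if (dxa > 0.5)
    xa2 -= 1.0;			/* rounded up too far */
  else if (dxa <= -0.5)
    xa2 += 1.0;			/* halfway cases go away from zero */
  return copysign (xa2, x);
}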
25379 /* Expand SSE sequence for computing trunc from OPERAND1 storing
25380 into OPERAND0. */
25381 void
25382 ix86_expand_trunc (rtx operand0, rtx operand1)
25384 /* C code for SSE variant we expand below.
25385 double xa = fabs (x), x2;
25386 if (!isless (xa, TWO52))
25387 return x;
25388 x2 = (double)(long)x;
25389 if (HONOR_SIGNED_ZEROS (mode))
25390 return copysign (x2, x);
25391 return x2;
25393 enum machine_mode mode = GET_MODE (operand0);
25394 rtx xa, xi, TWO52, label, res, mask;
25396 TWO52 = ix86_gen_TWO52 (mode);
25398 /* Temporary for holding the result, initialized to the input
25399 operand to ease control flow. */
25400 res = gen_reg_rtx (mode);
25401 emit_move_insn (res, operand1);
25403 /* xa = abs (operand1) */
25404 xa = ix86_expand_sse_fabs (res, &mask);
25406 /* if (!isless (xa, TWO52)) goto label; */
25407 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25409 /* x = (double)(long)x */
25410 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
25411 expand_fix (xi, res, 0);
25412 expand_float (res, xi, 0);
25414 if (HONOR_SIGNED_ZEROS (mode))
25415 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
25417 emit_label (label);
25418 LABEL_NUSES (label) = 1;
25420 emit_move_insn (operand0, res);
25423 /* Expand SSE sequence for computing trunc from OPERAND1 storing
25424 into OPERAND0. */
25425 void
25426 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
25428 enum machine_mode mode = GET_MODE (operand0);
25429 rtx xa, mask, TWO52, label, one, res, smask, tmp;
25431 /* C code for SSE variant we expand below.
25432 double xa = fabs (x), x2;
25433 if (!isless (xa, TWO52))
25434 return x;
25435 xa2 = xa + TWO52 - TWO52;
25436 Compensate:
25437 if (xa2 > xa)
25438 xa2 -= 1.0;
25439 x2 = copysign (xa2, x);
25440 return x2;
25443 TWO52 = ix86_gen_TWO52 (mode);
25445 /* Temporary for holding the result, initialized to the input
25446 operand to ease control flow. */
25447 res = gen_reg_rtx (mode);
25448 emit_move_insn (res, operand1);
25450 /* xa = abs (operand1) */
25451 xa = ix86_expand_sse_fabs (res, &smask);
25453 /* if (!isless (xa, TWO52)) goto label; */
25454 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25456 /* res = xa + TWO52 - TWO52; */
25457 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
25458 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
25459 emit_move_insn (res, tmp);
25461 /* generate 1.0 */
25462 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
25464 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
25465 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
25466 emit_insn (gen_rtx_SET (VOIDmode, mask,
25467 gen_rtx_AND (mode, mask, one)));
25468 tmp = expand_simple_binop (mode, MINUS,
25469 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
25470 emit_move_insn (res, tmp);
25472 /* res = copysign (res, operand1) */
25473 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
25475 emit_label (label);
25476 LABEL_NUSES (label) = 1;
25478 emit_move_insn (operand0, res);
25481 /* Expand SSE sequence for computing round from OPERAND1 storing
25482 into OPERAND0. */
25483 void
25484 ix86_expand_round (rtx operand0, rtx operand1)
25486 /* C code for the stuff we're doing below:
25487 double xa = fabs (x);
25488 if (!isless (xa, TWO52))
25489 return x;
25490 xa = (double)(long)(xa + nextafter (0.5, 0.0));
25491 return copysign (xa, x);
25493 enum machine_mode mode = GET_MODE (operand0);
25494 rtx res, TWO52, xa, label, xi, half, mask;
25495 const struct real_format *fmt;
25496 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
25498 /* Temporary for holding the result, initialized to the input
25499 operand to ease control flow. */
25500 res = gen_reg_rtx (mode);
25501 emit_move_insn (res, operand1);
25503 TWO52 = ix86_gen_TWO52 (mode);
25504 xa = ix86_expand_sse_fabs (res, &mask);
25505 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
25507 /* load nextafter (0.5, 0.0) */
25508 fmt = REAL_MODE_FORMAT (mode);
25509 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
25510 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
25512 /* xa = xa + 0.5 */
25513 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
25514 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
25516 /* xa = (double)(int64_t)xa */
25517 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
25518 expand_fix (xi, xa, 0);
25519 expand_float (xa, xi, 0);
25521 /* res = copysign (xa, operand1) */
25522 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
25524 emit_label (label);
25525 LABEL_NUSES (label) = 1;
25527 emit_move_insn (operand0, res);
25531 /* Validate whether an SSE5 instruction's operand combination is valid.
25532 OPERANDS is the array of operands.
25533 NUM is the number of operands.
25534 USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
25535 NUM_MEMORY is the maximum number of memory operands to accept. */
25537 bool
25538 ix86_sse5_valid_op_p (rtx operands[], rtx insn ATTRIBUTE_UNUSED, int num,
25539 bool uses_oc0, int num_memory)
25541 int mem_mask;
25542 int mem_count;
25543 int i;
25545 /* Count the number of memory arguments */
25546 mem_mask = 0;
25547 mem_count = 0;
25548 for (i = 0; i < num; i++)
25550 enum machine_mode mode = GET_MODE (operands[i]);
25551 if (register_operand (operands[i], mode))
25554 else if (memory_operand (operands[i], mode))
25556 mem_mask |= (1 << i);
25557 mem_count++;
25560 else
25562 rtx pattern = PATTERN (insn);
25564 /* allow 0 for pcmov */
25565 if (GET_CODE (pattern) != SET
25566 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE
25567 || i < 2
25568 || operands[i] != CONST0_RTX (mode))
25569 return false;
25573 /* Special case pmacsdq{l,h} where we allow the 3rd argument to be
25574 a memory operation. */
25575 if (num_memory < 0)
25577 num_memory = -num_memory;
25578 if ((mem_mask & (1 << (num-1))) != 0)
25580 mem_mask &= ~(1 << (num-1));
25581 mem_count--;
25585 /* If there were no memory operations, allow the insn */
25586 if (mem_mask == 0)
25587 return true;
25589 /* Do not allow the destination register to be a memory operand. */
25590 else if (mem_mask & (1 << 0))
25591 return false;
25593 /* If there are too many memory operations, disallow the instruction. While
25594 the hardware only allows one memory reference, before register allocation
25595 we sometimes allow two memory operands for certain insns so that code
25596 like the following can be optimized:
25598 float fmadd (float *a, float *b, float *c) { return (*a * *b) + *c; }
25600 or similar cases that are vectorized into using the fmaddss
25601 instruction. */
25602 else if (mem_count > num_memory)
25603 return false;
25605 /* Don't allow more than one memory operation if not optimizing. */
25606 else if (mem_count > 1 && !optimize)
25607 return false;
25609 else if (num == 4 && mem_count == 1)
25611 /* formats (destination is the first argument), example fmaddss:
25612 xmm1, xmm1, xmm2, xmm3/mem
25613 xmm1, xmm1, xmm2/mem, xmm3
25614 xmm1, xmm2, xmm3/mem, xmm1
25615 xmm1, xmm2/mem, xmm3, xmm1 */
25616 if (uses_oc0)
25617 return ((mem_mask == (1 << 1))
25618 || (mem_mask == (1 << 2))
25619 || (mem_mask == (1 << 3)));
25621 /* format, example pmacsdd:
25622 xmm1, xmm2, xmm3/mem, xmm1 */
25623 else
25624 return (mem_mask == (1 << 2));
25627 else if (num == 4 && num_memory == 2)
25629 /* If there are two memory operations, we can load one of the memory ops
25630 into the destination register. This is for optimizing the
25631 multiply/add ops, where the combiner has given both the multiply
25632 and the add insns a memory operand. We have to be careful
25633 that the destination doesn't overlap with the inputs. */
25634 rtx op0 = operands[0];
25636 if (reg_mentioned_p (op0, operands[1])
25637 || reg_mentioned_p (op0, operands[2])
25638 || reg_mentioned_p (op0, operands[3]))
25639 return false;
25641 /* formats (destination is the first argument), example fmaddss:
25642 xmm1, xmm1, xmm2, xmm3/mem
25643 xmm1, xmm1, xmm2/mem, xmm3
25644 xmm1, xmm2, xmm3/mem, xmm1
25645 xmm1, xmm2/mem, xmm3, xmm1
25647 For the oc0 case, we will load either operands[1] or operands[3] into
25648 operands[0], so any combination of 2 memory operands is ok. */
25649 if (uses_oc0)
25650 return true;
25652 /* format, example pmacsdd:
25653 xmm1, xmm2, xmm3/mem, xmm1
25655 For the integer multiply/add instructions be more restrictive and
25656 require operands[2] and operands[3] to be the memory operands. */
25657 else
25658 return (mem_mask == ((1 << 2) | (1 << 3)));
25661 else if (num == 3 && num_memory == 1)
25663 /* formats, example protb:
25664 xmm1, xmm2, xmm3/mem
25665 xmm1, xmm2/mem, xmm3 */
25666 if (uses_oc0)
25667 return ((mem_mask == (1 << 1)) || (mem_mask == (1 << 2)));
25669 /* format, example comeq:
25670 xmm1, xmm2, xmm3/mem */
25671 else
25672 return (mem_mask == (1 << 2));
25675 else
25676 gcc_unreachable ();
25678 return false;
25682 /* Fix up an SSE5 instruction that has 2 memory input references into a form
25683 the hardware will allow, by loading one of the memory operands into the
25684 destination register. Presently this is used by the multiply/add routines
25685 to allow 2 memory references. */
25687 void
25688 ix86_expand_sse5_multiple_memory (rtx operands[],
25689 int num,
25690 enum machine_mode mode)
25692 rtx op0 = operands[0];
25693 if (num != 4
25694 || memory_operand (op0, mode)
25695 || reg_mentioned_p (op0, operands[1])
25696 || reg_mentioned_p (op0, operands[2])
25697 || reg_mentioned_p (op0, operands[3]))
25698 gcc_unreachable ();
25700 /* For 2 memory operands, pick either operands[1] or operands[3] to move into
25701 the destination register. */
25702 if (memory_operand (operands[1], mode))
25704 emit_move_insn (op0, operands[1]);
25705 operands[1] = op0;
25707 else if (memory_operand (operands[3], mode))
25709 emit_move_insn (op0, operands[3]);
25710 operands[3] = op0;
25712 else
25713 gcc_unreachable ();
25715 return;
25719 /* Table of valid machine attributes. */
25720 static const struct attribute_spec ix86_attribute_table[] =
25722 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
25723 /* Stdcall attribute says callee is responsible for popping arguments
25724 if they are not variable. */
25725 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
25726 /* Fastcall attribute says callee is responsible for popping arguments
25727 if they are not variable. */
25728 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
25729 /* Cdecl attribute says the callee is a normal C declaration */
25730 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
25731 /* Regparm attribute specifies how many integer arguments are to be
25732 passed in registers. */
25733 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
25734 /* Sseregparm attribute says we are using x86_64 calling conventions
25735 for FP arguments. */
25736 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
25737 /* force_align_arg_pointer says this function realigns the stack at entry. */
25738 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
25739 false, true, true, ix86_handle_cconv_attribute },
25740 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
25741 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
25742 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
25743 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
25744 #endif
25745 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
25746 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
25747 #ifdef SUBTARGET_ATTRIBUTE_TABLE
25748 SUBTARGET_ATTRIBUTE_TABLE,
25749 #endif
25750 { NULL, 0, 0, false, false, false, NULL }
25753 /* Implement targetm.vectorize.builtin_vectorization_cost. */
25754 static int
25755 x86_builtin_vectorization_cost (bool runtime_test)
25757 /* If the branch of the runtime test is taken - i.e. the vectorized
25758 version is skipped - this incurs a misprediction cost (because the
25759 vectorized version is expected to be the fall-through). So we subtract
25760 the latency of a mispredicted branch from the costs that are incurred
25761 when the vectorized version is executed.
25763 TODO: The values in the individual target tables have to be tuned, or new
25764 fields may be needed. E.g. on K8, the default branch path is the
25765 not-taken path. If the taken path is predicted correctly, the minimum
25766 penalty of going down the taken-path is 1 cycle. If the taken-path is
25767 not predicted correctly, then the minimum penalty is 10 cycles. */
25769 if (runtime_test)
25771 return (-(ix86_cost->cond_taken_branch_cost));
25773 else
25774 return 0;
25777 /* Initialize the GCC target structure. */
25778 #undef TARGET_RETURN_IN_MEMORY
25779 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
25781 #undef TARGET_ATTRIBUTE_TABLE
25782 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
25783 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
25784 # undef TARGET_MERGE_DECL_ATTRIBUTES
25785 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
25786 #endif
25788 #undef TARGET_COMP_TYPE_ATTRIBUTES
25789 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
25791 #undef TARGET_INIT_BUILTINS
25792 #define TARGET_INIT_BUILTINS ix86_init_builtins
25793 #undef TARGET_EXPAND_BUILTIN
25794 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
25796 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
25797 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
25798 ix86_builtin_vectorized_function
25800 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
25801 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
25803 #undef TARGET_BUILTIN_RECIPROCAL
25804 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
25806 #undef TARGET_ASM_FUNCTION_EPILOGUE
25807 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
25809 #undef TARGET_ENCODE_SECTION_INFO
25810 #ifndef SUBTARGET_ENCODE_SECTION_INFO
25811 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
25812 #else
25813 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
25814 #endif
25816 #undef TARGET_ASM_OPEN_PAREN
25817 #define TARGET_ASM_OPEN_PAREN ""
25818 #undef TARGET_ASM_CLOSE_PAREN
25819 #define TARGET_ASM_CLOSE_PAREN ""
25821 #undef TARGET_ASM_ALIGNED_HI_OP
25822 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
25823 #undef TARGET_ASM_ALIGNED_SI_OP
25824 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
25825 #ifdef ASM_QUAD
25826 #undef TARGET_ASM_ALIGNED_DI_OP
25827 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
25828 #endif
25830 #undef TARGET_ASM_UNALIGNED_HI_OP
25831 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
25832 #undef TARGET_ASM_UNALIGNED_SI_OP
25833 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
25834 #undef TARGET_ASM_UNALIGNED_DI_OP
25835 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
25837 #undef TARGET_SCHED_ADJUST_COST
25838 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
25839 #undef TARGET_SCHED_ISSUE_RATE
25840 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
25841 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
25842 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
25843 ia32_multipass_dfa_lookahead
25845 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
25846 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
25848 #ifdef HAVE_AS_TLS
25849 #undef TARGET_HAVE_TLS
25850 #define TARGET_HAVE_TLS true
25851 #endif
25852 #undef TARGET_CANNOT_FORCE_CONST_MEM
25853 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
25854 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
25855 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
25857 #undef TARGET_DELEGITIMIZE_ADDRESS
25858 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
25860 #undef TARGET_MS_BITFIELD_LAYOUT_P
25861 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
25863 #if TARGET_MACHO
25864 #undef TARGET_BINDS_LOCAL_P
25865 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
25866 #endif
25867 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
25868 #undef TARGET_BINDS_LOCAL_P
25869 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
25870 #endif
25872 #undef TARGET_ASM_OUTPUT_MI_THUNK
25873 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
25874 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
25875 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
25877 #undef TARGET_ASM_FILE_START
25878 #define TARGET_ASM_FILE_START x86_file_start
25880 #undef TARGET_DEFAULT_TARGET_FLAGS
25881 #define TARGET_DEFAULT_TARGET_FLAGS \
25882 (TARGET_DEFAULT \
25883 | TARGET_SUBTARGET_DEFAULT \
25884 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
25886 #undef TARGET_HANDLE_OPTION
25887 #define TARGET_HANDLE_OPTION ix86_handle_option
25889 #undef TARGET_RTX_COSTS
25890 #define TARGET_RTX_COSTS ix86_rtx_costs
25891 #undef TARGET_ADDRESS_COST
25892 #define TARGET_ADDRESS_COST ix86_address_cost
25894 #undef TARGET_FIXED_CONDITION_CODE_REGS
25895 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
25896 #undef TARGET_CC_MODES_COMPATIBLE
25897 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
25899 #undef TARGET_MACHINE_DEPENDENT_REORG
25900 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
25902 #undef TARGET_BUILD_BUILTIN_VA_LIST
25903 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
25905 #undef TARGET_EXPAND_BUILTIN_VA_START
25906 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
25908 #undef TARGET_MD_ASM_CLOBBERS
25909 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
25911 #undef TARGET_PROMOTE_PROTOTYPES
25912 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
25913 #undef TARGET_STRUCT_VALUE_RTX
25914 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
25915 #undef TARGET_SETUP_INCOMING_VARARGS
25916 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
25917 #undef TARGET_MUST_PASS_IN_STACK
25918 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
25919 #undef TARGET_PASS_BY_REFERENCE
25920 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
25921 #undef TARGET_INTERNAL_ARG_POINTER
25922 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
25923 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
25924 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
25925 #undef TARGET_STRICT_ARGUMENT_NAMING
25926 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
25928 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
25929 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
25931 #undef TARGET_SCALAR_MODE_SUPPORTED_P
25932 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
25934 #undef TARGET_VECTOR_MODE_SUPPORTED_P
25935 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
25937 #undef TARGET_C_MODE_FOR_SUFFIX
25938 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
25940 #ifdef HAVE_AS_TLS
25941 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
25942 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
25943 #endif
25945 #ifdef SUBTARGET_INSERT_ATTRIBUTES
25946 #undef TARGET_INSERT_ATTRIBUTES
25947 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
25948 #endif
25950 #undef TARGET_MANGLE_TYPE
25951 #define TARGET_MANGLE_TYPE ix86_mangle_type
25953 #undef TARGET_STACK_PROTECT_FAIL
25954 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
25956 #undef TARGET_FUNCTION_VALUE
25957 #define TARGET_FUNCTION_VALUE ix86_function_value
25959 #undef TARGET_SECONDARY_RELOAD
25960 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
25962 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
25963 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST x86_builtin_vectorization_cost
25965 struct gcc_target targetm = TARGET_INITIALIZER;
25967 #include "gt-i386.h"