Introduce TLS descriptors for i386 and x86_64.
[official-gcc.git] / gcc / config / i386 / i386.c
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51 #include "dwarf2.h"
53 #ifndef CHECK_STACK_LIMIT
54 #define CHECK_STACK_LIMIT (-1)
55 #endif
57 /* Return index of given mode in mult and division cost tables. */
58 #define MODE_INDEX(mode) \
59 ((mode) == QImode ? 0 \
60 : (mode) == HImode ? 1 \
61 : (mode) == SImode ? 2 \
62 : (mode) == DImode ? 3 \
63 : 4)
65 /* Processor costs (relative to an add) */
66 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
67 #define COSTS_N_BYTES(N) ((N) * 2)
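/* Illustrative sketch only: the cost tables below are filled in with
   COSTS_N_INSNS when tuning for speed and with COSTS_N_BYTES when tuning
   for size, and are consulted through ix86_cost, indexing the per-mode
   arrays with MODE_INDEX.  Assuming the field names suggested by the
   initializer comments, a lookup looks roughly like:

     int add_cost = ix86_cost->add;
     int mul_cost = ix86_cost->mult_init[MODE_INDEX (SImode)];

   so an SImode multiply under -Os costs COSTS_N_BYTES (3), i.e. an
   estimated 3-byte encoding rather than a latency in instruction units.  */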
69 static const
70 struct processor_costs size_cost = { /* costs for tuning for size */
71 COSTS_N_BYTES (2), /* cost of an add instruction */
72 COSTS_N_BYTES (3), /* cost of a lea instruction */
73 COSTS_N_BYTES (2), /* variable shift costs */
74 COSTS_N_BYTES (3), /* constant shift costs */
75 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
76 COSTS_N_BYTES (3), /* HI */
77 COSTS_N_BYTES (3), /* SI */
78 COSTS_N_BYTES (3), /* DI */
79 COSTS_N_BYTES (5)}, /* other */
80 0, /* cost of multiply per each bit set */
81 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
82 COSTS_N_BYTES (3), /* HI */
83 COSTS_N_BYTES (3), /* SI */
84 COSTS_N_BYTES (3), /* DI */
85 COSTS_N_BYTES (5)}, /* other */
86 COSTS_N_BYTES (3), /* cost of movsx */
87 COSTS_N_BYTES (3), /* cost of movzx */
88 0, /* "large" insn */
89 2, /* MOVE_RATIO */
90 2, /* cost for loading QImode using movzbl */
91 {2, 2, 2}, /* cost of loading integer registers
92 in QImode, HImode and SImode.
93 Relative to reg-reg move (2). */
94 {2, 2, 2}, /* cost of storing integer registers */
95 2, /* cost of reg,reg fld/fst */
96 {2, 2, 2}, /* cost of loading fp registers
97 in SFmode, DFmode and XFmode */
98 {2, 2, 2}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
99 3, /* cost of moving MMX register */
100 {3, 3}, /* cost of loading MMX registers
101 in SImode and DImode */
102 {3, 3}, /* cost of storing MMX registers
103 in SImode and DImode */
104 3, /* cost of moving SSE register */
105 {3, 3, 3}, /* cost of loading SSE registers
106 in SImode, DImode and TImode */
107 {3, 3, 3}, /* cost of storing SSE registers
108 in SImode, DImode and TImode */
109 3, /* MMX or SSE register to integer */
110 0, /* size of prefetch block */
111 0, /* number of parallel prefetches */
112 2, /* Branch cost */
113 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
114 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
115 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
116 COSTS_N_BYTES (2), /* cost of FABS instruction. */
117 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
118 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
119 };
121 /* Processor costs (relative to an add) */
122 static const
123 struct processor_costs i386_cost = { /* 386 specific costs */
124 COSTS_N_INSNS (1), /* cost of an add instruction */
125 COSTS_N_INSNS (1), /* cost of a lea instruction */
126 COSTS_N_INSNS (3), /* variable shift costs */
127 COSTS_N_INSNS (2), /* constant shift costs */
128 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
129 COSTS_N_INSNS (6), /* HI */
130 COSTS_N_INSNS (6), /* SI */
131 COSTS_N_INSNS (6), /* DI */
132 COSTS_N_INSNS (6)}, /* other */
133 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
134 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
135 COSTS_N_INSNS (23), /* HI */
136 COSTS_N_INSNS (23), /* SI */
137 COSTS_N_INSNS (23), /* DI */
138 COSTS_N_INSNS (23)}, /* other */
139 COSTS_N_INSNS (3), /* cost of movsx */
140 COSTS_N_INSNS (2), /* cost of movzx */
141 15, /* "large" insn */
142 3, /* MOVE_RATIO */
143 4, /* cost for loading QImode using movzbl */
144 {2, 4, 2}, /* cost of loading integer registers
145 in QImode, HImode and SImode.
146 Relative to reg-reg move (2). */
147 {2, 4, 2}, /* cost of storing integer registers */
148 2, /* cost of reg,reg fld/fst */
149 {8, 8, 8}, /* cost of loading fp registers
150 in SFmode, DFmode and XFmode */
151 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
152 2, /* cost of moving MMX register */
153 {4, 8}, /* cost of loading MMX registers
154 in SImode and DImode */
155 {4, 8}, /* cost of storing MMX registers
156 in SImode and DImode */
157 2, /* cost of moving SSE register */
158 {4, 8, 16}, /* cost of loading SSE registers
159 in SImode, DImode and TImode */
160 {4, 8, 16}, /* cost of storing SSE registers
161 in SImode, DImode and TImode */
162 3, /* MMX or SSE register to integer */
163 0, /* size of prefetch block */
164 0, /* number of parallel prefetches */
165 1, /* Branch cost */
166 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
167 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
168 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
169 COSTS_N_INSNS (22), /* cost of FABS instruction. */
170 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
171 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
172 };
174 static const
175 struct processor_costs i486_cost = { /* 486 specific costs */
176 COSTS_N_INSNS (1), /* cost of an add instruction */
177 COSTS_N_INSNS (1), /* cost of a lea instruction */
178 COSTS_N_INSNS (3), /* variable shift costs */
179 COSTS_N_INSNS (2), /* constant shift costs */
180 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
181 COSTS_N_INSNS (12), /* HI */
182 COSTS_N_INSNS (12), /* SI */
183 COSTS_N_INSNS (12), /* DI */
184 COSTS_N_INSNS (12)}, /* other */
185 1, /* cost of multiply per each bit set */
186 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
187 COSTS_N_INSNS (40), /* HI */
188 COSTS_N_INSNS (40), /* SI */
189 COSTS_N_INSNS (40), /* DI */
190 COSTS_N_INSNS (40)}, /* other */
191 COSTS_N_INSNS (3), /* cost of movsx */
192 COSTS_N_INSNS (2), /* cost of movzx */
193 15, /* "large" insn */
194 3, /* MOVE_RATIO */
195 4, /* cost for loading QImode using movzbl */
196 {2, 4, 2}, /* cost of loading integer registers
197 in QImode, HImode and SImode.
198 Relative to reg-reg move (2). */
199 {2, 4, 2}, /* cost of storing integer registers */
200 2, /* cost of reg,reg fld/fst */
201 {8, 8, 8}, /* cost of loading fp registers
202 in SFmode, DFmode and XFmode */
203 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
204 2, /* cost of moving MMX register */
205 {4, 8}, /* cost of loading MMX registers
206 in SImode and DImode */
207 {4, 8}, /* cost of storing MMX registers
208 in SImode and DImode */
209 2, /* cost of moving SSE register */
210 {4, 8, 16}, /* cost of loading SSE registers
211 in SImode, DImode and TImode */
212 {4, 8, 16}, /* cost of storing SSE registers
213 in SImode, DImode and TImode */
214 3, /* MMX or SSE register to integer */
215 0, /* size of prefetch block */
216 0, /* number of parallel prefetches */
217 1, /* Branch cost */
218 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
219 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
220 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
221 COSTS_N_INSNS (3), /* cost of FABS instruction. */
222 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
223 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
224 };
226 static const
227 struct processor_costs pentium_cost = {
228 COSTS_N_INSNS (1), /* cost of an add instruction */
229 COSTS_N_INSNS (1), /* cost of a lea instruction */
230 COSTS_N_INSNS (4), /* variable shift costs */
231 COSTS_N_INSNS (1), /* constant shift costs */
232 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
233 COSTS_N_INSNS (11), /* HI */
234 COSTS_N_INSNS (11), /* SI */
235 COSTS_N_INSNS (11), /* DI */
236 COSTS_N_INSNS (11)}, /* other */
237 0, /* cost of multiply per each bit set */
238 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
239 COSTS_N_INSNS (25), /* HI */
240 COSTS_N_INSNS (25), /* SI */
241 COSTS_N_INSNS (25), /* DI */
242 COSTS_N_INSNS (25)}, /* other */
243 COSTS_N_INSNS (3), /* cost of movsx */
244 COSTS_N_INSNS (2), /* cost of movzx */
245 8, /* "large" insn */
246 6, /* MOVE_RATIO */
247 6, /* cost for loading QImode using movzbl */
248 {2, 4, 2}, /* cost of loading integer registers
249 in QImode, HImode and SImode.
250 Relative to reg-reg move (2). */
251 {2, 4, 2}, /* cost of storing integer registers */
252 2, /* cost of reg,reg fld/fst */
253 {2, 2, 6}, /* cost of loading fp registers
254 in SFmode, DFmode and XFmode */
255 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
256 8, /* cost of moving MMX register */
257 {8, 8}, /* cost of loading MMX registers
258 in SImode and DImode */
259 {8, 8}, /* cost of storing MMX registers
260 in SImode and DImode */
261 2, /* cost of moving SSE register */
262 {4, 8, 16}, /* cost of loading SSE registers
263 in SImode, DImode and TImode */
264 {4, 8, 16}, /* cost of storing SSE registers
265 in SImode, DImode and TImode */
266 3, /* MMX or SSE register to integer */
267 0, /* size of prefetch block */
268 0, /* number of parallel prefetches */
269 2, /* Branch cost */
270 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
271 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
272 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
273 COSTS_N_INSNS (1), /* cost of FABS instruction. */
274 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
275 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
276 };
278 static const
279 struct processor_costs pentiumpro_cost = {
280 COSTS_N_INSNS (1), /* cost of an add instruction */
281 COSTS_N_INSNS (1), /* cost of a lea instruction */
282 COSTS_N_INSNS (1), /* variable shift costs */
283 COSTS_N_INSNS (1), /* constant shift costs */
284 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
285 COSTS_N_INSNS (4), /* HI */
286 COSTS_N_INSNS (4), /* SI */
287 COSTS_N_INSNS (4), /* DI */
288 COSTS_N_INSNS (4)}, /* other */
289 0, /* cost of multiply per each bit set */
290 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
291 COSTS_N_INSNS (17), /* HI */
292 COSTS_N_INSNS (17), /* SI */
293 COSTS_N_INSNS (17), /* DI */
294 COSTS_N_INSNS (17)}, /* other */
295 COSTS_N_INSNS (1), /* cost of movsx */
296 COSTS_N_INSNS (1), /* cost of movzx */
297 8, /* "large" insn */
298 6, /* MOVE_RATIO */
299 2, /* cost for loading QImode using movzbl */
300 {4, 4, 4}, /* cost of loading integer registers
301 in QImode, HImode and SImode.
302 Relative to reg-reg move (2). */
303 {2, 2, 2}, /* cost of storing integer registers */
304 2, /* cost of reg,reg fld/fst */
305 {2, 2, 6}, /* cost of loading fp registers
306 in SFmode, DFmode and XFmode */
307 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
308 2, /* cost of moving MMX register */
309 {2, 2}, /* cost of loading MMX registers
310 in SImode and DImode */
311 {2, 2}, /* cost of storing MMX registers
312 in SImode and DImode */
313 2, /* cost of moving SSE register */
314 {2, 2, 8}, /* cost of loading SSE registers
315 in SImode, DImode and TImode */
316 {2, 2, 8}, /* cost of storing SSE registers
317 in SImode, DImode and TImode */
318 3, /* MMX or SSE register to integer */
319 32, /* size of prefetch block */
320 6, /* number of parallel prefetches */
321 2, /* Branch cost */
322 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
323 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
324 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
325 COSTS_N_INSNS (2), /* cost of FABS instruction. */
326 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
327 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
328 };
330 static const
331 struct processor_costs k6_cost = {
332 COSTS_N_INSNS (1), /* cost of an add instruction */
333 COSTS_N_INSNS (2), /* cost of a lea instruction */
334 COSTS_N_INSNS (1), /* variable shift costs */
335 COSTS_N_INSNS (1), /* constant shift costs */
336 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
337 COSTS_N_INSNS (3), /* HI */
338 COSTS_N_INSNS (3), /* SI */
339 COSTS_N_INSNS (3), /* DI */
340 COSTS_N_INSNS (3)}, /* other */
341 0, /* cost of multiply per each bit set */
342 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
343 COSTS_N_INSNS (18), /* HI */
344 COSTS_N_INSNS (18), /* SI */
345 COSTS_N_INSNS (18), /* DI */
346 COSTS_N_INSNS (18)}, /* other */
347 COSTS_N_INSNS (2), /* cost of movsx */
348 COSTS_N_INSNS (2), /* cost of movzx */
349 8, /* "large" insn */
350 4, /* MOVE_RATIO */
351 3, /* cost for loading QImode using movzbl */
352 {4, 5, 4}, /* cost of loading integer registers
353 in QImode, HImode and SImode.
354 Relative to reg-reg move (2). */
355 {2, 3, 2}, /* cost of storing integer registers */
356 4, /* cost of reg,reg fld/fst */
357 {6, 6, 6}, /* cost of loading fp registers
358 in SFmode, DFmode and XFmode */
359 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
360 2, /* cost of moving MMX register */
361 {2, 2}, /* cost of loading MMX registers
362 in SImode and DImode */
363 {2, 2}, /* cost of storing MMX registers
364 in SImode and DImode */
365 2, /* cost of moving SSE register */
366 {2, 2, 8}, /* cost of loading SSE registers
367 in SImode, DImode and TImode */
368 {2, 2, 8}, /* cost of storing SSE registers
369 in SImode, DImode and TImode */
370 6, /* MMX or SSE register to integer */
371 32, /* size of prefetch block */
372 1, /* number of parallel prefetches */
373 1, /* Branch cost */
374 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
375 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
376 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
377 COSTS_N_INSNS (2), /* cost of FABS instruction. */
378 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
379 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
380 };
382 static const
383 struct processor_costs athlon_cost = {
384 COSTS_N_INSNS (1), /* cost of an add instruction */
385 COSTS_N_INSNS (2), /* cost of a lea instruction */
386 COSTS_N_INSNS (1), /* variable shift costs */
387 COSTS_N_INSNS (1), /* constant shift costs */
388 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
389 COSTS_N_INSNS (5), /* HI */
390 COSTS_N_INSNS (5), /* SI */
391 COSTS_N_INSNS (5), /* DI */
392 COSTS_N_INSNS (5)}, /* other */
393 0, /* cost of multiply per each bit set */
394 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
395 COSTS_N_INSNS (26), /* HI */
396 COSTS_N_INSNS (42), /* SI */
397 COSTS_N_INSNS (74), /* DI */
398 COSTS_N_INSNS (74)}, /* other */
399 COSTS_N_INSNS (1), /* cost of movsx */
400 COSTS_N_INSNS (1), /* cost of movzx */
401 8, /* "large" insn */
402 9, /* MOVE_RATIO */
403 4, /* cost for loading QImode using movzbl */
404 {3, 4, 3}, /* cost of loading integer registers
405 in QImode, HImode and SImode.
406 Relative to reg-reg move (2). */
407 {3, 4, 3}, /* cost of storing integer registers */
408 4, /* cost of reg,reg fld/fst */
409 {4, 4, 12}, /* cost of loading fp registers
410 in SFmode, DFmode and XFmode */
411 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
412 2, /* cost of moving MMX register */
413 {4, 4}, /* cost of loading MMX registers
414 in SImode and DImode */
415 {4, 4}, /* cost of storing MMX registers
416 in SImode and DImode */
417 2, /* cost of moving SSE register */
418 {4, 4, 6}, /* cost of loading SSE registers
419 in SImode, DImode and TImode */
420 {4, 4, 5}, /* cost of storing SSE registers
421 in SImode, DImode and TImode */
422 5, /* MMX or SSE register to integer */
423 64, /* size of prefetch block */
424 6, /* number of parallel prefetches */
425 5, /* Branch cost */
426 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
427 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
428 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
429 COSTS_N_INSNS (2), /* cost of FABS instruction. */
430 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
431 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
432 };
434 static const
435 struct processor_costs k8_cost = {
436 COSTS_N_INSNS (1), /* cost of an add instruction */
437 COSTS_N_INSNS (2), /* cost of a lea instruction */
438 COSTS_N_INSNS (1), /* variable shift costs */
439 COSTS_N_INSNS (1), /* constant shift costs */
440 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
441 COSTS_N_INSNS (4), /* HI */
442 COSTS_N_INSNS (3), /* SI */
443 COSTS_N_INSNS (4), /* DI */
444 COSTS_N_INSNS (5)}, /* other */
445 0, /* cost of multiply per each bit set */
446 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
447 COSTS_N_INSNS (26), /* HI */
448 COSTS_N_INSNS (42), /* SI */
449 COSTS_N_INSNS (74), /* DI */
450 COSTS_N_INSNS (74)}, /* other */
451 COSTS_N_INSNS (1), /* cost of movsx */
452 COSTS_N_INSNS (1), /* cost of movzx */
453 8, /* "large" insn */
454 9, /* MOVE_RATIO */
455 4, /* cost for loading QImode using movzbl */
456 {3, 4, 3}, /* cost of loading integer registers
457 in QImode, HImode and SImode.
458 Relative to reg-reg move (2). */
459 {3, 4, 3}, /* cost of storing integer registers */
460 4, /* cost of reg,reg fld/fst */
461 {4, 4, 12}, /* cost of loading fp registers
462 in SFmode, DFmode and XFmode */
463 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
464 2, /* cost of moving MMX register */
465 {3, 3}, /* cost of loading MMX registers
466 in SImode and DImode */
467 {4, 4}, /* cost of storing MMX registers
468 in SImode and DImode */
469 2, /* cost of moving SSE register */
470 {4, 3, 6}, /* cost of loading SSE registers
471 in SImode, DImode and TImode */
472 {4, 4, 5}, /* cost of storing SSE registers
473 in SImode, DImode and TImode */
474 5, /* MMX or SSE register to integer */
475 64, /* size of prefetch block */
476 6, /* number of parallel prefetches */
477 5, /* Branch cost */
478 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
479 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
480 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
481 COSTS_N_INSNS (2), /* cost of FABS instruction. */
482 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
483 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
484 };
486 static const
487 struct processor_costs pentium4_cost = {
488 COSTS_N_INSNS (1), /* cost of an add instruction */
489 COSTS_N_INSNS (3), /* cost of a lea instruction */
490 COSTS_N_INSNS (4), /* variable shift costs */
491 COSTS_N_INSNS (4), /* constant shift costs */
492 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
493 COSTS_N_INSNS (15), /* HI */
494 COSTS_N_INSNS (15), /* SI */
495 COSTS_N_INSNS (15), /* DI */
496 COSTS_N_INSNS (15)}, /* other */
497 0, /* cost of multiply per each bit set */
498 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
499 COSTS_N_INSNS (56), /* HI */
500 COSTS_N_INSNS (56), /* SI */
501 COSTS_N_INSNS (56), /* DI */
502 COSTS_N_INSNS (56)}, /* other */
503 COSTS_N_INSNS (1), /* cost of movsx */
504 COSTS_N_INSNS (1), /* cost of movzx */
505 16, /* "large" insn */
506 6, /* MOVE_RATIO */
507 2, /* cost for loading QImode using movzbl */
508 {4, 5, 4}, /* cost of loading integer registers
509 in QImode, HImode and SImode.
510 Relative to reg-reg move (2). */
511 {2, 3, 2}, /* cost of storing integer registers */
512 2, /* cost of reg,reg fld/fst */
513 {2, 2, 6}, /* cost of loading fp registers
514 in SFmode, DFmode and XFmode */
515 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
516 2, /* cost of moving MMX register */
517 {2, 2}, /* cost of loading MMX registers
518 in SImode and DImode */
519 {2, 2}, /* cost of storing MMX registers
520 in SImode and DImode */
521 12, /* cost of moving SSE register */
522 {12, 12, 12}, /* cost of loading SSE registers
523 in SImode, DImode and TImode */
524 {2, 2, 8}, /* cost of storing SSE registers
525 in SImode, DImode and TImode */
526 10, /* MMX or SSE register to integer */
527 64, /* size of prefetch block */
528 6, /* number of parallel prefetches */
529 2, /* Branch cost */
530 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
531 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
532 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
533 COSTS_N_INSNS (2), /* cost of FABS instruction. */
534 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
535 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
536 };
538 static const
539 struct processor_costs nocona_cost = {
540 COSTS_N_INSNS (1), /* cost of an add instruction */
541 COSTS_N_INSNS (1), /* cost of a lea instruction */
542 COSTS_N_INSNS (1), /* variable shift costs */
543 COSTS_N_INSNS (1), /* constant shift costs */
544 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
545 COSTS_N_INSNS (10), /* HI */
546 COSTS_N_INSNS (10), /* SI */
547 COSTS_N_INSNS (10), /* DI */
548 COSTS_N_INSNS (10)}, /* other */
549 0, /* cost of multiply per each bit set */
550 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
551 COSTS_N_INSNS (66), /* HI */
552 COSTS_N_INSNS (66), /* SI */
553 COSTS_N_INSNS (66), /* DI */
554 COSTS_N_INSNS (66)}, /* other */
555 COSTS_N_INSNS (1), /* cost of movsx */
556 COSTS_N_INSNS (1), /* cost of movzx */
557 16, /* "large" insn */
558 17, /* MOVE_RATIO */
559 4, /* cost for loading QImode using movzbl */
560 {4, 4, 4}, /* cost of loading integer registers
561 in QImode, HImode and SImode.
562 Relative to reg-reg move (2). */
563 {4, 4, 4}, /* cost of storing integer registers */
564 3, /* cost of reg,reg fld/fst */
565 {12, 12, 12}, /* cost of loading fp registers
566 in SFmode, DFmode and XFmode */
567 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
568 6, /* cost of moving MMX register */
569 {12, 12}, /* cost of loading MMX registers
570 in SImode and DImode */
571 {12, 12}, /* cost of storing MMX registers
572 in SImode and DImode */
573 6, /* cost of moving SSE register */
574 {12, 12, 12}, /* cost of loading SSE registers
575 in SImode, DImode and TImode */
576 {12, 12, 12}, /* cost of storing SSE registers
577 in SImode, DImode and TImode */
578 8, /* MMX or SSE register to integer */
579 128, /* size of prefetch block */
580 8, /* number of parallel prefetches */
581 1, /* Branch cost */
582 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
583 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
584 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
585 COSTS_N_INSNS (3), /* cost of FABS instruction. */
586 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
587 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
588 };
590 const struct processor_costs *ix86_cost = &pentium_cost;
592 /* Processor feature/optimization bitmasks. */
593 #define m_386 (1<<PROCESSOR_I386)
594 #define m_486 (1<<PROCESSOR_I486)
595 #define m_PENT (1<<PROCESSOR_PENTIUM)
596 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
597 #define m_K6 (1<<PROCESSOR_K6)
598 #define m_ATHLON (1<<PROCESSOR_ATHLON)
599 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
600 #define m_K8 (1<<PROCESSOR_K8)
601 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
602 #define m_NOCONA (1<<PROCESSOR_NOCONA)
604 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
605 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
606 const int x86_zero_extend_with_and = m_486 | m_PENT;
607 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
608 const int x86_double_with_add = ~m_386;
609 const int x86_use_bit_test = m_386;
610 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
611 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
612 const int x86_fisttp = m_NOCONA;
613 const int x86_3dnow_a = m_ATHLON_K8;
614 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
615 /* Branch hints were put in P4 based on simulation result. But
616 after P4 was made, no performance benefit was observed with
617 branch hints. It also increases the code size. As the result,
618 icc never generates branch hints. */
619 const int x86_branch_hints = 0;
620 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
621 const int x86_partial_reg_stall = m_PPRO;
622 const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
623 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
624 const int x86_use_mov0 = m_K6;
625 const int x86_use_cltd = ~(m_PENT | m_K6);
626 const int x86_read_modify_write = ~m_PENT;
627 const int x86_read_modify = ~(m_PENT | m_PPRO);
628 const int x86_split_long_moves = m_PPRO;
629 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
630 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
631 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
632 const int x86_qimode_math = ~(0);
633 const int x86_promote_qi_regs = 0;
634 const int x86_himode_math = ~(m_PPRO);
635 const int x86_promote_hi_regs = m_PPRO;
636 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
637 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
638 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
639 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
640 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
641 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
642 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
643 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
644 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
645 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
646 const int x86_shift1 = ~m_486;
647 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
648 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
649 /* Set for machines where the type and dependencies are resolved on SSE
650 register parts instead of whole registers, so we may maintain just
651 lower part of scalar values in proper format leaving the upper part
652 undefined. */
653 const int x86_sse_split_regs = m_ATHLON_K8;
654 const int x86_sse_typeless_stores = m_ATHLON_K8;
655 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
656 const int x86_use_ffreep = m_ATHLON_K8;
657 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
659 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
660 integer data in xmm registers. Which results in pretty abysmal code. */
661 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
663 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
664 /* Some CPU cores are not able to predict more than 4 branch instructions in
665 the 16 byte window. */
666 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
667 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
668 const int x86_use_bt = m_ATHLON_K8;
669 /* Compare and exchange was added for 80486. */
670 const int x86_cmpxchg = ~m_386;
671 /* Compare and exchange 8 bytes was added for pentium. */
672 const int x86_cmpxchg8b = ~(m_386 | m_486);
673 /* Compare and exchange 16 bytes was added for nocona. */
674 const int x86_cmpxchg16b = m_NOCONA;
675 /* Exchange and add was added for 80486. */
676 const int x86_xadd = ~m_386;
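/* A sketch of how the tuning bitmasks above are consumed (the wrapper names
   are assumptions; see i386.h for the real macros): each x86_* variable is a
   mask over processor types and is tested against the selected tuning CPU,
   roughly

     if (x86_use_leave & (1 << ix86_tune))
       ... prefer the "leave" instruction in the epilogue ...

   which i386.h typically packages as TARGET_USE_LEAVE and friends.  */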
678 /* In case the average insn count for single function invocation is
679 lower than this constant, emit fast (but longer) prologue and
680 epilogue code. */
681 #define FAST_PROLOGUE_INSN_COUNT 20
683 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
684 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
685 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
686 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
688 /* Array of the smallest class containing reg number REGNO, indexed by
689 REGNO. Used by REGNO_REG_CLASS in i386.h. */
691 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
692 {
693 /* ax, dx, cx, bx */
694 AREG, DREG, CREG, BREG,
695 /* si, di, bp, sp */
696 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
697 /* FP registers */
698 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
699 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
700 /* arg pointer */
701 NON_Q_REGS,
702 /* flags, fpsr, dirflag, frame */
703 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
704 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
705 SSE_REGS, SSE_REGS,
706 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
707 MMX_REGS, MMX_REGS,
708 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
709 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
710 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
711 SSE_REGS, SSE_REGS,
712 };
714 /* The "default" register map used in 32bit mode. */
716 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
717 {
718 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
719 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
720 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
721 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
722 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
723 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
724 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
725 };
727 static int const x86_64_int_parameter_registers[6] =
728 {
729 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
730 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
731 };
733 static int const x86_64_int_return_registers[4] =
734 {
735 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
736 };
738 /* The "default" register map used in 64bit mode. */
739 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
740 {
741 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
742 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
743 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
744 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
745 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
746 8,9,10,11,12,13,14,15, /* extended integer registers */
747 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
748 };
750 /* Define the register numbers to be used in Dwarf debugging information.
751 The SVR4 reference port C compiler uses the following register numbers
752 in its Dwarf output code:
753 0 for %eax (gcc regno = 0)
754 1 for %ecx (gcc regno = 2)
755 2 for %edx (gcc regno = 1)
756 3 for %ebx (gcc regno = 3)
757 4 for %esp (gcc regno = 7)
758 5 for %ebp (gcc regno = 6)
759 6 for %esi (gcc regno = 4)
760 7 for %edi (gcc regno = 5)
761 The following three DWARF register numbers are never generated by
762 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
763 believes these numbers have these meanings.
764 8 for %eip (no gcc equivalent)
765 9 for %eflags (gcc regno = 17)
766 10 for %trapno (no gcc equivalent)
767 It is not at all clear how we should number the FP stack registers
768 for the x86 architecture. If the version of SDB on x86/svr4 were
769 a bit less brain dead with respect to floating-point then we would
770 have a precedent to follow with respect to DWARF register numbers
771 for x86 FP registers, but the SDB on x86/svr4 is so completely
772 broken with respect to FP registers that it is hardly worth thinking
773 of it as something to strive for compatibility with.
774 The version of x86/svr4 SDB I have at the moment does (partially)
775 seem to believe that DWARF register number 11 is associated with
776 the x86 register %st(0), but that's about all. Higher DWARF
777 register numbers don't seem to be associated with anything in
778 particular, and even for DWARF regno 11, SDB only seems to under-
779 stand that it should say that a variable lives in %st(0) (when
780 asked via an `=' command) if we said it was in DWARF regno 11,
781 but SDB still prints garbage when asked for the value of the
782 variable in question (via a `/' command).
783 (Also note that the labels SDB prints for various FP stack regs
784 when doing an `x' command are all wrong.)
785 Note that these problems generally don't affect the native SVR4
786 C compiler because it doesn't allow the use of -O with -g and
787 because when it is *not* optimizing, it allocates a memory
788 location for each floating-point variable, and the memory
789 location is what gets described in the DWARF AT_location
790 attribute for the variable in question.
791 Regardless of the severe mental illness of the x86/svr4 SDB, we
792 do something sensible here and we use the following DWARF
793 register numbers. Note that these are all stack-top-relative
794 numbers.
795 11 for %st(0) (gcc regno = 8)
796 12 for %st(1) (gcc regno = 9)
797 13 for %st(2) (gcc regno = 10)
798 14 for %st(3) (gcc regno = 11)
799 15 for %st(4) (gcc regno = 12)
800 16 for %st(5) (gcc regno = 13)
801 17 for %st(6) (gcc regno = 14)
802 18 for %st(7) (gcc regno = 15)
804 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
805 {
806 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
807 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
808 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
809 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
810 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
811 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
812 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
813 };
815 /* Test and compare insns in i386.md store the information needed to
816 generate branch and scc insns here. */
818 rtx ix86_compare_op0 = NULL_RTX;
819 rtx ix86_compare_op1 = NULL_RTX;
820 rtx ix86_compare_emitted = NULL_RTX;
822 /* Size of the register save area. */
823 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
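/* For illustration, with the 64-bit ABI values assumed from i386.h
   (REGPARM_MAX == 6, SSE_REGPARM_MAX == 8, UNITS_PER_WORD == 8) this is

     X86_64_VARARGS_SIZE = 6 * 8 + 8 * 16 = 176 bytes

   matching the 176-byte register save area described by the x86-64 psABI.  */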
825 /* Define the structure for the machine field in struct function. */
827 struct stack_local_entry GTY(())
828 {
829 unsigned short mode;
830 unsigned short n;
831 rtx rtl;
832 struct stack_local_entry *next;
833 };
835 /* Structure describing stack frame layout.
836 Stack grows downward:
838 [arguments]
839 <- ARG_POINTER
840 saved pc
842 saved frame pointer if frame_pointer_needed
843 <- HARD_FRAME_POINTER
844 [saved regs]
846 [padding1] \
848 [va_arg registers] (
849 > to_allocate <- FRAME_POINTER
850 [frame] (
852 [padding2] /
853 */
854 struct ix86_frame
855 {
856 int nregs;
857 int padding1;
858 int va_arg_size;
859 HOST_WIDE_INT frame;
860 int padding2;
861 int outgoing_arguments_size;
862 int red_zone_size;
864 HOST_WIDE_INT to_allocate;
865 /* The offsets relative to ARG_POINTER. */
866 HOST_WIDE_INT frame_pointer_offset;
867 HOST_WIDE_INT hard_frame_pointer_offset;
868 HOST_WIDE_INT stack_pointer_offset;
870 /* When save_regs_using_mov is set, emit prologue using
871 move instead of push instructions. */
872 bool save_regs_using_mov;
873 };
875 /* Code model option. */
876 enum cmodel ix86_cmodel;
877 /* Asm dialect. */
878 enum asm_dialect ix86_asm_dialect = ASM_ATT;
879 /* TLS dialects. */
880 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
882 /* Which unit we are generating floating point math for. */
883 enum fpmath_unit ix86_fpmath;
885 /* Which cpu are we scheduling for. */
886 enum processor_type ix86_tune;
887 /* Which instruction set architecture to use. */
888 enum processor_type ix86_arch;
890 /* true if sse prefetch instruction is not NOOP. */
891 int x86_prefetch_sse;
893 /* ix86_regparm_string as a number */
894 static int ix86_regparm;
896 /* Preferred alignment for stack boundary in bits. */
897 unsigned int ix86_preferred_stack_boundary;
899 /* Values 1-5: see jump.c */
900 int ix86_branch_cost;
902 /* Variables which are this size or smaller are put in the data/bss
903 or ldata/lbss sections. */
905 int ix86_section_threshold = 65536;
907 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
908 char internal_label_prefix[16];
909 int internal_label_prefix_len;
911 static bool ix86_handle_option (size_t, const char *, int);
912 static void output_pic_addr_const (FILE *, rtx, int);
913 static void put_condition_code (enum rtx_code, enum machine_mode,
914 int, int, FILE *);
915 static const char *get_some_local_dynamic_name (void);
916 static int get_some_local_dynamic_name_1 (rtx *, void *);
917 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
918 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
919 rtx *);
920 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
921 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
922 enum machine_mode);
923 static rtx get_thread_pointer (int);
924 static rtx legitimize_tls_address (rtx, enum tls_model, int);
925 static void get_pc_thunk_name (char [32], unsigned int);
926 static rtx gen_push (rtx);
927 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
928 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
929 static struct machine_function * ix86_init_machine_status (void);
930 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
931 static int ix86_nsaved_regs (void);
932 static void ix86_emit_save_regs (void);
933 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
934 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
935 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
936 static HOST_WIDE_INT ix86_GOT_alias_set (void);
937 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
938 static rtx ix86_expand_aligntest (rtx, int);
939 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
940 static int ix86_issue_rate (void);
941 static int ix86_adjust_cost (rtx, rtx, rtx, int);
942 static int ia32_multipass_dfa_lookahead (void);
943 static void ix86_init_mmx_sse_builtins (void);
944 static rtx x86_this_parameter (tree);
945 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
946 HOST_WIDE_INT, tree);
947 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
948 static void x86_file_start (void);
949 static void ix86_reorg (void);
950 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
951 static tree ix86_build_builtin_va_list (void);
952 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
953 tree, int *, int);
954 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
955 static bool ix86_vector_mode_supported_p (enum machine_mode);
957 static int ix86_address_cost (rtx);
958 static bool ix86_cannot_force_const_mem (rtx);
959 static rtx ix86_delegitimize_address (rtx);
961 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
963 struct builtin_description;
964 static rtx ix86_expand_sse_comi (const struct builtin_description *,
965 tree, rtx);
966 static rtx ix86_expand_sse_compare (const struct builtin_description *,
967 tree, rtx);
968 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
969 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
970 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
971 static rtx ix86_expand_store_builtin (enum insn_code, tree);
972 static rtx safe_vector_operand (rtx, enum machine_mode);
973 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
974 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
975 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
976 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
977 static int ix86_fp_comparison_cost (enum rtx_code code);
978 static unsigned int ix86_select_alt_pic_regnum (void);
979 static int ix86_save_reg (unsigned int, int);
980 static void ix86_compute_frame_layout (struct ix86_frame *);
981 static int ix86_comp_type_attributes (tree, tree);
982 static int ix86_function_regparm (tree, tree);
983 const struct attribute_spec ix86_attribute_table[];
984 static bool ix86_function_ok_for_sibcall (tree, tree);
985 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
986 static int ix86_value_regno (enum machine_mode, tree, tree);
987 static bool contains_128bit_aligned_vector_p (tree);
988 static rtx ix86_struct_value_rtx (tree, int);
989 static bool ix86_ms_bitfield_layout_p (tree);
990 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
991 static int extended_reg_mentioned_1 (rtx *, void *);
992 static bool ix86_rtx_costs (rtx, int, int, int *);
993 static int min_insn_size (rtx);
994 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
995 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
996 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
997 tree, bool);
998 static void ix86_init_builtins (void);
999 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
1000 static const char *ix86_mangle_fundamental_type (tree);
1001 static tree ix86_stack_protect_fail (void);
1002 static rtx ix86_internal_arg_pointer (void);
1003 static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);
1005 /* This function is only used on Solaris. */
1006 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
1007 ATTRIBUTE_UNUSED;
1009 /* Register class used for passing given 64bit part of the argument.
1010 These represent classes as documented by the PS ABI, with the exception
1011 of SSESF, SSEDF classes, that are basically SSE class, just gcc will
1012 use SF or DFmode move instead of DImode to avoid reformatting penalties.
1014 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1015 whenever possible (upper half does contain padding). */
1017 enum x86_64_reg_class
1018 {
1019 X86_64_NO_CLASS,
1020 X86_64_INTEGER_CLASS,
1021 X86_64_INTEGERSI_CLASS,
1022 X86_64_SSE_CLASS,
1023 X86_64_SSESF_CLASS,
1024 X86_64_SSEDF_CLASS,
1025 X86_64_SSEUP_CLASS,
1026 X86_64_X87_CLASS,
1027 X86_64_X87UP_CLASS,
1028 X86_64_COMPLEX_X87_CLASS,
1029 X86_64_MEMORY_CLASS
1030 };
1031 static const char * const x86_64_reg_class_name[] = {
1032 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1033 "sseup", "x87", "x87up", "cplx87", "no"
1036 #define MAX_CLASSES 4
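/* Worked example of the classification above (illustrative only): an
   argument of type

     struct s { double d; long l; };

   spans two eightbytes; the first is classified X86_64_SSEDF_CLASS (a lone
   DFmode value, moved through an SSE register) and the second
   X86_64_INTEGER_CLASS, so the whole struct is passed in one SSE register
   plus one general-purpose register, as the psABI specifies.  */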
1038 /* Table of constants used by fldpi, fldln2, etc.... */
1039 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1040 static bool ext_80387_constants_init = 0;
1041 static void init_ext_80387_constants (void);
1042 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
1043 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
1044 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
1045 static section *x86_64_elf_select_section (tree decl, int reloc,
1046 unsigned HOST_WIDE_INT align)
1047 ATTRIBUTE_UNUSED;
1049 /* Initialize the GCC target structure. */
1050 #undef TARGET_ATTRIBUTE_TABLE
1051 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
1052 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1053 # undef TARGET_MERGE_DECL_ATTRIBUTES
1054 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
1055 #endif
1057 #undef TARGET_COMP_TYPE_ATTRIBUTES
1058 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
1060 #undef TARGET_INIT_BUILTINS
1061 #define TARGET_INIT_BUILTINS ix86_init_builtins
1062 #undef TARGET_EXPAND_BUILTIN
1063 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
1065 #undef TARGET_ASM_FUNCTION_EPILOGUE
1066 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
1068 #undef TARGET_ENCODE_SECTION_INFO
1069 #ifndef SUBTARGET_ENCODE_SECTION_INFO
1070 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
1071 #else
1072 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
1073 #endif
1075 #undef TARGET_ASM_OPEN_PAREN
1076 #define TARGET_ASM_OPEN_PAREN ""
1077 #undef TARGET_ASM_CLOSE_PAREN
1078 #define TARGET_ASM_CLOSE_PAREN ""
1080 #undef TARGET_ASM_ALIGNED_HI_OP
1081 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
1082 #undef TARGET_ASM_ALIGNED_SI_OP
1083 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
1084 #ifdef ASM_QUAD
1085 #undef TARGET_ASM_ALIGNED_DI_OP
1086 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1087 #endif
1089 #undef TARGET_ASM_UNALIGNED_HI_OP
1090 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1091 #undef TARGET_ASM_UNALIGNED_SI_OP
1092 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1093 #undef TARGET_ASM_UNALIGNED_DI_OP
1094 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1096 #undef TARGET_SCHED_ADJUST_COST
1097 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1098 #undef TARGET_SCHED_ISSUE_RATE
1099 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1100 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1101 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1102 ia32_multipass_dfa_lookahead
1104 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1105 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1107 #ifdef HAVE_AS_TLS
1108 #undef TARGET_HAVE_TLS
1109 #define TARGET_HAVE_TLS true
1110 #endif
1111 #undef TARGET_CANNOT_FORCE_CONST_MEM
1112 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1114 #undef TARGET_DELEGITIMIZE_ADDRESS
1115 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1117 #undef TARGET_MS_BITFIELD_LAYOUT_P
1118 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1120 #if TARGET_MACHO
1121 #undef TARGET_BINDS_LOCAL_P
1122 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1123 #endif
1125 #undef TARGET_ASM_OUTPUT_MI_THUNK
1126 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1127 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1128 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1130 #undef TARGET_ASM_FILE_START
1131 #define TARGET_ASM_FILE_START x86_file_start
1133 #undef TARGET_DEFAULT_TARGET_FLAGS
1134 #define TARGET_DEFAULT_TARGET_FLAGS \
1135 (TARGET_DEFAULT \
1136 | TARGET_64BIT_DEFAULT \
1137 | TARGET_SUBTARGET_DEFAULT \
1138 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1140 #undef TARGET_HANDLE_OPTION
1141 #define TARGET_HANDLE_OPTION ix86_handle_option
1143 #undef TARGET_RTX_COSTS
1144 #define TARGET_RTX_COSTS ix86_rtx_costs
1145 #undef TARGET_ADDRESS_COST
1146 #define TARGET_ADDRESS_COST ix86_address_cost
1148 #undef TARGET_FIXED_CONDITION_CODE_REGS
1149 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1150 #undef TARGET_CC_MODES_COMPATIBLE
1151 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1153 #undef TARGET_MACHINE_DEPENDENT_REORG
1154 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1156 #undef TARGET_BUILD_BUILTIN_VA_LIST
1157 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1159 #undef TARGET_MD_ASM_CLOBBERS
1160 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1162 #undef TARGET_PROMOTE_PROTOTYPES
1163 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1164 #undef TARGET_STRUCT_VALUE_RTX
1165 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1166 #undef TARGET_SETUP_INCOMING_VARARGS
1167 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1168 #undef TARGET_MUST_PASS_IN_STACK
1169 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1170 #undef TARGET_PASS_BY_REFERENCE
1171 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1172 #undef TARGET_INTERNAL_ARG_POINTER
1173 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
1174 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
1175 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
1177 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1178 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1180 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1181 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1183 #ifdef HAVE_AS_TLS
1184 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1185 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1186 #endif
1188 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1189 #undef TARGET_INSERT_ATTRIBUTES
1190 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1191 #endif
1193 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1194 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1196 #undef TARGET_STACK_PROTECT_FAIL
1197 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1199 #undef TARGET_FUNCTION_VALUE
1200 #define TARGET_FUNCTION_VALUE ix86_function_value
1202 struct gcc_target targetm = TARGET_INITIALIZER;
1205 /* The svr4 ABI for the i386 says that records and unions are returned
1206 in memory. */
1207 #ifndef DEFAULT_PCC_STRUCT_RETURN
1208 #define DEFAULT_PCC_STRUCT_RETURN 1
1209 #endif
1211 /* Implement TARGET_HANDLE_OPTION. */
1213 static bool
1214 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1216 switch (code)
1218 case OPT_m3dnow:
1219 if (!value)
1221 target_flags &= ~MASK_3DNOW_A;
1222 target_flags_explicit |= MASK_3DNOW_A;
1224 return true;
1226 case OPT_mmmx:
1227 if (!value)
1229 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1230 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1232 return true;
1234 case OPT_msse:
1235 if (!value)
1237 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1238 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1240 return true;
1242 case OPT_msse2:
1243 if (!value)
1245 target_flags &= ~MASK_SSE3;
1246 target_flags_explicit |= MASK_SSE3;
1248 return true;
1250 default:
1251 return true;
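/* The effect of the cases above is that disabling a base ISA switch also
   disables the extensions built on it; for example (illustrative command
   lines):

     -mno-mmx    also clears MASK_3DNOW and MASK_3DNOW_A
     -mno-sse    also clears MASK_SSE2 and MASK_SSE3
     -mno-sse2   also clears MASK_SSE3

   and records those masks in target_flags_explicit so that later -march
   defaults do not silently re-enable them.  */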
1255 /* Sometimes certain combinations of command options do not make
1256 sense on a particular target machine. You can define a macro
1257 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1258 defined, is executed once just after all the command options have
1259 been parsed.
1261 Don't use this macro to turn on various extra optimizations for
1262 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1264 void
1265 override_options (void)
1267 int i;
1268 int ix86_tune_defaulted = 0;
1270 /* Comes from final.c -- no real reason to change it. */
1271 #define MAX_CODE_ALIGN 16
1273 static struct ptt
1274 {
1275 const struct processor_costs *cost; /* Processor costs */
1276 const int target_enable; /* Target flags to enable. */
1277 const int target_disable; /* Target flags to disable. */
1278 const int align_loop; /* Default alignments. */
1279 const int align_loop_max_skip;
1280 const int align_jump;
1281 const int align_jump_max_skip;
1282 const int align_func;
1283 }
1284 const processor_target_table[PROCESSOR_max] =
1285 {
1286 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1287 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1288 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1289 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1290 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1291 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1292 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1293 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1294 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
1295 };
1297 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1298 static struct pta
1299 {
1300 const char *const name; /* processor name or nickname. */
1301 const enum processor_type processor;
1302 const enum pta_flags
1304 PTA_SSE = 1,
1305 PTA_SSE2 = 2,
1306 PTA_SSE3 = 4,
1307 PTA_MMX = 8,
1308 PTA_PREFETCH_SSE = 16,
1309 PTA_3DNOW = 32,
1310 PTA_3DNOW_A = 64,
1311 PTA_64BIT = 128
1312 } flags;
1313 }
1314 const processor_alias_table[] =
1315 {
1316 {"i386", PROCESSOR_I386, 0},
1317 {"i486", PROCESSOR_I486, 0},
1318 {"i586", PROCESSOR_PENTIUM, 0},
1319 {"pentium", PROCESSOR_PENTIUM, 0},
1320 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1321 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1322 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1323 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1324 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1325 {"i686", PROCESSOR_PENTIUMPRO, 0},
1326 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1327 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1328 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1329 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1330 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1331 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1332 | PTA_MMX | PTA_PREFETCH_SSE},
1333 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1334 | PTA_MMX | PTA_PREFETCH_SSE},
1335 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1336 | PTA_MMX | PTA_PREFETCH_SSE},
1337 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1338 | PTA_MMX | PTA_PREFETCH_SSE},
1339 {"k6", PROCESSOR_K6, PTA_MMX},
1340 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1341 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1342 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1343 | PTA_3DNOW_A},
1344 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1345 | PTA_3DNOW | PTA_3DNOW_A},
1346 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1347 | PTA_3DNOW_A | PTA_SSE},
1348 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1349 | PTA_3DNOW_A | PTA_SSE},
1350 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1351 | PTA_3DNOW_A | PTA_SSE},
1352 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1353 | PTA_SSE | PTA_SSE2 },
1354 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1355 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1356 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1357 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1358 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1359 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1360 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1361 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1362 };
1364 int const pta_size = ARRAY_SIZE (processor_alias_table);
1366 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1367 SUBTARGET_OVERRIDE_OPTIONS;
1368 #endif
1370 /* Set the default values for switches whose default depends on TARGET_64BIT
1371 in case they weren't overwritten by command line options. */
1372 if (TARGET_64BIT)
1374 if (flag_omit_frame_pointer == 2)
1375 flag_omit_frame_pointer = 1;
1376 if (flag_asynchronous_unwind_tables == 2)
1377 flag_asynchronous_unwind_tables = 1;
1378 if (flag_pcc_struct_return == 2)
1379 flag_pcc_struct_return = 0;
1381 else
1383 if (flag_omit_frame_pointer == 2)
1384 flag_omit_frame_pointer = 0;
1385 if (flag_asynchronous_unwind_tables == 2)
1386 flag_asynchronous_unwind_tables = 0;
1387 if (flag_pcc_struct_return == 2)
1388 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1391 if (!ix86_tune_string && ix86_arch_string)
1392 ix86_tune_string = ix86_arch_string;
1393 if (!ix86_tune_string)
1395 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1396 ix86_tune_defaulted = 1;
1398 if (!ix86_arch_string)
1399 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1401 if (ix86_cmodel_string != 0)
1403 if (!strcmp (ix86_cmodel_string, "small"))
1404 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1405 else if (!strcmp (ix86_cmodel_string, "medium"))
1406 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1407 else if (flag_pic)
1408 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1409 else if (!strcmp (ix86_cmodel_string, "32"))
1410 ix86_cmodel = CM_32;
1411 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1412 ix86_cmodel = CM_KERNEL;
1413 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1414 ix86_cmodel = CM_LARGE;
1415 else
1416 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1418 else
1420 ix86_cmodel = CM_32;
1421 if (TARGET_64BIT)
1422 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1424 if (ix86_asm_string != 0)
1426 if (! TARGET_MACHO
1427 && !strcmp (ix86_asm_string, "intel"))
1428 ix86_asm_dialect = ASM_INTEL;
1429 else if (!strcmp (ix86_asm_string, "att"))
1430 ix86_asm_dialect = ASM_ATT;
1431 else
1432 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1434 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1435 error ("code model %qs not supported in the %s bit mode",
1436 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1437 if (ix86_cmodel == CM_LARGE)
1438 sorry ("code model %<large%> not supported yet");
1439 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1440 sorry ("%i-bit mode not compiled in",
1441 (target_flags & MASK_64BIT) ? 64 : 32);
1443 for (i = 0; i < pta_size; i++)
1444 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1446 ix86_arch = processor_alias_table[i].processor;
1447 /* Default cpu tuning to the architecture. */
1448 ix86_tune = ix86_arch;
1449 if (processor_alias_table[i].flags & PTA_MMX
1450 && !(target_flags_explicit & MASK_MMX))
1451 target_flags |= MASK_MMX;
1452 if (processor_alias_table[i].flags & PTA_3DNOW
1453 && !(target_flags_explicit & MASK_3DNOW))
1454 target_flags |= MASK_3DNOW;
1455 if (processor_alias_table[i].flags & PTA_3DNOW_A
1456 && !(target_flags_explicit & MASK_3DNOW_A))
1457 target_flags |= MASK_3DNOW_A;
1458 if (processor_alias_table[i].flags & PTA_SSE
1459 && !(target_flags_explicit & MASK_SSE))
1460 target_flags |= MASK_SSE;
1461 if (processor_alias_table[i].flags & PTA_SSE2
1462 && !(target_flags_explicit & MASK_SSE2))
1463 target_flags |= MASK_SSE2;
1464 if (processor_alias_table[i].flags & PTA_SSE3
1465 && !(target_flags_explicit & MASK_SSE3))
1466 target_flags |= MASK_SSE3;
1467 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1468 x86_prefetch_sse = true;
1469 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1470 error ("CPU you selected does not support x86-64 "
1471 "instruction set");
1472 break;
1475 if (i == pta_size)
1476 error ("bad value (%s) for -march= switch", ix86_arch_string);
1478 for (i = 0; i < pta_size; i++)
1479 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1481 ix86_tune = processor_alias_table[i].processor;
1482 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1484 if (ix86_tune_defaulted)
1486 ix86_tune_string = "x86-64";
1487 for (i = 0; i < pta_size; i++)
1488 if (! strcmp (ix86_tune_string,
1489 processor_alias_table[i].name))
1490 break;
1491 ix86_tune = processor_alias_table[i].processor;
1493 else
1494 error ("CPU you selected does not support x86-64 "
1495 "instruction set");
1497 /* Intel CPUs have always interpreted SSE prefetch instructions as
1498 NOPs; so, we can enable SSE prefetch instructions even when
1499 -mtune (rather than -march) points us to a processor that has them.
1500 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1501 higher processors. */
1502 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1503 x86_prefetch_sse = true;
1504 break;
1506 if (i == pta_size)
1507 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1509 if (optimize_size)
1510 ix86_cost = &size_cost;
1511 else
1512 ix86_cost = processor_target_table[ix86_tune].cost;
1513 target_flags |= processor_target_table[ix86_tune].target_enable;
1514 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1516 /* Arrange to set up i386_stack_locals for all functions. */
1517 init_machine_status = ix86_init_machine_status;
1519 /* Validate -mregparm= value. */
1520 if (ix86_regparm_string)
1522 i = atoi (ix86_regparm_string);
1523 if (i < 0 || i > REGPARM_MAX)
1524 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1525 else
1526 ix86_regparm = i;
1528 else
1529 if (TARGET_64BIT)
1530 ix86_regparm = REGPARM_MAX;
1532 /* If the user has provided any of the -malign-* options,
1533 warn and use that value only if -falign-* is not set.
1534 Remove this code in GCC 3.2 or later. */
1535 if (ix86_align_loops_string)
1537 warning (0, "-malign-loops is obsolete, use -falign-loops");
1538 if (align_loops == 0)
1540 i = atoi (ix86_align_loops_string);
1541 if (i < 0 || i > MAX_CODE_ALIGN)
1542 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1543 else
1544 align_loops = 1 << i;
1548 if (ix86_align_jumps_string)
1550 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
1551 if (align_jumps == 0)
1553 i = atoi (ix86_align_jumps_string);
1554 if (i < 0 || i > MAX_CODE_ALIGN)
1555          error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1556 else
1557 align_jumps = 1 << i;
1561 if (ix86_align_funcs_string)
1563 warning (0, "-malign-functions is obsolete, use -falign-functions");
1564 if (align_functions == 0)
1566 i = atoi (ix86_align_funcs_string);
1567 if (i < 0 || i > MAX_CODE_ALIGN)
1568          error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1569 else
1570 align_functions = 1 << i;
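  /* Illustrative example: the obsolete -m options take a power of two,
     so "-malign-functions=4" yields align_functions = 1 << 4 = 16,
     which "-falign-functions=16" expresses directly in bytes.  */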
1574 /* Default align_* from the processor table. */
1575 if (align_loops == 0)
1577 align_loops = processor_target_table[ix86_tune].align_loop;
1578 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1580 if (align_jumps == 0)
1582 align_jumps = processor_target_table[ix86_tune].align_jump;
1583 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1585 if (align_functions == 0)
1587 align_functions = processor_target_table[ix86_tune].align_func;
1590 /* Validate -mpreferred-stack-boundary= value, or provide default.
1591 The default of 128 bits is for Pentium III's SSE __m128, but we
1592 don't want additional code to keep the stack aligned when
1593 optimizing for code size. */
1594 ix86_preferred_stack_boundary = ((TARGET_64BIT || TARGET_MACHO || !optimize_size)
1595 ? 128 : 32);
1596 if (ix86_preferred_stack_boundary_string)
1598 i = atoi (ix86_preferred_stack_boundary_string);
1599 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1600 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1601 TARGET_64BIT ? 4 : 2);
1602 else
1603 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
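  /* Illustrative example: "-mpreferred-stack-boundary=4" gives
     (1 << 4) * BITS_PER_UNIT = 16 * 8 = 128 bits, i.e. the 16-byte
     alignment assumed for SSE __m128 values.  */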
1606 /* Validate -mbranch-cost= value, or provide default. */
1607 ix86_branch_cost = ix86_cost->branch_cost;
1608 if (ix86_branch_cost_string)
1610 i = atoi (ix86_branch_cost_string);
1611 if (i < 0 || i > 5)
1612 error ("-mbranch-cost=%d is not between 0 and 5", i);
1613 else
1614 ix86_branch_cost = i;
1616 if (ix86_section_threshold_string)
1618 i = atoi (ix86_section_threshold_string);
1619 if (i < 0)
1620 error ("-mlarge-data-threshold=%d is negative", i);
1621 else
1622 ix86_section_threshold = i;
1625 if (ix86_tls_dialect_string)
1627 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1628 ix86_tls_dialect = TLS_DIALECT_GNU;
1629 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
1630 ix86_tls_dialect = TLS_DIALECT_GNU2;
1631 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1632 ix86_tls_dialect = TLS_DIALECT_SUN;
1633 else
1634 error ("bad value (%s) for -mtls-dialect= switch",
1635 ix86_tls_dialect_string);
1638 /* Keep nonleaf frame pointers. */
1639 if (flag_omit_frame_pointer)
1640 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
1641 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
1642 flag_omit_frame_pointer = 1;
1644 /* If we're doing fast math, we don't care about comparison order
1645 wrt NaNs. This lets us use a shorter comparison sequence. */
1646 if (flag_unsafe_math_optimizations)
1647 target_flags &= ~MASK_IEEE_FP;
1649 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1650 since the insns won't need emulation. */
1651 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1652 target_flags &= ~MASK_NO_FANCY_MATH_387;
1654 /* Likewise, if the target doesn't have a 387, or we've specified
1655 software floating point, don't use 387 inline intrinsics. */
1656 if (!TARGET_80387)
1657 target_flags |= MASK_NO_FANCY_MATH_387;
1659 /* Turn on SSE2 builtins for -msse3. */
1660 if (TARGET_SSE3)
1661 target_flags |= MASK_SSE2;
1663 /* Turn on SSE builtins for -msse2. */
1664 if (TARGET_SSE2)
1665 target_flags |= MASK_SSE;
1667 /* Turn on MMX builtins for -msse. */
1668 if (TARGET_SSE)
1670 target_flags |= MASK_MMX & ~target_flags_explicit;
1671 x86_prefetch_sse = true;
1674 /* Turn on MMX builtins for 3Dnow. */
1675 if (TARGET_3DNOW)
1676 target_flags |= MASK_MMX;
1678 if (TARGET_64BIT)
1680 if (TARGET_ALIGN_DOUBLE)
1681 error ("-malign-double makes no sense in the 64bit mode");
1682 if (TARGET_RTD)
1683 error ("-mrtd calling convention not supported in the 64bit mode");
1685 /* Enable by default the SSE and MMX builtins. Do allow the user to
1686 explicitly disable any of these. In particular, disabling SSE and
1687 MMX for kernel code is extremely useful. */
1688 target_flags
1689 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
1690 & ~target_flags_explicit);
1692 else
1694      /* The i386 ABI does not specify a red zone.  It can still make sense to use one
1695          when the programmer takes care to keep the stack from being destroyed.  */
1696 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1697 target_flags |= MASK_NO_RED_ZONE;
1700 /* Accept -msseregparm only if at least SSE support is enabled. */
1701 if (TARGET_SSEREGPARM
1702 && ! TARGET_SSE)
1703 error ("-msseregparm used without SSE enabled");
1705 ix86_fpmath = TARGET_FPMATH_DEFAULT;
1707 if (ix86_fpmath_string != 0)
1709 if (! strcmp (ix86_fpmath_string, "387"))
1710 ix86_fpmath = FPMATH_387;
1711 else if (! strcmp (ix86_fpmath_string, "sse"))
1713 if (!TARGET_SSE)
1715 warning (0, "SSE instruction set disabled, using 387 arithmetics");
1716 ix86_fpmath = FPMATH_387;
1718 else
1719 ix86_fpmath = FPMATH_SSE;
1721 else if (! strcmp (ix86_fpmath_string, "387,sse")
1722 || ! strcmp (ix86_fpmath_string, "sse,387"))
1724 if (!TARGET_SSE)
1726 warning (0, "SSE instruction set disabled, using 387 arithmetics");
1727 ix86_fpmath = FPMATH_387;
1729 else if (!TARGET_80387)
1731 warning (0, "387 instruction set disabled, using SSE arithmetics");
1732 ix86_fpmath = FPMATH_SSE;
1734 else
1735 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1737 else
1738 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1741 /* If the i387 is disabled, then do not return values in it. */
1742 if (!TARGET_80387)
1743 target_flags &= ~MASK_FLOAT_RETURNS;
1745 if ((x86_accumulate_outgoing_args & TUNEMASK)
1746 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1747 && !optimize_size)
1748 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1750 /* ??? Unwind info is not correct around the CFG unless either a frame
1751 pointer is present or M_A_O_A is set. Fixing this requires rewriting
1752 unwind info generation to be aware of the CFG and propagating states
1753 around edges. */
1754 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
1755 || flag_exceptions || flag_non_call_exceptions)
1756 && flag_omit_frame_pointer
1757 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
1759 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1760 warning (0, "unwind tables currently require either a frame pointer "
1761 "or -maccumulate-outgoing-args for correctness");
1762 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1765 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1767 char *p;
1768 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1769 p = strchr (internal_label_prefix, 'X');
1770 internal_label_prefix_len = p - internal_label_prefix;
1771 *p = '\0';
1774   /* When no scheduling description is available, disable the scheduler passes
1775      so they won't slow down the compilation and make x87 code slower.  */
1776 if (!TARGET_SCHEDULE)
1777 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
1780 /* Switch to the appropriate section for output of DECL.
1781 DECL is either a `VAR_DECL' node or a constant of some sort.
1782 RELOC indicates whether forming the initial value of DECL requires
1783 link-time relocations. */
1785 static section *
1786 x86_64_elf_select_section (tree decl, int reloc,
1787 unsigned HOST_WIDE_INT align)
1789 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1790 && ix86_in_large_data_p (decl))
1792 const char *sname = NULL;
1793 unsigned int flags = SECTION_WRITE;
1794 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1796 case SECCAT_DATA:
1797 sname = ".ldata";
1798 break;
1799 case SECCAT_DATA_REL:
1800 sname = ".ldata.rel";
1801 break;
1802 case SECCAT_DATA_REL_LOCAL:
1803 sname = ".ldata.rel.local";
1804 break;
1805 case SECCAT_DATA_REL_RO:
1806 sname = ".ldata.rel.ro";
1807 break;
1808 case SECCAT_DATA_REL_RO_LOCAL:
1809 sname = ".ldata.rel.ro.local";
1810 break;
1811 case SECCAT_BSS:
1812 sname = ".lbss";
1813 flags |= SECTION_BSS;
1814 break;
1815 case SECCAT_RODATA:
1816 case SECCAT_RODATA_MERGE_STR:
1817 case SECCAT_RODATA_MERGE_STR_INIT:
1818 case SECCAT_RODATA_MERGE_CONST:
1819 sname = ".lrodata";
1820 flags = 0;
1821 break;
1822 case SECCAT_SRODATA:
1823 case SECCAT_SDATA:
1824 case SECCAT_SBSS:
1825 gcc_unreachable ();
1826 case SECCAT_TEXT:
1827 case SECCAT_TDATA:
1828 case SECCAT_TBSS:
1829          /* We don't split these for the medium model.  Place them into
1830             default sections and hope for the best.  */
1831 break;
1833 if (sname)
1835 /* We might get called with string constants, but get_named_section
1836 doesn't like them as they are not DECLs. Also, we need to set
1837 flags in that case. */
1838 if (!DECL_P (decl))
1839 return get_section (sname, flags, NULL);
1840 return get_named_section (decl, sname, reloc);
1843 return default_elf_select_section (decl, reloc, align);
1846 /* Build up a unique section name, expressed as a
1847 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
1848 RELOC indicates whether the initial value of EXP requires
1849 link-time relocations. */
1851 static void
1852 x86_64_elf_unique_section (tree decl, int reloc)
1854 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1855 && ix86_in_large_data_p (decl))
1857 const char *prefix = NULL;
1858 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
1859 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
1861 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1863 case SECCAT_DATA:
1864 case SECCAT_DATA_REL:
1865 case SECCAT_DATA_REL_LOCAL:
1866 case SECCAT_DATA_REL_RO:
1867 case SECCAT_DATA_REL_RO_LOCAL:
1868 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
1869 break;
1870 case SECCAT_BSS:
1871 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
1872 break;
1873 case SECCAT_RODATA:
1874 case SECCAT_RODATA_MERGE_STR:
1875 case SECCAT_RODATA_MERGE_STR_INIT:
1876 case SECCAT_RODATA_MERGE_CONST:
1877 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
1878 break;
1879 case SECCAT_SRODATA:
1880 case SECCAT_SDATA:
1881 case SECCAT_SBSS:
1882 gcc_unreachable ();
1883 case SECCAT_TEXT:
1884 case SECCAT_TDATA:
1885 case SECCAT_TBSS:
1886          /* We don't split these for the medium model.  Place them into
1887             default sections and hope for the best.  */
1888 break;
1890 if (prefix)
1892 const char *name;
1893 size_t nlen, plen;
1894 char *string;
1895 plen = strlen (prefix);
1897 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
1898 name = targetm.strip_name_encoding (name);
1899 nlen = strlen (name);
1901 string = alloca (nlen + plen + 1);
1902 memcpy (string, prefix, plen);
1903 memcpy (string + plen, name, nlen + 1);
1905 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
1906 return;
1909 default_unique_section (decl, reloc);
1912 #ifdef COMMON_ASM_OP
1913 /* This says how to output assembler code to declare an
1914 uninitialized external linkage data object.
1916    For medium model x86-64 we need to use the .largecomm directive for
1917 large objects. */
1918 void
1919 x86_elf_aligned_common (FILE *file,
1920 const char *name, unsigned HOST_WIDE_INT size,
1921 int align)
1923 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1924 && size > (unsigned int)ix86_section_threshold)
1925 fprintf (file, ".largecomm\t");
1926 else
1927 fprintf (file, "%s", COMMON_ASM_OP);
1928 assemble_name (file, name);
1929 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
1930 size, align / BITS_PER_UNIT);
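/* Illustrative example: for a tentative definition "char big[100000];"
   compiled with -m64 -mcmodel=medium and a -mlarge-data-threshold
   below 100000, the code above emits something like
       .largecomm	big,100000,32
   instead of the usual COMMON_ASM_OP (".comm") directive.  */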
1933 /* Utility function for targets to use in implementing
1934 ASM_OUTPUT_ALIGNED_BSS. */
1936 void
1937 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
1938 const char *name, unsigned HOST_WIDE_INT size,
1939 int align)
1941 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1942 && size > (unsigned int)ix86_section_threshold)
1943 switch_to_section (get_named_section (decl, ".lbss", 0));
1944 else
1945 switch_to_section (bss_section);
1946 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
1947 #ifdef ASM_DECLARE_OBJECT_NAME
1948 last_assemble_variable_decl = decl;
1949 ASM_DECLARE_OBJECT_NAME (file, name, decl);
1950 #else
1951   /* The standard thing is to just output a label for the object.  */
1952 ASM_OUTPUT_LABEL (file, name);
1953 #endif /* ASM_DECLARE_OBJECT_NAME */
1954 ASM_OUTPUT_SKIP (file, size ? size : 1);
1956 #endif
1958 void
1959 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1961 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1962 make the problem with not enough registers even worse. */
1963 #ifdef INSN_SCHEDULING
1964 if (level > 1)
1965 flag_schedule_insns = 0;
1966 #endif
1968 if (TARGET_MACHO)
1969 /* The Darwin libraries never set errno, so we might as well
1970 avoid calling them when that's the only reason we would. */
1971 flag_errno_math = 0;
1973   /* The default values of these switches depend on TARGET_64BIT,
1974      which is not known at this point.  Mark these values with 2 and
1975      let the user override them.  In case there is no command line option
1976      specifying them, we will set the defaults in override_options.  */
1977 if (optimize >= 1)
1978 flag_omit_frame_pointer = 2;
1979 flag_pcc_struct_return = 2;
1980 flag_asynchronous_unwind_tables = 2;
1981 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
1982 SUBTARGET_OPTIMIZATION_OPTIONS;
1983 #endif
1986 /* Table of valid machine attributes. */
1987 const struct attribute_spec ix86_attribute_table[] =
1989 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1990 /* Stdcall attribute says callee is responsible for popping arguments
1991 if they are not variable. */
1992 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1993 /* Fastcall attribute says callee is responsible for popping arguments
1994 if they are not variable. */
1995 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1996 /* Cdecl attribute says the callee is a normal C declaration */
1997 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1998 /* Regparm attribute specifies how many integer arguments are to be
1999 passed in registers. */
2000 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
2001 /* Sseregparm attribute says we are using x86_64 calling conventions
2002 for FP arguments. */
2003 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2004 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2005 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2006 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2007 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
2008 #endif
2009 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2010 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2011 #ifdef SUBTARGET_ATTRIBUTE_TABLE
2012 SUBTARGET_ATTRIBUTE_TABLE,
2013 #endif
2014 { NULL, 0, 0, false, false, false, NULL }
2017 /* Decide whether we can make a sibling call to a function. DECL is the
2018 declaration of the function being targeted by the call and EXP is the
2019 CALL_EXPR representing the call. */
2021 static bool
2022 ix86_function_ok_for_sibcall (tree decl, tree exp)
2024 tree func;
2025 rtx a, b;
2027 /* If we are generating position-independent code, we cannot sibcall
2028 optimize any indirect call, or a direct call to a global function,
2029 as the PLT requires %ebx be live. */
2030 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
2031 return false;
2033 if (decl)
2034 func = decl;
2035 else
2037 func = TREE_TYPE (TREE_OPERAND (exp, 0));
2038 if (POINTER_TYPE_P (func))
2039 func = TREE_TYPE (func);
2042   /* Check that the return value locations are the same.  For example,
2043      if we are returning floats on the 80387 register stack, we cannot
2044 make a sibcall from a function that doesn't return a float to a
2045 function that does or, conversely, from a function that does return
2046 a float to a function that doesn't; the necessary stack adjustment
2047 would not be executed. This is also the place we notice
2048 differences in the return value ABI. Note that it is ok for one
2049 of the functions to have void return type as long as the return
2050 value of the other is passed in a register. */
2051 a = ix86_function_value (TREE_TYPE (exp), func, false);
2052 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
2053 cfun->decl, false);
2054 if (STACK_REG_P (a) || STACK_REG_P (b))
2056 if (!rtx_equal_p (a, b))
2057 return false;
2059 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
2061 else if (!rtx_equal_p (a, b))
2062 return false;
2064 /* If this call is indirect, we'll need to be able to use a call-clobbered
2065 register for the address of the target function. Make sure that all
2066 such registers are not used for passing parameters. */
2067 if (!decl && !TARGET_64BIT)
2069 tree type;
2071 /* We're looking at the CALL_EXPR, we need the type of the function. */
2072 type = TREE_OPERAND (exp, 0); /* pointer expression */
2073 type = TREE_TYPE (type); /* pointer type */
2074 type = TREE_TYPE (type); /* function type */
2076 if (ix86_function_regparm (type, NULL) >= 3)
2078 /* ??? Need to count the actual number of registers to be used,
2079 not the possible number of registers. Fix later. */
2080 return false;
2084 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2085 /* Dllimport'd functions are also called indirectly. */
2086 if (decl && DECL_DLLIMPORT_P (decl)
2087 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
2088 return false;
2089 #endif
2091   /* If we force-aligned the stack, then sibcalling would unalign the
2092 stack, which may break the called function. */
2093 if (cfun->machine->force_align_arg_pointer)
2094 return false;
2096 /* Otherwise okay. That also includes certain types of indirect calls. */
2097 return true;
2100 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2101 calling convention attributes;
2102 arguments as in struct attribute_spec.handler. */
2104 static tree
2105 ix86_handle_cconv_attribute (tree *node, tree name,
2106 tree args,
2107 int flags ATTRIBUTE_UNUSED,
2108 bool *no_add_attrs)
2110 if (TREE_CODE (*node) != FUNCTION_TYPE
2111 && TREE_CODE (*node) != METHOD_TYPE
2112 && TREE_CODE (*node) != FIELD_DECL
2113 && TREE_CODE (*node) != TYPE_DECL)
2115 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2116 IDENTIFIER_POINTER (name));
2117 *no_add_attrs = true;
2118 return NULL_TREE;
2121 /* Can combine regparm with all attributes but fastcall. */
2122 if (is_attribute_p ("regparm", name))
2124 tree cst;
2126 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2128 error ("fastcall and regparm attributes are not compatible");
2131 cst = TREE_VALUE (args);
2132 if (TREE_CODE (cst) != INTEGER_CST)
2134 warning (OPT_Wattributes,
2135 "%qs attribute requires an integer constant argument",
2136 IDENTIFIER_POINTER (name));
2137 *no_add_attrs = true;
2139 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2141 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2142 IDENTIFIER_POINTER (name), REGPARM_MAX);
2143 *no_add_attrs = true;
2146 return NULL_TREE;
2149 if (TARGET_64BIT)
2151 warning (OPT_Wattributes, "%qs attribute ignored",
2152 IDENTIFIER_POINTER (name));
2153 *no_add_attrs = true;
2154 return NULL_TREE;
2157 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2158 if (is_attribute_p ("fastcall", name))
2160 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2162 error ("fastcall and cdecl attributes are not compatible");
2164 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2166 error ("fastcall and stdcall attributes are not compatible");
2168 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2170 error ("fastcall and regparm attributes are not compatible");
2174 /* Can combine stdcall with fastcall (redundant), regparm and
2175 sseregparm. */
2176 else if (is_attribute_p ("stdcall", name))
2178 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2180 error ("stdcall and cdecl attributes are not compatible");
2182 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2184 error ("stdcall and fastcall attributes are not compatible");
2188 /* Can combine cdecl with regparm and sseregparm. */
2189 else if (is_attribute_p ("cdecl", name))
2191 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2193 error ("stdcall and cdecl attributes are not compatible");
2195 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2197 error ("fastcall and cdecl attributes are not compatible");
2201 /* Can combine sseregparm with all attributes. */
2203 return NULL_TREE;
2206 /* Return 0 if the attributes for two types are incompatible, 1 if they
2207 are compatible, and 2 if they are nearly compatible (which causes a
2208 warning to be generated). */
2210 static int
2211 ix86_comp_type_attributes (tree type1, tree type2)
2213 /* Check for mismatch of non-default calling convention. */
2214 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2216 if (TREE_CODE (type1) != FUNCTION_TYPE)
2217 return 1;
2219 /* Check for mismatched fastcall/regparm types. */
2220 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2221 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2222 || (ix86_function_regparm (type1, NULL)
2223 != ix86_function_regparm (type2, NULL)))
2224 return 0;
2226 /* Check for mismatched sseregparm types. */
2227 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2228 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2229 return 0;
2231 /* Check for mismatched return types (cdecl vs stdcall). */
2232 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2233 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2234 return 0;
2236 return 1;
2239 /* Return the regparm value for a function with the indicated TYPE and DECL.
2240 DECL may be NULL when calling function indirectly
2241 or considering a libcall. */
2243 static int
2244 ix86_function_regparm (tree type, tree decl)
2246 tree attr;
2247 int regparm = ix86_regparm;
2248 bool user_convention = false;
2250 if (!TARGET_64BIT)
2252 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2253 if (attr)
2255 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2256 user_convention = true;
2259 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2261 regparm = 2;
2262 user_convention = true;
2265 /* Use register calling convention for local functions when possible. */
2266 if (!TARGET_64BIT && !user_convention && decl
2267 && flag_unit_at_a_time && !profile_flag)
2269 struct cgraph_local_info *i = cgraph_local_info (decl);
2270 if (i && i->local)
2272 int local_regparm, globals = 0, regno;
2274 /* Make sure no regparm register is taken by a global register
2275 variable. */
2276 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2277 if (global_regs[local_regparm])
2278 break;
2279 /* We can't use regparm(3) for nested functions as these use
2280             the static chain pointer in the third argument.  */
2281 if (local_regparm == 3
2282 && decl_function_context (decl)
2283 && !DECL_NO_STATIC_CHAIN (decl))
2284 local_regparm = 2;
2285          /* Each global register variable increases register pressure,
2286             so the more global reg vars there are, the less the regparm
2287             optimization can be used, unless requested by the user explicitly.  */
2288 for (regno = 0; regno < 6; regno++)
2289 if (global_regs[regno])
2290 globals++;
2291 local_regparm
2292 = globals < local_regparm ? local_regparm - globals : 0;
2294 if (local_regparm > regparm)
2295 regparm = local_regparm;
2299 return regparm;
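/* Illustrative example: a declaration such as
       int __attribute__ ((regparm (3))) f (int a, int b, int c);
   makes this function return 3, so the three arguments are passed in
   %eax, %edx and %ecx rather than on the stack (32-bit only).  */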
2302 /* Return 1 or 2, if we can pass up to 8 SFmode (1) and DFmode (2) arguments
2303 in SSE registers for a function with the indicated TYPE and DECL.
2304 DECL may be NULL when calling function indirectly
2305 or considering a libcall. Otherwise return 0. */
2307 static int
2308 ix86_function_sseregparm (tree type, tree decl)
2310 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2311 by the sseregparm attribute. */
2312 if (TARGET_SSEREGPARM
2313 || (type
2314 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2316 if (!TARGET_SSE)
2318 if (decl)
2319 error ("Calling %qD with attribute sseregparm without "
2320 "SSE/SSE2 enabled", decl);
2321 else
2322 error ("Calling %qT with attribute sseregparm without "
2323 "SSE/SSE2 enabled", type);
2324 return 0;
2327 return 2;
2330 /* For local functions, pass SFmode (and DFmode for SSE2) arguments
2331 in SSE registers even for 32-bit mode and not just 3, but up to
2332 8 SSE arguments in registers. */
2333 if (!TARGET_64BIT && decl
2334 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2336 struct cgraph_local_info *i = cgraph_local_info (decl);
2337 if (i && i->local)
2338 return TARGET_SSE2 ? 2 : 1;
2341 return 0;
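/* Illustrative example: with SSE enabled,
       double __attribute__ ((sseregparm)) f (double x, double y);
   makes this function return 2, so X and Y are passed in SSE
   registers even in 32-bit mode (see the DFmode/SFmode cases in
   function_arg below).  */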
2344 /* Return true if EAX is live at the start of the function. Used by
2345 ix86_expand_prologue to determine if we need special help before
2346 calling allocate_stack_worker. */
2348 static bool
2349 ix86_eax_live_at_start_p (void)
2351 /* Cheat. Don't bother working forward from ix86_function_regparm
2352 to the function type to whether an actual argument is located in
2353 eax. Instead just look at cfg info, which is still close enough
2354 to correct at this point. This gives false positives for broken
2355 functions that might use uninitialized data that happens to be
2356 allocated in eax, but who cares? */
2357 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2360 /* Value is the number of bytes of arguments automatically
2361 popped when returning from a subroutine call.
2362 FUNDECL is the declaration node of the function (as a tree),
2363 FUNTYPE is the data type of the function (as a tree),
2364 or for a library call it is an identifier node for the subroutine name.
2365 SIZE is the number of bytes of arguments passed on the stack.
2367 On the 80386, the RTD insn may be used to pop them if the number
2368 of args is fixed, but if the number is variable then the caller
2369 must pop them all. RTD can't be used for library calls now
2370 because the library is compiled with the Unix compiler.
2371 Use of RTD is a selectable option, since it is incompatible with
2372 standard Unix calling sequences. If the option is not selected,
2373 the caller must always pop the args.
2375 The attribute stdcall is equivalent to RTD on a per module basis. */
2378 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2380 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2382 /* Cdecl functions override -mrtd, and never pop the stack. */
2383 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2385 /* Stdcall and fastcall functions will pop the stack if not
2386 variable args. */
2387 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2388 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2389 rtd = 1;
2391 if (rtd
2392 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2393 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2394 == void_type_node)))
2395 return size;
2398 /* Lose any fake structure return argument if it is passed on the stack. */
2399 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2400 && !TARGET_64BIT
2401 && !KEEP_AGGREGATE_RETURN_POINTER)
2403 int nregs = ix86_function_regparm (funtype, fundecl);
2405 if (!nregs)
2406 return GET_MODE_SIZE (Pmode);
2409 return 0;
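/* Illustrative example: for
       void __attribute__ ((stdcall)) f (int a, int b);
   the function above returns 8, so F pops its own arguments with
   "ret $8"; a cdecl or variadic function returns 0 and leaves the
   popping to the caller.  */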
2412 /* Argument support functions. */
2414 /* Return true when register may be used to pass function parameters. */
2415 bool
2416 ix86_function_arg_regno_p (int regno)
2418 int i;
2419 if (!TARGET_64BIT)
2420 return (regno < REGPARM_MAX
2421 || (TARGET_MMX && MMX_REGNO_P (regno)
2422 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2423 || (TARGET_SSE && SSE_REGNO_P (regno)
2424 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2426 if (TARGET_SSE && SSE_REGNO_P (regno)
2427 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2428 return true;
2429 /* RAX is used as hidden argument to va_arg functions. */
2430 if (!regno)
2431 return true;
2432 for (i = 0; i < REGPARM_MAX; i++)
2433 if (regno == x86_64_int_parameter_registers[i])
2434 return true;
2435 return false;
2438 /* Return true if we do not know how to pass TYPE solely in registers.  */
2440 static bool
2441 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2443 if (must_pass_in_stack_var_size_or_pad (mode, type))
2444 return true;
2446 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2447 The layout_type routine is crafty and tries to trick us into passing
2448 currently unsupported vector types on the stack by using TImode. */
2449 return (!TARGET_64BIT && mode == TImode
2450 && type && TREE_CODE (type) != VECTOR_TYPE);
2453 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2454 for a call to a function whose data type is FNTYPE.
2455 For a library call, FNTYPE is 0. */
2457 void
2458 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2459 tree fntype, /* tree ptr for function decl */
2460 rtx libname, /* SYMBOL_REF of library name or 0 */
2461 tree fndecl)
2463 static CUMULATIVE_ARGS zero_cum;
2464 tree param, next_param;
2466 if (TARGET_DEBUG_ARG)
2468 fprintf (stderr, "\ninit_cumulative_args (");
2469 if (fntype)
2470 fprintf (stderr, "fntype code = %s, ret code = %s",
2471 tree_code_name[(int) TREE_CODE (fntype)],
2472 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2473 else
2474 fprintf (stderr, "no fntype");
2476 if (libname)
2477 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2480 *cum = zero_cum;
2482 /* Set up the number of registers to use for passing arguments. */
2483 cum->nregs = ix86_regparm;
2484 if (TARGET_SSE)
2485 cum->sse_nregs = SSE_REGPARM_MAX;
2486 if (TARGET_MMX)
2487 cum->mmx_nregs = MMX_REGPARM_MAX;
2488 cum->warn_sse = true;
2489 cum->warn_mmx = true;
2490 cum->maybe_vaarg = false;
2492 /* Use ecx and edx registers if function has fastcall attribute,
2493 else look for regparm information. */
2494 if (fntype && !TARGET_64BIT)
2496 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2498 cum->nregs = 2;
2499 cum->fastcall = 1;
2501 else
2502 cum->nregs = ix86_function_regparm (fntype, fndecl);
2505 /* Set up the number of SSE registers used for passing SFmode
2506 and DFmode arguments. Warn for mismatching ABI. */
2507 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2509 /* Determine if this function has variable arguments. This is
2510      indicated by the last argument being 'void_type_node' if there
2511 are no variable arguments. If there are variable arguments, then
2512 we won't pass anything in registers in 32-bit mode. */
2514 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2516 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2517 param != 0; param = next_param)
2519 next_param = TREE_CHAIN (param);
2520 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2522 if (!TARGET_64BIT)
2524 cum->nregs = 0;
2525 cum->sse_nregs = 0;
2526 cum->mmx_nregs = 0;
2527 cum->warn_sse = 0;
2528 cum->warn_mmx = 0;
2529 cum->fastcall = 0;
2530 cum->float_in_sse = 0;
2532 cum->maybe_vaarg = true;
2536 if ((!fntype && !libname)
2537 || (fntype && !TYPE_ARG_TYPES (fntype)))
2538 cum->maybe_vaarg = true;
2540 if (TARGET_DEBUG_ARG)
2541 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2543 return;
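/* Illustrative example: for
       int __attribute__ ((fastcall)) f (int a, int b, int c);
   the code above sets cum->nregs to 2 and cum->fastcall to 1, so A
   and B are passed in %ecx and %edx (see function_arg below) and C
   goes on the stack.  */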
2546 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2547 But in the case of vector types, it is some vector mode.
2549 When we have only some of our vector isa extensions enabled, then there
2550 are some modes for which vector_mode_supported_p is false. For these
2551 modes, the generic vector support in gcc will choose some non-vector mode
2552 in order to implement the type. By computing the natural mode, we'll
2553 select the proper ABI location for the operand and not depend on whatever
2554 the middle-end decides to do with these vector types. */
2556 static enum machine_mode
2557 type_natural_mode (tree type)
2559 enum machine_mode mode = TYPE_MODE (type);
2561 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2563 HOST_WIDE_INT size = int_size_in_bytes (type);
2564 if ((size == 8 || size == 16)
2565 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2566 && TYPE_VECTOR_SUBPARTS (type) > 1)
2568 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2570 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2571 mode = MIN_MODE_VECTOR_FLOAT;
2572 else
2573 mode = MIN_MODE_VECTOR_INT;
2575 /* Get the mode which has this inner mode and number of units. */
2576 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2577 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2578 && GET_MODE_INNER (mode) == innermode)
2579 return mode;
2581 gcc_unreachable ();
2585 return mode;
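/* Illustrative example: for
       typedef int v4si __attribute__ ((vector_size (16)));
   this function returns V4SImode even when SSE is disabled and the
   middle-end would otherwise fall back to a non-vector mode, keeping
   the ABI location of such arguments stable.  */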
2588 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2589 this may not agree with the mode that the type system has chosen for the
2590 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2591 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2593 static rtx
2594 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2595 unsigned int regno)
2597 rtx tmp;
2599 if (orig_mode != BLKmode)
2600 tmp = gen_rtx_REG (orig_mode, regno);
2601 else
2603 tmp = gen_rtx_REG (mode, regno);
2604 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2605 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
2608 return tmp;
2611 /* x86-64 register passing implementation.  See the x86-64 ABI for details.  The goal
2612    of this code is to classify each 8-byte chunk of the incoming argument by register
2613    class and assign registers accordingly.  */
2615 /* Return the union class of CLASS1 and CLASS2.
2616 See the x86-64 PS ABI for details. */
2618 static enum x86_64_reg_class
2619 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2621 /* Rule #1: If both classes are equal, this is the resulting class. */
2622 if (class1 == class2)
2623 return class1;
2625 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2626 the other class. */
2627 if (class1 == X86_64_NO_CLASS)
2628 return class2;
2629 if (class2 == X86_64_NO_CLASS)
2630 return class1;
2632 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2633 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2634 return X86_64_MEMORY_CLASS;
2636 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2637 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2638 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2639 return X86_64_INTEGERSI_CLASS;
2640 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2641 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2642 return X86_64_INTEGER_CLASS;
2644 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
2645 MEMORY is used. */
2646 if (class1 == X86_64_X87_CLASS
2647 || class1 == X86_64_X87UP_CLASS
2648 || class1 == X86_64_COMPLEX_X87_CLASS
2649 || class2 == X86_64_X87_CLASS
2650 || class2 == X86_64_X87UP_CLASS
2651 || class2 == X86_64_COMPLEX_X87_CLASS)
2652 return X86_64_MEMORY_CLASS;
2654 /* Rule #6: Otherwise class SSE is used. */
2655 return X86_64_SSE_CLASS;
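/* Illustrative example: merging X86_64_NO_CLASS with
   X86_64_SSESF_CLASS yields X86_64_SSESF_CLASS (rule #2), while
   merging X86_64_INTEGERSI_CLASS with X86_64_SSESF_CLASS yields
   X86_64_INTEGERSI_CLASS (rule #4).  */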
2658 /* Classify the argument of type TYPE and mode MODE.
2659 CLASSES will be filled by the register class used to pass each word
2660 of the operand. The number of words is returned. In case the parameter
2661 should be passed in memory, 0 is returned. As a special case for zero
2662 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2664    BIT_OFFSET is used internally for handling records; it specifies the
2665    offset in bits, modulo 256, to avoid overflow cases.
2667 See the x86-64 PS ABI for details.
2670 static int
2671 classify_argument (enum machine_mode mode, tree type,
2672 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2674 HOST_WIDE_INT bytes =
2675 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2676 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2678 /* Variable sized entities are always passed/returned in memory. */
2679 if (bytes < 0)
2680 return 0;
2682 if (mode != VOIDmode
2683 && targetm.calls.must_pass_in_stack (mode, type))
2684 return 0;
2686 if (type && AGGREGATE_TYPE_P (type))
2688 int i;
2689 tree field;
2690 enum x86_64_reg_class subclasses[MAX_CLASSES];
2692 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2693 if (bytes > 16)
2694 return 0;
2696 for (i = 0; i < words; i++)
2697 classes[i] = X86_64_NO_CLASS;
2699 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
2700          signal the memory class, so handle it as a special case.  */
2701 if (!words)
2703 classes[0] = X86_64_NO_CLASS;
2704 return 1;
2707 /* Classify each field of record and merge classes. */
2708 switch (TREE_CODE (type))
2710 case RECORD_TYPE:
2711 /* For classes first merge in the field of the subclasses. */
2712 if (TYPE_BINFO (type))
2714 tree binfo, base_binfo;
2715 int basenum;
2717 for (binfo = TYPE_BINFO (type), basenum = 0;
2718 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2720 int num;
2721 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2722 tree type = BINFO_TYPE (base_binfo);
2724 num = classify_argument (TYPE_MODE (type),
2725 type, subclasses,
2726 (offset + bit_offset) % 256);
2727 if (!num)
2728 return 0;
2729 for (i = 0; i < num; i++)
2731 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2732 classes[i + pos] =
2733 merge_classes (subclasses[i], classes[i + pos]);
2737 /* And now merge the fields of structure. */
2738 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2740 if (TREE_CODE (field) == FIELD_DECL)
2742 int num;
2744 /* Bitfields are always classified as integer. Handle them
2745 early, since later code would consider them to be
2746 misaligned integers. */
2747 if (DECL_BIT_FIELD (field))
2749 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2750 i < ((int_bit_position (field) + (bit_offset % 64))
2751 + tree_low_cst (DECL_SIZE (field), 0)
2752 + 63) / 8 / 8; i++)
2753 classes[i] =
2754 merge_classes (X86_64_INTEGER_CLASS,
2755 classes[i]);
2757 else
2759 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2760 TREE_TYPE (field), subclasses,
2761 (int_bit_position (field)
2762 + bit_offset) % 256);
2763 if (!num)
2764 return 0;
2765 for (i = 0; i < num; i++)
2767 int pos =
2768 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2769 classes[i + pos] =
2770 merge_classes (subclasses[i], classes[i + pos]);
2775 break;
2777 case ARRAY_TYPE:
2778 /* Arrays are handled as small records. */
2780 int num;
2781 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2782 TREE_TYPE (type), subclasses, bit_offset);
2783 if (!num)
2784 return 0;
2786 /* The partial classes are now full classes. */
2787 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2788 subclasses[0] = X86_64_SSE_CLASS;
2789 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2790 subclasses[0] = X86_64_INTEGER_CLASS;
2792 for (i = 0; i < words; i++)
2793 classes[i] = subclasses[i % num];
2795 break;
2797 case UNION_TYPE:
2798 case QUAL_UNION_TYPE:
2799        /* Unions are similar to RECORD_TYPE but the offset is always 0.  */
2802 /* Unions are not derived. */
2803 gcc_assert (!TYPE_BINFO (type)
2804 || !BINFO_N_BASE_BINFOS (TYPE_BINFO (type)));
2805 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2807 if (TREE_CODE (field) == FIELD_DECL)
2809 int num;
2810 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2811 TREE_TYPE (field), subclasses,
2812 bit_offset);
2813 if (!num)
2814 return 0;
2815 for (i = 0; i < num; i++)
2816 classes[i] = merge_classes (subclasses[i], classes[i]);
2819 break;
2821 default:
2822 gcc_unreachable ();
2825 /* Final merger cleanup. */
2826 for (i = 0; i < words; i++)
2828 /* If one class is MEMORY, everything should be passed in
2829 memory. */
2830 if (classes[i] == X86_64_MEMORY_CLASS)
2831 return 0;
2833 /* The X86_64_SSEUP_CLASS should be always preceded by
2834 X86_64_SSE_CLASS. */
2835 if (classes[i] == X86_64_SSEUP_CLASS
2836 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2837 classes[i] = X86_64_SSE_CLASS;
2839 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2840 if (classes[i] == X86_64_X87UP_CLASS
2841 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2842 classes[i] = X86_64_SSE_CLASS;
2844 return words;
2847 /* Compute alignment needed. We align all types to natural boundaries with
2848 exception of XFmode that is aligned to 64bits. */
2849 if (mode != VOIDmode && mode != BLKmode)
2851 int mode_alignment = GET_MODE_BITSIZE (mode);
2853 if (mode == XFmode)
2854 mode_alignment = 128;
2855 else if (mode == XCmode)
2856 mode_alignment = 256;
2857 if (COMPLEX_MODE_P (mode))
2858 mode_alignment /= 2;
2859 /* Misaligned fields are always returned in memory. */
2860 if (bit_offset % mode_alignment)
2861 return 0;
2864   /* For V1xx modes, just use the base mode.  */
2865 if (VECTOR_MODE_P (mode)
2866 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2867 mode = GET_MODE_INNER (mode);
2869 /* Classification of atomic types. */
2870 switch (mode)
2872 case DImode:
2873 case SImode:
2874 case HImode:
2875 case QImode:
2876 case CSImode:
2877 case CHImode:
2878 case CQImode:
2879 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2880 classes[0] = X86_64_INTEGERSI_CLASS;
2881 else
2882 classes[0] = X86_64_INTEGER_CLASS;
2883 return 1;
2884 case CDImode:
2885 case TImode:
2886 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2887 return 2;
2888 case CTImode:
2889 return 0;
2890 case SFmode:
2891 if (!(bit_offset % 64))
2892 classes[0] = X86_64_SSESF_CLASS;
2893 else
2894 classes[0] = X86_64_SSE_CLASS;
2895 return 1;
2896 case DFmode:
2897 classes[0] = X86_64_SSEDF_CLASS;
2898 return 1;
2899 case XFmode:
2900 classes[0] = X86_64_X87_CLASS;
2901 classes[1] = X86_64_X87UP_CLASS;
2902 return 2;
2903 case TFmode:
2904 classes[0] = X86_64_SSE_CLASS;
2905 classes[1] = X86_64_SSEUP_CLASS;
2906 return 2;
2907 case SCmode:
2908 classes[0] = X86_64_SSE_CLASS;
2909 return 1;
2910 case DCmode:
2911 classes[0] = X86_64_SSEDF_CLASS;
2912 classes[1] = X86_64_SSEDF_CLASS;
2913 return 2;
2914 case XCmode:
2915 classes[0] = X86_64_COMPLEX_X87_CLASS;
2916 return 1;
2917 case TCmode:
2918       /* This mode is larger than 16 bytes.  */
2919 return 0;
2920 case V4SFmode:
2921 case V4SImode:
2922 case V16QImode:
2923 case V8HImode:
2924 case V2DFmode:
2925 case V2DImode:
2926 classes[0] = X86_64_SSE_CLASS;
2927 classes[1] = X86_64_SSEUP_CLASS;
2928 return 2;
2929 case V2SFmode:
2930 case V2SImode:
2931 case V4HImode:
2932 case V8QImode:
2933 classes[0] = X86_64_SSE_CLASS;
2934 return 1;
2935 case BLKmode:
2936 case VOIDmode:
2937 return 0;
2938 default:
2939 gcc_assert (VECTOR_MODE_P (mode));
2941 if (bytes > 16)
2942 return 0;
2944 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
2946 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2947 classes[0] = X86_64_INTEGERSI_CLASS;
2948 else
2949 classes[0] = X86_64_INTEGER_CLASS;
2950 classes[1] = X86_64_INTEGER_CLASS;
2951 return 1 + (bytes > 8);
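/* Illustrative example: for
       struct s { double d; int i; };
   classify_argument returns 2 with classes[0] = X86_64_SSEDF_CLASS
   and classes[1] = X86_64_INTEGER_CLASS, so the structure is passed
   in one SSE register and one integer register rather than in
   memory.  */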
2955 /* Examine the argument and return the number of registers required in each
2956    class.  Return 0 iff the parameter should be passed in memory.  */
2957 static int
2958 examine_argument (enum machine_mode mode, tree type, int in_return,
2959 int *int_nregs, int *sse_nregs)
2961 enum x86_64_reg_class class[MAX_CLASSES];
2962 int n = classify_argument (mode, type, class, 0);
2964 *int_nregs = 0;
2965 *sse_nregs = 0;
2966 if (!n)
2967 return 0;
2968 for (n--; n >= 0; n--)
2969 switch (class[n])
2971 case X86_64_INTEGER_CLASS:
2972 case X86_64_INTEGERSI_CLASS:
2973 (*int_nregs)++;
2974 break;
2975 case X86_64_SSE_CLASS:
2976 case X86_64_SSESF_CLASS:
2977 case X86_64_SSEDF_CLASS:
2978 (*sse_nregs)++;
2979 break;
2980 case X86_64_NO_CLASS:
2981 case X86_64_SSEUP_CLASS:
2982 break;
2983 case X86_64_X87_CLASS:
2984 case X86_64_X87UP_CLASS:
2985 if (!in_return)
2986 return 0;
2987 break;
2988 case X86_64_COMPLEX_X87_CLASS:
2989 return in_return ? 2 : 0;
2990 case X86_64_MEMORY_CLASS:
2991 gcc_unreachable ();
2993 return 1;
2996 /* Construct container for the argument used by GCC interface. See
2997 FUNCTION_ARG for the detailed description. */
2999 static rtx
3000 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
3001 tree type, int in_return, int nintregs, int nsseregs,
3002 const int *intreg, int sse_regno)
3004 enum machine_mode tmpmode;
3005 int bytes =
3006 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3007 enum x86_64_reg_class class[MAX_CLASSES];
3008 int n;
3009 int i;
3010 int nexps = 0;
3011 int needed_sseregs, needed_intregs;
3012 rtx exp[MAX_CLASSES];
3013 rtx ret;
3015 n = classify_argument (mode, type, class, 0);
3016 if (TARGET_DEBUG_ARG)
3018 if (!n)
3019 fprintf (stderr, "Memory class\n");
3020 else
3022 fprintf (stderr, "Classes:");
3023 for (i = 0; i < n; i++)
3025 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
3027 fprintf (stderr, "\n");
3030 if (!n)
3031 return NULL;
3032 if (!examine_argument (mode, type, in_return, &needed_intregs,
3033 &needed_sseregs))
3034 return NULL;
3035 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
3036 return NULL;
3038 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
3039 some less clueful developer tries to use floating-point anyway. */
3040 if (needed_sseregs && !TARGET_SSE)
3042 static bool issued_error;
3043 if (!issued_error)
3045 issued_error = true;
3046 if (in_return)
3047 error ("SSE register return with SSE disabled");
3048 else
3049 error ("SSE register argument with SSE disabled");
3051 return NULL;
3054 /* First construct simple cases. Avoid SCmode, since we want to use
3055      a single register to pass this type.  */
3056 if (n == 1 && mode != SCmode)
3057 switch (class[0])
3059 case X86_64_INTEGER_CLASS:
3060 case X86_64_INTEGERSI_CLASS:
3061 return gen_rtx_REG (mode, intreg[0]);
3062 case X86_64_SSE_CLASS:
3063 case X86_64_SSESF_CLASS:
3064 case X86_64_SSEDF_CLASS:
3065 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
3066 case X86_64_X87_CLASS:
3067 case X86_64_COMPLEX_X87_CLASS:
3068 return gen_rtx_REG (mode, FIRST_STACK_REG);
3069 case X86_64_NO_CLASS:
3070 /* Zero sized array, struct or class. */
3071 return NULL;
3072 default:
3073 gcc_unreachable ();
3075 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
3076 && mode != BLKmode)
3077 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
3078 if (n == 2
3079 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
3080 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
3081 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
3082 && class[1] == X86_64_INTEGER_CLASS
3083 && (mode == CDImode || mode == TImode || mode == TFmode)
3084 && intreg[0] + 1 == intreg[1])
3085 return gen_rtx_REG (mode, intreg[0]);
3087 /* Otherwise figure out the entries of the PARALLEL. */
3088 for (i = 0; i < n; i++)
3090 switch (class[i])
3092 case X86_64_NO_CLASS:
3093 break;
3094 case X86_64_INTEGER_CLASS:
3095 case X86_64_INTEGERSI_CLASS:
3096 /* Merge TImodes on aligned occasions here too. */
3097 if (i * 8 + 8 > bytes)
3098 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3099 else if (class[i] == X86_64_INTEGERSI_CLASS)
3100 tmpmode = SImode;
3101 else
3102 tmpmode = DImode;
3103 /* We've requested 24 bytes we don't have mode for. Use DImode. */
3104 if (tmpmode == BLKmode)
3105 tmpmode = DImode;
3106 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3107 gen_rtx_REG (tmpmode, *intreg),
3108 GEN_INT (i*8));
3109 intreg++;
3110 break;
3111 case X86_64_SSESF_CLASS:
3112 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3113 gen_rtx_REG (SFmode,
3114 SSE_REGNO (sse_regno)),
3115 GEN_INT (i*8));
3116 sse_regno++;
3117 break;
3118 case X86_64_SSEDF_CLASS:
3119 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3120 gen_rtx_REG (DFmode,
3121 SSE_REGNO (sse_regno)),
3122 GEN_INT (i*8));
3123 sse_regno++;
3124 break;
3125 case X86_64_SSE_CLASS:
3126 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3127 tmpmode = TImode;
3128 else
3129 tmpmode = DImode;
3130 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3131 gen_rtx_REG (tmpmode,
3132 SSE_REGNO (sse_regno)),
3133 GEN_INT (i*8));
3134 if (tmpmode == TImode)
3135 i++;
3136 sse_regno++;
3137 break;
3138 default:
3139 gcc_unreachable ();
3143 /* Empty aligned struct, union or class. */
3144 if (nexps == 0)
3145 return NULL;
3147 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3148 for (i = 0; i < nexps; i++)
3149 XVECEXP (ret, 0, i) = exp [i];
3150 return ret;
3153 /* Update the data in CUM to advance over an argument
3154 of mode MODE and data type TYPE.
3155 (TYPE is null for libcalls where that information may not be available.) */
3157 void
3158 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3159 tree type, int named)
3161 int bytes =
3162 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3163 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3165 if (type)
3166 mode = type_natural_mode (type);
3168 if (TARGET_DEBUG_ARG)
3169 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3170 "mode=%s, named=%d)\n\n",
3171 words, cum->words, cum->nregs, cum->sse_nregs,
3172 GET_MODE_NAME (mode), named);
3174 if (TARGET_64BIT)
3176 int int_nregs, sse_nregs;
3177 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3178 cum->words += words;
3179 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3181 cum->nregs -= int_nregs;
3182 cum->sse_nregs -= sse_nregs;
3183 cum->regno += int_nregs;
3184 cum->sse_regno += sse_nregs;
3186 else
3187 cum->words += words;
3189 else
3191 switch (mode)
3193 default:
3194 break;
3196 case BLKmode:
3197 if (bytes < 0)
3198 break;
3199 /* FALLTHRU */
3201 case DImode:
3202 case SImode:
3203 case HImode:
3204 case QImode:
3205 cum->words += words;
3206 cum->nregs -= words;
3207 cum->regno += words;
3209 if (cum->nregs <= 0)
3211 cum->nregs = 0;
3212 cum->regno = 0;
3214 break;
3216 case DFmode:
3217 if (cum->float_in_sse < 2)
3218 break;
3219 case SFmode:
3220 if (cum->float_in_sse < 1)
3221 break;
3222 /* FALLTHRU */
3224 case TImode:
3225 case V16QImode:
3226 case V8HImode:
3227 case V4SImode:
3228 case V2DImode:
3229 case V4SFmode:
3230 case V2DFmode:
3231 if (!type || !AGGREGATE_TYPE_P (type))
3233 cum->sse_words += words;
3234 cum->sse_nregs -= 1;
3235 cum->sse_regno += 1;
3236 if (cum->sse_nregs <= 0)
3238 cum->sse_nregs = 0;
3239 cum->sse_regno = 0;
3242 break;
3244 case V8QImode:
3245 case V4HImode:
3246 case V2SImode:
3247 case V2SFmode:
3248 if (!type || !AGGREGATE_TYPE_P (type))
3250 cum->mmx_words += words;
3251 cum->mmx_nregs -= 1;
3252 cum->mmx_regno += 1;
3253 if (cum->mmx_nregs <= 0)
3255 cum->mmx_nregs = 0;
3256 cum->mmx_regno = 0;
3259 break;
3264 /* Define where to put the arguments to a function.
3265 Value is zero to push the argument on the stack,
3266 or a hard register in which to store the argument.
3268 MODE is the argument's machine mode.
3269 TYPE is the data type of the argument (as a tree).
3270 This is null for libcalls where that information may
3271 not be available.
3272 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3273 the preceding args and about the function being called.
3274 NAMED is nonzero if this argument is a named parameter
3275 (otherwise it is an extra parameter matching an ellipsis). */
3278 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3279 tree type, int named)
3281 enum machine_mode mode = orig_mode;
3282 rtx ret = NULL_RTX;
3283 int bytes =
3284 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3285 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3286 static bool warnedsse, warnedmmx;
3288 /* To simplify the code below, represent vector types with a vector mode
3289 even if MMX/SSE are not active. */
3290 if (type && TREE_CODE (type) == VECTOR_TYPE)
3291 mode = type_natural_mode (type);
3293   /* Handle a hidden AL argument containing the number of registers for varargs
3294 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
3295 any AL settings. */
3296 if (mode == VOIDmode)
3298 if (TARGET_64BIT)
3299 return GEN_INT (cum->maybe_vaarg
3300 ? (cum->sse_nregs < 0
3301 ? SSE_REGPARM_MAX
3302 : cum->sse_regno)
3303 : -1);
3304 else
3305 return constm1_rtx;
3307 if (TARGET_64BIT)
3308 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3309 cum->sse_nregs,
3310 &x86_64_int_parameter_registers [cum->regno],
3311 cum->sse_regno);
3312 else
3313 switch (mode)
3315 /* For now, pass fp/complex values on the stack. */
3316 default:
3317 break;
3319 case BLKmode:
3320 if (bytes < 0)
3321 break;
3322 /* FALLTHRU */
3323 case DImode:
3324 case SImode:
3325 case HImode:
3326 case QImode:
3327 if (words <= cum->nregs)
3329 int regno = cum->regno;
3331 /* Fastcall allocates the first two DWORD (SImode) or
3332 smaller arguments to ECX and EDX. */
3333 if (cum->fastcall)
3335 if (mode == BLKmode || mode == DImode)
3336 break;
3338 /* ECX, not EAX, is the first allocated register. */
3339 if (regno == 0)
3340 regno = 2;
3342 ret = gen_rtx_REG (mode, regno);
3344 break;
3345 case DFmode:
3346 if (cum->float_in_sse < 2)
3347 break;
3348 case SFmode:
3349 if (cum->float_in_sse < 1)
3350 break;
3351 /* FALLTHRU */
3352 case TImode:
3353 case V16QImode:
3354 case V8HImode:
3355 case V4SImode:
3356 case V2DImode:
3357 case V4SFmode:
3358 case V2DFmode:
3359 if (!type || !AGGREGATE_TYPE_P (type))
3361 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3363 warnedsse = true;
3364 warning (0, "SSE vector argument without SSE enabled "
3365 "changes the ABI");
3367 if (cum->sse_nregs)
3368 ret = gen_reg_or_parallel (mode, orig_mode,
3369 cum->sse_regno + FIRST_SSE_REG);
3371 break;
3372 case V8QImode:
3373 case V4HImode:
3374 case V2SImode:
3375 case V2SFmode:
3376 if (!type || !AGGREGATE_TYPE_P (type))
3378 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3380 warnedmmx = true;
3381 warning (0, "MMX vector argument without MMX enabled "
3382 "changes the ABI");
3384 if (cum->mmx_nregs)
3385 ret = gen_reg_or_parallel (mode, orig_mode,
3386 cum->mmx_regno + FIRST_MMX_REG);
3388 break;
3391 if (TARGET_DEBUG_ARG)
3393 fprintf (stderr,
3394 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3395 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3397 if (ret)
3398 print_simple_rtl (stderr, ret);
3399 else
3400 fprintf (stderr, ", stack");
3402 fprintf (stderr, " )\n");
3405 return ret;
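/* Illustrative sketch (editorial note, not part of the compiler): for a
   hypothetical prototype

     void f (int a, double b, __m128 c);

   compiled for x86-64, the container logic above places A in %edi,
   B in %xmm0 and C in %xmm1, and no stack slots are used.  */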
3408 /* A C expression that indicates when an argument must be passed by
3409 reference. If nonzero for an argument, a copy of that argument is
3410 made in memory and a pointer to the argument is passed instead of
3411 the argument itself. The pointer is passed in whatever way is
3412 appropriate for passing a pointer to that type. */
3414 static bool
3415 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3416 enum machine_mode mode ATTRIBUTE_UNUSED,
3417 tree type, bool named ATTRIBUTE_UNUSED)
3419 if (!TARGET_64BIT)
3420 return 0;
3422 if (type && int_size_in_bytes (type) == -1)
3424 if (TARGET_DEBUG_ARG)
3425 fprintf (stderr, "function_arg_pass_by_reference\n");
3426 return 1;
3429 return 0;
3432 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
3433 ABI. Only called if TARGET_SSE. */
3434 static bool
3435 contains_128bit_aligned_vector_p (tree type)
3437 enum machine_mode mode = TYPE_MODE (type);
3438 if (SSE_REG_MODE_P (mode)
3439 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3440 return true;
3441 if (TYPE_ALIGN (type) < 128)
3442 return false;
3444 if (AGGREGATE_TYPE_P (type))
3446 /* Walk the aggregates recursively. */
3447 switch (TREE_CODE (type))
3449 case RECORD_TYPE:
3450 case UNION_TYPE:
3451 case QUAL_UNION_TYPE:
3453 tree field;
3455 if (TYPE_BINFO (type))
3457 tree binfo, base_binfo;
3458 int i;
3460 for (binfo = TYPE_BINFO (type), i = 0;
3461 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
3462 if (contains_128bit_aligned_vector_p
3463 (BINFO_TYPE (base_binfo)))
3464 return true;
3466 /* And now walk the fields of the structure. */
3467 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3469 if (TREE_CODE (field) == FIELD_DECL
3470 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3471 return true;
3473 break;
3476 case ARRAY_TYPE:
3477 /* Just in case some language passes arrays by value. */
3478 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3479 return true;
3480 break;
3482 default:
3483 gcc_unreachable ();
3486 return false;
3489 /* Gives the alignment boundary, in bits, of an argument with the
3490 specified mode and type. */
3493 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3495 int align;
3496 if (type)
3497 align = TYPE_ALIGN (type);
3498 else
3499 align = GET_MODE_ALIGNMENT (mode);
3500 if (align < PARM_BOUNDARY)
3501 align = PARM_BOUNDARY;
3502 if (!TARGET_64BIT)
3504 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3505 make an exception for SSE modes since these require 128bit
3506 alignment.
3508 The handling here differs from field_alignment. ICC aligns MMX
3509 arguments to 4 byte boundaries, while structure fields are aligned
3510 to 8 byte boundaries. */
3511 if (!TARGET_SSE)
3512 align = PARM_BOUNDARY;
3513 else if (!type)
3515 if (!SSE_REG_MODE_P (mode))
3516 align = PARM_BOUNDARY;
3518 else
3520 if (!contains_128bit_aligned_vector_p (type))
3521 align = PARM_BOUNDARY;
3524 if (align > 128)
3525 align = 128;
3526 return align;
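/* Example (editorial sketch) of the distinction made above: with SSE
   enabled on ia32, an argument of type

     struct s { int tag; __m128 v; };

   contains a 128bit aligned vector, so it is reported as requiring a
   128 bit boundary, whereas a plain `double' argument keeps the 32 bit
   PARM_BOUNDARY of the i386 ABI.  */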
3529 /* Return true if N is a possible register number of function value. */
3530 bool
3531 ix86_function_value_regno_p (int regno)
3533 if (regno == 0
3534 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3535 || (regno == FIRST_SSE_REG && TARGET_SSE))
3536 return true;
3538 if (!TARGET_64BIT
3539 && (regno == FIRST_MMX_REG && TARGET_MMX))
3540 return true;
3542 return false;
3545 /* Define how to find the value returned by a function.
3546 VALTYPE is the data type of the value (as a tree).
3547 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3548 otherwise, FUNC is 0. */
3550 ix86_function_value (tree valtype, tree fntype_or_decl,
3551 bool outgoing ATTRIBUTE_UNUSED)
3553 enum machine_mode natmode = type_natural_mode (valtype);
3555 if (TARGET_64BIT)
3557 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3558 1, REGPARM_MAX, SSE_REGPARM_MAX,
3559 x86_64_int_return_registers, 0);
3560 /* For zero sized structures, construct_container returns NULL, but we
3561 need to keep the rest of the compiler happy by returning a meaningful value. */
3562 if (!ret)
3563 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3564 return ret;
3566 else
3568 tree fn = NULL_TREE, fntype;
3569 if (fntype_or_decl
3570 && DECL_P (fntype_or_decl))
3571 fn = fntype_or_decl;
3572 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
3573 return gen_rtx_REG (TYPE_MODE (valtype),
3574 ix86_value_regno (natmode, fn, fntype));
3578 /* Return true iff type is returned in memory. */
3580 ix86_return_in_memory (tree type)
3582 int needed_intregs, needed_sseregs, size;
3583 enum machine_mode mode = type_natural_mode (type);
3585 if (TARGET_64BIT)
3586 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3588 if (mode == BLKmode)
3589 return 1;
3591 size = int_size_in_bytes (type);
3593 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3594 return 0;
3596 if (VECTOR_MODE_P (mode) || mode == TImode)
3598 /* User-created vectors small enough to fit in EAX. */
3599 if (size < 8)
3600 return 0;
3602 /* MMX/3dNow values are returned in MM0,
3603 except when MMX does not exist. */
3604 if (size == 8)
3605 return (TARGET_MMX ? 0 : 1);
3607 /* SSE values are returned in XMM0, except when it doesn't exist. */
3608 if (size == 16)
3609 return (TARGET_SSE ? 0 : 1);
3612 if (mode == XFmode)
3613 return 0;
3615 if (size > 12)
3616 return 1;
3617 return 0;
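/* A few concrete ia32 cases of the rules above (illustration only):
   a 16 byte `struct { int a, b, c, d; }' is returned in memory;
   `__m64' is returned in %mm0 when MMX is enabled and in memory
   otherwise; `__m128' is returned in %xmm0 when SSE is enabled; and
   `long double' (XFmode) is always returned in %st(0).  */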
3620 /* When returning SSE vector types, we have a choice of either
3621 (1) being abi incompatible with a -march switch, or
3622 (2) generating an error.
3623 Given no good solution, I think the safest thing is one warning.
3624 The user won't be able to use -Werror, but....
3626 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3627 called in response to actually generating a caller or callee that
3628 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3629 via aggregate_value_p for general type probing from tree-ssa. */
3631 static rtx
3632 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3634 static bool warnedsse, warnedmmx;
3636 if (type)
3638 /* Look at the return type of the function, not the function type. */
3639 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3641 if (!TARGET_SSE && !warnedsse)
3643 if (mode == TImode
3644 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3646 warnedsse = true;
3647 warning (0, "SSE vector return without SSE enabled "
3648 "changes the ABI");
3652 if (!TARGET_MMX && !warnedmmx)
3654 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
3656 warnedmmx = true;
3657 warning (0, "MMX vector return without MMX enabled "
3658 "changes the ABI");
3663 return NULL;
3666 /* Define how to find the value returned by a library function
3667 assuming the value has mode MODE. */
3669 ix86_libcall_value (enum machine_mode mode)
3671 if (TARGET_64BIT)
3673 switch (mode)
3675 case SFmode:
3676 case SCmode:
3677 case DFmode:
3678 case DCmode:
3679 case TFmode:
3680 return gen_rtx_REG (mode, FIRST_SSE_REG);
3681 case XFmode:
3682 case XCmode:
3683 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3684 case TCmode:
3685 return NULL;
3686 default:
3687 return gen_rtx_REG (mode, 0);
3690 else
3691 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
3694 /* Given a mode, return the register to use for a return value. */
3696 static int
3697 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
3699 gcc_assert (!TARGET_64BIT);
3701 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
3702 we prevent this case when mmx is not available. */
3703 if ((VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8))
3704 return FIRST_MMX_REG;
3706 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3707 we prevent this case when sse is not available. */
3708 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3709 return FIRST_SSE_REG;
3711 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
3712 if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
3713 return 0;
3715 /* Floating point return values in %st(0), except for local functions when
3716 SSE math is enabled or for functions with sseregparm attribute. */
3717 if ((func || fntype)
3718 && (mode == SFmode || mode == DFmode))
3720 int sse_level = ix86_function_sseregparm (fntype, func);
3721 if ((sse_level >= 1 && mode == SFmode)
3722 || (sse_level == 2 && mode == DFmode))
3723 return FIRST_SSE_REG;
3726 return FIRST_FLOAT_REG;
3729 /* Create the va_list data type. */
3731 static tree
3732 ix86_build_builtin_va_list (void)
3734 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3736 /* For i386 we use plain pointer to argument area. */
3737 if (!TARGET_64BIT)
3738 return build_pointer_type (char_type_node);
3740 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3741 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3743 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3744 unsigned_type_node);
3745 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3746 unsigned_type_node);
3747 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3748 ptr_type_node);
3749 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3750 ptr_type_node);
3752 va_list_gpr_counter_field = f_gpr;
3753 va_list_fpr_counter_field = f_fpr;
3755 DECL_FIELD_CONTEXT (f_gpr) = record;
3756 DECL_FIELD_CONTEXT (f_fpr) = record;
3757 DECL_FIELD_CONTEXT (f_ovf) = record;
3758 DECL_FIELD_CONTEXT (f_sav) = record;
3760 TREE_CHAIN (record) = type_decl;
3761 TYPE_NAME (record) = type_decl;
3762 TYPE_FIELDS (record) = f_gpr;
3763 TREE_CHAIN (f_gpr) = f_fpr;
3764 TREE_CHAIN (f_fpr) = f_ovf;
3765 TREE_CHAIN (f_ovf) = f_sav;
3767 layout_type (record);
3769 /* The correct type is an array type of one element. */
3770 return build_array_type (record, build_index_type (size_zero_node));
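/* For reference (editorial sketch, not used by the compiler itself), the
   record built above matches the x86-64 psABI declaration

     typedef struct __va_list_tag {
       unsigned int gp_offset;    -- byte offset of the next unused GPR
                                     slot within reg_save_area
       unsigned int fp_offset;    -- likewise for the SSE registers
       void *overflow_arg_area;   -- arguments that were passed on the stack
       void *reg_save_area;       -- register save area set up by the prologue
     } va_list[1];

   while on i386 va_list is simply a `char *' into the argument area,
   as returned early above.  */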
3773 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3775 static void
3776 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3777 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3778 int no_rtl)
3780 CUMULATIVE_ARGS next_cum;
3781 rtx save_area = NULL_RTX, mem;
3782 rtx label;
3783 rtx label_ref;
3784 rtx tmp_reg;
3785 rtx nsse_reg;
3786 int set;
3787 tree fntype;
3788 int stdarg_p;
3789 int i;
3791 if (!TARGET_64BIT)
3792 return;
3794 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
3795 return;
3797 /* Indicate that we need to allocate space on the stack for the varargs save area. */
3798 ix86_save_varrargs_registers = 1;
3800 cfun->stack_alignment_needed = 128;
3802 fntype = TREE_TYPE (current_function_decl);
3803 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3804 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3805 != void_type_node));
3807 /* For varargs, we do not want to skip the dummy va_dcl argument.
3808 For stdargs, we do want to skip the last named argument. */
3809 next_cum = *cum;
3810 if (stdarg_p)
3811 function_arg_advance (&next_cum, mode, type, 1);
3813 if (!no_rtl)
3814 save_area = frame_pointer_rtx;
3816 set = get_varargs_alias_set ();
3818 for (i = next_cum.regno;
3819 i < ix86_regparm
3820 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
3821 i++)
3823 mem = gen_rtx_MEM (Pmode,
3824 plus_constant (save_area, i * UNITS_PER_WORD));
3825 MEM_NOTRAP_P (mem) = 1;
3826 set_mem_alias_set (mem, set);
3827 emit_move_insn (mem, gen_rtx_REG (Pmode,
3828 x86_64_int_parameter_registers[i]));
3831 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
3833 /* Now emit code to save SSE registers.  The AX parameter contains the
3834 number of SSE parameter registers used to call this function.  We use
3835 the sse_prologue_save insn template, which produces a computed jump
3836 across the SSE saves.  We need some preparation work to get this working. */
3838 label = gen_label_rtx ();
3839 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3841 /* Compute the address to jump to:
3842 label - 4*eax + nnamed_sse_arguments*4.  */
3843 tmp_reg = gen_reg_rtx (Pmode);
3844 nsse_reg = gen_reg_rtx (Pmode);
3845 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3846 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3847 gen_rtx_MULT (Pmode, nsse_reg,
3848 GEN_INT (4))));
3849 if (next_cum.sse_regno)
3850 emit_move_insn
3851 (nsse_reg,
3852 gen_rtx_CONST (DImode,
3853 gen_rtx_PLUS (DImode,
3854 label_ref,
3855 GEN_INT (next_cum.sse_regno * 4))));
3856 else
3857 emit_move_insn (nsse_reg, label_ref);
3858 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3860 /* Compute the address of the memory block we save into.  We always use a
3861 pointer pointing 127 bytes past the first byte to be stored - this keeps
3862 each displacement within a signed byte and the instruction size at 4 bytes. */
3863 tmp_reg = gen_reg_rtx (Pmode);
3864 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3865 plus_constant (save_area,
3866 8 * REGPARM_MAX + 127)));
3867 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3868 MEM_NOTRAP_P (mem) = 1;
3869 set_mem_alias_set (mem, set);
3870 set_mem_align (mem, BITS_PER_WORD);
3872 /* And finally do the dirty job! */
3873 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3874 GEN_INT (next_cum.sse_regno), label));
3879 /* Implement va_start. */
3881 void
3882 ix86_va_start (tree valist, rtx nextarg)
3884 HOST_WIDE_INT words, n_gpr, n_fpr;
3885 tree f_gpr, f_fpr, f_ovf, f_sav;
3886 tree gpr, fpr, ovf, sav, t;
3888 /* Only 64bit target needs something special. */
3889 if (!TARGET_64BIT)
3891 std_expand_builtin_va_start (valist, nextarg);
3892 return;
3895 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3896 f_fpr = TREE_CHAIN (f_gpr);
3897 f_ovf = TREE_CHAIN (f_fpr);
3898 f_sav = TREE_CHAIN (f_ovf);
3900 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3901 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3902 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3903 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3904 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3906 /* Count number of gp and fp argument registers used. */
3907 words = current_function_args_info.words;
3908 n_gpr = current_function_args_info.regno;
3909 n_fpr = current_function_args_info.sse_regno;
3911 if (TARGET_DEBUG_ARG)
3912 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3913 (int) words, (int) n_gpr, (int) n_fpr);
3915 if (cfun->va_list_gpr_size)
3917 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3918 build_int_cst (NULL_TREE, n_gpr * 8));
3919 TREE_SIDE_EFFECTS (t) = 1;
3920 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3923 if (cfun->va_list_fpr_size)
3925 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3926 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3927 TREE_SIDE_EFFECTS (t) = 1;
3928 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3931 /* Find the overflow area. */
3932 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3933 if (words != 0)
3934 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), t,
3935 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3936 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3937 TREE_SIDE_EFFECTS (t) = 1;
3938 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3940 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
3942 /* Find the register save area.
3943 The function prologue saves it right above the stack frame. */
3944 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3945 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3946 TREE_SIDE_EFFECTS (t) = 1;
3947 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
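/* Worked example (assumption: a prototype `int f (const char *fmt, ...)'
   compiled for x86-64): FMT consumes one integer register, so the code
   above sets gp_offset to 1 * 8 = 8, fp_offset to 0 * 16 + 8 * REGPARM_MAX
   = 48, points overflow_arg_area at the incoming stack arguments, and
   points reg_save_area at the block stored by the prologue.  */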
3951 /* Implement va_arg. */
3953 tree
3954 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3956 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3957 tree f_gpr, f_fpr, f_ovf, f_sav;
3958 tree gpr, fpr, ovf, sav, t;
3959 int size, rsize;
3960 tree lab_false, lab_over = NULL_TREE;
3961 tree addr, t2;
3962 rtx container;
3963 int indirect_p = 0;
3964 tree ptrtype;
3965 enum machine_mode nat_mode;
3967 /* Only 64bit target needs something special. */
3968 if (!TARGET_64BIT)
3969 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3971 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3972 f_fpr = TREE_CHAIN (f_gpr);
3973 f_ovf = TREE_CHAIN (f_fpr);
3974 f_sav = TREE_CHAIN (f_ovf);
3976 valist = build_va_arg_indirect_ref (valist);
3977 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3978 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3979 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3980 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3982 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3983 if (indirect_p)
3984 type = build_pointer_type (type);
3985 size = int_size_in_bytes (type);
3986 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3988 nat_mode = type_natural_mode (type);
3989 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3990 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3992 /* Pull the value out of the saved registers. */
3994 addr = create_tmp_var (ptr_type_node, "addr");
3995 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3997 if (container)
3999 int needed_intregs, needed_sseregs;
4000 bool need_temp;
4001 tree int_addr, sse_addr;
4003 lab_false = create_artificial_label ();
4004 lab_over = create_artificial_label ();
4006 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
4008 need_temp = (!REG_P (container)
4009 && ((needed_intregs && TYPE_ALIGN (type) > 64)
4010 || TYPE_ALIGN (type) > 128));
4012 /* In case we are passing a structure, verify that it forms a consecutive
4013 block in the register save area.  If not, we need to do moves. */
4014 if (!need_temp && !REG_P (container))
4016 /* Verify that all registers are strictly consecutive */
4017 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
4019 int i;
4021 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4023 rtx slot = XVECEXP (container, 0, i);
4024 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
4025 || INTVAL (XEXP (slot, 1)) != i * 16)
4026 need_temp = 1;
4029 else
4031 int i;
4033 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4035 rtx slot = XVECEXP (container, 0, i);
4036 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
4037 || INTVAL (XEXP (slot, 1)) != i * 8)
4038 need_temp = 1;
4042 if (!need_temp)
4044 int_addr = addr;
4045 sse_addr = addr;
4047 else
4049 int_addr = create_tmp_var (ptr_type_node, "int_addr");
4050 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
4051 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
4052 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
4055 /* First ensure that we fit completely in registers. */
4056 if (needed_intregs)
4058 t = build_int_cst (TREE_TYPE (gpr),
4059 (REGPARM_MAX - needed_intregs + 1) * 8);
4060 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
4061 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4062 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4063 gimplify_and_add (t, pre_p);
4065 if (needed_sseregs)
4067 t = build_int_cst (TREE_TYPE (fpr),
4068 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
4069 + REGPARM_MAX * 8);
4070 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
4071 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4072 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4073 gimplify_and_add (t, pre_p);
4076 /* Compute index to start of area used for integer regs. */
4077 if (needed_intregs)
4079 /* int_addr = gpr + sav; */
4080 t = fold_convert (ptr_type_node, gpr);
4081 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4082 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
4083 gimplify_and_add (t, pre_p);
4085 if (needed_sseregs)
4087 /* sse_addr = fpr + sav; */
4088 t = fold_convert (ptr_type_node, fpr);
4089 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4090 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
4091 gimplify_and_add (t, pre_p);
4093 if (need_temp)
4095 int i;
4096 tree temp = create_tmp_var (type, "va_arg_tmp");
4098 /* addr = &temp; */
4099 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4100 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
4101 gimplify_and_add (t, pre_p);
4103 for (i = 0; i < XVECLEN (container, 0); i++)
4105 rtx slot = XVECEXP (container, 0, i);
4106 rtx reg = XEXP (slot, 0);
4107 enum machine_mode mode = GET_MODE (reg);
4108 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4109 tree addr_type = build_pointer_type (piece_type);
4110 tree src_addr, src;
4111 int src_offset;
4112 tree dest_addr, dest;
4114 if (SSE_REGNO_P (REGNO (reg)))
4116 src_addr = sse_addr;
4117 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4119 else
4121 src_addr = int_addr;
4122 src_offset = REGNO (reg) * 8;
4124 src_addr = fold_convert (addr_type, src_addr);
4125 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4126 size_int (src_offset)));
4127 src = build_va_arg_indirect_ref (src_addr);
4129 dest_addr = fold_convert (addr_type, addr);
4130 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4131 size_int (INTVAL (XEXP (slot, 1)))));
4132 dest = build_va_arg_indirect_ref (dest_addr);
4134 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
4135 gimplify_and_add (t, pre_p);
4139 if (needed_intregs)
4141 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4142 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4143 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
4144 gimplify_and_add (t, pre_p);
4146 if (needed_sseregs)
4148 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4149 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4150 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
4151 gimplify_and_add (t, pre_p);
4154 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4155 gimplify_and_add (t, pre_p);
4157 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4158 append_to_statement_list (t, pre_p);
4161 /* ... otherwise out of the overflow area. */
4163 /* Care for on-stack alignment if needed. */
4164 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
4165 || integer_zerop (TYPE_SIZE (type)))
4166 t = ovf;
4167 else
4169 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4170 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4171 build_int_cst (TREE_TYPE (ovf), align - 1));
4172 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4173 build_int_cst (TREE_TYPE (t), -align));
4175 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4177 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
4178 gimplify_and_add (t2, pre_p);
4180 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4181 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4182 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
4183 gimplify_and_add (t, pre_p);
4185 if (container)
4187 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4188 append_to_statement_list (t, pre_p);
4191 ptrtype = build_pointer_type (type);
4192 addr = fold_convert (ptrtype, addr);
4194 if (indirect_p)
4195 addr = build_va_arg_indirect_ref (addr);
4196 return build_va_arg_indirect_ref (addr);
4199 /* Return nonzero if OPNUM's MEM should be matched
4200 in movabs* patterns. */
4203 ix86_check_movabs (rtx insn, int opnum)
4205 rtx set, mem;
4207 set = PATTERN (insn);
4208 if (GET_CODE (set) == PARALLEL)
4209 set = XVECEXP (set, 0, 0);
4210 gcc_assert (GET_CODE (set) == SET);
4211 mem = XEXP (set, opnum);
4212 while (GET_CODE (mem) == SUBREG)
4213 mem = SUBREG_REG (mem);
4214 gcc_assert (GET_CODE (mem) == MEM);
4215 return (volatile_ok || !MEM_VOLATILE_P (mem));
4218 /* Initialize the table of extra 80387 mathematical constants. */
4220 static void
4221 init_ext_80387_constants (void)
4223 static const char * cst[5] =
4225 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4226 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4227 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4228 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4229 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4231 int i;
4233 for (i = 0; i < 5; i++)
4235 real_from_string (&ext_80387_constants_table[i], cst[i]);
4236 /* Ensure each constant is rounded to XFmode precision. */
4237 real_convert (&ext_80387_constants_table[i],
4238 XFmode, &ext_80387_constants_table[i]);
4241 ext_80387_constants_init = 1;
4244 /* Return true if the constant is something that can be loaded with
4245 a special instruction. */
4248 standard_80387_constant_p (rtx x)
4250 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4251 return -1;
4253 if (x == CONST0_RTX (GET_MODE (x)))
4254 return 1;
4255 if (x == CONST1_RTX (GET_MODE (x)))
4256 return 2;
4258 /* For XFmode constants, try to find a special 80387 instruction when
4259 optimizing for size or on those CPUs that benefit from them. */
4260 if (GET_MODE (x) == XFmode
4261 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4263 REAL_VALUE_TYPE r;
4264 int i;
4266 if (! ext_80387_constants_init)
4267 init_ext_80387_constants ();
4269 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4270 for (i = 0; i < 5; i++)
4271 if (real_identical (&r, &ext_80387_constants_table[i]))
4272 return i + 3;
4275 return 0;
4278 /* Return the opcode of the special instruction to be used to load
4279 the constant X. */
4281 const char *
4282 standard_80387_constant_opcode (rtx x)
4284 switch (standard_80387_constant_p (x))
4286 case 1:
4287 return "fldz";
4288 case 2:
4289 return "fld1";
4290 case 3:
4291 return "fldlg2";
4292 case 4:
4293 return "fldln2";
4294 case 5:
4295 return "fldl2e";
4296 case 6:
4297 return "fldl2t";
4298 case 7:
4299 return "fldpi";
4300 default:
4301 gcc_unreachable ();
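/* Example of the mapping above (illustration): with -Os, a constant such as

     long double pi = 3.1415926535897932385L;

   rounds to table entry 4, standard_80387_constant_p returns 7, and the
   load is emitted as a single `fldpi' rather than a constant-pool
   reference.  */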
4305 /* Return the CONST_DOUBLE representing the 80387 constant that is
4306 loaded by the specified special instruction. The argument IDX
4307 matches the return value from standard_80387_constant_p. */
4310 standard_80387_constant_rtx (int idx)
4312 int i;
4314 if (! ext_80387_constants_init)
4315 init_ext_80387_constants ();
4317 switch (idx)
4319 case 3:
4320 case 4:
4321 case 5:
4322 case 6:
4323 case 7:
4324 i = idx - 3;
4325 break;
4327 default:
4328 gcc_unreachable ();
4331 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4332 XFmode);
4335 /* Return 1 if X is an FP constant we can load into an SSE register without using memory.  */
4338 standard_sse_constant_p (rtx x)
4340 if (x == const0_rtx)
4341 return 1;
4342 return (x == CONST0_RTX (GET_MODE (x)));
4345 /* Returns 1 if OP contains a symbol reference */
4348 symbolic_reference_mentioned_p (rtx op)
4350 const char *fmt;
4351 int i;
4353 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4354 return 1;
4356 fmt = GET_RTX_FORMAT (GET_CODE (op));
4357 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4359 if (fmt[i] == 'E')
4361 int j;
4363 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4364 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4365 return 1;
4368 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4369 return 1;
4372 return 0;
4375 /* Return 1 if it is appropriate to emit `ret' instructions in the
4376 body of a function. Do this only if the epilogue is simple, needing a
4377 couple of insns. Prior to reloading, we can't tell how many registers
4378 must be saved, so return 0 then. Return 0 if there is no frame
4379 marker to de-allocate. */
4382 ix86_can_use_return_insn_p (void)
4384 struct ix86_frame frame;
4386 if (! reload_completed || frame_pointer_needed)
4387 return 0;
4389 /* Don't allow more than 32768 bytes of popped arguments, since that's
4390 all we can handle with one instruction here. */
4391 if (current_function_pops_args
4392 && current_function_args_size >= 32768)
4393 return 0;
4395 ix86_compute_frame_layout (&frame);
4396 return frame.to_allocate == 0 && frame.nregs == 0;
4399 /* Value should be nonzero if functions must have frame pointers.
4400 Zero means the frame pointer need not be set up (and parms may
4401 be accessed via the stack pointer) in functions that seem suitable. */
4404 ix86_frame_pointer_required (void)
4406 /* If we accessed previous frames, then the generated code expects
4407 to be able to access the saved ebp value in our frame. */
4408 if (cfun->machine->accesses_prev_frame)
4409 return 1;
4411 /* Several x86 OSes need a frame pointer for other reasons,
4412 usually pertaining to setjmp. */
4413 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4414 return 1;
4416 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4417 the frame pointer by default. Turn it back on now if we've not
4418 got a leaf function. */
4419 if (TARGET_OMIT_LEAF_FRAME_POINTER
4420 && (!current_function_is_leaf
4421 || ix86_current_function_calls_tls_descriptor))
4422 return 1;
4424 if (current_function_profile)
4425 return 1;
4427 return 0;
4430 /* Record that the current function accesses previous call frames. */
4432 void
4433 ix86_setup_frame_addresses (void)
4435 cfun->machine->accesses_prev_frame = 1;
4438 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
4439 # define USE_HIDDEN_LINKONCE 1
4440 #else
4441 # define USE_HIDDEN_LINKONCE 0
4442 #endif
4444 static int pic_labels_used;
4446 /* Fills in the label name that should be used for a pc thunk for
4447 the given register. */
4449 static void
4450 get_pc_thunk_name (char name[32], unsigned int regno)
4452 if (USE_HIDDEN_LINKONCE)
4453 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
4454 else
4455 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
4459 /* This function emits, for each PIC register actually used by -fpic code,
4460 a thunk that loads that register with the return address of the caller and then returns. */
4462 void
4463 ix86_file_end (void)
4465 rtx xops[2];
4466 int regno;
4468 for (regno = 0; regno < 8; ++regno)
4470 char name[32];
4472 if (! ((pic_labels_used >> regno) & 1))
4473 continue;
4475 get_pc_thunk_name (name, regno);
4477 #if TARGET_MACHO
4478 if (TARGET_MACHO)
4480 switch_to_section (darwin_sections[text_coal_section]);
4481 fputs ("\t.weak_definition\t", asm_out_file);
4482 assemble_name (asm_out_file, name);
4483 fputs ("\n\t.private_extern\t", asm_out_file);
4484 assemble_name (asm_out_file, name);
4485 fputs ("\n", asm_out_file);
4486 ASM_OUTPUT_LABEL (asm_out_file, name);
4488 else
4489 #endif
4490 if (USE_HIDDEN_LINKONCE)
4492 tree decl;
4494 decl = build_decl (FUNCTION_DECL, get_identifier (name),
4495 error_mark_node);
4496 TREE_PUBLIC (decl) = 1;
4497 TREE_STATIC (decl) = 1;
4498 DECL_ONE_ONLY (decl) = 1;
4500 (*targetm.asm_out.unique_section) (decl, 0);
4501 switch_to_section (get_named_section (decl, NULL, 0));
4503 (*targetm.asm_out.globalize_label) (asm_out_file, name);
4504 fputs ("\t.hidden\t", asm_out_file);
4505 assemble_name (asm_out_file, name);
4506 fputc ('\n', asm_out_file);
4507 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
4509 else
4511 switch_to_section (text_section);
4512 ASM_OUTPUT_LABEL (asm_out_file, name);
4515 xops[0] = gen_rtx_REG (SImode, regno);
4516 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
4517 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
4518 output_asm_insn ("ret", xops);
4521 if (NEED_INDICATE_EXEC_STACK)
4522 file_end_indicate_exec_stack ();
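/* For instance (editorial sketch), when the %ebx thunk is needed the loop
   above emits, modulo sectioning directives:

	__i686.get_pc_thunk.bx:
		movl	(%esp), %ebx
		ret
*/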
4525 /* Emit code for the SET_GOT patterns. */
4527 const char *
4528 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
4530 rtx xops[3];
4532 xops[0] = dest;
4533 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
4535 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
4537 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
4539 if (!flag_pic)
4540 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
4541 else
4542 output_asm_insn ("call\t%a2", xops);
4544 #if TARGET_MACHO
4545 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
4546 is what will be referenced by the Mach-O PIC subsystem. */
4547 if (!label)
4548 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
4549 #endif
4551 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4552 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
4554 if (flag_pic)
4555 output_asm_insn ("pop{l}\t%0", xops);
4557 else
4559 char name[32];
4560 get_pc_thunk_name (name, REGNO (dest));
4561 pic_labels_used |= 1 << REGNO (dest);
4563 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4564 xops[2] = gen_rtx_MEM (QImode, xops[2]);
4565 output_asm_insn ("call\t%X2", xops);
4566 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
4567 is what will be referenced by the Mach-O PIC subsystem. */
4568 #if TARGET_MACHO
4569 if (!label)
4570 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
4571 #endif
4574 if (TARGET_MACHO)
4575 return "";
4577 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
4578 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
4579 else
4580 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
4582 return "";
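/* Illustration (editorial sketch) of the two ia32 PIC sequences produced
   above:

   with TARGET_DEEP_BRANCH_PREDICTION:
	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   without it:
	call	.L2
   .L2:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx
*/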
4585 /* Generate a "push" pattern for input ARG. */
4587 static rtx
4588 gen_push (rtx arg)
4590 return gen_rtx_SET (VOIDmode,
4591 gen_rtx_MEM (Pmode,
4592 gen_rtx_PRE_DEC (Pmode,
4593 stack_pointer_rtx)),
4594 arg);
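/* For example (sketch), gen_push (hard_frame_pointer_rtx) on ia32 builds

     (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI bp))

   which is later emitted as a single `pushl %ebp'.  */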
4597 /* Return >= 0 if there is an unused call-clobbered register available
4598 for the entire function. */
4600 static unsigned int
4601 ix86_select_alt_pic_regnum (void)
4603 if (current_function_is_leaf && !current_function_profile
4604 && !ix86_current_function_calls_tls_descriptor)
4606 int i;
4607 for (i = 2; i >= 0; --i)
4608 if (!regs_ever_live[i])
4609 return i;
4612 return INVALID_REGNUM;
4615 /* Return 1 if we need to save REGNO. */
4616 static int
4617 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4619 if (pic_offset_table_rtx
4620 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4621 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4622 || current_function_profile
4623 || current_function_calls_eh_return
4624 || current_function_uses_const_pool))
4626 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4627 return 0;
4628 return 1;
4631 if (current_function_calls_eh_return && maybe_eh_return)
4633 unsigned i;
4634 for (i = 0; ; i++)
4636 unsigned test = EH_RETURN_DATA_REGNO (i);
4637 if (test == INVALID_REGNUM)
4638 break;
4639 if (test == regno)
4640 return 1;
4644 if (cfun->machine->force_align_arg_pointer
4645 && regno == REGNO (cfun->machine->force_align_arg_pointer))
4646 return 1;
4648 return (regs_ever_live[regno]
4649 && !call_used_regs[regno]
4650 && !fixed_regs[regno]
4651 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4654 /* Return number of registers to be saved on the stack. */
4656 static int
4657 ix86_nsaved_regs (void)
4659 int nregs = 0;
4660 int regno;
4662 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4663 if (ix86_save_reg (regno, true))
4664 nregs++;
4665 return nregs;
4668 /* Return the offset between two registers, one to be eliminated, and the other
4669 its replacement, at the start of a routine. */
4671 HOST_WIDE_INT
4672 ix86_initial_elimination_offset (int from, int to)
4674 struct ix86_frame frame;
4675 ix86_compute_frame_layout (&frame);
4677 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4678 return frame.hard_frame_pointer_offset;
4679 else if (from == FRAME_POINTER_REGNUM
4680 && to == HARD_FRAME_POINTER_REGNUM)
4681 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4682 else
4684 gcc_assert (to == STACK_POINTER_REGNUM);
4686 if (from == ARG_POINTER_REGNUM)
4687 return frame.stack_pointer_offset;
4689 gcc_assert (from == FRAME_POINTER_REGNUM);
4690 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4694 /* Fill the ix86_frame structure describing the frame of the currently compiled function. */
4696 static void
4697 ix86_compute_frame_layout (struct ix86_frame *frame)
4699 HOST_WIDE_INT total_size;
4700 unsigned int stack_alignment_needed;
4701 HOST_WIDE_INT offset;
4702 unsigned int preferred_alignment;
4703 HOST_WIDE_INT size = get_frame_size ();
4705 frame->nregs = ix86_nsaved_regs ();
4706 total_size = size;
4708 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4709 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4711 /* During reload iterations the number of registers saved can change.
4712 Recompute the value as needed.  Do not recompute when the number of registers
4713 didn't change, as reload makes multiple calls to this function and does not
4714 expect the decision to change within a single iteration. */
4715 if (!optimize_size
4716 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4718 int count = frame->nregs;
4720 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4721 /* The fast prologue uses move instead of push to save registers. This
4722 is significantly longer, but also executes faster as modern hardware
4723 can execute the moves in parallel, but can't do that for push/pop.
4725 Be careful about choosing which prologue to emit: when the function takes
4726 many instructions to execute we may use the slow version, as we may when
4727 the function is known to be outside a hot spot (this is known only with
4728 profile feedback).  Weight the size of the function by the number of
4729 registers to save, as it is cheap to use one or two push instructions but
4730 very slow to use many of them. */
4731 if (count)
4732 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4733 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4734 || (flag_branch_probabilities
4735 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4736 cfun->machine->use_fast_prologue_epilogue = false;
4737 else
4738 cfun->machine->use_fast_prologue_epilogue
4739 = !expensive_function_p (count);
4741 if (TARGET_PROLOGUE_USING_MOVE
4742 && cfun->machine->use_fast_prologue_epilogue)
4743 frame->save_regs_using_mov = true;
4744 else
4745 frame->save_regs_using_mov = false;
4748 /* Skip return address and saved base pointer. */
4749 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4751 frame->hard_frame_pointer_offset = offset;
4753 /* Do some sanity checking of stack_alignment_needed and
4754 preferred_alignment, since the i386 port is the only one using these
4755 features, and they may break easily. */
4757 gcc_assert (!size || stack_alignment_needed);
4758 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
4759 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4760 gcc_assert (stack_alignment_needed
4761 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4763 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4764 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4766 /* Register save area */
4767 offset += frame->nregs * UNITS_PER_WORD;
4769 /* Va-arg area */
4770 if (ix86_save_varrargs_registers)
4772 offset += X86_64_VARARGS_SIZE;
4773 frame->va_arg_size = X86_64_VARARGS_SIZE;
4775 else
4776 frame->va_arg_size = 0;
4778 /* Align start of frame for local function. */
4779 frame->padding1 = ((offset + stack_alignment_needed - 1)
4780 & -stack_alignment_needed) - offset;
4782 offset += frame->padding1;
4784 /* Frame pointer points here. */
4785 frame->frame_pointer_offset = offset;
4787 offset += size;
4789 /* Add the outgoing arguments area.  It can be skipped if we eliminated
4790 all the function calls as dead code.
4791 Skipping is however impossible when the function calls alloca: the alloca
4792 expander assumes that the last current_function_outgoing_args_size bytes
4793 of the stack frame are unused. */
4794 if (ACCUMULATE_OUTGOING_ARGS
4795 && (!current_function_is_leaf || current_function_calls_alloca
4796 || ix86_current_function_calls_tls_descriptor))
4798 offset += current_function_outgoing_args_size;
4799 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4801 else
4802 frame->outgoing_arguments_size = 0;
4804 /* Align stack boundary. Only needed if we're calling another function
4805 or using alloca. */
4806 if (!current_function_is_leaf || current_function_calls_alloca
4807 || ix86_current_function_calls_tls_descriptor)
4808 frame->padding2 = ((offset + preferred_alignment - 1)
4809 & -preferred_alignment) - offset;
4810 else
4811 frame->padding2 = 0;
4813 offset += frame->padding2;
4815 /* We've reached end of stack frame. */
4816 frame->stack_pointer_offset = offset;
4818 /* Size the prologue needs to allocate. */
4819 frame->to_allocate =
4820 (size + frame->padding1 + frame->padding2
4821 + frame->outgoing_arguments_size + frame->va_arg_size);
4823 if ((!frame->to_allocate && frame->nregs <= 1)
4824 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4825 frame->save_regs_using_mov = false;
4827 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4828 && current_function_is_leaf
4829 && !ix86_current_function_calls_tls_descriptor)
4831 frame->red_zone_size = frame->to_allocate;
4832 if (frame->save_regs_using_mov)
4833 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4834 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4835 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4837 else
4838 frame->red_zone_size = 0;
4839 frame->to_allocate -= frame->red_zone_size;
4840 frame->stack_pointer_offset -= frame->red_zone_size;
4841 #if 0
4842 fprintf (stderr, "nregs: %i\n", frame->nregs);
4843 fprintf (stderr, "size: %i\n", size);
4844 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4845 fprintf (stderr, "padding1: %i\n", frame->padding1);
4846 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4847 fprintf (stderr, "padding2: %i\n", frame->padding2);
4848 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4849 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4850 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4851 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4852 frame->hard_frame_pointer_offset);
4853 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4854 #endif
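/* Sketch of the layout computed above (editorial; addresses grow downward
   from the incoming stack pointer):

	[ return address       ]
	[ saved frame pointer  ]  <- hard_frame_pointer_offset
	[ saved registers      ]
	[ va-arg save area     ]
	[ padding1             ]  <- frame_pointer_offset
	[ local variables      ]
	[ outgoing arguments   ]
	[ padding2             ]  <- stack_pointer_offset (less the red zone)
*/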
4857 /* Emit code to save registers in the prologue. */
4859 static void
4860 ix86_emit_save_regs (void)
4862 unsigned int regno;
4863 rtx insn;
4865 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
4866 if (ix86_save_reg (regno, true))
4868 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4869 RTX_FRAME_RELATED_P (insn) = 1;
4873 /* Emit code to save registers using MOV insns.  The first register
4874 is saved at POINTER + OFFSET. */
4875 static void
4876 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4878 unsigned int regno;
4879 rtx insn;
4881 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4882 if (ix86_save_reg (regno, true))
4884 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4885 Pmode, offset),
4886 gen_rtx_REG (Pmode, regno));
4887 RTX_FRAME_RELATED_P (insn) = 1;
4888 offset += UNITS_PER_WORD;
4892 /* Expand a prologue or epilogue stack adjustment.
4893 The pattern exists to put a dependency on all ebp-based memory accesses.
4894 STYLE should be negative if instructions should be marked as frame related,
4895 zero if the %r11 register is live and cannot be freely used, and positive
4896 otherwise. */
4898 static void
4899 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4901 rtx insn;
4903 if (! TARGET_64BIT)
4904 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4905 else if (x86_64_immediate_operand (offset, DImode))
4906 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4907 else
4909 rtx r11;
4910 /* r11 is used by indirect sibcall return as well, set before the
4911 epilogue and used after the epilogue.  At the moment an indirect sibcall
4912 shouldn't be used together with huge frame sizes in one
4913 function because of the frame_size check in sibcall.c. */
4914 gcc_assert (style);
4915 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4916 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4917 if (style < 0)
4918 RTX_FRAME_RELATED_P (insn) = 1;
4919 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4920 offset));
4922 if (style < 0)
4923 RTX_FRAME_RELATED_P (insn) = 1;
4926 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
4928 static rtx
4929 ix86_internal_arg_pointer (void)
4931 if (FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
4932 && DECL_NAME (current_function_decl)
4933 && MAIN_NAME_P (DECL_NAME (current_function_decl))
4934 && DECL_FILE_SCOPE_P (current_function_decl))
4936 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
4937 return copy_to_reg (cfun->machine->force_align_arg_pointer);
4939 else
4940 return virtual_incoming_args_rtx;
4943 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
4944 This is called from dwarf2out.c to emit call frame instructions
4945 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
4946 static void
4947 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
4949 rtx unspec = SET_SRC (pattern);
4950 gcc_assert (GET_CODE (unspec) == UNSPEC);
4952 switch (index)
4954 case UNSPEC_REG_SAVE:
4955 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
4956 SET_DEST (pattern));
4957 break;
4958 case UNSPEC_DEF_CFA:
4959 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
4960 INTVAL (XVECEXP (unspec, 0, 0)));
4961 break;
4962 default:
4963 gcc_unreachable ();
4967 /* Expand the prologue into a bunch of separate insns. */
4969 void
4970 ix86_expand_prologue (void)
4972 rtx insn;
4973 bool pic_reg_used;
4974 struct ix86_frame frame;
4975 HOST_WIDE_INT allocate;
4977 ix86_compute_frame_layout (&frame);
4979 if (cfun->machine->force_align_arg_pointer)
4981 rtx x, y;
4983 /* Grab the argument pointer. */
4984 x = plus_constant (stack_pointer_rtx, 4);
4985 y = cfun->machine->force_align_arg_pointer;
4986 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
4987 RTX_FRAME_RELATED_P (insn) = 1;
4989 /* The unwind info consists of two parts: install the fafp as the cfa,
4990 and record the fafp as the "save register" of the stack pointer.
4991 The latter is there so that the unwinder can see where it
4992 should restore the stack pointer across the stack-aligning and insn. */
4993 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
4994 x = gen_rtx_SET (VOIDmode, y, x);
4995 RTX_FRAME_RELATED_P (x) = 1;
4996 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
4997 UNSPEC_REG_SAVE);
4998 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
4999 RTX_FRAME_RELATED_P (y) = 1;
5000 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
5001 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5002 REG_NOTES (insn) = x;
5004 /* Align the stack. */
5005 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
5006 GEN_INT (-16)));
5008 /* And here we cheat like madmen with the unwind info. We force the
5009 cfa register back to sp+4, which is exactly what it was at the
5010 start of the function. Re-pushing the return address results in
5011 the return at the same spot relative to the cfa, and thus is
5012 correct wrt the unwind info. */
5013 x = cfun->machine->force_align_arg_pointer;
5014 x = gen_frame_mem (Pmode, plus_constant (x, -4));
5015 insn = emit_insn (gen_push (x));
5016 RTX_FRAME_RELATED_P (insn) = 1;
5018 x = GEN_INT (4);
5019 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
5020 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
5021 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5022 REG_NOTES (insn) = x;
5025 /* Note: AT&T enter does NOT have reversed args. Enter is probably
5026 slower on all targets. Also sdb doesn't like it. */
5028 if (frame_pointer_needed)
5030 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
5031 RTX_FRAME_RELATED_P (insn) = 1;
5033 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
5034 RTX_FRAME_RELATED_P (insn) = 1;
5037 allocate = frame.to_allocate;
5039 if (!frame.save_regs_using_mov)
5040 ix86_emit_save_regs ();
5041 else
5042 allocate += frame.nregs * UNITS_PER_WORD;
5044 /* When using the red zone we may start saving registers before allocating
5045 the stack frame, saving one cycle of the prologue. */
5046 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
5047 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
5048 : stack_pointer_rtx,
5049 -frame.nregs * UNITS_PER_WORD);
5051 if (allocate == 0)
5053 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
5054 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5055 GEN_INT (-allocate), -1);
5056 else
5058 /* Only valid for Win32. */
5059 rtx eax = gen_rtx_REG (SImode, 0);
5060 bool eax_live = ix86_eax_live_at_start_p ();
5061 rtx t;
5063 gcc_assert (!TARGET_64BIT);
5065 if (eax_live)
5067 emit_insn (gen_push (eax));
5068 allocate -= 4;
5071 emit_move_insn (eax, GEN_INT (allocate));
5073 insn = emit_insn (gen_allocate_stack_worker (eax));
5074 RTX_FRAME_RELATED_P (insn) = 1;
5075 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
5076 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
5077 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
5078 t, REG_NOTES (insn));
5080 if (eax_live)
5082 if (frame_pointer_needed)
5083 t = plus_constant (hard_frame_pointer_rtx,
5084 allocate
5085 - frame.to_allocate
5086 - frame.nregs * UNITS_PER_WORD);
5087 else
5088 t = plus_constant (stack_pointer_rtx, allocate);
5089 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
5093 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
5095 if (!frame_pointer_needed || !frame.to_allocate)
5096 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
5097 else
5098 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
5099 -frame.nregs * UNITS_PER_WORD);
5102 pic_reg_used = false;
5103 if (pic_offset_table_rtx
5104 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5105 || current_function_profile))
5107 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
5109 if (alt_pic_reg_used != INVALID_REGNUM)
5110 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5112 pic_reg_used = true;
5115 if (pic_reg_used)
5117 if (TARGET_64BIT)
5118 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
5119 else
5120 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5122 /* Even with accurate pre-reload life analysis, we can wind up
5123 deleting all references to the pic register after reload.
5124 Consider if cross-jumping unifies two sides of a branch
5125 controlled by a comparison vs the only read from a global.
5126 In which case, allow the set_got to be deleted, though we're
5127 too late to do anything about the ebx save in the prologue. */
5128 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5131 /* Prevent function calls from being scheduled before the call to mcount.
5132 In the pic_reg_used case, make sure that the got load isn't deleted. */
5133 if (current_function_profile)
5134 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
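/* For a typical ia32 function with a frame pointer, one call-saved
   register and a PIC reference, the expansion above corresponds roughly
   to (illustration only):

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	subl	$N, %esp
	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
*/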
5137 /* Emit code to restore saved registers using MOV insns. First register
5138 is restored from POINTER + OFFSET. */
5139 static void
5140 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5141 int maybe_eh_return)
5143 int regno;
5144 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5146 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5147 if (ix86_save_reg (regno, maybe_eh_return))
5149 /* Ensure that adjust_address won't be forced to produce a pointer
5150 out of the range allowed by the x86-64 instruction set. */
5151 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5153 rtx r11;
5155 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
5156 emit_move_insn (r11, GEN_INT (offset));
5157 emit_insn (gen_adddi3 (r11, r11, pointer));
5158 base_address = gen_rtx_MEM (Pmode, r11);
5159 offset = 0;
5161 emit_move_insn (gen_rtx_REG (Pmode, regno),
5162 adjust_address (base_address, Pmode, offset));
5163 offset += UNITS_PER_WORD;
5167 /* Restore function stack, frame, and registers. */
5169 void
5170 ix86_expand_epilogue (int style)
5172 int regno;
5173 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5174 struct ix86_frame frame;
5175 HOST_WIDE_INT offset;
5177 ix86_compute_frame_layout (&frame);
5179 /* Calculate start of saved registers relative to ebp. Special care
5180 must be taken for the normal return case of a function using
5181 eh_return: the eax and edx registers are marked as saved, but not
5182 restored along this path. */
5183 offset = frame.nregs;
5184 if (current_function_calls_eh_return && style != 2)
5185 offset -= 2;
5186 offset *= -UNITS_PER_WORD;
5188 /* If we're only restoring one register and sp is not valid then
5189 use a move instruction to restore the register, since it's
5190 less work than reloading sp and popping the register.
5192 The default code results in a stack adjustment using an add/lea instruction,
5193 while this code results in a LEAVE instruction (or discrete equivalent),
5194 so it is profitable in some other cases as well, especially when there
5195 are no registers to restore.  We also use this code when TARGET_USE_LEAVE
5196 and there is exactly one register to pop.  This heuristic may need some
5197 tuning in future. */
5198 if ((!sp_valid && frame.nregs <= 1)
5199 || (TARGET_EPILOGUE_USING_MOVE
5200 && cfun->machine->use_fast_prologue_epilogue
5201 && (frame.nregs > 1 || frame.to_allocate))
5202 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5203 || (frame_pointer_needed && TARGET_USE_LEAVE
5204 && cfun->machine->use_fast_prologue_epilogue
5205 && frame.nregs == 1)
5206 || current_function_calls_eh_return)
5208 /* Restore registers.  We can use ebp or esp to address the memory
5209 locations.  If both are available, default to ebp, since offsets
5210 are known to be small.  The only exception is esp pointing directly to the
5211 end of the block of saved registers, where we may simplify the addressing
5212 mode. */
5214 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5215 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5216 frame.to_allocate, style == 2);
5217 else
5218 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5219 offset, style == 2);
5221 /* eh_return epilogues need %ecx added to the stack pointer. */
5222 if (style == 2)
5224 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5226 if (frame_pointer_needed)
5228 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5229 tmp = plus_constant (tmp, UNITS_PER_WORD);
5230 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5232 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5233 emit_move_insn (hard_frame_pointer_rtx, tmp);
5235 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5236 const0_rtx, style);
5238 else
5240 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5241 tmp = plus_constant (tmp, (frame.to_allocate
5242 + frame.nregs * UNITS_PER_WORD));
5243 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5246 else if (!frame_pointer_needed)
5247 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5248 GEN_INT (frame.to_allocate
5249 + frame.nregs * UNITS_PER_WORD),
5250 style);
5251 /* If not an i386, mov & pop is faster than "leave". */
5252 else if (TARGET_USE_LEAVE || optimize_size
5253 || !cfun->machine->use_fast_prologue_epilogue)
5254 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5255 else
5257 pro_epilogue_adjust_stack (stack_pointer_rtx,
5258 hard_frame_pointer_rtx,
5259 const0_rtx, style);
5260 if (TARGET_64BIT)
5261 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5262 else
5263 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5266 else
5268 /* First step is to deallocate the stack frame so that we can
5269 pop the registers. */
5270 if (!sp_valid)
5272 gcc_assert (frame_pointer_needed);
5273 pro_epilogue_adjust_stack (stack_pointer_rtx,
5274 hard_frame_pointer_rtx,
5275 GEN_INT (offset), style);
5277 else if (frame.to_allocate)
5278 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5279 GEN_INT (frame.to_allocate), style);
5281 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5282 if (ix86_save_reg (regno, false))
5284 if (TARGET_64BIT)
5285 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5286 else
5287 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5289 if (frame_pointer_needed)
5291 /* Leave results in shorter dependency chains on CPUs that are
5292 able to grok it fast. */
5293 if (TARGET_USE_LEAVE)
5294 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5295 else if (TARGET_64BIT)
5296 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5297 else
5298 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5302 if (cfun->machine->force_align_arg_pointer)
5304 emit_insn (gen_addsi3 (stack_pointer_rtx,
5305 cfun->machine->force_align_arg_pointer,
5306 GEN_INT (-4)));
5309 /* Sibcall epilogues don't want a return instruction. */
5310 if (style == 0)
5311 return;
5313 if (current_function_pops_args && current_function_args_size)
5315 rtx popc = GEN_INT (current_function_pops_args);
5317 /* The i386 can only pop 64K bytes. If asked to pop more, pop the
5318 return address, do an explicit add, and jump indirectly to the
5319 caller. */
5321 if (current_function_pops_args >= 65536)
5323 rtx ecx = gen_rtx_REG (SImode, 2);
5325 /* There is no "pascal" calling convention in the 64bit ABI. */
5326 gcc_assert (!TARGET_64BIT);
5328 emit_insn (gen_popsi1 (ecx));
5329 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5330 emit_jump_insn (gen_return_indirect_internal (ecx));
5332 else
5333 emit_jump_insn (gen_return_pop_internal (popc));
5335 else
5336 emit_jump_insn (gen_return_internal ());
5339 /* Reset from the function's potential modifications. */
5341 static void
5342 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5343 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5345 if (pic_offset_table_rtx)
5346 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5349 /* Extract the parts of an RTL expression that is a valid memory address
5350 for an instruction. Return 0 if the structure of the address is
5351 grossly off. Return -1 if the address contains ASHIFT, so it is not
5352 strictly valid, but still used for computing length of lea instruction. */
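/* Illustrative example (added; A and B stand for arbitrary general
   registers): the address

       (plus (plus (mult (reg A) (const_int 4)) (reg B)) (const_int 8))

   decomposes into base = B, index = A, scale = 4 and disp = 8, i.e. the
   operand of an 8(B,A,4) memory reference.  */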
5354 int
5355 ix86_decompose_address (rtx addr, struct ix86_address *out)
5357 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5358 rtx base_reg, index_reg;
5359 HOST_WIDE_INT scale = 1;
5360 rtx scale_rtx = NULL_RTX;
5361 int retval = 1;
5362 enum ix86_address_seg seg = SEG_DEFAULT;
5364 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5365 base = addr;
5366 else if (GET_CODE (addr) == PLUS)
5368 rtx addends[4], op;
5369 int n = 0, i;
5371 op = addr;
5374 if (n >= 4)
5375 return 0;
5376 addends[n++] = XEXP (op, 1);
5377 op = XEXP (op, 0);
5379 while (GET_CODE (op) == PLUS);
5380 if (n >= 4)
5381 return 0;
5382 addends[n] = op;
5384 for (i = n; i >= 0; --i)
5386 op = addends[i];
5387 switch (GET_CODE (op))
5389 case MULT:
5390 if (index)
5391 return 0;
5392 index = XEXP (op, 0);
5393 scale_rtx = XEXP (op, 1);
5394 break;
5396 case UNSPEC:
5397 if (XINT (op, 1) == UNSPEC_TP
5398 && TARGET_TLS_DIRECT_SEG_REFS
5399 && seg == SEG_DEFAULT)
5400 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5401 else
5402 return 0;
5403 break;
5405 case REG:
5406 case SUBREG:
5407 if (!base)
5408 base = op;
5409 else if (!index)
5410 index = op;
5411 else
5412 return 0;
5413 break;
5415 case CONST:
5416 case CONST_INT:
5417 case SYMBOL_REF:
5418 case LABEL_REF:
5419 if (disp)
5420 return 0;
5421 disp = op;
5422 break;
5424 default:
5425 return 0;
5429 else if (GET_CODE (addr) == MULT)
5431 index = XEXP (addr, 0); /* index*scale */
5432 scale_rtx = XEXP (addr, 1);
5434 else if (GET_CODE (addr) == ASHIFT)
5436 rtx tmp;
5438 /* We're called for lea too, which implements ashift on occasion. */
5439 index = XEXP (addr, 0);
5440 tmp = XEXP (addr, 1);
5441 if (GET_CODE (tmp) != CONST_INT)
5442 return 0;
5443 scale = INTVAL (tmp);
5444 if ((unsigned HOST_WIDE_INT) scale > 3)
5445 return 0;
5446 scale = 1 << scale;
5447 retval = -1;
5449 else
5450 disp = addr; /* displacement */
5452 /* Extract the integral value of scale. */
5453 if (scale_rtx)
5455 if (GET_CODE (scale_rtx) != CONST_INT)
5456 return 0;
5457 scale = INTVAL (scale_rtx);
5460 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
5461 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
5463 /* Allow arg pointer and stack pointer as index if there is no scaling. */
5464 if (base_reg && index_reg && scale == 1
5465 && (index_reg == arg_pointer_rtx
5466 || index_reg == frame_pointer_rtx
5467 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
5469 rtx tmp;
5470 tmp = base, base = index, index = tmp;
5471 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
5474 /* Special case: %ebp cannot be encoded as a base without a displacement. */
5475 if ((base_reg == hard_frame_pointer_rtx
5476 || base_reg == frame_pointer_rtx
5477 || base_reg == arg_pointer_rtx) && !disp)
5478 disp = const0_rtx;
5480 /* Special case: on K6, [%esi] makes the instruction vector decoded.
5481 Avoid this by transforming to [%esi+0]. */
5482 if (ix86_tune == PROCESSOR_K6 && !optimize_size
5483 && base_reg && !index_reg && !disp
5484 && REG_P (base_reg)
5485 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
5486 disp = const0_rtx;
5488 /* Special case: encode reg+reg instead of reg*2. */
5489 if (!base && index && scale && scale == 2)
5490 base = index, base_reg = index_reg, scale = 1;
5492 /* Special case: scaling cannot be encoded without base or displacement. */
5493 if (!base && !disp && index && scale != 1)
5494 disp = const0_rtx;
5496 out->base = base;
5497 out->index = index;
5498 out->disp = disp;
5499 out->scale = scale;
5500 out->seg = seg;
5502 return retval;
5505 /* Return the cost of the memory address x.
5506 For i386, it is better to use a complex address than let gcc copy
5507 the address into a reg and make a new pseudo. But not if the address
5508 requires two regs - that would mean more pseudos with longer
5509 lifetimes. */
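/* Rough example (added; it ignores the K6 special case below): the
   address 8(%ebp) costs 0 -- the base cost of 1 minus 1 for the nonzero
   displacement -- while an address built from two distinct pseudo
   registers, (plus (reg P1) (reg P2)), costs 3 (1, plus 1 for involving
   a pseudo, plus 1 for needing two of them), so the latter form is
   discouraged.  */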
5510 static int
5511 ix86_address_cost (rtx x)
5513 struct ix86_address parts;
5514 int cost = 1;
5515 int ok = ix86_decompose_address (x, &parts);
5517 gcc_assert (ok);
5519 if (parts.base && GET_CODE (parts.base) == SUBREG)
5520 parts.base = SUBREG_REG (parts.base);
5521 if (parts.index && GET_CODE (parts.index) == SUBREG)
5522 parts.index = SUBREG_REG (parts.index);
5524 /* More complex memory references are better. */
5525 if (parts.disp && parts.disp != const0_rtx)
5526 cost--;
5527 if (parts.seg != SEG_DEFAULT)
5528 cost--;
5530 /* Attempt to minimize number of registers in the address. */
5531 if ((parts.base
5532 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
5533 || (parts.index
5534 && (!REG_P (parts.index)
5535 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
5536 cost++;
5538 if (parts.base
5539 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
5540 && parts.index
5541 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
5542 && parts.base != parts.index)
5543 cost++;
5545 /* The AMD K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
5546 since its predecode logic can't detect the length of instructions
5547 and decoding degenerates to the vector decoder. Increase the cost of
5548 such addresses here. The penalty is at least 2 cycles. It may be
5549 worthwhile to split such addresses or even refuse them entirely.
5551 The following addressing modes are affected:
5552 [base+scale*index]
5553 [scale*index+disp]
5554 [base+index]
5556 The first and last cases may be avoidable by explicitly coding a zero
5557 displacement in the memory address, but I don't have an AMD K6 machine
5558 handy to check this theory. */
5560 if (TARGET_K6
5561 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
5562 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
5563 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
5564 cost += 10;
5566 return cost;
5569 /* If X is a machine specific address (i.e. a symbol or label being
5570 referenced as a displacement from the GOT implemented using an
5571 UNSPEC), then return the base term. Otherwise return X. */
5573 rtx
5574 ix86_find_base_term (rtx x)
5576 rtx term;
5578 if (TARGET_64BIT)
5580 if (GET_CODE (x) != CONST)
5581 return x;
5582 term = XEXP (x, 0);
5583 if (GET_CODE (term) == PLUS
5584 && (GET_CODE (XEXP (term, 1)) == CONST_INT
5585 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
5586 term = XEXP (term, 0);
5587 if (GET_CODE (term) != UNSPEC
5588 || XINT (term, 1) != UNSPEC_GOTPCREL)
5589 return x;
5591 term = XVECEXP (term, 0, 0);
5593 if (GET_CODE (term) != SYMBOL_REF
5594 && GET_CODE (term) != LABEL_REF)
5595 return x;
5597 return term;
5600 term = ix86_delegitimize_address (x);
5602 if (GET_CODE (term) != SYMBOL_REF
5603 && GET_CODE (term) != LABEL_REF)
5604 return x;
5606 return term;
5609 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
5610 this is used to form addresses to local data when -fPIC is in
5611 use. */
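/* For example (illustrative; the symbol name is made up), the Mach-O
   form

       (minus (symbol_ref "_foo") (symbol_ref "<pic base>"))

   is accepted here; any other MINUS is rejected.  */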
5613 static bool
5614 darwin_local_data_pic (rtx disp)
5616 if (GET_CODE (disp) == MINUS)
5618 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
5619 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
5620 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
5622 const char *sym_name = XSTR (XEXP (disp, 1), 0);
5623 if (! strcmp (sym_name, "<pic base>"))
5624 return true;
5628 return false;
5631 /* Determine if a given RTX is a valid constant. We already know this
5632 satisfies CONSTANT_P. */
5634 bool
5635 legitimate_constant_p (rtx x)
5637 switch (GET_CODE (x))
5639 case CONST:
5640 x = XEXP (x, 0);
5642 if (GET_CODE (x) == PLUS)
5644 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5645 return false;
5646 x = XEXP (x, 0);
5649 if (TARGET_MACHO && darwin_local_data_pic (x))
5650 return true;
5652 /* Only some unspecs are valid as "constants". */
5653 if (GET_CODE (x) == UNSPEC)
5654 switch (XINT (x, 1))
5656 case UNSPEC_GOTOFF:
5657 return TARGET_64BIT;
5658 case UNSPEC_TPOFF:
5659 case UNSPEC_NTPOFF:
5660 x = XVECEXP (x, 0, 0);
5661 return (GET_CODE (x) == SYMBOL_REF
5662 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
5663 case UNSPEC_DTPOFF:
5664 x = XVECEXP (x, 0, 0);
5665 return (GET_CODE (x) == SYMBOL_REF
5666 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
5667 default:
5668 return false;
5671 /* We must have drilled down to a symbol. */
5672 if (GET_CODE (x) == LABEL_REF)
5673 return true;
5674 if (GET_CODE (x) != SYMBOL_REF)
5675 return false;
5676 /* FALLTHRU */
5678 case SYMBOL_REF:
5679 /* TLS symbols are never valid. */
5680 if (SYMBOL_REF_TLS_MODEL (x))
5681 return false;
5682 break;
5684 default:
5685 break;
5688 /* Otherwise we handle everything else in the move patterns. */
5689 return true;
5692 /* Determine if it's legal to put X into the constant pool. This
5693 is not possible for the address of thread-local symbols, which
5694 is checked above. */
5696 static bool
5697 ix86_cannot_force_const_mem (rtx x)
5699 return !legitimate_constant_p (x);
5702 /* Determine if a given RTX is a valid constant address. */
5704 bool
5705 constant_address_p (rtx x)
5707 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
5710 /* Nonzero if the constant value X is a legitimate general operand
5711 when generating PIC code. It is given that flag_pic is on and
5712 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
5714 bool
5715 legitimate_pic_operand_p (rtx x)
5717 rtx inner;
5719 switch (GET_CODE (x))
5721 case CONST:
5722 inner = XEXP (x, 0);
5723 if (GET_CODE (inner) == PLUS
5724 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
5725 inner = XEXP (inner, 0);
5727 /* Only some unspecs are valid as "constants". */
5728 if (GET_CODE (inner) == UNSPEC)
5729 switch (XINT (inner, 1))
5731 case UNSPEC_GOTOFF:
5732 return TARGET_64BIT;
5733 case UNSPEC_TPOFF:
5734 x = XVECEXP (inner, 0, 0);
5735 return (GET_CODE (x) == SYMBOL_REF
5736 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
5737 default:
5738 return false;
5740 /* FALLTHRU */
5742 case SYMBOL_REF:
5743 case LABEL_REF:
5744 return legitimate_pic_address_disp_p (x);
5746 default:
5747 return true;
5751 /* Determine if a given CONST RTX is a valid memory displacement
5752 in PIC mode. */
5754 int
5755 legitimate_pic_address_disp_p (rtx disp)
5757 bool saw_plus;
5759 /* In 64bit mode we can allow direct addresses of symbols and labels
5760 when they are not dynamic symbols. */
5761 if (TARGET_64BIT)
5763 rtx op0 = disp, op1;
5765 switch (GET_CODE (disp))
5767 case LABEL_REF:
5768 return true;
5770 case CONST:
5771 if (GET_CODE (XEXP (disp, 0)) != PLUS)
5772 break;
5773 op0 = XEXP (XEXP (disp, 0), 0);
5774 op1 = XEXP (XEXP (disp, 0), 1);
5775 if (GET_CODE (op1) != CONST_INT
5776 || INTVAL (op1) >= 16*1024*1024
5777 || INTVAL (op1) < -16*1024*1024)
5778 break;
5779 if (GET_CODE (op0) == LABEL_REF)
5780 return true;
5781 if (GET_CODE (op0) != SYMBOL_REF)
5782 break;
5783 /* FALLTHRU */
5785 case SYMBOL_REF:
5786 /* TLS references should always be enclosed in UNSPEC. */
5787 if (SYMBOL_REF_TLS_MODEL (op0))
5788 return false;
5789 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
5790 return true;
5791 break;
5793 default:
5794 break;
5797 if (GET_CODE (disp) != CONST)
5798 return 0;
5799 disp = XEXP (disp, 0);
5801 if (TARGET_64BIT)
5803 /* It is not safe to allow PLUS expressions. This limits the allowed
5804 distance of GOT references. We should not need these anyway. */
5805 if (GET_CODE (disp) != UNSPEC
5806 || (XINT (disp, 1) != UNSPEC_GOTPCREL
5807 && XINT (disp, 1) != UNSPEC_GOTOFF))
5808 return 0;
5810 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5811 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5812 return 0;
5813 return 1;
5816 saw_plus = false;
5817 if (GET_CODE (disp) == PLUS)
5819 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5820 return 0;
5821 disp = XEXP (disp, 0);
5822 saw_plus = true;
5825 if (TARGET_MACHO && darwin_local_data_pic (disp))
5826 return 1;
5828 if (GET_CODE (disp) != UNSPEC)
5829 return 0;
5831 switch (XINT (disp, 1))
5833 case UNSPEC_GOT:
5834 if (saw_plus)
5835 return false;
5836 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5837 case UNSPEC_GOTOFF:
5838 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
5839 While the ABI also specifies a 32bit relocation, we don't produce it
5840 in the small PIC model at all. */
5841 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5842 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5843 && !TARGET_64BIT)
5844 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5845 return false;
5846 case UNSPEC_GOTTPOFF:
5847 case UNSPEC_GOTNTPOFF:
5848 case UNSPEC_INDNTPOFF:
5849 if (saw_plus)
5850 return false;
5851 disp = XVECEXP (disp, 0, 0);
5852 return (GET_CODE (disp) == SYMBOL_REF
5853 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
5854 case UNSPEC_NTPOFF:
5855 disp = XVECEXP (disp, 0, 0);
5856 return (GET_CODE (disp) == SYMBOL_REF
5857 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
5858 case UNSPEC_DTPOFF:
5859 disp = XVECEXP (disp, 0, 0);
5860 return (GET_CODE (disp) == SYMBOL_REF
5861 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
5864 return 0;
5867 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5868 memory address for an instruction. The MODE argument is the machine mode
5869 for the MEM expression that wants to use this address.
5871 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5872 convert common non-canonical forms to canonical form so that they will
5873 be recognized. */
5875 int
5876 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5878 struct ix86_address parts;
5879 rtx base, index, disp;
5880 HOST_WIDE_INT scale;
5881 const char *reason = NULL;
5882 rtx reason_rtx = NULL_RTX;
5884 if (TARGET_DEBUG_ADDR)
5886 fprintf (stderr,
5887 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5888 GET_MODE_NAME (mode), strict);
5889 debug_rtx (addr);
5892 if (ix86_decompose_address (addr, &parts) <= 0)
5894 reason = "decomposition failed";
5895 goto report_error;
5898 base = parts.base;
5899 index = parts.index;
5900 disp = parts.disp;
5901 scale = parts.scale;
5903 /* Validate base register.
5905 Don't allow SUBREG's that span more than a word here. It can lead to spill
5906 failures when the base is one word out of a two word structure, which is
5907 represented internally as a DImode int. */
5909 if (base)
5911 rtx reg;
5912 reason_rtx = base;
5914 if (REG_P (base))
5915 reg = base;
5916 else if (GET_CODE (base) == SUBREG
5917 && REG_P (SUBREG_REG (base))
5918 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
5919 <= UNITS_PER_WORD)
5920 reg = SUBREG_REG (base);
5921 else
5923 reason = "base is not a register";
5924 goto report_error;
5927 if (GET_MODE (base) != Pmode)
5929 reason = "base is not in Pmode";
5930 goto report_error;
5933 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
5934 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
5936 reason = "base is not valid";
5937 goto report_error;
5941 /* Validate index register.
5943 Don't allow SUBREG's that span more than a word here -- same as above. */
5945 if (index)
5947 rtx reg;
5948 reason_rtx = index;
5950 if (REG_P (index))
5951 reg = index;
5952 else if (GET_CODE (index) == SUBREG
5953 && REG_P (SUBREG_REG (index))
5954 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
5955 <= UNITS_PER_WORD)
5956 reg = SUBREG_REG (index);
5957 else
5959 reason = "index is not a register";
5960 goto report_error;
5963 if (GET_MODE (index) != Pmode)
5965 reason = "index is not in Pmode";
5966 goto report_error;
5969 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
5970 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
5972 reason = "index is not valid";
5973 goto report_error;
5977 /* Validate scale factor. */
5978 if (scale != 1)
5980 reason_rtx = GEN_INT (scale);
5981 if (!index)
5983 reason = "scale without index";
5984 goto report_error;
5987 if (scale != 2 && scale != 4 && scale != 8)
5989 reason = "scale is not a valid multiplier";
5990 goto report_error;
5994 /* Validate displacement. */
5995 if (disp)
5997 reason_rtx = disp;
5999 if (GET_CODE (disp) == CONST
6000 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
6001 switch (XINT (XEXP (disp, 0), 1))
6003 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
6004 used. While the ABI also specifies 32bit relocations, we don't produce
6005 them at all and use IP-relative addressing instead. */
6006 case UNSPEC_GOT:
6007 case UNSPEC_GOTOFF:
6008 gcc_assert (flag_pic);
6009 if (!TARGET_64BIT)
6010 goto is_legitimate_pic;
6011 reason = "64bit address unspec";
6012 goto report_error;
6014 case UNSPEC_GOTPCREL:
6015 gcc_assert (flag_pic);
6016 goto is_legitimate_pic;
6018 case UNSPEC_GOTTPOFF:
6019 case UNSPEC_GOTNTPOFF:
6020 case UNSPEC_INDNTPOFF:
6021 case UNSPEC_NTPOFF:
6022 case UNSPEC_DTPOFF:
6023 break;
6025 default:
6026 reason = "invalid address unspec";
6027 goto report_error;
6030 else if (flag_pic && (SYMBOLIC_CONST (disp)
6031 #if TARGET_MACHO
6032 && !machopic_operand_p (disp)
6033 #endif
6036 is_legitimate_pic:
6037 if (TARGET_64BIT && (index || base))
6039 /* foo@dtpoff(%rX) is ok. */
6040 if (GET_CODE (disp) != CONST
6041 || GET_CODE (XEXP (disp, 0)) != PLUS
6042 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
6043 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
6044 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
6045 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
6047 reason = "non-constant pic memory reference";
6048 goto report_error;
6051 else if (! legitimate_pic_address_disp_p (disp))
6053 reason = "displacement is an invalid pic construct";
6054 goto report_error;
6057 /* This code used to verify that a symbolic pic displacement
6058 includes the pic_offset_table_rtx register.
6060 While this is a good idea, unfortunately these constructs may
6061 be created by the "adds using lea" optimization for incorrect
6062 code like:
6064 int a;
6065 int foo(int i)
6066 {
6067 return *(&a+i);
6068 }
6070 This code is nonsensical, but it results in addressing the
6071 GOT table with pic_offset_table_rtx as the base. We can't
6072 easily refuse it, since it gets matched by the
6073 "addsi3" pattern, which later gets split to an lea when the
6074 output register differs from the input. While this
6075 could be handled by a separate addsi pattern for this case
6076 that never results in an lea, disabling this test seems to be
6077 the easier and correct fix for the crash. */
6079 else if (GET_CODE (disp) != LABEL_REF
6080 && GET_CODE (disp) != CONST_INT
6081 && (GET_CODE (disp) != CONST
6082 || !legitimate_constant_p (disp))
6083 && (GET_CODE (disp) != SYMBOL_REF
6084 || !legitimate_constant_p (disp)))
6086 reason = "displacement is not constant";
6087 goto report_error;
6089 else if (TARGET_64BIT
6090 && !x86_64_immediate_operand (disp, VOIDmode))
6092 reason = "displacement is out of range";
6093 goto report_error;
6097 /* Everything looks valid. */
6098 if (TARGET_DEBUG_ADDR)
6099 fprintf (stderr, "Success.\n");
6100 return TRUE;
6102 report_error:
6103 if (TARGET_DEBUG_ADDR)
6105 fprintf (stderr, "Error: %s\n", reason);
6106 debug_rtx (reason_rtx);
6108 return FALSE;
6111 /* Return a unique alias set for the GOT. */
6113 static HOST_WIDE_INT
6114 ix86_GOT_alias_set (void)
6116 static HOST_WIDE_INT set = -1;
6117 if (set == -1)
6118 set = new_alias_set ();
6119 return set;
6122 /* Return a legitimate reference for ORIG (an address) using the
6123 register REG. If REG is 0, a new pseudo is generated.
6125 There are two types of references that must be handled:
6127 1. Global data references must load the address from the GOT, via
6128 the PIC reg. An insn is emitted to do this load, and the reg is
6129 returned.
6131 2. Static data references, constant pool addresses, and code labels
6132 compute the address as an offset from the GOT, whose base is in
6133 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6134 differentiate them from global data objects. The returned
6135 address is the PIC reg + an unspec constant.
6137 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6138 reg also appears in the address. */
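/* Illustrative sketch (added; symbol names are made up).  On ia32 with
   -fpic, a global symbol `foo' becomes a load from the GOT,

       (mem (plus pic_offset_table_rtx
                  (const (unspec [foo] UNSPEC_GOT))))

   i.e. roughly `movl foo@GOT(%ebx), %reg', forced into a register,
   while a local symbol `bar' becomes the GOT-relative sum

       (plus pic_offset_table_rtx
             (const (unspec [bar] UNSPEC_GOTOFF)))

   i.e. the operand `bar@GOTOFF(%ebx)', which is usable directly as an
   address.  */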
6140 static rtx
6141 legitimize_pic_address (rtx orig, rtx reg)
6143 rtx addr = orig;
6144 rtx new = orig;
6145 rtx base;
6147 #if TARGET_MACHO
6148 if (reg == 0)
6149 reg = gen_reg_rtx (Pmode);
6150 /* Use the generic Mach-O PIC machinery. */
6151 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6152 #endif
6154 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6155 new = addr;
6156 else if (TARGET_64BIT
6157 && ix86_cmodel != CM_SMALL_PIC
6158 && local_symbolic_operand (addr, Pmode))
6160 rtx tmpreg;
6161 /* This symbol may be referenced via a displacement from the PIC
6162 base address (@GOTOFF). */
6164 if (reload_in_progress)
6165 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6166 if (GET_CODE (addr) == CONST)
6167 addr = XEXP (addr, 0);
6168 if (GET_CODE (addr) == PLUS)
6170 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6171 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6173 else
6174 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6175 new = gen_rtx_CONST (Pmode, new);
6176 if (!reg)
6177 tmpreg = gen_reg_rtx (Pmode);
6178 else
6179 tmpreg = reg;
6180 emit_move_insn (tmpreg, new);
6182 if (reg != 0)
6184 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6185 tmpreg, 1, OPTAB_DIRECT);
6186 new = reg;
6188 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6190 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6192 /* This symbol may be referenced via a displacement from the PIC
6193 base address (@GOTOFF). */
6195 if (reload_in_progress)
6196 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6197 if (GET_CODE (addr) == CONST)
6198 addr = XEXP (addr, 0);
6199 if (GET_CODE (addr) == PLUS)
6201 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6202 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6204 else
6205 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6206 new = gen_rtx_CONST (Pmode, new);
6207 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6209 if (reg != 0)
6211 emit_move_insn (reg, new);
6212 new = reg;
6215 else if (GET_CODE (addr) == SYMBOL_REF)
6217 if (TARGET_64BIT)
6219 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6220 new = gen_rtx_CONST (Pmode, new);
6221 new = gen_const_mem (Pmode, new);
6222 set_mem_alias_set (new, ix86_GOT_alias_set ());
6224 if (reg == 0)
6225 reg = gen_reg_rtx (Pmode);
6226 /* Use gen_movsi directly; otherwise the address is loaded
6227 into a register for CSE. We don't want to CSE this address;
6228 instead we CSE addresses from the GOT table, so skip this. */
6229 emit_insn (gen_movsi (reg, new));
6230 new = reg;
6232 else
6234 /* This symbol must be referenced via a load from the
6235 Global Offset Table (@GOT). */
6237 if (reload_in_progress)
6238 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6239 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6240 new = gen_rtx_CONST (Pmode, new);
6241 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6242 new = gen_const_mem (Pmode, new);
6243 set_mem_alias_set (new, ix86_GOT_alias_set ());
6245 if (reg == 0)
6246 reg = gen_reg_rtx (Pmode);
6247 emit_move_insn (reg, new);
6248 new = reg;
6251 else
6253 if (GET_CODE (addr) == CONST_INT
6254 && !x86_64_immediate_operand (addr, VOIDmode))
6256 if (reg)
6258 emit_move_insn (reg, addr);
6259 new = reg;
6261 else
6262 new = force_reg (Pmode, addr);
6264 else if (GET_CODE (addr) == CONST)
6266 addr = XEXP (addr, 0);
6268 /* We must match stuff we generate before. Assume the only
6269 unspecs that can get here are ours. Not that we could do
6270 anything with them anyway.... */
6271 if (GET_CODE (addr) == UNSPEC
6272 || (GET_CODE (addr) == PLUS
6273 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6274 return orig;
6275 gcc_assert (GET_CODE (addr) == PLUS);
6277 if (GET_CODE (addr) == PLUS)
6279 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6281 /* Check first to see if this is a constant offset from a @GOTOFF
6282 symbol reference. */
6283 if (local_symbolic_operand (op0, Pmode)
6284 && GET_CODE (op1) == CONST_INT)
6286 if (!TARGET_64BIT)
6288 if (reload_in_progress)
6289 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6290 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6291 UNSPEC_GOTOFF);
6292 new = gen_rtx_PLUS (Pmode, new, op1);
6293 new = gen_rtx_CONST (Pmode, new);
6294 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6296 if (reg != 0)
6298 emit_move_insn (reg, new);
6299 new = reg;
6302 else
6304 if (INTVAL (op1) < -16*1024*1024
6305 || INTVAL (op1) >= 16*1024*1024)
6307 if (!x86_64_immediate_operand (op1, Pmode))
6308 op1 = force_reg (Pmode, op1);
6309 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6313 else
6315 base = legitimize_pic_address (XEXP (addr, 0), reg);
6316 new = legitimize_pic_address (XEXP (addr, 1),
6317 base == reg ? NULL_RTX : reg);
6319 if (GET_CODE (new) == CONST_INT)
6320 new = plus_constant (base, INTVAL (new));
6321 else
6323 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6325 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6326 new = XEXP (new, 1);
6328 new = gen_rtx_PLUS (Pmode, base, new);
6333 return new;
6336 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6338 static rtx
6339 get_thread_pointer (int to_reg)
6341 rtx tp, reg, insn;
6343 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6344 if (!to_reg)
6345 return tp;
6347 reg = gen_reg_rtx (Pmode);
6348 insn = gen_rtx_SET (VOIDmode, reg, tp);
6349 insn = emit_insn (insn);
6351 return reg;
6354 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6355 false if we expect this to be used for a memory address and true if
6356 we expect to load the address into a register. */
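/* Rough summary of the expansions below (added for orientation; the
   exact insn sequences come from the corresponding patterns in
   i386.md):

     global dynamic   the address is the result of a
                      tls_global_dynamic_{32,64} pattern -- a
                      __tls_get_addr call, or, for TARGET_GNU2_TLS
                      (TLS descriptors), a descriptor call whose result
                      is added to the thread pointer.
     local dynamic    one tls_local_dynamic_base_{32,64} call obtains
                      the module base, to which x@DTPOFF is added.
     initial exec     x@GOTTPOFF/@GOTNTPOFF/@INDNTPOFF is loaded from
                      the GOT and added to (or subtracted from) the
                      thread pointer.
     local exec       the constant x@NTPOFF (or @TPOFF) is combined
                      with the thread pointer directly.  */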
6358 static rtx
6359 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6361 rtx dest, base, off, pic, tp;
6362 int type;
6364 switch (model)
6366 case TLS_MODEL_GLOBAL_DYNAMIC:
6367 dest = gen_reg_rtx (Pmode);
6368 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
6370 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
6372 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6374 start_sequence ();
6375 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
6376 insns = get_insns ();
6377 end_sequence ();
6379 emit_libcall_block (insns, dest, rax, x);
6381 else if (TARGET_64BIT && TARGET_GNU2_TLS)
6382 emit_insn (gen_tls_global_dynamic_64 (dest, x));
6383 else
6384 emit_insn (gen_tls_global_dynamic_32 (dest, x));
6386 if (TARGET_GNU2_TLS)
6388 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
6390 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
6392 break;
6394 case TLS_MODEL_LOCAL_DYNAMIC:
6395 base = gen_reg_rtx (Pmode);
6396 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
6398 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
6400 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
6402 start_sequence ();
6403 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
6404 insns = get_insns ();
6405 end_sequence ();
6407 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
6408 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
6409 emit_libcall_block (insns, base, rax, note);
6411 else if (TARGET_64BIT && TARGET_GNU2_TLS)
6412 emit_insn (gen_tls_local_dynamic_base_64 (base));
6413 else
6414 emit_insn (gen_tls_local_dynamic_base_32 (base));
6416 if (TARGET_GNU2_TLS)
6418 rtx x = ix86_tls_module_base ();
6420 base = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, base));
6422 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
6425 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
6426 off = gen_rtx_CONST (Pmode, off);
6428 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
6429 break;
6431 case TLS_MODEL_INITIAL_EXEC:
6432 if (TARGET_64BIT)
6434 pic = NULL;
6435 type = UNSPEC_GOTNTPOFF;
6437 else if (flag_pic)
6439 if (reload_in_progress)
6440 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6441 pic = pic_offset_table_rtx;
6442 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
6444 else if (!TARGET_ANY_GNU_TLS)
6446 pic = gen_reg_rtx (Pmode);
6447 emit_insn (gen_set_got (pic));
6448 type = UNSPEC_GOTTPOFF;
6450 else
6452 pic = NULL;
6453 type = UNSPEC_INDNTPOFF;
6456 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
6457 off = gen_rtx_CONST (Pmode, off);
6458 if (pic)
6459 off = gen_rtx_PLUS (Pmode, pic, off);
6460 off = gen_const_mem (Pmode, off);
6461 set_mem_alias_set (off, ix86_GOT_alias_set ());
6463 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
6465 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6466 off = force_reg (Pmode, off);
6467 return gen_rtx_PLUS (Pmode, base, off);
6469 else
6471 base = get_thread_pointer (true);
6472 dest = gen_reg_rtx (Pmode);
6473 emit_insn (gen_subsi3 (dest, base, off));
6475 break;
6477 case TLS_MODEL_LOCAL_EXEC:
6478 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
6479 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
6480 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
6481 off = gen_rtx_CONST (Pmode, off);
6483 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
6485 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6486 return gen_rtx_PLUS (Pmode, base, off);
6488 else
6490 base = get_thread_pointer (true);
6491 dest = gen_reg_rtx (Pmode);
6492 emit_insn (gen_subsi3 (dest, base, off));
6494 break;
6496 default:
6497 gcc_unreachable ();
6500 return dest;
6503 /* Try machine-dependent ways of modifying an illegitimate address
6504 to be legitimate. If we find one, return the new, valid address.
6505 This macro is used in only one place: `memory_address' in explow.c.
6507 OLDX is the address as it was before break_out_memory_refs was called.
6508 In some cases it is useful to look at this to decide what needs to be done.
6510 MODE and WIN are passed so that this macro can use
6511 GO_IF_LEGITIMATE_ADDRESS.
6513 It is always safe for this macro to do nothing. It exists to recognize
6514 opportunities to optimize the output.
6516 For the 80386, we handle X+REG by loading X into a register R and
6517 using R+REG. R will go in a general reg and indexing will be used.
6518 However, if REG is a broken-out memory address or multiplication,
6519 nothing needs to be done because REG can certainly go in a general reg.
6521 When -fpic is used, special handling is needed for symbolic references.
6522 See comments by legitimize_pic_address in i386.c for details. */
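/* For example (added; A and B are arbitrary registers), the
   non-canonical address

       (plus (reg B) (ashift (reg A) (const_int 2)))

   is rewritten below into roughly

       (plus (mult (reg A') (const_int 4)) (reg B))

   where A' is A forced into a register, which GO_IF_LEGITIMATE_ADDRESS
   then accepts as a base + index*scale operand.  */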
6524 rtx
6525 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
6527 int changed = 0;
6528 unsigned log;
6530 if (TARGET_DEBUG_ADDR)
6532 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
6533 GET_MODE_NAME (mode));
6534 debug_rtx (x);
6537 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
6538 if (log)
6539 return legitimize_tls_address (x, log, false);
6540 if (GET_CODE (x) == CONST
6541 && GET_CODE (XEXP (x, 0)) == PLUS
6542 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6543 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
6545 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
6546 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
6549 if (flag_pic && SYMBOLIC_CONST (x))
6550 return legitimize_pic_address (x, 0);
6552 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
6553 if (GET_CODE (x) == ASHIFT
6554 && GET_CODE (XEXP (x, 1)) == CONST_INT
6555 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
6557 changed = 1;
6558 log = INTVAL (XEXP (x, 1));
6559 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
6560 GEN_INT (1 << log));
6563 if (GET_CODE (x) == PLUS)
6565 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
6567 if (GET_CODE (XEXP (x, 0)) == ASHIFT
6568 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6569 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
6571 changed = 1;
6572 log = INTVAL (XEXP (XEXP (x, 0), 1));
6573 XEXP (x, 0) = gen_rtx_MULT (Pmode,
6574 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
6575 GEN_INT (1 << log));
6578 if (GET_CODE (XEXP (x, 1)) == ASHIFT
6579 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
6580 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
6582 changed = 1;
6583 log = INTVAL (XEXP (XEXP (x, 1), 1));
6584 XEXP (x, 1) = gen_rtx_MULT (Pmode,
6585 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
6586 GEN_INT (1 << log));
6589 /* Put multiply first if it isn't already. */
6590 if (GET_CODE (XEXP (x, 1)) == MULT)
6592 rtx tmp = XEXP (x, 0);
6593 XEXP (x, 0) = XEXP (x, 1);
6594 XEXP (x, 1) = tmp;
6595 changed = 1;
6598 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
6599 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
6600 created by virtual register instantiation, register elimination, and
6601 similar optimizations. */
6602 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
6604 changed = 1;
6605 x = gen_rtx_PLUS (Pmode,
6606 gen_rtx_PLUS (Pmode, XEXP (x, 0),
6607 XEXP (XEXP (x, 1), 0)),
6608 XEXP (XEXP (x, 1), 1));
6611 /* Canonicalize
6612 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
6613 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
6614 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
6615 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
6616 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
6617 && CONSTANT_P (XEXP (x, 1)))
6619 rtx constant;
6620 rtx other = NULL_RTX;
6622 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6624 constant = XEXP (x, 1);
6625 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
6627 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
6629 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
6630 other = XEXP (x, 1);
6632 else
6633 constant = 0;
6635 if (constant)
6637 changed = 1;
6638 x = gen_rtx_PLUS (Pmode,
6639 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
6640 XEXP (XEXP (XEXP (x, 0), 1), 0)),
6641 plus_constant (other, INTVAL (constant)));
6645 if (changed && legitimate_address_p (mode, x, FALSE))
6646 return x;
6648 if (GET_CODE (XEXP (x, 0)) == MULT)
6650 changed = 1;
6651 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
6654 if (GET_CODE (XEXP (x, 1)) == MULT)
6656 changed = 1;
6657 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
6660 if (changed
6661 && GET_CODE (XEXP (x, 1)) == REG
6662 && GET_CODE (XEXP (x, 0)) == REG)
6663 return x;
6665 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
6667 changed = 1;
6668 x = legitimize_pic_address (x, 0);
6671 if (changed && legitimate_address_p (mode, x, FALSE))
6672 return x;
6674 if (GET_CODE (XEXP (x, 0)) == REG)
6676 rtx temp = gen_reg_rtx (Pmode);
6677 rtx val = force_operand (XEXP (x, 1), temp);
6678 if (val != temp)
6679 emit_move_insn (temp, val);
6681 XEXP (x, 1) = temp;
6682 return x;
6685 else if (GET_CODE (XEXP (x, 1)) == REG)
6687 rtx temp = gen_reg_rtx (Pmode);
6688 rtx val = force_operand (XEXP (x, 0), temp);
6689 if (val != temp)
6690 emit_move_insn (temp, val);
6692 XEXP (x, 0) = temp;
6693 return x;
6697 return x;
6700 /* Print an integer constant expression in assembler syntax. Addition
6701 and subtraction are the only arithmetic that may appear in these
6702 expressions. FILE is the stdio stream to write to, X is the rtx, and
6703 CODE is the operand print code from the output string. */
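/* For instance (illustrative; symbol names are made up),
   (const (plus (symbol_ref "foo") (const_int 4))) prints as "4+foo" --
   the integer constant is always emitted first -- and
   (const (unspec [(symbol_ref "bar")] UNSPEC_GOTOFF)) prints as
   "bar@GOTOFF".  */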
6705 static void
6706 output_pic_addr_const (FILE *file, rtx x, int code)
6708 char buf[256];
6710 switch (GET_CODE (x))
6712 case PC:
6713 gcc_assert (flag_pic);
6714 putc ('.', file);
6715 break;
6717 case SYMBOL_REF:
6718 assemble_name (file, XSTR (x, 0));
6719 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
6720 fputs ("@PLT", file);
6721 break;
6723 case LABEL_REF:
6724 x = XEXP (x, 0);
6725 /* FALLTHRU */
6726 case CODE_LABEL:
6727 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
6728 assemble_name (asm_out_file, buf);
6729 break;
6731 case CONST_INT:
6732 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6733 break;
6735 case CONST:
6736 /* This used to output parentheses around the expression,
6737 but that does not work on the 386 (either ATT or BSD assembler). */
6738 output_pic_addr_const (file, XEXP (x, 0), code);
6739 break;
6741 case CONST_DOUBLE:
6742 if (GET_MODE (x) == VOIDmode)
6744 /* We can use %d if the number is <32 bits and positive. */
6745 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
6746 fprintf (file, "0x%lx%08lx",
6747 (unsigned long) CONST_DOUBLE_HIGH (x),
6748 (unsigned long) CONST_DOUBLE_LOW (x));
6749 else
6750 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
6752 else
6753 /* We can't handle floating point constants;
6754 PRINT_OPERAND must handle them. */
6755 output_operand_lossage ("floating constant misused");
6756 break;
6758 case PLUS:
6759 /* Some assemblers need integer constants to appear first. */
6760 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
6762 output_pic_addr_const (file, XEXP (x, 0), code);
6763 putc ('+', file);
6764 output_pic_addr_const (file, XEXP (x, 1), code);
6766 else
6768 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
6769 output_pic_addr_const (file, XEXP (x, 1), code);
6770 putc ('+', file);
6771 output_pic_addr_const (file, XEXP (x, 0), code);
6773 break;
6775 case MINUS:
6776 if (!TARGET_MACHO)
6777 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
6778 output_pic_addr_const (file, XEXP (x, 0), code);
6779 putc ('-', file);
6780 output_pic_addr_const (file, XEXP (x, 1), code);
6781 if (!TARGET_MACHO)
6782 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
6783 break;
6785 case UNSPEC:
6786 gcc_assert (XVECLEN (x, 0) == 1);
6787 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
6788 switch (XINT (x, 1))
6790 case UNSPEC_GOT:
6791 fputs ("@GOT", file);
6792 break;
6793 case UNSPEC_GOTOFF:
6794 fputs ("@GOTOFF", file);
6795 break;
6796 case UNSPEC_GOTPCREL:
6797 fputs ("@GOTPCREL(%rip)", file);
6798 break;
6799 case UNSPEC_GOTTPOFF:
6800 /* FIXME: This might be @TPOFF in Sun ld too. */
6801 fputs ("@GOTTPOFF", file);
6802 break;
6803 case UNSPEC_TPOFF:
6804 fputs ("@TPOFF", file);
6805 break;
6806 case UNSPEC_NTPOFF:
6807 if (TARGET_64BIT)
6808 fputs ("@TPOFF", file);
6809 else
6810 fputs ("@NTPOFF", file);
6811 break;
6812 case UNSPEC_DTPOFF:
6813 fputs ("@DTPOFF", file);
6814 break;
6815 case UNSPEC_GOTNTPOFF:
6816 if (TARGET_64BIT)
6817 fputs ("@GOTTPOFF(%rip)", file);
6818 else
6819 fputs ("@GOTNTPOFF", file);
6820 break;
6821 case UNSPEC_INDNTPOFF:
6822 fputs ("@INDNTPOFF", file);
6823 break;
6824 default:
6825 output_operand_lossage ("invalid UNSPEC as operand");
6826 break;
6828 break;
6830 default:
6831 output_operand_lossage ("invalid expression as operand");
6835 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6836 We need to emit DTP-relative relocations. */
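/* Assuming ASM_LONG expands to "\t.long\t" (true for the usual ELF
   configurations, but an assumption of this example), a 4-byte entry
   for `foo' comes out as

       .long foo@DTPOFF

   and an 8-byte entry as

       .long foo@DTPOFF, 0

   with the upper half simply zero-padded.  */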
6838 static void
6839 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6841 fputs (ASM_LONG, file);
6842 output_addr_const (file, x);
6843 fputs ("@DTPOFF", file);
6844 switch (size)
6846 case 4:
6847 break;
6848 case 8:
6849 fputs (", 0", file);
6850 break;
6851 default:
6852 gcc_unreachable ();
6856 /* In the name of slightly smaller debug output, and to cater to
6857 general assembler lossage, recognize PIC+GOTOFF and turn it back
6858 into a direct symbol reference. */
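/* For example (illustrative; the symbol name is made up, %ebx being the
   PIC register), the legitimized address

       (plus (reg %ebx) (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)))

   is turned back into plain (symbol_ref "foo") here, and on x86_64 a
   (mem (const (unspec [...] UNSPEC_GOTPCREL))) load collapses to the
   symbol it references.  */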
6860 static rtx
6861 ix86_delegitimize_address (rtx orig_x)
6863 rtx x = orig_x, y;
6865 if (GET_CODE (x) == MEM)
6866 x = XEXP (x, 0);
6868 if (TARGET_64BIT)
6870 if (GET_CODE (x) != CONST
6871 || GET_CODE (XEXP (x, 0)) != UNSPEC
6872 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6873 || GET_CODE (orig_x) != MEM)
6874 return orig_x;
6875 return XVECEXP (XEXP (x, 0), 0, 0);
6878 if (GET_CODE (x) != PLUS
6879 || GET_CODE (XEXP (x, 1)) != CONST)
6880 return orig_x;
6882 if (GET_CODE (XEXP (x, 0)) == REG
6883 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6884 /* %ebx + GOT/GOTOFF */
6885 y = NULL;
6886 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6888 /* %ebx + %reg * scale + GOT/GOTOFF */
6889 y = XEXP (x, 0);
6890 if (GET_CODE (XEXP (y, 0)) == REG
6891 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6892 y = XEXP (y, 1);
6893 else if (GET_CODE (XEXP (y, 1)) == REG
6894 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6895 y = XEXP (y, 0);
6896 else
6897 return orig_x;
6898 if (GET_CODE (y) != REG
6899 && GET_CODE (y) != MULT
6900 && GET_CODE (y) != ASHIFT)
6901 return orig_x;
6903 else
6904 return orig_x;
6906 x = XEXP (XEXP (x, 1), 0);
6907 if (GET_CODE (x) == UNSPEC
6908 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6909 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6911 if (y)
6912 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6913 return XVECEXP (x, 0, 0);
6916 if (GET_CODE (x) == PLUS
6917 && GET_CODE (XEXP (x, 0)) == UNSPEC
6918 && GET_CODE (XEXP (x, 1)) == CONST_INT
6919 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6920 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6921 && GET_CODE (orig_x) != MEM)))
6923 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6924 if (y)
6925 return gen_rtx_PLUS (Pmode, y, x);
6926 return x;
6929 return orig_x;
6932 static void
6933 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6934 int fp, FILE *file)
6936 const char *suffix;
6938 if (mode == CCFPmode || mode == CCFPUmode)
6940 enum rtx_code second_code, bypass_code;
6941 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6942 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
6943 code = ix86_fp_compare_code_to_integer (code);
6944 mode = CCmode;
6946 if (reverse)
6947 code = reverse_condition (code);
6949 switch (code)
6951 case EQ:
6952 suffix = "e";
6953 break;
6954 case NE:
6955 suffix = "ne";
6956 break;
6957 case GT:
6958 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
6959 suffix = "g";
6960 break;
6961 case GTU:
6962 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6963 Those same assemblers have the same but opposite lossage on cmov. */
6964 gcc_assert (mode == CCmode);
6965 suffix = fp ? "nbe" : "a";
6966 break;
6967 case LT:
6968 switch (mode)
6970 case CCNOmode:
6971 case CCGOCmode:
6972 suffix = "s";
6973 break;
6975 case CCmode:
6976 case CCGCmode:
6977 suffix = "l";
6978 break;
6980 default:
6981 gcc_unreachable ();
6983 break;
6984 case LTU:
6985 gcc_assert (mode == CCmode);
6986 suffix = "b";
6987 break;
6988 case GE:
6989 switch (mode)
6991 case CCNOmode:
6992 case CCGOCmode:
6993 suffix = "ns";
6994 break;
6996 case CCmode:
6997 case CCGCmode:
6998 suffix = "ge";
6999 break;
7001 default:
7002 gcc_unreachable ();
7004 break;
7005 case GEU:
7006 /* ??? As above. */
7007 gcc_assert (mode == CCmode);
7008 suffix = fp ? "nb" : "ae";
7009 break;
7010 case LE:
7011 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
7012 suffix = "le";
7013 break;
7014 case LEU:
7015 gcc_assert (mode == CCmode);
7016 suffix = "be";
7017 break;
7018 case UNORDERED:
7019 suffix = fp ? "u" : "p";
7020 break;
7021 case ORDERED:
7022 suffix = fp ? "nu" : "np";
7023 break;
7024 default:
7025 gcc_unreachable ();
7027 fputs (suffix, file);
7030 /* Print the name of register X to FILE based on its machine mode and number.
7031 If CODE is 'w', pretend the mode is HImode.
7032 If CODE is 'b', pretend the mode is QImode.
7033 If CODE is 'k', pretend the mode is SImode.
7034 If CODE is 'q', pretend the mode is DImode.
7035 If CODE is 'h', pretend the reg is the 'high' byte register.
7036 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
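/* Illustrative examples (added): for (reg:SI ax), no code or 'k' prints
   "%eax", 'w' prints "%ax", 'b' prints "%al", 'h' prints "%ah" and 'q'
   prints "%rax"; for an extended register such as r10 in SImode the
   output is "%r10d".  */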
7038 void
7039 print_reg (rtx x, int code, FILE *file)
7041 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
7042 && REGNO (x) != FRAME_POINTER_REGNUM
7043 && REGNO (x) != FLAGS_REG
7044 && REGNO (x) != FPSR_REG);
7046 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
7047 putc ('%', file);
7049 if (code == 'w' || MMX_REG_P (x))
7050 code = 2;
7051 else if (code == 'b')
7052 code = 1;
7053 else if (code == 'k')
7054 code = 4;
7055 else if (code == 'q')
7056 code = 8;
7057 else if (code == 'y')
7058 code = 3;
7059 else if (code == 'h')
7060 code = 0;
7061 else
7062 code = GET_MODE_SIZE (GET_MODE (x));
7064 /* Irritatingly, AMD extended registers use a different naming
7065 convention from the normal registers. */
7066 if (REX_INT_REG_P (x))
7068 gcc_assert (TARGET_64BIT);
7069 switch (code)
7071 case 0:
7072 error ("extended registers have no high halves");
7073 break;
7074 case 1:
7075 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
7076 break;
7077 case 2:
7078 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
7079 break;
7080 case 4:
7081 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
7082 break;
7083 case 8:
7084 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
7085 break;
7086 default:
7087 error ("unsupported operand size for extended register");
7088 break;
7090 return;
7092 switch (code)
7094 case 3:
7095 if (STACK_TOP_P (x))
7097 fputs ("st(0)", file);
7098 break;
7100 /* FALLTHRU */
7101 case 8:
7102 case 4:
7103 case 12:
7104 if (! ANY_FP_REG_P (x))
7105 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
7106 /* FALLTHRU */
7107 case 16:
7108 case 2:
7109 normal:
7110 fputs (hi_reg_name[REGNO (x)], file);
7111 break;
7112 case 1:
7113 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
7114 goto normal;
7115 fputs (qi_reg_name[REGNO (x)], file);
7116 break;
7117 case 0:
7118 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
7119 goto normal;
7120 fputs (qi_high_reg_name[REGNO (x)], file);
7121 break;
7122 default:
7123 gcc_unreachable ();
7127 /* Locate some local-dynamic symbol still in use by this function
7128 so that we can print its name in some tls_local_dynamic_base
7129 pattern. */
7131 static const char *
7132 get_some_local_dynamic_name (void)
7134 rtx insn;
7136 if (cfun->machine->some_ld_name)
7137 return cfun->machine->some_ld_name;
7139 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
7140 if (INSN_P (insn)
7141 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
7142 return cfun->machine->some_ld_name;
7144 gcc_unreachable ();
7147 static int
7148 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7150 rtx x = *px;
7152 if (GET_CODE (x) == SYMBOL_REF
7153 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7155 cfun->machine->some_ld_name = XSTR (x, 0);
7156 return 1;
7159 return 0;
7162 /* Meaning of CODE:
7163 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7164 C -- print opcode suffix for set/cmov insn.
7165 c -- like C, but print reversed condition
7166 F,f -- likewise, but for floating-point.
7167 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7168 otherwise nothing
7169 R -- print the prefix for register names.
7170 z -- print the opcode suffix for the size of the current operand.
7171 * -- print a star (in certain assembler syntax)
7172 A -- print an absolute memory reference.
7173 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7174 s -- print a shift double count, followed by the assembler's argument
7175 delimiter.
7176 b -- print the QImode name of the register for the indicated operand.
7177 %b0 would print %al if operands[0] is reg 0.
7178 w -- likewise, print the HImode name of the register.
7179 k -- likewise, print the SImode name of the register.
7180 q -- likewise, print the DImode name of the register.
7181 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7182 y -- print "st(0)" instead of "st" as a register.
7183 D -- print condition for SSE cmp instruction.
7184 P -- if PIC, print an @PLT suffix.
7185 X -- don't print any sort of PIC '@' suffix for a symbol.
7186 & -- print some in-use local-dynamic symbol name.
7187 H -- print a memory address offset by 8; used for sse high-parts.  */
7190 void
7191 print_operand (FILE *file, rtx x, int code)
7193 if (code)
7195 switch (code)
7197 case '*':
7198 if (ASSEMBLER_DIALECT == ASM_ATT)
7199 putc ('*', file);
7200 return;
7202 case '&':
7203 assemble_name (file, get_some_local_dynamic_name ());
7204 return;
7206 case 'A':
7207 switch (ASSEMBLER_DIALECT)
7209 case ASM_ATT:
7210 putc ('*', file);
7211 break;
7213 case ASM_INTEL:
7214 /* Intel syntax. For absolute addresses, registers should not
7215 be surrounded by brackets. */
7216 if (GET_CODE (x) != REG)
7218 putc ('[', file);
7219 PRINT_OPERAND (file, x, 0);
7220 putc (']', file);
7221 return;
7223 break;
7225 default:
7226 gcc_unreachable ();
7229 PRINT_OPERAND (file, x, 0);
7230 return;
7233 case 'L':
7234 if (ASSEMBLER_DIALECT == ASM_ATT)
7235 putc ('l', file);
7236 return;
7238 case 'W':
7239 if (ASSEMBLER_DIALECT == ASM_ATT)
7240 putc ('w', file);
7241 return;
7243 case 'B':
7244 if (ASSEMBLER_DIALECT == ASM_ATT)
7245 putc ('b', file);
7246 return;
7248 case 'Q':
7249 if (ASSEMBLER_DIALECT == ASM_ATT)
7250 putc ('l', file);
7251 return;
7253 case 'S':
7254 if (ASSEMBLER_DIALECT == ASM_ATT)
7255 putc ('s', file);
7256 return;
7258 case 'T':
7259 if (ASSEMBLER_DIALECT == ASM_ATT)
7260 putc ('t', file);
7261 return;
7263 case 'z':
7264 /* 387 opcodes don't get size suffixes if the operands are
7265 registers. */
7266 if (STACK_REG_P (x))
7267 return;
7269 /* Likewise if using Intel opcodes. */
7270 if (ASSEMBLER_DIALECT == ASM_INTEL)
7271 return;
7273 /* Derive the opcode suffix from the size of the operand. */
7274 switch (GET_MODE_SIZE (GET_MODE (x)))
7276 case 2:
7277 #ifdef HAVE_GAS_FILDS_FISTS
7278 putc ('s', file);
7279 #endif
7280 return;
7282 case 4:
7283 if (GET_MODE (x) == SFmode)
7285 putc ('s', file);
7286 return;
7288 else
7289 putc ('l', file);
7290 return;
7292 case 12:
7293 case 16:
7294 putc ('t', file);
7295 return;
7297 case 8:
7298 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7300 #ifdef GAS_MNEMONICS
7301 putc ('q', file);
7302 #else
7303 putc ('l', file);
7304 putc ('l', file);
7305 #endif
7307 else
7308 putc ('l', file);
7309 return;
7311 default:
7312 gcc_unreachable ();
7315 case 'b':
7316 case 'w':
7317 case 'k':
7318 case 'q':
7319 case 'h':
7320 case 'y':
7321 case 'X':
7322 case 'P':
7323 break;
7325 case 's':
7326 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7328 PRINT_OPERAND (file, x, 0);
7329 putc (',', file);
7331 return;
7333 case 'D':
7334 /* A little bit of brain damage here: the SSE compare instructions
7335 use completely different names for the comparisons than the
7336 fp conditional moves do. */
7337 switch (GET_CODE (x))
7339 case EQ:
7340 case UNEQ:
7341 fputs ("eq", file);
7342 break;
7343 case LT:
7344 case UNLT:
7345 fputs ("lt", file);
7346 break;
7347 case LE:
7348 case UNLE:
7349 fputs ("le", file);
7350 break;
7351 case UNORDERED:
7352 fputs ("unord", file);
7353 break;
7354 case NE:
7355 case LTGT:
7356 fputs ("neq", file);
7357 break;
7358 case UNGE:
7359 case GE:
7360 fputs ("nlt", file);
7361 break;
7362 case UNGT:
7363 case GT:
7364 fputs ("nle", file);
7365 break;
7366 case ORDERED:
7367 fputs ("ord", file);
7368 break;
7369 default:
7370 gcc_unreachable ();
7372 return;
7373 case 'O':
7374 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7375 if (ASSEMBLER_DIALECT == ASM_ATT)
7377 switch (GET_MODE (x))
7379 case HImode: putc ('w', file); break;
7380 case SImode:
7381 case SFmode: putc ('l', file); break;
7382 case DImode:
7383 case DFmode: putc ('q', file); break;
7384 default: gcc_unreachable ();
7386 putc ('.', file);
7388 #endif
7389 return;
7390 case 'C':
7391 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
7392 return;
7393 case 'F':
7394 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7395 if (ASSEMBLER_DIALECT == ASM_ATT)
7396 putc ('.', file);
7397 #endif
7398 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
7399 return;
7401 /* Like above, but reverse condition */
7402 case 'c':
7403 /* Check to see if argument to %c is really a constant
7404 and not a condition code which needs to be reversed. */
7405 if (!COMPARISON_P (x))
7407 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
7408 return;
7410 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
7411 return;
7412 case 'f':
7413 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7414 if (ASSEMBLER_DIALECT == ASM_ATT)
7415 putc ('.', file);
7416 #endif
7417 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
7418 return;
7420 case 'H':
7421 /* It doesn't actually matter what mode we use here, as we're
7422 only going to use this for printing. */
7423 x = adjust_address_nv (x, DImode, 8);
7424 break;
7426 case '+':
7428 rtx x;
7430 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
7431 return;
7433 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
7434 if (x)
7436 int pred_val = INTVAL (XEXP (x, 0));
7438 if (pred_val < REG_BR_PROB_BASE * 45 / 100
7439 || pred_val > REG_BR_PROB_BASE * 55 / 100)
7441 int taken = pred_val > REG_BR_PROB_BASE / 2;
7442 int cputaken = final_forward_branch_p (current_output_insn) == 0;
7444 /* Emit hints only in cases where the default branch prediction
7445 heuristics would fail. */
7446 if (taken != cputaken)
7448 /* We use 3e (DS) prefix for taken branches and
7449 2e (CS) prefix for not taken branches. */
7450 if (taken)
7451 fputs ("ds ; ", file);
7452 else
7453 fputs ("cs ; ", file);
7457 return;
7459 default:
7460 output_operand_lossage ("invalid operand code '%c'", code);
7464 if (GET_CODE (x) == REG)
7465 print_reg (x, code, file);
7467 else if (GET_CODE (x) == MEM)
7469 /* No `byte ptr' prefix for call instructions. */
7470 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
7472 const char * size;
7473 switch (GET_MODE_SIZE (GET_MODE (x)))
7475 case 1: size = "BYTE"; break;
7476 case 2: size = "WORD"; break;
7477 case 4: size = "DWORD"; break;
7478 case 8: size = "QWORD"; break;
7479 case 12: size = "XWORD"; break;
7480 case 16: size = "XMMWORD"; break;
7481 default:
7482 gcc_unreachable ();
7485 /* Check for explicit size override (codes 'b', 'w' and 'k') */
7486 if (code == 'b')
7487 size = "BYTE";
7488 else if (code == 'w')
7489 size = "WORD";
7490 else if (code == 'k')
7491 size = "DWORD";
7493 fputs (size, file);
7494 fputs (" PTR ", file);
7497 x = XEXP (x, 0);
7498 /* Avoid (%rip) for call operands. */
7499 if (CONSTANT_ADDRESS_P (x) && code == 'P'
7500 && GET_CODE (x) != CONST_INT)
7501 output_addr_const (file, x);
7502 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
7503 output_operand_lossage ("invalid constraints for operand");
7504 else
7505 output_address (x);
7508 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
7510 REAL_VALUE_TYPE r;
7511 long l;
7513 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7514 REAL_VALUE_TO_TARGET_SINGLE (r, l);
7516 if (ASSEMBLER_DIALECT == ASM_ATT)
7517 putc ('$', file);
7518 fprintf (file, "0x%08lx", l);
7521 /* These float cases don't actually occur as immediate operands. */
7522 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
7524 char dstr[30];
7526 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7527 fprintf (file, "%s", dstr);
7530 else if (GET_CODE (x) == CONST_DOUBLE
7531 && GET_MODE (x) == XFmode)
7533 char dstr[30];
7535 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7536 fprintf (file, "%s", dstr);
7539 else
7541 /* We have patterns that allow zero sets of memory, for instance.
7542 In 64-bit mode, we should probably support all 8-byte vectors,
7543 since we can in fact encode that into an immediate. */
7544 if (GET_CODE (x) == CONST_VECTOR)
7546 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
7547 x = const0_rtx;
7550 if (code != 'P')
7552 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
7554 if (ASSEMBLER_DIALECT == ASM_ATT)
7555 putc ('$', file);
7557 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
7558 || GET_CODE (x) == LABEL_REF)
7560 if (ASSEMBLER_DIALECT == ASM_ATT)
7561 putc ('$', file);
7562 else
7563 fputs ("OFFSET FLAT:", file);
7566 if (GET_CODE (x) == CONST_INT)
7567 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7568 else if (flag_pic)
7569 output_pic_addr_const (file, x, code);
7570 else
7571 output_addr_const (file, x);
7575 /* Print a memory operand whose address is ADDR. */
7577 void
7578 print_operand_address (FILE *file, rtx addr)
7580 struct ix86_address parts;
7581 rtx base, index, disp;
7582 int scale;
7583 int ok = ix86_decompose_address (addr, &parts);
7585 gcc_assert (ok);
7587 base = parts.base;
7588 index = parts.index;
7589 disp = parts.disp;
7590 scale = parts.scale;
7592 switch (parts.seg)
7594 case SEG_DEFAULT:
7595 break;
7596 case SEG_FS:
7597 case SEG_GS:
7598 if (USER_LABEL_PREFIX[0] == 0)
7599 putc ('%', file);
7600 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
7601 break;
7602 default:
7603 gcc_unreachable ();
7606 if (!base && !index)
7608       /* A displacement-only address requires special attention.  */
7610 if (GET_CODE (disp) == CONST_INT)
7612 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
7614 if (USER_LABEL_PREFIX[0] == 0)
7615 putc ('%', file);
7616 fputs ("ds:", file);
7618 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
7620 else if (flag_pic)
7621 output_pic_addr_const (file, disp, 0);
7622 else
7623 output_addr_const (file, disp);
7625       /* In 64-bit mode, use RIP-relative addressing; it is one byte shorter.  */
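      /* (In 64-bit mode the ModRM encoding that used to mean a bare disp32
         was repurposed for RIP-relative addresses, so an absolute 32-bit
         address needs an extra SIB byte while symbol(%rip) does not.)  */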
7626 if (TARGET_64BIT)
7628 if (GET_CODE (disp) == CONST
7629 && GET_CODE (XEXP (disp, 0)) == PLUS
7630 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7631 disp = XEXP (XEXP (disp, 0), 0);
7632 if (GET_CODE (disp) == LABEL_REF
7633 || (GET_CODE (disp) == SYMBOL_REF
7634 && SYMBOL_REF_TLS_MODEL (disp) == 0))
7635 fputs ("(%rip)", file);
7638 else
7640 if (ASSEMBLER_DIALECT == ASM_ATT)
7642 if (disp)
7644 if (flag_pic)
7645 output_pic_addr_const (file, disp, 0);
7646 else if (GET_CODE (disp) == LABEL_REF)
7647 output_asm_label (disp);
7648 else
7649 output_addr_const (file, disp);
7652 putc ('(', file);
7653 if (base)
7654 print_reg (base, 0, file);
7655 if (index)
7657 putc (',', file);
7658 print_reg (index, 0, file);
7659 if (scale != 1)
7660 fprintf (file, ",%d", scale);
7662 putc (')', file);
7664 else
7666 rtx offset = NULL_RTX;
7668 if (disp)
7670 /* Pull out the offset of a symbol; print any symbol itself. */
7671 if (GET_CODE (disp) == CONST
7672 && GET_CODE (XEXP (disp, 0)) == PLUS
7673 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7675 offset = XEXP (XEXP (disp, 0), 1);
7676 disp = gen_rtx_CONST (VOIDmode,
7677 XEXP (XEXP (disp, 0), 0));
7680 if (flag_pic)
7681 output_pic_addr_const (file, disp, 0);
7682 else if (GET_CODE (disp) == LABEL_REF)
7683 output_asm_label (disp);
7684 else if (GET_CODE (disp) == CONST_INT)
7685 offset = disp;
7686 else
7687 output_addr_const (file, disp);
7690 putc ('[', file);
7691 if (base)
7693 print_reg (base, 0, file);
7694 if (offset)
7696 if (INTVAL (offset) >= 0)
7697 putc ('+', file);
7698 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7701 else if (offset)
7702 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7703 else
7704 putc ('0', file);
7706 if (index)
7708 putc ('+', file);
7709 print_reg (index, 0, file);
7710 if (scale != 1)
7711 fprintf (file, "*%d", scale);
7713 putc (']', file);
7718 bool
7719 output_addr_const_extra (FILE *file, rtx x)
7721 rtx op;
7723 if (GET_CODE (x) != UNSPEC)
7724 return false;
7726 op = XVECEXP (x, 0, 0);
7727 switch (XINT (x, 1))
7729 case UNSPEC_GOTTPOFF:
7730 output_addr_const (file, op);
7731 /* FIXME: This might be @TPOFF in Sun ld. */
7732 fputs ("@GOTTPOFF", file);
7733 break;
7734 case UNSPEC_TPOFF:
7735 output_addr_const (file, op);
7736 fputs ("@TPOFF", file);
7737 break;
7738 case UNSPEC_NTPOFF:
7739 output_addr_const (file, op);
7740 if (TARGET_64BIT)
7741 fputs ("@TPOFF", file);
7742 else
7743 fputs ("@NTPOFF", file);
7744 break;
7745 case UNSPEC_DTPOFF:
7746 output_addr_const (file, op);
7747 fputs ("@DTPOFF", file);
7748 break;
7749 case UNSPEC_GOTNTPOFF:
7750 output_addr_const (file, op);
7751 if (TARGET_64BIT)
7752 fputs ("@GOTTPOFF(%rip)", file);
7753 else
7754 fputs ("@GOTNTPOFF", file);
7755 break;
7756 case UNSPEC_INDNTPOFF:
7757 output_addr_const (file, op);
7758 fputs ("@INDNTPOFF", file);
7759 break;
7761 default:
7762 return false;
7765 return true;
7768 /* Split one or more DImode RTL references into pairs of SImode
7769 references. The RTL can be REG, offsettable MEM, integer constant, or
7770 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
7771 split and "num" is its length. lo_half and hi_half are output arrays
7772 that parallel "operands". */
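/* For example, a DImode memory operand (mem:DI addr) splits into
   (mem:SI addr) for the low word and (mem:SI (addr + 4)) for the high word;
   register and constant operands are split with simplify_gen_subreg at byte
   offsets 0 and 4.  */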
7774 void
7775 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7777 while (num--)
7779 rtx op = operands[num];
7781       /* simplify_subreg refuses to split volatile memory references,
7782          but we still have to handle them.  */
7783 if (GET_CODE (op) == MEM)
7785 lo_half[num] = adjust_address (op, SImode, 0);
7786 hi_half[num] = adjust_address (op, SImode, 4);
7788 else
7790 lo_half[num] = simplify_gen_subreg (SImode, op,
7791 GET_MODE (op) == VOIDmode
7792 ? DImode : GET_MODE (op), 0);
7793 hi_half[num] = simplify_gen_subreg (SImode, op,
7794 GET_MODE (op) == VOIDmode
7795 ? DImode : GET_MODE (op), 4);
7799 /* Split one or more TImode RTL references into pairs of DImode
7800 references. The RTL can be REG, offsettable MEM, integer constant, or
7801 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
7802 split and "num" is its length. lo_half and hi_half are output arrays
7803 that parallel "operands". */
7805 void
7806 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7808 while (num--)
7810 rtx op = operands[num];
7812       /* simplify_subreg refuses to split volatile memory references, but we
7813          still have to handle them.  */
7814 if (GET_CODE (op) == MEM)
7816 lo_half[num] = adjust_address (op, DImode, 0);
7817 hi_half[num] = adjust_address (op, DImode, 8);
7819 else
7821 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
7822 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
7827 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
7828 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
7829 is the expression of the binary operation. The output may either be
7830 emitted here, or returned to the caller, like all output_* functions.
7832 There is no guarantee that the operands are the same mode, as they
7833 might be within FLOAT or FLOAT_EXTEND expressions. */
7835 #ifndef SYSV386_COMPAT
7836 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
7837 wants to fix the assemblers because that causes incompatibility
7838 with gcc. No-one wants to fix gcc because that causes
7839 incompatibility with assemblers... You can use the option of
7840 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
7841 #define SYSV386_COMPAT 1
7842 #endif
7844 const char *
7845 output_387_binary_op (rtx insn, rtx *operands)
7847 static char buf[30];
7848 const char *p;
7849 const char *ssep;
7850 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
7852 #ifdef ENABLE_CHECKING
7853   /* Even if we do not want to check the inputs, this documents the input
7854      constraints, which helps in understanding the following code.  */
7855 if (STACK_REG_P (operands[0])
7856 && ((REG_P (operands[1])
7857 && REGNO (operands[0]) == REGNO (operands[1])
7858 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7859 || (REG_P (operands[2])
7860 && REGNO (operands[0]) == REGNO (operands[2])
7861 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7862 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7863 ; /* ok */
7864 else
7865 gcc_assert (is_sse);
7866 #endif
7868 switch (GET_CODE (operands[3]))
7870 case PLUS:
7871 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7872 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7873 p = "fiadd";
7874 else
7875 p = "fadd";
7876 ssep = "add";
7877 break;
7879 case MINUS:
7880 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7881 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7882 p = "fisub";
7883 else
7884 p = "fsub";
7885 ssep = "sub";
7886 break;
7888 case MULT:
7889 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7890 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7891 p = "fimul";
7892 else
7893 p = "fmul";
7894 ssep = "mul";
7895 break;
7897 case DIV:
7898 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7899 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7900 p = "fidiv";
7901 else
7902 p = "fdiv";
7903 ssep = "div";
7904 break;
7906 default:
7907 gcc_unreachable ();
7910 if (is_sse)
7912 strcpy (buf, ssep);
7913 if (GET_MODE (operands[0]) == SFmode)
7914 strcat (buf, "ss\t{%2, %0|%0, %2}");
7915 else
7916 strcat (buf, "sd\t{%2, %0|%0, %2}");
7917 return buf;
7919 strcpy (buf, p);
7921 switch (GET_CODE (operands[3]))
7923 case MULT:
7924 case PLUS:
7925 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7927 rtx temp = operands[2];
7928 operands[2] = operands[1];
7929 operands[1] = temp;
7932       /* We know operands[0] == operands[1].  */
7934 if (GET_CODE (operands[2]) == MEM)
7936 p = "%z2\t%2";
7937 break;
7940 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7942 if (STACK_TOP_P (operands[0]))
7943 /* How is it that we are storing to a dead operand[2]?
7944 Well, presumably operands[1] is dead too. We can't
7945 store the result to st(0) as st(0) gets popped on this
7946 instruction. Instead store to operands[2] (which I
7947 think has to be st(1)). st(1) will be popped later.
7948 gcc <= 2.8.1 didn't have this check and generated
7949 assembly code that the Unixware assembler rejected. */
7950 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7951 else
7952 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7953 break;
7956 if (STACK_TOP_P (operands[0]))
7957 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7958 else
7959 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7960 break;
7962 case MINUS:
7963 case DIV:
7964 if (GET_CODE (operands[1]) == MEM)
7966 p = "r%z1\t%1";
7967 break;
7970 if (GET_CODE (operands[2]) == MEM)
7972 p = "%z2\t%2";
7973 break;
7976 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7978 #if SYSV386_COMPAT
7979 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7980 derived assemblers, confusingly reverse the direction of
7981 the operation for fsub{r} and fdiv{r} when the
7982 destination register is not st(0). The Intel assembler
7983 doesn't have this brain damage. Read !SYSV386_COMPAT to
7984 figure out what the hardware really does. */
7985 if (STACK_TOP_P (operands[0]))
7986 p = "{p\t%0, %2|rp\t%2, %0}";
7987 else
7988 p = "{rp\t%2, %0|p\t%0, %2}";
7989 #else
7990 if (STACK_TOP_P (operands[0]))
7991 /* As above for fmul/fadd, we can't store to st(0). */
7992 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7993 else
7994 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7995 #endif
7996 break;
7999 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
8001 #if SYSV386_COMPAT
8002 if (STACK_TOP_P (operands[0]))
8003 p = "{rp\t%0, %1|p\t%1, %0}";
8004 else
8005 p = "{p\t%1, %0|rp\t%0, %1}";
8006 #else
8007 if (STACK_TOP_P (operands[0]))
8008 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
8009 else
8010 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
8011 #endif
8012 break;
8015 if (STACK_TOP_P (operands[0]))
8017 if (STACK_TOP_P (operands[1]))
8018 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8019 else
8020 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
8021 break;
8023 else if (STACK_TOP_P (operands[1]))
8025 #if SYSV386_COMPAT
8026 p = "{\t%1, %0|r\t%0, %1}";
8027 #else
8028 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
8029 #endif
8031 else
8033 #if SYSV386_COMPAT
8034 p = "{r\t%2, %0|\t%0, %2}";
8035 #else
8036 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8037 #endif
8039 break;
8041 default:
8042 gcc_unreachable ();
8045 strcat (buf, p);
8046 return buf;
8049 /* Return needed mode for entity in optimize_mode_switching pass. */
8052 ix86_mode_needed (int entity, rtx insn)
8054 enum attr_i387_cw mode;
8056   /* The mode UNINITIALIZED is used to store the control word after a
8057      function call or ASM pattern.  The mode ANY specifies that the function
8058      has no requirements on the control word and makes no changes to the
8059      bits we are interested in.  */
8061 if (CALL_P (insn)
8062 || (NONJUMP_INSN_P (insn)
8063 && (asm_noperands (PATTERN (insn)) >= 0
8064 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
8065 return I387_CW_UNINITIALIZED;
8067 if (recog_memoized (insn) < 0)
8068 return I387_CW_ANY;
8070 mode = get_attr_i387_cw (insn);
8072 switch (entity)
8074 case I387_TRUNC:
8075 if (mode == I387_CW_TRUNC)
8076 return mode;
8077 break;
8079 case I387_FLOOR:
8080 if (mode == I387_CW_FLOOR)
8081 return mode;
8082 break;
8084 case I387_CEIL:
8085 if (mode == I387_CW_CEIL)
8086 return mode;
8087 break;
8089 case I387_MASK_PM:
8090 if (mode == I387_CW_MASK_PM)
8091 return mode;
8092 break;
8094 default:
8095 gcc_unreachable ();
8098 return I387_CW_ANY;
8101 /* Output code to initialize the control word copies used by the trunc?f?i
8102    and rounding patterns.  The current control word is saved with fnstcw, then
8103    a copy adjusted for MODE is stored in the stack slot for MODE.  */
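/* In the x87 control word, bits 10-11 form the rounding-control field:
   00 = round to nearest, 01 = round down, 10 = round up, 11 = truncate
   (hence the 0x0c00 mask and the 0x0400/0x0800/0x0c00 values below), and
   bit 5 (0x0020) is the precision exception mask used for nearbyint.  */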
8105 void
8106 emit_i387_cw_initialization (int mode)
8108 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
8109 rtx new_mode;
8111 int slot;
8113 rtx reg = gen_reg_rtx (HImode);
8115 emit_insn (gen_x86_fnstcw_1 (stored_mode));
8116 emit_move_insn (reg, stored_mode);
8118 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
8120 switch (mode)
8122 case I387_CW_TRUNC:
8123 /* round toward zero (truncate) */
8124 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
8125 slot = SLOT_CW_TRUNC;
8126 break;
8128 case I387_CW_FLOOR:
8129 /* round down toward -oo */
8130 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8131 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
8132 slot = SLOT_CW_FLOOR;
8133 break;
8135 case I387_CW_CEIL:
8136 /* round up toward +oo */
8137 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8138 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
8139 slot = SLOT_CW_CEIL;
8140 break;
8142 case I387_CW_MASK_PM:
8143 /* mask precision exception for nearbyint() */
8144 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8145 slot = SLOT_CW_MASK_PM;
8146 break;
8148 default:
8149 gcc_unreachable ();
8152 else
8154 switch (mode)
8156 case I387_CW_TRUNC:
8157 /* round toward zero (truncate) */
8158 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8159 slot = SLOT_CW_TRUNC;
8160 break;
8162 case I387_CW_FLOOR:
8163 /* round down toward -oo */
8164 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8165 slot = SLOT_CW_FLOOR;
8166 break;
8168 case I387_CW_CEIL:
8169 /* round up toward +oo */
8170 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8171 slot = SLOT_CW_CEIL;
8172 break;
8174 case I387_CW_MASK_PM:
8175 /* mask precision exception for nearbyint() */
8176 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8177 slot = SLOT_CW_MASK_PM;
8178 break;
8180 default:
8181 gcc_unreachable ();
8185 gcc_assert (slot < MAX_386_STACK_LOCALS);
8187 new_mode = assign_386_stack_local (HImode, slot);
8188 emit_move_insn (new_mode, reg);
8191 /* Output code for INSN to convert a float to a signed int. OPERANDS
8192 are the insn operands. The output may be [HSD]Imode and the input
8193 operand may be [SDX]Fmode. */
8195 const char *
8196 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8198 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8199 int dimode_p = GET_MODE (operands[0]) == DImode;
8200 int round_mode = get_attr_i387_cw (insn);
8202 /* Jump through a hoop or two for DImode, since the hardware has no
8203 non-popping instruction. We used to do this a different way, but
8204 that was somewhat fragile and broke with post-reload splitters. */
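   /* The 64-bit integer store exists only in a popping form (fistpll), and
      fisttp always pops as well, so when the value must stay on the reg-stack
      we first duplicate st(0) with fld and let the popping store consume the
      copy.  */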
8205 if ((dimode_p || fisttp) && !stack_top_dies)
8206 output_asm_insn ("fld\t%y1", operands);
8208 gcc_assert (STACK_TOP_P (operands[1]));
8209 gcc_assert (GET_CODE (operands[0]) == MEM);
8211 if (fisttp)
8212 output_asm_insn ("fisttp%z0\t%0", operands);
8213 else
8215 if (round_mode != I387_CW_ANY)
8216 output_asm_insn ("fldcw\t%3", operands);
8217 if (stack_top_dies || dimode_p)
8218 output_asm_insn ("fistp%z0\t%0", operands);
8219 else
8220 output_asm_insn ("fist%z0\t%0", operands);
8221 if (round_mode != I387_CW_ANY)
8222 output_asm_insn ("fldcw\t%2", operands);
8225 return "";
8228 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8229 should be used. UNORDERED_P is true when fucom should be used. */
8231 const char *
8232 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8234 int stack_top_dies;
8235 rtx cmp_op0, cmp_op1;
8236 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
8238 if (eflags_p)
8240 cmp_op0 = operands[0];
8241 cmp_op1 = operands[1];
8243 else
8245 cmp_op0 = operands[1];
8246 cmp_op1 = operands[2];
8249 if (is_sse)
8251 if (GET_MODE (operands[0]) == SFmode)
8252 if (unordered_p)
8253 return "ucomiss\t{%1, %0|%0, %1}";
8254 else
8255 return "comiss\t{%1, %0|%0, %1}";
8256 else
8257 if (unordered_p)
8258 return "ucomisd\t{%1, %0|%0, %1}";
8259 else
8260 return "comisd\t{%1, %0|%0, %1}";
8263 gcc_assert (STACK_TOP_P (cmp_op0));
8265 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8267 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
8269 if (stack_top_dies)
8271 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
8272 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
8274 else
8275 return "ftst\n\tfnstsw\t%0";
8278 if (STACK_REG_P (cmp_op1)
8279 && stack_top_dies
8280 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8281 && REGNO (cmp_op1) != FIRST_STACK_REG)
8283       /* If the top of the 387 stack dies, and the other operand
8284          is also a stack register that dies, then this must be an
8285          `fcompp' float compare.  */
8287 if (eflags_p)
8289 /* There is no double popping fcomi variant. Fortunately,
8290 eflags is immune from the fstp's cc clobbering. */
8291 if (unordered_p)
8292 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8293 else
8294 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8295 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
8297 else
8299 if (unordered_p)
8300 return "fucompp\n\tfnstsw\t%0";
8301 else
8302 return "fcompp\n\tfnstsw\t%0";
8305 else
8307 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
8309 static const char * const alt[16] =
8311 "fcom%z2\t%y2\n\tfnstsw\t%0",
8312 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8313 "fucom%z2\t%y2\n\tfnstsw\t%0",
8314 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8316 "ficom%z2\t%y2\n\tfnstsw\t%0",
8317 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8318 NULL,
8319 NULL,
8321 "fcomi\t{%y1, %0|%0, %y1}",
8322 "fcomip\t{%y1, %0|%0, %y1}",
8323 "fucomi\t{%y1, %0|%0, %y1}",
8324 "fucomip\t{%y1, %0|%0, %y1}",
8326 NULL,
8327 NULL,
8328 NULL,
8329 NULL
8332 int mask;
8333 const char *ret;
8335 mask = eflags_p << 3;
8336 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
8337 mask |= unordered_p << 1;
8338 mask |= stack_top_dies;
8340 gcc_assert (mask < 16);
8341 ret = alt[mask];
8342 gcc_assert (ret);
8344 return ret;
8348 void
8349 ix86_output_addr_vec_elt (FILE *file, int value)
8351 const char *directive = ASM_LONG;
8353 #ifdef ASM_QUAD
8354 if (TARGET_64BIT)
8355 directive = ASM_QUAD;
8356 #else
8357 gcc_assert (!TARGET_64BIT);
8358 #endif
8360 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
8363 void
8364 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
8366 if (TARGET_64BIT)
8367 fprintf (file, "%s%s%d-%s%d\n",
8368 ASM_LONG, LPREFIX, value, LPREFIX, rel);
8369 else if (HAVE_AS_GOTOFF_IN_DATA)
8370 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
8371 #if TARGET_MACHO
8372 else if (TARGET_MACHO)
8374 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
8375 machopic_output_function_base_name (file);
8376       fprintf (file, "\n");
8378 #endif
8379 else
8380 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
8381 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
8384 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
8385 for the target. */
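/* "xor reg, reg" is the shorter encoding (2 bytes versus 5 for moving an
   immediate zero into a 32-bit register), but it clobbers the flags, which is
   why the xor form below is wrapped in a parallel with a flags-register
   clobber; TARGET_USE_MOV0 selects the mov form on chips that prefer it.  */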
8387 void
8388 ix86_expand_clear (rtx dest)
8390 rtx tmp;
8392 /* We play register width games, which are only valid after reload. */
8393 gcc_assert (reload_completed);
8395 /* Avoid HImode and its attendant prefix byte. */
8396 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
8397 dest = gen_rtx_REG (SImode, REGNO (dest));
8399 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
8401 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
8402 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
8404 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
8405 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
8408 emit_insn (tmp);
8411 /* X is an unchanging MEM. If it is a constant pool reference, return
8412 the constant pool rtx, else NULL. */
8415 maybe_get_pool_constant (rtx x)
8417 x = ix86_delegitimize_address (XEXP (x, 0));
8419 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
8420 return get_pool_constant (x);
8422 return NULL_RTX;
8425 void
8426 ix86_expand_move (enum machine_mode mode, rtx operands[])
8428 int strict = (reload_in_progress || reload_completed);
8429 rtx op0, op1;
8430 enum tls_model model;
8432 op0 = operands[0];
8433 op1 = operands[1];
8435 if (GET_CODE (op1) == SYMBOL_REF)
8437 model = SYMBOL_REF_TLS_MODEL (op1);
8438 if (model)
8440 op1 = legitimize_tls_address (op1, model, true);
8441 op1 = force_operand (op1, op0);
8442 if (op1 == op0)
8443 return;
8446 else if (GET_CODE (op1) == CONST
8447 && GET_CODE (XEXP (op1, 0)) == PLUS
8448 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
8450 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
8451 if (model)
8453 rtx addend = XEXP (XEXP (op1, 0), 1);
8454 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
8455 op1 = force_operand (op1, NULL);
8456 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
8457 op0, 1, OPTAB_DIRECT);
8458 if (op1 == op0)
8459 return;
8463 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
8465 #if TARGET_MACHO
8466 if (MACHOPIC_PURE)
8468 rtx temp = ((reload_in_progress
8469 || ((op0 && GET_CODE (op0) == REG)
8470 && mode == Pmode))
8471 ? op0 : gen_reg_rtx (Pmode));
8472 op1 = machopic_indirect_data_reference (op1, temp);
8473 op1 = machopic_legitimize_pic_address (op1, mode,
8474 temp == op1 ? 0 : temp);
8476 else if (MACHOPIC_INDIRECT)
8477 op1 = machopic_indirect_data_reference (op1, 0);
8478 if (op0 == op1)
8479 return;
8480 #else
8481 if (GET_CODE (op0) == MEM)
8482 op1 = force_reg (Pmode, op1);
8483 else
8484 op1 = legitimize_address (op1, op1, Pmode);
8485 #endif /* TARGET_MACHO */
8487 else
8489 if (GET_CODE (op0) == MEM
8490 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
8491 || !push_operand (op0, mode))
8492 && GET_CODE (op1) == MEM)
8493 op1 = force_reg (mode, op1);
8495 if (push_operand (op0, mode)
8496 && ! general_no_elim_operand (op1, mode))
8497 op1 = copy_to_mode_reg (mode, op1);
8499       /* Force large constants in 64-bit compilation into a register
8500          to get them CSEd.  */
8501 if (TARGET_64BIT && mode == DImode
8502 && immediate_operand (op1, mode)
8503 && !x86_64_zext_immediate_operand (op1, VOIDmode)
8504 && !register_operand (op0, mode)
8505 && optimize && !reload_completed && !reload_in_progress)
8506 op1 = copy_to_mode_reg (mode, op1);
8508 if (FLOAT_MODE_P (mode))
8510       /* If we are loading a floating point constant into a register,
8511          force the value to memory now, since we'll get better code
8512          out of the back end.  */
8514 if (strict)
8516 else if (GET_CODE (op1) == CONST_DOUBLE)
8518 op1 = validize_mem (force_const_mem (mode, op1));
8519 if (!register_operand (op0, mode))
8521 rtx temp = gen_reg_rtx (mode);
8522 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
8523 emit_move_insn (op0, temp);
8524 return;
8530 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8533 void
8534 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
8536 rtx op0 = operands[0], op1 = operands[1];
8538   /* Force constants other than zero into memory.  We do not know how
8539      the instructions used to build constants modify the upper 64 bits
8540      of the register; once we have that information, we may be able
8541      to handle some of them more efficiently.  */
8542 if ((reload_in_progress | reload_completed) == 0
8543 && register_operand (op0, mode)
8544 && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
8545 op1 = validize_mem (force_const_mem (mode, op1));
8547 /* Make operand1 a register if it isn't already. */
8548 if (!no_new_pseudos
8549 && !register_operand (op0, mode)
8550 && !register_operand (op1, mode))
8552 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
8553 return;
8556 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8559 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
8560 straight to ix86_expand_vector_move. */
8562 void
8563 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
8565 rtx op0, op1, m;
8567 op0 = operands[0];
8568 op1 = operands[1];
8570 if (MEM_P (op1))
8572 /* If we're optimizing for size, movups is the smallest. */
8573 if (optimize_size)
8575 op0 = gen_lowpart (V4SFmode, op0);
8576 op1 = gen_lowpart (V4SFmode, op1);
8577 emit_insn (gen_sse_movups (op0, op1));
8578 return;
8581 /* ??? If we have typed data, then it would appear that using
8582 movdqu is the only way to get unaligned data loaded with
8583 integer type. */
8584 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8586 op0 = gen_lowpart (V16QImode, op0);
8587 op1 = gen_lowpart (V16QImode, op1);
8588 emit_insn (gen_sse2_movdqu (op0, op1));
8589 return;
8592 if (TARGET_SSE2 && mode == V2DFmode)
8594 rtx zero;
8596 /* When SSE registers are split into halves, we can avoid
8597 writing to the top half twice. */
8598 if (TARGET_SSE_SPLIT_REGS)
8600 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8601 zero = op0;
8603 else
8605 /* ??? Not sure about the best option for the Intel chips.
8606 The following would seem to satisfy; the register is
8607 entirely cleared, breaking the dependency chain. We
8608 then store to the upper half, with a dependency depth
8609 of one. A rumor has it that Intel recommends two movsd
8610 followed by an unpacklpd, but this is unconfirmed. And
8611 given that the dependency depth of the unpacklpd would
8612 still be one, I'm not sure why this would be better. */
8613 zero = CONST0_RTX (V2DFmode);
8616 m = adjust_address (op1, DFmode, 0);
8617 emit_insn (gen_sse2_loadlpd (op0, zero, m));
8618 m = adjust_address (op1, DFmode, 8);
8619 emit_insn (gen_sse2_loadhpd (op0, op0, m));
8621 else
8623 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
8624 emit_move_insn (op0, CONST0_RTX (mode));
8625 else
8626 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8628 if (mode != V4SFmode)
8629 op0 = gen_lowpart (V4SFmode, op0);
8630 m = adjust_address (op1, V2SFmode, 0);
8631 emit_insn (gen_sse_loadlps (op0, op0, m));
8632 m = adjust_address (op1, V2SFmode, 8);
8633 emit_insn (gen_sse_loadhps (op0, op0, m));
8636 else if (MEM_P (op0))
8638 /* If we're optimizing for size, movups is the smallest. */
8639 if (optimize_size)
8641 op0 = gen_lowpart (V4SFmode, op0);
8642 op1 = gen_lowpart (V4SFmode, op1);
8643 emit_insn (gen_sse_movups (op0, op1));
8644 return;
8647 /* ??? Similar to above, only less clear because of quote
8648 typeless stores unquote. */
8649 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
8650 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8652 op0 = gen_lowpart (V16QImode, op0);
8653 op1 = gen_lowpart (V16QImode, op1);
8654 emit_insn (gen_sse2_movdqu (op0, op1));
8655 return;
8658 if (TARGET_SSE2 && mode == V2DFmode)
8660 m = adjust_address (op0, DFmode, 0);
8661 emit_insn (gen_sse2_storelpd (m, op1));
8662 m = adjust_address (op0, DFmode, 8);
8663 emit_insn (gen_sse2_storehpd (m, op1));
8665 else
8667 if (mode != V4SFmode)
8668 op1 = gen_lowpart (V4SFmode, op1);
8669 m = adjust_address (op0, V2SFmode, 0);
8670 emit_insn (gen_sse_storelps (m, op1));
8671 m = adjust_address (op0, V2SFmode, 8);
8672 emit_insn (gen_sse_storehps (m, op1));
8675 else
8676 gcc_unreachable ();
8679 /* Expand a push in MODE. This is some mode for which we do not support
8680 proper push instructions, at least from the registers that we expect
8681 the value to live in. */
8683 void
8684 ix86_expand_push (enum machine_mode mode, rtx x)
8686 rtx tmp;
8688 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
8689 GEN_INT (-GET_MODE_SIZE (mode)),
8690 stack_pointer_rtx, 1, OPTAB_DIRECT);
8691 if (tmp != stack_pointer_rtx)
8692 emit_move_insn (stack_pointer_rtx, tmp);
8694 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
8695 emit_move_insn (tmp, x);
8698 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
8699 destination to use for the operation. If different from the true
8700 destination in operands[0], a copy operation will be required. */
8703 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
8704 rtx operands[])
8706 int matching_memory;
8707 rtx src1, src2, dst;
8709 dst = operands[0];
8710 src1 = operands[1];
8711 src2 = operands[2];
8713 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
8714 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8715 && (rtx_equal_p (dst, src2)
8716 || immediate_operand (src1, mode)))
8718 rtx temp = src1;
8719 src1 = src2;
8720 src2 = temp;
8723 /* If the destination is memory, and we do not have matching source
8724 operands, do things in registers. */
8725 matching_memory = 0;
8726 if (GET_CODE (dst) == MEM)
8728 if (rtx_equal_p (dst, src1))
8729 matching_memory = 1;
8730 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8731 && rtx_equal_p (dst, src2))
8732 matching_memory = 2;
8733 else
8734 dst = gen_reg_rtx (mode);
8737 /* Both source operands cannot be in memory. */
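  /* (x86 arithmetic instructions accept at most one memory operand, so the
     non-matching source is forced into a register here.)  */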
8738 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
8740 if (matching_memory != 2)
8741 src2 = force_reg (mode, src2);
8742 else
8743 src1 = force_reg (mode, src1);
8746   /* If the operation is not commutative, source 1 cannot be a constant
8747      or non-matching memory.  */
8748 if ((CONSTANT_P (src1)
8749 || (!matching_memory && GET_CODE (src1) == MEM))
8750 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8751 src1 = force_reg (mode, src1);
8753 src1 = operands[1] = src1;
8754 src2 = operands[2] = src2;
8755 return dst;
8758 /* Similarly, but assume that the destination has already been
8759 set up properly. */
8761 void
8762 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
8763 enum machine_mode mode, rtx operands[])
8765 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
8766 gcc_assert (dst == operands[0]);
8769 /* Attempt to expand a binary operator.  Make the expansion closer to the
8770    actual machine than just general_operand, which would allow 3 separate
8771    memory references (one output, two input) in a single insn.  */
8773 void
8774 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
8775 rtx operands[])
8777 rtx src1, src2, dst, op, clob;
8779 dst = ix86_fixup_binary_operands (code, mode, operands);
8780 src1 = operands[1];
8781 src2 = operands[2];
8783 /* Emit the instruction. */
8785 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
8786 if (reload_in_progress)
8788 /* Reload doesn't know about the flags register, and doesn't know that
8789 it doesn't want to clobber it. We can only do this with PLUS. */
8790 gcc_assert (code == PLUS);
8791 emit_insn (op);
8793 else
8795 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8796 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8799 /* Fix up the destination if needed. */
8800 if (dst != operands[0])
8801 emit_move_insn (operands[0], dst);
8804 /* Return TRUE or FALSE depending on whether the binary operator meets the
8805 appropriate constraints. */
8808 ix86_binary_operator_ok (enum rtx_code code,
8809 enum machine_mode mode ATTRIBUTE_UNUSED,
8810 rtx operands[3])
8812 /* Both source operands cannot be in memory. */
8813 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
8814 return 0;
8815   /* If the operation is not commutative, source 1 cannot be a constant.  */
8816 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8817 return 0;
8818 /* If the destination is memory, we must have a matching source operand. */
8819 if (GET_CODE (operands[0]) == MEM
8820 && ! (rtx_equal_p (operands[0], operands[1])
8821 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8822 && rtx_equal_p (operands[0], operands[2]))))
8823 return 0;
8824   /* If the operation is not commutative and source 1 is memory, we must
8825      have a matching destination.  */
8826 if (GET_CODE (operands[1]) == MEM
8827 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
8828 && ! rtx_equal_p (operands[0], operands[1]))
8829 return 0;
8830 return 1;
8833 /* Attempt to expand a unary operator.  Make the expansion closer to the
8834    actual machine than just general_operand, which would allow 2 separate
8835    memory references (one output, one input) in a single insn.  */
8837 void
8838 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
8839 rtx operands[])
8841 int matching_memory;
8842 rtx src, dst, op, clob;
8844 dst = operands[0];
8845 src = operands[1];
8847 /* If the destination is memory, and we do not have matching source
8848 operands, do things in registers. */
8849 matching_memory = 0;
8850 if (MEM_P (dst))
8852 if (rtx_equal_p (dst, src))
8853 matching_memory = 1;
8854 else
8855 dst = gen_reg_rtx (mode);
8858 /* When source operand is memory, destination must match. */
8859 if (MEM_P (src) && !matching_memory)
8860 src = force_reg (mode, src);
8862 /* Emit the instruction. */
8864 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
8865 if (reload_in_progress || code == NOT)
8867 /* Reload doesn't know about the flags register, and doesn't know that
8868 it doesn't want to clobber it. */
8869 gcc_assert (code == NOT);
8870 emit_insn (op);
8872 else
8874 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8875 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8878 /* Fix up the destination if needed. */
8879 if (dst != operands[0])
8880 emit_move_insn (operands[0], dst);
8883 /* Return TRUE or FALSE depending on whether the unary operator meets the
8884 appropriate constraints. */
8887 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
8888 enum machine_mode mode ATTRIBUTE_UNUSED,
8889 rtx operands[2] ATTRIBUTE_UNUSED)
8891 /* If one of operands is memory, source and destination must match. */
8892 if ((GET_CODE (operands[0]) == MEM
8893 || GET_CODE (operands[1]) == MEM)
8894 && ! rtx_equal_p (operands[0], operands[1]))
8895 return FALSE;
8896 return TRUE;
8899 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
8900 Create a mask for the sign bit in MODE for an SSE register. If VECT is
8901 true, then replicate the mask for all elements of the vector register.
8902 If INVERT is true, then create a mask excluding the sign bit. */
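/* For DFmode the mask element is 0x8000000000000000 (or its complement
   0x7fffffffffffffff when INVERT), and for SFmode it is 0x80000000 (or
   0x7fffffff); NEG is then implemented as an XOR with the sign mask and
   ABS as an AND with the inverted mask.  */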
8905 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
8907 enum machine_mode vec_mode;
8908 HOST_WIDE_INT hi, lo;
8909 int shift = 63;
8910 rtvec v;
8911 rtx mask;
8913 /* Find the sign bit, sign extended to 2*HWI. */
8914 if (mode == SFmode)
8915 lo = 0x80000000, hi = lo < 0;
8916 else if (HOST_BITS_PER_WIDE_INT >= 64)
8917 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
8918 else
8919 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
8921 if (invert)
8922 lo = ~lo, hi = ~hi;
8924 /* Force this value into the low part of a fp vector constant. */
8925 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
8926 mask = gen_lowpart (mode, mask);
8928 if (mode == SFmode)
8930 if (vect)
8931 v = gen_rtvec (4, mask, mask, mask, mask);
8932 else
8933 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
8934 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8935 vec_mode = V4SFmode;
8937 else
8939 if (vect)
8940 v = gen_rtvec (2, mask, mask);
8941 else
8942 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
8943 vec_mode = V2DFmode;
8946 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
8949 /* Generate code for floating point ABS or NEG. */
8951 void
8952 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
8953 rtx operands[])
8955 rtx mask, set, use, clob, dst, src;
8956 bool matching_memory;
8957 bool use_sse = false;
8958 bool vector_mode = VECTOR_MODE_P (mode);
8959 enum machine_mode elt_mode = mode;
8961 if (vector_mode)
8963 elt_mode = GET_MODE_INNER (mode);
8964 use_sse = true;
8966 else if (TARGET_SSE_MATH)
8967 use_sse = SSE_FLOAT_MODE_P (mode);
8969 /* NEG and ABS performed with SSE use bitwise mask operations.
8970 Create the appropriate mask now. */
8971 if (use_sse)
8972 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
8973 else
8975 /* When not using SSE, we don't use the mask, but prefer to keep the
8976 same general form of the insn pattern to reduce duplication when
8977 it comes time to split. */
8978 mask = const0_rtx;
8981 dst = operands[0];
8982 src = operands[1];
8984 /* If the destination is memory, and we don't have matching source
8985 operands, do things in registers. */
8986 matching_memory = false;
8987 if (MEM_P (dst))
8989 if (rtx_equal_p (dst, src))
8990 matching_memory = true;
8991 else
8992 dst = gen_reg_rtx (mode);
8994 if (MEM_P (src) && !matching_memory)
8995 src = force_reg (mode, src);
8997 if (vector_mode)
8999 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
9000 set = gen_rtx_SET (VOIDmode, dst, set);
9001 emit_insn (set);
9003 else
9005 set = gen_rtx_fmt_e (code, mode, src);
9006 set = gen_rtx_SET (VOIDmode, dst, set);
9007 use = gen_rtx_USE (VOIDmode, mask);
9008 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9009 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));
9012 if (dst != operands[0])
9013 emit_move_insn (operands[0], dst);
9016 /* Expand a copysign operation. Special case operand 0 being a constant. */
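/* copysign (x, y) is computed as (x & ~sign_mask) | (y & sign_mask), i.e. the
   magnitude bits of operands[1] combined with the sign bit of operands[2];
   the split routines below carry this out with SSE AND/IOR operations on the
   masks built by ix86_build_signbit_mask.  */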
9018 void
9019 ix86_expand_copysign (rtx operands[])
9021 enum machine_mode mode, vmode;
9022 rtx dest, op0, op1, mask, nmask;
9024 dest = operands[0];
9025 op0 = operands[1];
9026 op1 = operands[2];
9028 mode = GET_MODE (dest);
9029 vmode = mode == SFmode ? V4SFmode : V2DFmode;
9031 if (GET_CODE (op0) == CONST_DOUBLE)
9033 rtvec v;
9035 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
9036 op0 = simplify_unary_operation (ABS, mode, op0, mode);
9038 if (op0 == CONST0_RTX (mode))
9039 op0 = CONST0_RTX (vmode);
9040 else
9042 if (mode == SFmode)
9043 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
9044 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9045 else
9046 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
9047 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
9050 mask = ix86_build_signbit_mask (mode, 0, 0);
9052 if (mode == SFmode)
9053 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
9054 else
9055 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
9057 else
9059 nmask = ix86_build_signbit_mask (mode, 0, 1);
9060 mask = ix86_build_signbit_mask (mode, 0, 0);
9062 if (mode == SFmode)
9063 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
9064 else
9065 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
9069 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
9070 be a constant, and so has already been expanded into a vector constant. */
9072 void
9073 ix86_split_copysign_const (rtx operands[])
9075 enum machine_mode mode, vmode;
9076 rtx dest, op0, op1, mask, x;
9078 dest = operands[0];
9079 op0 = operands[1];
9080 op1 = operands[2];
9081 mask = operands[3];
9083 mode = GET_MODE (dest);
9084 vmode = GET_MODE (mask);
9086 dest = simplify_gen_subreg (vmode, dest, mode, 0);
9087 x = gen_rtx_AND (vmode, dest, mask);
9088 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9090 if (op0 != CONST0_RTX (vmode))
9092 x = gen_rtx_IOR (vmode, dest, op0);
9093 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9097 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
9098 so we have to do two masks. */
9100 void
9101 ix86_split_copysign_var (rtx operands[])
9103 enum machine_mode mode, vmode;
9104 rtx dest, scratch, op0, op1, mask, nmask, x;
9106 dest = operands[0];
9107 scratch = operands[1];
9108 op0 = operands[2];
9109 op1 = operands[3];
9110 nmask = operands[4];
9111 mask = operands[5];
9113 mode = GET_MODE (dest);
9114 vmode = GET_MODE (mask);
9116 if (rtx_equal_p (op0, op1))
9118 /* Shouldn't happen often (it's useless, obviously), but when it does
9119 we'd generate incorrect code if we continue below. */
9120 emit_move_insn (dest, op0);
9121 return;
9124 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
9126 gcc_assert (REGNO (op1) == REGNO (scratch));
9128 x = gen_rtx_AND (vmode, scratch, mask);
9129 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9131 dest = mask;
9132 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9133 x = gen_rtx_NOT (vmode, dest);
9134 x = gen_rtx_AND (vmode, x, op0);
9135 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9137 else
9139 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
9141 x = gen_rtx_AND (vmode, scratch, mask);
9143 else /* alternative 2,4 */
9145 gcc_assert (REGNO (mask) == REGNO (scratch));
9146 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
9147 x = gen_rtx_AND (vmode, scratch, op1);
9149 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9151 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
9153 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9154 x = gen_rtx_AND (vmode, dest, nmask);
9156 else /* alternative 3,4 */
9158 gcc_assert (REGNO (nmask) == REGNO (dest));
9159 dest = nmask;
9160 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9161 x = gen_rtx_AND (vmode, dest, op0);
9163 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9166 x = gen_rtx_IOR (vmode, dest, scratch);
9167 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9170 /* Return TRUE or FALSE depending on whether the first SET in INSN
9171 has source and destination with matching CC modes, and that the
9172 CC mode is at least as constrained as REQ_MODE. */
9175 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9177 rtx set;
9178 enum machine_mode set_mode;
9180 set = PATTERN (insn);
9181 if (GET_CODE (set) == PARALLEL)
9182 set = XVECEXP (set, 0, 0);
9183 gcc_assert (GET_CODE (set) == SET);
9184 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9186 set_mode = GET_MODE (SET_DEST (set));
9187 switch (set_mode)
9189 case CCNOmode:
9190 if (req_mode != CCNOmode
9191 && (req_mode != CCmode
9192 || XEXP (SET_SRC (set), 1) != const0_rtx))
9193 return 0;
9194 break;
9195 case CCmode:
9196 if (req_mode == CCGCmode)
9197 return 0;
9198 /* FALLTHRU */
9199 case CCGCmode:
9200 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9201 return 0;
9202 /* FALLTHRU */
9203 case CCGOCmode:
9204 if (req_mode == CCZmode)
9205 return 0;
9206 /* FALLTHRU */
9207 case CCZmode:
9208 break;
9210 default:
9211 gcc_unreachable ();
9214 return (GET_MODE (SET_SRC (set)) == set_mode);
9217 /* Generate insn patterns to do an integer compare of OPERANDS. */
9219 static rtx
9220 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
9222 enum machine_mode cmpmode;
9223 rtx tmp, flags;
9225 cmpmode = SELECT_CC_MODE (code, op0, op1);
9226 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
9228 /* This is very simple, but making the interface the same as in the
9229 FP case makes the rest of the code easier. */
9230 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
9231 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
9233 /* Return the test that should be put into the flags user, i.e.
9234 the bcc, scc, or cmov instruction. */
9235 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
9238 /* Figure out whether to use ordered or unordered fp comparisons.
9239 Return the appropriate mode to use. */
9241 enum machine_mode
9242 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
9244   /* ??? In order to make all comparisons reversible, we do all comparisons
9245      non-trapping when compiling for IEEE.  Once gcc is able to distinguish
9246      all forms of trapping and nontrapping comparisons, we can make inequality
9247      comparisons trapping again, since that results in better code when using
9248      FCOM based compares.  */
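  /* CCFPUmode is used for the unordered (quiet) compares such as fucom and
     ucomiss, which only raise an invalid-operation exception on signaling
     NaNs, while CCFPmode is for the ordered fcom/comiss forms that raise it
     on any NaN.  */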
9249 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
9252 enum machine_mode
9253 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
9255 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9256 return ix86_fp_compare_mode (code);
9257 switch (code)
9259 /* Only zero flag is needed. */
9260 case EQ: /* ZF=0 */
9261 case NE: /* ZF!=0 */
9262 return CCZmode;
9263 /* Codes needing carry flag. */
9264 case GEU: /* CF=0 */
9265 case GTU: /* CF=0 & ZF=0 */
9266 case LTU: /* CF=1 */
9267 case LEU: /* CF=1 | ZF=1 */
9268 return CCmode;
9269 /* Codes possibly doable only with sign flag when
9270 comparing against zero. */
9271 case GE: /* SF=OF or SF=0 */
9272 case LT: /* SF<>OF or SF=1 */
9273 if (op1 == const0_rtx)
9274 return CCGOCmode;
9275 else
9276 /* For other cases Carry flag is not required. */
9277 return CCGCmode;
9278     /* Codes doable only with the sign flag when comparing
9279        against zero, but for which we lack a jump instruction,
9280        so we need to use relational tests against overflow,
9281        which thus needs to be zero.  */
9282 case GT: /* ZF=0 & SF=OF */
9283 case LE: /* ZF=1 | SF<>OF */
9284 if (op1 == const0_rtx)
9285 return CCNOmode;
9286 else
9287 return CCGCmode;
9288     /* The strcmp pattern does a (use flags), and combine may ask us for
9289        the proper mode.  */
9290 case USE:
9291 return CCmode;
9292 default:
9293 gcc_unreachable ();
9297 /* Return the fixed registers used for condition codes. */
9299 static bool
9300 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9302 *p1 = FLAGS_REG;
9303 *p2 = FPSR_REG;
9304 return true;
9307 /* If two condition code modes are compatible, return a condition code
9308 mode which is compatible with both. Otherwise, return
9309 VOIDmode. */
9311 static enum machine_mode
9312 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
9314 if (m1 == m2)
9315 return m1;
9317 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
9318 return VOIDmode;
9320 if ((m1 == CCGCmode && m2 == CCGOCmode)
9321 || (m1 == CCGOCmode && m2 == CCGCmode))
9322 return CCGCmode;
9324 switch (m1)
9326 default:
9327 gcc_unreachable ();
9329 case CCmode:
9330 case CCGCmode:
9331 case CCGOCmode:
9332 case CCNOmode:
9333 case CCZmode:
9334 switch (m2)
9336 default:
9337 return VOIDmode;
9339 case CCmode:
9340 case CCGCmode:
9341 case CCGOCmode:
9342 case CCNOmode:
9343 case CCZmode:
9344 return CCmode;
9347 case CCFPmode:
9348 case CCFPUmode:
9349 /* These are only compatible with themselves, which we already
9350 checked above. */
9351 return VOIDmode;
9355 /* Return true if we should use an FCOMI instruction for this fp comparison. */
9358 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
9360 enum rtx_code swapped_code = swap_condition (code);
9361 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
9362 || (ix86_fp_comparison_cost (swapped_code)
9363 == ix86_fp_comparison_fcomi_cost (swapped_code)));
9366 /* Swap, force into registers, or otherwise massage the two operands
9367 to a fp comparison. The operands are updated in place; the new
9368 comparison code is returned. */
9370 static enum rtx_code
9371 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
9373 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
9374 rtx op0 = *pop0, op1 = *pop1;
9375 enum machine_mode op_mode = GET_MODE (op0);
9376 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
9378 /* All of the unordered compare instructions only work on registers.
9379 The same is true of the fcomi compare instructions. The XFmode
9380 compare instructions require registers except when comparing
9381 against zero or when converting operand 1 from fixed point to
9382 floating point. */
9384 if (!is_sse
9385 && (fpcmp_mode == CCFPUmode
9386 || (op_mode == XFmode
9387 && ! (standard_80387_constant_p (op0) == 1
9388 || standard_80387_constant_p (op1) == 1)
9389 && GET_CODE (op1) != FLOAT)
9390 || ix86_use_fcomi_compare (code)))
9392 op0 = force_reg (op_mode, op0);
9393 op1 = force_reg (op_mode, op1);
9395 else
9397 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
9398 things around if they appear profitable, otherwise force op0
9399 into a register. */
9401 if (standard_80387_constant_p (op0) == 0
9402 || (GET_CODE (op0) == MEM
9403 && ! (standard_80387_constant_p (op1) == 0
9404 || GET_CODE (op1) == MEM)))
9406 rtx tmp;
9407 tmp = op0, op0 = op1, op1 = tmp;
9408 code = swap_condition (code);
9411 if (GET_CODE (op0) != REG)
9412 op0 = force_reg (op_mode, op0);
9414 if (CONSTANT_P (op1))
9416 int tmp = standard_80387_constant_p (op1);
9417 if (tmp == 0)
9418 op1 = validize_mem (force_const_mem (op_mode, op1));
9419 else if (tmp == 1)
9421 if (TARGET_CMOVE)
9422 op1 = force_reg (op_mode, op1);
9424 else
9425 op1 = force_reg (op_mode, op1);
9429 /* Try to rearrange the comparison to make it cheaper. */
9430 if (ix86_fp_comparison_cost (code)
9431 > ix86_fp_comparison_cost (swap_condition (code))
9432 && (GET_CODE (op1) == REG || !no_new_pseudos))
9434 rtx tmp;
9435 tmp = op0, op0 = op1, op1 = tmp;
9436 code = swap_condition (code);
9437 if (GET_CODE (op0) != REG)
9438 op0 = force_reg (op_mode, op0);
9441 *pop0 = op0;
9442 *pop1 = op1;
9443 return code;
9446 /* Convert comparison codes we use to represent FP comparison to integer
9447 code that will result in proper branch. Return UNKNOWN if no such code
9448 is available. */
9450 enum rtx_code
9451 ix86_fp_compare_code_to_integer (enum rtx_code code)
9453 switch (code)
9455 case GT:
9456 return GTU;
9457 case GE:
9458 return GEU;
9459 case ORDERED:
9460 case UNORDERED:
9461 return code;
9462 break;
9463 case UNEQ:
9464 return EQ;
9465 break;
9466 case UNLT:
9467 return LTU;
9468 break;
9469 case UNLE:
9470 return LEU;
9471 break;
9472 case LTGT:
9473 return NE;
9474 break;
9475 default:
9476 return UNKNOWN;
9480 /* Split comparison code CODE into comparisons we can do using branch
9481    instructions.  BYPASS_CODE is the comparison code for a branch that will
9482    branch around FIRST_CODE and SECOND_CODE.  If one of the branches
9483    is not required, its value is set to UNKNOWN.
9484    We never require more than two branches.  */
9486 void
9487 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
9488 enum rtx_code *first_code,
9489 enum rtx_code *second_code)
9491 *first_code = code;
9492 *bypass_code = UNKNOWN;
9493 *second_code = UNKNOWN;
9495 /* The fcomi comparison sets flags as follows:
9497 cmp ZF PF CF
9498 > 0 0 0
9499 < 0 0 1
9500 = 1 0 0
9501 un 1 1 1 */
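     /* Since ZF/CF after fcomi match the flags an unsigned integer compare
        would set, GT tests "ZF=0 & CF=0" (the GTU/ja condition) and UNLT
        tests "CF=1" (the LTU/jb condition); this is why
        ix86_fp_compare_code_to_integer maps GT to GTU and UNLT to LTU.  */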
9503 switch (code)
9505 case GT: /* GTU - CF=0 & ZF=0 */
9506 case GE: /* GEU - CF=0 */
9507 case ORDERED: /* PF=0 */
9508 case UNORDERED: /* PF=1 */
9509 case UNEQ: /* EQ - ZF=1 */
9510 case UNLT: /* LTU - CF=1 */
9511 case UNLE: /* LEU - CF=1 | ZF=1 */
9512 case LTGT: /* EQ - ZF=0 */
9513 break;
9514 case LT: /* LTU - CF=1 - fails on unordered */
9515 *first_code = UNLT;
9516 *bypass_code = UNORDERED;
9517 break;
9518 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
9519 *first_code = UNLE;
9520 *bypass_code = UNORDERED;
9521 break;
9522 case EQ: /* EQ - ZF=1 - fails on unordered */
9523 *first_code = UNEQ;
9524 *bypass_code = UNORDERED;
9525 break;
9526 case NE: /* NE - ZF=0 - fails on unordered */
9527 *first_code = LTGT;
9528 *second_code = UNORDERED;
9529 break;
9530 case UNGE: /* GEU - CF=0 - fails on unordered */
9531 *first_code = GE;
9532 *second_code = UNORDERED;
9533 break;
9534 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
9535 *first_code = GT;
9536 *second_code = UNORDERED;
9537 break;
9538 default:
9539 gcc_unreachable ();
9541 if (!TARGET_IEEE_FP)
9543 *second_code = UNKNOWN;
9544 *bypass_code = UNKNOWN;
9548 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
9549    All of the following functions use the number of instructions as the cost metric.
9550    In the future this should be tweaked to compute bytes for optimize_size and
9551    take into account the performance of various instructions on various CPUs.  */
9552 static int
9553 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
9555 if (!TARGET_IEEE_FP)
9556 return 4;
9557 /* The cost of code output by ix86_expand_fp_compare. */
9558 switch (code)
9560 case UNLE:
9561 case UNLT:
9562 case LTGT:
9563 case GT:
9564 case GE:
9565 case UNORDERED:
9566 case ORDERED:
9567 case UNEQ:
9568 return 4;
9569 break;
9570 case LT:
9571 case NE:
9572 case EQ:
9573 case UNGE:
9574 return 5;
9575 break;
9576 case LE:
9577 case UNGT:
9578 return 6;
9579 break;
9580 default:
9581 gcc_unreachable ();
9585 /* Return cost of comparison done using fcomi operation.
9586 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9587 static int
9588 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
9590 enum rtx_code bypass_code, first_code, second_code;
9591 /* Return arbitrarily high cost when instruction is not supported - this
9592 prevents gcc from using it. */
9593 if (!TARGET_CMOVE)
9594 return 1024;
9595 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9596 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
9599 /* Return cost of comparison done using sahf operation.
9600 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9601 static int
9602 ix86_fp_comparison_sahf_cost (enum rtx_code code)
9604 enum rtx_code bypass_code, first_code, second_code;
9605   /* Return an arbitrarily high cost when the instruction is not preferred - this
9606      prevents gcc from using it.  */
9607 if (!TARGET_USE_SAHF && !optimize_size)
9608 return 1024;
9609 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9610 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
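/* For reference: the base costs above count emitted instructions -
   fcomi + jcc is 2, fnstsw + sahf + jcc is 3 - and one extra unit is
   added whenever a bypass or second branch is required.  */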
9613 /* Compute cost of the comparison done using any method.
9614 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9615 static int
9616 ix86_fp_comparison_cost (enum rtx_code code)
9618 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
9619 int min;
9621 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
9622 sahf_cost = ix86_fp_comparison_sahf_cost (code);
9624 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
9625 if (min > sahf_cost)
9626 min = sahf_cost;
9627 if (min > fcomi_cost)
9628 min = fcomi_cost;
9629 return min;
9632 /* Generate insn patterns to do a floating point compare of OPERANDS. */
9634 static rtx
9635 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
9636 rtx *second_test, rtx *bypass_test)
9638 enum machine_mode fpcmp_mode, intcmp_mode;
9639 rtx tmp, tmp2;
9640 int cost = ix86_fp_comparison_cost (code);
9641 enum rtx_code bypass_code, first_code, second_code;
9643 fpcmp_mode = ix86_fp_compare_mode (code);
9644 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
9646 if (second_test)
9647 *second_test = NULL_RTX;
9648 if (bypass_test)
9649 *bypass_test = NULL_RTX;
9651 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9653 /* Do fcomi/sahf based test when profitable. */
9654 if ((bypass_code == UNKNOWN || bypass_test)
9655 && (second_code == UNKNOWN || second_test)
9656 && ix86_fp_comparison_arithmetics_cost (code) > cost)
9658 if (TARGET_CMOVE)
9660 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9661 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
9662 tmp);
9663 emit_insn (tmp);
9665 else
9667 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9668 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9669 if (!scratch)
9670 scratch = gen_reg_rtx (HImode);
9671 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9672 emit_insn (gen_x86_sahf_1 (scratch));
9675 /* The FP codes work out to act like unsigned. */
9676 intcmp_mode = fpcmp_mode;
9677 code = first_code;
9678 if (bypass_code != UNKNOWN)
9679 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
9680 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9681 const0_rtx);
9682 if (second_code != UNKNOWN)
9683 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
9684 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9685 const0_rtx);
9687 else
9689 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
9690 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9691 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9692 if (!scratch)
9693 scratch = gen_reg_rtx (HImode);
9694 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9696 /* In the unordered case, we have to check C2 for NaN's, which
9697 doesn't happen to work out to anything nice combination-wise.
9698 So do some bit twiddling on the value we've got in AH to come
9699 up with an appropriate set of condition codes. */
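/* For reference: after the fnstsw above, AH holds the FPU condition bits
   C0 (bit 0), C2 (bit 2) and C3 (bit 6), which sahf would map to CF, PF
   and ZF.  Hence the masks used below: 0x45 = C0|C2|C3, 0x44 = C2|C3,
   0x40 = C3, 0x05 = C0|C2, 0x04 = C2 and 0x01 = C0.  */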
9701 intcmp_mode = CCNOmode;
9702 switch (code)
9704 case GT:
9705 case UNGT:
9706 if (code == GT || !TARGET_IEEE_FP)
9708 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9709 code = EQ;
9711 else
9713 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9714 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9715 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
9716 intcmp_mode = CCmode;
9717 code = GEU;
9719 break;
9720 case LT:
9721 case UNLT:
9722 if (code == LT && TARGET_IEEE_FP)
9724 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9725 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
9726 intcmp_mode = CCmode;
9727 code = EQ;
9729 else
9731 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
9732 code = NE;
9734 break;
9735 case GE:
9736 case UNGE:
9737 if (code == GE || !TARGET_IEEE_FP)
9739 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
9740 code = EQ;
9742 else
9744 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9745 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9746 GEN_INT (0x01)));
9747 code = NE;
9749 break;
9750 case LE:
9751 case UNLE:
9752 if (code == LE && TARGET_IEEE_FP)
9754 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9755 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9756 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9757 intcmp_mode = CCmode;
9758 code = LTU;
9760 else
9762 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9763 code = NE;
9765 break;
9766 case EQ:
9767 case UNEQ:
9768 if (code == EQ && TARGET_IEEE_FP)
9770 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9771 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9772 intcmp_mode = CCmode;
9773 code = EQ;
9775 else
9777 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9778 code = NE;
9779 break;
9781 break;
9782 case NE:
9783 case LTGT:
9784 if (code == NE && TARGET_IEEE_FP)
9786 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9787 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9788 GEN_INT (0x40)));
9789 code = NE;
9791 else
9793 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9794 code = EQ;
9796 break;
9798 case UNORDERED:
9799 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9800 code = NE;
9801 break;
9802 case ORDERED:
9803 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9804 code = EQ;
9805 break;
9807 default:
9808 gcc_unreachable ();
9812 /* Return the test that should be put into the flags user, i.e.
9813 the bcc, scc, or cmov instruction. */
9814 return gen_rtx_fmt_ee (code, VOIDmode,
9815 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9816 const0_rtx);
9820 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
9822 rtx op0, op1, ret;
9823 op0 = ix86_compare_op0;
9824 op1 = ix86_compare_op1;
9826 if (second_test)
9827 *second_test = NULL_RTX;
9828 if (bypass_test)
9829 *bypass_test = NULL_RTX;
9831 if (ix86_compare_emitted)
9833 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
9834 ix86_compare_emitted = NULL_RTX;
9836 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9837 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9838 second_test, bypass_test);
9839 else
9840 ret = ix86_expand_int_compare (code, op0, op1);
9842 return ret;
9845 /* Return true if the CODE will result in nontrivial jump sequence. */
9846 bool
9847 ix86_fp_jump_nontrivial_p (enum rtx_code code)
9849 enum rtx_code bypass_code, first_code, second_code;
9850 if (!TARGET_CMOVE)
9851 return true;
9852 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9853 return bypass_code != UNKNOWN || second_code != UNKNOWN;
9856 void
9857 ix86_expand_branch (enum rtx_code code, rtx label)
9859 rtx tmp;
9861 switch (GET_MODE (ix86_compare_op0))
9863 case QImode:
9864 case HImode:
9865 case SImode:
9866 simple:
9867 tmp = ix86_expand_compare (code, NULL, NULL);
9868 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9869 gen_rtx_LABEL_REF (VOIDmode, label),
9870 pc_rtx);
9871 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
9872 return;
9874 case SFmode:
9875 case DFmode:
9876 case XFmode:
9878 rtvec vec;
9879 int use_fcomi;
9880 enum rtx_code bypass_code, first_code, second_code;
9882 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
9883 &ix86_compare_op1);
9885 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9887 /* Check whether we will use the natural sequence with one jump. If
9888 so, we can expand the jump early. Otherwise delay expansion by
9889 creating a compound insn so as not to confuse the optimizers. */
9890 if (bypass_code == UNKNOWN && second_code == UNKNOWN
9891 && TARGET_CMOVE)
9893 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
9894 gen_rtx_LABEL_REF (VOIDmode, label),
9895 pc_rtx, NULL_RTX, NULL_RTX);
9897 else
9899 tmp = gen_rtx_fmt_ee (code, VOIDmode,
9900 ix86_compare_op0, ix86_compare_op1);
9901 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9902 gen_rtx_LABEL_REF (VOIDmode, label),
9903 pc_rtx);
9904 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
9906 use_fcomi = ix86_use_fcomi_compare (code);
9907 vec = rtvec_alloc (3 + !use_fcomi);
9908 RTVEC_ELT (vec, 0) = tmp;
9909 RTVEC_ELT (vec, 1)
9910 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
9911 RTVEC_ELT (vec, 2)
9912 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
9913 if (! use_fcomi)
9914 RTVEC_ELT (vec, 3)
9915 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
9917 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
9919 return;
9922 case DImode:
9923 if (TARGET_64BIT)
9924 goto simple;
9925 case TImode:
9926 /* Expand DImode branch into multiple compare+branch. */
9928 rtx lo[2], hi[2], label2;
9929 enum rtx_code code1, code2, code3;
9930 enum machine_mode submode;
9932 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
9934 tmp = ix86_compare_op0;
9935 ix86_compare_op0 = ix86_compare_op1;
9936 ix86_compare_op1 = tmp;
9937 code = swap_condition (code);
9939 if (GET_MODE (ix86_compare_op0) == DImode)
9941 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
9942 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
9943 submode = SImode;
9945 else
9947 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
9948 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
9949 submode = DImode;
9952 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
9953 avoid two branches. This costs one extra insn, so disable when
9954 optimizing for size. */
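/* E.g. a DImode "a == b" becomes roughly two xors, an or and a single
   jz/jnz on the combined result, instead of two compare-and-branch
   pairs (illustration; the actual insns come from expand_binop below).  */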
9956 if ((code == EQ || code == NE)
9957 && (!optimize_size
9958 || hi[1] == const0_rtx || lo[1] == const0_rtx))
9960 rtx xor0, xor1;
9962 xor1 = hi[0];
9963 if (hi[1] != const0_rtx)
9964 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
9965 NULL_RTX, 0, OPTAB_WIDEN);
9967 xor0 = lo[0];
9968 if (lo[1] != const0_rtx)
9969 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
9970 NULL_RTX, 0, OPTAB_WIDEN);
9972 tmp = expand_binop (submode, ior_optab, xor1, xor0,
9973 NULL_RTX, 0, OPTAB_WIDEN);
9975 ix86_compare_op0 = tmp;
9976 ix86_compare_op1 = const0_rtx;
9977 ix86_expand_branch (code, label);
9978 return;
9981 /* Otherwise, if we are doing a less-than or greater-or-equal comparison,
9982 op1 is a constant and the low word is zero, then we can just
9983 examine the high word. */
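/* E.g. for "a < 0x500000000LL" the low word of the constant is zero, so
   branching on "hi(a) < 5" alone gives the right answer.  */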
9985 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
9986 switch (code)
9988 case LT: case LTU: case GE: case GEU:
9989 ix86_compare_op0 = hi[0];
9990 ix86_compare_op1 = hi[1];
9991 ix86_expand_branch (code, label);
9992 return;
9993 default:
9994 break;
9997 /* Otherwise, we need two or three jumps. */
9999 label2 = gen_label_rtx ();
10001 code1 = code;
10002 code2 = swap_condition (code);
10003 code3 = unsigned_condition (code);
10005 switch (code)
10007 case LT: case GT: case LTU: case GTU:
10008 break;
10010 case LE: code1 = LT; code2 = GT; break;
10011 case GE: code1 = GT; code2 = LT; break;
10012 case LEU: code1 = LTU; code2 = GTU; break;
10013 case GEU: code1 = GTU; code2 = LTU; break;
10015 case EQ: code1 = UNKNOWN; code2 = NE; break;
10016 case NE: code2 = UNKNOWN; break;
10018 default:
10019 gcc_unreachable ();
10023 * a < b =>
10024 * if (hi(a) < hi(b)) goto true;
10025 * if (hi(a) > hi(b)) goto false;
10026 * if (lo(a) < lo(b)) goto true;
10027 * false:
10030 ix86_compare_op0 = hi[0];
10031 ix86_compare_op1 = hi[1];
10033 if (code1 != UNKNOWN)
10034 ix86_expand_branch (code1, label);
10035 if (code2 != UNKNOWN)
10036 ix86_expand_branch (code2, label2);
10038 ix86_compare_op0 = lo[0];
10039 ix86_compare_op1 = lo[1];
10040 ix86_expand_branch (code3, label);
10042 if (code2 != UNKNOWN)
10043 emit_label (label2);
10044 return;
10047 default:
10048 gcc_unreachable ();
10052 /* Split branch based on floating point condition. */
10053 void
10054 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
10055 rtx target1, rtx target2, rtx tmp, rtx pushed)
10057 rtx second, bypass;
10058 rtx label = NULL_RTX;
10059 rtx condition;
10060 int bypass_probability = -1, second_probability = -1, probability = -1;
10061 rtx i;
10063 if (target2 != pc_rtx)
10065 rtx tmp = target2;
10066 code = reverse_condition_maybe_unordered (code);
10067 target2 = target1;
10068 target1 = tmp;
10071 condition = ix86_expand_fp_compare (code, op1, op2,
10072 tmp, &second, &bypass);
10074 /* Remove pushed operand from stack. */
10075 if (pushed)
10076 ix86_free_from_memory (GET_MODE (pushed));
10078 if (split_branch_probability >= 0)
10080 /* Distribute the probabilities across the jumps.
10081 Assume that BYPASS and SECOND always test
10082 for UNORDERED. */
10083 probability = split_branch_probability;
10085 /* A value of 1 is low enough that the probability does not need
10086 to be updated. Later we may run some experiments and see
10087 whether unordered values are more frequent in practice. */
10088 if (bypass)
10089 bypass_probability = 1;
10090 if (second)
10091 second_probability = 1;
10093 if (bypass != NULL_RTX)
10095 label = gen_label_rtx ();
10096 i = emit_jump_insn (gen_rtx_SET
10097 (VOIDmode, pc_rtx,
10098 gen_rtx_IF_THEN_ELSE (VOIDmode,
10099 bypass,
10100 gen_rtx_LABEL_REF (VOIDmode,
10101 label),
10102 pc_rtx)));
10103 if (bypass_probability >= 0)
10104 REG_NOTES (i)
10105 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10106 GEN_INT (bypass_probability),
10107 REG_NOTES (i));
10109 i = emit_jump_insn (gen_rtx_SET
10110 (VOIDmode, pc_rtx,
10111 gen_rtx_IF_THEN_ELSE (VOIDmode,
10112 condition, target1, target2)));
10113 if (probability >= 0)
10114 REG_NOTES (i)
10115 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10116 GEN_INT (probability),
10117 REG_NOTES (i));
10118 if (second != NULL_RTX)
10120 i = emit_jump_insn (gen_rtx_SET
10121 (VOIDmode, pc_rtx,
10122 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
10123 target2)));
10124 if (second_probability >= 0)
10125 REG_NOTES (i)
10126 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10127 GEN_INT (second_probability),
10128 REG_NOTES (i));
10130 if (label != NULL_RTX)
10131 emit_label (label);
10135 ix86_expand_setcc (enum rtx_code code, rtx dest)
10137 rtx ret, tmp, tmpreg, equiv;
10138 rtx second_test, bypass_test;
10140 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
10141 return 0; /* FAIL */
10143 gcc_assert (GET_MODE (dest) == QImode);
10145 ret = ix86_expand_compare (code, &second_test, &bypass_test);
10146 PUT_MODE (ret, QImode);
10148 tmp = dest;
10149 tmpreg = dest;
10151 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
10152 if (bypass_test || second_test)
10154 rtx test = second_test;
10155 int bypass = 0;
10156 rtx tmp2 = gen_reg_rtx (QImode);
10157 if (bypass_test)
10159 gcc_assert (!second_test);
10160 test = bypass_test;
10161 bypass = 1;
10162 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10164 PUT_MODE (test, QImode);
10165 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10167 if (bypass)
10168 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10169 else
10170 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10173 /* Attach a REG_EQUAL note describing the comparison result. */
10174 if (ix86_compare_op0 && ix86_compare_op1)
10176 equiv = simplify_gen_relational (code, QImode,
10177 GET_MODE (ix86_compare_op0),
10178 ix86_compare_op0, ix86_compare_op1);
10179 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10182 return 1; /* DONE */
10185 /* Expand comparison setting or clearing carry flag. Return true when
10186 successful and set pop for the operation. */
10187 static bool
10188 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10190 enum machine_mode mode =
10191 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10193 /* Do not handle DImode compares that go through the special path. FP compares
10194 are handled below, but only when they reduce to a carry flag test. */
10195 if (mode == (TARGET_64BIT ? TImode : DImode))
10196 return false;
10197 if (FLOAT_MODE_P (mode))
10199 rtx second_test = NULL, bypass_test = NULL;
10200 rtx compare_op, compare_seq;
10202 /* Shortcut: following common codes never translate into carry flag compares. */
10203 if (code == EQ || code == NE || code == UNEQ || code == LTGT
10204 || code == ORDERED || code == UNORDERED)
10205 return false;
10207 /* These comparisons require the zero flag; swap the operands so that they don't. */
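/* (E.g. GT needs ZF as well as CF, but swapped into LT it needs only CF;
   this is safe here only because !TARGET_IEEE_FP lets us ignore the
   unordered case.)  */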
10208 if ((code == GT || code == UNLE || code == LE || code == UNGT)
10209 && !TARGET_IEEE_FP)
10211 rtx tmp = op0;
10212 op0 = op1;
10213 op1 = tmp;
10214 code = swap_condition (code);
10217 /* Try to expand the comparison and verify that we end up with a carry flag
10218 based comparison. This fails to be true only when we decide to expand the
10219 comparison using arithmetic, which is not a common scenario. */
10220 start_sequence ();
10221 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10222 &second_test, &bypass_test);
10223 compare_seq = get_insns ();
10224 end_sequence ();
10226 if (second_test || bypass_test)
10227 return false;
10228 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10229 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10230 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
10231 else
10232 code = GET_CODE (compare_op);
10233 if (code != LTU && code != GEU)
10234 return false;
10235 emit_insn (compare_seq);
10236 *pop = compare_op;
10237 return true;
10239 if (!INTEGRAL_MODE_P (mode))
10240 return false;
10241 switch (code)
10243 case LTU:
10244 case GEU:
10245 break;
10247 /* Convert a==0 into (unsigned)a<1. */
10248 case EQ:
10249 case NE:
10250 if (op1 != const0_rtx)
10251 return false;
10252 op1 = const1_rtx;
10253 code = (code == EQ ? LTU : GEU);
10254 break;
10256 /* Convert a>b into b<a or a>=b+1. */
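/* (E.g. "(unsigned) a > 7" is rewritten as "(unsigned) a >= 8", which a
   single cmp leaves directly in the carry flag as GEU.)  */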
10257 case GTU:
10258 case LEU:
10259 if (GET_CODE (op1) == CONST_INT)
10261 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
10262 /* Bail out on overflow. We still can swap operands but that
10263 would force loading of the constant into register. */
10264 if (op1 == const0_rtx
10265 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
10266 return false;
10267 code = (code == GTU ? GEU : LTU);
10269 else
10271 rtx tmp = op1;
10272 op1 = op0;
10273 op0 = tmp;
10274 code = (code == GTU ? LTU : GEU);
10276 break;
10278 /* Convert a>=0 into (unsigned)a<0x80000000. */
10279 case LT:
10280 case GE:
10281 if (mode == DImode || op1 != const0_rtx)
10282 return false;
10283 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10284 code = (code == LT ? GEU : LTU);
10285 break;
10286 case LE:
10287 case GT:
10288 if (mode == DImode || op1 != constm1_rtx)
10289 return false;
10290 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10291 code = (code == LE ? GEU : LTU);
10292 break;
10294 default:
10295 return false;
10297 /* Swapping operands may cause constant to appear as first operand. */
10298 if (!nonimmediate_operand (op0, VOIDmode))
10300 if (no_new_pseudos)
10301 return false;
10302 op0 = force_reg (mode, op0);
10304 ix86_compare_op0 = op0;
10305 ix86_compare_op1 = op1;
10306 *pop = ix86_expand_compare (code, NULL, NULL);
10307 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
10308 return true;
10312 ix86_expand_int_movcc (rtx operands[])
10314 enum rtx_code code = GET_CODE (operands[1]), compare_code;
10315 rtx compare_seq, compare_op;
10316 rtx second_test, bypass_test;
10317 enum machine_mode mode = GET_MODE (operands[0]);
10318 bool sign_bit_compare_p = false;
10320 start_sequence ();
10321 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10322 compare_seq = get_insns ();
10323 end_sequence ();
10325 compare_code = GET_CODE (compare_op);
10327 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
10328 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
10329 sign_bit_compare_p = true;
10331 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
10332 HImode insns, we'd be swallowed in word prefix ops. */
10334 if ((mode != HImode || TARGET_FAST_PREFIX)
10335 && (mode != (TARGET_64BIT ? TImode : DImode))
10336 && GET_CODE (operands[2]) == CONST_INT
10337 && GET_CODE (operands[3]) == CONST_INT)
10339 rtx out = operands[0];
10340 HOST_WIDE_INT ct = INTVAL (operands[2]);
10341 HOST_WIDE_INT cf = INTVAL (operands[3]);
10342 HOST_WIDE_INT diff;
10344 diff = ct - cf;
10345 /* Sign bit compares are better done using shifts than by using
10346 sbb. */
10347 if (sign_bit_compare_p
10348 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
10349 ix86_compare_op1, &compare_op))
10351 /* Detect overlap between destination and compare sources. */
10352 rtx tmp = out;
10354 if (!sign_bit_compare_p)
10356 bool fpcmp = false;
10358 compare_code = GET_CODE (compare_op);
10360 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10361 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10363 fpcmp = true;
10364 compare_code = ix86_fp_compare_code_to_integer (compare_code);
10367 /* To simplify rest of code, restrict to the GEU case. */
10368 if (compare_code == LTU)
10370 HOST_WIDE_INT tmp = ct;
10371 ct = cf;
10372 cf = tmp;
10373 compare_code = reverse_condition (compare_code);
10374 code = reverse_condition (code);
10376 else
10378 if (fpcmp)
10379 PUT_CODE (compare_op,
10380 reverse_condition_maybe_unordered
10381 (GET_CODE (compare_op)));
10382 else
10383 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10385 diff = ct - cf;
10387 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
10388 || reg_overlap_mentioned_p (out, ix86_compare_op1))
10389 tmp = gen_reg_rtx (mode);
10391 if (mode == DImode)
10392 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
10393 else
10394 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
10396 else
10398 if (code == GT || code == GE)
10399 code = reverse_condition (code);
10400 else
10402 HOST_WIDE_INT tmp = ct;
10403 ct = cf;
10404 cf = tmp;
10405 diff = ct - cf;
10407 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
10408 ix86_compare_op1, VOIDmode, 0, -1);
10411 if (diff == 1)
10414 * cmpl op0,op1
10415 * sbbl dest,dest
10416 * [addl dest, ct]
10418 * Size 5 - 8.
10420 if (ct)
10421 tmp = expand_simple_binop (mode, PLUS,
10422 tmp, GEN_INT (ct),
10423 copy_rtx (tmp), 1, OPTAB_DIRECT);
10425 else if (cf == -1)
10428 * cmpl op0,op1
10429 * sbbl dest,dest
10430 * orl $ct, dest
10432 * Size 8.
10434 tmp = expand_simple_binop (mode, IOR,
10435 tmp, GEN_INT (ct),
10436 copy_rtx (tmp), 1, OPTAB_DIRECT);
10438 else if (diff == -1 && ct)
10441 * cmpl op0,op1
10442 * sbbl dest,dest
10443 * notl dest
10444 * [addl dest, cf]
10446 * Size 8 - 11.
10448 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10449 if (cf)
10450 tmp = expand_simple_binop (mode, PLUS,
10451 copy_rtx (tmp), GEN_INT (cf),
10452 copy_rtx (tmp), 1, OPTAB_DIRECT);
10454 else
10457 * cmpl op0,op1
10458 * sbbl dest,dest
10459 * [notl dest]
10460 * andl cf - ct, dest
10461 * [addl dest, ct]
10463 * Size 8 - 11.
10466 if (cf == 0)
10468 cf = ct;
10469 ct = 0;
10470 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10473 tmp = expand_simple_binop (mode, AND,
10474 copy_rtx (tmp),
10475 gen_int_mode (cf - ct, mode),
10476 copy_rtx (tmp), 1, OPTAB_DIRECT);
10477 if (ct)
10478 tmp = expand_simple_binop (mode, PLUS,
10479 copy_rtx (tmp), GEN_INT (ct),
10480 copy_rtx (tmp), 1, OPTAB_DIRECT);
10483 if (!rtx_equal_p (tmp, out))
10484 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
10486 return 1; /* DONE */
10489 if (diff < 0)
10491 HOST_WIDE_INT tmp;
10492 tmp = ct, ct = cf, cf = tmp;
10493 diff = -diff;
10494 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10496 /* We may be reversing an unordered compare to a normal compare, which
10497 is not valid in general (we may convert a non-trapping condition
10498 to a trapping one); however, on i386 we currently emit all
10499 comparisons unordered. */
10500 compare_code = reverse_condition_maybe_unordered (compare_code);
10501 code = reverse_condition_maybe_unordered (code);
10503 else
10505 compare_code = reverse_condition (compare_code);
10506 code = reverse_condition (code);
10510 compare_code = UNKNOWN;
10511 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
10512 && GET_CODE (ix86_compare_op1) == CONST_INT)
10514 if (ix86_compare_op1 == const0_rtx
10515 && (code == LT || code == GE))
10516 compare_code = code;
10517 else if (ix86_compare_op1 == constm1_rtx)
10519 if (code == LE)
10520 compare_code = LT;
10521 else if (code == GT)
10522 compare_code = GE;
10526 /* Optimize dest = (op0 < 0) ? -1 : cf. */
10527 if (compare_code != UNKNOWN
10528 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
10529 && (cf == -1 || ct == -1))
10531 /* If lea code below could be used, only optimize
10532 if it results in a 2 insn sequence. */
10534 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
10535 || diff == 3 || diff == 5 || diff == 9)
10536 || (compare_code == LT && ct == -1)
10537 || (compare_code == GE && cf == -1))
10540 * notl op1 (if necessary)
10541 * sarl $31, op1
10542 * orl cf, op1
10544 if (ct != -1)
10546 cf = ct;
10547 ct = -1;
10548 code = reverse_condition (code);
10551 out = emit_store_flag (out, code, ix86_compare_op0,
10552 ix86_compare_op1, VOIDmode, 0, -1);
10554 out = expand_simple_binop (mode, IOR,
10555 out, GEN_INT (cf),
10556 out, 1, OPTAB_DIRECT);
10557 if (out != operands[0])
10558 emit_move_insn (operands[0], out);
10560 return 1; /* DONE */
10565 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
10566 || diff == 3 || diff == 5 || diff == 9)
10567 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
10568 && (mode != DImode
10569 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
10572 * xorl dest,dest
10573 * cmpl op1,op2
10574 * setcc dest
10575 * lea cf(dest*(ct-cf)),dest
10577 * Size 14.
10579 * This also catches the degenerate setcc-only case.
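 * E.g. for (a < b) ? 5 : 2: ct = 5, cf = 2, diff = 3, so after setcc
 * leaves 0/1 in dest the value is formed as dest*2 + dest + 2, i.e. a
 * single "lea 2(dest,dest,2), dest" (illustration of the code below).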
10582 rtx tmp;
10583 int nops;
10585 out = emit_store_flag (out, code, ix86_compare_op0,
10586 ix86_compare_op1, VOIDmode, 0, 1);
10588 nops = 0;
10589 /* On x86_64 the lea instruction operates on Pmode, so we need
10590 to get the arithmetic done in the proper mode to match. */
10591 if (diff == 1)
10592 tmp = copy_rtx (out);
10593 else
10595 rtx out1;
10596 out1 = copy_rtx (out);
10597 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
10598 nops++;
10599 if (diff & 1)
10601 tmp = gen_rtx_PLUS (mode, tmp, out1);
10602 nops++;
10605 if (cf != 0)
10607 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
10608 nops++;
10610 if (!rtx_equal_p (tmp, out))
10612 if (nops == 1)
10613 out = force_operand (tmp, copy_rtx (out));
10614 else
10615 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
10617 if (!rtx_equal_p (out, operands[0]))
10618 emit_move_insn (operands[0], copy_rtx (out));
10620 return 1; /* DONE */
10624 * General case: Jumpful:
10625 * xorl dest,dest cmpl op1, op2
10626 * cmpl op1, op2 movl ct, dest
10627 * setcc dest jcc 1f
10628 * decl dest movl cf, dest
10629 * andl (cf-ct),dest 1:
10630 * addl ct,dest
10632 * Size 20. Size 14.
10634 * This is reasonably steep, but branch mispredict costs are
10635 * high on modern cpus, so consider failing only if optimizing
10636 * for space.
10639 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10640 && BRANCH_COST >= 2)
10642 if (cf == 0)
10644 cf = ct;
10645 ct = 0;
10646 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10647 /* We may be reversing an unordered compare to a normal compare,
10648 which is not valid in general (we may convert a non-trapping
10649 condition to a trapping one); however, on i386 we currently
10650 emit all comparisons unordered. */
10651 code = reverse_condition_maybe_unordered (code);
10652 else
10654 code = reverse_condition (code);
10655 if (compare_code != UNKNOWN)
10656 compare_code = reverse_condition (compare_code);
10660 if (compare_code != UNKNOWN)
10662 /* notl op1 (if needed)
10663 sarl $31, op1
10664 andl (cf-ct), op1
10665 addl ct, op1
10667 For x < 0 (resp. x <= -1) there will be no notl,
10668 so if possible swap the constants to get rid of the
10669 complement.
10670 True/false will be -1/0 while code below (store flag
10671 followed by decrement) is 0/-1, so the constants need
10672 to be exchanged once more. */
10674 if (compare_code == GE || !cf)
10676 code = reverse_condition (code);
10677 compare_code = LT;
10679 else
10681 HOST_WIDE_INT tmp = cf;
10682 cf = ct;
10683 ct = tmp;
10686 out = emit_store_flag (out, code, ix86_compare_op0,
10687 ix86_compare_op1, VOIDmode, 0, -1);
10689 else
10691 out = emit_store_flag (out, code, ix86_compare_op0,
10692 ix86_compare_op1, VOIDmode, 0, 1);
10694 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
10695 copy_rtx (out), 1, OPTAB_DIRECT);
10698 out = expand_simple_binop (mode, AND, copy_rtx (out),
10699 gen_int_mode (cf - ct, mode),
10700 copy_rtx (out), 1, OPTAB_DIRECT);
10701 if (ct)
10702 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
10703 copy_rtx (out), 1, OPTAB_DIRECT);
10704 if (!rtx_equal_p (out, operands[0]))
10705 emit_move_insn (operands[0], copy_rtx (out));
10707 return 1; /* DONE */
10711 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10713 /* Try a few things more with specific constants and a variable. */
10715 optab op;
10716 rtx var, orig_out, out, tmp;
10718 if (BRANCH_COST <= 2)
10719 return 0; /* FAIL */
10721 /* If one of the two operands is an interesting constant, load a
10722 constant with the above and mask it in with a logical operation. */
10724 if (GET_CODE (operands[2]) == CONST_INT)
10726 var = operands[3];
10727 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
10728 operands[3] = constm1_rtx, op = and_optab;
10729 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
10730 operands[3] = const0_rtx, op = ior_optab;
10731 else
10732 return 0; /* FAIL */
10734 else if (GET_CODE (operands[3]) == CONST_INT)
10736 var = operands[2];
10737 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
10738 operands[2] = constm1_rtx, op = and_optab;
10739 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
10740 operands[2] = const0_rtx, op = ior_optab;
10741 else
10742 return 0; /* FAIL */
10744 else
10745 return 0; /* FAIL */
10747 orig_out = operands[0];
10748 tmp = gen_reg_rtx (mode);
10749 operands[0] = tmp;
10751 /* Recurse to get the constant loaded. */
10752 if (ix86_expand_int_movcc (operands) == 0)
10753 return 0; /* FAIL */
10755 /* Mask in the interesting variable. */
10756 out = expand_binop (mode, op, var, tmp, orig_out, 0,
10757 OPTAB_WIDEN);
10758 if (!rtx_equal_p (out, orig_out))
10759 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
10761 return 1; /* DONE */
10765 * For comparison with above,
10767 * movl cf,dest
10768 * movl ct,tmp
10769 * cmpl op1,op2
10770 * cmovcc tmp,dest
10772 * Size 15.
10775 if (! nonimmediate_operand (operands[2], mode))
10776 operands[2] = force_reg (mode, operands[2]);
10777 if (! nonimmediate_operand (operands[3], mode))
10778 operands[3] = force_reg (mode, operands[3]);
10780 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10782 rtx tmp = gen_reg_rtx (mode);
10783 emit_move_insn (tmp, operands[3]);
10784 operands[3] = tmp;
10786 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10788 rtx tmp = gen_reg_rtx (mode);
10789 emit_move_insn (tmp, operands[2]);
10790 operands[2] = tmp;
10793 if (! register_operand (operands[2], VOIDmode)
10794 && (mode == QImode
10795 || ! register_operand (operands[3], VOIDmode)))
10796 operands[2] = force_reg (mode, operands[2]);
10798 if (mode == QImode
10799 && ! register_operand (operands[3], VOIDmode))
10800 operands[3] = force_reg (mode, operands[3]);
10802 emit_insn (compare_seq);
10803 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10804 gen_rtx_IF_THEN_ELSE (mode,
10805 compare_op, operands[2],
10806 operands[3])));
10807 if (bypass_test)
10808 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10809 gen_rtx_IF_THEN_ELSE (mode,
10810 bypass_test,
10811 copy_rtx (operands[3]),
10812 copy_rtx (operands[0]))));
10813 if (second_test)
10814 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10815 gen_rtx_IF_THEN_ELSE (mode,
10816 second_test,
10817 copy_rtx (operands[2]),
10818 copy_rtx (operands[0]))));
10820 return 1; /* DONE */
10823 /* Swap, force into registers, or otherwise massage the two operands
10824 to an sse comparison with a mask result. Thus we differ a bit from
10825 ix86_prepare_fp_compare_args which expects to produce a flags result.
10827 The DEST operand exists to help determine whether to commute commutative
10828 operators. The POP0/POP1 operands are updated in place. The new
10829 comparison code is returned, or UNKNOWN if not implementable. */
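/* For reference: the SSE cmpss/cmpps predicates are eq, lt, le, unord,
   neq, nlt, nle and ord, so GE/GT/UNLE/UNLT have no direct encoding and
   must be handled by swapping the operands.  */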
10831 static enum rtx_code
10832 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
10833 rtx *pop0, rtx *pop1)
10835 rtx tmp;
10837 switch (code)
10839 case LTGT:
10840 case UNEQ:
10841 /* We have no LTGT as an operator. We could implement it with
10842 NE & ORDERED, but this requires an extra temporary. It's
10843 not clear that it's worth it. */
10844 return UNKNOWN;
10846 case LT:
10847 case LE:
10848 case UNGT:
10849 case UNGE:
10850 /* These are supported directly. */
10851 break;
10853 case EQ:
10854 case NE:
10855 case UNORDERED:
10856 case ORDERED:
10857 /* For commutative operators, try to canonicalize the destination
10858 operand to be first in the comparison - this helps reload to
10859 avoid extra moves. */
10860 if (!dest || !rtx_equal_p (dest, *pop1))
10861 break;
10862 /* FALLTHRU */
10864 case GE:
10865 case GT:
10866 case UNLE:
10867 case UNLT:
10868 /* These are not supported directly. Swap the comparison operands
10869 to transform into something that is supported. */
10870 tmp = *pop0;
10871 *pop0 = *pop1;
10872 *pop1 = tmp;
10873 code = swap_condition (code);
10874 break;
10876 default:
10877 gcc_unreachable ();
10880 return code;
10883 /* Detect conditional moves that exactly match min/max operational
10884 semantics. Note that this is IEEE safe, as long as we don't
10885 interchange the operands.
10887 Returns FALSE if this conditional move doesn't match a MIN/MAX,
10888 and TRUE if the operation is successful and instructions are emitted. */
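/* For reference: when the operands are unordered (NaN) or both are zeros,
   minss/maxss return the second source operand, which is why the operand
   order must be preserved for IEEE safety.  */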
10890 static bool
10891 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
10892 rtx cmp_op1, rtx if_true, rtx if_false)
10894 enum machine_mode mode;
10895 bool is_min;
10896 rtx tmp;
10898 if (code == LT)
10900 else if (code == UNGE)
10902 tmp = if_true;
10903 if_true = if_false;
10904 if_false = tmp;
10906 else
10907 return false;
10909 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
10910 is_min = true;
10911 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
10912 is_min = false;
10913 else
10914 return false;
10916 mode = GET_MODE (dest);
10918 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
10919 but MODE may be a vector mode and thus not appropriate. */
10920 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
10922 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
10923 rtvec v;
10925 if_true = force_reg (mode, if_true);
10926 v = gen_rtvec (2, if_true, if_false);
10927 tmp = gen_rtx_UNSPEC (mode, v, u);
10929 else
10931 code = is_min ? SMIN : SMAX;
10932 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
10935 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
10936 return true;
10939 /* Expand an sse vector comparison. Return the register with the result. */
10941 static rtx
10942 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
10943 rtx op_true, rtx op_false)
10945 enum machine_mode mode = GET_MODE (dest);
10946 rtx x;
10948 cmp_op0 = force_reg (mode, cmp_op0);
10949 if (!nonimmediate_operand (cmp_op1, mode))
10950 cmp_op1 = force_reg (mode, cmp_op1);
10952 if (optimize
10953 || reg_overlap_mentioned_p (dest, op_true)
10954 || reg_overlap_mentioned_p (dest, op_false))
10955 dest = gen_reg_rtx (mode);
10957 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
10958 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10960 return dest;
10963 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
10964 operations. This is used for both scalar and vector conditional moves. */
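/* I.e. dest = (cmp & op_true) | (~cmp & op_false), relying on the SSE
   compare having produced an all-ones/all-zeros mask per element.  */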
10966 static void
10967 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
10969 enum machine_mode mode = GET_MODE (dest);
10970 rtx t2, t3, x;
10972 if (op_false == CONST0_RTX (mode))
10974 op_true = force_reg (mode, op_true);
10975 x = gen_rtx_AND (mode, cmp, op_true);
10976 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10978 else if (op_true == CONST0_RTX (mode))
10980 op_false = force_reg (mode, op_false);
10981 x = gen_rtx_NOT (mode, cmp);
10982 x = gen_rtx_AND (mode, x, op_false);
10983 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10985 else
10987 op_true = force_reg (mode, op_true);
10988 op_false = force_reg (mode, op_false);
10990 t2 = gen_reg_rtx (mode);
10991 if (optimize)
10992 t3 = gen_reg_rtx (mode);
10993 else
10994 t3 = dest;
10996 x = gen_rtx_AND (mode, op_true, cmp);
10997 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
10999 x = gen_rtx_NOT (mode, cmp);
11000 x = gen_rtx_AND (mode, x, op_false);
11001 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
11003 x = gen_rtx_IOR (mode, t3, t2);
11004 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11008 /* Expand a floating-point conditional move. Return true if successful. */
11011 ix86_expand_fp_movcc (rtx operands[])
11013 enum machine_mode mode = GET_MODE (operands[0]);
11014 enum rtx_code code = GET_CODE (operands[1]);
11015 rtx tmp, compare_op, second_test, bypass_test;
11017 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
11019 enum machine_mode cmode;
11021 /* Since we've no cmove for sse registers, don't force bad register
11022 allocation just to gain access to it. Deny movcc when the
11023 comparison mode doesn't match the move mode. */
11024 cmode = GET_MODE (ix86_compare_op0);
11025 if (cmode == VOIDmode)
11026 cmode = GET_MODE (ix86_compare_op1);
11027 if (cmode != mode)
11028 return 0;
11030 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11031 &ix86_compare_op0,
11032 &ix86_compare_op1);
11033 if (code == UNKNOWN)
11034 return 0;
11036 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
11037 ix86_compare_op1, operands[2],
11038 operands[3]))
11039 return 1;
11041 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
11042 ix86_compare_op1, operands[2], operands[3]);
11043 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
11044 return 1;
11047 /* The floating point conditional move instructions don't directly
11048 support conditions resulting from a signed integer comparison. */
11050 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11055 if (!fcmov_comparison_operator (compare_op, VOIDmode))
11057 gcc_assert (!second_test && !bypass_test);
11058 tmp = gen_reg_rtx (QImode);
11059 ix86_expand_setcc (code, tmp);
11060 code = NE;
11061 ix86_compare_op0 = tmp;
11062 ix86_compare_op1 = const0_rtx;
11063 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11065 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11067 tmp = gen_reg_rtx (mode);
11068 emit_move_insn (tmp, operands[3]);
11069 operands[3] = tmp;
11071 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11073 tmp = gen_reg_rtx (mode);
11074 emit_move_insn (tmp, operands[2]);
11075 operands[2] = tmp;
11078 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11079 gen_rtx_IF_THEN_ELSE (mode, compare_op,
11080 operands[2], operands[3])));
11081 if (bypass_test)
11082 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11083 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
11084 operands[3], operands[0])));
11085 if (second_test)
11086 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11087 gen_rtx_IF_THEN_ELSE (mode, second_test,
11088 operands[2], operands[0])));
11090 return 1;
11093 /* Expand a floating-point vector conditional move; a vcond operation
11094 rather than a movcc operation. */
11096 bool
11097 ix86_expand_fp_vcond (rtx operands[])
11099 enum rtx_code code = GET_CODE (operands[3]);
11100 rtx cmp;
11102 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11103 &operands[4], &operands[5]);
11104 if (code == UNKNOWN)
11105 return false;
11107 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
11108 operands[5], operands[1], operands[2]))
11109 return true;
11111 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
11112 operands[1], operands[2]);
11113 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
11114 return true;
11117 /* Expand a signed integral vector conditional move. */
11119 bool
11120 ix86_expand_int_vcond (rtx operands[])
11122 enum machine_mode mode = GET_MODE (operands[0]);
11123 enum rtx_code code = GET_CODE (operands[3]);
11124 bool negate = false;
11125 rtx x, cop0, cop1;
11127 cop0 = operands[4];
11128 cop1 = operands[5];
11130 /* Canonicalize the comparison to EQ, GT, GTU. */
11131 switch (code)
11133 case EQ:
11134 case GT:
11135 case GTU:
11136 break;
11138 case NE:
11139 case LE:
11140 case LEU:
11141 code = reverse_condition (code);
11142 negate = true;
11143 break;
11145 case GE:
11146 case GEU:
11147 code = reverse_condition (code);
11148 negate = true;
11149 /* FALLTHRU */
11151 case LT:
11152 case LTU:
11153 code = swap_condition (code);
11154 x = cop0, cop0 = cop1, cop1 = x;
11155 break;
11157 default:
11158 gcc_unreachable ();
11161 /* Unsigned parallel compare is not supported by the hardware. Play some
11162 tricks to turn this into a signed comparison against 0. */
11163 if (code == GTU)
11165 switch (mode)
11167 case V4SImode:
11169 rtx t1, t2, mask;
11171 /* Perform a parallel modulo subtraction. */
11172 t1 = gen_reg_rtx (mode);
11173 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11175 /* Extract the original sign bit of op0. */
11176 mask = GEN_INT (-0x80000000);
11177 mask = gen_rtx_CONST_VECTOR (mode,
11178 gen_rtvec (4, mask, mask, mask, mask));
11179 mask = force_reg (mode, mask);
11180 t2 = gen_reg_rtx (mode);
11181 emit_insn (gen_andv4si3 (t2, cop0, mask));
11183 /* XOR it back into the result of the subtraction. This results
11184 in the sign bit set iff we saw unsigned underflow. */
11185 x = gen_reg_rtx (mode);
11186 emit_insn (gen_xorv4si3 (x, t1, t2));
11188 code = GT;
11190 break;
11192 case V16QImode:
11193 case V8HImode:
11194 /* Perform a parallel unsigned saturating subtraction. */
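/* (The saturating difference a -us b is zero exactly when a <= b
   unsigned, so a > b reduces to comparing that difference against zero
   and negating the result.)  */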
11195 x = gen_reg_rtx (mode);
11196 emit_insn (gen_rtx_SET (VOIDmode, x,
11197 gen_rtx_US_MINUS (mode, cop0, cop1)));
11199 code = EQ;
11200 negate = !negate;
11201 break;
11203 default:
11204 gcc_unreachable ();
11207 cop0 = x;
11208 cop1 = CONST0_RTX (mode);
11211 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
11212 operands[1+negate], operands[2-negate]);
11214 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
11215 operands[2-negate]);
11216 return true;
11219 /* Expand conditional increment or decrement using adc/sbb instructions.
11220 The default case using setcc followed by the conditional move can be
11221 done by generic code. */
11223 ix86_expand_int_addcc (rtx operands[])
11225 enum rtx_code code = GET_CODE (operands[1]);
11226 rtx compare_op;
11227 rtx val = const0_rtx;
11228 bool fpcmp = false;
11229 enum machine_mode mode = GET_MODE (operands[0]);
11231 if (operands[3] != const1_rtx
11232 && operands[3] != constm1_rtx)
11233 return 0;
11234 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11235 ix86_compare_op1, &compare_op))
11236 return 0;
11237 code = GET_CODE (compare_op);
11239 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11240 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11242 fpcmp = true;
11243 code = ix86_fp_compare_code_to_integer (code);
11246 if (code != LTU)
11248 val = constm1_rtx;
11249 if (fpcmp)
11250 PUT_CODE (compare_op,
11251 reverse_condition_maybe_unordered
11252 (GET_CODE (compare_op)));
11253 else
11254 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11256 PUT_MODE (compare_op, mode);
11258 /* Construct either adc or sbb insn. */
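/* E.g. "x = a + (b <u c)" comes out as roughly "cmp c, b; adc $0, a" -
   the carry from the compare feeds the adc - while the decrement forms
   use sbb instead.  (Illustration; the actual patterns are chosen below.)  */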
11259 if ((code == LTU) == (operands[3] == constm1_rtx))
11261 switch (GET_MODE (operands[0]))
11263 case QImode:
11264 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
11265 break;
11266 case HImode:
11267 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
11268 break;
11269 case SImode:
11270 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
11271 break;
11272 case DImode:
11273 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
11274 break;
11275 default:
11276 gcc_unreachable ();
11279 else
11281 switch (GET_MODE (operands[0]))
11283 case QImode:
11284 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
11285 break;
11286 case HImode:
11287 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
11288 break;
11289 case SImode:
11290 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
11291 break;
11292 case DImode:
11293 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
11294 break;
11295 default:
11296 gcc_unreachable ();
11299 return 1; /* DONE */
11303 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
11304 works for floating point parameters and non-offsettable memories.
11305 For pushes, it returns just stack offsets; the values will be saved
11306 in the right order. At most three parts are generated. */
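/* E.g. on ia32 a DFmode value splits into two SImode parts and an XFmode
   value into three; on 64-bit targets XFmode splits into a DImode part
   plus an SImode upper part.  */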
11308 static int
11309 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
11311 int size;
11313 if (!TARGET_64BIT)
11314 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
11315 else
11316 size = (GET_MODE_SIZE (mode) + 4) / 8;
11318 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
11319 gcc_assert (size >= 2 && size <= 3);
11321 /* Optimize constant pool reference to immediates. This is used by fp
11322 moves, that force all constants to memory to allow combining. */
11323 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
11325 rtx tmp = maybe_get_pool_constant (operand);
11326 if (tmp)
11327 operand = tmp;
11330 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
11332 /* The only non-offsettable memories we handle are pushes. */
11333 int ok = push_operand (operand, VOIDmode);
11335 gcc_assert (ok);
11337 operand = copy_rtx (operand);
11338 PUT_MODE (operand, Pmode);
11339 parts[0] = parts[1] = parts[2] = operand;
11340 return size;
11343 if (GET_CODE (operand) == CONST_VECTOR)
11345 enum machine_mode imode = int_mode_for_mode (mode);
11346 /* Caution: if we looked through a constant pool memory above,
11347 the operand may actually have a different mode now. That's
11348 ok, since we want to pun this all the way back to an integer. */
11349 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
11350 gcc_assert (operand != NULL);
11351 mode = imode;
11354 if (!TARGET_64BIT)
11356 if (mode == DImode)
11357 split_di (&operand, 1, &parts[0], &parts[1]);
11358 else
11360 if (REG_P (operand))
11362 gcc_assert (reload_completed);
11363 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
11364 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
11365 if (size == 3)
11366 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
11368 else if (offsettable_memref_p (operand))
11370 operand = adjust_address (operand, SImode, 0);
11371 parts[0] = operand;
11372 parts[1] = adjust_address (operand, SImode, 4);
11373 if (size == 3)
11374 parts[2] = adjust_address (operand, SImode, 8);
11376 else if (GET_CODE (operand) == CONST_DOUBLE)
11378 REAL_VALUE_TYPE r;
11379 long l[4];
11381 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11382 switch (mode)
11384 case XFmode:
11385 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
11386 parts[2] = gen_int_mode (l[2], SImode);
11387 break;
11388 case DFmode:
11389 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
11390 break;
11391 default:
11392 gcc_unreachable ();
11394 parts[1] = gen_int_mode (l[1], SImode);
11395 parts[0] = gen_int_mode (l[0], SImode);
11397 else
11398 gcc_unreachable ();
11401 else
11403 if (mode == TImode)
11404 split_ti (&operand, 1, &parts[0], &parts[1]);
11405 if (mode == XFmode || mode == TFmode)
11407 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
11408 if (REG_P (operand))
11410 gcc_assert (reload_completed);
11411 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
11412 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
11414 else if (offsettable_memref_p (operand))
11416 operand = adjust_address (operand, DImode, 0);
11417 parts[0] = operand;
11418 parts[1] = adjust_address (operand, upper_mode, 8);
11420 else if (GET_CODE (operand) == CONST_DOUBLE)
11422 REAL_VALUE_TYPE r;
11423 long l[4];
11425 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11426 real_to_target (l, &r, mode);
11428 /* Do not use shift by 32 to avoid warning on 32bit systems. */
11429 if (HOST_BITS_PER_WIDE_INT >= 64)
11430 parts[0]
11431 = gen_int_mode
11432 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
11433 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
11434 DImode);
11435 else
11436 parts[0] = immed_double_const (l[0], l[1], DImode);
11438 if (upper_mode == SImode)
11439 parts[1] = gen_int_mode (l[2], SImode);
11440 else if (HOST_BITS_PER_WIDE_INT >= 64)
11441 parts[1]
11442 = gen_int_mode
11443 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
11444 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
11445 DImode);
11446 else
11447 parts[1] = immed_double_const (l[2], l[3], DImode);
11449 else
11450 gcc_unreachable ();
11454 return size;
11457 /* Emit insns to perform a move or push of DI, DF, and XF values.
11458 All required insns are emitted here. Operands 2-4 are filled with
11459 the destination parts in the correct order; operands 5-7 with the
11460 corresponding source parts. */
11462 void
11463 ix86_split_long_move (rtx operands[])
11465 rtx part[2][3];
11466 int nparts;
11467 int push = 0;
11468 int collisions = 0;
11469 enum machine_mode mode = GET_MODE (operands[0]);
11471 /* The DFmode expanders may ask us to move a double.
11472 For a 64bit target this is a single move. By hiding that fact
11473 here we simplify the i386.md splitters. */
11474 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
11476 /* Optimize constant pool reference to immediates. This is used by
11477 fp moves, that force all constants to memory to allow combining. */
11479 if (GET_CODE (operands[1]) == MEM
11480 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
11481 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
11482 operands[1] = get_pool_constant (XEXP (operands[1], 0));
11483 if (push_operand (operands[0], VOIDmode))
11485 operands[0] = copy_rtx (operands[0]);
11486 PUT_MODE (operands[0], Pmode);
11488 else
11489 operands[0] = gen_lowpart (DImode, operands[0]);
11490 operands[1] = gen_lowpart (DImode, operands[1]);
11491 emit_move_insn (operands[0], operands[1]);
11492 return;
11495 /* The only non-offsettable memory we handle is push. */
11496 if (push_operand (operands[0], VOIDmode))
11497 push = 1;
11498 else
11499 gcc_assert (GET_CODE (operands[0]) != MEM
11500 || offsettable_memref_p (operands[0]));
11502 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
11503 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
11505 /* When emitting a push, take care of source operands on the stack. */
11506 if (push && GET_CODE (operands[1]) == MEM
11507 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
11509 if (nparts == 3)
11510 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
11511 XEXP (part[1][2], 0));
11512 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
11513 XEXP (part[1][1], 0));
11516 /* We need to do the copy in the right order in case an address register
11517 of the source overlaps the destination. */
11518 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
11520 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
11521 collisions++;
11522 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11523 collisions++;
11524 if (nparts == 3
11525 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
11526 collisions++;
11528 /* Collision in the middle part can be handled by reordering. */
11529 if (collisions == 1 && nparts == 3
11530 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11532 rtx tmp;
11533 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
11534 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
11537 /* If there are more collisions, we can't handle it by reordering.
11538 Do an lea to the last part and use only one colliding move. */
11539 else if (collisions > 1)
11541 rtx base;
11543 collisions = 1;
11545 base = part[0][nparts - 1];
11547 /* Handle the case when the last part isn't valid for lea.
11548 Happens in 64-bit mode storing the 12-byte XFmode. */
11549 if (GET_MODE (base) != Pmode)
11550 base = gen_rtx_REG (Pmode, REGNO (base));
11552 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
11553 part[1][0] = replace_equiv_address (part[1][0], base);
11554 part[1][1] = replace_equiv_address (part[1][1],
11555 plus_constant (base, UNITS_PER_WORD));
11556 if (nparts == 3)
11557 part[1][2] = replace_equiv_address (part[1][2],
11558 plus_constant (base, 8));
11562 if (push)
11564 if (!TARGET_64BIT)
11566 if (nparts == 3)
11568 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
11569 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
11570 emit_move_insn (part[0][2], part[1][2]);
11573 else
11575 /* In 64bit mode we don't have a 32bit push available. If this is a
11576 register, that is OK - we will just use the larger counterpart. We also
11577 retype memory - this comes from an attempt to avoid a REX prefix on
11578 moving the second half of a TFmode value. */
11579 if (GET_MODE (part[1][1]) == SImode)
11581 switch (GET_CODE (part[1][1]))
11583 case MEM:
11584 part[1][1] = adjust_address (part[1][1], DImode, 0);
11585 break;
11587 case REG:
11588 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
11589 break;
11591 default:
11592 gcc_unreachable ();
11595 if (GET_MODE (part[1][0]) == SImode)
11596 part[1][0] = part[1][1];
11599 emit_move_insn (part[0][1], part[1][1]);
11600 emit_move_insn (part[0][0], part[1][0]);
11601 return;
11604 /* Choose the correct order so as not to overwrite the source before it is copied. */
11605 if ((REG_P (part[0][0])
11606 && REG_P (part[1][1])
11607 && (REGNO (part[0][0]) == REGNO (part[1][1])
11608 || (nparts == 3
11609 && REGNO (part[0][0]) == REGNO (part[1][2]))))
11610 || (collisions > 0
11611 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
11613 if (nparts == 3)
11615 operands[2] = part[0][2];
11616 operands[3] = part[0][1];
11617 operands[4] = part[0][0];
11618 operands[5] = part[1][2];
11619 operands[6] = part[1][1];
11620 operands[7] = part[1][0];
11622 else
11624 operands[2] = part[0][1];
11625 operands[3] = part[0][0];
11626 operands[5] = part[1][1];
11627 operands[6] = part[1][0];
11630 else
11632 if (nparts == 3)
11634 operands[2] = part[0][0];
11635 operands[3] = part[0][1];
11636 operands[4] = part[0][2];
11637 operands[5] = part[1][0];
11638 operands[6] = part[1][1];
11639 operands[7] = part[1][2];
11641 else
11643 operands[2] = part[0][0];
11644 operands[3] = part[0][1];
11645 operands[5] = part[1][0];
11646 operands[6] = part[1][1];
11650 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
11651 if (optimize_size)
11653 if (GET_CODE (operands[5]) == CONST_INT
11654 && operands[5] != const0_rtx
11655 && REG_P (operands[2]))
11657 if (GET_CODE (operands[6]) == CONST_INT
11658 && INTVAL (operands[6]) == INTVAL (operands[5]))
11659 operands[6] = operands[2];
11661 if (nparts == 3
11662 && GET_CODE (operands[7]) == CONST_INT
11663 && INTVAL (operands[7]) == INTVAL (operands[5]))
11664 operands[7] = operands[2];
11667 if (nparts == 3
11668 && GET_CODE (operands[6]) == CONST_INT
11669 && operands[6] != const0_rtx
11670 && REG_P (operands[3])
11671 && GET_CODE (operands[7]) == CONST_INT
11672 && INTVAL (operands[7]) == INTVAL (operands[6]))
11673 operands[7] = operands[3];
11676 emit_move_insn (operands[2], operands[5]);
11677 emit_move_insn (operands[3], operands[6]);
11678 if (nparts == 3)
11679 emit_move_insn (operands[4], operands[7]);
11681 return;
11684 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
11685 left shift by a constant, either using a single shift or
11686 a sequence of add instructions. */
11688 static void
11689 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
11691 if (count == 1)
11693 emit_insn ((mode == DImode
11694 ? gen_addsi3
11695 : gen_adddi3) (operand, operand, operand));
11697 else if (!optimize_size
11698 && count * ix86_cost->add <= ix86_cost->shift_const)
11700 int i;
11701 for (i = 0; i < count; i++)
11703 emit_insn ((mode == DImode
11704 ? gen_addsi3
11705 : gen_adddi3) (operand, operand, operand));
11708 else
11709 emit_insn ((mode == DImode
11710 ? gen_ashlsi3
11711 : gen_ashldi3) (operand, operand, GEN_INT (count)));
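/* Illustrative sketch (not part of the port): the add-chain case above relies
   on the fact that adding a value to itself doubles it, so COUNT
   self-additions are equivalent to a left shift by COUNT.  A hypothetical
   helper showing the identity in plain C on an unsigned int:  */

static unsigned int
sketch_ashl_by_adds (unsigned int x, int count)
{
  /* Each self-addition doubles X, so after COUNT iterations X has been
     shifted left by COUNT bits (modulo the width of unsigned int).  */
  while (count-- > 0)
    x += x;
  return x;
}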
11714 void
11715 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
11717 rtx low[2], high[2];
11718 int count;
11719 const int single_width = mode == DImode ? 32 : 64;
11721 if (GET_CODE (operands[2]) == CONST_INT)
11723 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11724 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11726 if (count >= single_width)
11728 emit_move_insn (high[0], low[1]);
11729 emit_move_insn (low[0], const0_rtx);
11731 if (count > single_width)
11732 ix86_expand_ashl_const (high[0], count - single_width, mode);
11734 else
11736 if (!rtx_equal_p (operands[0], operands[1]))
11737 emit_move_insn (operands[0], operands[1]);
11738 emit_insn ((mode == DImode
11739 ? gen_x86_shld_1
11740 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
11741 ix86_expand_ashl_const (low[0], count, mode);
11743 return;
11746 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11748 if (operands[1] == const1_rtx)
11750 /* Assuming we've chosen QImode-capable registers, then 1 << N
11751 can be done with two 32/64-bit shifts, no branches, no cmoves. */
11752 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
11754 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
11756 ix86_expand_clear (low[0]);
11757 ix86_expand_clear (high[0]);
11758 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
11760 d = gen_lowpart (QImode, low[0]);
11761 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11762 s = gen_rtx_EQ (QImode, flags, const0_rtx);
11763 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11765 d = gen_lowpart (QImode, high[0]);
11766 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11767 s = gen_rtx_NE (QImode, flags, const0_rtx);
11768 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11771 /* Otherwise, we can get the same results by manually performing
11772 a bit extract operation on bit 5/6, and then performing the two
11773 shifts. The two methods of getting 0/1 into low/high are exactly
11774 the same size. Avoiding the shift in the bit extract case helps
11775 pentium4 a bit; no one else seems to care much either way. */
11776 else
11778 rtx x;
11780 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
11781 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
11782 else
11783 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
11784 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
11786 emit_insn ((mode == DImode
11787 ? gen_lshrsi3
11788 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
11789 emit_insn ((mode == DImode
11790 ? gen_andsi3
11791 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
11792 emit_move_insn (low[0], high[0]);
11793 emit_insn ((mode == DImode
11794 ? gen_xorsi3
11795 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
11798 emit_insn ((mode == DImode
11799 ? gen_ashlsi3
11800 : gen_ashldi3) (low[0], low[0], operands[2]));
11801 emit_insn ((mode == DImode
11802 ? gen_ashlsi3
11803 : gen_ashldi3) (high[0], high[0], operands[2]));
11804 return;
11807 if (operands[1] == constm1_rtx)
11809 /* For -1 << N, we can avoid the shld instruction, because we
11810 know that we're shifting 0...31/63 ones into a -1. */
11811 emit_move_insn (low[0], constm1_rtx);
11812 if (optimize_size)
11813 emit_move_insn (high[0], low[0]);
11814 else
11815 emit_move_insn (high[0], constm1_rtx);
11817 else
11819 if (!rtx_equal_p (operands[0], operands[1]))
11820 emit_move_insn (operands[0], operands[1]);
11822 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11823 emit_insn ((mode == DImode
11824 ? gen_x86_shld_1
11825 : gen_x86_64_shld) (high[0], low[0], operands[2]));
11828 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
11830 if (TARGET_CMOVE && scratch)
11832 ix86_expand_clear (scratch);
11833 emit_insn ((mode == DImode
11834 ? gen_x86_shift_adj_1
11835 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
11837 else
11838 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
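/* Illustrative sketch (not part of the port): what the split above computes
   for a DImode value held in two 32-bit halves, with the shift count reduced
   modulo 64 as in the constant case above.  A hypothetical helper:  */

static void
sketch_split_ashl (unsigned int *lo, unsigned int *hi, int count)
{
  count &= 63;
  if (count >= 32)
    {
      /* The low half moves entirely into the high half and the low half
         becomes zero, matching the count >= single_width path.  */
      *hi = *lo << (count - 32);
      *lo = 0;
    }
  else if (count > 0)
    {
      /* This is what shld computes: the high half also receives the bits
         shifted out of the low half.  */
      *hi = (*hi << count) | (*lo >> (32 - count));
      *lo <<= count;
    }
}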
11841 void
11842 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
11844 rtx low[2], high[2];
11845 int count;
11846 const int single_width = mode == DImode ? 32 : 64;
11848 if (GET_CODE (operands[2]) == CONST_INT)
11850 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11851 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11853 if (count == single_width * 2 - 1)
11855 emit_move_insn (high[0], high[1]);
11856 emit_insn ((mode == DImode
11857 ? gen_ashrsi3
11858 : gen_ashrdi3) (high[0], high[0],
11859 GEN_INT (single_width - 1)));
11860 emit_move_insn (low[0], high[0]);
11863 else if (count >= single_width)
11865 emit_move_insn (low[0], high[1]);
11866 emit_move_insn (high[0], low[0]);
11867 emit_insn ((mode == DImode
11868 ? gen_ashrsi3
11869 : gen_ashrdi3) (high[0], high[0],
11870 GEN_INT (single_width - 1)));
11871 if (count > single_width)
11872 emit_insn ((mode == DImode
11873 ? gen_ashrsi3
11874 : gen_ashrdi3) (low[0], low[0],
11875 GEN_INT (count - single_width)));
11877 else
11879 if (!rtx_equal_p (operands[0], operands[1]))
11880 emit_move_insn (operands[0], operands[1]);
11881 emit_insn ((mode == DImode
11882 ? gen_x86_shrd_1
11883 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11884 emit_insn ((mode == DImode
11885 ? gen_ashrsi3
11886 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
11889 else
11891 if (!rtx_equal_p (operands[0], operands[1]))
11892 emit_move_insn (operands[0], operands[1]);
11894 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11896 emit_insn ((mode == DImode
11897 ? gen_x86_shrd_1
11898 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11899 emit_insn ((mode == DImode
11900 ? gen_ashrsi3
11901 : gen_ashrdi3) (high[0], high[0], operands[2]));
11903 if (TARGET_CMOVE && scratch)
11905 emit_move_insn (scratch, high[0]);
11906 emit_insn ((mode == DImode
11907 ? gen_ashrsi3
11908 : gen_ashrdi3) (scratch, scratch,
11909 GEN_INT (single_width - 1)));
11910 emit_insn ((mode == DImode
11911 ? gen_x86_shift_adj_1
11912 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11913 scratch));
11915 else
11916 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
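/* Worked example (illustrative): shifting the DImode value 0x8000000000000000
   arithmetically right by 40 takes the count >= single_width path above:
   both result halves start from the old high word, the new high half is
   sign-filled by the >> 31 to 0xffffffff, and the low half is then shifted
   by 40 - 32 = 8 to 0xff800000, giving 0xffffffffff800000 as expected.  */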
11920 void
11921 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
11923 rtx low[2], high[2];
11924 int count;
11925 const int single_width = mode == DImode ? 32 : 64;
11927 if (GET_CODE (operands[2]) == CONST_INT)
11929 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11930 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11932 if (count >= single_width)
11934 emit_move_insn (low[0], high[1]);
11935 ix86_expand_clear (high[0]);
11937 if (count > single_width)
11938 emit_insn ((mode == DImode
11939 ? gen_lshrsi3
11940 : gen_lshrdi3) (low[0], low[0],
11941 GEN_INT (count - single_width)));
11943 else
11945 if (!rtx_equal_p (operands[0], operands[1]))
11946 emit_move_insn (operands[0], operands[1]);
11947 emit_insn ((mode == DImode
11948 ? gen_x86_shrd_1
11949 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11950 emit_insn ((mode == DImode
11951 ? gen_lshrsi3
11952 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
11955 else
11957 if (!rtx_equal_p (operands[0], operands[1]))
11958 emit_move_insn (operands[0], operands[1]);
11960 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11962 emit_insn ((mode == DImode
11963 ? gen_x86_shrd_1
11964 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11965 emit_insn ((mode == DImode
11966 ? gen_lshrsi3
11967 : gen_lshrdi3) (high[0], high[0], operands[2]));
11969 /* Heh. By reversing the arguments, we can reuse this pattern. */
11970 if (TARGET_CMOVE && scratch)
11972 ix86_expand_clear (scratch);
11973 emit_insn ((mode == DImode
11974 ? gen_x86_shift_adj_1
11975 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11976 scratch));
11978 else
11979 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
11983 /* Helper function for the string operations below. Test whether VARIABLE
11984 is aligned to VALUE bytes; if it is, jump to the label that is returned. */
11985 static rtx
11986 ix86_expand_aligntest (rtx variable, int value)
11988 rtx label = gen_label_rtx ();
11989 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
11990 if (GET_MODE (variable) == DImode)
11991 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
11992 else
11993 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
11994 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
11995 1, label);
11996 return label;
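/* Usage note (illustrative): callers typically write

     rtx label = ix86_expand_aligntest (destreg, 1);
     ... emit the one-byte fix-up ...
     emit_label (label);

   so the fix-up is skipped whenever (DESTREG & 1) == 0, because the
   comparison above branches to the returned label on equality with zero.  */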
11999 /* Adjust COUNTER by the VALUE. */
12000 static void
12001 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
12003 if (GET_MODE (countreg) == DImode)
12004 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
12005 else
12006 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
12009 /* Zero extend possibly SImode EXP to Pmode register. */
12010 rtx
12011 ix86_zero_extend_to_Pmode (rtx exp)
12013 rtx r;
12014 if (GET_MODE (exp) == VOIDmode)
12015 return force_reg (Pmode, exp);
12016 if (GET_MODE (exp) == Pmode)
12017 return copy_to_mode_reg (Pmode, exp);
12018 r = gen_reg_rtx (Pmode);
12019 emit_insn (gen_zero_extendsidi2 (r, exp));
12020 return r;
12023 /* Expand string move (memcpy) operation. Use i386 string operations when
12024 profitable. expand_clrmem contains similar code. */
12025 int
12026 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
12028 rtx srcreg, destreg, countreg, srcexp, destexp;
12029 enum machine_mode counter_mode;
12030 HOST_WIDE_INT align = 0;
12031 unsigned HOST_WIDE_INT count = 0;
12033 if (GET_CODE (align_exp) == CONST_INT)
12034 align = INTVAL (align_exp);
12036 /* Can't use any of this if the user has appropriated esi or edi. */
12037 if (global_regs[4] || global_regs[5])
12038 return 0;
12040 /* This simple hack avoids all inlining code and simplifies code below. */
12041 if (!TARGET_ALIGN_STRINGOPS)
12042 align = 64;
12044 if (GET_CODE (count_exp) == CONST_INT)
12046 count = INTVAL (count_exp);
12047 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
12048 return 0;
12051 /* Figure out the proper mode for the counter. For 32-bit targets it is
12052 always SImode; for 64-bit targets use SImode when possible, otherwise DImode.
12053 Set count to the number of bytes copied when known at compile time. */
12054 if (!TARGET_64BIT
12055 || GET_MODE (count_exp) == SImode
12056 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
12057 counter_mode = SImode;
12058 else
12059 counter_mode = DImode;
12061 gcc_assert (counter_mode == SImode || counter_mode == DImode);
12063 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
12064 if (destreg != XEXP (dst, 0))
12065 dst = replace_equiv_address_nv (dst, destreg);
12066 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
12067 if (srcreg != XEXP (src, 0))
12068 src = replace_equiv_address_nv (src, srcreg);
12070 /* When optimizing for size, emit a simple rep ; movsb instruction for
12071 counts not divisible by 4, except when the (movsl;)*(movsw;)?(movsb;)?
12072 sequence is shorter than mov{b,l} $count, %{ecx,cl}; rep; movsb.
12073 The size of the (movsl;)*(movsw;)?(movsb;)? sequence is
12074 count / 4 + (count & 3); the other sequence is either 4 or 7 bytes,
12075 but we don't know whether the upper 24 (resp. 56) bits of %ecx will be
12076 known to be zero or not. The rep; movsb sequence causes higher
12077 register pressure though, so take that into account. */
12079 if ((!optimize || optimize_size)
12080 && (count == 0
12081 || ((count & 0x03)
12082 && (!optimize_size
12083 || count > 5 * 4
12084 || (count & 3) + count / 4 > 6))))
12086 emit_insn (gen_cld ());
12087 countreg = ix86_zero_extend_to_Pmode (count_exp);
12088 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
12089 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
12090 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
12091 destexp, srcexp));
12094 /* For constant aligned (or small unaligned) copies use rep movsl
12095 followed by code copying the rest. For PentiumPro ensure 8 byte
12096 alignment to allow rep movsl acceleration. */
12098 else if (count != 0
12099 && (align >= 8
12100 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
12101 || optimize_size || count < (unsigned int) 64))
12103 unsigned HOST_WIDE_INT offset = 0;
12104 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
12105 rtx srcmem, dstmem;
12107 emit_insn (gen_cld ());
12108 if (count & ~(size - 1))
12110 if ((TARGET_SINGLE_STRINGOP || optimize_size) && count < 5 * 4)
12112 enum machine_mode movs_mode = size == 4 ? SImode : DImode;
12114 while (offset < (count & ~(size - 1)))
12116 srcmem = adjust_automodify_address_nv (src, movs_mode,
12117 srcreg, offset);
12118 dstmem = adjust_automodify_address_nv (dst, movs_mode,
12119 destreg, offset);
12120 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12121 offset += size;
12124 else
12126 countreg = GEN_INT ((count >> (size == 4 ? 2 : 3))
12127 & (TARGET_64BIT ? -1 : 0x3fffffff));
12128 countreg = copy_to_mode_reg (counter_mode, countreg);
12129 countreg = ix86_zero_extend_to_Pmode (countreg);
12131 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12132 GEN_INT (size == 4 ? 2 : 3));
12133 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
12134 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12136 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
12137 countreg, destexp, srcexp));
12138 offset = count & ~(size - 1);
12141 if (size == 8 && (count & 0x04))
12143 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
12144 offset);
12145 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
12146 offset);
12147 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12148 offset += 4;
12150 if (count & 0x02)
12152 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
12153 offset);
12154 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
12155 offset);
12156 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12157 offset += 2;
12159 if (count & 0x01)
12161 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
12162 offset);
12163 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
12164 offset);
12165 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12168 /* The generic code based on the glibc implementation:
12169 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
12170 allowing accelerated copying there)
12171 - copy the data using rep movsl
12172 - copy the rest. */
12173 else
12175 rtx countreg2;
12176 rtx label = NULL;
12177 rtx srcmem, dstmem;
12178 int desired_alignment = (TARGET_PENTIUMPRO
12179 && (count == 0 || count >= (unsigned int) 260)
12180 ? 8 : UNITS_PER_WORD);
12181 /* Get rid of MEM_OFFSETs, they won't be accurate. */
12182 dst = change_address (dst, BLKmode, destreg);
12183 src = change_address (src, BLKmode, srcreg);
12185 /* In case we don't know anything about the alignment, default to the
12186 library version, since it is usually equally fast and results in
12187 shorter code.
12189 Also emit the call when we know that the count is large and the call
12190 overhead will not be important. */
12191 if (!TARGET_INLINE_ALL_STRINGOPS
12192 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
12193 return 0;
12195 if (TARGET_SINGLE_STRINGOP)
12196 emit_insn (gen_cld ());
12198 countreg2 = gen_reg_rtx (Pmode);
12199 countreg = copy_to_mode_reg (counter_mode, count_exp);
12201 /* We don't use loops to align destination and to copy parts smaller
12202 than 4 bytes, because gcc is able to optimize such code better (in
12203 the case the destination or the count really is aligned, gcc is often
12204 able to predict the branches) and also it is friendlier to the
12205 hardware branch prediction.
12207 Using loops is beneficial for the generic case, because we can
12208 handle small counts using the loops. Many CPUs (such as Athlon)
12209 have large REP prefix setup costs.
12211 This is quite costly. Maybe we can revisit this decision later or
12212 add some customizability to this code. */
12214 if (count == 0 && align < desired_alignment)
12216 label = gen_label_rtx ();
12217 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
12218 LEU, 0, counter_mode, 1, label);
12220 if (align <= 1)
12222 rtx label = ix86_expand_aligntest (destreg, 1);
12223 srcmem = change_address (src, QImode, srcreg);
12224 dstmem = change_address (dst, QImode, destreg);
12225 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12226 ix86_adjust_counter (countreg, 1);
12227 emit_label (label);
12228 LABEL_NUSES (label) = 1;
12230 if (align <= 2)
12232 rtx label = ix86_expand_aligntest (destreg, 2);
12233 srcmem = change_address (src, HImode, srcreg);
12234 dstmem = change_address (dst, HImode, destreg);
12235 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12236 ix86_adjust_counter (countreg, 2);
12237 emit_label (label);
12238 LABEL_NUSES (label) = 1;
12240 if (align <= 4 && desired_alignment > 4)
12242 rtx label = ix86_expand_aligntest (destreg, 4);
12243 srcmem = change_address (src, SImode, srcreg);
12244 dstmem = change_address (dst, SImode, destreg);
12245 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12246 ix86_adjust_counter (countreg, 4);
12247 emit_label (label);
12248 LABEL_NUSES (label) = 1;
12251 if (label && desired_alignment > 4 && !TARGET_64BIT)
12253 emit_label (label);
12254 LABEL_NUSES (label) = 1;
12255 label = NULL_RTX;
12257 if (!TARGET_SINGLE_STRINGOP)
12258 emit_insn (gen_cld ());
12259 if (TARGET_64BIT)
12261 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
12262 GEN_INT (3)));
12263 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
12265 else
12267 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
12268 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
12270 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
12271 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12272 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
12273 countreg2, destexp, srcexp));
12275 if (label)
12277 emit_label (label);
12278 LABEL_NUSES (label) = 1;
12280 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
12282 srcmem = change_address (src, SImode, srcreg);
12283 dstmem = change_address (dst, SImode, destreg);
12284 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12286 if ((align <= 4 || count == 0) && TARGET_64BIT)
12288 rtx label = ix86_expand_aligntest (countreg, 4);
12289 srcmem = change_address (src, SImode, srcreg);
12290 dstmem = change_address (dst, SImode, destreg);
12291 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12292 emit_label (label);
12293 LABEL_NUSES (label) = 1;
12295 if (align > 2 && count != 0 && (count & 2))
12297 srcmem = change_address (src, HImode, srcreg);
12298 dstmem = change_address (dst, HImode, destreg);
12299 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12301 if (align <= 2 || count == 0)
12303 rtx label = ix86_expand_aligntest (countreg, 2);
12304 srcmem = change_address (src, HImode, srcreg);
12305 dstmem = change_address (dst, HImode, destreg);
12306 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12307 emit_label (label);
12308 LABEL_NUSES (label) = 1;
12310 if (align > 1 && count != 0 && (count & 1))
12312 srcmem = change_address (src, QImode, srcreg);
12313 dstmem = change_address (dst, QImode, destreg);
12314 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12316 if (align <= 1 || count == 0)
12318 rtx label = ix86_expand_aligntest (countreg, 1);
12319 srcmem = change_address (src, QImode, srcreg);
12320 dstmem = change_address (dst, QImode, destreg);
12321 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12322 emit_label (label);
12323 LABEL_NUSES (label) = 1;
12327 return 1;
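/* Illustrative sketch (not part of the port): the shape of the constant-count
   path above in plain C, for a hypothetical compile-time COUNT and a chosen
   word size SIZE of 4 or 8 bytes.  */

static void
sketch_movmem_constant (unsigned char *dst, const unsigned char *src,
                        unsigned long count, unsigned long size)
{
  unsigned long offset = count & ~(size - 1);
  unsigned long i;

  /* Bytes below OFFSET are copied SIZE at a time, standing in for the
     rep movs (or the short run of movs) emitted above.  */
  for (i = 0; i < offset; i++)
    dst[i] = src[i];

  /* The remaining COUNT & (SIZE - 1) bytes correspond to the at most
     one 4-byte, one 2-byte and one 1-byte tail move emitted above.  */
  for (i = offset; i < count; i++)
    dst[i] = src[i];
}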
12330 /* Expand string clear operation (bzero). Use i386 string operations when
12331 profitable. expand_movmem contains similar code. */
12332 int
12333 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
12335 rtx destreg, zeroreg, countreg, destexp;
12336 enum machine_mode counter_mode;
12337 HOST_WIDE_INT align = 0;
12338 unsigned HOST_WIDE_INT count = 0;
12340 if (GET_CODE (align_exp) == CONST_INT)
12341 align = INTVAL (align_exp);
12343 /* Can't use any of this if the user has appropriated esi. */
12344 if (global_regs[4])
12345 return 0;
12347 /* This simple hack avoids all inlining code and simplifies code below. */
12348 if (!TARGET_ALIGN_STRINGOPS)
12349 align = 32;
12351 if (GET_CODE (count_exp) == CONST_INT)
12353 count = INTVAL (count_exp);
12354 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
12355 return 0;
12357 /* Figure out the proper mode for the counter. For 32-bit targets it is
12358 always SImode; for 64-bit targets use SImode when possible, otherwise DImode.
12359 Set count to the number of bytes cleared when known at compile time. */
12360 if (!TARGET_64BIT
12361 || GET_MODE (count_exp) == SImode
12362 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
12363 counter_mode = SImode;
12364 else
12365 counter_mode = DImode;
12367 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
12368 if (destreg != XEXP (dst, 0))
12369 dst = replace_equiv_address_nv (dst, destreg);
12372 /* When optimizing for size, emit a simple rep ; stosb instruction for
12373 counts not divisible by 4. The movl $N, %ecx; rep; stosb
12374 sequence is 7 bytes long, so if optimizing for size and the count is
12375 small enough that some stosl, stosw and stosb instructions without
12376 rep are shorter, fall through into the next if. */
12378 if ((!optimize || optimize_size)
12379 && (count == 0
12380 || ((count & 0x03)
12381 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
12383 emit_insn (gen_cld ());
12385 countreg = ix86_zero_extend_to_Pmode (count_exp);
12386 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
12387 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
12388 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
12390 else if (count != 0
12391 && (align >= 8
12392 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
12393 || optimize_size || count < (unsigned int) 64))
12395 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
12396 unsigned HOST_WIDE_INT offset = 0;
12398 emit_insn (gen_cld ());
12400 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
12401 if (count & ~(size - 1))
12403 unsigned HOST_WIDE_INT repcount;
12404 unsigned int max_nonrep;
12406 repcount = count >> (size == 4 ? 2 : 3);
12407 if (!TARGET_64BIT)
12408 repcount &= 0x3fffffff;
12410 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
12411 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
12412 bytes. In both cases the latter seems to be faster for small
12413 values of N. */
12414 max_nonrep = size == 4 ? 7 : 4;
12415 if (!optimize_size)
12416 switch (ix86_tune)
12418 case PROCESSOR_PENTIUM4:
12419 case PROCESSOR_NOCONA:
12420 max_nonrep = 3;
12421 break;
12422 default:
12423 break;
12426 if (repcount <= max_nonrep)
12427 while (repcount-- > 0)
12429 rtx mem = adjust_automodify_address_nv (dst,
12430 GET_MODE (zeroreg),
12431 destreg, offset);
12432 emit_insn (gen_strset (destreg, mem, zeroreg));
12433 offset += size;
12435 else
12437 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
12438 countreg = ix86_zero_extend_to_Pmode (countreg);
12439 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12440 GEN_INT (size == 4 ? 2 : 3));
12441 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12442 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
12443 destexp));
12444 offset = count & ~(size - 1);
12447 if (size == 8 && (count & 0x04))
12449 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
12450 offset);
12451 emit_insn (gen_strset (destreg, mem,
12452 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12453 offset += 4;
12455 if (count & 0x02)
12457 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
12458 offset);
12459 emit_insn (gen_strset (destreg, mem,
12460 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12461 offset += 2;
12463 if (count & 0x01)
12465 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
12466 offset);
12467 emit_insn (gen_strset (destreg, mem,
12468 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12471 else
12473 rtx countreg2;
12474 rtx label = NULL;
12475 /* Compute desired alignment of the string operation. */
12476 int desired_alignment = (TARGET_PENTIUMPRO
12477 && (count == 0 || count >= (unsigned int) 260)
12478 ? 8 : UNITS_PER_WORD);
12480 /* In case we don't know anything about the alignment, default to the
12481 library version, since it is usually equally fast and results in
12482 shorter code.
12484 Also emit the call when we know that the count is large and the call
12485 overhead will not be important. */
12486 if (!TARGET_INLINE_ALL_STRINGOPS
12487 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
12488 return 0;
12490 if (TARGET_SINGLE_STRINGOP)
12491 emit_insn (gen_cld ());
12493 countreg2 = gen_reg_rtx (Pmode);
12494 countreg = copy_to_mode_reg (counter_mode, count_exp);
12495 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
12496 /* Get rid of MEM_OFFSET, it won't be accurate. */
12497 dst = change_address (dst, BLKmode, destreg);
12499 if (count == 0 && align < desired_alignment)
12501 label = gen_label_rtx ();
12502 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
12503 LEU, 0, counter_mode, 1, label);
12505 if (align <= 1)
12507 rtx label = ix86_expand_aligntest (destreg, 1);
12508 emit_insn (gen_strset (destreg, dst,
12509 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12510 ix86_adjust_counter (countreg, 1);
12511 emit_label (label);
12512 LABEL_NUSES (label) = 1;
12514 if (align <= 2)
12516 rtx label = ix86_expand_aligntest (destreg, 2);
12517 emit_insn (gen_strset (destreg, dst,
12518 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12519 ix86_adjust_counter (countreg, 2);
12520 emit_label (label);
12521 LABEL_NUSES (label) = 1;
12523 if (align <= 4 && desired_alignment > 4)
12525 rtx label = ix86_expand_aligntest (destreg, 4);
12526 emit_insn (gen_strset (destreg, dst,
12527 (TARGET_64BIT
12528 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
12529 : zeroreg)));
12530 ix86_adjust_counter (countreg, 4);
12531 emit_label (label);
12532 LABEL_NUSES (label) = 1;
12535 if (label && desired_alignment > 4 && !TARGET_64BIT)
12537 emit_label (label);
12538 LABEL_NUSES (label) = 1;
12539 label = NULL_RTX;
12542 if (!TARGET_SINGLE_STRINGOP)
12543 emit_insn (gen_cld ());
12544 if (TARGET_64BIT)
12546 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
12547 GEN_INT (3)));
12548 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
12550 else
12552 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
12553 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
12555 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12556 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
12558 if (label)
12560 emit_label (label);
12561 LABEL_NUSES (label) = 1;
12564 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
12565 emit_insn (gen_strset (destreg, dst,
12566 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12567 if (TARGET_64BIT && (align <= 4 || count == 0))
12569 rtx label = ix86_expand_aligntest (countreg, 4);
12570 emit_insn (gen_strset (destreg, dst,
12571 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12572 emit_label (label);
12573 LABEL_NUSES (label) = 1;
12575 if (align > 2 && count != 0 && (count & 2))
12576 emit_insn (gen_strset (destreg, dst,
12577 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12578 if (align <= 2 || count == 0)
12580 rtx label = ix86_expand_aligntest (countreg, 2);
12581 emit_insn (gen_strset (destreg, dst,
12582 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12583 emit_label (label);
12584 LABEL_NUSES (label) = 1;
12586 if (align > 1 && count != 0 && (count & 1))
12587 emit_insn (gen_strset (destreg, dst,
12588 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12589 if (align <= 1 || count == 0)
12591 rtx label = ix86_expand_aligntest (countreg, 1);
12592 emit_insn (gen_strset (destreg, dst,
12593 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12594 emit_label (label);
12595 LABEL_NUSES (label) = 1;
12598 return 1;
12601 /* Expand strlen. */
12602 int
12603 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
12605 rtx addr, scratch1, scratch2, scratch3, scratch4;
12607 /* The generic case of the strlen expander is long. Avoid expanding
12608 it unless TARGET_INLINE_ALL_STRINGOPS. */
12610 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12611 && !TARGET_INLINE_ALL_STRINGOPS
12612 && !optimize_size
12613 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
12614 return 0;
12616 addr = force_reg (Pmode, XEXP (src, 0));
12617 scratch1 = gen_reg_rtx (Pmode);
12619 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12620 && !optimize_size)
12622 /* Well it seems that some optimizer does not combine a call like
12623 foo(strlen(bar), strlen(bar));
12624 when the move and the subtraction are done here. It does calculate
12625 the length just once when these instructions are done inside of
12626 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
12627 often used and I use one fewer register for the lifetime of
12628 output_strlen_unroll() this is better. */
12630 emit_move_insn (out, addr);
12632 ix86_expand_strlensi_unroll_1 (out, src, align);
12634 /* strlensi_unroll_1 returns the address of the zero at the end of
12635 the string, like memchr(), so compute the length by subtracting
12636 the start address. */
12637 if (TARGET_64BIT)
12638 emit_insn (gen_subdi3 (out, out, addr));
12639 else
12640 emit_insn (gen_subsi3 (out, out, addr));
12642 else
12644 rtx unspec;
12645 scratch2 = gen_reg_rtx (Pmode);
12646 scratch3 = gen_reg_rtx (Pmode);
12647 scratch4 = force_reg (Pmode, constm1_rtx);
12649 emit_move_insn (scratch3, addr);
12650 eoschar = force_reg (QImode, eoschar);
12652 emit_insn (gen_cld ());
12653 src = replace_equiv_address_nv (src, scratch3);
12655 /* If .md starts supporting :P, this can be done in .md. */
12656 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
12657 scratch4), UNSPEC_SCAS);
12658 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
12659 if (TARGET_64BIT)
12661 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
12662 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
12664 else
12666 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
12667 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
12670 return 1;
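/* Worked example (illustrative) for the repnz scasb path above: %ecx starts
   at -1 and scasb decrements it once per byte examined, including the
   terminating zero.  For a 3-character string it ends at -5, so the one's
   complement gives 4 and adding -1 gives 3, which is exactly what the
   one_cmpl + add sequence above leaves in OUT.  */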
12673 /* Expand the appropriate insns for doing strlen if not just doing
12674 repnz; scasb
12676 out = result, initialized with the start address
12677 align_rtx = alignment of the address.
12678 scratch = scratch register, initialized with the start address when
12679 not aligned, otherwise undefined
12681 This is just the body. It needs the initializations mentioned above and
12682 some address computing at the end. These things are done in i386.md. */
12684 static void
12685 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
12687 int align;
12688 rtx tmp;
12689 rtx align_2_label = NULL_RTX;
12690 rtx align_3_label = NULL_RTX;
12691 rtx align_4_label = gen_label_rtx ();
12692 rtx end_0_label = gen_label_rtx ();
12693 rtx mem;
12694 rtx tmpreg = gen_reg_rtx (SImode);
12695 rtx scratch = gen_reg_rtx (SImode);
12696 rtx cmp;
12698 align = 0;
12699 if (GET_CODE (align_rtx) == CONST_INT)
12700 align = INTVAL (align_rtx);
12702 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
12704 /* Is there a known alignment and is it less than 4? */
12705 if (align < 4)
12707 rtx scratch1 = gen_reg_rtx (Pmode);
12708 emit_move_insn (scratch1, out);
12709 /* Is there a known alignment and is it not 2? */
12710 if (align != 2)
12712 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
12713 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
12715 /* Leave just the 3 lower bits. */
12716 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
12717 NULL_RTX, 0, OPTAB_WIDEN);
12719 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12720 Pmode, 1, align_4_label);
12721 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
12722 Pmode, 1, align_2_label);
12723 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
12724 Pmode, 1, align_3_label);
12726 else
12728 /* Since the alignment is 2, we have to check 2 or 0 bytes;
12729 check whether it is aligned to a 4-byte boundary. */
12731 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
12732 NULL_RTX, 0, OPTAB_WIDEN);
12734 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12735 Pmode, 1, align_4_label);
12738 mem = change_address (src, QImode, out);
12740 /* Now compare the bytes. */
12742 /* Compare the first n unaligned bytes one byte at a time. */
12743 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
12744 QImode, 1, end_0_label);
12746 /* Increment the address. */
12747 if (TARGET_64BIT)
12748 emit_insn (gen_adddi3 (out, out, const1_rtx));
12749 else
12750 emit_insn (gen_addsi3 (out, out, const1_rtx));
12752 /* Not needed with an alignment of 2 */
12753 if (align != 2)
12755 emit_label (align_2_label);
12757 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12758 end_0_label);
12760 if (TARGET_64BIT)
12761 emit_insn (gen_adddi3 (out, out, const1_rtx));
12762 else
12763 emit_insn (gen_addsi3 (out, out, const1_rtx));
12765 emit_label (align_3_label);
12768 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12769 end_0_label);
12771 if (TARGET_64BIT)
12772 emit_insn (gen_adddi3 (out, out, const1_rtx));
12773 else
12774 emit_insn (gen_addsi3 (out, out, const1_rtx));
12777 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
12778 align this loop; doing so only bloats the code and does not help to
12779 speed it up. */
12780 emit_label (align_4_label);
12782 mem = change_address (src, SImode, out);
12783 emit_move_insn (scratch, mem);
12784 if (TARGET_64BIT)
12785 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
12786 else
12787 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
12789 /* This formula yields a nonzero result iff one of the bytes is zero.
12790 This saves three branches inside the loop and many cycles; see the illustrative sketch after this function. */
12792 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
12793 emit_insn (gen_one_cmplsi2 (scratch, scratch));
12794 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
12795 emit_insn (gen_andsi3 (tmpreg, tmpreg,
12796 gen_int_mode (0x80808080, SImode)));
12797 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
12798 align_4_label);
12800 if (TARGET_CMOVE)
12802 rtx reg = gen_reg_rtx (SImode);
12803 rtx reg2 = gen_reg_rtx (Pmode);
12804 emit_move_insn (reg, tmpreg);
12805 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
12807 /* If zero is not in the first two bytes, move two bytes forward. */
12808 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12809 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12810 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12811 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
12812 gen_rtx_IF_THEN_ELSE (SImode, tmp,
12813 reg,
12814 tmpreg)));
12815 /* Emit lea manually to avoid clobbering of flags. */
12816 emit_insn (gen_rtx_SET (SImode, reg2,
12817 gen_rtx_PLUS (Pmode, out, const2_rtx)));
12819 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12820 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12821 emit_insn (gen_rtx_SET (VOIDmode, out,
12822 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
12823 reg2,
12824 out)));
12827 else
12829 rtx end_2_label = gen_label_rtx ();
12830 /* Is zero in the first two bytes? */
12832 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12833 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12834 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
12835 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12836 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
12837 pc_rtx);
12838 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
12839 JUMP_LABEL (tmp) = end_2_label;
12841 /* Not in the first two. Move two bytes forward. */
12842 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
12843 if (TARGET_64BIT)
12844 emit_insn (gen_adddi3 (out, out, const2_rtx));
12845 else
12846 emit_insn (gen_addsi3 (out, out, const2_rtx));
12848 emit_label (end_2_label);
12852 /* Avoid branch in fixing the byte. */
12853 tmpreg = gen_lowpart (QImode, tmpreg);
12854 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
12855 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
12856 if (TARGET_64BIT)
12857 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
12858 else
12859 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
12861 emit_label (end_0_label);
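/* Illustrative sketch (not part of the port): the zero-byte test used in the
   aligned loop above, written in plain C for one 32-bit word.  A hypothetical
   helper:  */

static int
sketch_has_zero_byte (unsigned int x)
{
  /* Subtracting 0x01010101 turns on the top bit of each byte that was zero
     (or that absorbed a borrow), ANDing with ~X discards bytes whose top bit
     was already set, and 0x80808080 keeps only those per-byte flags, so the
     result is nonzero iff some byte of X is zero.  */
  return ((x - 0x01010101U) & ~x & 0x80808080U) != 0;
}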
12864 void
12865 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
12866 rtx callarg2 ATTRIBUTE_UNUSED,
12867 rtx pop, int sibcall)
12869 rtx use = NULL, call;
12871 if (pop == const0_rtx)
12872 pop = NULL;
12873 gcc_assert (!TARGET_64BIT || !pop);
12875 #if TARGET_MACHO
12876 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
12877 fnaddr = machopic_indirect_call_target (fnaddr);
12878 #else
12879 /* Static functions and indirect calls don't need the pic register. */
12880 if (! TARGET_64BIT && flag_pic
12881 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
12882 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
12883 use_reg (&use, pic_offset_table_rtx);
12885 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
12887 rtx al = gen_rtx_REG (QImode, 0);
12888 emit_move_insn (al, callarg2);
12889 use_reg (&use, al);
12891 #endif /* TARGET_MACHO */
12893 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
12895 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12896 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12898 if (sibcall && TARGET_64BIT
12899 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
12901 rtx addr;
12902 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12903 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
12904 emit_move_insn (fnaddr, addr);
12905 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12908 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
12909 if (retval)
12910 call = gen_rtx_SET (VOIDmode, retval, call);
12911 if (pop)
12913 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
12914 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
12915 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
12918 call = emit_call_insn (call);
12919 if (use)
12920 CALL_INSN_FUNCTION_USAGE (call) = use;
12924 /* Clear stack slot assignments remembered from previous functions.
12925 This is called from INIT_EXPANDERS once before RTL is emitted for each
12926 function. */
12928 static struct machine_function *
12929 ix86_init_machine_status (void)
12931 struct machine_function *f;
12933 f = ggc_alloc_cleared (sizeof (struct machine_function));
12934 f->use_fast_prologue_epilogue_nregs = -1;
12935 f->tls_descriptor_call_expanded_p = 0;
12937 return f;
12940 /* Return a MEM corresponding to a stack slot with mode MODE.
12941 Allocate a new slot if necessary.
12943 The RTL for a function can have several slots available: N is
12944 which slot to use. */
12946 rtx
12947 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
12949 struct stack_local_entry *s;
12951 gcc_assert (n < MAX_386_STACK_LOCALS);
12953 for (s = ix86_stack_locals; s; s = s->next)
12954 if (s->mode == mode && s->n == n)
12955 return s->rtl;
12957 s = (struct stack_local_entry *)
12958 ggc_alloc (sizeof (struct stack_local_entry));
12959 s->n = n;
12960 s->mode = mode;
12961 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
12963 s->next = ix86_stack_locals;
12964 ix86_stack_locals = s;
12965 return s->rtl;
12968 /* Construct the SYMBOL_REF for the tls_get_addr function. */
12970 static GTY(()) rtx ix86_tls_symbol;
12971 static rtx
12972 ix86_tls_get_addr (void)
12975 if (!ix86_tls_symbol)
12977 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
12978 (TARGET_ANY_GNU_TLS
12979 && !TARGET_64BIT)
12980 ? "___tls_get_addr"
12981 : "__tls_get_addr");
12984 return ix86_tls_symbol;
12987 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
12989 static GTY(()) rtx ix86_tls_module_base_symbol;
12990 static rtx
12991 ix86_tls_module_base (void)
12994 if (!ix86_tls_module_base_symbol)
12996 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
12997 "_TLS_MODULE_BASE_");
12998 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
12999 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
13002 return ix86_tls_module_base_symbol;
13005 /* Calculate the length of the memory address in the instruction
13006 encoding. Does not include the one-byte modrm, opcode, or prefix. */
13008 int
13009 memory_address_length (rtx addr)
13011 struct ix86_address parts;
13012 rtx base, index, disp;
13013 int len;
13014 int ok;
13016 if (GET_CODE (addr) == PRE_DEC
13017 || GET_CODE (addr) == POST_INC
13018 || GET_CODE (addr) == PRE_MODIFY
13019 || GET_CODE (addr) == POST_MODIFY)
13020 return 0;
13022 ok = ix86_decompose_address (addr, &parts);
13023 gcc_assert (ok);
13025 if (parts.base && GET_CODE (parts.base) == SUBREG)
13026 parts.base = SUBREG_REG (parts.base);
13027 if (parts.index && GET_CODE (parts.index) == SUBREG)
13028 parts.index = SUBREG_REG (parts.index);
13030 base = parts.base;
13031 index = parts.index;
13032 disp = parts.disp;
13033 len = 0;
13035 /* Rule of thumb:
13036 - esp as the base always wants an index,
13037 - ebp as the base always wants a displacement. */
13039 /* Register Indirect. */
13040 if (base && !index && !disp)
13042 /* esp (for its index) and ebp (for its displacement) need
13043 the two-byte modrm form. */
13044 if (addr == stack_pointer_rtx
13045 || addr == arg_pointer_rtx
13046 || addr == frame_pointer_rtx
13047 || addr == hard_frame_pointer_rtx)
13048 len = 1;
13051 /* Direct Addressing. */
13052 else if (disp && !base && !index)
13053 len = 4;
13055 else
13057 /* Find the length of the displacement constant. */
13058 if (disp)
13060 if (GET_CODE (disp) == CONST_INT
13061 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
13062 && base)
13063 len = 1;
13064 else
13065 len = 4;
13067 /* ebp always wants a displacement. */
13068 else if (base == hard_frame_pointer_rtx)
13069 len = 1;
13071 /* An index requires the two-byte modrm form.... */
13072 if (index
13073 /* ...like esp, which always wants an index. */
13074 || base == stack_pointer_rtx
13075 || base == arg_pointer_rtx
13076 || base == frame_pointer_rtx)
13077 len += 1;
13080 return len;
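/* Worked examples (illustrative) for the rules above: (%eax) needs no extra
   bytes, 4(%eax) needs 1 (an 8-bit displacement), 128(%eax) needs 4,
   (%eax,%ebx,4) needs 1 for the SIB byte, and 4(%esp) needs 2 (the SIB byte
   plus an 8-bit displacement).  */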
13083 /* Compute default value for "length_immediate" attribute. When SHORTFORM
13084 is set, expect that the insn has an 8-bit immediate alternative. */
13085 int
13086 ix86_attr_length_immediate_default (rtx insn, int shortform)
13088 int len = 0;
13089 int i;
13090 extract_insn_cached (insn);
13091 for (i = recog_data.n_operands - 1; i >= 0; --i)
13092 if (CONSTANT_P (recog_data.operand[i]))
13094 gcc_assert (!len);
13095 if (shortform
13096 && GET_CODE (recog_data.operand[i]) == CONST_INT
13097 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
13098 len = 1;
13099 else
13101 switch (get_attr_mode (insn))
13103 case MODE_QI:
13104 len+=1;
13105 break;
13106 case MODE_HI:
13107 len+=2;
13108 break;
13109 case MODE_SI:
13110 len+=4;
13111 break;
13112 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
13113 case MODE_DI:
13114 len+=4;
13115 break;
13116 default:
13117 fatal_insn ("unknown insn mode", insn);
13121 return len;
13123 /* Compute default value for "length_address" attribute. */
13124 int
13125 ix86_attr_length_address_default (rtx insn)
13127 int i;
13129 if (get_attr_type (insn) == TYPE_LEA)
13131 rtx set = PATTERN (insn);
13133 if (GET_CODE (set) == PARALLEL)
13134 set = XVECEXP (set, 0, 0);
13136 gcc_assert (GET_CODE (set) == SET);
13138 return memory_address_length (SET_SRC (set));
13141 extract_insn_cached (insn);
13142 for (i = recog_data.n_operands - 1; i >= 0; --i)
13143 if (GET_CODE (recog_data.operand[i]) == MEM)
13145 return memory_address_length (XEXP (recog_data.operand[i], 0));
13146 break;
13148 return 0;
13151 /* Return the maximum number of instructions a cpu can issue. */
13153 static int
13154 ix86_issue_rate (void)
13156 switch (ix86_tune)
13158 case PROCESSOR_PENTIUM:
13159 case PROCESSOR_K6:
13160 return 2;
13162 case PROCESSOR_PENTIUMPRO:
13163 case PROCESSOR_PENTIUM4:
13164 case PROCESSOR_ATHLON:
13165 case PROCESSOR_K8:
13166 case PROCESSOR_NOCONA:
13167 return 3;
13169 default:
13170 return 1;
13174 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
13175 set by DEP_INSN and nothing else that DEP_INSN sets. */
13177 static int
13178 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
13180 rtx set, set2;
13182 /* Simplify the test for uninteresting insns. */
13183 if (insn_type != TYPE_SETCC
13184 && insn_type != TYPE_ICMOV
13185 && insn_type != TYPE_FCMOV
13186 && insn_type != TYPE_IBR)
13187 return 0;
13189 if ((set = single_set (dep_insn)) != 0)
13191 set = SET_DEST (set);
13192 set2 = NULL_RTX;
13194 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
13195 && XVECLEN (PATTERN (dep_insn), 0) == 2
13196 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
13197 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
13199 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
13200 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
13202 else
13203 return 0;
13205 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
13206 return 0;
13208 /* This test is true if the dependent insn reads the flags but
13209 not any other potentially set register. */
13210 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
13211 return 0;
13213 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
13214 return 0;
13216 return 1;
13219 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
13220 address with operands set by DEP_INSN. */
13222 static int
13223 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
13225 rtx addr;
13227 if (insn_type == TYPE_LEA
13228 && TARGET_PENTIUM)
13230 addr = PATTERN (insn);
13232 if (GET_CODE (addr) == PARALLEL)
13233 addr = XVECEXP (addr, 0, 0);
13235 gcc_assert (GET_CODE (addr) == SET);
13237 addr = SET_SRC (addr);
13239 else
13241 int i;
13242 extract_insn_cached (insn);
13243 for (i = recog_data.n_operands - 1; i >= 0; --i)
13244 if (GET_CODE (recog_data.operand[i]) == MEM)
13246 addr = XEXP (recog_data.operand[i], 0);
13247 goto found;
13249 return 0;
13250 found:;
13253 return modified_in_p (addr, dep_insn);
13256 static int
13257 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
13259 enum attr_type insn_type, dep_insn_type;
13260 enum attr_memory memory;
13261 rtx set, set2;
13262 int dep_insn_code_number;
13264 /* Anti and output dependencies have zero cost on all CPUs. */
13265 if (REG_NOTE_KIND (link) != 0)
13266 return 0;
13268 dep_insn_code_number = recog_memoized (dep_insn);
13270 /* If we can't recognize the insns, we can't really do anything. */
13271 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
13272 return cost;
13274 insn_type = get_attr_type (insn);
13275 dep_insn_type = get_attr_type (dep_insn);
13277 switch (ix86_tune)
13279 case PROCESSOR_PENTIUM:
13280 /* Address Generation Interlock adds a cycle of latency. */
13281 if (ix86_agi_dependant (insn, dep_insn, insn_type))
13282 cost += 1;
13284 /* ??? Compares pair with jump/setcc. */
13285 if (ix86_flags_dependant (insn, dep_insn, insn_type))
13286 cost = 0;
13288 /* Floating point stores require value to be ready one cycle earlier. */
13289 if (insn_type == TYPE_FMOV
13290 && get_attr_memory (insn) == MEMORY_STORE
13291 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13292 cost += 1;
13293 break;
13295 case PROCESSOR_PENTIUMPRO:
13296 memory = get_attr_memory (insn);
13298 /* INT->FP conversion is expensive. */
13299 if (get_attr_fp_int_src (dep_insn))
13300 cost += 5;
13302 /* There is one cycle extra latency between an FP op and a store. */
13303 if (insn_type == TYPE_FMOV
13304 && (set = single_set (dep_insn)) != NULL_RTX
13305 && (set2 = single_set (insn)) != NULL_RTX
13306 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
13307 && GET_CODE (SET_DEST (set2)) == MEM)
13308 cost += 1;
13310 /* Show ability of reorder buffer to hide latency of load by executing
13311 in parallel with previous instruction in case
13312 previous instruction is not needed to compute the address. */
13313 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13314 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13316 /* Claim moves to take one cycle, as the core can issue one load
13317 at a time and the next load can start a cycle later. */
13318 if (dep_insn_type == TYPE_IMOV
13319 || dep_insn_type == TYPE_FMOV)
13320 cost = 1;
13321 else if (cost > 1)
13322 cost--;
13324 break;
13326 case PROCESSOR_K6:
13327 memory = get_attr_memory (insn);
13329 /* The esp dependency is resolved before the instruction is really
13330 finished. */
13331 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
13332 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
13333 return 1;
13335 /* INT->FP conversion is expensive. */
13336 if (get_attr_fp_int_src (dep_insn))
13337 cost += 5;
13339 /* Show ability of reorder buffer to hide latency of load by executing
13340 in parallel with previous instruction in case
13341 previous instruction is not needed to compute the address. */
13342 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13343 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13345 /* Claim moves to take one cycle, as the core can issue one load
13346 at a time and the next load can start a cycle later. */
13347 if (dep_insn_type == TYPE_IMOV
13348 || dep_insn_type == TYPE_FMOV)
13349 cost = 1;
13350 else if (cost > 2)
13351 cost -= 2;
13352 else
13353 cost = 1;
13355 break;
13357 case PROCESSOR_ATHLON:
13358 case PROCESSOR_K8:
13359 memory = get_attr_memory (insn);
13361 /* Show ability of reorder buffer to hide latency of load by executing
13362 in parallel with previous instruction in case
13363 previous instruction is not needed to compute the address. */
13364 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13365 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13367 enum attr_unit unit = get_attr_unit (insn);
13368 int loadcost = 3;
13370 /* Because of the difference between the length of integer and
13371 floating unit pipeline preparation stages, the memory operands
13372 for floating point are cheaper.
13374 ??? For Athlon the difference is most probably 2. */
13375 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
13376 loadcost = 3;
13377 else
13378 loadcost = TARGET_ATHLON ? 2 : 0;
13380 if (cost >= loadcost)
13381 cost -= loadcost;
13382 else
13383 cost = 0;
13386 default:
13387 break;
13390 return cost;
13393 /* How many alternative schedules to try. This should be as wide as the
13394 scheduling freedom in the DFA, but no wider. Making this value too
13395 large results in extra work for the scheduler. */
13397 static int
13398 ia32_multipass_dfa_lookahead (void)
13400 if (ix86_tune == PROCESSOR_PENTIUM)
13401 return 2;
13403 if (ix86_tune == PROCESSOR_PENTIUMPRO
13404 || ix86_tune == PROCESSOR_K6)
13405 return 1;
13407 else
13408 return 0;
13412 /* Compute the alignment given to a constant that is being placed in memory.
13413 EXP is the constant and ALIGN is the alignment that the object would
13414 ordinarily have.
13415 The value of this function is used instead of that alignment to align
13416 the object. */
13418 int
13419 ix86_constant_alignment (tree exp, int align)
13421 if (TREE_CODE (exp) == REAL_CST)
13423 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
13424 return 64;
13425 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
13426 return 128;
13428 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
13429 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
13430 return BITS_PER_WORD;
13432 return align;
13435 /* Compute the alignment for a static variable.
13436 TYPE is the data type, and ALIGN is the alignment that
13437 the object would ordinarily have. The value of this function is used
13438 instead of that alignment to align the object. */
13440 int
13441 ix86_data_alignment (tree type, int align)
13443 int max_align = optimize_size ? BITS_PER_WORD : 256;
13445 if (AGGREGATE_TYPE_P (type)
13446 && TYPE_SIZE (type)
13447 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13448 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
13449 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
13450 && align < max_align)
13451 align = max_align;
13453 /* The x86-64 ABI requires arrays of 16 bytes or larger to be aligned
13454 to a 16-byte boundary. */
13455 if (TARGET_64BIT)
13457 if (AGGREGATE_TYPE_P (type)
13458 && TYPE_SIZE (type)
13459 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13460 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
13461 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13462 return 128;
13465 if (TREE_CODE (type) == ARRAY_TYPE)
13467 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13468 return 64;
13469 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13470 return 128;
13472 else if (TREE_CODE (type) == COMPLEX_TYPE)
13475 if (TYPE_MODE (type) == DCmode && align < 64)
13476 return 64;
13477 if (TYPE_MODE (type) == XCmode && align < 128)
13478 return 128;
13480 else if ((TREE_CODE (type) == RECORD_TYPE
13481 || TREE_CODE (type) == UNION_TYPE
13482 || TREE_CODE (type) == QUAL_UNION_TYPE)
13483 && TYPE_FIELDS (type))
13485 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13486 return 64;
13487 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13488 return 128;
13490 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13491 || TREE_CODE (type) == INTEGER_TYPE)
13493 if (TYPE_MODE (type) == DFmode && align < 64)
13494 return 64;
13495 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13496 return 128;
13499 return align;
13502 /* Compute the alignment for a local variable.
13503 TYPE is the data type, and ALIGN is the alignment that
13504 the object would ordinarily have. The value of this macro is used
13505 instead of that alignment to align the object. */
13507 int
13508 ix86_local_alignment (tree type, int align)
13510 /* The x86-64 ABI requires arrays of 16 bytes or larger to be aligned
13511 to a 16-byte boundary. */
13512 if (TARGET_64BIT)
13514 if (AGGREGATE_TYPE_P (type)
13515 && TYPE_SIZE (type)
13516 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13517 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
13518 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13519 return 128;
13521 if (TREE_CODE (type) == ARRAY_TYPE)
13523 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13524 return 64;
13525 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13526 return 128;
13528 else if (TREE_CODE (type) == COMPLEX_TYPE)
13530 if (TYPE_MODE (type) == DCmode && align < 64)
13531 return 64;
13532 if (TYPE_MODE (type) == XCmode && align < 128)
13533 return 128;
13535 else if ((TREE_CODE (type) == RECORD_TYPE
13536 || TREE_CODE (type) == UNION_TYPE
13537 || TREE_CODE (type) == QUAL_UNION_TYPE)
13538 && TYPE_FIELDS (type))
13540 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13541 return 64;
13542 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13543 return 128;
13545 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13546 || TREE_CODE (type) == INTEGER_TYPE)
13549 if (TYPE_MODE (type) == DFmode && align < 64)
13550 return 64;
13551 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13552 return 128;
13554 return align;
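/* For example (illustrative): a local array of double is raised to 64-bit
   alignment by the ARRAY_TYPE clause above, and a local XCmode complex to
   128-bit; the x86-64 clause likewise raises qualifying local aggregates to
   128-bit alignment.  */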
13557 /* Emit RTL insns to initialize the variable parts of a trampoline.
13558 FNADDR is an RTX for the address of the function's pure code.
13559 CXT is an RTX for the static chain value for the function. */
13560 void
13561 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
13563 if (!TARGET_64BIT)
13565 /* Compute offset from the end of the jmp to the target function. */
13566 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
13567 plus_constant (tramp, 10),
13568 NULL_RTX, 1, OPTAB_DIRECT);
13569 emit_move_insn (gen_rtx_MEM (QImode, tramp),
13570 gen_int_mode (0xb9, QImode));
13571 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
13572 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
13573 gen_int_mode (0xe9, QImode));
13574 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
13576 else
13578 int offset = 0;
13579 /* Try to load the address using the shorter movl instead of movabs.
13580    We may want to support movq for kernel mode, but the kernel does not
13581    use trampolines at the moment.  */
13582 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
13584 fnaddr = copy_to_mode_reg (DImode, fnaddr);
13585 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13586 gen_int_mode (0xbb41, HImode));
13587 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
13588 gen_lowpart (SImode, fnaddr));
13589 offset += 6;
13591 else
13593 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13594 gen_int_mode (0xbb49, HImode));
13595 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13596 fnaddr);
13597 offset += 10;
13599 /* Load the static chain into r10 using movabs.  */
13600 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13601 gen_int_mode (0xba49, HImode));
13602 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13603 cxt);
13604 offset += 10;
13605 /* Jump to r11.  */
13606 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13607 gen_int_mode (0xff49, HImode));
13608 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
13609 gen_int_mode (0xe3, QImode));
13610 offset += 3;
13611 gcc_assert (offset <= TRAMPOLINE_SIZE);
13614 #ifdef ENABLE_EXECUTE_STACK
13615 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
13616 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
13617 #endif
13620 /* Codes for all the SSE/MMX builtins. */
13621 enum ix86_builtins
13623 IX86_BUILTIN_ADDPS,
13624 IX86_BUILTIN_ADDSS,
13625 IX86_BUILTIN_DIVPS,
13626 IX86_BUILTIN_DIVSS,
13627 IX86_BUILTIN_MULPS,
13628 IX86_BUILTIN_MULSS,
13629 IX86_BUILTIN_SUBPS,
13630 IX86_BUILTIN_SUBSS,
13632 IX86_BUILTIN_CMPEQPS,
13633 IX86_BUILTIN_CMPLTPS,
13634 IX86_BUILTIN_CMPLEPS,
13635 IX86_BUILTIN_CMPGTPS,
13636 IX86_BUILTIN_CMPGEPS,
13637 IX86_BUILTIN_CMPNEQPS,
13638 IX86_BUILTIN_CMPNLTPS,
13639 IX86_BUILTIN_CMPNLEPS,
13640 IX86_BUILTIN_CMPNGTPS,
13641 IX86_BUILTIN_CMPNGEPS,
13642 IX86_BUILTIN_CMPORDPS,
13643 IX86_BUILTIN_CMPUNORDPS,
13644 IX86_BUILTIN_CMPEQSS,
13645 IX86_BUILTIN_CMPLTSS,
13646 IX86_BUILTIN_CMPLESS,
13647 IX86_BUILTIN_CMPNEQSS,
13648 IX86_BUILTIN_CMPNLTSS,
13649 IX86_BUILTIN_CMPNLESS,
13650 IX86_BUILTIN_CMPNGTSS,
13651 IX86_BUILTIN_CMPNGESS,
13652 IX86_BUILTIN_CMPORDSS,
13653 IX86_BUILTIN_CMPUNORDSS,
13655 IX86_BUILTIN_COMIEQSS,
13656 IX86_BUILTIN_COMILTSS,
13657 IX86_BUILTIN_COMILESS,
13658 IX86_BUILTIN_COMIGTSS,
13659 IX86_BUILTIN_COMIGESS,
13660 IX86_BUILTIN_COMINEQSS,
13661 IX86_BUILTIN_UCOMIEQSS,
13662 IX86_BUILTIN_UCOMILTSS,
13663 IX86_BUILTIN_UCOMILESS,
13664 IX86_BUILTIN_UCOMIGTSS,
13665 IX86_BUILTIN_UCOMIGESS,
13666 IX86_BUILTIN_UCOMINEQSS,
13668 IX86_BUILTIN_CVTPI2PS,
13669 IX86_BUILTIN_CVTPS2PI,
13670 IX86_BUILTIN_CVTSI2SS,
13671 IX86_BUILTIN_CVTSI642SS,
13672 IX86_BUILTIN_CVTSS2SI,
13673 IX86_BUILTIN_CVTSS2SI64,
13674 IX86_BUILTIN_CVTTPS2PI,
13675 IX86_BUILTIN_CVTTSS2SI,
13676 IX86_BUILTIN_CVTTSS2SI64,
13678 IX86_BUILTIN_MAXPS,
13679 IX86_BUILTIN_MAXSS,
13680 IX86_BUILTIN_MINPS,
13681 IX86_BUILTIN_MINSS,
13683 IX86_BUILTIN_LOADUPS,
13684 IX86_BUILTIN_STOREUPS,
13685 IX86_BUILTIN_MOVSS,
13687 IX86_BUILTIN_MOVHLPS,
13688 IX86_BUILTIN_MOVLHPS,
13689 IX86_BUILTIN_LOADHPS,
13690 IX86_BUILTIN_LOADLPS,
13691 IX86_BUILTIN_STOREHPS,
13692 IX86_BUILTIN_STORELPS,
13694 IX86_BUILTIN_MASKMOVQ,
13695 IX86_BUILTIN_MOVMSKPS,
13696 IX86_BUILTIN_PMOVMSKB,
13698 IX86_BUILTIN_MOVNTPS,
13699 IX86_BUILTIN_MOVNTQ,
13701 IX86_BUILTIN_LOADDQU,
13702 IX86_BUILTIN_STOREDQU,
13704 IX86_BUILTIN_PACKSSWB,
13705 IX86_BUILTIN_PACKSSDW,
13706 IX86_BUILTIN_PACKUSWB,
13708 IX86_BUILTIN_PADDB,
13709 IX86_BUILTIN_PADDW,
13710 IX86_BUILTIN_PADDD,
13711 IX86_BUILTIN_PADDQ,
13712 IX86_BUILTIN_PADDSB,
13713 IX86_BUILTIN_PADDSW,
13714 IX86_BUILTIN_PADDUSB,
13715 IX86_BUILTIN_PADDUSW,
13716 IX86_BUILTIN_PSUBB,
13717 IX86_BUILTIN_PSUBW,
13718 IX86_BUILTIN_PSUBD,
13719 IX86_BUILTIN_PSUBQ,
13720 IX86_BUILTIN_PSUBSB,
13721 IX86_BUILTIN_PSUBSW,
13722 IX86_BUILTIN_PSUBUSB,
13723 IX86_BUILTIN_PSUBUSW,
13725 IX86_BUILTIN_PAND,
13726 IX86_BUILTIN_PANDN,
13727 IX86_BUILTIN_POR,
13728 IX86_BUILTIN_PXOR,
13730 IX86_BUILTIN_PAVGB,
13731 IX86_BUILTIN_PAVGW,
13733 IX86_BUILTIN_PCMPEQB,
13734 IX86_BUILTIN_PCMPEQW,
13735 IX86_BUILTIN_PCMPEQD,
13736 IX86_BUILTIN_PCMPGTB,
13737 IX86_BUILTIN_PCMPGTW,
13738 IX86_BUILTIN_PCMPGTD,
13740 IX86_BUILTIN_PMADDWD,
13742 IX86_BUILTIN_PMAXSW,
13743 IX86_BUILTIN_PMAXUB,
13744 IX86_BUILTIN_PMINSW,
13745 IX86_BUILTIN_PMINUB,
13747 IX86_BUILTIN_PMULHUW,
13748 IX86_BUILTIN_PMULHW,
13749 IX86_BUILTIN_PMULLW,
13751 IX86_BUILTIN_PSADBW,
13752 IX86_BUILTIN_PSHUFW,
13754 IX86_BUILTIN_PSLLW,
13755 IX86_BUILTIN_PSLLD,
13756 IX86_BUILTIN_PSLLQ,
13757 IX86_BUILTIN_PSRAW,
13758 IX86_BUILTIN_PSRAD,
13759 IX86_BUILTIN_PSRLW,
13760 IX86_BUILTIN_PSRLD,
13761 IX86_BUILTIN_PSRLQ,
13762 IX86_BUILTIN_PSLLWI,
13763 IX86_BUILTIN_PSLLDI,
13764 IX86_BUILTIN_PSLLQI,
13765 IX86_BUILTIN_PSRAWI,
13766 IX86_BUILTIN_PSRADI,
13767 IX86_BUILTIN_PSRLWI,
13768 IX86_BUILTIN_PSRLDI,
13769 IX86_BUILTIN_PSRLQI,
13771 IX86_BUILTIN_PUNPCKHBW,
13772 IX86_BUILTIN_PUNPCKHWD,
13773 IX86_BUILTIN_PUNPCKHDQ,
13774 IX86_BUILTIN_PUNPCKLBW,
13775 IX86_BUILTIN_PUNPCKLWD,
13776 IX86_BUILTIN_PUNPCKLDQ,
13778 IX86_BUILTIN_SHUFPS,
13780 IX86_BUILTIN_RCPPS,
13781 IX86_BUILTIN_RCPSS,
13782 IX86_BUILTIN_RSQRTPS,
13783 IX86_BUILTIN_RSQRTSS,
13784 IX86_BUILTIN_SQRTPS,
13785 IX86_BUILTIN_SQRTSS,
13787 IX86_BUILTIN_UNPCKHPS,
13788 IX86_BUILTIN_UNPCKLPS,
13790 IX86_BUILTIN_ANDPS,
13791 IX86_BUILTIN_ANDNPS,
13792 IX86_BUILTIN_ORPS,
13793 IX86_BUILTIN_XORPS,
13795 IX86_BUILTIN_EMMS,
13796 IX86_BUILTIN_LDMXCSR,
13797 IX86_BUILTIN_STMXCSR,
13798 IX86_BUILTIN_SFENCE,
13800 /* 3DNow! Original */
13801 IX86_BUILTIN_FEMMS,
13802 IX86_BUILTIN_PAVGUSB,
13803 IX86_BUILTIN_PF2ID,
13804 IX86_BUILTIN_PFACC,
13805 IX86_BUILTIN_PFADD,
13806 IX86_BUILTIN_PFCMPEQ,
13807 IX86_BUILTIN_PFCMPGE,
13808 IX86_BUILTIN_PFCMPGT,
13809 IX86_BUILTIN_PFMAX,
13810 IX86_BUILTIN_PFMIN,
13811 IX86_BUILTIN_PFMUL,
13812 IX86_BUILTIN_PFRCP,
13813 IX86_BUILTIN_PFRCPIT1,
13814 IX86_BUILTIN_PFRCPIT2,
13815 IX86_BUILTIN_PFRSQIT1,
13816 IX86_BUILTIN_PFRSQRT,
13817 IX86_BUILTIN_PFSUB,
13818 IX86_BUILTIN_PFSUBR,
13819 IX86_BUILTIN_PI2FD,
13820 IX86_BUILTIN_PMULHRW,
13822 /* 3DNow! Athlon Extensions */
13823 IX86_BUILTIN_PF2IW,
13824 IX86_BUILTIN_PFNACC,
13825 IX86_BUILTIN_PFPNACC,
13826 IX86_BUILTIN_PI2FW,
13827 IX86_BUILTIN_PSWAPDSI,
13828 IX86_BUILTIN_PSWAPDSF,
13830 /* SSE2 */
13831 IX86_BUILTIN_ADDPD,
13832 IX86_BUILTIN_ADDSD,
13833 IX86_BUILTIN_DIVPD,
13834 IX86_BUILTIN_DIVSD,
13835 IX86_BUILTIN_MULPD,
13836 IX86_BUILTIN_MULSD,
13837 IX86_BUILTIN_SUBPD,
13838 IX86_BUILTIN_SUBSD,
13840 IX86_BUILTIN_CMPEQPD,
13841 IX86_BUILTIN_CMPLTPD,
13842 IX86_BUILTIN_CMPLEPD,
13843 IX86_BUILTIN_CMPGTPD,
13844 IX86_BUILTIN_CMPGEPD,
13845 IX86_BUILTIN_CMPNEQPD,
13846 IX86_BUILTIN_CMPNLTPD,
13847 IX86_BUILTIN_CMPNLEPD,
13848 IX86_BUILTIN_CMPNGTPD,
13849 IX86_BUILTIN_CMPNGEPD,
13850 IX86_BUILTIN_CMPORDPD,
13851 IX86_BUILTIN_CMPUNORDPD,
13852 IX86_BUILTIN_CMPNEPD,
13853 IX86_BUILTIN_CMPEQSD,
13854 IX86_BUILTIN_CMPLTSD,
13855 IX86_BUILTIN_CMPLESD,
13856 IX86_BUILTIN_CMPNEQSD,
13857 IX86_BUILTIN_CMPNLTSD,
13858 IX86_BUILTIN_CMPNLESD,
13859 IX86_BUILTIN_CMPORDSD,
13860 IX86_BUILTIN_CMPUNORDSD,
13861 IX86_BUILTIN_CMPNESD,
13863 IX86_BUILTIN_COMIEQSD,
13864 IX86_BUILTIN_COMILTSD,
13865 IX86_BUILTIN_COMILESD,
13866 IX86_BUILTIN_COMIGTSD,
13867 IX86_BUILTIN_COMIGESD,
13868 IX86_BUILTIN_COMINEQSD,
13869 IX86_BUILTIN_UCOMIEQSD,
13870 IX86_BUILTIN_UCOMILTSD,
13871 IX86_BUILTIN_UCOMILESD,
13872 IX86_BUILTIN_UCOMIGTSD,
13873 IX86_BUILTIN_UCOMIGESD,
13874 IX86_BUILTIN_UCOMINEQSD,
13876 IX86_BUILTIN_MAXPD,
13877 IX86_BUILTIN_MAXSD,
13878 IX86_BUILTIN_MINPD,
13879 IX86_BUILTIN_MINSD,
13881 IX86_BUILTIN_ANDPD,
13882 IX86_BUILTIN_ANDNPD,
13883 IX86_BUILTIN_ORPD,
13884 IX86_BUILTIN_XORPD,
13886 IX86_BUILTIN_SQRTPD,
13887 IX86_BUILTIN_SQRTSD,
13889 IX86_BUILTIN_UNPCKHPD,
13890 IX86_BUILTIN_UNPCKLPD,
13892 IX86_BUILTIN_SHUFPD,
13894 IX86_BUILTIN_LOADUPD,
13895 IX86_BUILTIN_STOREUPD,
13896 IX86_BUILTIN_MOVSD,
13898 IX86_BUILTIN_LOADHPD,
13899 IX86_BUILTIN_LOADLPD,
13901 IX86_BUILTIN_CVTDQ2PD,
13902 IX86_BUILTIN_CVTDQ2PS,
13904 IX86_BUILTIN_CVTPD2DQ,
13905 IX86_BUILTIN_CVTPD2PI,
13906 IX86_BUILTIN_CVTPD2PS,
13907 IX86_BUILTIN_CVTTPD2DQ,
13908 IX86_BUILTIN_CVTTPD2PI,
13910 IX86_BUILTIN_CVTPI2PD,
13911 IX86_BUILTIN_CVTSI2SD,
13912 IX86_BUILTIN_CVTSI642SD,
13914 IX86_BUILTIN_CVTSD2SI,
13915 IX86_BUILTIN_CVTSD2SI64,
13916 IX86_BUILTIN_CVTSD2SS,
13917 IX86_BUILTIN_CVTSS2SD,
13918 IX86_BUILTIN_CVTTSD2SI,
13919 IX86_BUILTIN_CVTTSD2SI64,
13921 IX86_BUILTIN_CVTPS2DQ,
13922 IX86_BUILTIN_CVTPS2PD,
13923 IX86_BUILTIN_CVTTPS2DQ,
13925 IX86_BUILTIN_MOVNTI,
13926 IX86_BUILTIN_MOVNTPD,
13927 IX86_BUILTIN_MOVNTDQ,
13929 /* SSE2 MMX */
13930 IX86_BUILTIN_MASKMOVDQU,
13931 IX86_BUILTIN_MOVMSKPD,
13932 IX86_BUILTIN_PMOVMSKB128,
13934 IX86_BUILTIN_PACKSSWB128,
13935 IX86_BUILTIN_PACKSSDW128,
13936 IX86_BUILTIN_PACKUSWB128,
13938 IX86_BUILTIN_PADDB128,
13939 IX86_BUILTIN_PADDW128,
13940 IX86_BUILTIN_PADDD128,
13941 IX86_BUILTIN_PADDQ128,
13942 IX86_BUILTIN_PADDSB128,
13943 IX86_BUILTIN_PADDSW128,
13944 IX86_BUILTIN_PADDUSB128,
13945 IX86_BUILTIN_PADDUSW128,
13946 IX86_BUILTIN_PSUBB128,
13947 IX86_BUILTIN_PSUBW128,
13948 IX86_BUILTIN_PSUBD128,
13949 IX86_BUILTIN_PSUBQ128,
13950 IX86_BUILTIN_PSUBSB128,
13951 IX86_BUILTIN_PSUBSW128,
13952 IX86_BUILTIN_PSUBUSB128,
13953 IX86_BUILTIN_PSUBUSW128,
13955 IX86_BUILTIN_PAND128,
13956 IX86_BUILTIN_PANDN128,
13957 IX86_BUILTIN_POR128,
13958 IX86_BUILTIN_PXOR128,
13960 IX86_BUILTIN_PAVGB128,
13961 IX86_BUILTIN_PAVGW128,
13963 IX86_BUILTIN_PCMPEQB128,
13964 IX86_BUILTIN_PCMPEQW128,
13965 IX86_BUILTIN_PCMPEQD128,
13966 IX86_BUILTIN_PCMPGTB128,
13967 IX86_BUILTIN_PCMPGTW128,
13968 IX86_BUILTIN_PCMPGTD128,
13970 IX86_BUILTIN_PMADDWD128,
13972 IX86_BUILTIN_PMAXSW128,
13973 IX86_BUILTIN_PMAXUB128,
13974 IX86_BUILTIN_PMINSW128,
13975 IX86_BUILTIN_PMINUB128,
13977 IX86_BUILTIN_PMULUDQ,
13978 IX86_BUILTIN_PMULUDQ128,
13979 IX86_BUILTIN_PMULHUW128,
13980 IX86_BUILTIN_PMULHW128,
13981 IX86_BUILTIN_PMULLW128,
13983 IX86_BUILTIN_PSADBW128,
13984 IX86_BUILTIN_PSHUFHW,
13985 IX86_BUILTIN_PSHUFLW,
13986 IX86_BUILTIN_PSHUFD,
13988 IX86_BUILTIN_PSLLW128,
13989 IX86_BUILTIN_PSLLD128,
13990 IX86_BUILTIN_PSLLQ128,
13991 IX86_BUILTIN_PSRAW128,
13992 IX86_BUILTIN_PSRAD128,
13993 IX86_BUILTIN_PSRLW128,
13994 IX86_BUILTIN_PSRLD128,
13995 IX86_BUILTIN_PSRLQ128,
13996 IX86_BUILTIN_PSLLDQI128,
13997 IX86_BUILTIN_PSLLWI128,
13998 IX86_BUILTIN_PSLLDI128,
13999 IX86_BUILTIN_PSLLQI128,
14000 IX86_BUILTIN_PSRAWI128,
14001 IX86_BUILTIN_PSRADI128,
14002 IX86_BUILTIN_PSRLDQI128,
14003 IX86_BUILTIN_PSRLWI128,
14004 IX86_BUILTIN_PSRLDI128,
14005 IX86_BUILTIN_PSRLQI128,
14007 IX86_BUILTIN_PUNPCKHBW128,
14008 IX86_BUILTIN_PUNPCKHWD128,
14009 IX86_BUILTIN_PUNPCKHDQ128,
14010 IX86_BUILTIN_PUNPCKHQDQ128,
14011 IX86_BUILTIN_PUNPCKLBW128,
14012 IX86_BUILTIN_PUNPCKLWD128,
14013 IX86_BUILTIN_PUNPCKLDQ128,
14014 IX86_BUILTIN_PUNPCKLQDQ128,
14016 IX86_BUILTIN_CLFLUSH,
14017 IX86_BUILTIN_MFENCE,
14018 IX86_BUILTIN_LFENCE,
14020 /* Prescott New Instructions. */
14021 IX86_BUILTIN_ADDSUBPS,
14022 IX86_BUILTIN_HADDPS,
14023 IX86_BUILTIN_HSUBPS,
14024 IX86_BUILTIN_MOVSHDUP,
14025 IX86_BUILTIN_MOVSLDUP,
14026 IX86_BUILTIN_ADDSUBPD,
14027 IX86_BUILTIN_HADDPD,
14028 IX86_BUILTIN_HSUBPD,
14029 IX86_BUILTIN_LDDQU,
14031 IX86_BUILTIN_MONITOR,
14032 IX86_BUILTIN_MWAIT,
14034 IX86_BUILTIN_VEC_INIT_V2SI,
14035 IX86_BUILTIN_VEC_INIT_V4HI,
14036 IX86_BUILTIN_VEC_INIT_V8QI,
14037 IX86_BUILTIN_VEC_EXT_V2DF,
14038 IX86_BUILTIN_VEC_EXT_V2DI,
14039 IX86_BUILTIN_VEC_EXT_V4SF,
14040 IX86_BUILTIN_VEC_EXT_V4SI,
14041 IX86_BUILTIN_VEC_EXT_V8HI,
14042 IX86_BUILTIN_VEC_EXT_V2SI,
14043 IX86_BUILTIN_VEC_EXT_V4HI,
14044 IX86_BUILTIN_VEC_SET_V8HI,
14045 IX86_BUILTIN_VEC_SET_V4HI,
14047 IX86_BUILTIN_MAX
14050 #define def_builtin(MASK, NAME, TYPE, CODE) \
14051 do { \
14052 if ((MASK) & target_flags \
14053 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
14054 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
14055 NULL, NULL_TREE); \
14056 } while (0)
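/* For example (mirroring calls made further down in this file), the SSE
   ldmxcsr intrinsic is only registered when -msse is in effect:

     def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr",
                  void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);

   and builtins whose mask includes MASK_64BIT, such as
   IX86_BUILTIN_CVTSS2SI64, are created only for 64-bit targets.  */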
14058 /* Bits for builtin_description.flag. */
14060 /* Set when we don't support the comparison natively, and should
14061    swap the comparison operands in order to support it.  */
14062 #define BUILTIN_DESC_SWAP_OPERANDS 1
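/* For instance, SSE has no native packed "greater than" compare, so the
   __builtin_ia32_cmpgtps entry below is described as an LT comparison
   with BUILTIN_DESC_SWAP_OPERANDS set, and its two operands are swapped
   when the builtin is expanded.  */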
14064 struct builtin_description
14066 const unsigned int mask;
14067 const enum insn_code icode;
14068 const char *const name;
14069 const enum ix86_builtins code;
14070 const enum rtx_code comparison;
14071 const unsigned int flag;
14074 static const struct builtin_description bdesc_comi[] =
14076 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
14077 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
14078 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
14079 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
14080 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
14081 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
14082 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
14083 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
14084 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
14085 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
14086 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
14087 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
14088 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
14089 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
14090 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
14091 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
14092 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
14093 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
14094 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
14095 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
14096 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
14097 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
14098 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
14099 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
14102 static const struct builtin_description bdesc_2arg[] =
14104 /* SSE */
14105 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
14106 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
14107 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
14108 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
14109 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
14110 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
14111 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
14112 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
14114 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
14115 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
14116 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
14117 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
14118 BUILTIN_DESC_SWAP_OPERANDS },
14119 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
14120 BUILTIN_DESC_SWAP_OPERANDS },
14121 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
14122 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
14123 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
14124 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
14125 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
14126 BUILTIN_DESC_SWAP_OPERANDS },
14127 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
14128 BUILTIN_DESC_SWAP_OPERANDS },
14129 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
14130 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
14131 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
14132 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
14133 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
14134 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
14135 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
14136 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
14137 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
14138 BUILTIN_DESC_SWAP_OPERANDS },
14139 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
14140 BUILTIN_DESC_SWAP_OPERANDS },
14141 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
14143 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
14144 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
14145 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
14146 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
14148 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
14149 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
14150 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
14151 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
14153 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
14154 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
14155 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
14156 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
14157 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
14159 /* MMX */
14160 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
14161 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
14162 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
14163 { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
14164 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
14165 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
14166 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
14167 { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
14169 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
14170 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
14171 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
14172 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
14173 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
14174 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
14175 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
14176 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
14178 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
14179 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
14180 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
14182 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
14183 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
14184 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
14185 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
14187 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
14188 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
14190 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
14191 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
14192 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
14193 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
14194 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
14195 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
14197 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
14198 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
14199 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
14200 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
14202 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
14203 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
14204 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
14205 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
14206 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
14207 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
14209 /* Special. */
14210 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
14211 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
14212 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
14214 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
14215 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
14216 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
14218 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
14219 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
14220 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
14221 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
14222 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
14223 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
14225 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
14226 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
14227 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
14228 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
14229 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
14230 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
14232 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
14233 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
14234 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
14235 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
14237 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
14238 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
14240 /* SSE2 */
14241 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
14242 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
14243 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
14244 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
14245 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
14246 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
14247 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
14248 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
14250 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
14251 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
14252 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
14253 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
14254 BUILTIN_DESC_SWAP_OPERANDS },
14255 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
14256 BUILTIN_DESC_SWAP_OPERANDS },
14257 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
14258 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
14259 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
14260 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
14261 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
14262 BUILTIN_DESC_SWAP_OPERANDS },
14263 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
14264 BUILTIN_DESC_SWAP_OPERANDS },
14265 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
14266 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
14267 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
14268 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
14269 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
14270 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
14271 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
14272 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
14273 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
14275 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
14276 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
14277 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
14278 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
14280 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
14281 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
14282 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
14283 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
14285 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
14286 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
14287 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
14289 /* SSE2 MMX */
14290 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
14291 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
14292 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
14293 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
14294 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
14295 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
14296 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
14297 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
14299 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
14300 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
14301 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
14302 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
14303 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
14304 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
14305 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
14306 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
14308 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
14309 { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
14311 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
14312 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
14313 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
14314 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
14316 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
14317 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
14319 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
14320 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
14321 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
14322 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
14323 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
14324 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
14326 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
14327 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
14328 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
14329 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
14331 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
14332 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
14333 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
14334 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
14335 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
14336 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
14337 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
14338 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
14340 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
14341 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
14342 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
14344 { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
14345 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
14347 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
14348 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
14350 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
14351 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
14352 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
14354 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
14355 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
14356 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
14358 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
14359 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
14361 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
14363 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
14364 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
14365 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
14366 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
14368 /* SSE3 MMX */
14369 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
14370 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
14371 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
14372 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
14373 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
14374 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
14377 static const struct builtin_description bdesc_1arg[] =
14379 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
14380 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
14382 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
14383 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
14384 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
14386 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
14387 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
14388 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
14389 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
14390 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
14391 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
14393 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
14394 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
14396 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
14398 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
14399 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
14401 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
14402 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
14403 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
14404 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
14405 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
14407 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
14409 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
14410 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
14411 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
14412 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
14414 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
14415 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
14416 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
14418 /* SSE3 */
14419 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
14420 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
14423 static void
14424 ix86_init_builtins (void)
14426 if (TARGET_MMX)
14427 ix86_init_mmx_sse_builtins ();
14430 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
14431 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
14432 builtins. */
14433 static void
14434 ix86_init_mmx_sse_builtins (void)
14436 const struct builtin_description * d;
14437 size_t i;
14439 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
14440 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
14441 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
14442 tree V2DI_type_node
14443 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
14444 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
14445 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
14446 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
14447 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
14448 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
14449 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
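/* These vector type nodes loosely correspond (note added for orientation
   only) to the __v8qi/__v4hi/__v2si and __v16qi/__v8hi/__v4si/__v2di/
   __v4sf/__v2df typedefs used by the mmintrin.h, xmmintrin.h and
   emmintrin.h intrinsic headers.  */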
14451 tree pchar_type_node = build_pointer_type (char_type_node);
14452 tree pcchar_type_node = build_pointer_type (
14453 build_type_variant (char_type_node, 1, 0));
14454 tree pfloat_type_node = build_pointer_type (float_type_node);
14455 tree pcfloat_type_node = build_pointer_type (
14456 build_type_variant (float_type_node, 1, 0));
14457 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
14458 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
14459 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
14461 /* Comparisons. */
14462 tree int_ftype_v4sf_v4sf
14463 = build_function_type_list (integer_type_node,
14464 V4SF_type_node, V4SF_type_node, NULL_TREE);
14465 tree v4si_ftype_v4sf_v4sf
14466 = build_function_type_list (V4SI_type_node,
14467 V4SF_type_node, V4SF_type_node, NULL_TREE);
14468 /* MMX/SSE/integer conversions. */
14469 tree int_ftype_v4sf
14470 = build_function_type_list (integer_type_node,
14471 V4SF_type_node, NULL_TREE);
14472 tree int64_ftype_v4sf
14473 = build_function_type_list (long_long_integer_type_node,
14474 V4SF_type_node, NULL_TREE);
14475 tree int_ftype_v8qi
14476 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
14477 tree v4sf_ftype_v4sf_int
14478 = build_function_type_list (V4SF_type_node,
14479 V4SF_type_node, integer_type_node, NULL_TREE);
14480 tree v4sf_ftype_v4sf_int64
14481 = build_function_type_list (V4SF_type_node,
14482 V4SF_type_node, long_long_integer_type_node,
14483 NULL_TREE);
14484 tree v4sf_ftype_v4sf_v2si
14485 = build_function_type_list (V4SF_type_node,
14486 V4SF_type_node, V2SI_type_node, NULL_TREE);
14488 /* Miscellaneous. */
14489 tree v8qi_ftype_v4hi_v4hi
14490 = build_function_type_list (V8QI_type_node,
14491 V4HI_type_node, V4HI_type_node, NULL_TREE);
14492 tree v4hi_ftype_v2si_v2si
14493 = build_function_type_list (V4HI_type_node,
14494 V2SI_type_node, V2SI_type_node, NULL_TREE);
14495 tree v4sf_ftype_v4sf_v4sf_int
14496 = build_function_type_list (V4SF_type_node,
14497 V4SF_type_node, V4SF_type_node,
14498 integer_type_node, NULL_TREE);
14499 tree v2si_ftype_v4hi_v4hi
14500 = build_function_type_list (V2SI_type_node,
14501 V4HI_type_node, V4HI_type_node, NULL_TREE);
14502 tree v4hi_ftype_v4hi_int
14503 = build_function_type_list (V4HI_type_node,
14504 V4HI_type_node, integer_type_node, NULL_TREE);
14505 tree v4hi_ftype_v4hi_di
14506 = build_function_type_list (V4HI_type_node,
14507 V4HI_type_node, long_long_unsigned_type_node,
14508 NULL_TREE);
14509 tree v2si_ftype_v2si_di
14510 = build_function_type_list (V2SI_type_node,
14511 V2SI_type_node, long_long_unsigned_type_node,
14512 NULL_TREE);
14513 tree void_ftype_void
14514 = build_function_type (void_type_node, void_list_node);
14515 tree void_ftype_unsigned
14516 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
14517 tree void_ftype_unsigned_unsigned
14518 = build_function_type_list (void_type_node, unsigned_type_node,
14519 unsigned_type_node, NULL_TREE);
14520 tree void_ftype_pcvoid_unsigned_unsigned
14521 = build_function_type_list (void_type_node, const_ptr_type_node,
14522 unsigned_type_node, unsigned_type_node,
14523 NULL_TREE);
14524 tree unsigned_ftype_void
14525 = build_function_type (unsigned_type_node, void_list_node);
14526 tree v2si_ftype_v4sf
14527 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
14528 /* Loads/stores. */
14529 tree void_ftype_v8qi_v8qi_pchar
14530 = build_function_type_list (void_type_node,
14531 V8QI_type_node, V8QI_type_node,
14532 pchar_type_node, NULL_TREE);
14533 tree v4sf_ftype_pcfloat
14534 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
14535 /* @@@ the type is bogus */
14536 tree v4sf_ftype_v4sf_pv2si
14537 = build_function_type_list (V4SF_type_node,
14538 V4SF_type_node, pv2si_type_node, NULL_TREE);
14539 tree void_ftype_pv2si_v4sf
14540 = build_function_type_list (void_type_node,
14541 pv2si_type_node, V4SF_type_node, NULL_TREE);
14542 tree void_ftype_pfloat_v4sf
14543 = build_function_type_list (void_type_node,
14544 pfloat_type_node, V4SF_type_node, NULL_TREE);
14545 tree void_ftype_pdi_di
14546 = build_function_type_list (void_type_node,
14547 pdi_type_node, long_long_unsigned_type_node,
14548 NULL_TREE);
14549 tree void_ftype_pv2di_v2di
14550 = build_function_type_list (void_type_node,
14551 pv2di_type_node, V2DI_type_node, NULL_TREE);
14552 /* Normal vector unops. */
14553 tree v4sf_ftype_v4sf
14554 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14556 /* Normal vector binops. */
14557 tree v4sf_ftype_v4sf_v4sf
14558 = build_function_type_list (V4SF_type_node,
14559 V4SF_type_node, V4SF_type_node, NULL_TREE);
14560 tree v8qi_ftype_v8qi_v8qi
14561 = build_function_type_list (V8QI_type_node,
14562 V8QI_type_node, V8QI_type_node, NULL_TREE);
14563 tree v4hi_ftype_v4hi_v4hi
14564 = build_function_type_list (V4HI_type_node,
14565 V4HI_type_node, V4HI_type_node, NULL_TREE);
14566 tree v2si_ftype_v2si_v2si
14567 = build_function_type_list (V2SI_type_node,
14568 V2SI_type_node, V2SI_type_node, NULL_TREE);
14569 tree di_ftype_di_di
14570 = build_function_type_list (long_long_unsigned_type_node,
14571 long_long_unsigned_type_node,
14572 long_long_unsigned_type_node, NULL_TREE);
14574 tree v2si_ftype_v2sf
14575 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
14576 tree v2sf_ftype_v2si
14577 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
14578 tree v2si_ftype_v2si
14579 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
14580 tree v2sf_ftype_v2sf
14581 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
14582 tree v2sf_ftype_v2sf_v2sf
14583 = build_function_type_list (V2SF_type_node,
14584 V2SF_type_node, V2SF_type_node, NULL_TREE);
14585 tree v2si_ftype_v2sf_v2sf
14586 = build_function_type_list (V2SI_type_node,
14587 V2SF_type_node, V2SF_type_node, NULL_TREE);
14588 tree pint_type_node = build_pointer_type (integer_type_node);
14589 tree pdouble_type_node = build_pointer_type (double_type_node);
14590 tree pcdouble_type_node = build_pointer_type (
14591 build_type_variant (double_type_node, 1, 0));
14592 tree int_ftype_v2df_v2df
14593 = build_function_type_list (integer_type_node,
14594 V2DF_type_node, V2DF_type_node, NULL_TREE);
14596 tree void_ftype_pcvoid
14597 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
14598 tree v4sf_ftype_v4si
14599 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
14600 tree v4si_ftype_v4sf
14601 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
14602 tree v2df_ftype_v4si
14603 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
14604 tree v4si_ftype_v2df
14605 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
14606 tree v2si_ftype_v2df
14607 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
14608 tree v4sf_ftype_v2df
14609 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
14610 tree v2df_ftype_v2si
14611 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
14612 tree v2df_ftype_v4sf
14613 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
14614 tree int_ftype_v2df
14615 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
14616 tree int64_ftype_v2df
14617 = build_function_type_list (long_long_integer_type_node,
14618 V2DF_type_node, NULL_TREE);
14619 tree v2df_ftype_v2df_int
14620 = build_function_type_list (V2DF_type_node,
14621 V2DF_type_node, integer_type_node, NULL_TREE);
14622 tree v2df_ftype_v2df_int64
14623 = build_function_type_list (V2DF_type_node,
14624 V2DF_type_node, long_long_integer_type_node,
14625 NULL_TREE);
14626 tree v4sf_ftype_v4sf_v2df
14627 = build_function_type_list (V4SF_type_node,
14628 V4SF_type_node, V2DF_type_node, NULL_TREE);
14629 tree v2df_ftype_v2df_v4sf
14630 = build_function_type_list (V2DF_type_node,
14631 V2DF_type_node, V4SF_type_node, NULL_TREE);
14632 tree v2df_ftype_v2df_v2df_int
14633 = build_function_type_list (V2DF_type_node,
14634 V2DF_type_node, V2DF_type_node,
14635 integer_type_node,
14636 NULL_TREE);
14637 tree v2df_ftype_v2df_pcdouble
14638 = build_function_type_list (V2DF_type_node,
14639 V2DF_type_node, pcdouble_type_node, NULL_TREE);
14640 tree void_ftype_pdouble_v2df
14641 = build_function_type_list (void_type_node,
14642 pdouble_type_node, V2DF_type_node, NULL_TREE);
14643 tree void_ftype_pint_int
14644 = build_function_type_list (void_type_node,
14645 pint_type_node, integer_type_node, NULL_TREE);
14646 tree void_ftype_v16qi_v16qi_pchar
14647 = build_function_type_list (void_type_node,
14648 V16QI_type_node, V16QI_type_node,
14649 pchar_type_node, NULL_TREE);
14650 tree v2df_ftype_pcdouble
14651 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
14652 tree v2df_ftype_v2df_v2df
14653 = build_function_type_list (V2DF_type_node,
14654 V2DF_type_node, V2DF_type_node, NULL_TREE);
14655 tree v16qi_ftype_v16qi_v16qi
14656 = build_function_type_list (V16QI_type_node,
14657 V16QI_type_node, V16QI_type_node, NULL_TREE);
14658 tree v8hi_ftype_v8hi_v8hi
14659 = build_function_type_list (V8HI_type_node,
14660 V8HI_type_node, V8HI_type_node, NULL_TREE);
14661 tree v4si_ftype_v4si_v4si
14662 = build_function_type_list (V4SI_type_node,
14663 V4SI_type_node, V4SI_type_node, NULL_TREE);
14664 tree v2di_ftype_v2di_v2di
14665 = build_function_type_list (V2DI_type_node,
14666 V2DI_type_node, V2DI_type_node, NULL_TREE);
14667 tree v2di_ftype_v2df_v2df
14668 = build_function_type_list (V2DI_type_node,
14669 V2DF_type_node, V2DF_type_node, NULL_TREE);
14670 tree v2df_ftype_v2df
14671 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14672 tree v2di_ftype_v2di_int
14673 = build_function_type_list (V2DI_type_node,
14674 V2DI_type_node, integer_type_node, NULL_TREE);
14675 tree v4si_ftype_v4si_int
14676 = build_function_type_list (V4SI_type_node,
14677 V4SI_type_node, integer_type_node, NULL_TREE);
14678 tree v8hi_ftype_v8hi_int
14679 = build_function_type_list (V8HI_type_node,
14680 V8HI_type_node, integer_type_node, NULL_TREE);
14681 tree v8hi_ftype_v8hi_v2di
14682 = build_function_type_list (V8HI_type_node,
14683 V8HI_type_node, V2DI_type_node, NULL_TREE);
14684 tree v4si_ftype_v4si_v2di
14685 = build_function_type_list (V4SI_type_node,
14686 V4SI_type_node, V2DI_type_node, NULL_TREE);
14687 tree v4si_ftype_v8hi_v8hi
14688 = build_function_type_list (V4SI_type_node,
14689 V8HI_type_node, V8HI_type_node, NULL_TREE);
14690 tree di_ftype_v8qi_v8qi
14691 = build_function_type_list (long_long_unsigned_type_node,
14692 V8QI_type_node, V8QI_type_node, NULL_TREE);
14693 tree di_ftype_v2si_v2si
14694 = build_function_type_list (long_long_unsigned_type_node,
14695 V2SI_type_node, V2SI_type_node, NULL_TREE);
14696 tree v2di_ftype_v16qi_v16qi
14697 = build_function_type_list (V2DI_type_node,
14698 V16QI_type_node, V16QI_type_node, NULL_TREE);
14699 tree v2di_ftype_v4si_v4si
14700 = build_function_type_list (V2DI_type_node,
14701 V4SI_type_node, V4SI_type_node, NULL_TREE);
14702 tree int_ftype_v16qi
14703 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
14704 tree v16qi_ftype_pcchar
14705 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
14706 tree void_ftype_pchar_v16qi
14707 = build_function_type_list (void_type_node,
14708 pchar_type_node, V16QI_type_node, NULL_TREE);
14710 tree float80_type;
14711 tree float128_type;
14712 tree ftype;
14714 /* The __float80 type. */
14715 if (TYPE_MODE (long_double_type_node) == XFmode)
14716 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
14717 "__float80");
14718 else
14720 /* The __float80 type. */
14721 float80_type = make_node (REAL_TYPE);
14722 TYPE_PRECISION (float80_type) = 80;
14723 layout_type (float80_type);
14724 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
14727 if (TARGET_64BIT)
14729 float128_type = make_node (REAL_TYPE);
14730 TYPE_PRECISION (float128_type) = 128;
14731 layout_type (float128_type);
14732 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
14735 /* Add all builtins that are more or less simple operations on two
14736 operands. */
14737 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14739 /* Use one of the operands; the target can have a different mode for
14740 mask-generating compares. */
14741 enum machine_mode mode;
14742 tree type;
14744 if (d->name == 0)
14745 continue;
14746 mode = insn_data[d->icode].operand[1].mode;
14748 switch (mode)
14750 case V16QImode:
14751 type = v16qi_ftype_v16qi_v16qi;
14752 break;
14753 case V8HImode:
14754 type = v8hi_ftype_v8hi_v8hi;
14755 break;
14756 case V4SImode:
14757 type = v4si_ftype_v4si_v4si;
14758 break;
14759 case V2DImode:
14760 type = v2di_ftype_v2di_v2di;
14761 break;
14762 case V2DFmode:
14763 type = v2df_ftype_v2df_v2df;
14764 break;
14765 case V4SFmode:
14766 type = v4sf_ftype_v4sf_v4sf;
14767 break;
14768 case V8QImode:
14769 type = v8qi_ftype_v8qi_v8qi;
14770 break;
14771 case V4HImode:
14772 type = v4hi_ftype_v4hi_v4hi;
14773 break;
14774 case V2SImode:
14775 type = v2si_ftype_v2si_v2si;
14776 break;
14777 case DImode:
14778 type = di_ftype_di_di;
14779 break;
14781 default:
14782 gcc_unreachable ();
14785 /* Override for comparisons. */
14786 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
14787 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
14788 type = v4si_ftype_v4sf_v4sf;
14790 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
14791 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
14792 type = v2di_ftype_v2df_v2df;
14794 def_builtin (d->mask, d->name, type, d->code);
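/* Illustration of the loop above (added for exposition): the bdesc_2arg
   entry for "__builtin_ia32_addps" uses CODE_FOR_addv4sf3, whose input
   operand 1 has mode V4SFmode, so that builtin is registered with the
   v4sf_ftype_v4sf_v4sf prototype; only the mask-generating compare
   patterns need the explicit type overrides above.  */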
14797 /* Add the remaining MMX insns with somewhat more complicated types. */
14798 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
14799 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
14800 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
14801 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
14803 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
14804 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
14805 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
14807 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
14808 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
14810 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
14811 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
14813 /* comi/ucomi insns. */
14814 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14815 if (d->mask == MASK_SSE2)
14816 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
14817 else
14818 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
14820 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
14821 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
14822 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
14824 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
14825 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
14826 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
14827 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
14828 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
14829 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
14830 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
14831 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
14832 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
14833 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
14834 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
14836 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
14838 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
14839 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
14841 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
14842 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
14843 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
14844 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
14846 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
14847 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
14848 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
14849 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
14851 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
14853 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
14855 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
14856 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
14857 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
14858 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
14859 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
14860 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
14862 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
14864 /* Original 3DNow! */
14865 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
14866 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
14867 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
14868 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
14869 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
14870 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
14871 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
14872 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
14873 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
14874 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
14875 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
14876 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
14877 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
14878 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
14879 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
14880 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
14881 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
14882 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
14883 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
14884 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
14886 /* 3DNow! extension as used in the Athlon CPU. */
14887 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
14888 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
14889 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
14890 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
14891 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
14892 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
14894 /* SSE2 */
14895 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
14897 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
14898 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
14900 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
14901 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
14903 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
14904 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
14905 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
14906 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
14907 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
14909 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
14910 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
14911 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
14912 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
14914 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
14915 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
14917 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
14919 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
14920 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
14922 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
14923 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
14924 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
14925 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
14926 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
14928 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
14930 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
14931 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
14932 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
14933 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
14935 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
14936 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
14937 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
14939 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
14940 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
14941 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
14942 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
14944 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
14945 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
14946 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
14948 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
14949 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
14951 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
14952 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
14954 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
14955 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
14956 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
14958 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
14959 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
14960 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
14962 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
14963 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
14965 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
14966 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
14967 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
14968 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
14970 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
14971 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
14972 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
14973 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
14975 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
14976 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
14978 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
14980 /* Prescott New Instructions. */
14981 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
14982 void_ftype_pcvoid_unsigned_unsigned,
14983 IX86_BUILTIN_MONITOR);
14984 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
14985 void_ftype_unsigned_unsigned,
14986 IX86_BUILTIN_MWAIT);
14987 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
14988 v4sf_ftype_v4sf,
14989 IX86_BUILTIN_MOVSHDUP);
14990 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
14991 v4sf_ftype_v4sf,
14992 IX86_BUILTIN_MOVSLDUP);
14993 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
14994 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
14996 /* Access to the vec_init patterns. */
14997 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
14998 integer_type_node, NULL_TREE);
14999 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
15000 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
15002 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
15003 short_integer_type_node,
15004 short_integer_type_node,
15005 short_integer_type_node, NULL_TREE);
15006 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
15007 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
15009 ftype = build_function_type_list (V8QI_type_node, char_type_node,
15010 char_type_node, char_type_node,
15011 char_type_node, char_type_node,
15012 char_type_node, char_type_node,
15013 char_type_node, NULL_TREE);
15014 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
15015 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
15017 /* Access to the vec_extract patterns. */
15018 ftype = build_function_type_list (double_type_node, V2DF_type_node,
15019 integer_type_node, NULL_TREE);
15020 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
15021 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
15023 ftype = build_function_type_list (long_long_integer_type_node,
15024 V2DI_type_node, integer_type_node,
15025 NULL_TREE);
15026 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
15027 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
15029 ftype = build_function_type_list (float_type_node, V4SF_type_node,
15030 integer_type_node, NULL_TREE);
15031 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
15032 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
15034 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
15035 integer_type_node, NULL_TREE);
15036 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
15037 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
15039 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
15040 integer_type_node, NULL_TREE);
15041 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
15042 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
15044 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
15045 integer_type_node, NULL_TREE);
15046 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
15047 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
15049 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
15050 integer_type_node, NULL_TREE);
15051 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
15052 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
15054 /* Access to the vec_set patterns. */
15055 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
15056 intHI_type_node,
15057 integer_type_node, NULL_TREE);
15058 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
15059 ftype, IX86_BUILTIN_VEC_SET_V8HI);
15061 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
15062 intHI_type_node,
15063 integer_type_node, NULL_TREE);
15064 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
15065 ftype, IX86_BUILTIN_VEC_SET_V4HI);
15068 /* Errors in the source file can cause expand_expr to return const0_rtx
15069 where we expect a vector. To avoid crashing, use one of the vector
15070 clear instructions. */
15071 static rtx
15072 safe_vector_operand (rtx x, enum machine_mode mode)
15074 if (x == const0_rtx)
15075 x = CONST0_RTX (mode);
15076 return x;
15079 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
15081 static rtx
15082 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
15084 rtx pat, xops[3];
15085 tree arg0 = TREE_VALUE (arglist);
15086 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15087 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15088 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15089 enum machine_mode tmode = insn_data[icode].operand[0].mode;
15090 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
15091 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
15093 if (VECTOR_MODE_P (mode0))
15094 op0 = safe_vector_operand (op0, mode0);
15095 if (VECTOR_MODE_P (mode1))
15096 op1 = safe_vector_operand (op1, mode1);
15098 if (optimize || !target
15099 || GET_MODE (target) != tmode
15100 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15101 target = gen_reg_rtx (tmode);
15103 if (GET_MODE (op1) == SImode && mode1 == TImode)
15105 rtx x = gen_reg_rtx (V4SImode);
15106 emit_insn (gen_sse2_loadd (x, op1));
15107 op1 = gen_lowpart (TImode, x);
15110 /* The insn must want input operands in the same modes as the
15111 result. */
15112 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
15113 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
15115 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15116 op0 = copy_to_mode_reg (mode0, op0);
15117 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15118 op1 = copy_to_mode_reg (mode1, op1);
15120 /* ??? Using ix86_fixup_binary_operands is problematic when
15121 we've got mismatched modes. Fake it. */
15123 xops[0] = target;
15124 xops[1] = op0;
15125 xops[2] = op1;
15127 if (tmode == mode0 && tmode == mode1)
15129 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
15130 op0 = xops[1];
15131 op1 = xops[2];
15133 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
15135 op0 = force_reg (mode0, op0);
15136 op1 = force_reg (mode1, op1);
15137 target = gen_reg_rtx (tmode);
15140 pat = GEN_FCN (icode) (target, op0, op1);
15141 if (! pat)
15142 return 0;
15143 emit_insn (pat);
15144 return target;
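/* Illustrative sketch, not part of the original source: a typical
   two-operand builtin such as

       __v4hi sum = __builtin_ia32_paddw (x, y);

   (assuming paddw is routed through bdesc_2arg) reaches this helper from
   ix86_expand_builtin.  Both arguments are expanded, copied into registers
   that satisfy the insn predicates when necessary, and a single named
   pattern is then emitted via GEN_FCN (icode).  */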
15147 /* Subroutine of ix86_expand_builtin to take care of stores. */
15149 static rtx
15150 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
15152 rtx pat;
15153 tree arg0 = TREE_VALUE (arglist);
15154 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15155 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15156 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15157 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
15158 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
15160 if (VECTOR_MODE_P (mode1))
15161 op1 = safe_vector_operand (op1, mode1);
15163 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15164 op1 = copy_to_mode_reg (mode1, op1);
15166 pat = GEN_FCN (icode) (op0, op1);
15167 if (pat)
15168 emit_insn (pat);
15169 return 0;
15172 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
15174 static rtx
15175 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
15176 rtx target, int do_load)
15178 rtx pat;
15179 tree arg0 = TREE_VALUE (arglist);
15180 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15181 enum machine_mode tmode = insn_data[icode].operand[0].mode;
15182 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
15184 if (optimize || !target
15185 || GET_MODE (target) != tmode
15186 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15187 target = gen_reg_rtx (tmode);
15188 if (do_load)
15189 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15190 else
15192 if (VECTOR_MODE_P (mode0))
15193 op0 = safe_vector_operand (op0, mode0);
15195 if ((optimize && !register_operand (op0, mode0))
15196 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15197 op0 = copy_to_mode_reg (mode0, op0);
15200 pat = GEN_FCN (icode) (target, op0);
15201 if (! pat)
15202 return 0;
15203 emit_insn (pat);
15204 return target;
15207 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
15208 sqrtss, rsqrtss, rcpss. */
15210 static rtx
15211 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
15213 rtx pat;
15214 tree arg0 = TREE_VALUE (arglist);
15215 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15216 enum machine_mode tmode = insn_data[icode].operand[0].mode;
15217 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
15219 if (optimize || !target
15220 || GET_MODE (target) != tmode
15221 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15222 target = gen_reg_rtx (tmode);
15224 if (VECTOR_MODE_P (mode0))
15225 op0 = safe_vector_operand (op0, mode0);
15227 if ((optimize && !register_operand (op0, mode0))
15228 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15229 op0 = copy_to_mode_reg (mode0, op0);
15231 op1 = op0;
15232 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
15233 op1 = copy_to_mode_reg (mode0, op1);
15235 pat = GEN_FCN (icode) (target, op0, op1);
15236 if (! pat)
15237 return 0;
15238 emit_insn (pat);
15239 return target;
15242 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
15244 static rtx
15245 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
15246 rtx target)
15248 rtx pat;
15249 tree arg0 = TREE_VALUE (arglist);
15250 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15251 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15252 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15253 rtx op2;
15254 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
15255 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
15256 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
15257 enum rtx_code comparison = d->comparison;
15259 if (VECTOR_MODE_P (mode0))
15260 op0 = safe_vector_operand (op0, mode0);
15261 if (VECTOR_MODE_P (mode1))
15262 op1 = safe_vector_operand (op1, mode1);
15264 /* Swap operands if we have a comparison that isn't available in
15265 hardware. */
15266 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
15268 rtx tmp = gen_reg_rtx (mode1);
15269 emit_move_insn (tmp, op1);
15270 op1 = op0;
15271 op0 = tmp;
15274 if (optimize || !target
15275 || GET_MODE (target) != tmode
15276 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
15277 target = gen_reg_rtx (tmode);
15279 if ((optimize && !register_operand (op0, mode0))
15280 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
15281 op0 = copy_to_mode_reg (mode0, op0);
15282 if ((optimize && !register_operand (op1, mode1))
15283 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
15284 op1 = copy_to_mode_reg (mode1, op1);
15286 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
15287 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
15288 if (! pat)
15289 return 0;
15290 emit_insn (pat);
15291 return target;
15294 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
15296 static rtx
15297 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
15298 rtx target)
15300 rtx pat;
15301 tree arg0 = TREE_VALUE (arglist);
15302 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15303 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15304 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15305 rtx op2;
15306 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
15307 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
15308 enum rtx_code comparison = d->comparison;
15310 if (VECTOR_MODE_P (mode0))
15311 op0 = safe_vector_operand (op0, mode0);
15312 if (VECTOR_MODE_P (mode1))
15313 op1 = safe_vector_operand (op1, mode1);
15315 /* Swap operands if we have a comparison that isn't available in
15316 hardware. */
15317 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
15319 rtx tmp = op1;
15320 op1 = op0;
15321 op0 = tmp;
15324 target = gen_reg_rtx (SImode);
15325 emit_move_insn (target, const0_rtx);
15326 target = gen_rtx_SUBREG (QImode, target, 0);
15328 if ((optimize && !register_operand (op0, mode0))
15329 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15330 op0 = copy_to_mode_reg (mode0, op0);
15331 if ((optimize && !register_operand (op1, mode1))
15332 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15333 op1 = copy_to_mode_reg (mode1, op1);
15335 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
15336 pat = GEN_FCN (d->icode) (op0, op1);
15337 if (! pat)
15338 return 0;
15339 emit_insn (pat);
15340 emit_insn (gen_rtx_SET (VOIDmode,
15341 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
15342 gen_rtx_fmt_ee (comparison, QImode,
15343 SET_DEST (pat),
15344 const0_rtx)));
15346 return SUBREG_REG (target);
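/* Illustrative note, not part of the original source: assuming the usual
   xmmintrin.h mapping, a call such as

       int eq = __builtin_ia32_comieq (a, b);

   with a and b of type __v4sf is dispatched here from the bdesc_comi
   table.  The comparison insn sets the flags, and the STRICT_LOW_PART
   store above turns the flag result into a 0/1 integer, i.e. the familiar
   comiss or ucomiss followed by a setcc.  */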
15349 /* Return the integer constant in ARG. Constrain it to be in the range
15350 of the subparts of VEC_TYPE; issue an error if not. */
15352 static int
15353 get_element_number (tree vec_type, tree arg)
15355 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15357 if (!host_integerp (arg, 1)
15358 || (elt = tree_low_cst (arg, 1), elt > max))
15360 error ("selector must be an integer constant in the range 0..%wi", max);
15361 return 0;
15364 return elt;
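/* Illustrative note, not part of the original source: for a V4HI vector
   TYPE_VECTOR_SUBPARTS is 4, so a call like

       __builtin_ia32_vec_ext_v4hi (v, 7)

   is diagnosed here with "selector must be an integer constant in the
   range 0..3", and element 0 is used so that expansion can continue.  */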
15367 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15368 ix86_expand_vector_init. We DO have language-level syntax for this, in
15369 the form of (type){ init-list }. Except that since we can't place emms
15370 instructions from inside the compiler, we can't allow the use of MMX
15371 registers unless the user explicitly asks for it. So we do *not* define
15372 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
15373 we have builtins invoked by mmintrin.h that give us license to emit
15374 these sorts of instructions. */
15376 static rtx
15377 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
15379 enum machine_mode tmode = TYPE_MODE (type);
15380 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
15381 int i, n_elt = GET_MODE_NUNITS (tmode);
15382 rtvec v = rtvec_alloc (n_elt);
15384 gcc_assert (VECTOR_MODE_P (tmode));
15386 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
15388 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15389 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15392 gcc_assert (arglist == NULL);
15394 if (!target || !register_operand (target, tmode))
15395 target = gen_reg_rtx (tmode);
15397 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
15398 return target;
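/* Illustrative sketch, not part of the original source: the intent of the
   comment above is that mmintrin.h builds MMX vectors through these
   builtins rather than through the generic vector-literal syntax, e.g.

       __v2si v = __builtin_ia32_vec_init_v2si (a, b);

   so that MMX registers are only brought into play when the user has
   explicitly asked for MMX code.  */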
15401 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15402 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
15403 had a language-level syntax for referencing vector elements. */
15405 static rtx
15406 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
15408 enum machine_mode tmode, mode0;
15409 tree arg0, arg1;
15410 int elt;
15411 rtx op0;
15413 arg0 = TREE_VALUE (arglist);
15414 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15416 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15417 elt = get_element_number (TREE_TYPE (arg0), arg1);
15419 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15420 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15421 gcc_assert (VECTOR_MODE_P (mode0));
15423 op0 = force_reg (mode0, op0);
15425 if (optimize || !target || !register_operand (target, tmode))
15426 target = gen_reg_rtx (tmode);
15428 ix86_expand_vector_extract (true, target, op0, elt);
15430 return target;
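/* Illustrative note, not part of the original source: a call such as

       float x = __builtin_ia32_vec_ext_v4sf (v, 0);

   arrives here; get_element_number validates the constant selector and
   ix86_expand_vector_extract emits the actual extraction sequence.  */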
15433 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15434 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
15435 a language-level syntax for referencing vector elements. */
15437 static rtx
15438 ix86_expand_vec_set_builtin (tree arglist)
15440 enum machine_mode tmode, mode1;
15441 tree arg0, arg1, arg2;
15442 int elt;
15443 rtx op0, op1;
15445 arg0 = TREE_VALUE (arglist);
15446 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15447 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15449 tmode = TYPE_MODE (TREE_TYPE (arg0));
15450 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15451 gcc_assert (VECTOR_MODE_P (tmode));
15453 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
15454 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
15455 elt = get_element_number (TREE_TYPE (arg0), arg2);
15457 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15458 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15460 op0 = force_reg (tmode, op0);
15461 op1 = force_reg (mode1, op1);
15463 ix86_expand_vector_set (true, op0, op1, elt);
15465 return op0;
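/* Illustrative note, not part of the original source: assuming the usual
   emmintrin.h mapping for _mm_insert_epi16, a call such as

       __v8hi w = __builtin_ia32_vec_set_v8hi (v, x, 3);

   lands here; the replacement scalar is converted to the element mode if
   needed and ix86_expand_vector_set rewrites element 3 of the copy held
   in OP0.  */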
15468 /* Expand an expression EXP that calls a built-in function,
15469 with result going to TARGET if that's convenient
15470 (and in mode MODE if that's convenient).
15471 SUBTARGET may be used as the target for computing one of EXP's operands.
15472 IGNORE is nonzero if the value is to be ignored. */
15474 static rtx
15475 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15476 enum machine_mode mode ATTRIBUTE_UNUSED,
15477 int ignore ATTRIBUTE_UNUSED)
15479 const struct builtin_description *d;
15480 size_t i;
15481 enum insn_code icode;
15482 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
15483 tree arglist = TREE_OPERAND (exp, 1);
15484 tree arg0, arg1, arg2;
15485 rtx op0, op1, op2, pat;
15486 enum machine_mode tmode, mode0, mode1, mode2;
15487 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15489 switch (fcode)
15491 case IX86_BUILTIN_EMMS:
15492 emit_insn (gen_mmx_emms ());
15493 return 0;
15495 case IX86_BUILTIN_SFENCE:
15496 emit_insn (gen_sse_sfence ());
15497 return 0;
15499 case IX86_BUILTIN_MASKMOVQ:
15500 case IX86_BUILTIN_MASKMOVDQU:
15501 icode = (fcode == IX86_BUILTIN_MASKMOVQ
15502 ? CODE_FOR_mmx_maskmovq
15503 : CODE_FOR_sse2_maskmovdqu);
15504 /* Note the arg order is different from the operand order: the builtin takes (data, mask, pointer) while the insn operands are (mem, data, mask). */
15505 arg1 = TREE_VALUE (arglist);
15506 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
15507 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15508 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15509 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15510 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15511 mode0 = insn_data[icode].operand[0].mode;
15512 mode1 = insn_data[icode].operand[1].mode;
15513 mode2 = insn_data[icode].operand[2].mode;
15515 op0 = force_reg (Pmode, op0);
15516 op0 = gen_rtx_MEM (mode1, op0);
15518 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15519 op0 = copy_to_mode_reg (mode0, op0);
15520 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15521 op1 = copy_to_mode_reg (mode1, op1);
15522 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
15523 op2 = copy_to_mode_reg (mode2, op2);
15524 pat = GEN_FCN (icode) (op0, op1, op2);
15525 if (! pat)
15526 return 0;
15527 emit_insn (pat);
15528 return 0;
15530 case IX86_BUILTIN_SQRTSS:
15531 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
15532 case IX86_BUILTIN_RSQRTSS:
15533 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
15534 case IX86_BUILTIN_RCPSS:
15535 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
15537 case IX86_BUILTIN_LOADUPS:
15538 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
15540 case IX86_BUILTIN_STOREUPS:
15541 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
15543 case IX86_BUILTIN_LOADHPS:
15544 case IX86_BUILTIN_LOADLPS:
15545 case IX86_BUILTIN_LOADHPD:
15546 case IX86_BUILTIN_LOADLPD:
15547 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
15548 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
15549 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
15550 : CODE_FOR_sse2_loadlpd);
15551 arg0 = TREE_VALUE (arglist);
15552 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15553 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15554 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15555 tmode = insn_data[icode].operand[0].mode;
15556 mode0 = insn_data[icode].operand[1].mode;
15557 mode1 = insn_data[icode].operand[2].mode;
15559 op0 = force_reg (mode0, op0);
15560 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
15561 if (optimize || target == 0
15562 || GET_MODE (target) != tmode
15563 || !register_operand (target, tmode))
15564 target = gen_reg_rtx (tmode);
15565 pat = GEN_FCN (icode) (target, op0, op1);
15566 if (! pat)
15567 return 0;
15568 emit_insn (pat);
15569 return target;
15571 case IX86_BUILTIN_STOREHPS:
15572 case IX86_BUILTIN_STORELPS:
15573 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
15574 : CODE_FOR_sse_storelps);
15575 arg0 = TREE_VALUE (arglist);
15576 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15577 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15578 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15579 mode0 = insn_data[icode].operand[0].mode;
15580 mode1 = insn_data[icode].operand[1].mode;
15582 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15583 op1 = force_reg (mode1, op1);
15585 pat = GEN_FCN (icode) (op0, op1);
15586 if (! pat)
15587 return 0;
15588 emit_insn (pat);
15589 return const0_rtx;
15591 case IX86_BUILTIN_MOVNTPS:
15592 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
15593 case IX86_BUILTIN_MOVNTQ:
15594 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
15596 case IX86_BUILTIN_LDMXCSR:
15597 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15598 target = assign_386_stack_local (SImode, SLOT_TEMP);
15599 emit_move_insn (target, op0);
15600 emit_insn (gen_sse_ldmxcsr (target));
15601 return 0;
15603 case IX86_BUILTIN_STMXCSR:
15604 target = assign_386_stack_local (SImode, SLOT_TEMP);
15605 emit_insn (gen_sse_stmxcsr (target));
15606 return copy_to_mode_reg (SImode, target);
15608 case IX86_BUILTIN_SHUFPS:
15609 case IX86_BUILTIN_SHUFPD:
15610 icode = (fcode == IX86_BUILTIN_SHUFPS
15611 ? CODE_FOR_sse_shufps
15612 : CODE_FOR_sse2_shufpd);
15613 arg0 = TREE_VALUE (arglist);
15614 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15615 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15616 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15617 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15618 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15619 tmode = insn_data[icode].operand[0].mode;
15620 mode0 = insn_data[icode].operand[1].mode;
15621 mode1 = insn_data[icode].operand[2].mode;
15622 mode2 = insn_data[icode].operand[3].mode;
15624 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15625 op0 = copy_to_mode_reg (mode0, op0);
15626 if ((optimize && !register_operand (op1, mode1))
15627 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
15628 op1 = copy_to_mode_reg (mode1, op1);
15629 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15631 /* @@@ better error message */
15632 error ("mask must be an immediate");
15633 return gen_reg_rtx (tmode);
15635 if (optimize || target == 0
15636 || GET_MODE (target) != tmode
15637 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15638 target = gen_reg_rtx (tmode);
15639 pat = GEN_FCN (icode) (target, op0, op1, op2);
15640 if (! pat)
15641 return 0;
15642 emit_insn (pat);
15643 return target;
15645 case IX86_BUILTIN_PSHUFW:
15646 case IX86_BUILTIN_PSHUFD:
15647 case IX86_BUILTIN_PSHUFHW:
15648 case IX86_BUILTIN_PSHUFLW:
15649 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
15650 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
15651 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
15652 : CODE_FOR_mmx_pshufw);
15653 arg0 = TREE_VALUE (arglist);
15654 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15655 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15656 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15657 tmode = insn_data[icode].operand[0].mode;
15658 mode1 = insn_data[icode].operand[1].mode;
15659 mode2 = insn_data[icode].operand[2].mode;
15661 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15662 op0 = copy_to_mode_reg (mode1, op0);
15663 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15665 /* @@@ better error message */
15666 error ("mask must be an immediate");
15667 return const0_rtx;
15669 if (target == 0
15670 || GET_MODE (target) != tmode
15671 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15672 target = gen_reg_rtx (tmode);
15673 pat = GEN_FCN (icode) (target, op0, op1);
15674 if (! pat)
15675 return 0;
15676 emit_insn (pat);
15677 return target;
15679 case IX86_BUILTIN_PSLLDQI128:
15680 case IX86_BUILTIN_PSRLDQI128:
15681 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
15682 : CODE_FOR_sse2_lshrti3);
15683 arg0 = TREE_VALUE (arglist);
15684 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15685 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15686 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15687 tmode = insn_data[icode].operand[0].mode;
15688 mode1 = insn_data[icode].operand[1].mode;
15689 mode2 = insn_data[icode].operand[2].mode;
15691 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15693 op0 = copy_to_reg (op0);
15694 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
15696 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15698 error ("shift must be an immediate");
15699 return const0_rtx;
15701 target = gen_reg_rtx (V2DImode);
15702 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
15703 if (! pat)
15704 return 0;
15705 emit_insn (pat);
15706 return target;
15708 case IX86_BUILTIN_FEMMS:
15709 emit_insn (gen_mmx_femms ());
15710 return NULL_RTX;
15712 case IX86_BUILTIN_PAVGUSB:
15713 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
15715 case IX86_BUILTIN_PF2ID:
15716 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
15718 case IX86_BUILTIN_PFACC:
15719 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
15721 case IX86_BUILTIN_PFADD:
15722 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
15724 case IX86_BUILTIN_PFCMPEQ:
15725 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
15727 case IX86_BUILTIN_PFCMPGE:
15728 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
15730 case IX86_BUILTIN_PFCMPGT:
15731 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
15733 case IX86_BUILTIN_PFMAX:
15734 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
15736 case IX86_BUILTIN_PFMIN:
15737 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
15739 case IX86_BUILTIN_PFMUL:
15740 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
15742 case IX86_BUILTIN_PFRCP:
15743 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
15745 case IX86_BUILTIN_PFRCPIT1:
15746 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
15748 case IX86_BUILTIN_PFRCPIT2:
15749 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
15751 case IX86_BUILTIN_PFRSQIT1:
15752 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
15754 case IX86_BUILTIN_PFRSQRT:
15755 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
15757 case IX86_BUILTIN_PFSUB:
15758 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
15760 case IX86_BUILTIN_PFSUBR:
15761 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
15763 case IX86_BUILTIN_PI2FD:
15764 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
15766 case IX86_BUILTIN_PMULHRW:
15767 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
15769 case IX86_BUILTIN_PF2IW:
15770 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
15772 case IX86_BUILTIN_PFNACC:
15773 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
15775 case IX86_BUILTIN_PFPNACC:
15776 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
15778 case IX86_BUILTIN_PI2FW:
15779 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
15781 case IX86_BUILTIN_PSWAPDSI:
15782 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
15784 case IX86_BUILTIN_PSWAPDSF:
15785 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
15787 case IX86_BUILTIN_SQRTSD:
15788 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
15789 case IX86_BUILTIN_LOADUPD:
15790 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
15791 case IX86_BUILTIN_STOREUPD:
15792 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
15794 case IX86_BUILTIN_MFENCE:
15795 emit_insn (gen_sse2_mfence ());
15796 return 0;
15797 case IX86_BUILTIN_LFENCE:
15798 emit_insn (gen_sse2_lfence ());
15799 return 0;
15801 case IX86_BUILTIN_CLFLUSH:
15802 arg0 = TREE_VALUE (arglist);
15803 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15804 icode = CODE_FOR_sse2_clflush;
15805 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
15806 op0 = copy_to_mode_reg (Pmode, op0);
15808 emit_insn (gen_sse2_clflush (op0));
15809 return 0;
15811 case IX86_BUILTIN_MOVNTPD:
15812 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
15813 case IX86_BUILTIN_MOVNTDQ:
15814 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
15815 case IX86_BUILTIN_MOVNTI:
15816 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
15818 case IX86_BUILTIN_LOADDQU:
15819 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
15820 case IX86_BUILTIN_STOREDQU:
15821 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
15823 case IX86_BUILTIN_MONITOR:
15824 arg0 = TREE_VALUE (arglist);
15825 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15826 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15827 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15828 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15829 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15830 if (!REG_P (op0))
15831 op0 = copy_to_mode_reg (SImode, op0);
15832 if (!REG_P (op1))
15833 op1 = copy_to_mode_reg (SImode, op1);
15834 if (!REG_P (op2))
15835 op2 = copy_to_mode_reg (SImode, op2);
15836 emit_insn (gen_sse3_monitor (op0, op1, op2));
15837 return 0;
15839 case IX86_BUILTIN_MWAIT:
15840 arg0 = TREE_VALUE (arglist);
15841 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15842 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15843 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15844 if (!REG_P (op0))
15845 op0 = copy_to_mode_reg (SImode, op0);
15846 if (!REG_P (op1))
15847 op1 = copy_to_mode_reg (SImode, op1);
15848 emit_insn (gen_sse3_mwait (op0, op1));
15849 return 0;
15851 case IX86_BUILTIN_LDDQU:
15852 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
15853 target, 1);
15855 case IX86_BUILTIN_VEC_INIT_V2SI:
15856 case IX86_BUILTIN_VEC_INIT_V4HI:
15857 case IX86_BUILTIN_VEC_INIT_V8QI:
15858 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
15860 case IX86_BUILTIN_VEC_EXT_V2DF:
15861 case IX86_BUILTIN_VEC_EXT_V2DI:
15862 case IX86_BUILTIN_VEC_EXT_V4SF:
15863 case IX86_BUILTIN_VEC_EXT_V4SI:
15864 case IX86_BUILTIN_VEC_EXT_V8HI:
15865 case IX86_BUILTIN_VEC_EXT_V2SI:
15866 case IX86_BUILTIN_VEC_EXT_V4HI:
15867 return ix86_expand_vec_ext_builtin (arglist, target);
15869 case IX86_BUILTIN_VEC_SET_V8HI:
15870 case IX86_BUILTIN_VEC_SET_V4HI:
15871 return ix86_expand_vec_set_builtin (arglist);
15873 default:
15874 break;
15877 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15878 if (d->code == fcode)
15880 /* Compares are treated specially. */
15881 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
15882 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
15883 || d->icode == CODE_FOR_sse2_maskcmpv2df3
15884 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
15885 return ix86_expand_sse_compare (d, arglist, target);
15887 return ix86_expand_binop_builtin (d->icode, arglist, target);
15890 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15891 if (d->code == fcode)
15892 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
15894 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
15895 if (d->code == fcode)
15896 return ix86_expand_sse_comi (d, arglist, target);
15898 gcc_unreachable ();
15901 /* Store OPERAND to the memory after reload is completed. This means
15902 that we can't easily use assign_stack_local. */
15903 rtx
15904 ix86_force_to_memory (enum machine_mode mode, rtx operand)
15906 rtx result;
15908 gcc_assert (reload_completed);
15909 if (TARGET_RED_ZONE)
15911 result = gen_rtx_MEM (mode,
15912 gen_rtx_PLUS (Pmode,
15913 stack_pointer_rtx,
15914 GEN_INT (-RED_ZONE_SIZE)));
15915 emit_move_insn (result, operand);
15917 else if (!TARGET_RED_ZONE && TARGET_64BIT)
15919 switch (mode)
15921 case HImode:
15922 case SImode:
15923 operand = gen_lowpart (DImode, operand);
15924 /* FALLTHRU */
15925 case DImode:
15926 emit_insn (
15927 gen_rtx_SET (VOIDmode,
15928 gen_rtx_MEM (DImode,
15929 gen_rtx_PRE_DEC (DImode,
15930 stack_pointer_rtx)),
15931 operand));
15932 break;
15933 default:
15934 gcc_unreachable ();
15936 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15938 else
15940 switch (mode)
15942 case DImode:
15944 rtx operands[2];
15945 split_di (&operand, 1, operands, operands + 1);
15946 emit_insn (
15947 gen_rtx_SET (VOIDmode,
15948 gen_rtx_MEM (SImode,
15949 gen_rtx_PRE_DEC (Pmode,
15950 stack_pointer_rtx)),
15951 operands[1]));
15952 emit_insn (
15953 gen_rtx_SET (VOIDmode,
15954 gen_rtx_MEM (SImode,
15955 gen_rtx_PRE_DEC (Pmode,
15956 stack_pointer_rtx)),
15957 operands[0]));
15959 break;
15960 case HImode:
15961 /* Store HImodes as SImodes. */
15962 operand = gen_lowpart (SImode, operand);
15963 /* FALLTHRU */
15964 case SImode:
15965 emit_insn (
15966 gen_rtx_SET (VOIDmode,
15967 gen_rtx_MEM (GET_MODE (operand),
15968 gen_rtx_PRE_DEC (SImode,
15969 stack_pointer_rtx)),
15970 operand));
15971 break;
15972 default:
15973 gcc_unreachable ();
15975 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15977 return result;
15980 /* Free operand from the memory. */
15981 void
15982 ix86_free_from_memory (enum machine_mode mode)
15984 if (!TARGET_RED_ZONE)
15986 int size;
15988 if (mode == DImode || TARGET_64BIT)
15989 size = 8;
15990 else
15991 size = 4;
15992 /* Use LEA to deallocate stack space. In peephole2 it will be converted
15993 to a pop or add instruction if registers are available. */
15994 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
15995 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
15996 GEN_INT (size))));
16000 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
16001 QImode must go into class Q_REGS.
16002 Narrow ALL_REGS to GENERAL_REGS. This allows movsf and
16003 movdf to do mem-to-mem moves through integer regs. */
16004 enum reg_class
16005 ix86_preferred_reload_class (rtx x, enum reg_class class)
16007 /* We're only allowed to return a subclass of CLASS. Many of the
16008 following checks fail for NO_REGS, so eliminate that early. */
16009 if (class == NO_REGS)
16010 return NO_REGS;
16012 /* All classes can load zeros. */
16013 if (x == CONST0_RTX (GET_MODE (x)))
16014 return class;
16016 /* Floating-point constants need more complex checks. */
16017 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
16019 /* General regs can load everything. */
16020 if (reg_class_subset_p (class, GENERAL_REGS))
16021 return class;
16023 /* Floats can load 0 and 1 plus some others. Note that we eliminated
16024 zero above. We only want to wind up preferring 80387 registers if
16025 we plan on doing computation with them. */
16026 if (TARGET_80387
16027 && (TARGET_MIX_SSE_I387
16028 || !(TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (x))))
16029 && standard_80387_constant_p (x))
16031 /* Limit class to non-sse. */
16032 if (class == FLOAT_SSE_REGS)
16033 return FLOAT_REGS;
16034 if (class == FP_TOP_SSE_REGS)
16035 return FP_TOP_REG;
16036 if (class == FP_SECOND_SSE_REGS)
16037 return FP_SECOND_REG;
16038 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
16039 return class;
16042 return NO_REGS;
16044 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
16045 return NO_REGS;
16046 if (MAYBE_SSE_CLASS_P (class) && CONSTANT_P (x))
16047 return NO_REGS;
16049 /* Generally when we see PLUS here, it's the function invariant
16050 (plus soft-fp const_int), which can only be computed into general
16051 regs. */
16052 if (GET_CODE (x) == PLUS)
16053 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
16055 /* QImode constants are easy to load, but non-constant QImode data
16056 must go into Q_REGS. */
16057 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
16059 if (reg_class_subset_p (class, Q_REGS))
16060 return class;
16061 if (reg_class_subset_p (Q_REGS, class))
16062 return Q_REGS;
16063 return NO_REGS;
16066 return class;
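/* Illustrative note, not part of the original source: the practical effect
   of the CONST_DOUBLE handling above is that a constant such as 1.0, which
   standard_80387_constant_p recognizes (fld1), may stay in an x87 class,
   while an arbitrary double constant yields NO_REGS and is therefore
   reloaded from the constant pool rather than loaded into a floating-point
   register directly.  */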
16069 /* If we are copying between general and FP registers, we need a memory
16070 location. The same is true for SSE and MMX registers.
16072 The macro can't work reliably when one of the CLASSES is a class containing
16073 registers from multiple units (SSE, MMX, integer). We avoid this by never
16074 combining those units in a single alternative in the machine description.
16075 Ensure that this constraint holds to avoid surprises.
16077 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
16078 enforce these sanity checks. */
16080 int
16081 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
16082 enum machine_mode mode, int strict)
16084 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
16085 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
16086 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
16087 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
16088 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
16089 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
16091 gcc_assert (!strict);
16092 return true;
16095 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
16096 return true;
16098 /* ??? This is a lie. We do have moves between mmx and general regs, and
16099 between mmx and sse2. But by saying we need secondary memory we discourage the
16100 register allocator from using the mmx registers unless needed. */
16101 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
16102 return true;
16104 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
16106 /* SSE1 doesn't have any direct moves from other classes. */
16107 if (!TARGET_SSE2)
16108 return true;
16110 /* If the target says that inter-unit moves are more expensive
16111 than moving through memory, then don't generate them. */
16112 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
16113 return true;
16115 /* Between SSE and general, we have moves no larger than word size. */
16116 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
16117 return true;
16119 /* ??? For the cost of one register reformat penalty, we could use
16120 the same instructions to move SFmode and DFmode data, but the
16121 relevant move patterns don't support those alternatives. */
16122 if (mode == SFmode || mode == DFmode)
16123 return true;
16126 return false;
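/* Illustrative note, not part of the original source: with only SSE1
   enabled, a DImode move between an SSE class and GENERAL_REGS reports
   that secondary memory is needed (the !TARGET_SSE2 test above), so the
   value travels through a stack slot instead of a direct register to
   register move.  */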
16129 /* Return true if the registers in CLASS cannot represent the change from
16130 modes FROM to TO. */
16132 bool
16133 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
16134 enum reg_class class)
16136 if (from == to)
16137 return false;
16139 /* x87 registers can't do subreg at all, as all values are reformatted
16140 to extended precision. */
16141 if (MAYBE_FLOAT_CLASS_P (class))
16142 return true;
16144 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
16146 /* Vector registers do not support QI or HImode loads. If we don't
16147 disallow a change to these modes, reload will assume it's ok to
16148 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
16149 the vec_dupv4hi pattern. */
16150 if (GET_MODE_SIZE (from) < 4)
16151 return true;
16153 /* Vector registers do not support subreg with nonzero offsets, which
16154 are otherwise valid for integer registers. Since we can't see
16155 whether we have a nonzero offset from here, prohibit all
16156 nonparadoxical subregs changing size. */
16157 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
16158 return true;
16161 return false;
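/* Illustrative note, not part of the original source: for an SSE class a
   narrowing subreg such as viewing a V4SI value as V2SI (16 bytes down to
   8) is rejected by the size check above, while a same-size change such
   as V4SI to V16QI remains allowed.  */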
16164 /* Return the cost of moving data from a register in class CLASS1 to
16165 one in class CLASS2.
16167 It is not required that the cost always equal 2 when FROM is the same as TO;
16168 on some machines it is expensive to move between registers if they are not
16169 general registers. */
16171 int
16172 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
16173 enum reg_class class2)
16175 /* In case we require secondary memory, compute cost of the store followed
16176 by load. In order to avoid bad register allocation choices, we need
16177 this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
16179 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
16181 int cost = 1;
16183 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
16184 MEMORY_MOVE_COST (mode, class1, 1));
16185 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
16186 MEMORY_MOVE_COST (mode, class2, 1));
16188 /* In case of copying from a general purpose register we may emit multiple
16189 stores followed by a single load, causing a memory size mismatch stall.
16190 Count this as an arbitrarily high cost of 20. */
16191 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
16192 cost += 20;
16194 /* In the case of FP/MMX moves, the registers actually overlap, and we
16195 have to switch modes in order to treat them differently. */
16196 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
16197 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
16198 cost += 20;
16200 return cost;
16203 /* Moves between SSE/MMX and integer unit are expensive. */
16204 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
16205 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
16206 return ix86_cost->mmxsse_to_integer;
16207 if (MAYBE_FLOAT_CLASS_P (class1))
16208 return ix86_cost->fp_move;
16209 if (MAYBE_SSE_CLASS_P (class1))
16210 return ix86_cost->sse_move;
16211 if (MAYBE_MMX_CLASS_P (class1))
16212 return ix86_cost->mmx_move;
16213 return 2;
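/* Illustrative note, not part of the original source: when no secondary
   memory is required, the cost comes straight from the active cost table.
   For example an SImode move between an SSE class and GENERAL_REGS (with
   SSE2 and inter-unit moves enabled) costs ix86_cost->mmxsse_to_integer,
   while a move within the general registers keeps the default cost of 2.  */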
16216 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
16218 bool
16219 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
16221 /* Flags, and only flags, can hold CCmode values. */
16222 if (CC_REGNO_P (regno))
16223 return GET_MODE_CLASS (mode) == MODE_CC;
16224 if (GET_MODE_CLASS (mode) == MODE_CC
16225 || GET_MODE_CLASS (mode) == MODE_RANDOM
16226 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
16227 return 0;
16228 if (FP_REGNO_P (regno))
16229 return VALID_FP_MODE_P (mode);
16230 if (SSE_REGNO_P (regno))
16232 /* We implement the move patterns for all vector modes into and
16233 out of SSE registers, even when no operation instructions
16234 are available. */
16235 return (VALID_SSE_REG_MODE (mode)
16236 || VALID_SSE2_REG_MODE (mode)
16237 || VALID_MMX_REG_MODE (mode)
16238 || VALID_MMX_REG_MODE_3DNOW (mode));
16240 if (MMX_REGNO_P (regno))
16242 /* We implement the move patterns for 3DNOW modes even in MMX mode,
16243 so if the register is available at all, then we can move data of
16244 the given mode into or out of it. */
16245 return (VALID_MMX_REG_MODE (mode)
16246 || VALID_MMX_REG_MODE_3DNOW (mode));
16249 if (mode == QImode)
16251 /* Take care with QImode values - they can be in non-QI regs,
16252 but then they cause partial register stalls. */
16253 if (regno < 4 || TARGET_64BIT)
16254 return 1;
16255 if (!TARGET_PARTIAL_REG_STALL)
16256 return 1;
16257 return reload_in_progress || reload_completed;
16259 /* We handle both integers and floats in the general purpose registers. */
16260 else if (VALID_INT_MODE_P (mode))
16261 return 1;
16262 else if (VALID_FP_MODE_P (mode))
16263 return 1;
16264 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
16265 on to use that value in smaller contexts, this can easily force a
16266 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
16267 supporting DImode, allow it. */
16268 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
16269 return 1;
16271 return 0;
16274 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
16275 tieable integer mode. */
16277 static bool
16278 ix86_tieable_integer_mode_p (enum machine_mode mode)
16280 switch (mode)
16282 case HImode:
16283 case SImode:
16284 return true;
16286 case QImode:
16287 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
16289 case DImode:
16290 return TARGET_64BIT;
16292 default:
16293 return false;
16297 /* Return true if MODE1 is accessible in a register that can hold MODE2
16298 without copying. That is, all register classes that can hold MODE2
16299 can also hold MODE1. */
16301 bool
16302 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
16304 if (mode1 == mode2)
16305 return true;
16307 if (ix86_tieable_integer_mode_p (mode1)
16308 && ix86_tieable_integer_mode_p (mode2))
16309 return true;
16311 /* MODE2 being XFmode implies fp stack or general regs, which means we
16312 can tie any smaller floating point modes to it. Note that we do not
16313 tie this with TFmode. */
16314 if (mode2 == XFmode)
16315 return mode1 == SFmode || mode1 == DFmode;
16317 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
16318 that we can tie it with SFmode. */
16319 if (mode2 == DFmode)
16320 return mode1 == SFmode;
16322 /* If MODE2 is only appropriate for an SSE register, then tie with
16323 any other mode acceptable to SSE registers. */
16324 if (GET_MODE_SIZE (mode2) >= 8
16325 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
16326 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
16328 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
16329 with any other mode acceptable to MMX registers. */
16330 if (GET_MODE_SIZE (mode2) == 8
16331 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
16332 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
16334 return false;
16337 /* Return the cost of moving data of mode M between a
16338 register and memory. A value of 2 is the default; this cost is
16339 relative to those in `REGISTER_MOVE_COST'.
16341 If moving between registers and memory is more expensive than
16342 between two registers, you should define this macro to express the
16343 relative cost.
16345 Also model the increased cost of moving QImode registers in non-Q_REGS
16346 classes. */
16348 int
16349 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
16351 if (FLOAT_CLASS_P (class))
16353 int index;
16354 switch (mode)
16356 case SFmode:
16357 index = 0;
16358 break;
16359 case DFmode:
16360 index = 1;
16361 break;
16362 case XFmode:
16363 index = 2;
16364 break;
16365 default:
16366 return 100;
16368 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
16370 if (SSE_CLASS_P (class))
16372 int index;
16373 switch (GET_MODE_SIZE (mode))
16375 case 4:
16376 index = 0;
16377 break;
16378 case 8:
16379 index = 1;
16380 break;
16381 case 16:
16382 index = 2;
16383 break;
16384 default:
16385 return 100;
16387 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
16389 if (MMX_CLASS_P (class))
16391 int index;
16392 switch (GET_MODE_SIZE (mode))
16394 case 4:
16395 index = 0;
16396 break;
16397 case 8:
16398 index = 1;
16399 break;
16400 default:
16401 return 100;
16403 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
16405 switch (GET_MODE_SIZE (mode))
16407 case 1:
16408 if (in)
16409 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
16410 : ix86_cost->movzbl_load);
16411 else
16412 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
16413 : ix86_cost->int_store[0] + 4);
16414 break;
16415 case 2:
16416 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
16417 default:
16418 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
16419 if (mode == TFmode)
16420 mode = XFmode;
16421 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
16422 * (((int) GET_MODE_SIZE (mode)
16423 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
16427 /* Compute a (partial) cost for rtx X. Return true if the complete
16428 cost has been computed, and false if subexpressions should be
16429 scanned. In either case, *TOTAL contains the cost result. */
16431 static bool
16432 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
16434 enum machine_mode mode = GET_MODE (x);
16436 switch (code)
16438 case CONST_INT:
16439 case CONST:
16440 case LABEL_REF:
16441 case SYMBOL_REF:
16442 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
16443 *total = 3;
16444 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
16445 *total = 2;
16446 else if (flag_pic && SYMBOLIC_CONST (x)
16447 && (!TARGET_64BIT
16448 || (GET_CODE (x) != LABEL_REF
16449 && (GET_CODE (x) != SYMBOL_REF
16450 || !SYMBOL_REF_LOCAL_P (x)))))
16451 *total = 1;
16452 else
16453 *total = 0;
16454 return true;
16456 case CONST_DOUBLE:
16457 if (mode == VOIDmode)
16458 *total = 0;
16459 else
16460 switch (standard_80387_constant_p (x))
16462 case 1: /* 0.0 */
16463 *total = 1;
16464 break;
16465 default: /* Other constants */
16466 *total = 2;
16467 break;
16468 case 0:
16469 case -1:
16470 /* Start with (MEM (SYMBOL_REF)), since that's where
16471 it'll probably end up. Add a penalty for size. */
16472 *total = (COSTS_N_INSNS (1)
16473 + (flag_pic != 0 && !TARGET_64BIT)
16474 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
16475 break;
16477 return true;
16479 case ZERO_EXTEND:
16480 /* Zero extension is often completely free on x86_64, so make
16481 it as cheap as possible. */
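/* Writing a 32-bit register in 64-bit mode implicitly clears the upper
   32 bits of the full register, so SImode-to-DImode zero extension
   usually needs no extra instruction at all. */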
16482 if (TARGET_64BIT && mode == DImode
16483 && GET_MODE (XEXP (x, 0)) == SImode)
16484 *total = 1;
16485 else if (TARGET_ZERO_EXTEND_WITH_AND)
16486 *total = ix86_cost->add;
16487 else
16488 *total = ix86_cost->movzx;
16489 return false;
16491 case SIGN_EXTEND:
16492 *total = ix86_cost->movsx;
16493 return false;
16495 case ASHIFT:
16496 if (GET_CODE (XEXP (x, 1)) == CONST_INT
16497 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
16499 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16500 if (value == 1)
16502 *total = ix86_cost->add;
16503 return false;
16505 if ((value == 2 || value == 3)
16506 && ix86_cost->lea <= ix86_cost->shift_const)
16508 *total = ix86_cost->lea;
16509 return false;
16512 /* FALLTHRU */
16514 case ROTATE:
16515 case ASHIFTRT:
16516 case LSHIFTRT:
16517 case ROTATERT:
16518 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
16520 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16522 if (INTVAL (XEXP (x, 1)) > 32)
16523 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
16524 else
16525 *total = ix86_cost->shift_const * 2;
16527 else
16529 if (GET_CODE (XEXP (x, 1)) == AND)
16530 *total = ix86_cost->shift_var * 2;
16531 else
16532 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
16535 else
16537 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16538 *total = ix86_cost->shift_const;
16539 else
16540 *total = ix86_cost->shift_var;
16542 return false;
16544 case MULT:
16545 if (FLOAT_MODE_P (mode))
16547 *total = ix86_cost->fmul;
16548 return false;
16550 else
16552 rtx op0 = XEXP (x, 0);
16553 rtx op1 = XEXP (x, 1);
16554 int nbits;
16555 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16557 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16558 for (nbits = 0; value != 0; value &= value - 1)
16559 nbits++;
16561 else
16562 /* This is arbitrary. */
16563 nbits = 7;
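/* At this point NBITS is the number of set bits in a constant multiplier
   ("value &= value - 1" clears the lowest set bit each iteration, e.g.
   0x16 -> 0x14 -> 0x10 -> 0, giving 3), or the arbitrary guess 7 for a
   non-constant operand. */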
16565 /* Compute costs correctly for widening multiplication. */
16566 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
16567 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
16568 == GET_MODE_SIZE (mode))
16570 int is_mulwiden = 0;
16571 enum machine_mode inner_mode = GET_MODE (op0);
16573 if (GET_CODE (op0) == GET_CODE (op1))
16574 is_mulwiden = 1, op1 = XEXP (op1, 0);
16575 else if (GET_CODE (op1) == CONST_INT)
16577 if (GET_CODE (op0) == SIGN_EXTEND)
16578 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
16579 == INTVAL (op1);
16580 else
16581 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
16584 if (is_mulwiden)
16585 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
16588 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
16589 + nbits * ix86_cost->mult_bit
16590 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
16592 return true;
16595 case DIV:
16596 case UDIV:
16597 case MOD:
16598 case UMOD:
16599 if (FLOAT_MODE_P (mode))
16600 *total = ix86_cost->fdiv;
16601 else
16602 *total = ix86_cost->divide[MODE_INDEX (mode)];
16603 return false;
16605 case PLUS:
16606 if (FLOAT_MODE_P (mode))
16607 *total = ix86_cost->fadd;
16608 else if (GET_MODE_CLASS (mode) == MODE_INT
16609 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
16611 if (GET_CODE (XEXP (x, 0)) == PLUS
16612 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
16613 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
16614 && CONSTANT_P (XEXP (x, 1)))
16616 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
16617 if (val == 2 || val == 4 || val == 8)
16619 *total = ix86_cost->lea;
16620 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16621 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
16622 outer_code);
16623 *total += rtx_cost (XEXP (x, 1), outer_code);
16624 return true;
16627 else if (GET_CODE (XEXP (x, 0)) == MULT
16628 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
16630 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
16631 if (val == 2 || val == 4 || val == 8)
16633 *total = ix86_cost->lea;
16634 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16635 *total += rtx_cost (XEXP (x, 1), outer_code);
16636 return true;
16639 else if (GET_CODE (XEXP (x, 0)) == PLUS)
16641 *total = ix86_cost->lea;
16642 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16643 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16644 *total += rtx_cost (XEXP (x, 1), outer_code);
16645 return true;
16648 /* FALLTHRU */
16650 case MINUS:
16651 if (FLOAT_MODE_P (mode))
16653 *total = ix86_cost->fadd;
16654 return false;
16656 /* FALLTHRU */
16658 case AND:
16659 case IOR:
16660 case XOR:
16661 if (!TARGET_64BIT && mode == DImode)
16663 *total = (ix86_cost->add * 2
16664 + (rtx_cost (XEXP (x, 0), outer_code)
16665 << (GET_MODE (XEXP (x, 0)) != DImode))
16666 + (rtx_cost (XEXP (x, 1), outer_code)
16667 << (GET_MODE (XEXP (x, 1)) != DImode)));
16668 return true;
16670 /* FALLTHRU */
16672 case NEG:
16673 if (FLOAT_MODE_P (mode))
16675 *total = ix86_cost->fchs;
16676 return false;
16678 /* FALLTHRU */
16680 case NOT:
16681 if (!TARGET_64BIT && mode == DImode)
16682 *total = ix86_cost->add * 2;
16683 else
16684 *total = ix86_cost->add;
16685 return false;
16687 case COMPARE:
16688 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
16689 && XEXP (XEXP (x, 0), 1) == const1_rtx
16690 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
16691 && XEXP (x, 1) == const0_rtx)
16693 /* This kind of construct is implemented using test[bwl].
16694 Treat it as if we had an AND. */
16695 *total = (ix86_cost->add
16696 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
16697 + rtx_cost (const1_rtx, outer_code));
16698 return true;
16700 return false;
16702 case FLOAT_EXTEND:
16703 if (!TARGET_SSE_MATH
16704 || mode == XFmode
16705 || (mode == DFmode && !TARGET_SSE2))
16706 *total = 0;
16707 return false;
16709 case ABS:
16710 if (FLOAT_MODE_P (mode))
16711 *total = ix86_cost->fabs;
16712 return false;
16714 case SQRT:
16715 if (FLOAT_MODE_P (mode))
16716 *total = ix86_cost->fsqrt;
16717 return false;
16719 case UNSPEC:
16720 if (XINT (x, 1) == UNSPEC_TP)
16721 *total = 0;
16722 return false;
16724 default:
16725 return false;
16729 #if TARGET_MACHO
16731 static int current_machopic_label_num;
16733 /* Given a symbol name and its associated stub, write out the
16734 definition of the stub. */
16736 void
16737 machopic_output_stub (FILE *file, const char *symb, const char *stub)
16739 unsigned int length;
16740 char *binder_name, *symbol_name, lazy_ptr_name[32];
16741 int label = ++current_machopic_label_num;
16743 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
16744 symb = (*targetm.strip_name_encoding) (symb);
16746 length = strlen (stub);
16747 binder_name = alloca (length + 32);
16748 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
16750 length = strlen (symb);
16751 symbol_name = alloca (length + 32);
16752 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
16754 sprintf (lazy_ptr_name, "L%d$lz", label);
16756 if (MACHOPIC_PURE)
16757 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
16758 else
16759 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
16761 fprintf (file, "%s:\n", stub);
16762 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16764 if (MACHOPIC_PURE)
16766 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
16767 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
16768 fprintf (file, "\tjmp %%edx\n");
16770 else
16771 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
16773 fprintf (file, "%s:\n", binder_name);
16775 if (MACHOPIC_PURE)
16777 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
16778 fprintf (file, "\tpushl %%eax\n");
16780 else
16781 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
16783 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
16785 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
16786 fprintf (file, "%s:\n", lazy_ptr_name);
16787 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16788 fprintf (file, "\t.long %s\n", binder_name);
16791 void
16792 darwin_x86_file_end (void)
16794 darwin_file_end ();
16795 ix86_file_end ();
16797 #endif /* TARGET_MACHO */
16799 /* Order the registers for register allocator. */
16801 void
16802 x86_order_regs_for_local_alloc (void)
16804 int pos = 0;
16805 int i;
16807 /* First allocate the local general purpose registers. */
16808 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16809 if (GENERAL_REGNO_P (i) && call_used_regs[i])
16810 reg_alloc_order [pos++] = i;
16812 /* Global general purpose registers. */
16813 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16814 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
16815 reg_alloc_order [pos++] = i;
16817 /* x87 registers come first in case we are doing FP math
16818 using them. */
16819 if (!TARGET_SSE_MATH)
16820 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16821 reg_alloc_order [pos++] = i;
16823 /* SSE registers. */
16824 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
16825 reg_alloc_order [pos++] = i;
16826 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
16827 reg_alloc_order [pos++] = i;
16829 /* x87 registers. */
16830 if (TARGET_SSE_MATH)
16831 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16832 reg_alloc_order [pos++] = i;
16834 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
16835 reg_alloc_order [pos++] = i;
16837 /* Initialize the rest of the array, since some registers are never
16838 allocated at all. */
16839 while (pos < FIRST_PSEUDO_REGISTER)
16840 reg_alloc_order [pos++] = 0;
16843 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
16844 struct attribute_spec.handler. */
16845 static tree
16846 ix86_handle_struct_attribute (tree *node, tree name,
16847 tree args ATTRIBUTE_UNUSED,
16848 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
16850 tree *type = NULL;
16851 if (DECL_P (*node))
16853 if (TREE_CODE (*node) == TYPE_DECL)
16854 type = &TREE_TYPE (*node);
16856 else
16857 type = node;
16859 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
16860 || TREE_CODE (*type) == UNION_TYPE)))
16862 warning (OPT_Wattributes, "%qs attribute ignored",
16863 IDENTIFIER_POINTER (name));
16864 *no_add_attrs = true;
16867 else if ((is_attribute_p ("ms_struct", name)
16868 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
16869 || ((is_attribute_p ("gcc_struct", name)
16870 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
16872 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
16873 IDENTIFIER_POINTER (name));
16874 *no_add_attrs = true;
16877 return NULL_TREE;
16880 static bool
16881 ix86_ms_bitfield_layout_p (tree record_type)
16883 return (TARGET_MS_BITFIELD_LAYOUT &&
16884 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
16885 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
16888 /* Returns an expression indicating where the this parameter is
16889 located on entry to the FUNCTION. */
16891 static rtx
16892 x86_this_parameter (tree function)
16894 tree type = TREE_TYPE (function);
16896 if (TARGET_64BIT)
16898 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
16899 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
16902 if (ix86_function_regparm (type, function) > 0)
16904 tree parm;
16906 parm = TYPE_ARG_TYPES (type);
16907 /* Figure out whether or not the function has a variable number of
16908 arguments. */
16909 for (; parm; parm = TREE_CHAIN (parm))
16910 if (TREE_VALUE (parm) == void_type_node)
16911 break;
16912 /* If not, the this parameter is in the first argument. */
16913 if (parm)
16915 int regno = 0;
16916 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
16917 regno = 2;
16918 return gen_rtx_REG (SImode, regno);
16922 if (aggregate_value_p (TREE_TYPE (type), type))
16923 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
16924 else
16925 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
16928 /* Determine whether x86_output_mi_thunk can succeed. */
16930 static bool
16931 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
16932 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
16933 HOST_WIDE_INT vcall_offset, tree function)
16935 /* 64-bit can handle anything. */
16936 if (TARGET_64BIT)
16937 return true;
16939 /* For 32-bit, everything's fine if we have one free register. */
16940 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
16941 return true;
16943 /* Need a free register for vcall_offset. */
16944 if (vcall_offset)
16945 return false;
16947 /* Need a free register for GOT references. */
16948 if (flag_pic && !(*targetm.binds_local_p) (function))
16949 return false;
16951 /* Otherwise ok. */
16952 return true;
16955 /* Output the assembler code for a thunk function. THUNK_DECL is the
16956 declaration for the thunk function itself, FUNCTION is the decl for
16957 the target function. DELTA is an immediate constant offset to be
16958 added to THIS. If VCALL_OFFSET is nonzero, the word at
16959 *(*this + vcall_offset) should be added to THIS. */
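/* Roughly, the emitted thunk performs, in C terms:
     this += DELTA;
     if (VCALL_OFFSET)
       this += *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
   and then tail-jumps to FUNCTION with THIS left in its argument slot. */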
16961 static void
16962 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
16963 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
16964 HOST_WIDE_INT vcall_offset, tree function)
16966 rtx xops[3];
16967 rtx this = x86_this_parameter (function);
16968 rtx this_reg, tmp;
16970 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
16971 pull it in now and let DELTA benefit. */
16972 if (REG_P (this))
16973 this_reg = this;
16974 else if (vcall_offset)
16976 /* Put the this parameter into %eax. */
16977 xops[0] = this;
16978 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
16979 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16981 else
16982 this_reg = NULL_RTX;
16984 /* Adjust the this parameter by a fixed constant. */
16985 if (delta)
16987 xops[0] = GEN_INT (delta);
16988 xops[1] = this_reg ? this_reg : this;
16989 if (TARGET_64BIT)
16991 if (!x86_64_general_operand (xops[0], DImode))
16993 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16994 xops[1] = tmp;
16995 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
16996 xops[0] = tmp;
16997 xops[1] = this;
16999 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
17001 else
17002 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
17005 /* Adjust the this parameter by a value stored in the vtable. */
17006 if (vcall_offset)
17008 if (TARGET_64BIT)
17009 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
17010 else
17012 int tmp_regno = 2 /* ECX */;
17013 if (lookup_attribute ("fastcall",
17014 TYPE_ATTRIBUTES (TREE_TYPE (function))))
17015 tmp_regno = 0 /* EAX */;
17016 tmp = gen_rtx_REG (SImode, tmp_regno);
17019 xops[0] = gen_rtx_MEM (Pmode, this_reg);
17020 xops[1] = tmp;
17021 if (TARGET_64BIT)
17022 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
17023 else
17024 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
17026 /* Adjust the this parameter. */
17027 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
17028 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
17030 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
17031 xops[0] = GEN_INT (vcall_offset);
17032 xops[1] = tmp2;
17033 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
17034 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
17036 xops[1] = this_reg;
17037 if (TARGET_64BIT)
17038 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
17039 else
17040 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
17043 /* If necessary, drop THIS back to its stack slot. */
17044 if (this_reg && this_reg != this)
17046 xops[0] = this_reg;
17047 xops[1] = this;
17048 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
17051 xops[0] = XEXP (DECL_RTL (function), 0);
17052 if (TARGET_64BIT)
17054 if (!flag_pic || (*targetm.binds_local_p) (function))
17055 output_asm_insn ("jmp\t%P0", xops);
17056 else
17058 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
17059 tmp = gen_rtx_CONST (Pmode, tmp);
17060 tmp = gen_rtx_MEM (QImode, tmp);
17061 xops[0] = tmp;
17062 output_asm_insn ("jmp\t%A0", xops);
17065 else
17067 if (!flag_pic || (*targetm.binds_local_p) (function))
17068 output_asm_insn ("jmp\t%P0", xops);
17069 else
17070 #if TARGET_MACHO
17071 if (TARGET_MACHO)
17073 rtx sym_ref = XEXP (DECL_RTL (function), 0);
17074 tmp = (gen_rtx_SYMBOL_REF
17075 (Pmode,
17076 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
17077 tmp = gen_rtx_MEM (QImode, tmp);
17078 xops[0] = tmp;
17079 output_asm_insn ("jmp\t%0", xops);
17081 else
17082 #endif /* TARGET_MACHO */
17084 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
17085 output_set_got (tmp, NULL_RTX);
17087 xops[1] = tmp;
17088 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
17089 output_asm_insn ("jmp\t{*}%1", xops);
17094 static void
17095 x86_file_start (void)
17097 default_file_start ();
17098 if (X86_FILE_START_VERSION_DIRECTIVE)
17099 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
17100 if (X86_FILE_START_FLTUSED)
17101 fputs ("\t.global\t__fltused\n", asm_out_file);
17102 if (ix86_asm_dialect == ASM_INTEL)
17103 fputs ("\t.intel_syntax\n", asm_out_file);
17106 int
17107 x86_field_alignment (tree field, int computed)
17109 enum machine_mode mode;
17110 tree type = TREE_TYPE (field);
17112 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
17113 return computed;
17114 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
17115 ? get_inner_array_type (type) : type);
17116 if (mode == DFmode || mode == DCmode
17117 || GET_MODE_CLASS (mode) == MODE_INT
17118 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
17119 return MIN (32, computed);
17120 return computed;
17123 /* Output assembler code to FILE to increment profiler label # LABELNO
17124 for profiling a function entry. */
17125 void
17126 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
17128 if (TARGET_64BIT)
17129 if (flag_pic)
17131 #ifndef NO_PROFILE_COUNTERS
17132 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
17133 #endif
17134 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
17136 else
17138 #ifndef NO_PROFILE_COUNTERS
17139 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
17140 #endif
17141 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
17143 else if (flag_pic)
17145 #ifndef NO_PROFILE_COUNTERS
17146 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
17147 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
17148 #endif
17149 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
17151 else
17153 #ifndef NO_PROFILE_COUNTERS
17154 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
17155 PROFILE_COUNT_REGISTER);
17156 #endif
17157 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
17161 /* We don't have exact information about the insn sizes, but we may assume
17162 quite safely that we are informed about all 1 byte insns and memory
17163 address sizes. This is enough to eliminate unnecessary padding in
17164 99% of cases. */
17166 static int
17167 min_insn_size (rtx insn)
17169 int l = 0;
17171 if (!INSN_P (insn) || !active_insn_p (insn))
17172 return 0;
17174 /* Discard the alignments we've emitted and jump tables. */
17175 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
17176 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
17177 return 0;
17178 if (GET_CODE (insn) == JUMP_INSN
17179 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
17180 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
17181 return 0;
17183 /* Important case - calls are always 5 bytes.
17184 It is common to have many calls in a row. */
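/* A direct call is the 0xE8 opcode followed by a 32-bit displacement,
   hence 5 bytes. */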
17185 if (GET_CODE (insn) == CALL_INSN
17186 && symbolic_reference_mentioned_p (PATTERN (insn))
17187 && !SIBLING_CALL_P (insn))
17188 return 5;
17189 if (get_attr_length (insn) <= 1)
17190 return 1;
17192 /* For normal instructions we rely on the address sizes and on the
17193 presence of a symbolic reference, which requires 4 bytes of encoding.
17194 This is not the case for jumps, where references are PC-relative. */
17195 if (GET_CODE (insn) != JUMP_INSN)
17197 l = get_attr_length_address (insn);
17198 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
17199 l = 4;
17201 if (l)
17202 return 1+l;
17203 else
17204 return 2;
17207 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
17208 window. */
17210 static void
17211 ix86_avoid_jump_misspredicts (void)
17213 rtx insn, start = get_insns ();
17214 int nbytes = 0, njumps = 0;
17215 int isjump = 0;
17217 /* Look for all minimal intervals of instructions containing 4 jumps.
17218 The intervals are bounded by START and INSN. NBYTES is the total
17219 size of instructions in the interval including INSN and not including
17220 START. When NBYTES is smaller than 16, it is possible that the
17221 end of START and INSN end up in the same 16-byte window.
17223 The smallest offset in the page INSN can start is the case where START
17224 ends on the offset 0. Offset of INSN is then NBYTES - sizeof (INSN).
17225 We add p2align to 16byte window with maxskip 17 - NBYTES + sizeof (INSN). */
17227 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
17230 nbytes += min_insn_size (insn);
17231 if (dump_file)
17232 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
17233 INSN_UID (insn), min_insn_size (insn));
17234 if ((GET_CODE (insn) == JUMP_INSN
17235 && GET_CODE (PATTERN (insn)) != ADDR_VEC
17236 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
17237 || GET_CODE (insn) == CALL_INSN)
17238 njumps++;
17239 else
17240 continue;
17242 while (njumps > 3)
17244 start = NEXT_INSN (start);
17245 if ((GET_CODE (start) == JUMP_INSN
17246 && GET_CODE (PATTERN (start)) != ADDR_VEC
17247 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
17248 || GET_CODE (start) == CALL_INSN)
17249 njumps--, isjump = 1;
17250 else
17251 isjump = 0;
17252 nbytes -= min_insn_size (start);
17254 gcc_assert (njumps >= 0);
17255 if (dump_file)
17256 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
17257 INSN_UID (start), INSN_UID (insn), nbytes);
17259 if (njumps == 3 && isjump && nbytes < 16)
17261 int padsize = 15 - nbytes + min_insn_size (insn);
17263 if (dump_file)
17264 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
17265 INSN_UID (insn), padsize);
17266 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
17271 /* AMD Athlon works faster
17272 when RET is not the destination of a conditional jump and is not directly
17273 preceded by another jump instruction. We avoid the penalty by inserting a
17274 NOP just before the RET instruction in such cases. */
17275 static void
17276 ix86_pad_returns (void)
17278 edge e;
17279 edge_iterator ei;
17281 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
17283 basic_block bb = e->src;
17284 rtx ret = BB_END (bb);
17285 rtx prev;
17286 bool replace = false;
17288 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
17289 || !maybe_hot_bb_p (bb))
17290 continue;
17291 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
17292 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
17293 break;
17294 if (prev && GET_CODE (prev) == CODE_LABEL)
17296 edge e;
17297 edge_iterator ei;
17299 FOR_EACH_EDGE (e, ei, bb->preds)
17300 if (EDGE_FREQUENCY (e) && e->src->index >= 0
17301 && !(e->flags & EDGE_FALLTHRU))
17302 replace = true;
17304 if (!replace)
17306 prev = prev_active_insn (ret);
17307 if (prev
17308 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
17309 || GET_CODE (prev) == CALL_INSN))
17310 replace = true;
17311 /* Empty functions get branch mispredict even when the jump destination
17312 is not visible to us. */
17313 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
17314 replace = true;
17316 if (replace)
17318 emit_insn_before (gen_return_internal_long (), ret);
17319 delete_insn (ret);
17324 /* Implement machine specific optimizations. We implement padding of returns
17325 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
17326 static void
17327 ix86_reorg (void)
17329 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
17330 ix86_pad_returns ();
17331 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
17332 ix86_avoid_jump_misspredicts ();
17335 /* Return nonzero when a QImode register that must be represented via a REX
17336 prefix is used. */
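/* In 64-bit mode the low bytes of %rsi, %rdi, %rbp and %rsp (hard
   registers 4-7) are only reachable with a REX prefix. */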
17337 bool
17338 x86_extended_QIreg_mentioned_p (rtx insn)
17340 int i;
17341 extract_insn_cached (insn);
17342 for (i = 0; i < recog_data.n_operands; i++)
17343 if (REG_P (recog_data.operand[i])
17344 && REGNO (recog_data.operand[i]) >= 4)
17345 return true;
17346 return false;
17349 /* Return nonzero when P points to register encoded via REX prefix.
17350 Called via for_each_rtx. */
17351 static int
17352 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
17354 unsigned int regno;
17355 if (!REG_P (*p))
17356 return 0;
17357 regno = REGNO (*p);
17358 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
17361 /* Return true when INSN mentions register that must be encoded using REX
17362 prefix. */
17363 bool
17364 x86_extended_reg_mentioned_p (rtx insn)
17366 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
17369 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
17370 optabs would emit if we didn't have TFmode patterns. */
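/* The scheme: if the input is non-negative when viewed as a signed number,
   a plain signed conversion is exact. Otherwise halve the value while
   keeping the low bit as a sticky rounding bit (i0 = (in >> 1) | (in & 1)),
   convert that signed value, and double the result. */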
17372 void
17373 x86_emit_floatuns (rtx operands[2])
17375 rtx neglab, donelab, i0, i1, f0, in, out;
17376 enum machine_mode mode, inmode;
17378 inmode = GET_MODE (operands[1]);
17379 gcc_assert (inmode == SImode || inmode == DImode);
17381 out = operands[0];
17382 in = force_reg (inmode, operands[1]);
17383 mode = GET_MODE (out);
17384 neglab = gen_label_rtx ();
17385 donelab = gen_label_rtx ();
17386 i1 = gen_reg_rtx (Pmode);
17387 f0 = gen_reg_rtx (mode);
17389 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
17391 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
17392 emit_jump_insn (gen_jump (donelab));
17393 emit_barrier ();
17395 emit_label (neglab);
17397 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17398 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17399 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
17400 expand_float (f0, i0, 0);
17401 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
17403 emit_label (donelab);
17406 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17407 with all elements equal to VAR. Return true if successful. */
17409 static bool
17410 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
17411 rtx target, rtx val)
17413 enum machine_mode smode, wsmode, wvmode;
17414 rtx x;
17416 switch (mode)
17418 case V2SImode:
17419 case V2SFmode:
17420 if (!mmx_ok && !TARGET_SSE)
17421 return false;
17422 /* FALLTHRU */
17424 case V2DFmode:
17425 case V2DImode:
17426 case V4SFmode:
17427 case V4SImode:
17428 val = force_reg (GET_MODE_INNER (mode), val);
17429 x = gen_rtx_VEC_DUPLICATE (mode, val);
17430 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17431 return true;
17433 case V4HImode:
17434 if (!mmx_ok)
17435 return false;
17436 if (TARGET_SSE || TARGET_3DNOW_A)
17438 val = gen_lowpart (SImode, val);
17439 x = gen_rtx_TRUNCATE (HImode, val);
17440 x = gen_rtx_VEC_DUPLICATE (mode, x);
17441 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17442 return true;
17444 else
17446 smode = HImode;
17447 wsmode = SImode;
17448 wvmode = V2SImode;
17449 goto widen;
17452 case V8QImode:
17453 if (!mmx_ok)
17454 return false;
17455 smode = QImode;
17456 wsmode = HImode;
17457 wvmode = V4HImode;
17458 goto widen;
17459 case V8HImode:
17460 smode = HImode;
17461 wsmode = SImode;
17462 wvmode = V4SImode;
17463 goto widen;
17464 case V16QImode:
17465 smode = QImode;
17466 wsmode = HImode;
17467 wvmode = V8HImode;
17468 goto widen;
17469 widen:
17470 /* Replicate the value once into the next wider mode and recurse. */
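/* E.g. for V16QImode: widen the QImode value V to the HImode value
   (V << 8) | V, build a V8HImode vector of that, and reinterpret the
   result as V16QImode. */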
17471 val = convert_modes (wsmode, smode, val, true);
17472 x = expand_simple_binop (wsmode, ASHIFT, val,
17473 GEN_INT (GET_MODE_BITSIZE (smode)),
17474 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17475 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
17477 x = gen_reg_rtx (wvmode);
17478 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
17479 gcc_unreachable ();
17480 emit_move_insn (target, gen_lowpart (mode, x));
17481 return true;
17483 default:
17484 return false;
17488 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17489 whose low element is VAR, and other elements are zero. Return true
17490 if successful. */
17492 static bool
17493 ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
17494 rtx target, rtx var)
17496 enum machine_mode vsimode;
17497 rtx x;
17499 switch (mode)
17501 case V2SFmode:
17502 case V2SImode:
17503 if (!mmx_ok && !TARGET_SSE)
17504 return false;
17505 /* FALLTHRU */
17507 case V2DFmode:
17508 case V2DImode:
17509 var = force_reg (GET_MODE_INNER (mode), var);
17510 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
17511 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17512 return true;
17514 case V4SFmode:
17515 case V4SImode:
17516 var = force_reg (GET_MODE_INNER (mode), var);
17517 x = gen_rtx_VEC_DUPLICATE (mode, var);
17518 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
17519 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17520 return true;
17522 case V8HImode:
17523 case V16QImode:
17524 vsimode = V4SImode;
17525 goto widen;
17526 case V4HImode:
17527 case V8QImode:
17528 if (!mmx_ok)
17529 return false;
17530 vsimode = V2SImode;
17531 goto widen;
17532 widen:
17533 /* Zero extend the variable element to SImode and recurse. */
17534 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
17536 x = gen_reg_rtx (vsimode);
17537 if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
17538 gcc_unreachable ();
17540 emit_move_insn (target, gen_lowpart (mode, x));
17541 return true;
17543 default:
17544 return false;
17548 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17549 consisting of the values in VALS. It is known that all elements
17550 except ONE_VAR are constants. Return true if successful. */
17552 static bool
17553 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
17554 rtx target, rtx vals, int one_var)
17556 rtx var = XVECEXP (vals, 0, one_var);
17557 enum machine_mode wmode;
17558 rtx const_vec, x;
17560 const_vec = copy_rtx (vals);
17561 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
17562 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
17564 switch (mode)
17566 case V2DFmode:
17567 case V2DImode:
17568 case V2SFmode:
17569 case V2SImode:
17570 /* For the two element vectors, it's just as easy to use
17571 the general case. */
17572 return false;
17574 case V4SFmode:
17575 case V4SImode:
17576 case V8HImode:
17577 case V4HImode:
17578 break;
17580 case V16QImode:
17581 wmode = V8HImode;
17582 goto widen;
17583 case V8QImode:
17584 wmode = V4HImode;
17585 goto widen;
17586 widen:
17587 /* There's no way to set one QImode entry easily. Combine
17588 the variable value with its adjacent constant value, and
17589 promote to an HImode set. */
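/* E.g. for one_var == 3 in V16QImode: shift the variable byte left by 8,
   OR in the constant byte from element 2, and store the combined HImode
   value into element 1 of the V8HImode view of the vector. */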
17590 x = XVECEXP (vals, 0, one_var ^ 1);
17591 if (one_var & 1)
17593 var = convert_modes (HImode, QImode, var, true);
17594 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
17595 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17596 x = GEN_INT (INTVAL (x) & 0xff);
17598 else
17600 var = convert_modes (HImode, QImode, var, true);
17601 x = gen_int_mode (INTVAL (x) << 8, HImode);
17603 if (x != const0_rtx)
17604 var = expand_simple_binop (HImode, IOR, var, x, var,
17605 1, OPTAB_LIB_WIDEN);
17607 x = gen_reg_rtx (wmode);
17608 emit_move_insn (x, gen_lowpart (wmode, const_vec));
17609 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
17611 emit_move_insn (target, gen_lowpart (mode, x));
17612 return true;
17614 default:
17615 return false;
17618 emit_move_insn (target, const_vec);
17619 ix86_expand_vector_set (mmx_ok, target, var, one_var);
17620 return true;
17623 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
17624 all values variable, and none identical. */
17626 static void
17627 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
17628 rtx target, rtx vals)
17630 enum machine_mode half_mode = GET_MODE_INNER (mode);
17631 rtx op0 = NULL, op1 = NULL;
17632 bool use_vec_concat = false;
17634 switch (mode)
17636 case V2SFmode:
17637 case V2SImode:
17638 if (!mmx_ok && !TARGET_SSE)
17639 break;
17640 /* FALLTHRU */
17642 case V2DFmode:
17643 case V2DImode:
17644 /* For the two element vectors, we always implement VEC_CONCAT. */
17645 op0 = XVECEXP (vals, 0, 0);
17646 op1 = XVECEXP (vals, 0, 1);
17647 use_vec_concat = true;
17648 break;
17650 case V4SFmode:
17651 half_mode = V2SFmode;
17652 goto half;
17653 case V4SImode:
17654 half_mode = V2SImode;
17655 goto half;
17656 half:
17658 rtvec v;
17660 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
17661 Recurse to load the two halves. */
17663 op0 = gen_reg_rtx (half_mode);
17664 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
17665 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
17667 op1 = gen_reg_rtx (half_mode);
17668 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
17669 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
17671 use_vec_concat = true;
17673 break;
17675 case V8HImode:
17676 case V16QImode:
17677 case V4HImode:
17678 case V8QImode:
17679 break;
17681 default:
17682 gcc_unreachable ();
17685 if (use_vec_concat)
17687 if (!register_operand (op0, half_mode))
17688 op0 = force_reg (half_mode, op0);
17689 if (!register_operand (op1, half_mode))
17690 op1 = force_reg (half_mode, op1);
17692 emit_insn (gen_rtx_SET (VOIDmode, target,
17693 gen_rtx_VEC_CONCAT (mode, op0, op1)));
17695 else
17697 int i, j, n_elts, n_words, n_elt_per_word;
17698 enum machine_mode inner_mode;
17699 rtx words[4], shift;
17701 inner_mode = GET_MODE_INNER (mode);
17702 n_elts = GET_MODE_NUNITS (mode);
17703 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
17704 n_elt_per_word = n_elts / n_words;
17705 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
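/* The loop below packs N_ELT_PER_WORD elements into each word-sized
   register, highest-indexed element first, so the lowest-indexed element
   ends up in the low bits. E.g. for V4HImode on a 32-bit target, word 0
   becomes (elt1 << 16) | elt0. */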
17707 for (i = 0; i < n_words; ++i)
17709 rtx word = NULL_RTX;
17711 for (j = 0; j < n_elt_per_word; ++j)
17713 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
17714 elt = convert_modes (word_mode, inner_mode, elt, true);
17716 if (j == 0)
17717 word = elt;
17718 else
17720 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
17721 word, 1, OPTAB_LIB_WIDEN);
17722 word = expand_simple_binop (word_mode, IOR, word, elt,
17723 word, 1, OPTAB_LIB_WIDEN);
17727 words[i] = word;
17730 if (n_words == 1)
17731 emit_move_insn (target, gen_lowpart (mode, words[0]));
17732 else if (n_words == 2)
17734 rtx tmp = gen_reg_rtx (mode);
17735 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
17736 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
17737 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
17738 emit_move_insn (target, tmp);
17740 else if (n_words == 4)
17742 rtx tmp = gen_reg_rtx (V4SImode);
17743 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
17744 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
17745 emit_move_insn (target, gen_lowpart (mode, tmp));
17747 else
17748 gcc_unreachable ();
17752 /* Initialize vector TARGET via VALS. Suppress the use of MMX
17753 instructions unless MMX_OK is true. */
17755 void
17756 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
17758 enum machine_mode mode = GET_MODE (target);
17759 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17760 int n_elts = GET_MODE_NUNITS (mode);
17761 int n_var = 0, one_var = -1;
17762 bool all_same = true, all_const_zero = true;
17763 int i;
17764 rtx x;
17766 for (i = 0; i < n_elts; ++i)
17768 x = XVECEXP (vals, 0, i);
17769 if (!CONSTANT_P (x))
17770 n_var++, one_var = i;
17771 else if (x != CONST0_RTX (inner_mode))
17772 all_const_zero = false;
17773 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
17774 all_same = false;
17777 /* Constants are best loaded from the constant pool. */
17778 if (n_var == 0)
17780 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
17781 return;
17784 /* If all values are identical, broadcast the value. */
17785 if (all_same
17786 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
17787 XVECEXP (vals, 0, 0)))
17788 return;
17790 /* Values where only one field is non-constant are best loaded from
17791 the pool and overwritten via move later. */
17792 if (n_var == 1)
17794 if (all_const_zero && one_var == 0
17795 && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
17796 XVECEXP (vals, 0, 0)))
17797 return;
17799 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
17800 return;
17803 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
17806 void
17807 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
17809 enum machine_mode mode = GET_MODE (target);
17810 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17811 bool use_vec_merge = false;
17812 rtx tmp;
17814 switch (mode)
17816 case V2SFmode:
17817 case V2SImode:
17818 if (mmx_ok)
17820 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
17821 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
17822 if (elt == 0)
17823 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
17824 else
17825 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
17826 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17827 return;
17829 break;
17831 case V2DFmode:
17832 case V2DImode:
17834 rtx op0, op1;
17836 /* For the two element vectors, we implement a VEC_CONCAT with
17837 the extraction of the other element. */
17839 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
17840 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
17842 if (elt == 0)
17843 op0 = val, op1 = tmp;
17844 else
17845 op0 = tmp, op1 = val;
17847 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
17848 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17850 return;
17852 case V4SFmode:
17853 switch (elt)
17855 case 0:
17856 use_vec_merge = true;
17857 break;
17859 case 1:
17860 /* tmp = target = A B C D */
17861 tmp = copy_to_reg (target);
17862 /* target = A A B B */
17863 emit_insn (gen_sse_unpcklps (target, target, target));
17864 /* target = X A B B */
17865 ix86_expand_vector_set (false, target, val, 0);
17866 /* target = A X C D */
17867 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17868 GEN_INT (1), GEN_INT (0),
17869 GEN_INT (2+4), GEN_INT (3+4)));
17870 return;
17872 case 2:
17873 /* tmp = target = A B C D */
17874 tmp = copy_to_reg (target);
17875 /* tmp = X B C D */
17876 ix86_expand_vector_set (false, tmp, val, 0);
17877 /* target = A B X D */
17878 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17879 GEN_INT (0), GEN_INT (1),
17880 GEN_INT (0+4), GEN_INT (3+4)));
17881 return;
17883 case 3:
17884 /* tmp = target = A B C D */
17885 tmp = copy_to_reg (target);
17886 /* tmp = X B C D */
17887 ix86_expand_vector_set (false, tmp, val, 0);
17888 /* target = A B C X */
17889 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17890 GEN_INT (0), GEN_INT (1),
17891 GEN_INT (2+4), GEN_INT (0+4)));
17892 return;
17894 default:
17895 gcc_unreachable ();
17897 break;
17899 case V4SImode:
17900 /* Element 0 handled by vec_merge below. */
17901 if (elt == 0)
17903 use_vec_merge = true;
17904 break;
17907 if (TARGET_SSE2)
17909 /* With SSE2, use integer shuffles to swap element 0 and ELT,
17910 store into element 0, then shuffle them back. */
17912 rtx order[4];
17914 order[0] = GEN_INT (elt);
17915 order[1] = const1_rtx;
17916 order[2] = const2_rtx;
17917 order[3] = GEN_INT (3);
17918 order[elt] = const0_rtx;
17920 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17921 order[1], order[2], order[3]));
17923 ix86_expand_vector_set (false, target, val, 0);
17925 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17926 order[1], order[2], order[3]));
17928 else
17930 /* For SSE1, we have to reuse the V4SF code. */
17931 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
17932 gen_lowpart (SFmode, val), elt);
17934 return;
17936 case V8HImode:
17937 use_vec_merge = TARGET_SSE2;
17938 break;
17939 case V4HImode:
17940 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17941 break;
17943 case V16QImode:
17944 case V8QImode:
17945 default:
17946 break;
17949 if (use_vec_merge)
17951 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
17952 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
17953 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17955 else
17957 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17959 emit_move_insn (mem, target);
17961 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17962 emit_move_insn (tmp, val);
17964 emit_move_insn (target, mem);
17968 void
17969 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
17971 enum machine_mode mode = GET_MODE (vec);
17972 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17973 bool use_vec_extr = false;
17974 rtx tmp;
17976 switch (mode)
17978 case V2SImode:
17979 case V2SFmode:
17980 if (!mmx_ok)
17981 break;
17982 /* FALLTHRU */
17984 case V2DFmode:
17985 case V2DImode:
17986 use_vec_extr = true;
17987 break;
17989 case V4SFmode:
17990 switch (elt)
17992 case 0:
17993 tmp = vec;
17994 break;
17996 case 1:
17997 case 3:
17998 tmp = gen_reg_rtx (mode);
17999 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
18000 GEN_INT (elt), GEN_INT (elt),
18001 GEN_INT (elt+4), GEN_INT (elt+4)));
18002 break;
18004 case 2:
18005 tmp = gen_reg_rtx (mode);
18006 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
18007 break;
18009 default:
18010 gcc_unreachable ();
18012 vec = tmp;
18013 use_vec_extr = true;
18014 elt = 0;
18015 break;
18017 case V4SImode:
18018 if (TARGET_SSE2)
18020 switch (elt)
18022 case 0:
18023 tmp = vec;
18024 break;
18026 case 1:
18027 case 3:
18028 tmp = gen_reg_rtx (mode);
18029 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
18030 GEN_INT (elt), GEN_INT (elt),
18031 GEN_INT (elt), GEN_INT (elt)));
18032 break;
18034 case 2:
18035 tmp = gen_reg_rtx (mode);
18036 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
18037 break;
18039 default:
18040 gcc_unreachable ();
18042 vec = tmp;
18043 use_vec_extr = true;
18044 elt = 0;
18046 else
18048 /* For SSE1, we have to reuse the V4SF code. */
18049 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
18050 gen_lowpart (V4SFmode, vec), elt);
18051 return;
18053 break;
18055 case V8HImode:
18056 use_vec_extr = TARGET_SSE2;
18057 break;
18058 case V4HImode:
18059 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
18060 break;
18062 case V16QImode:
18063 case V8QImode:
18064 /* ??? Could extract the appropriate HImode element and shift. */
18065 default:
18066 break;
18069 if (use_vec_extr)
18071 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
18072 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
18074 /* Let the rtl optimizers know about the zero extension performed. */
18075 if (inner_mode == HImode)
18077 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
18078 target = gen_lowpart (SImode, target);
18081 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
18083 else
18085 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
18087 emit_move_insn (mem, vec);
18089 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
18090 emit_move_insn (target, tmp);
18094 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
18095 pattern to reduce; DEST is the destination; IN is the input vector. */
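/* E.g. for addition with IN = {a, b, c, d}: movhlps gives tmp1 = {c, d, ..},
   the first FN gives tmp2 = {a+c, b+d, ..}, the shuffle broadcasts element 1
   of tmp2 into tmp3, and the final FN leaves a+b+c+d in element 0 of DEST. */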
18097 void
18098 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
18100 rtx tmp1, tmp2, tmp3;
18102 tmp1 = gen_reg_rtx (V4SFmode);
18103 tmp2 = gen_reg_rtx (V4SFmode);
18104 tmp3 = gen_reg_rtx (V4SFmode);
18106 emit_insn (gen_sse_movhlps (tmp1, in, in));
18107 emit_insn (fn (tmp2, tmp1, in));
18109 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
18110 GEN_INT (1), GEN_INT (1),
18111 GEN_INT (1+4), GEN_INT (1+4)));
18112 emit_insn (fn (dest, tmp2, tmp3));
18115 /* Implements target hook vector_mode_supported_p. */
18116 static bool
18117 ix86_vector_mode_supported_p (enum machine_mode mode)
18119 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
18120 return true;
18121 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
18122 return true;
18123 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
18124 return true;
18125 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
18126 return true;
18127 return false;
18130 /* Worker function for TARGET_MD_ASM_CLOBBERS.
18132 We do this in the new i386 backend to maintain source compatibility
18133 with the old cc0-based compiler. */
18135 static tree
18136 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
18137 tree inputs ATTRIBUTE_UNUSED,
18138 tree clobbers)
18140 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
18141 clobbers);
18142 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
18143 clobbers);
18144 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
18145 clobbers);
18146 return clobbers;
18149 /* Return true if this goes in large data/bss. */
18151 static bool
18152 ix86_in_large_data_p (tree exp)
18154 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
18155 return false;
18157 /* Functions are never large data. */
18158 if (TREE_CODE (exp) == FUNCTION_DECL)
18159 return false;
18161 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
18163 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
18164 if (strcmp (section, ".ldata") == 0
18165 || strcmp (section, ".lbss") == 0)
18166 return true;
18167 return false;
18169 else
18171 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
18173 /* If this is an incomplete type with size 0, then we can't put it
18174 in data because it might be too big when completed. */
18175 if (!size || size > ix86_section_threshold)
18176 return true;
18179 return false;
18181 static void
18182 ix86_encode_section_info (tree decl, rtx rtl, int first)
18184 default_encode_section_info (decl, rtl, first);
18186 if (TREE_CODE (decl) == VAR_DECL
18187 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
18188 && ix86_in_large_data_p (decl))
18189 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
18192 /* Worker function for REVERSE_CONDITION. */
18194 enum rtx_code
18195 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
18197 return (mode != CCFPmode && mode != CCFPUmode
18198 ? reverse_condition (code)
18199 : reverse_condition_maybe_unordered (code));
18202 /* Output code to perform an x87 FP register move, from OPERANDS[1]
18203 to OPERANDS[0]. */
18205 const char *
18206 output_387_reg_move (rtx insn, rtx *operands)
18208 if (REG_P (operands[1])
18209 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
18211 if (REGNO (operands[0]) == FIRST_STACK_REG
18212 && TARGET_USE_FFREEP)
18213 return "ffreep\t%y0";
18214 return "fstp\t%y0";
18216 if (STACK_TOP_P (operands[0]))
18217 return "fld%z1\t%y1";
18218 return "fst\t%y0";
18221 /* Output code to perform a conditional jump to LABEL, if C2 flag in
18222 FP status register is set. */
18224 void
18225 ix86_emit_fp_unordered_jump (rtx label)
18227 rtx reg = gen_reg_rtx (HImode);
18228 rtx temp;
18230 emit_insn (gen_x86_fnstsw_1 (reg));
18232 if (TARGET_USE_SAHF)
18234 emit_insn (gen_x86_sahf_1 (reg));
18236 temp = gen_rtx_REG (CCmode, FLAGS_REG);
18237 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
18239 else
18241 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
18243 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18244 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
18247 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
18248 gen_rtx_LABEL_REF (VOIDmode, label),
18249 pc_rtx);
18250 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
18251 emit_jump_insn (temp);
18254 /* Output code to perform a log1p XFmode calculation. */
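/* fyl2xp1 computes y * log2(x + 1) but is only specified for
   |x| < 1 - sqrt(2)/2 (about 0.2929), hence the comparison against that
   constant below; larger arguments form 1 + x explicitly and go through the
   plain fyl2x path. In both cases y is loaded with ln(2) (fldln2), so the
   result is the natural logarithm. */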
18256 void ix86_emit_i387_log1p (rtx op0, rtx op1)
18258 rtx label1 = gen_label_rtx ();
18259 rtx label2 = gen_label_rtx ();
18261 rtx tmp = gen_reg_rtx (XFmode);
18262 rtx tmp2 = gen_reg_rtx (XFmode);
18264 emit_insn (gen_absxf2 (tmp, op1));
18265 emit_insn (gen_cmpxf (tmp,
18266 CONST_DOUBLE_FROM_REAL_VALUE (
18267 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
18268 XFmode)));
18269 emit_jump_insn (gen_bge (label1));
18271 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
18272 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
18273 emit_jump (label2);
18275 emit_label (label1);
18276 emit_move_insn (tmp, CONST1_RTX (XFmode));
18277 emit_insn (gen_addxf3 (tmp, op1, tmp));
18278 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
18279 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
18281 emit_label (label2);
18284 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
18286 static void
18287 i386_solaris_elf_named_section (const char *name, unsigned int flags,
18288 tree decl)
18290 /* With Binutils 2.15, the "@unwind" marker must be specified on
18291 every occurrence of the ".eh_frame" section, not just the first
18292 one. */
18293 if (TARGET_64BIT
18294 && strcmp (name, ".eh_frame") == 0)
18296 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
18297 flags & SECTION_WRITE ? "aw" : "a");
18298 return;
18300 default_elf_asm_named_section (name, flags, decl);
18303 /* Return the mangling of TYPE if it is an extended fundamental type. */
18305 static const char *
18306 ix86_mangle_fundamental_type (tree type)
18308 switch (TYPE_MODE (type))
18310 case TFmode:
18311 /* __float128 is "g". */
18312 return "g";
18313 case XFmode:
18314 /* "long double" or __float80 is "e". */
18315 return "e";
18316 default:
18317 return NULL;
18321 /* For 32-bit code we can save PIC register setup by using the hidden
18322 __stack_chk_fail_local function instead of calling __stack_chk_fail
18323 directly. 64-bit code doesn't need to set up any PIC register, so it
18324 is better to call __stack_chk_fail directly. */
18326 static tree
18327 ix86_stack_protect_fail (void)
18329 return TARGET_64BIT
18330 ? default_external_stack_protect_fail ()
18331 : default_hidden_stack_protect_fail ();
18334 /* Select a format to encode pointers in exception handling data. CODE
18335 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
18336 true if the symbol may be affected by dynamic relocations.
18338 ??? All x86 object file formats are capable of representing this.
18339 After all, the relocation needed is the same as for the call insn.
18340 Whether or not a particular assembler allows us to enter such, I
18341 guess we'll have to see. */
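/* E.g. DW_EH_PE_pcrel | DW_EH_PE_sdata4 is a signed 4-byte PC-relative
   value, and DW_EH_PE_indirect means the entry is the address of a pointer
   slot holding the real address. */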
18342 int
18343 asm_preferred_eh_data_format (int code, int global)
18345 if (flag_pic)
18347 int type = DW_EH_PE_sdata8;
18348 if (!TARGET_64BIT
18349 || ix86_cmodel == CM_SMALL_PIC
18350 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
18351 type = DW_EH_PE_sdata4;
18352 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
18354 if (ix86_cmodel == CM_SMALL
18355 || (ix86_cmodel == CM_MEDIUM && code))
18356 return DW_EH_PE_udata4;
18357 return DW_EH_PE_absptr;
18360 #include "gt-i386.h"