/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "varasm.h"
#include "output.h"
#include "insn-attr.h"
#include "explow.h"
#include "expr.h"
#include "debug.h"
#include "cfgrtl.h"
#include "common/common-target.h"
#include "gimplify.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "tree-pass.h"
#include "context.h"
#include "builtins.h"
#include "tree-vector-builder.h"

/* This file should be included last.  */
#include "target-def.h"
/* Processor costs */

struct processor_costs {
  /* Integer load */
  const int int_load;

  /* Integer signed load */
  const int int_sload;

  /* Integer zeroed load */
  const int int_zload;

  /* Float load */
  const int float_load;

  /* fmov, fneg, fabs */
  const int float_move;

  /* fadd, fsub */
  const int float_plusminus;

  /* fcmp */
  const int float_cmp;

  /* fmov, fmovr */
  const int float_cmove;

  /* fmul */
  const int float_mul;

  /* fdivs */
  const int float_div_sf;

  /* fdivd */
  const int float_div_df;

  /* fsqrts */
  const int float_sqrt_sf;

  /* fsqrtd */
  const int float_sqrt_df;

  /* umul/smul */
  const int int_mul;

  /* mulX */
  const int int_mulX;

  /* integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);
	if (highest_bit < 3)
	  highest_bit = 3;
	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);

     A value of zero indicates that the multiply cost is fixed,
     and not variable.  */
  const int int_mul_bit_factor;
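
  /* Illustrative example (not from the original sources): with
     int_mul = COSTS_N_INSNS (4) and int_mul_bit_factor = 2, a multiply
     whose operand has its highest set bit at position 11 would be
     costed as int_mul + (11 - 3) / 2, i.e. int_mul plus 4 extra units,
     per the formula above.  */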

  /* udiv/sdiv */
  const int int_div;

  /* divX */
  const int int_divX;

  /* movcc, movr */
  const int int_cmove;

  /* penalty for shifts, due to scheduling rules etc. */
  const int shift_penalty;

  /* cost of a (predictable) branch.  */
  const int branch_cost;
};
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};
static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
  3 /* branch cost */
};
static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};
static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};
static const
struct processor_costs leon3_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (14), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (22), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (35), /* idiv */
  COSTS_N_INSNS (35), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};
static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};
static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
  2 /* branch cost */
};
static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
  2 /* branch cost */
};
static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  4 /* branch cost */
};
static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  5 /* branch cost */
};
static const
struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  5 /* branch cost */
};
static const
struct processor_costs niagara4_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  2 /* branch cost */
};
static const
struct processor_costs niagara7_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (51), /* idiv, average of 42 - 61 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  1 /* branch cost */
};
static const
struct processor_costs m8_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (26), /* fdivs */
  COSTS_N_INSNS (30), /* fdivd */
  COSTS_N_INSNS (33), /* fsqrts */
  COSTS_N_INSNS (41), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (10), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (57), /* udiv/sdiv */
  COSTS_N_INSNS (30), /* udivx/sdivx */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  1 /* branch cost */
};

static const struct processor_costs *sparc_costs = &cypress_costs;
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
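
/* Illustrative sketch of the reserved-slot sequence described above, using
   %g1 as the temporary X (the actual register is chosen by the compiler):

       or    %o7, %g0, %g1     ! save the return address
       call  Y                 ! clobbers %o7
        or   %g1, %g0, %o7     ! restore it in the delay slot

   which the assembler and linker can relax into a plain branch, unlike
   the sethi/jmp form.  */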
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
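
/* For instance, entry 24 above is 8: in a leaf function, incoming register
   %i0 (hard reg 24) is accessed as %o0 (hard reg 8), since no register
   window is allocated.  Entry 30 (%fp) is -1, per the comment above.  */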
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of crtl->uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};
#define sparc_frame_size cfun->machine->frame_size
#define sparc_apparent_frame_size cfun->machine->apparent_frame_size
#define sparc_frame_base_reg cfun->machine->frame_base_reg
#define sparc_frame_base_offset cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static void sparc_option_override (void);
static void sparc_init_modes (void);
static int function_arg_slotno (const CUMULATIVE_ARGS *, machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
static int hypersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx_insn *);
static void sparc_asm_function_prologue (FILE *);
static void sparc_asm_function_epilogue (FILE *);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_fpu_init_builtins (void);
static void sparc_vis_init_builtins (void);
static tree sparc_builtin_decl (unsigned, bool);
static rtx sparc_expand_builtin (tree, rtx, rtx, machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static int sparc_register_move_cost (machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static machine_mode sparc_promote_function_mode (const_tree, machine_mode,
						 int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
static bool sparc_pass_by_reference (cumulative_args_t,
				     machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					machine_mode, const_tree, bool);
static pad_direction sparc_function_arg_padding (machine_mode, const_tree);
static unsigned int sparc_function_arg_boundary (machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
static bool sparc_use_pseudo_pic_reg (void);
static void sparc_init_pic_reg (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static machine_mode sparc_preferred_simd_mode (scalar_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_lra_p (void);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, machine_mode, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
					   machine_mode,
					   secondary_reload_info *);
static bool sparc_secondary_memory_needed (machine_mode, reg_class_t,
					   reg_class_t);
static machine_mode sparc_secondary_memory_needed_mode (machine_mode);
static scalar_int_mode sparc_cstore_mode (enum insn_code icode);
static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
static bool sparc_fixed_condition_code_regs (unsigned int *, unsigned int *);
static unsigned int sparc_min_arithmetic_precision (void);
static unsigned int sparc_hard_regno_nregs (unsigned int, machine_mode);
static bool sparc_hard_regno_mode_ok (unsigned int, machine_mode);
static bool sparc_modes_tieable_p (machine_mode, machine_mode);
static bool sparc_can_change_mode_class (machine_mode, machine_mode,
					 reg_class_t);
static HOST_WIDE_INT sparc_constant_alignment (const_tree, HOST_WIDE_INT);
static bool sparc_vectorize_vec_perm_const (machine_mode, rtx, rtx, rtx,
					    const vec_perm_indices &);
static bool sparc_can_follow_jump (const rtx_insn *, const rtx_insn *);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       do_diagnostic, handler, exclude } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer
#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL sparc_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING sparc_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#ifdef TARGET_THREAD_SSP_OFFSET
#undef TARGET_STACK_PROTECT_GUARD
#define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
#endif

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sparc_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED sparc_secondary_memory_needed
#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
#define TARGET_SECONDARY_MEMORY_NEEDED_MODE sparc_secondary_memory_needed_mode

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#undef TARGET_INIT_PIC_REG
#define TARGET_INIT_PIC_REG sparc_init_pic_reg

#undef TARGET_USE_PSEUDO_PIC_REG
#define TARGET_USE_PSEUDO_PIC_REG sparc_use_pseudo_pic_reg

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LRA_P
#define TARGET_LRA_P sparc_lra_p

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

/* The value stored by LDSTUB.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff

#undef TARGET_CSTORE_MODE
#define TARGET_CSTORE_MODE sparc_cstore_mode

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS sparc_fixed_condition_code_regs

#undef TARGET_MIN_ARITHMETIC_PRECISION
#define TARGET_MIN_ARITHMETIC_PRECISION sparc_min_arithmetic_precision

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS sparc_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK sparc_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P sparc_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS sparc_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT sparc_constant_alignment

#undef TARGET_VECTORIZE_VEC_PERM_CONST
#define TARGET_VECTORIZE_VEC_PERM_CONST sparc_vectorize_vec_perm_const

#undef TARGET_CAN_FOLLOW_JUMP
#define TARGET_CAN_FOLLOW_JUMP sparc_can_follow_jump

struct gcc_target targetm = TARGET_INITIALIZER;
/* Return the memory reference contained in X if any, zero otherwise.  */

static rtx
mem_ref (rtx x)
{
  if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
    x = XEXP (x, 0);

  if (MEM_P (x))
    return x;

  return NULL_RTX;
}
/* True if any of INSN's source register(s) is REG.  */

static bool
insn_uses_reg_p (rtx_insn *insn, unsigned int reg)
{
  extract_insn (insn);
  return ((REG_P (recog_data.operand[1])
	   && REGNO (recog_data.operand[1]) == reg)
	  || (recog_data.n_operands == 3
	      && REG_P (recog_data.operand[2])
	      && REGNO (recog_data.operand[2]) == reg));
}
/* True if INSN is a floating-point division or square-root.  */

static bool
div_sqrt_insn_p (rtx_insn *insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  switch (get_attr_type (insn))
    {
    case TYPE_FPDIVS:
    case TYPE_FPSQRTS:
    case TYPE_FPDIVD:
    case TYPE_FPSQRTD:
      return true;
    default:
      return false;
    }
}
/* True if INSN is a floating-point instruction.  */

static bool
fpop_insn_p (rtx_insn *insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  switch (get_attr_type (insn))
    {
    case TYPE_FPMOVE:
    case TYPE_FPCMOVE:
    case TYPE_FP:
    case TYPE_FPCMP:
    case TYPE_FPMUL:
    case TYPE_FPDIVS:
    case TYPE_FPSQRTS:
    case TYPE_FPDIVD:
    case TYPE_FPSQRTD:
      return true;
    default:
      return false;
    }
}
/* True if INSN is an atomic instruction.  */

static bool
atomic_insn_for_leon3_p (rtx_insn *insn)
{
  switch (INSN_CODE (insn))
    {
    case CODE_FOR_swapsi:
    case CODE_FOR_ldstub:
    case CODE_FOR_atomic_compare_and_swap_leon3_1:
      return true;
    default:
      return false;
    }
}
/* We use a machine specific pass to enable workarounds for errata.

   We need to have the (essentially) final form of the insn stream in order
   to properly detect the various hazards.  Therefore, this machine specific
   pass runs as late as possible.  */

/* True if INSN is a md pattern or asm statement.  */
#define USEFUL_INSN_P(INSN)			\
  (NONDEBUG_INSN_P (INSN)			\
   && GET_CODE (PATTERN (INSN)) != USE		\
   && GET_CODE (PATTERN (INSN)) != CLOBBER)
static unsigned int
sparc_do_work_around_errata (void)
{
  rtx_insn *insn, *next;

  /* Force all instructions to be split into their final form.  */
  split_all_insns_noflow ();

  /* Now look for specific patterns in the insn stream.  */
  for (insn = get_insns (); insn; insn = next)
    {
      bool insert_nop = false;
      rtx set;
      rtx_insn *jump;
      rtx_sequence *seq;

      /* Look into the instruction in a delay slot.  */
      if (NONJUMP_INSN_P (insn)
	  && (seq = dyn_cast <rtx_sequence *> (PATTERN (insn))))
	{
	  jump = seq->insn (0);
	  insn = seq->insn (1);
	}
      else if (JUMP_P (insn))
	jump = insn;
      else
	jump = NULL;

      /* Place a NOP at the branch target of an integer branch if it is a
	 floating-point operation or a floating-point branch.  */
      if (sparc_fix_gr712rc
	  && jump
	  && jump_to_label_p (jump)
	  && get_attr_branch_type (jump) == BRANCH_TYPE_ICC)
	{
	  rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
	  if (target
	      && (fpop_insn_p (target)
		  || (JUMP_P (target)
		      && get_attr_branch_type (target) == BRANCH_TYPE_FCC)))
	    emit_insn_before (gen_nop (), target);
	}

      /* Insert a NOP between load instruction and atomic instruction.  Insert
	 a NOP at branch target if there is a load in delay slot and an atomic
	 instruction at branch target.  */
      if (sparc_fix_ut700
	  && NONJUMP_INSN_P (insn)
	  && (set = single_set (insn)) != NULL_RTX
	  && mem_ref (SET_SRC (set))
	  && REG_P (SET_DEST (set)))
	{
	  if (jump && jump_to_label_p (jump))
	    {
	      rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
	      if (target && atomic_insn_for_leon3_p (target))
		emit_insn_before (gen_nop (), target);
	    }

	  next = next_active_insn (insn);
	  if (!next)
	    break;

	  if (atomic_insn_for_leon3_p (next))
	    insert_nop = true;
	}

      /* Look for a sequence that starts with a fdiv or fsqrt instruction and
	 ends with another fdiv or fsqrt instruction with no dependencies on
	 the former, along with an appropriate pattern in between.  */
      if (sparc_fix_lost_divsqrt
	  && NONJUMP_INSN_P (insn)
	  && div_sqrt_insn_p (insn))
	{
	  int i;
	  int fp_found = 0;
	  rtx_insn *after;

	  const unsigned int dest_reg = REGNO (SET_DEST (single_set (insn)));

	  next = next_active_insn (insn);
	  if (!next)
	    break;

	  for (after = next, i = 0; i < 4; i++)
	    {
	      /* Count floating-point operations.  */
	      if (i != 3 && fpop_insn_p (after))
		{
		  /* If the insn uses the destination register of
		     the div/sqrt, then it cannot be problematic.  */
		  if (insn_uses_reg_p (after, dest_reg))
		    break;
		  fp_found++;
		}

	      /* Count floating-point loads.  */
	      if (i != 3
		  && (set = single_set (after)) != NULL_RTX
		  && REG_P (SET_DEST (set))
		  && REGNO (SET_DEST (set)) > 31)
		{
		  /* If the insn uses the destination register of
		     the div/sqrt, then it cannot be problematic.  */
		  if (REGNO (SET_DEST (set)) == dest_reg)
		    break;
		  fp_found++;
		}

	      /* Check if this is a problematic sequence.  */
	      if (i > 1
		  && fp_found >= 2
		  && div_sqrt_insn_p (after))
		{
		  /* If this is the short version of the problematic
		     sequence we add two NOPs in a row to also prevent
		     the long version.  */
		  if (i == 2)
		    emit_insn_before (gen_nop (), next);
		  insert_nop = true;
		  break;
		}

	      /* No need to scan past a second div/sqrt.  */
	      if (div_sqrt_insn_p (after))
		break;

	      /* Insert NOP before branch.  */
	      if (i < 3
		  && (!NONJUMP_INSN_P (after)
		      || GET_CODE (PATTERN (after)) == SEQUENCE))
		{
		  insert_nop = true;
		  break;
		}

	      after = next_active_insn (after);
	      if (!after)
		break;
	    }
	}

      /* Look for either of these two sequences:

	 Sequence A:
	 1. store of word size or less (e.g. st / stb / sth / stf)
	 2. any single instruction that is not a load or store
	 3. any store instruction (e.g. st / stb / sth / stf / std / stdf)

	 Sequence B:
	 1. store of double word size (e.g. std / stdf)
	 2. any store instruction (e.g. st / stb / sth / stf / std / stdf)  */
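      /* An illustrative instance of sequence A (not generated code):

	     st  %g1, [%o0]      ! 1. word store
	     add %o1, 1, %o1     ! 2. non-memory instruction
	     st  %o2, [%o0+4]    ! 3. store -> hazard

	 The workaround below breaks such patterns by emitting a NOP
	 right after the first store.  */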
      if (sparc_fix_b2bst
	  && NONJUMP_INSN_P (insn)
	  && (set = single_set (insn)) != NULL_RTX
	  && MEM_P (SET_DEST (set)))
	{
	  /* Sequence B begins with a double-word store.  */
	  bool seq_b = GET_MODE_SIZE (GET_MODE (SET_DEST (set))) == 8;
	  rtx_insn *after;
	  int i;

	  next = next_active_insn (insn);
	  if (!next)
	    break;

	  for (after = next, i = 0; i < 2; i++)
	    {
	      /* Skip empty assembly statements.  */
	      if ((GET_CODE (PATTERN (after)) == UNSPEC_VOLATILE)
		  || (USEFUL_INSN_P (after)
		      && (asm_noperands (PATTERN (after)) >= 0)
		      && !strcmp (decode_asm_operands (PATTERN (after),
						       NULL, NULL, NULL,
						       NULL, NULL), "")))
		after = next_active_insn (after);
	      if (!after)
		break;

	      /* If the insn is a branch, then it cannot be problematic.  */
	      if (!NONJUMP_INSN_P (after)
		  || GET_CODE (PATTERN (after)) == SEQUENCE)
		break;

	      /* Sequence B is only two instructions long.  */
	      if (seq_b)
		{
		  /* Add NOP if followed by a store.  */
		  if ((set = single_set (after)) != NULL_RTX
		      && MEM_P (SET_DEST (set)))
		    insert_nop = true;

		  /* Otherwise it is ok.  */
		  break;
		}

	      /* If the second instruction is a load or a store,
		 then the sequence cannot be problematic.  */
	      if (i == 0)
		{
		  if ((set = single_set (after)) != NULL_RTX
		      && (MEM_P (SET_DEST (set)) || mem_ref (SET_SRC (set))))
		    break;

		  after = next_active_insn (after);
		  if (!after)
		    break;
		}

	      /* Add NOP if third instruction is a store.  */
	      if (i == 1
		  && (set = single_set (after)) != NULL_RTX
		  && MEM_P (SET_DEST (set)))
		insert_nop = true;
	    }
	}

      /* Look for a single-word load into an odd-numbered FP register.  */
      else if (sparc_fix_at697f
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
	       && mem_ref (SET_SRC (set))
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) > 31
	       && REGNO (SET_DEST (set)) % 2 != 0)
	{
	  /* The wrong dependency is on the enclosing double register.  */
	  const unsigned int x = REGNO (SET_DEST (set)) - 1;
	  unsigned int src1, src2, dest;
	  int code;

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  extract_insn (next);
	  code = INSN_CODE (next);

	  switch (code)
	    {
	    case CODE_FOR_adddf3:
	    case CODE_FOR_subdf3:
	    case CODE_FOR_muldf3:
	    case CODE_FOR_divdf3:
	      dest = REGNO (recog_data.operand[0]);
	      src1 = REGNO (recog_data.operand[1]);
	      src2 = REGNO (recog_data.operand[2]);
	      if (src1 != src2)
		{
		  /* Case [1-4]:
		       ld [address], %fx+1
		       FPOPd %f{x,y}, %f{y,x}, %f{x,y}  */
		  if ((src1 == x || src2 == x)
		      && (dest == src1 || dest == src2))
		    insert_nop = true;
		}
	      else
		{
		  /* Case 5:
		       ld [address], %fx+1
		       FPOPd %fx, %fx, %fx  */
		  if (src1 == x
		      && dest == src1
		      && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
		    insert_nop = true;
		}
	      break;

	    case CODE_FOR_sqrtdf2:
	      dest = REGNO (recog_data.operand[0]);
	      src1 = REGNO (recog_data.operand[1]);
	      /* Case 6:
		   ld [address], %fx+1
		   fsqrtd %fx, %fx  */
	      if (src1 == x && dest == src1)
		insert_nop = true;
	      break;

	    default:
	      break;
	    }
	}

      /* Look for a single-word load into an integer register.  */
      else if (sparc_fix_ut699
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) <= 4
	       && (mem_ref (SET_SRC (set)) != NULL_RTX
		   || INSN_CODE (insn) == CODE_FOR_movsi_pic_gotdata_op)
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) < 32)
	{
	  /* There is no problem if the second memory access has a data
	     dependency on the first single-cycle load.  */
	  rtx x = SET_DEST (set);

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  /* Look for a second memory access to/from an integer register.  */
	  if ((set = single_set (next)) != NULL_RTX)
	    {
	      rtx src = SET_SRC (set);
	      rtx dest = SET_DEST (set);
	      rtx mem;

	      /* LDD is affected.  */
	      if ((mem = mem_ref (src)) != NULL_RTX
		  && REG_P (dest)
		  && REGNO (dest) < 32
		  && !reg_mentioned_p (x, XEXP (mem, 0)))
		insert_nop = true;

	      /* STD is *not* affected.  */
	      else if (MEM_P (dest)
		       && GET_MODE_SIZE (GET_MODE (dest)) <= 4
		       && (src == CONST0_RTX (GET_MODE (dest))
			   || (REG_P (src)
			       && REGNO (src) < 32
			       && REGNO (src) != REGNO (x)))
		       && !reg_mentioned_p (x, XEXP (dest, 0)))
		insert_nop = true;

	      /* GOT accesses use LD.  */
	      else if (INSN_CODE (next) == CODE_FOR_movsi_pic_gotdata_op
		       && !reg_mentioned_p (x, XEXP (XEXP (src, 0), 1)))
		insert_nop = true;
	    }
	}

      /* Look for a single-word load/operation into an FP register.  */
      else if (sparc_fix_ut699
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) > 31)
	{
	  /* Number of instructions in the problematic window.  */
	  const int n_insns = 4;
	  /* The problematic combination is with the sibling FP register.  */
	  const unsigned int x = REGNO (SET_DEST (set));
	  const unsigned int y = x ^ 1;
	  rtx_insn *after;
	  int i;

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  /* Look for a second load/operation into the sibling FP register.  */
	  if (!((set = single_set (next)) != NULL_RTX
		&& GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
		&& REG_P (SET_DEST (set))
		&& REGNO (SET_DEST (set)) == y))
	    continue;

	  /* Look for a (possible) store from the FP register in the next N
	     instructions, but bail out if it is again modified or if there
	     is a store from the sibling FP register before this store.  */
	  for (after = next, i = 0; i < n_insns; i++)
	    {
	      bool branch_p;

	      after = next_active_insn (after);
	      if (!after)
		break;

	      /* This is a branch with an empty delay slot.  */
	      if (!NONJUMP_INSN_P (after))
		{
		  if (++i == n_insns)
		    break;
		  branch_p = true;
		  after = NULL;
		}

	      /* This is a branch with a filled delay slot.  */
	      else if (rtx_sequence *seq =
			 dyn_cast <rtx_sequence *> (PATTERN (after)))
		{
		  if (++i == n_insns)
		    break;
		  branch_p = true;
		  after = seq->insn (1);
		}

	      /* This is a regular instruction.  */
	      else
		branch_p = false;

	      if (after && (set = single_set (after)) != NULL_RTX)
		{
		  const rtx src = SET_SRC (set);
		  const rtx dest = SET_DEST (set);
		  const unsigned int size = GET_MODE_SIZE (GET_MODE (dest));

		  /* If the FP register is again modified before the store,
		     then the store isn't affected.  */
		  if (REG_P (dest)
		      && (REGNO (dest) == x
			  || (REGNO (dest) == y && size == 8)))
		    break;

		  if (MEM_P (dest) && REG_P (src))
		    {
		      /* If there is a store from the sibling FP register
			 before the store, then the store is not affected.  */
		      if (REGNO (src) == y || (REGNO (src) == x && size == 8))
			break;

		      /* Otherwise, the store is affected.  */
		      if (REGNO (src) == x && size == 4)
			{
			  insert_nop = true;
			  break;
			}
		    }
		}

	      /* If we have a branch in the first M instructions, then we
		 cannot see the (M+2)th instruction so we play safe.  */
	      if (branch_p && i <= (n_insns - 2))
		{
		  insert_nop = true;
		  break;
		}
	    }
	}

      else
	next = NEXT_INSN (insn);

      if (insert_nop)
	emit_insn_before (gen_nop (), next);
    }

  return 0;
}
namespace {

const pass_data pass_data_work_around_errata =
{
  RTL_PASS, /* type */
  "errata", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_work_around_errata : public rtl_opt_pass
{
public:
  pass_work_around_errata(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_work_around_errata, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return sparc_fix_at697f
	|| sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc
	|| sparc_fix_b2bst || sparc_fix_lost_divsqrt;
    }

  virtual unsigned int execute (function *)
    {
      return sparc_do_work_around_errata ();
    }

}; // class pass_work_around_errata

} // anon namespace

rtl_opt_pass *
make_pass_work_around_errata (gcc::context *ctxt)
{
  return new pass_work_around_errata (ctxt);
}
/* Helpers for TARGET_DEBUG_OPTIONS.  */
static void
dump_target_flag_bits (const int flags)
{
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FSMULD)
    fprintf (stderr, "FSMULD ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_VIS4)
    fprintf (stderr, "VIS4 ");
  if (flags & MASK_VIS4B)
    fprintf (stderr, "VIS4B ");
  if (flags & MASK_CBCOND)
    fprintf (stderr, "CBCOND ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
  if (flags & MASK_V8)
    fprintf (stderr, "V8 ");
  if (flags & MASK_V9)
    fprintf (stderr, "V9 ");
}

static void
dump_target_flags (const char *prefix, const int flags)
{
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;

  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_leon3, PROCESSOR_LEON3 },
    { TARGET_CPU_leon3v7, PROCESSOR_LEON3V7 },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
    { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
    { TARGET_CPU_niagara7, PROCESSOR_NIAGARA7 },
    { TARGET_CPU_m8, PROCESSOR_M8 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;

  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the enum processor_type in sparc-opts.h.  */
  static struct cpu_table {
    const char *const name;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", MASK_ISA, 0 },
    { "cypress", MASK_ISA, 0 },
    { "v8", MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", MASK_ISA, MASK_V8 },
    { "hypersparc", MASK_ISA, MASK_V8 },
    { "leon", MASK_ISA|MASK_FSMULD, MASK_V8|MASK_LEON },
    { "leon3", MASK_ISA, MASK_V8|MASK_LEON3 },
    { "leon3v7", MASK_ISA, MASK_LEON3 },
    { "sparclite", MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f934", MASK_ISA, MASK_SPARCLITE },
    { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "sparclet", MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", MASK_ISA, MASK_SPARCLET },
    { "v9", MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { "ultrasparc", MASK_ISA,
      /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { "ultrasparc3", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
    /* UltraSPARC T1 */
    { "niagara", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T2 */
    { "niagara2", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2 },
    /* UltraSPARC T3 */
    { "niagara3", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF },
    /* UltraSPARC T4 */
    { "niagara4", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
    /* UltraSPARC M7 */
    { "niagara7", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC },
    /* UltraSPARC M8 */
    { "m8", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS4B|MASK_FMAF|MASK_CBCOND|MASK_SUBXC }
  };
  const struct cpu_table *cpu;
  unsigned int i;
  if (sparc_debug_string != NULL)
    {
      const char *q;
      char *p;

      p = ASTRDUP (sparc_debug_string);
      while ((q = strtok (p, ",")) != NULL)
	{
	  bool invert;
	  int mask;

	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (! strcmp (q, "all"))
	    mask = MASK_DEBUG_ALL;
	  else if (! strcmp (q, "options"))
	    mask = MASK_DEBUG_OPTIONS;
	  else
	    error ("unknown -mdebug-%s switch", q);

	  if (invert)
	    sparc_debug &= ~mask;
	  else
	    sparc_debug |= mask;
	}
    }
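
  /* For example, -mdebug=all,!options first sets all debug bits and then
     clears MASK_DEBUG_OPTIONS again (illustrative of the loop above).  */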
1765 /* Enable the FsMULd instruction by default if not explicitly specified by
1766 the user. It may be later disabled by the CPU (explicitly or not). */
1767 if (TARGET_FPU && !(target_flags_explicit & MASK_FSMULD))
1768 target_flags |= MASK_FSMULD;
1770 if (TARGET_DEBUG_OPTIONS)
1772 dump_target_flags("Initial target_flags", target_flags);
1773 dump_target_flags("target_flags_explicit", target_flags_explicit);
1776 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1777 SUBTARGET_OVERRIDE_OPTIONS;
1778 #endif
1780 #ifndef SPARC_BI_ARCH
1781 /* Check for unsupported architecture size. */
1782 if (!TARGET_64BIT != DEFAULT_ARCH32_P)
1783 error ("%s is not supported by this configuration",
1784 DEFAULT_ARCH32_P ? "-m64" : "-m32");
1785 #endif
1787 /* We force all 64-bit archs to use 128-bit long double. */
1788 if (TARGET_ARCH64 && !TARGET_LONG_DOUBLE_128)
1790 error ("-mlong-double-64 not allowed with -m64");
1791 target_flags |= MASK_LONG_DOUBLE_128;
1794 /* Code model selection. */
1795 sparc_cmodel = SPARC_DEFAULT_CMODEL;
1797 #ifdef SPARC_BI_ARCH
1798 if (TARGET_ARCH32)
1799 sparc_cmodel = CM_32;
1800 #endif
1802 if (sparc_cmodel_string != NULL)
1804 if (TARGET_ARCH64)
1806 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
1807 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
1808 break;
1809 if (cmodel->name == NULL)
1810 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
1811 else
1812 sparc_cmodel = cmodel->value;
1814 else
1815 error ("-mcmodel= is not supported on 32-bit systems");
1818 /* Check that -fcall-saved-REG wasn't specified for out registers. */
1819 for (i = 8; i < 16; i++)
1820 if (!call_used_regs [i])
1822 error ("-fcall-saved-REG is not supported for out registers");
1823 call_used_regs [i] = 1;
1826 /* Set the default CPU if no -mcpu option was specified. */
1827 if (!global_options_set.x_sparc_cpu_and_features)
1829 for (def = &cpu_default[0]; def->cpu != -1; ++def)
1830 if (def->cpu == TARGET_CPU_DEFAULT)
1831 break;
1832 gcc_assert (def->cpu != -1);
1833 sparc_cpu_and_features = def->processor;
1836 /* Set the default CPU if no -mtune option was specified. */
1837 if (!global_options_set.x_sparc_cpu)
1838 sparc_cpu = sparc_cpu_and_features;
1840 cpu = &cpu_table[(int) sparc_cpu_and_features];
1842 if (TARGET_DEBUG_OPTIONS)
1844 fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
1845 dump_target_flags ("cpu->disable", cpu->disable);
1846 dump_target_flags ("cpu->enable", cpu->enable);
1849 target_flags &= ~cpu->disable;
1850 target_flags |= (cpu->enable
1851 #ifndef HAVE_AS_FMAF_HPC_VIS3
1852 & ~(MASK_FMAF | MASK_VIS3)
1853 #endif
1854 #ifndef HAVE_AS_SPARC4
1855 & ~MASK_CBCOND
1856 #endif
1857 #ifndef HAVE_AS_SPARC5_VIS4
1858 & ~(MASK_VIS4 | MASK_SUBXC)
1859 #endif
1860 #ifndef HAVE_AS_SPARC6
1861 & ~(MASK_VIS4B)
1862 #endif
1863 #ifndef HAVE_AS_LEON
1864 & ~(MASK_LEON | MASK_LEON3)
1865 #endif
1866 & ~(target_flags_explicit & MASK_FEATURES)
1869 /* FsMULd is a V8 instruction. */
1870 if (!TARGET_V8 && !TARGET_V9)
1871 target_flags &= ~MASK_FSMULD;
1873 /* -mvis2 implies -mvis. */
1874 if (TARGET_VIS2)
1875 target_flags |= MASK_VIS;
1877 /* -mvis3 implies -mvis2 and -mvis. */
1878 if (TARGET_VIS3)
1879 target_flags |= MASK_VIS2 | MASK_VIS;
1881 /* -mvis4 implies -mvis3, -mvis2 and -mvis. */
1882 if (TARGET_VIS4)
1883 target_flags |= MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1885 /* -mvis4b implies -mvis4, -mvis3, -mvis2 and -mvis. */
1886 if (TARGET_VIS4B)
1887 target_flags |= MASK_VIS4 | MASK_VIS3 | MASK_VIS2 | MASK_VIS;
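/* Taken together, the VIS options form a single chain:
   -mvis4b implies -mvis4, which implies -mvis3, which implies
   -mvis2, which implies -mvis; each level pulls in every level
   below it.  */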
1889 /* Don't allow -mvis, -mvis2, -mvis3, -mvis4, -mvis4b, -mfmaf and -mfsmuld if
1890 FPU is disabled. */
1891 if (!TARGET_FPU)
1892 target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_VIS4
1893 | MASK_VIS4B | MASK_FMAF | MASK_FSMULD);
1895 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
1896 are available; -m64 also implies v9. */
1897 if (TARGET_VIS || TARGET_ARCH64)
1899 target_flags |= MASK_V9;
1900 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
1903 /* -mvis also implies -mv8plus on 32-bit. */
1904 if (TARGET_VIS && !TARGET_ARCH64)
1905 target_flags |= MASK_V8PLUS;
1907 /* Use the deprecated v8 insns for sparc64 in 32-bit mode. */
1908 if (TARGET_V9 && TARGET_ARCH32)
1909 target_flags |= MASK_DEPRECATED_V8_INSNS;
1911 /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
1912 if (!TARGET_V9 || TARGET_ARCH64)
1913 target_flags &= ~MASK_V8PLUS;
1915 /* Don't use stack biasing in 32-bit mode. */
1916 if (TARGET_ARCH32)
1917 target_flags &= ~MASK_STACK_BIAS;
1919 /* Use LRA instead of reload, unless otherwise instructed. */
1920 if (!(target_flags_explicit & MASK_LRA))
1921 target_flags |= MASK_LRA;
1923 /* Enable applicable errata workarounds for LEON3FT. */
1924 if (sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc)
1926 sparc_fix_b2bst = 1;
1927 sparc_fix_lost_divsqrt = 1;
1930 /* Disable FsMULd for the UT699 since it doesn't work correctly. */
1931 if (sparc_fix_ut699)
1932 target_flags &= ~MASK_FSMULD;
1934 /* Supply a default value for align_functions. */
1935 if (flag_align_functions && !str_align_functions)
1937 if (sparc_cpu == PROCESSOR_ULTRASPARC
1938 || sparc_cpu == PROCESSOR_ULTRASPARC3
1939 || sparc_cpu == PROCESSOR_NIAGARA
1940 || sparc_cpu == PROCESSOR_NIAGARA2
1941 || sparc_cpu == PROCESSOR_NIAGARA3
1942 || sparc_cpu == PROCESSOR_NIAGARA4)
1943 str_align_functions = "32";
1944 else if (sparc_cpu == PROCESSOR_NIAGARA7
1945 || sparc_cpu == PROCESSOR_M8)
1946 str_align_functions = "64";
1949 /* Validate PCC_STRUCT_RETURN. */
1950 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
1951 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
1953 /* Only use .uaxword when compiling for a 64-bit target. */
1954 if (!TARGET_ARCH64)
1955 targetm.asm_out.unaligned_op.di = NULL;
1957 /* Do various machine dependent initializations. */
1958 sparc_init_modes ();
1960 /* Set up function hooks. */
1961 init_machine_status = sparc_init_machine_status;
1963 switch (sparc_cpu)
1965 case PROCESSOR_V7:
1966 case PROCESSOR_CYPRESS:
1967 sparc_costs = &cypress_costs;
1968 break;
1969 case PROCESSOR_V8:
1970 case PROCESSOR_SPARCLITE:
1971 case PROCESSOR_SUPERSPARC:
1972 sparc_costs = &supersparc_costs;
1973 break;
1974 case PROCESSOR_F930:
1975 case PROCESSOR_F934:
1976 case PROCESSOR_HYPERSPARC:
1977 case PROCESSOR_SPARCLITE86X:
1978 sparc_costs = &hypersparc_costs;
1979 break;
1980 case PROCESSOR_LEON:
1981 sparc_costs = &leon_costs;
1982 break;
1983 case PROCESSOR_LEON3:
1984 case PROCESSOR_LEON3V7:
1985 sparc_costs = &leon3_costs;
1986 break;
1987 case PROCESSOR_SPARCLET:
1988 case PROCESSOR_TSC701:
1989 sparc_costs = &sparclet_costs;
1990 break;
1991 case PROCESSOR_V9:
1992 case PROCESSOR_ULTRASPARC:
1993 sparc_costs = &ultrasparc_costs;
1994 break;
1995 case PROCESSOR_ULTRASPARC3:
1996 sparc_costs = &ultrasparc3_costs;
1997 break;
1998 case PROCESSOR_NIAGARA:
1999 sparc_costs = &niagara_costs;
2000 break;
2001 case PROCESSOR_NIAGARA2:
2002 sparc_costs = &niagara2_costs;
2003 break;
2004 case PROCESSOR_NIAGARA3:
2005 sparc_costs = &niagara3_costs;
2006 break;
2007 case PROCESSOR_NIAGARA4:
2008 sparc_costs = &niagara4_costs;
2009 break;
2010 case PROCESSOR_NIAGARA7:
2011 sparc_costs = &niagara7_costs;
2012 break;
2013 case PROCESSOR_M8:
2014 sparc_costs = &m8_costs;
2015 break;
2016 case PROCESSOR_NATIVE:
2017 gcc_unreachable ();
2020 if (sparc_memory_model == SMM_DEFAULT)
2022 /* Choose the memory model for the operating system. */
2023 enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
2024 if (os_default != SMM_DEFAULT)
2025 sparc_memory_model = os_default;
2026 /* Choose the most relaxed model for the processor. */
2027 else if (TARGET_V9)
2028 sparc_memory_model = SMM_RMO;
2029 else if (TARGET_LEON3)
2030 sparc_memory_model = SMM_TSO;
2031 else if (TARGET_LEON)
2032 sparc_memory_model = SMM_SC;
2033 else if (TARGET_V8)
2034 sparc_memory_model = SMM_PSO;
2035 else
2036 sparc_memory_model = SMM_SC;
2039 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
2040 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
2041 target_flags |= MASK_LONG_DOUBLE_128;
2042 #endif
2044 if (TARGET_DEBUG_OPTIONS)
2045 dump_target_flags ("Final target_flags", target_flags);
2047 /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
2048 can run at the same time. More importantly, it is the threshold
2049 defining when additional prefetches will be dropped by the
2050 hardware.
2052 The UltraSPARC-III features a documented prefetch queue with a
2053 size of 8. Additional prefetches issued in the cpu are
2054 dropped.
2056 Niagara processors are different. In these processors prefetches
2057 are handled much like regular loads. The L1 miss buffer is 32
2058 entries, but prefetches start getting affected when 30 entries
2059 become occupied. That occupation could be a mix of regular loads
2060 and prefetches though. And that buffer is shared by all threads.
2061 Once the threshold is reached, if the core is running a single
2062 thread the prefetch will retry. If more than one thread is
2063 running, the prefetch will be dropped.
2065 All this makes it very difficult to determine how many
2066 prefetches can be in flight simultaneously, even in a
2067 single-threaded program. Experimental results show that setting
2068 this parameter to 32 works well when the number of threads is not
2069 high. */
2070 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
2071 ((sparc_cpu == PROCESSOR_ULTRASPARC
2072 || sparc_cpu == PROCESSOR_NIAGARA
2073 || sparc_cpu == PROCESSOR_NIAGARA2
2074 || sparc_cpu == PROCESSOR_NIAGARA3
2075 || sparc_cpu == PROCESSOR_NIAGARA4)
2077 : (sparc_cpu == PROCESSOR_ULTRASPARC3
2078 ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
2079 || sparc_cpu == PROCESSOR_M8)
2080 ? 32 : 3))),
2081 global_options.x_param_values,
2082 global_options_set.x_param_values);
2084 /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
2085 bytes.
2087 The Oracle SPARC Architecture (previously the UltraSPARC
2088 Architecture) specification states that when a PREFETCH[A]
2089 instruction is executed an implementation-specific amount of data
2090 is prefetched, and that it is at least 64 bytes long (aligned to
2091 at least 64 bytes).
2093 However, this is not correct. The M7 (and implementations prior
2094 to that) does not guarantee a 64B prefetch into a cache if the
2095 line size is smaller. A single cache line is all that is ever
2096 prefetched. So for the M7, where the L1D$ has 32B lines and the
2097 L2D$ and L3 have 64B lines, a prefetch will prefetch 64B into the
2098 L2 and L3, but only 32B are brought into the L1D$. (Assuming it
2099 is a read_n prefetch, which is the only type that allocates to
2100 the L1.) */
2101 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
2102 (sparc_cpu == PROCESSOR_M8
2103 ? 64 : 32),
2104 global_options.x_param_values,
2105 global_options_set.x_param_values);
2107 /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
2108 Harvard level-1 caches) in kilobytes. Both UltraSPARC and
2109 Niagara processors feature an L1D$ of 16KB. */
2110 maybe_set_param_value (PARAM_L1_CACHE_SIZE,
2111 ((sparc_cpu == PROCESSOR_ULTRASPARC
2112 || sparc_cpu == PROCESSOR_ULTRASPARC3
2113 || sparc_cpu == PROCESSOR_NIAGARA
2114 || sparc_cpu == PROCESSOR_NIAGARA2
2115 || sparc_cpu == PROCESSOR_NIAGARA3
2116 || sparc_cpu == PROCESSOR_NIAGARA4
2117 || sparc_cpu == PROCESSOR_NIAGARA7
2118 || sparc_cpu == PROCESSOR_M8)
2119 ? 16 : 64),
2120 global_options.x_param_values,
2121 global_options_set.x_param_values);
2124 /* PARAM_L2_CACHE_SIZE is the size of the L2 in kilobytes. Note
2125 that 512 is the default in params.def. */
2126 maybe_set_param_value (PARAM_L2_CACHE_SIZE,
2127 ((sparc_cpu == PROCESSOR_NIAGARA4
2128 || sparc_cpu == PROCESSOR_M8)
2129 ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
2130 ? 256 : 512)),
2131 global_options.x_param_values,
2132 global_options_set.x_param_values);
2135 /* Disable save slot sharing for call-clobbered registers by default.
2136 The IRA sharing algorithm works on single registers only and this
2137 pessimizes for double floating-point registers. */
2138 if (!global_options_set.x_flag_ira_share_save_slots)
2139 flag_ira_share_save_slots = 0;
2141 /* Only enable REE by default in 64-bit mode where it helps to eliminate
2142 redundant 32-to-64-bit extensions. */
2143 if (!global_options_set.x_flag_ree && TARGET_ARCH32)
2144 flag_ree = 0;
2147 /* Miscellaneous utilities. */
2149 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
2150 or branch on register contents instructions. */
2153 v9_regcmp_p (enum rtx_code code)
2155 return (code == EQ || code == NE || code == GE || code == LT
2156 || code == LE || code == GT);
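/* These are exactly the six comparisons against zero that the V9
   register-contents instructions can test directly (brz, brlez,
   brlz, brnz, brgz, brgez and the matching movr variants).  */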
2159 /* Nonzero if OP is a floating point constant which can
2160 be loaded into an integer register using a single
2161 sethi instruction. */
2164 fp_sethi_p (rtx op)
2166 if (GET_CODE (op) == CONST_DOUBLE)
2168 long i;
2170 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2171 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
2174 return 0;
2177 /* Nonzero if OP is a floating point constant which can
2178 be loaded into an integer register using a single
2179 mov instruction. */
2182 fp_mov_p (rtx op)
2184 if (GET_CODE (op) == CONST_DOUBLE)
2186 long i;
2188 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2189 return SPARC_SIMM13_P (i);
2192 return 0;
2195 /* Nonzero if OP is a floating point constant which can
2196 be loaded into an integer register using a high/losum
2197 instruction sequence. */
2200 fp_high_losum_p (rtx op)
2202 /* The constraints calling this should only be in
2203 SFmode move insns, so any constant which cannot
2204 be moved using a single insn will do. */
2205 if (GET_CODE (op) == CONST_DOUBLE)
2207 long i;
2209 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2210 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
2213 return 0;
2216 /* Return true if the address of LABEL can be loaded by means of the
2217 mov{si,di}_pic_label_ref patterns in PIC mode. */
2219 static bool
2220 can_use_mov_pic_label_ref (rtx label)
2222 /* VxWorks does not impose a fixed gap between segments; the run-time
2223 gap can be different from the object-file gap. We therefore can't
2224 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
2225 are absolutely sure that X is in the same segment as the GOT.
2226 Unfortunately, the flexibility of linker scripts means that we
2227 can't be sure of that in general, so assume that GOT-relative
2228 accesses are never valid on VxWorks. */
2229 if (TARGET_VXWORKS_RTP)
2230 return false;
2232 /* Similarly, if the label is non-local, it might end up being placed
2233 in a different section than the current one, but mov_pic_label_ref
2234 requires the label and the code to be in the same section. */
2235 if (LABEL_REF_NONLOCAL_P (label))
2236 return false;
2238 /* Finally, if we are reordering basic blocks and partitioning into hot
2239 and cold sections, this might happen for any label. */
2240 if (flag_reorder_blocks_and_partition)
2241 return false;
2243 return true;
2246 /* Expand a move instruction. Return true if all work is done. */
2248 bool
2249 sparc_expand_move (machine_mode mode, rtx *operands)
2251 /* Handle sets of MEM first. */
2252 if (GET_CODE (operands[0]) == MEM)
2254 /* 0 is a register (or a pair of registers) on SPARC. */
2255 if (register_or_zero_operand (operands[1], mode))
2256 return false;
2258 if (!reload_in_progress)
2260 operands[0] = validize_mem (operands[0]);
2261 operands[1] = force_reg (mode, operands[1]);
2265 /* Fix up TLS cases. */
2266 if (TARGET_HAVE_TLS
2267 && CONSTANT_P (operands[1])
2268 && sparc_tls_referenced_p (operands [1]))
2270 operands[1] = sparc_legitimize_tls_address (operands[1]);
2271 return false;
2274 /* Fix up PIC cases. */
2275 if (flag_pic && CONSTANT_P (operands[1]))
2277 if (pic_address_needs_scratch (operands[1]))
2278 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
2280 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
2281 if ((GET_CODE (operands[1]) == LABEL_REF
2282 && can_use_mov_pic_label_ref (operands[1]))
2283 || (GET_CODE (operands[1]) == CONST
2284 && GET_CODE (XEXP (operands[1], 0)) == PLUS
2285 && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
2286 && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT
2287 && can_use_mov_pic_label_ref (XEXP (XEXP (operands[1], 0), 0))))
2289 if (mode == SImode)
2291 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
2292 return true;
2295 if (mode == DImode)
2297 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
2298 return true;
2302 if (symbolic_operand (operands[1], mode))
2304 operands[1]
2305 = sparc_legitimize_pic_address (operands[1],
2306 reload_in_progress
2307 ? operands[0] : NULL_RTX);
2308 return false;
2312 /* If we are trying to toss an integer constant into FP registers,
2313 or loading an FP or vector constant, force it into memory. */
2314 if (CONSTANT_P (operands[1])
2315 && REG_P (operands[0])
2316 && (SPARC_FP_REG_P (REGNO (operands[0]))
2317 || SCALAR_FLOAT_MODE_P (mode)
2318 || VECTOR_MODE_P (mode)))
2320 /* emit_group_store will send such bogosity to us when it is
2321 not storing directly into memory. So fix this up to avoid
2322 crashes in output_constant_pool. */
2323 if (operands [1] == const0_rtx)
2324 operands[1] = CONST0_RTX (mode);
2326 /* We can clear FP registers or set them to all-ones if TARGET_VIS,
2327 and can always do so for other regs. */
2328 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
2329 && (const_zero_operand (operands[1], mode)
2330 || const_all_ones_operand (operands[1], mode)))
2331 return false;
2333 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
2334 /* We are able to build any SF constant in integer registers
2335 with at most 2 instructions. */
2336 && (mode == SFmode
2337 /* And any DF constant in integer registers if needed. */
2338 || (mode == DFmode && !can_create_pseudo_p ())))
2339 return false;
2341 operands[1] = force_const_mem (mode, operands[1]);
2342 if (!reload_in_progress)
2343 operands[1] = validize_mem (operands[1]);
2344 return false;
2347 /* Accept non-constants and valid constants unmodified. */
2348 if (!CONSTANT_P (operands[1])
2349 || GET_CODE (operands[1]) == HIGH
2350 || input_operand (operands[1], mode))
2351 return false;
2353 switch (mode)
2355 case E_QImode:
2356 /* All QImode constants require only one insn, so proceed. */
2357 break;
2359 case E_HImode:
2360 case E_SImode:
2361 sparc_emit_set_const32 (operands[0], operands[1]);
2362 return true;
2364 case E_DImode:
2365 /* input_operand should have filtered out 32-bit mode. */
2366 sparc_emit_set_const64 (operands[0], operands[1]);
2367 return true;
2369 case E_TImode:
2371 rtx high, low;
2372 /* TImode isn't available in 32-bit mode. */
2373 split_double (operands[1], &high, &low);
2374 emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
2375 high));
2376 emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
2377 low));
2379 return true;
2381 default:
2382 gcc_unreachable ();
2385 return false;
2388 /* Load OP1, a 32-bit constant, into OP0, a register.
2389 We know it can't be done in one insn when we get
2390 here; the move expander guarantees this. */
2392 static void
2393 sparc_emit_set_const32 (rtx op0, rtx op1)
2395 machine_mode mode = GET_MODE (op0);
2396 rtx temp = op0;
2398 if (can_create_pseudo_p ())
2399 temp = gen_reg_rtx (mode);
2401 if (GET_CODE (op1) == CONST_INT)
2403 gcc_assert (!small_int_operand (op1, mode)
2404 && !const_high_operand (op1, mode));
2406 /* Emit them as real moves instead of a HIGH/LO_SUM;
2407 this way CSE can see everything and reuse intermediate
2408 values if it wants. */
2409 emit_insn (gen_rtx_SET (temp, GEN_INT (INTVAL (op1)
2410 & ~(HOST_WIDE_INT) 0x3ff)));
2412 emit_insn (gen_rtx_SET (op0,
2413 gen_rtx_IOR (mode, temp,
2414 GEN_INT (INTVAL (op1) & 0x3ff))));
2416 else
2418 /* A symbol, emit in the traditional way. */
2419 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, op1)));
2420 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (mode, temp, op1)));
2424 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
2425 If TEMP is nonzero, we are forbidden to use any other scratch
2426 registers. Otherwise, we are allowed to generate them as needed.
2428 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
2429 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
2431 void
2432 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
2434 rtx cst, temp1, temp2, temp3, temp4, temp5;
2435 rtx ti_temp = 0;
2437 /* Deal with too large offsets. */
2438 if (GET_CODE (op1) == CONST
2439 && GET_CODE (XEXP (op1, 0)) == PLUS
2440 && CONST_INT_P (cst = XEXP (XEXP (op1, 0), 1))
2441 && trunc_int_for_mode (INTVAL (cst), SImode) != INTVAL (cst))
2443 gcc_assert (!temp);
2444 temp1 = gen_reg_rtx (DImode);
2445 temp2 = gen_reg_rtx (DImode);
2446 sparc_emit_set_const64 (temp2, cst);
2447 sparc_emit_set_symbolic_const64 (temp1, XEXP (XEXP (op1, 0), 0),
2448 NULL_RTX);
2449 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp1, temp2)));
2450 return;
2453 if (temp && GET_MODE (temp) == TImode)
2455 ti_temp = temp;
2456 temp = gen_rtx_REG (DImode, REGNO (temp));
2459 /* SPARC-V9 code-model support. */
2460 switch (sparc_cmodel)
2462 case CM_MEDLOW:
2463 /* The range spanned by all instructions in the object is less
2464 than 2^31 bytes (2GB) and the distance from any instruction
2465 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2466 than 2^31 bytes (2GB).
2468 The executable must be in the low 4TB of the virtual address
2469 space.
2471 sethi %hi(symbol), %temp1
2472 or %temp1, %lo(symbol), %reg */
2473 if (temp)
2474 temp1 = temp; /* op0 is allowed. */
2475 else
2476 temp1 = gen_reg_rtx (DImode);
2478 emit_insn (gen_rtx_SET (temp1, gen_rtx_HIGH (DImode, op1)));
2479 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
2480 break;
2482 case CM_MEDMID:
2483 /* The range spanned by all instructions in the object is less
2484 than 2^31 bytes (2GB) and the distance from any instruction
2485 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2486 than 2^31 bytes (2GB).
2488 The executable must be in the low 16TB of the virtual address
2489 space.
2491 sethi %h44(symbol), %temp1
2492 or %temp1, %m44(symbol), %temp2
2493 sllx %temp2, 12, %temp3
2494 or %temp3, %l44(symbol), %reg */
2495 if (temp)
2497 temp1 = op0;
2498 temp2 = op0;
2499 temp3 = temp; /* op0 is allowed. */
2501 else
2503 temp1 = gen_reg_rtx (DImode);
2504 temp2 = gen_reg_rtx (DImode);
2505 temp3 = gen_reg_rtx (DImode);
2508 emit_insn (gen_seth44 (temp1, op1));
2509 emit_insn (gen_setm44 (temp2, temp1, op1));
2510 emit_insn (gen_rtx_SET (temp3,
2511 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
2512 emit_insn (gen_setl44 (op0, temp3, op1));
2513 break;
2515 case CM_MEDANY:
2516 /* The range spanned by all instructions in the object is less
2517 than 2^31 bytes (2GB) and the distance from any instruction
2518 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2519 than 2^31 bytes (2GB).
2521 The executable can be placed anywhere in the virtual address
2522 space.
2524 sethi %hh(symbol), %temp1
2525 sethi %lm(symbol), %temp2
2526 or %temp1, %hm(symbol), %temp3
2527 sllx %temp3, 32, %temp4
2528 or %temp4, %temp2, %temp5
2529 or %temp5, %lo(symbol), %reg */
2530 if (temp)
2532 /* It is possible that one of the registers we got for operands[2]
2533 might coincide with that of operands[0] (which is why we made
2534 it TImode). Pick the other one to use as our scratch. */
2535 if (rtx_equal_p (temp, op0))
2537 gcc_assert (ti_temp);
2538 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2540 temp1 = op0;
2541 temp2 = temp; /* op0 is _not_ allowed, see above. */
2542 temp3 = op0;
2543 temp4 = op0;
2544 temp5 = op0;
2546 else
2548 temp1 = gen_reg_rtx (DImode);
2549 temp2 = gen_reg_rtx (DImode);
2550 temp3 = gen_reg_rtx (DImode);
2551 temp4 = gen_reg_rtx (DImode);
2552 temp5 = gen_reg_rtx (DImode);
2555 emit_insn (gen_sethh (temp1, op1));
2556 emit_insn (gen_setlm (temp2, op1));
2557 emit_insn (gen_sethm (temp3, temp1, op1));
2558 emit_insn (gen_rtx_SET (temp4,
2559 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2560 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2561 emit_insn (gen_setlo (op0, temp5, op1));
2562 break;
2564 case CM_EMBMEDANY:
2565 /* Old old old backwards compatibility cruft here.
2566 Essentially it is MEDLOW with a fixed 64-bit
2567 virtual base added to all data segment addresses.
2568 Text-segment stuff is computed like MEDANY; we can't
2569 reuse the code above because the relocation knobs
2570 look different.
2572 Data segment: sethi %hi(symbol), %temp1
2573 add %temp1, EMBMEDANY_BASE_REG, %temp2
2574 or %temp2, %lo(symbol), %reg */
2575 if (data_segment_operand (op1, GET_MODE (op1)))
2577 if (temp)
2579 temp1 = temp; /* op0 is allowed. */
2580 temp2 = op0;
2582 else
2584 temp1 = gen_reg_rtx (DImode);
2585 temp2 = gen_reg_rtx (DImode);
2588 emit_insn (gen_embmedany_sethi (temp1, op1));
2589 emit_insn (gen_embmedany_brsum (temp2, temp1));
2590 emit_insn (gen_embmedany_losum (op0, temp2, op1));
2593 /* Text segment: sethi %uhi(symbol), %temp1
2594 sethi %hi(symbol), %temp2
2595 or %temp1, %ulo(symbol), %temp3
2596 sllx %temp3, 32, %temp4
2597 or %temp4, %temp2, %temp5
2598 or %temp5, %lo(symbol), %reg */
2599 else
2601 if (temp)
2603 /* It is possible that one of the registers we got for operands[2]
2604 might coincide with that of operands[0] (which is why we made
2605 it TImode). Pick the other one to use as our scratch. */
2606 if (rtx_equal_p (temp, op0))
2608 gcc_assert (ti_temp);
2609 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2611 temp1 = op0;
2612 temp2 = temp; /* op0 is _not_ allowed, see above. */
2613 temp3 = op0;
2614 temp4 = op0;
2615 temp5 = op0;
2617 else
2619 temp1 = gen_reg_rtx (DImode);
2620 temp2 = gen_reg_rtx (DImode);
2621 temp3 = gen_reg_rtx (DImode);
2622 temp4 = gen_reg_rtx (DImode);
2623 temp5 = gen_reg_rtx (DImode);
2626 emit_insn (gen_embmedany_textuhi (temp1, op1));
2627 emit_insn (gen_embmedany_texthi (temp2, op1));
2628 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
2629 emit_insn (gen_rtx_SET (temp4,
2630 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2631 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2632 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
2634 break;
2636 default:
2637 gcc_unreachable ();
2641 /* These avoid problems when cross compiling. If we do not
2642 go through all this hair then the optimizer will see
2643 invalid REG_EQUAL notes or in some cases none at all. */
2644 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
2645 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
2646 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
2647 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
2649 /* The optimizer must not assume anything about exactly
2650 which bits are set for a HIGH; they are unspecified.
2651 Unfortunately this leads to many missed optimizations
2652 during CSE. We mask out the non-HIGH bits so the result
2653 matches a plain movdi, to alleviate this problem. */
2654 static rtx
2655 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
2657 return gen_rtx_SET (dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
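/* A worked example (illustrative value): for val = 0x12345678 this
   emits a set of DEST to 0x12345400, i.e. val with its low 10 bits
   masked off, which is exactly what sethi %hi(0x12345678) would
   leave in the register.  */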
2660 static rtx
2661 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
2663 return gen_rtx_SET (dest, GEN_INT (val));
2666 static rtx
2667 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
2669 return gen_rtx_IOR (DImode, src, GEN_INT (val));
2672 static rtx
2673 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
2675 return gen_rtx_XOR (DImode, src, GEN_INT (val));
2678 /* Worker routines for 64-bit constant formation on arch64.
2679 One of the key things to do in these emissions is
2680 to create as many temp REGs as possible. This makes it
2681 possible for half-built constants to be used later when
2682 such values are similar to something required later on.
2683 Without doing this, the optimizer cannot see such
2684 opportunities. */
2686 static void sparc_emit_set_const64_quick1 (rtx, rtx,
2687 unsigned HOST_WIDE_INT, int);
2689 static void
2690 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
2691 unsigned HOST_WIDE_INT low_bits, int is_neg)
2693 unsigned HOST_WIDE_INT high_bits;
2695 if (is_neg)
2696 high_bits = (~low_bits) & 0xffffffff;
2697 else
2698 high_bits = low_bits;
2700 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2701 if (!is_neg)
2703 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2705 else
2707 /* If we are XOR'ing with -1, then we should emit a one's complement
2708 instead. This way the combiner will notice logical operations
2709 such as ANDN later on and substitute. */
2710 if ((low_bits & 0x3ff) == 0x3ff)
2712 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
2714 else
2716 emit_insn (gen_rtx_SET (op0,
2717 gen_safe_XOR64 (temp,
2718 (-(HOST_WIDE_INT)0x400
2719 | (low_bits & 0x3ff)))));
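/* A worked example (illustrative constant): loading 0x12345678 has
   high_bits == 0, hence is_neg == 0, so the code above emits
   sethi %hi(0x12345678), %temp   ! %temp = 0x12345400
   or %temp, 0x278, %reg          ! %reg = 0x12345678  */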
2724 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
2725 unsigned HOST_WIDE_INT, int);
2727 static void
2728 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
2729 unsigned HOST_WIDE_INT high_bits,
2730 unsigned HOST_WIDE_INT low_immediate,
2731 int shift_count)
2733 rtx temp2 = op0;
2735 if ((high_bits & 0xfffffc00) != 0)
2737 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2738 if ((high_bits & ~0xfffffc00) != 0)
2739 emit_insn (gen_rtx_SET (op0,
2740 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2741 else
2742 temp2 = temp;
2744 else
2746 emit_insn (gen_safe_SET64 (temp, high_bits));
2747 temp2 = temp;
2750 /* Now shift it up into place. */
2751 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp2,
2752 GEN_INT (shift_count))));
2754 /* If there is a low immediate part piece, finish up by
2755 putting that in as well. */
2756 if (low_immediate != 0)
2757 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (op0, low_immediate)));
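/* A worked example (illustrative constant): for 0x1234567800000000
   the caller passes high_bits == 0x12345678, low_immediate == 0 and
   shift_count == 32, so we emit sethi/or to build 0x12345678 and
   then sllx it by 32 -- three instructions in total.  */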
2760 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
2761 unsigned HOST_WIDE_INT);
2763 /* Full 64-bit constant decomposition. Even though this is the
2764 'worst' case, we still optimize a few things away. */
2765 static void
2766 sparc_emit_set_const64_longway (rtx op0, rtx temp,
2767 unsigned HOST_WIDE_INT high_bits,
2768 unsigned HOST_WIDE_INT low_bits)
2770 rtx sub_temp = op0;
2772 if (can_create_pseudo_p ())
2773 sub_temp = gen_reg_rtx (DImode);
2775 if ((high_bits & 0xfffffc00) != 0)
2777 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2778 if ((high_bits & ~0xfffffc00) != 0)
2779 emit_insn (gen_rtx_SET (sub_temp,
2780 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2781 else
2782 sub_temp = temp;
2784 else
2786 emit_insn (gen_safe_SET64 (temp, high_bits));
2787 sub_temp = temp;
2790 if (can_create_pseudo_p ())
2792 rtx temp2 = gen_reg_rtx (DImode);
2793 rtx temp3 = gen_reg_rtx (DImode);
2794 rtx temp4 = gen_reg_rtx (DImode);
2796 emit_insn (gen_rtx_SET (temp4, gen_rtx_ASHIFT (DImode, sub_temp,
2797 GEN_INT (32))));
2799 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
2800 if ((low_bits & ~0xfffffc00) != 0)
2802 emit_insn (gen_rtx_SET (temp3,
2803 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2804 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp3)));
2806 else
2808 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp2)));
2811 else
2813 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2814 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2815 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2816 int to_shift = 12;
2818 /* We are in the middle of reload, so this is really
2819 painful. However we do still make an attempt to
2820 avoid emitting truly stupid code. */
2821 if (low1 != const0_rtx)
2823 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2824 GEN_INT (to_shift))));
2825 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low1)));
2826 sub_temp = op0;
2827 to_shift = 12;
2829 else
2831 to_shift += 12;
2833 if (low2 != const0_rtx)
2835 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2836 GEN_INT (to_shift))));
2837 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low2)));
2838 sub_temp = op0;
2839 to_shift = 8;
2841 else
2843 to_shift += 8;
2845 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2846 GEN_INT (to_shift))));
2847 if (low3 != const0_rtx)
2848 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low3)));
2849 /* phew... */
2853 /* Analyze a 64-bit constant for certain properties. */
2854 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2855 unsigned HOST_WIDE_INT,
2856 int *, int *, int *);
2858 static void
2859 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2860 unsigned HOST_WIDE_INT low_bits,
2861 int *hbsp, int *lbsp, int *abbasp)
2863 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2864 int i;
2866 lowest_bit_set = highest_bit_set = -1;
2867 i = 0;
2870 if ((lowest_bit_set == -1)
2871 && ((low_bits >> i) & 1))
2872 lowest_bit_set = i;
2873 if ((highest_bit_set == -1)
2874 && ((high_bits >> (32 - i - 1)) & 1))
2875 highest_bit_set = (64 - i - 1);
2877 while (++i < 32
2878 && ((highest_bit_set == -1)
2879 || (lowest_bit_set == -1)));
2880 if (i == 32)
2882 i = 0;
2885 if ((lowest_bit_set == -1)
2886 && ((high_bits >> i) & 1))
2887 lowest_bit_set = i + 32;
2888 if ((highest_bit_set == -1)
2889 && ((low_bits >> (32 - i - 1)) & 1))
2890 highest_bit_set = 32 - i - 1;
2892 while (++i < 32
2893 && ((highest_bit_set == -1)
2894 || (lowest_bit_set == -1)));
2896 /* If there are no bits set this should have gone out
2897 as one instruction! */
2898 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
2899 all_bits_between_are_set = 1;
2900 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2902 if (i < 32)
2904 if ((low_bits & (1 << i)) != 0)
2905 continue;
2907 else
2909 if ((high_bits & (1 << (i - 32))) != 0)
2910 continue;
2912 all_bits_between_are_set = 0;
2913 break;
2915 *hbsp = highest_bit_set;
2916 *lbsp = lowest_bit_set;
2917 *abbasp = all_bits_between_are_set;
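/* For instance (an illustrative constant), 0x00000003fffffffc has
   high_bits == 0x3 and low_bits == 0xfffffffc, so the scans above
   yield lowest_bit_set == 2, highest_bit_set == 33 and
   all_bits_between_are_set == 1.  */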
2920 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2922 static int
2923 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2924 unsigned HOST_WIDE_INT low_bits)
2926 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2928 if (high_bits == 0
2929 || high_bits == 0xffffffff)
2930 return 1;
2932 analyze_64bit_constant (high_bits, low_bits,
2933 &highest_bit_set, &lowest_bit_set,
2934 &all_bits_between_are_set);
2936 if ((highest_bit_set == 63
2937 || lowest_bit_set == 0)
2938 && all_bits_between_are_set != 0)
2939 return 1;
2941 if ((highest_bit_set - lowest_bit_set) < 21)
2942 return 1;
2944 return 0;
2947 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2948 unsigned HOST_WIDE_INT,
2949 int, int);
2951 static unsigned HOST_WIDE_INT
2952 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2953 unsigned HOST_WIDE_INT low_bits,
2954 int lowest_bit_set, int shift)
2956 HOST_WIDE_INT hi, lo;
2958 if (lowest_bit_set < 32)
2960 lo = (low_bits >> lowest_bit_set) << shift;
2961 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2963 else
2965 lo = 0;
2966 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2968 gcc_assert (! (hi & lo));
2969 return (hi | lo);
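/* A worked example (illustrative constant): 0x0000000ff0000000 has
   high_bits == 0xf, low_bits == 0xf0000000 and lowest_bit_set == 28;
   with shift == 10 this returns 0x3fc00, which a single sethi can
   load before the caller shifts it back up into place.  */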
2972 /* Here we are sure to be arch64 and this is an integer constant
2973 being loaded into a register. Emit the most efficient
2974 insn sequence possible. Detection of all the 1-insn cases
2975 has been done already. */
2976 static void
2977 sparc_emit_set_const64 (rtx op0, rtx op1)
2979 unsigned HOST_WIDE_INT high_bits, low_bits;
2980 int lowest_bit_set, highest_bit_set;
2981 int all_bits_between_are_set;
2982 rtx temp = 0;
2984 /* Sanity check that we know what we are working with. */
2985 gcc_assert (TARGET_ARCH64
2986 && (GET_CODE (op0) == SUBREG
2987 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2989 if (! can_create_pseudo_p ())
2990 temp = op0;
2992 if (GET_CODE (op1) != CONST_INT)
2994 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2995 return;
2998 if (! temp)
2999 temp = gen_reg_rtx (DImode);
3001 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
3002 low_bits = (INTVAL (op1) & 0xffffffff);
3004 /* low_bits bits 0 --> 31
3005 high_bits bits 32 --> 63 */
3007 analyze_64bit_constant (high_bits, low_bits,
3008 &highest_bit_set, &lowest_bit_set,
3009 &all_bits_between_are_set);
3011 /* First try for a 2-insn sequence. */
3013 /* These situations are preferred because the optimizer can
3014 * do more things with them:
3015 * 1) mov -1, %reg
3016 * sllx %reg, shift, %reg
3017 * 2) mov -1, %reg
3018 * srlx %reg, shift, %reg
3019 * 3) mov some_small_const, %reg
3020 * sllx %reg, shift, %reg
3022 if (((highest_bit_set == 63
3023 || lowest_bit_set == 0)
3024 && all_bits_between_are_set != 0)
3025 || ((highest_bit_set - lowest_bit_set) < 12))
3027 HOST_WIDE_INT the_const = -1;
3028 int shift = lowest_bit_set;
3030 if ((highest_bit_set != 63
3031 && lowest_bit_set != 0)
3032 || all_bits_between_are_set == 0)
3034 the_const =
3035 create_simple_focus_bits (high_bits, low_bits,
3036 lowest_bit_set, 0);
3038 else if (lowest_bit_set == 0)
3039 shift = -(63 - highest_bit_set);
3041 gcc_assert (SPARC_SIMM13_P (the_const));
3042 gcc_assert (shift != 0);
3044 emit_insn (gen_safe_SET64 (temp, the_const));
3045 if (shift > 0)
3046 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp,
3047 GEN_INT (shift))));
3048 else if (shift < 0)
3049 emit_insn (gen_rtx_SET (op0, gen_rtx_LSHIFTRT (DImode, temp,
3050 GEN_INT (-shift))));
3051 return;
3054 /* Now a range of 22 or fewer bits set somewhere.
3055 * 1) sethi %hi(focus_bits), %reg
3056 * sllx %reg, shift, %reg
3057 * 2) sethi %hi(focus_bits), %reg
3058 * srlx %reg, shift, %reg
3060 if ((highest_bit_set - lowest_bit_set) < 21)
3062 unsigned HOST_WIDE_INT focus_bits =
3063 create_simple_focus_bits (high_bits, low_bits,
3064 lowest_bit_set, 10);
3066 gcc_assert (SPARC_SETHI_P (focus_bits));
3067 gcc_assert (lowest_bit_set != 10);
3069 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
3071 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
3072 if (lowest_bit_set < 10)
3073 emit_insn (gen_rtx_SET (op0,
3074 gen_rtx_LSHIFTRT (DImode, temp,
3075 GEN_INT (10 - lowest_bit_set))));
3076 else if (lowest_bit_set > 10)
3077 emit_insn (gen_rtx_SET (op0,
3078 gen_rtx_ASHIFT (DImode, temp,
3079 GEN_INT (lowest_bit_set - 10))));
3080 return;
3083 /* 1) sethi %hi(low_bits), %reg
3084 * or %reg, %lo(low_bits), %reg
3085 * 2) sethi %hi(~low_bits), %reg
3086 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
3088 if (high_bits == 0
3089 || high_bits == 0xffffffff)
3091 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
3092 (high_bits == 0xffffffff));
3093 return;
3096 /* Now, try 3-insn sequences. */
3098 /* 1) sethi %hi(high_bits), %reg
3099 * or %reg, %lo(high_bits), %reg
3100 * sllx %reg, 32, %reg
3102 if (low_bits == 0)
3104 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
3105 return;
3108 /* We may be able to do something quick
3109 when the constant is negated, so try that. */
3110 if (const64_is_2insns ((~high_bits) & 0xffffffff,
3111 (~low_bits) & 0xfffffc00))
3113 /* NOTE: The trailing bits get XOR'd so we need the
3114 non-negated bits, not the negated ones. */
3115 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
3117 if ((((~high_bits) & 0xffffffff) == 0
3118 && ((~low_bits) & 0x80000000) == 0)
3119 || (((~high_bits) & 0xffffffff) == 0xffffffff
3120 && ((~low_bits) & 0x80000000) != 0))
3122 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
3124 if ((SPARC_SETHI_P (fast_int)
3125 && (~high_bits & 0xffffffff) == 0)
3126 || SPARC_SIMM13_P (fast_int))
3127 emit_insn (gen_safe_SET64 (temp, fast_int));
3128 else
3129 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
3131 else
3133 rtx negated_const;
3134 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
3135 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff)) << 32));
3136 sparc_emit_set_const64 (temp, negated_const);
3139 /* If we are XOR'ing with -1, then we should emit a one's complement
3140 instead. This way the combiner will notice logical operations
3141 such as ANDN later on and substitute. */
3142 if (trailing_bits == 0x3ff)
3144 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
3146 else
3148 emit_insn (gen_rtx_SET (op0,
3149 gen_safe_XOR64 (temp,
3150 (-0x400 | trailing_bits))));
3152 return;
3155 /* 1) sethi %hi(xxx), %reg
3156 * or %reg, %lo(xxx), %reg
3157 * sllx %reg, yyy, %reg
3159 * ??? This is just a generalized version of the low_bits==0
3160 * thing above, FIXME...
3162 if ((highest_bit_set - lowest_bit_set) < 32)
3164 unsigned HOST_WIDE_INT focus_bits =
3165 create_simple_focus_bits (high_bits, low_bits,
3166 lowest_bit_set, 0);
3168 /* We can't get here in this state. */
3169 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
3171 /* So what we know is that the set bits straddle the
3172 middle of the 64-bit word. */
3173 sparc_emit_set_const64_quick2 (op0, temp,
3174 focus_bits, 0,
3175 lowest_bit_set);
3176 return;
3179 /* 1) sethi %hi(high_bits), %reg
3180 * or %reg, %lo(high_bits), %reg
3181 * sllx %reg, 32, %reg
3182 * or %reg, low_bits, %reg
3184 if (SPARC_SIMM13_P (low_bits) && ((int)low_bits > 0))
3186 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
3187 return;
3190 /* The easiest way, when all else fails, is full decomposition. */
3191 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
3194 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
3196 static bool
3197 sparc_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
3199 *p1 = SPARC_ICC_REG;
3200 *p2 = SPARC_FCC_REG;
3201 return true;
3204 /* Implement TARGET_MIN_ARITHMETIC_PRECISION. */
3206 static unsigned int
3207 sparc_min_arithmetic_precision (void)
3209 return 32;
3212 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
3213 return the mode to be used for the comparison. For floating-point,
3214 CCFP[E]mode is used. CCNZmode should be used when the first operand
3215 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
3216 processing is needed. */
3218 machine_mode
3219 select_cc_mode (enum rtx_code op, rtx x, rtx y)
3221 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3223 switch (op)
3225 case EQ:
3226 case NE:
3227 case UNORDERED:
3228 case ORDERED:
3229 case UNLT:
3230 case UNLE:
3231 case UNGT:
3232 case UNGE:
3233 case UNEQ:
3234 case LTGT:
3235 return CCFPmode;
3237 case LT:
3238 case LE:
3239 case GT:
3240 case GE:
3241 return CCFPEmode;
3243 default:
3244 gcc_unreachable ();
3247 else if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
3248 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
3249 && y == const0_rtx)
3251 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3252 return CCXNZmode;
3253 else
3254 return CCNZmode;
3256 else
3258 /* This is for the cmp<mode>_sne pattern. */
3259 if (GET_CODE (x) == NOT && y == constm1_rtx)
3261 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3262 return CCXCmode;
3263 else
3264 return CCCmode;
3267 /* This is for the [u]addvdi4_sp32 and [u]subvdi4_sp32 patterns. */
3268 if (!TARGET_ARCH64 && GET_MODE (x) == DImode)
3270 if (GET_CODE (y) == UNSPEC
3271 && (XINT (y, 1) == UNSPEC_ADDV
3272 || XINT (y, 1) == UNSPEC_SUBV
3273 || XINT (y, 1) == UNSPEC_NEGV))
3274 return CCVmode;
3275 else
3276 return CCCmode;
3279 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3280 return CCXmode;
3281 else
3282 return CCmode;
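/* For instance, comparing (plus:SI x y) against zero selects CCNZmode
   (CCXNZmode for DImode on 64-bit), so the branch can rely on the N
   and Z condition codes that the addcc instruction itself sets.  */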
3286 /* Emit the compare insn and return the CC reg for a CODE comparison
3287 with operands X and Y. */
3289 static rtx
3290 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
3292 machine_mode mode;
3293 rtx cc_reg;
3295 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
3296 return x;
3298 mode = SELECT_CC_MODE (code, x, y);
3300 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
3301 fcc regs (cse can't tell they're really call clobbered regs and will
3302 remove a duplicate comparison even if there is an intervening function
3303 call - it will then try to reload the cc reg via an int reg which is why
3304 we need the movcc patterns). It is possible to provide the movcc
3305 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
3306 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
3307 to tell cse that CCFPE mode registers (even pseudos) are call
3308 clobbered. */
3310 /* ??? This is an experiment. Rather than making changes to cse which may
3311 or may not be easy/clean, we do our own cse. This is possible because
3312 we will generate hard registers. Cse knows they're call clobbered (it
3313 doesn't know the same thing about pseudos). If we guess wrong, no big
3314 deal, but if we win, great! */
3316 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3317 #if 1 /* experiment */
3319 int reg;
3320 /* We cycle through the registers to ensure they're all exercised. */
3321 static int next_fcc_reg = 0;
3322 /* Previous x,y for each fcc reg. */
3323 static rtx prev_args[4][2];
3325 /* Scan prev_args for x,y. */
3326 for (reg = 0; reg < 4; reg++)
3327 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
3328 break;
3329 if (reg == 4)
3331 reg = next_fcc_reg;
3332 prev_args[reg][0] = x;
3333 prev_args[reg][1] = y;
3334 next_fcc_reg = (next_fcc_reg + 1) & 3;
3336 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
3338 #else
3339 cc_reg = gen_reg_rtx (mode);
3340 #endif /* ! experiment */
3341 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3342 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
3343 else
3344 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
3346 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
3347 will only result in an unrecognizable insn so no point in asserting. */
3348 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
3350 return cc_reg;
3354 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
3357 gen_compare_reg (rtx cmp)
3359 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
3362 /* This function is used for v9 only.
3363 DEST is the target of the Scc insn.
3364 CODE is the code for an Scc's comparison.
3365 X and Y are the values we compare.
3367 This function is needed to turn
3369 (set (reg:SI 110)
3370 (gt (reg:CCX 100 %icc)
3371 (const_int 0)))
3372 into
3373 (set (reg:SI 110)
3374 (gt:DI (reg:CCX 100 %icc)
3375 (const_int 0)))
3377 I.e. the instruction recognizer needs to see the mode of the comparison to
3378 find the right instruction. We could use "gt:DI" right in the
3379 define_expand, but leaving it out allows us to handle DI, SI, etc. */
3381 static int
3382 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
3384 if (! TARGET_ARCH64
3385 && (GET_MODE (x) == DImode
3386 || GET_MODE (dest) == DImode))
3387 return 0;
3389 /* Try to use the movrCC insns. */
3390 if (TARGET_ARCH64
3391 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3392 && y == const0_rtx
3393 && v9_regcmp_p (compare_code))
3395 rtx op0 = x;
3396 rtx temp;
3398 /* Special case for op0 != 0. This can be done with one instruction if
3399 dest == x. */
3401 if (compare_code == NE
3402 && GET_MODE (dest) == DImode
3403 && rtx_equal_p (op0, dest))
3405 emit_insn (gen_rtx_SET (dest,
3406 gen_rtx_IF_THEN_ELSE (DImode,
3407 gen_rtx_fmt_ee (compare_code, DImode,
3408 op0, const0_rtx),
3409 const1_rtx,
3410 dest)));
3411 return 1;
3414 if (reg_overlap_mentioned_p (dest, op0))
3416 /* Handle the case where dest == x.
3417 We "early clobber" the result. */
3418 op0 = gen_reg_rtx (GET_MODE (x));
3419 emit_move_insn (op0, x);
3422 emit_insn (gen_rtx_SET (dest, const0_rtx));
3423 if (GET_MODE (op0) != DImode)
3425 temp = gen_reg_rtx (DImode);
3426 convert_move (temp, op0, 0);
3428 else
3429 temp = op0;
3430 emit_insn (gen_rtx_SET (dest,
3431 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3432 gen_rtx_fmt_ee (compare_code, DImode,
3433 temp, const0_rtx),
3434 const1_rtx,
3435 dest)));
3436 return 1;
3438 else
3440 x = gen_compare_reg_1 (compare_code, x, y);
3441 y = const0_rtx;
3443 emit_insn (gen_rtx_SET (dest, const0_rtx));
3444 emit_insn (gen_rtx_SET (dest,
3445 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3446 gen_rtx_fmt_ee (compare_code,
3447 GET_MODE (x), x, y),
3448 const1_rtx, dest)));
3449 return 1;
3454 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
3455 without jumps using the addx/subx instructions. */
3457 bool
3458 emit_scc_insn (rtx operands[])
3460 rtx tem, x, y;
3461 enum rtx_code code;
3462 machine_mode mode;
3464 /* The quad-word fp compare library routines all return nonzero to indicate
3465 true, which is different from the equivalent libgcc routines, so we must
3466 handle them specially here. */
3467 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
3469 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
3470 GET_CODE (operands[1]));
3471 operands[2] = XEXP (operands[1], 0);
3472 operands[3] = XEXP (operands[1], 1);
3475 code = GET_CODE (operands[1]);
3476 x = operands[2];
3477 y = operands[3];
3478 mode = GET_MODE (x);
3480 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
3481 more applications). The exception to this is "reg != 0" which can
3482 be done in one instruction on v9 (so we do it). */
3483 if ((code == EQ || code == NE) && (mode == SImode || mode == DImode))
3485 if (y != const0_rtx)
3486 x = force_reg (mode, gen_rtx_XOR (mode, x, y));
3488 rtx pat = gen_rtx_SET (operands[0],
3489 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3490 x, const0_rtx));
3492 /* If we can use addx/subx or addxc, add a clobber for CC. */
3493 if (mode == SImode || (code == NE && TARGET_VIS3))
3495 rtx clobber
3496 = gen_rtx_CLOBBER (VOIDmode,
3497 gen_rtx_REG (mode == SImode ? CCmode : CCXmode,
3498 SPARC_ICC_REG));
3499 pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clobber));
3502 emit_insn (pat);
3503 return true;
3506 /* We can do LTU in DImode using the addxc instruction with VIS3. */
3507 if (TARGET_ARCH64
3508 && mode == DImode
3509 && !((code == LTU || code == GTU) && TARGET_VIS3)
3510 && gen_v9_scc (operands[0], code, x, y))
3511 return true;
3513 /* We can do LTU and GEU using the addx/subx instructions too. And
3514 for GTU/LEU, if both operands are registers, swap them and fall
3515 back to the easy case. */
3516 if (code == GTU || code == LEU)
3518 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3519 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
3521 tem = x;
3522 x = y;
3523 y = tem;
3524 code = swap_condition (code);
3528 if (code == LTU || code == GEU)
3530 emit_insn (gen_rtx_SET (operands[0],
3531 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3532 gen_compare_reg_1 (code, x, y),
3533 const0_rtx)));
3534 return true;
3537 /* All the possibilities to use addx/subx-based sequences have been
3538 exhausted; try for a 3-instruction sequence using v9 conditional
3539 moves. */
3540 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
3541 return true;
3543 /* Nope, do branches. */
3544 return false;
3547 /* Emit a conditional jump insn for the v9 architecture using comparison code
3548 CODE and jump target LABEL.
3549 This function exists to take advantage of the v9 brxx insns. */
3551 static void
3552 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
3554 emit_jump_insn (gen_rtx_SET (pc_rtx,
3555 gen_rtx_IF_THEN_ELSE (VOIDmode,
3556 gen_rtx_fmt_ee (code, GET_MODE (op0),
3557 op0, const0_rtx),
3558 gen_rtx_LABEL_REF (VOIDmode, label),
3559 pc_rtx)));
3562 /* Emit a conditional jump insn for the UA2011 architecture using
3563 comparison code CODE and jump target LABEL. This function exists
3564 to take advantage of the UA2011 Compare and Branch insns. */
3566 static void
3567 emit_cbcond_insn (enum rtx_code code, rtx op0, rtx op1, rtx label)
3569 rtx if_then_else;
3571 if_then_else = gen_rtx_IF_THEN_ELSE (VOIDmode,
3572 gen_rtx_fmt_ee (code, GET_MODE (op0),
3573 op0, op1),
3574 gen_rtx_LABEL_REF (VOIDmode, label),
3575 pc_rtx);
3577 emit_jump_insn (gen_rtx_SET (pc_rtx, if_then_else));
3580 void
3581 emit_conditional_branch_insn (rtx operands[])
3583 /* The quad-word fp compare library routines all return nonzero to indicate
3584 true, which is different from the equivalent libgcc routines, so we must
3585 handle them specially here. */
3586 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
3588 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
3589 GET_CODE (operands[0]));
3590 operands[1] = XEXP (operands[0], 0);
3591 operands[2] = XEXP (operands[0], 1);
3594 /* If we can tell early on that the comparison is against a constant
3595 that won't fit in the 5-bit signed immediate field of a cbcond,
3596 use one of the other v9 conditional branch sequences. */
3597 if (TARGET_CBCOND
3598 && GET_CODE (operands[1]) == REG
3599 && (GET_MODE (operands[1]) == SImode
3600 || (TARGET_ARCH64 && GET_MODE (operands[1]) == DImode))
3601 && (GET_CODE (operands[2]) != CONST_INT
3602 || SPARC_SIMM5_P (INTVAL (operands[2]))))
3604 emit_cbcond_insn (GET_CODE (operands[0]), operands[1], operands[2], operands[3]);
3605 return;
3608 if (TARGET_ARCH64 && operands[2] == const0_rtx
3609 && GET_CODE (operands[1]) == REG
3610 && GET_MODE (operands[1]) == DImode)
3612 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
3613 return;
3616 operands[1] = gen_compare_reg (operands[0]);
3617 operands[2] = const0_rtx;
3618 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
3619 operands[1], operands[2]);
3620 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
3621 operands[3]));
3625 /* Generate a DFmode part of a hard TFmode register.
3626 REG is the TFmode hard register, LOW is 1 for the
3627 low 64 bits of the register and 0 otherwise. */
3630 gen_df_reg (rtx reg, int low)
3632 int regno = REGNO (reg);
3634 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
3635 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
3636 return gen_rtx_REG (DFmode, regno);
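/* For example, assuming WORDS_BIG_ENDIAN, a TFmode value living in
   %f4 is split so that LOW == 1 yields the DFmode register %f6
   (regno + 2) while LOW == 0 yields %f4 itself; integer registers
   step by 1 on 64-bit targets instead.  */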
3639 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
3640 Unlike normal calls, TFmode operands are passed by reference. It is
3641 assumed that no more than 3 operands are required. */
3643 static void
3644 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
3646 rtx ret_slot = NULL, arg[3], func_sym;
3647 int i;
3649 /* We only expect to be called for conversions, unary, and binary ops. */
3650 gcc_assert (nargs == 2 || nargs == 3);
3652 for (i = 0; i < nargs; ++i)
3654 rtx this_arg = operands[i];
3655 rtx this_slot;
3657 /* TFmode arguments and return values are passed by reference. */
3658 if (GET_MODE (this_arg) == TFmode)
3660 int force_stack_temp;
3662 force_stack_temp = 0;
3663 if (TARGET_BUGGY_QP_LIB && i == 0)
3664 force_stack_temp = 1;
3666 if (GET_CODE (this_arg) == MEM
3667 && ! force_stack_temp)
3669 tree expr = MEM_EXPR (this_arg);
3670 if (expr)
3671 mark_addressable (expr);
3672 this_arg = XEXP (this_arg, 0);
3674 else if (CONSTANT_P (this_arg)
3675 && ! force_stack_temp)
3677 this_slot = force_const_mem (TFmode, this_arg);
3678 this_arg = XEXP (this_slot, 0);
3680 else
3682 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
3684 /* Operand 0 is the return value. We'll copy it out later. */
3685 if (i > 0)
3686 emit_move_insn (this_slot, this_arg);
3687 else
3688 ret_slot = this_slot;
3690 this_arg = XEXP (this_slot, 0);
3694 arg[i] = this_arg;
3697 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
3699 if (GET_MODE (operands[0]) == TFmode)
3701 if (nargs == 2)
3702 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3703 arg[0], GET_MODE (arg[0]),
3704 arg[1], GET_MODE (arg[1]));
3705 else
3706 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3707 arg[0], GET_MODE (arg[0]),
3708 arg[1], GET_MODE (arg[1]),
3709 arg[2], GET_MODE (arg[2]));
3711 if (ret_slot)
3712 emit_move_insn (operands[0], ret_slot);
3714 else
3716 rtx ret;
3718 gcc_assert (nargs == 2);
3720 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
3721 GET_MODE (operands[0]),
3722 arg[1], GET_MODE (arg[1]));
3724 if (ret != operands[0])
3725 emit_move_insn (operands[0], ret);
3729 /* Expand soft-float TFmode calls to SPARC ABI routines. */
3731 static void
3732 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
3734 const char *func;
3736 switch (code)
3738 case PLUS:
3739 func = "_Qp_add";
3740 break;
3741 case MINUS:
3742 func = "_Qp_sub";
3743 break;
3744 case MULT:
3745 func = "_Qp_mul";
3746 break;
3747 case DIV:
3748 func = "_Qp_div";
3749 break;
3750 default:
3751 gcc_unreachable ();
3754 emit_soft_tfmode_libcall (func, 3, operands);
3757 static void
3758 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
3760 const char *func;
3762 gcc_assert (code == SQRT);
3763 func = "_Qp_sqrt";
3765 emit_soft_tfmode_libcall (func, 2, operands);
3768 static void
3769 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
3771 const char *func;
3773 switch (code)
3775 case FLOAT_EXTEND:
3776 switch (GET_MODE (operands[1]))
3778 case E_SFmode:
3779 func = "_Qp_stoq";
3780 break;
3781 case E_DFmode:
3782 func = "_Qp_dtoq";
3783 break;
3784 default:
3785 gcc_unreachable ();
3787 break;
3789 case FLOAT_TRUNCATE:
3790 switch (GET_MODE (operands[0]))
3792 case E_SFmode:
3793 func = "_Qp_qtos";
3794 break;
3795 case E_DFmode:
3796 func = "_Qp_qtod";
3797 break;
3798 default:
3799 gcc_unreachable ();
3801 break;
3803 case FLOAT:
3804 switch (GET_MODE (operands[1]))
3806 case E_SImode:
3807 func = "_Qp_itoq";
3808 if (TARGET_ARCH64)
3809 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
3810 break;
3811 case E_DImode:
3812 func = "_Qp_xtoq";
3813 break;
3814 default:
3815 gcc_unreachable ();
3817 break;
3819 case UNSIGNED_FLOAT:
3820 switch (GET_MODE (operands[1]))
3822 case E_SImode:
3823 func = "_Qp_uitoq";
3824 if (TARGET_ARCH64)
3825 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
3826 break;
3827 case E_DImode:
3828 func = "_Qp_uxtoq";
3829 break;
3830 default:
3831 gcc_unreachable ();
3833 break;
3835 case FIX:
3836 switch (GET_MODE (operands[0]))
3838 case E_SImode:
3839 func = "_Qp_qtoi";
3840 break;
3841 case E_DImode:
3842 func = "_Qp_qtox";
3843 break;
3844 default:
3845 gcc_unreachable ();
3847 break;
3849 case UNSIGNED_FIX:
3850 switch (GET_MODE (operands[0]))
3852 case E_SImode:
3853 func = "_Qp_qtoui";
3854 break;
3855 case E_DImode:
3856 func = "_Qp_qtoux";
3857 break;
3858 default:
3859 gcc_unreachable ();
3861 break;
3863 default:
3864 gcc_unreachable ();
3867 emit_soft_tfmode_libcall (func, 2, operands);
3870 /* Expand a hard-float TFmode operation. All arguments must be in
3871 registers. */
3873 static void
3874 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
3876 rtx op, dest;
3878 if (GET_RTX_CLASS (code) == RTX_UNARY)
3880 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3881 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
3883 else
3885 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3886 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
3887 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3888 operands[1], operands[2]);
3891 if (register_operand (operands[0], VOIDmode))
3892 dest = operands[0];
3893 else
3894 dest = gen_reg_rtx (GET_MODE (operands[0]));
3896 emit_insn (gen_rtx_SET (dest, op));
3898 if (dest != operands[0])
3899 emit_move_insn (operands[0], dest);
3902 void
3903 emit_tfmode_binop (enum rtx_code code, rtx *operands)
3905 if (TARGET_HARD_QUAD)
3906 emit_hard_tfmode_operation (code, operands);
3907 else
3908 emit_soft_tfmode_binop (code, operands);
3911 void
3912 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3914 if (TARGET_HARD_QUAD)
3915 emit_hard_tfmode_operation (code, operands);
3916 else
3917 emit_soft_tfmode_unop (code, operands);
3920 void
3921 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3923 if (TARGET_HARD_QUAD)
3924 emit_hard_tfmode_operation (code, operands);
3925 else
3926 emit_soft_tfmode_cvt (code, operands);
3929 /* Return nonzero if a branch/jump/call instruction will emit a
3930 nop into its delay slot. */
3933 empty_delay_slot (rtx_insn *insn)
3935 rtx seq;
3937 /* If no previous instruction (should not happen), return true. */
3938 if (PREV_INSN (insn) == NULL)
3939 return 1;
3941 seq = NEXT_INSN (PREV_INSN (insn));
3942 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3943 return 0;
3945 return 1;
3948 /* Return nonzero if we should emit a nop after a cbcond instruction.
3949 The cbcond instruction does not have a delay slot; however, there is
3950 a severe performance penalty if a control transfer appears right
3951 after a cbcond. Therefore we emit a nop when we detect this
3952 situation. */
3955 emit_cbcond_nop (rtx_insn *insn)
3957 rtx next = next_active_insn (insn);
3959 if (!next)
3960 return 1;
3962 if (NONJUMP_INSN_P (next)
3963 && GET_CODE (PATTERN (next)) == SEQUENCE)
3964 next = XVECEXP (PATTERN (next), 0, 0);
3965 else if (CALL_P (next)
3966 && GET_CODE (PATTERN (next)) == PARALLEL)
3968 rtx delay = XVECEXP (PATTERN (next), 0, 1);
3970 if (GET_CODE (delay) == RETURN)
3972 /* It's a sibling call. Do not emit the nop if we're going
3973 to emit something other than the jump itself as the first
3974 instruction of the sibcall sequence. */
3975 if (sparc_leaf_function_p || TARGET_FLAT)
3976 return 0;
3980 if (NONJUMP_INSN_P (next))
3981 return 0;
3983 return 1;
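/* E.g. if a branch immediately follows the cbcond, the expected output
   is roughly

	cwbne	%o0, %o1, .Ltarget
	nop

   (register names illustrative), whereas an ordinary arithmetic insn
   after the cbcond needs no nop.  */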
3986 /* Return nonzero if TRIAL can go into the call delay slot. */
3989 eligible_for_call_delay (rtx_insn *trial)
3991 rtx pat;
3993 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
3994 return 0;
3996 /* Binutils allows
3997 call __tls_get_addr, %tgd_call (foo)
3998 add %l7, %o0, %o0, %tgd_add (foo)
3999 while Sun as/ld does not. */
4000 if (TARGET_GNU_TLS || !TARGET_TLS)
4001 return 1;
4003 pat = PATTERN (trial);
4005 /* We must reject tgd_add{32|64}, i.e.
4006 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
4007 and tldm_add{32|64}, i.e.
4008 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
4009 for Sun as/ld. */
4010 if (GET_CODE (pat) == SET
4011 && GET_CODE (SET_SRC (pat)) == PLUS)
4013 rtx unspec = XEXP (SET_SRC (pat), 1);
4015 if (GET_CODE (unspec) == UNSPEC
4016 && (XINT (unspec, 1) == UNSPEC_TLSGD
4017 || XINT (unspec, 1) == UNSPEC_TLSLDM))
4018 return 0;
4021 return 1;
4024 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
4025 instruction. RETURN_P is true if the v9 variant 'return' is to be
4026 considered in the test too.
4028 TRIAL must be a SET whose destination is a REG appropriate for the
4029 'restore' instruction or, if RETURN_P is true, for the 'return'
4030 instruction. */
4032 static int
4033 eligible_for_restore_insn (rtx trial, bool return_p)
4035 rtx pat = PATTERN (trial);
4036 rtx src = SET_SRC (pat);
4037 bool src_is_freg = false;
4038 rtx src_reg;
4040 /* Since we can now do moves between float and integer registers when
4041 VIS3 is enabled, we have to catch this case. We can allow such
4042 moves when doing a 'return', however. */
4043 src_reg = src;
4044 if (GET_CODE (src_reg) == SUBREG)
4045 src_reg = SUBREG_REG (src_reg);
4046 if (GET_CODE (src_reg) == REG
4047 && SPARC_FP_REG_P (REGNO (src_reg)))
4048 src_is_freg = true;
4050 /* The 'restore src,%g0,dest' pattern for word mode and below. */
4051 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4052 && arith_operand (src, GET_MODE (src))
4053 && ! src_is_freg)
4055 if (TARGET_ARCH64)
4056 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4057 else
4058 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
4061 /* The 'restore src,%g0,dest' pattern for double-word mode. */
4062 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4063 && arith_double_operand (src, GET_MODE (src))
4064 && ! src_is_freg)
4065 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4067 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
4068 else if (! TARGET_FPU && register_operand (src, SFmode))
4069 return 1;
4071 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
4072 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
4073 return 1;
4075 /* If we have the 'return' instruction, anything that does not use
4076 local or output registers and can go into a delay slot wins. */
4077 else if (return_p && TARGET_V9 && !epilogue_renumber (&pat, 1))
4078 return 1;
4080 /* The 'restore src1,src2,dest' pattern for SImode. */
4081 else if (GET_CODE (src) == PLUS
4082 && register_operand (XEXP (src, 0), SImode)
4083 && arith_operand (XEXP (src, 1), SImode))
4084 return 1;
4086 /* The 'restore src1,src2,dest' pattern for DImode. */
4087 else if (GET_CODE (src) == PLUS
4088 && register_operand (XEXP (src, 0), DImode)
4089 && arith_double_operand (XEXP (src, 1), DImode))
4090 return 1;
4092 /* The 'restore src1,%lo(src2),dest' pattern. */
4093 else if (GET_CODE (src) == LO_SUM
4094 && ! TARGET_CM_MEDMID
4095 && ((register_operand (XEXP (src, 0), SImode)
4096 && immediate_operand (XEXP (src, 1), SImode))
4097 || (TARGET_ARCH64
4098 && register_operand (XEXP (src, 0), DImode)
4099 && immediate_operand (XEXP (src, 1), DImode))))
4100 return 1;
4102 /* The 'restore src,src,dest' pattern. */
4103 else if (GET_CODE (src) == ASHIFT
4104 && (register_operand (XEXP (src, 0), SImode)
4105 || register_operand (XEXP (src, 0), DImode))
4106 && XEXP (src, 1) == const1_rtx)
4107 return 1;
4109 return 0;
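/* As an illustration, a function returning the sum of two values can
   fold the addition into the epilogue as roughly

	ret
	restore	%o2, %o3, %o0

   where the restore both shifts the register window and computes the
   return value in the caller's %o0 (register numbers illustrative).  */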
4112 /* Return nonzero if TRIAL can go into the function return's delay slot. */
4115 eligible_for_return_delay (rtx_insn *trial)
4117 int regno;
4118 rtx pat;
4120 /* If the function uses __builtin_eh_return, the eh_return machinery
4121 occupies the delay slot. */
4122 if (crtl->calls_eh_return)
4123 return 0;
4125 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4126 return 0;
4128 /* In the case of a leaf or flat function, anything can go into the slot. */
4129 if (sparc_leaf_function_p || TARGET_FLAT)
4130 return 1;
4132 if (!NONJUMP_INSN_P (trial))
4133 return 0;
4135 pat = PATTERN (trial);
4136 if (GET_CODE (pat) == PARALLEL)
4138 int i;
4140 if (! TARGET_V9)
4141 return 0;
4142 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
4144 rtx expr = XVECEXP (pat, 0, i);
4145 if (GET_CODE (expr) != SET)
4146 return 0;
4147 if (GET_CODE (SET_DEST (expr)) != REG)
4148 return 0;
4149 regno = REGNO (SET_DEST (expr));
4150 if (regno >= 8 && regno < 24)
4151 return 0;
4153 return !epilogue_renumber (&pat, 1);
4156 if (GET_CODE (pat) != SET)
4157 return 0;
4159 if (GET_CODE (SET_DEST (pat)) != REG)
4160 return 0;
4162 regno = REGNO (SET_DEST (pat));
4164 /* Otherwise, only operations which can be done in tandem with
4165 a `restore' or `return' insn can go into the delay slot. */
4166 if (regno >= 8 && regno < 24)
4167 return 0;
4169 /* If this instruction sets up a floating-point register and we have a
4170 return instruction, it can probably go in. But restore will not work
4171 with FP_REGS. */
4172 if (! SPARC_INT_REG_P (regno))
4173 return TARGET_V9 && !epilogue_renumber (&pat, 1);
4175 return eligible_for_restore_insn (trial, true);
4178 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
4181 eligible_for_sibcall_delay (rtx_insn *trial)
4183 rtx pat;
4185 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4186 return 0;
4188 if (!NONJUMP_INSN_P (trial))
4189 return 0;
4191 pat = PATTERN (trial);
4193 if (sparc_leaf_function_p || TARGET_FLAT)
4195 /* If the tail call is done using the call instruction,
4196 we have to restore %o7 in the delay slot. */
4197 if (LEAF_SIBCALL_SLOT_RESERVED_P)
4198 return 0;
4200 /* %g1 is used to build the function address. */
4201 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
4202 return 0;
4204 return 1;
4207 if (GET_CODE (pat) != SET)
4208 return 0;
4210 /* Otherwise, only operations which can be done in tandem with
4211 a `restore' insn can go into the delay slot. */
4212 if (GET_CODE (SET_DEST (pat)) != REG
4213 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
4214 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
4215 return 0;
4217 /* If it mentions %o7, it can't go in, because sibcall will clobber it
4218 in most cases. */
4219 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
4220 return 0;
4222 return eligible_for_restore_insn (trial, false);
4225 /* Determine if it's legal to put X into the constant pool. This
4226 is not possible if X contains the address of a symbol that is
4227 not constant (TLS) or not known at final link time (PIC). */
4229 static bool
4230 sparc_cannot_force_const_mem (machine_mode mode, rtx x)
4232 switch (GET_CODE (x))
4234 case CONST_INT:
4235 case CONST_WIDE_INT:
4236 case CONST_DOUBLE:
4237 case CONST_VECTOR:
4238 /* Accept all non-symbolic constants. */
4239 return false;
4241 case LABEL_REF:
4242 /* Labels are OK iff we are non-PIC. */
4243 return flag_pic != 0;
4245 case SYMBOL_REF:
4246 /* 'Naked' TLS symbol references are never OK;
4247 non-TLS symbols are OK iff we are non-PIC. */
4248 if (SYMBOL_REF_TLS_MODEL (x))
4249 return true;
4250 else
4251 return flag_pic != 0;
4253 case CONST:
4254 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
4255 case PLUS:
4256 case MINUS:
4257 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
4258 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
4259 case UNSPEC:
4260 return true;
4261 default:
4262 gcc_unreachable ();
4266 /* Global Offset Table support. */
4267 static GTY(()) rtx got_helper_rtx = NULL_RTX;
4268 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
4270 /* Return the SYMBOL_REF for the Global Offset Table. */
4272 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
4274 static rtx
4275 sparc_got (void)
4277 if (!sparc_got_symbol)
4278 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
4280 return sparc_got_symbol;
4283 /* Ensure that we are not using patterns that are not OK with PIC. */
4286 check_pic (int i)
4288 rtx op;
4290 switch (flag_pic)
4292 case 1:
4293 op = recog_data.operand[i];
4294 gcc_assert (GET_CODE (op) != SYMBOL_REF
4295 && (GET_CODE (op) != CONST
4296 || (GET_CODE (XEXP (op, 0)) == MINUS
4297 && XEXP (XEXP (op, 0), 0) == sparc_got ()
4298 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
4299 /* fallthrough */
4300 case 2:
4301 default:
4302 return 1;
4306 /* Return true if X is an address which needs a temporary register when
4307 reloaded while generating PIC code. */
4310 pic_address_needs_scratch (rtx x)
4312 /* An address which is a symbolic operand plus a non-SMALL_INT offset needs a temp reg. */
4313 if (GET_CODE (x) == CONST
4314 && GET_CODE (XEXP (x, 0)) == PLUS
4315 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
4316 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4317 && !SMALL_INT (XEXP (XEXP (x, 0), 1)))
4318 return 1;
4320 return 0;
4323 /* Determine if a given RTX is a valid constant. We already know this
4324 satisfies CONSTANT_P. */
4326 static bool
4327 sparc_legitimate_constant_p (machine_mode mode, rtx x)
4329 switch (GET_CODE (x))
4331 case CONST:
4332 case SYMBOL_REF:
4333 if (sparc_tls_referenced_p (x))
4334 return false;
4335 break;
4337 case CONST_DOUBLE:
4338 /* Floating-point constants are generally not OK.
4339 The only exceptions are 0.0 and all-ones in VIS. */
4340 if (TARGET_VIS
4341 && SCALAR_FLOAT_MODE_P (mode)
4342 && (const_zero_operand (x, mode)
4343 || const_all_ones_operand (x, mode)))
4344 return true;
4346 return false;
4348 case CONST_VECTOR:
4349 /* Vector constants are generally not OK.
4350 The only exceptions are 0 and -1 in VIS. */
4351 if (TARGET_VIS
4352 && (const_zero_operand (x, mode)
4353 || const_all_ones_operand (x, mode)))
4354 return true;
4356 return false;
4358 default:
4359 break;
4362 return true;
4365 /* Determine if a given RTX is a valid constant address. */
4367 bool
4368 constant_address_p (rtx x)
4370 switch (GET_CODE (x))
4372 case LABEL_REF:
4373 case CONST_INT:
4374 case HIGH:
4375 return true;
4377 case CONST:
4378 if (flag_pic && pic_address_needs_scratch (x))
4379 return false;
4380 return sparc_legitimate_constant_p (Pmode, x);
4382 case SYMBOL_REF:
4383 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
4385 default:
4386 return false;
4390 /* Nonzero if the constant value X is a legitimate general operand
4391 when generating PIC code. It is given that flag_pic is on and
4392 that X satisfies CONSTANT_P. */
4394 bool
4395 legitimate_pic_operand_p (rtx x)
4397 if (pic_address_needs_scratch (x))
4398 return false;
4399 if (sparc_tls_referenced_p (x))
4400 return false;
4401 return true;
4404 /* Return true if X is a representation of the PIC register. */
4406 static bool
4407 sparc_pic_register_p (rtx x)
4409 if (!REG_P (x) || !pic_offset_table_rtx)
4410 return false;
4412 if (x == pic_offset_table_rtx)
4413 return true;
4415 if (!HARD_REGISTER_P (pic_offset_table_rtx)
4416 && (HARD_REGISTER_P (x) || lra_in_progress)
4417 && ORIGINAL_REGNO (x) == REGNO (pic_offset_table_rtx))
4418 return true;
4420 return false;
4423 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
4424 (CONST_INT_P (X) \
4425 && INTVAL (X) >= -0x1000 \
4426 && INTVAL (X) <= (0x1000 - GET_MODE_SIZE (MODE)))
4428 #define RTX_OK_FOR_OLO10_P(X, MODE) \
4429 (CONST_INT_P (X) \
4430 && INTVAL (X) >= -0x1000 \
4431 && INTVAL (X) <= (0xc00 - GET_MODE_SIZE (MODE)))
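/* The OLO10 bound is tighter because the offset is later combined with
   a %lo() relocation, which can contribute up to 0x3ff; 0xc00 plus
   0x3ff still fits in the 13-bit signed immediate field.  */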
4433 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
4435 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
4436 ordinarily. This changes a bit when generating PIC. */
4438 static bool
4439 sparc_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4441 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
4443 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
4444 rs1 = addr;
4445 else if (GET_CODE (addr) == PLUS)
4447 rs1 = XEXP (addr, 0);
4448 rs2 = XEXP (addr, 1);
4450 /* Canonicalize. REG comes first; if there are no REGs,
4451 LO_SUM comes first. */
4452 if (!REG_P (rs1)
4453 && GET_CODE (rs1) != SUBREG
4454 && (REG_P (rs2)
4455 || GET_CODE (rs2) == SUBREG
4456 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
4458 rs1 = XEXP (addr, 1);
4459 rs2 = XEXP (addr, 0);
4462 if ((flag_pic == 1
4463 && sparc_pic_register_p (rs1)
4464 && !REG_P (rs2)
4465 && GET_CODE (rs2) != SUBREG
4466 && GET_CODE (rs2) != LO_SUM
4467 && GET_CODE (rs2) != MEM
4468 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
4469 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
4470 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
4471 || ((REG_P (rs1)
4472 || GET_CODE (rs1) == SUBREG)
4473 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
4475 imm1 = rs2;
4476 rs2 = NULL;
4478 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
4479 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
4481 /* We prohibit REG + REG for TFmode when there are no quad move insns
4482 and we consequently need to split. We do this because REG+REG
4483 is not an offsettable address. If we get the situation in reload
4484 where source and destination of a movtf pattern are both MEMs with
4485 REG+REG address, then only one of them gets converted to an
4486 offsettable address. */
4487 if (mode == TFmode
4488 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
4489 return 0;
4491 /* Likewise for TImode, but in all cases. */
4492 if (mode == TImode)
4493 return 0;
4495 /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not
4496 optimizing, because mem_min_alignment is then likely to be zero
4497 after reload and the forced split would lack a matching splitter
4498 pattern. */
4499 if (TARGET_ARCH32 && !optimize
4500 && (mode == DFmode || mode == DImode))
4501 return 0;
4503 else if (USE_AS_OFFSETABLE_LO10
4504 && GET_CODE (rs1) == LO_SUM
4505 && TARGET_ARCH64
4506 && ! TARGET_CM_MEDMID
4507 && RTX_OK_FOR_OLO10_P (rs2, mode))
4509 rs2 = NULL;
4510 imm1 = XEXP (rs1, 1);
4511 rs1 = XEXP (rs1, 0);
4512 if (!CONSTANT_P (imm1)
4513 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4514 return 0;
4517 else if (GET_CODE (addr) == LO_SUM)
4519 rs1 = XEXP (addr, 0);
4520 imm1 = XEXP (addr, 1);
4522 if (!CONSTANT_P (imm1)
4523 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4524 return 0;
4526 /* We can't allow TFmode in 32-bit mode, because an offset greater
4527 than the alignment (8) may cause the LO_SUM to overflow. */
4528 if (mode == TFmode && TARGET_ARCH32)
4529 return 0;
4531 /* During reload, accept the HIGH+LO_SUM construct generated by
4532 sparc_legitimize_reload_address. */
4533 if (reload_in_progress
4534 && GET_CODE (rs1) == HIGH
4535 && XEXP (rs1, 0) == imm1)
4536 return 1;
4538 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
4539 return 1;
4540 else
4541 return 0;
4543 if (GET_CODE (rs1) == SUBREG)
4544 rs1 = SUBREG_REG (rs1);
4545 if (!REG_P (rs1))
4546 return 0;
4548 if (rs2)
4550 if (GET_CODE (rs2) == SUBREG)
4551 rs2 = SUBREG_REG (rs2);
4552 if (!REG_P (rs2))
4553 return 0;
4556 if (strict)
4558 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
4559 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
4560 return 0;
4562 else
4564 if ((! SPARC_INT_REG_P (REGNO (rs1))
4565 && REGNO (rs1) != FRAME_POINTER_REGNUM
4566 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
4567 || (rs2
4568 && (! SPARC_INT_REG_P (REGNO (rs2))
4569 && REGNO (rs2) != FRAME_POINTER_REGNUM
4570 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
4571 return 0;
4573 return 1;
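/* Typical addresses accepted above are [%reg], [%reg + %reg],
   [%reg + simm13] and [%reg + %lo(sym)]; anything else must be
   legitimized first.  */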
4576 /* Return the SYMBOL_REF for the tls_get_addr function. */
4578 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
4580 static rtx
4581 sparc_tls_get_addr (void)
4583 if (!sparc_tls_symbol)
4584 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
4586 return sparc_tls_symbol;
4589 /* Return the Global Offset Table to be used in TLS mode. */
4591 static rtx
4592 sparc_tls_got (void)
4594 /* In PIC mode, this is just the PIC offset table. */
4595 if (flag_pic)
4597 crtl->uses_pic_offset_table = 1;
4598 return pic_offset_table_rtx;
4601 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
4602 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
4603 if (TARGET_SUN_TLS && TARGET_ARCH32)
4605 load_got_register ();
4606 return global_offset_table_rtx;
4609 /* In all other cases, we load a new pseudo with the GOT symbol. */
4610 return copy_to_reg (sparc_got ());
4613 /* Return true if X contains a thread-local symbol. */
4615 static bool
4616 sparc_tls_referenced_p (rtx x)
4618 if (!TARGET_HAVE_TLS)
4619 return false;
4621 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
4622 x = XEXP (XEXP (x, 0), 0);
4624 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
4625 return true;
4627 /* That's all we handle in sparc_legitimize_tls_address for now. */
4628 return false;
4631 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4632 this (thread-local) address. */
4634 static rtx
4635 sparc_legitimize_tls_address (rtx addr)
4637 rtx temp1, temp2, temp3, ret, o0, got;
4638 rtx_insn *insn;
4640 gcc_assert (can_create_pseudo_p ());
4642 if (GET_CODE (addr) == SYMBOL_REF)
4643 switch (SYMBOL_REF_TLS_MODEL (addr))
4645 case TLS_MODEL_GLOBAL_DYNAMIC:
4646 start_sequence ();
4647 temp1 = gen_reg_rtx (SImode);
4648 temp2 = gen_reg_rtx (SImode);
4649 ret = gen_reg_rtx (Pmode);
4650 o0 = gen_rtx_REG (Pmode, 8);
4651 got = sparc_tls_got ();
4652 emit_insn (gen_tgd_hi22 (temp1, addr));
4653 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
4654 if (TARGET_ARCH32)
4656 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
4657 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
4658 addr, const1_rtx));
4660 else
4662 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
4663 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
4664 addr, const1_rtx));
4666 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4667 insn = get_insns ();
4668 end_sequence ();
4669 emit_libcall_block (insn, ret, o0, addr);
4670 break;
4672 case TLS_MODEL_LOCAL_DYNAMIC:
4673 start_sequence ();
4674 temp1 = gen_reg_rtx (SImode);
4675 temp2 = gen_reg_rtx (SImode);
4676 temp3 = gen_reg_rtx (Pmode);
4677 ret = gen_reg_rtx (Pmode);
4678 o0 = gen_rtx_REG (Pmode, 8);
4679 got = sparc_tls_got ();
4680 emit_insn (gen_tldm_hi22 (temp1));
4681 emit_insn (gen_tldm_lo10 (temp2, temp1));
4682 if (TARGET_ARCH32)
4684 emit_insn (gen_tldm_add32 (o0, got, temp2));
4685 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
4686 const1_rtx));
4688 else
4690 emit_insn (gen_tldm_add64 (o0, got, temp2));
4691 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
4692 const1_rtx));
4694 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4695 insn = get_insns ();
4696 end_sequence ();
4697 emit_libcall_block (insn, temp3, o0,
4698 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
4699 UNSPEC_TLSLD_BASE));
4700 temp1 = gen_reg_rtx (SImode);
4701 temp2 = gen_reg_rtx (SImode);
4702 emit_insn (gen_tldo_hix22 (temp1, addr));
4703 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
4704 if (TARGET_ARCH32)
4705 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
4706 else
4707 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
4708 break;
4710 case TLS_MODEL_INITIAL_EXEC:
4711 temp1 = gen_reg_rtx (SImode);
4712 temp2 = gen_reg_rtx (SImode);
4713 temp3 = gen_reg_rtx (Pmode);
4714 got = sparc_tls_got ();
4715 emit_insn (gen_tie_hi22 (temp1, addr));
4716 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
4717 if (TARGET_ARCH32)
4718 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
4719 else
4720 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
4721 if (TARGET_SUN_TLS)
4723 ret = gen_reg_rtx (Pmode);
4724 if (TARGET_ARCH32)
4725 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
4726 temp3, addr));
4727 else
4728 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
4729 temp3, addr));
4731 else
4732 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
4733 break;
4735 case TLS_MODEL_LOCAL_EXEC:
4736 temp1 = gen_reg_rtx (Pmode);
4737 temp2 = gen_reg_rtx (Pmode);
4738 if (TARGET_ARCH32)
4740 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
4741 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
4743 else
4745 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
4746 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
4748 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
4749 break;
4751 default:
4752 gcc_unreachable ();
4755 else if (GET_CODE (addr) == CONST)
4757 rtx base, offset;
4759 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
4761 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
4762 offset = XEXP (XEXP (addr, 0), 1);
4764 base = force_operand (base, NULL_RTX);
4765 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
4766 offset = force_reg (Pmode, offset);
4767 ret = gen_rtx_PLUS (Pmode, base, offset);
4770 else
4771 gcc_unreachable (); /* for now ... */
4773 return ret;
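/* For reference, the global-dynamic sequence built above should look
   roughly like

	sethi	%tgd_hi22(sym), %t1
	add	%t1, %tgd_lo10(sym), %t2
	add	%l7, %t2, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)
	 nop

   (temporaries illustrative), with the thread pointer living in %g7.  */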
4776 /* Legitimize PIC addresses. If the address is already position-independent,
4777 we return ORIG. Newly generated position-independent addresses go into a
4778 reg. This is REG if nonzero, otherwise we allocate register(s) as
4779 necessary. */
4781 static rtx
4782 sparc_legitimize_pic_address (rtx orig, rtx reg)
4784 if (GET_CODE (orig) == SYMBOL_REF
4785 /* See the comment in sparc_expand_move. */
4786 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
4788 bool gotdata_op = false;
4789 rtx pic_ref, address;
4790 rtx_insn *insn;
4792 if (!reg)
4794 gcc_assert (can_create_pseudo_p ());
4795 reg = gen_reg_rtx (Pmode);
4798 if (flag_pic == 2)
4800 /* If not during reload, allocate another temp reg here for loading
4801 in the address, so that these instructions can be optimized
4802 properly. */
4803 rtx temp_reg = can_create_pseudo_p () ? gen_reg_rtx (Pmode) : reg;
4805 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
4806 won't get confused into thinking that these two instructions
4807 are loading the true address of the symbol. If in the
4808 future a PIC rtx exists, that should be used instead. */
4809 if (TARGET_ARCH64)
4811 emit_insn (gen_movdi_high_pic (temp_reg, orig));
4812 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
4814 else
4816 emit_insn (gen_movsi_high_pic (temp_reg, orig));
4817 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
4820 address = temp_reg;
4821 gotdata_op = true;
4823 else
4824 address = orig;
4826 crtl->uses_pic_offset_table = 1;
4827 if (gotdata_op)
4829 if (TARGET_ARCH64)
4830 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
4831 pic_offset_table_rtx,
4832 address, orig));
4833 else
4834 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
4835 pic_offset_table_rtx,
4836 address, orig));
4838 else
4840 pic_ref
4841 = gen_const_mem (Pmode,
4842 gen_rtx_PLUS (Pmode,
4843 pic_offset_table_rtx, address));
4844 insn = emit_move_insn (reg, pic_ref);
4847 /* Put a REG_EQUAL note on this insn, so that it can be optimized
4848 by the loop optimizer. */
4849 set_unique_reg_note (insn, REG_EQUAL, orig);
4850 return reg;
4852 else if (GET_CODE (orig) == CONST)
4854 rtx base, offset;
4856 if (GET_CODE (XEXP (orig, 0)) == PLUS
4857 && sparc_pic_register_p (XEXP (XEXP (orig, 0), 0)))
4858 return orig;
4860 if (!reg)
4862 gcc_assert (can_create_pseudo_p ());
4863 reg = gen_reg_rtx (Pmode);
4866 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
4867 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
4868 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
4869 base == reg ? NULL_RTX : reg);
4871 if (GET_CODE (offset) == CONST_INT)
4873 if (SMALL_INT (offset))
4874 return plus_constant (Pmode, base, INTVAL (offset));
4875 else if (can_create_pseudo_p ())
4876 offset = force_reg (Pmode, offset);
4877 else
4878 /* If we reach here, then something is seriously wrong. */
4879 gcc_unreachable ();
4881 return gen_rtx_PLUS (Pmode, base, offset);
4883 else if (GET_CODE (orig) == LABEL_REF)
4884 /* ??? We ought to be checking that the register is live instead, in case
4885 it is eliminated. */
4886 crtl->uses_pic_offset_table = 1;
4888 return orig;
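/* With flag_pic == 2 and GOTDATA support in the assembler, the code
   built above should look roughly like

	sethi	%gdop_hix22(sym), %t
	xor	%t, %gdop_lox10(sym), %t
	ld	[%l7 + %t], %reg, %gdop(sym)

   (temporaries illustrative), which lets the linker relax the GOT
   load for symbols that turn out to be local.  */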
4891 /* Try machine-dependent ways of modifying an illegitimate address X
4892 to be legitimate. If we find one, return the new, valid address.
4894 OLDX is the address as it was before break_out_memory_refs was called.
4895 In some cases it is useful to look at this to decide what needs to be done.
4897 MODE is the mode of the operand pointed to by X.
4899 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
4901 static rtx
4902 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4903 machine_mode mode)
4905 rtx orig_x = x;
4907 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
4908 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4909 force_operand (XEXP (x, 0), NULL_RTX));
4910 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
4911 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4912 force_operand (XEXP (x, 1), NULL_RTX));
4913 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
4914 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
4915 XEXP (x, 1));
4916 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
4917 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4918 force_operand (XEXP (x, 1), NULL_RTX));
4920 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
4921 return x;
4923 if (sparc_tls_referenced_p (x))
4924 x = sparc_legitimize_tls_address (x);
4925 else if (flag_pic)
4926 x = sparc_legitimize_pic_address (x, NULL_RTX);
4927 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
4928 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4929 copy_to_mode_reg (Pmode, XEXP (x, 1)));
4930 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
4931 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4932 copy_to_mode_reg (Pmode, XEXP (x, 0)));
4933 else if (GET_CODE (x) == SYMBOL_REF
4934 || GET_CODE (x) == CONST
4935 || GET_CODE (x) == LABEL_REF)
4936 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
4938 return x;
4941 /* Delegitimize an address that was legitimized by the above function. */
4943 static rtx
4944 sparc_delegitimize_address (rtx x)
4946 x = delegitimize_mem_from_attrs (x);
4948 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
4949 switch (XINT (XEXP (x, 1), 1))
4951 case UNSPEC_MOVE_PIC:
4952 case UNSPEC_TLSLE:
4953 x = XVECEXP (XEXP (x, 1), 0, 0);
4954 gcc_assert (GET_CODE (x) == SYMBOL_REF);
4955 break;
4956 default:
4957 break;
4960 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
4961 if (GET_CODE (x) == MINUS
4962 && sparc_pic_register_p (XEXP (x, 0))
4963 && GET_CODE (XEXP (x, 1)) == LO_SUM
4964 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
4965 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
4967 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
4968 gcc_assert (GET_CODE (x) == LABEL_REF
4969 || (GET_CODE (x) == CONST
4970 && GET_CODE (XEXP (x, 0)) == PLUS
4971 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
4972 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT));
4975 return x;
4978 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
4979 replace the input X, or the original X if no replacement is called for.
4980 The output parameter *WIN is 1 if the calling macro should goto WIN,
4981 0 if it should not.
4983 For SPARC, we wish to handle addresses by splitting them into
4984 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
4985 This cuts the number of extra insns by one.
4987 Do nothing when generating PIC code and the address is a symbolic
4988 operand or requires a scratch register. */
4991 sparc_legitimize_reload_address (rtx x, machine_mode mode,
4992 int opnum, int type,
4993 int ind_levels ATTRIBUTE_UNUSED, int *win)
4995 /* Decompose SImode constants into HIGH+LO_SUM. */
4996 if (CONSTANT_P (x)
4997 && (mode != TFmode || TARGET_ARCH64)
4998 && GET_MODE (x) == SImode
4999 && GET_CODE (x) != LO_SUM
5000 && GET_CODE (x) != HIGH
5001 && sparc_cmodel <= CM_MEDLOW
5002 && !(flag_pic
5003 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
5005 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
5006 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5007 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5008 opnum, (enum reload_type)type);
5009 *win = 1;
5010 return x;
5013 /* We have to recognize what we have already generated above. */
5014 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
5016 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5017 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5018 opnum, (enum reload_type)type);
5019 *win = 1;
5020 return x;
5023 *win = 0;
5024 return x;
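/* The HIGH+LO_SUM decomposition above turns a constant address C into
   roughly

	sethi	%hi(C), %tmp
	ld	[%tmp + %lo(C)], %reg

   so the memory reference keeps the LO_SUM and only the sethi result
   needs a reload register (mnemonics illustrative).  */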
5027 /* Return true if ADDR (a legitimate address expression)
5028 has an effect that depends on the machine mode it is used for.
5030 In PIC mode,
5032 (mem:HI [%l7+a])
5034 is not equivalent to
5036 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
5038 because [%l7+a+1] is interpreted as the address of (a+1). */
5041 static bool
5042 sparc_mode_dependent_address_p (const_rtx addr,
5043 addr_space_t as ATTRIBUTE_UNUSED)
5045 if (GET_CODE (addr) == PLUS
5046 && sparc_pic_register_p (XEXP (addr, 0))
5047 && symbolic_operand (XEXP (addr, 1), VOIDmode))
5048 return true;
5050 return false;
5053 #ifdef HAVE_GAS_HIDDEN
5054 # define USE_HIDDEN_LINKONCE 1
5055 #else
5056 # define USE_HIDDEN_LINKONCE 0
5057 #endif
5059 static void
5060 get_pc_thunk_name (char name[32], unsigned int regno)
5062 const char *reg_name = reg_names[regno];
5064 /* Skip the leading '%' as that cannot be used in a
5065 symbol name. */
5066 reg_name += 1;
5068 if (USE_HIDDEN_LINKONCE)
5069 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
5070 else
5071 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
5074 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
5076 static rtx
5077 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2)
5079 int orig_flag_pic = flag_pic;
5080 rtx insn;
5082 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
5083 flag_pic = 0;
5084 if (TARGET_ARCH64)
5085 insn = gen_load_pcrel_symdi (op0, op1, op2, GEN_INT (REGNO (op0)));
5086 else
5087 insn = gen_load_pcrel_symsi (op0, op1, op2, GEN_INT (REGNO (op0)));
5088 flag_pic = orig_flag_pic;
5090 return insn;
5093 /* Emit code to load the GOT register. */
5095 void
5096 load_got_register (void)
5098 if (!global_offset_table_rtx)
5099 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
5101 if (TARGET_VXWORKS_RTP)
5102 emit_insn (gen_vxworks_load_got ());
5103 else
5105 /* The GOT symbol is subject to a PC-relative relocation so we need a
5106 helper function to add the PC value and thus get the final value. */
5107 if (!got_helper_rtx)
5109 char name[32];
5110 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
5111 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5114 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
5115 got_helper_rtx));
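/* The resulting sequence should look roughly like

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	__sparc_get_pc_thunk.l7
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where the thunk adds %o7 (the PC of the call) into %l7.  */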
5119 /* Emit a call instruction with the pattern given by PAT. ADDR is the
5120 address of the call target. */
5122 void
5123 sparc_emit_call_insn (rtx pat, rtx addr)
5125 rtx_insn *insn;
5127 insn = emit_call_insn (pat);
5129 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
5130 if (TARGET_VXWORKS_RTP
5131 && flag_pic
5132 && GET_CODE (addr) == SYMBOL_REF
5133 && (SYMBOL_REF_DECL (addr)
5134 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
5135 : !SYMBOL_REF_LOCAL_P (addr)))
5137 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
5138 crtl->uses_pic_offset_table = 1;
5142 /* Return 1 if RTX is a MEM which is known to be aligned to at
5143 least a DESIRED byte boundary. */
5146 mem_min_alignment (rtx mem, int desired)
5148 rtx addr, base, offset;
5150 /* If it's not a MEM we can't accept it. */
5151 if (GET_CODE (mem) != MEM)
5152 return 0;
5154 /* Obviously... */
5155 if (!TARGET_UNALIGNED_DOUBLES
5156 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
5157 return 1;
5159 /* ??? The rest of the function predates MEM_ALIGN so
5160 there is probably a bit of redundancy. */
5161 addr = XEXP (mem, 0);
5162 base = offset = NULL_RTX;
5163 if (GET_CODE (addr) == PLUS)
5165 if (GET_CODE (XEXP (addr, 0)) == REG)
5167 base = XEXP (addr, 0);
5169 /* The point here is that if the base REG is
5170 properly aligned, the compiler will ensure that
5171 any REG-based index computed from it is
5172 properly aligned as well. */
5173 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
5174 offset = XEXP (addr, 1);
5175 else
5176 offset = const0_rtx;
5179 else if (GET_CODE (addr) == REG)
5181 base = addr;
5182 offset = const0_rtx;
5185 if (base != NULL_RTX)
5187 int regno = REGNO (base);
5189 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
5191 /* Check if the compiler has recorded some information
5192 about the alignment of the base REG. If reload has
5193 completed, we already matched with proper alignments.
5194 If not running global_alloc, reload might give us an
5195 unaligned pointer to the local stack, though. */
5196 if (((cfun != 0
5197 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
5198 || (optimize && reload_completed))
5199 && (INTVAL (offset) & (desired - 1)) == 0)
5200 return 1;
5202 else
5204 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
5205 return 1;
5208 else if (! TARGET_UNALIGNED_DOUBLES
5209 || CONSTANT_P (addr)
5210 || GET_CODE (addr) == LO_SUM)
5212 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
5213 is true, in which case we can only assume that an access is aligned if
5214 it is to a constant address, or the address involves a LO_SUM. */
5215 return 1;
5218 /* An obviously unaligned address. */
5219 return 0;
5223 /* Vectors to keep interesting information about registers where it can
5224 easily be found. We used to use the actual mode value as the bit number,
5225 but there are more than 32 modes now. Instead we use two tables: one
5226 indexed by hard register number, and one indexed by mode. */
5228 /* The purpose of sparc_mode_class is to shrink the range of modes so that
5229 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
5230 mapped into one sparc_mode_class mode. */
5232 enum sparc_mode_class {
5233 H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
5234 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
5235 CC_MODE, CCFP_MODE
5238 /* Modes for single-word and smaller quantities. */
5239 #define S_MODES \
5240 ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))
5242 /* Modes for double-word and smaller quantities. */
5243 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5245 /* Modes for quad-word and smaller quantities. */
5246 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
5248 /* Modes for 8-word and smaller quantities. */
5249 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
5251 /* Modes for single-float quantities. */
5252 #define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
5254 /* Modes for double-float and smaller quantities. */
5255 #define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5257 /* Modes for quad-float and smaller quantities. */
5258 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
5260 /* Modes for quad-float pairs and smaller quantities. */
5261 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
5263 /* Modes for double-float only quantities. */
5264 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
5266 /* Modes for quad-float and double-float only quantities. */
5267 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
5269 /* Modes for quad-float pairs and double-float only quantities. */
5270 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
5272 /* Modes for condition codes. */
5273 #define CC_MODES (1 << (int) CC_MODE)
5274 #define CCFP_MODES (1 << (int) CCFP_MODE)
5276 /* Value is 1 if register/mode pair is acceptable on sparc.
5278 The funny mixture of D and T modes is because integer operations
5279 do not specially operate on tetra quantities, so non-quad-aligned
5280 registers can hold quadword quantities (except %o4 and %i4 because
5281 they cross fixed registers).
5283 ??? Note that, despite the settings, non-double-aligned parameter
5284 registers can hold double-word quantities in 32-bit mode. */
5286 /* This points to either the 32-bit or the 64-bit version. */
5287 static const int *hard_regno_mode_classes;
5289 static const int hard_32bit_mode_classes[] = {
5290 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5291 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5292 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5293 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5295 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5296 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5297 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5298 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5300 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5301 and none can hold SFmode/SImode values. */
5302 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5303 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5304 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5305 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5307 /* %fcc[0123] */
5308 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5310 /* %icc, %sfp, %gsr */
5311 CC_MODES, 0, D_MODES
5314 static const int hard_64bit_mode_classes[] = {
5315 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5316 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5317 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5318 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5320 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5321 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5322 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5323 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5325 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5326 and none can hold SFmode/SImode values. */
5327 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5328 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5329 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5330 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5332 /* %fcc[0123] */
5333 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5335 /* %icc, %sfp, %gsr */
5336 CC_MODES, 0, D_MODES
5339 static int sparc_mode_class [NUM_MACHINE_MODES];
5341 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
5343 static void
5344 sparc_init_modes (void)
5346 int i;
5348 for (i = 0; i < NUM_MACHINE_MODES; i++)
5350 machine_mode m = (machine_mode) i;
5351 unsigned int size = GET_MODE_SIZE (m);
5353 switch (GET_MODE_CLASS (m))
5355 case MODE_INT:
5356 case MODE_PARTIAL_INT:
5357 case MODE_COMPLEX_INT:
5358 if (size < 4)
5359 sparc_mode_class[i] = 1 << (int) H_MODE;
5360 else if (size == 4)
5361 sparc_mode_class[i] = 1 << (int) S_MODE;
5362 else if (size == 8)
5363 sparc_mode_class[i] = 1 << (int) D_MODE;
5364 else if (size == 16)
5365 sparc_mode_class[i] = 1 << (int) T_MODE;
5366 else if (size == 32)
5367 sparc_mode_class[i] = 1 << (int) O_MODE;
5368 else
5369 sparc_mode_class[i] = 0;
5370 break;
5371 case MODE_VECTOR_INT:
5372 if (size == 4)
5373 sparc_mode_class[i] = 1 << (int) SF_MODE;
5374 else if (size == 8)
5375 sparc_mode_class[i] = 1 << (int) DF_MODE;
5376 else
5377 sparc_mode_class[i] = 0;
5378 break;
5379 case MODE_FLOAT:
5380 case MODE_COMPLEX_FLOAT:
5381 if (size == 4)
5382 sparc_mode_class[i] = 1 << (int) SF_MODE;
5383 else if (size == 8)
5384 sparc_mode_class[i] = 1 << (int) DF_MODE;
5385 else if (size == 16)
5386 sparc_mode_class[i] = 1 << (int) TF_MODE;
5387 else if (size == 32)
5388 sparc_mode_class[i] = 1 << (int) OF_MODE;
5389 else
5390 sparc_mode_class[i] = 0;
5391 break;
5392 case MODE_CC:
5393 if (m == CCFPmode || m == CCFPEmode)
5394 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
5395 else
5396 sparc_mode_class[i] = 1 << (int) CC_MODE;
5397 break;
5398 default:
5399 sparc_mode_class[i] = 0;
5400 break;
5404 if (TARGET_ARCH64)
5405 hard_regno_mode_classes = hard_64bit_mode_classes;
5406 else
5407 hard_regno_mode_classes = hard_32bit_mode_classes;
5409 /* Initialize the array used by REGNO_REG_CLASS. */
5410 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5412 if (i < 16 && TARGET_V8PLUS)
5413 sparc_regno_reg_class[i] = I64_REGS;
5414 else if (i < 32 || i == FRAME_POINTER_REGNUM)
5415 sparc_regno_reg_class[i] = GENERAL_REGS;
5416 else if (i < 64)
5417 sparc_regno_reg_class[i] = FP_REGS;
5418 else if (i < 96)
5419 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
5420 else if (i < 100)
5421 sparc_regno_reg_class[i] = FPCC_REGS;
5422 else
5423 sparc_regno_reg_class[i] = NO_REGS;
5427 /* Return whether REGNO, a global or FP register, must be saved/restored. */
5429 static inline bool
5430 save_global_or_fp_reg_p (unsigned int regno,
5431 int leaf_function ATTRIBUTE_UNUSED)
5433 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
5436 /* Return whether the return address register (%i7) is needed. */
5438 static inline bool
5439 return_addr_reg_needed_p (int leaf_function)
5441 /* If it is live, for example because of __builtin_return_address (0). */
5442 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
5443 return true;
5445 /* Otherwise, it is needed as save register if %o7 is clobbered. */
5446 if (!leaf_function
5447 /* Loading the GOT register clobbers %o7. */
5448 || crtl->uses_pic_offset_table
5449 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
5450 return true;
5452 return false;
5455 /* Return whether REGNO, a local or in register, must be saved/restored. */
5457 static bool
5458 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
5460 /* General case: call-saved registers live at some point. */
5461 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
5462 return true;
5464 /* Frame pointer register (%fp) if needed. */
5465 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5466 return true;
5468 /* Return address register (%i7) if needed. */
5469 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
5470 return true;
5472 /* GOT register (%l7) if needed. */
5473 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
5474 return true;
5476 /* If the function accesses prior frames, the frame pointer and the return
5477 address of the previous frame must be saved on the stack. */
5478 if (crtl->accesses_prior_frames
5479 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
5480 return true;
5482 return false;
5485 /* Compute the frame size required by the function. This function is called
5486 during the reload pass and also by sparc_expand_prologue. */
5488 static HOST_WIDE_INT
5489 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
5491 HOST_WIDE_INT frame_size, apparent_frame_size;
5492 int args_size, n_global_fp_regs = 0;
5493 bool save_local_in_regs_p = false;
5494 unsigned int i;
5496 /* If the function allocates dynamic stack space, the dynamic offset is
5497 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
5498 if (leaf_function && !cfun->calls_alloca)
5499 args_size = 0;
5500 else
5501 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
5503 /* Calculate space needed for global registers. */
5504 if (TARGET_ARCH64)
5506 for (i = 0; i < 8; i++)
5507 if (save_global_or_fp_reg_p (i, 0))
5508 n_global_fp_regs += 2;
5510 else
5512 for (i = 0; i < 8; i += 2)
5513 if (save_global_or_fp_reg_p (i, 0)
5514 || save_global_or_fp_reg_p (i + 1, 0))
5515 n_global_fp_regs += 2;
5518 /* In the flat window model, find out which local and in registers need to
5519 be saved. We don't reserve space in the current frame for them as they
5520 will be spilled into the register window save area of the caller's frame.
5521 However, as soon as we use this register window save area, we must create
5522 that of the current frame to make it the live one. */
5523 if (TARGET_FLAT)
5524 for (i = 16; i < 32; i++)
5525 if (save_local_or_in_reg_p (i, leaf_function))
5527 save_local_in_regs_p = true;
5528 break;
5531 /* Calculate space needed for FP registers. */
5532 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
5533 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
5534 n_global_fp_regs += 2;
5536 if (size == 0
5537 && n_global_fp_regs == 0
5538 && args_size == 0
5539 && !save_local_in_regs_p)
5540 frame_size = apparent_frame_size = 0;
5541 else
5543 /* Start from the apparent frame size. */
5544 apparent_frame_size = ROUND_UP (size, 8) + n_global_fp_regs * 4;
5546 /* We need to add the size of the outgoing argument area. */
5547 frame_size = apparent_frame_size + ROUND_UP (args_size, 8);
5549 /* And that of the register window save area. */
5550 frame_size += FIRST_PARM_OFFSET (cfun->decl);
5552 /* Finally, bump to the appropriate alignment. */
5553 frame_size = SPARC_STACK_ALIGN (frame_size);
5556 /* Set up values for use in prologue and epilogue. */
5557 sparc_frame_size = frame_size;
5558 sparc_apparent_frame_size = apparent_frame_size;
5559 sparc_n_global_fp_regs = n_global_fp_regs;
5560 sparc_save_local_in_regs_p = save_local_in_regs_p;
5562 return frame_size;
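/* E.g. on 32-bit, a non-leaf frame with no locals and no saved FP or
   global registers still consists of the 16-word register window save
   area, the hidden structure-return slot and 6 outgoing parameter
   words, which rounds up to the familiar "save %sp, -96, %sp".  */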
5565 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
5568 sparc_initial_elimination_offset (int to)
5570 int offset;
5572 if (to == STACK_POINTER_REGNUM)
5573 offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
5574 else
5575 offset = 0;
5577 offset += SPARC_STACK_BIAS;
5578 return offset;
5581 /* Output any necessary .register pseudo-ops. */
5583 void
5584 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
5586 int i;
5588 if (TARGET_ARCH32)
5589 return;
5591 /* Check if %g[2367] were used without
5592 .register being printed for them already. */
5593 for (i = 2; i < 8; i++)
5595 if (df_regs_ever_live_p (i)
5596 && ! sparc_hard_reg_printed [i])
5598 sparc_hard_reg_printed [i] = 1;
5599 /* %g7 is used as the TLS base register; use #ignore
5600 for it instead of #scratch. */
5601 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
5602 i == 7 ? "ignore" : "scratch");
5604 if (i == 3) i = 5;
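/* This emits directives such as

	.register	%g2, #scratch
	.register	%g7, #ignore

   so that the V9 assembler accepts uses of the application-reserved
   global registers.  */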
5608 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
5610 #if PROBE_INTERVAL > 4096
5611 #error Cannot use indexed addressing mode for stack probing
5612 #endif
5614 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
5615 inclusive. These are offsets from the current stack pointer.
5617 Note that we don't use the REG+REG addressing mode for the probes because
5618 of the stack bias in 64-bit mode. And it doesn't really buy us anything,
5619 so the advantages of having a single code path win here. */
5621 static void
5622 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
5624 rtx g1 = gen_rtx_REG (Pmode, 1);
5626 /* See if we have a constant small number of probes to generate. If so,
5627 that's the easy case. */
5628 if (size <= PROBE_INTERVAL)
5630 emit_move_insn (g1, GEN_INT (first));
5631 emit_insn (gen_rtx_SET (g1,
5632 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5633 emit_stack_probe (plus_constant (Pmode, g1, -size));
5636 /* The run-time loop is made up of 9 insns in the generic case while the
5637 compile-time loop is made up of 4+2*(n-2) insns for n intervals. */
5638 else if (size <= 4 * PROBE_INTERVAL)
5640 HOST_WIDE_INT i;
5642 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
5643 emit_insn (gen_rtx_SET (g1,
5644 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5645 emit_stack_probe (g1);
5647 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
5648 it exceeds SIZE. If only two probes are needed, this will not
5649 generate any code. Then probe at FIRST + SIZE. */
5650 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
5652 emit_insn (gen_rtx_SET (g1,
5653 plus_constant (Pmode, g1, -PROBE_INTERVAL)));
5654 emit_stack_probe (g1);
5657 emit_stack_probe (plus_constant (Pmode, g1,
5658 (i - PROBE_INTERVAL) - size));
5661 /* Otherwise, do the same as above, but in a loop. Note that we must be
5662 extra careful with variables wrapping around because we might be at
5663 the very top (or the very bottom) of the address space and we have
5664 to be able to handle this case properly; in particular, we use an
5665 equality test for the loop condition. */
5666 else
5668 HOST_WIDE_INT rounded_size;
5669 rtx g4 = gen_rtx_REG (Pmode, 4);
5671 emit_move_insn (g1, GEN_INT (first));
5674 /* Step 1: round SIZE to the previous multiple of the interval. */
5676 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
5677 emit_move_insn (g4, GEN_INT (rounded_size));
5680 /* Step 2: compute initial and final value of the loop counter. */
5682 /* TEST_ADDR = SP + FIRST. */
5683 emit_insn (gen_rtx_SET (g1,
5684 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5686 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
5687 emit_insn (gen_rtx_SET (g4, gen_rtx_MINUS (Pmode, g1, g4)));
5690 /* Step 3: the loop
5692 while (TEST_ADDR != LAST_ADDR)
5694 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
5695 probe at TEST_ADDR
5698 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
5699 until it is equal to ROUNDED_SIZE. */
5701 if (TARGET_ARCH64)
5702 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
5703 else
5704 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
5707 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
5708 that SIZE is equal to ROUNDED_SIZE. */
5710 if (size != rounded_size)
5711 emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
5714 /* Make sure nothing is scheduled before we are done. */
5715 emit_insn (gen_blockage ());
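/* For example, with PROBE_INTERVAL == 4096, FIRST == 0 and
   SIZE == 12288, the unrolled case above probes the words at
   %sp - 4096, %sp - 8192 and %sp - 12288 (plus the stack bias in
   64-bit mode).  */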
5718 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
5719 absolute addresses. */
5721 const char *
5722 output_probe_stack_range (rtx reg1, rtx reg2)
5724 static int labelno = 0;
5725 char loop_lab[32];
5726 rtx xops[2];
5728 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
5730 /* Loop. */
5731 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
5733 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
5734 xops[0] = reg1;
5735 xops[1] = GEN_INT (-PROBE_INTERVAL);
5736 output_asm_insn ("add\t%0, %1, %0", xops);
5738 /* Test if TEST_ADDR == LAST_ADDR. */
5739 xops[1] = reg2;
5740 output_asm_insn ("cmp\t%0, %1", xops);
5742 /* Probe at TEST_ADDR and branch. */
5743 if (TARGET_ARCH64)
5744 fputs ("\tbne,pt\t%xcc,", asm_out_file);
5745 else
5746 fputs ("\tbne\t", asm_out_file);
5747 assemble_name_raw (asm_out_file, loop_lab);
5748 fputc ('\n', asm_out_file);
5749 xops[1] = GEN_INT (SPARC_STACK_BIAS);
5750 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
5752 return "";
5755 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
5756 needed. LOW is supposed to be double-word aligned for 32-bit registers.
5757 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
5758 is the action to be performed if SAVE_P returns true and ACTION_FALSE
5759 the action to be performed if it returns false. Return the new offset. */
5761 typedef bool (*sorr_pred_t) (unsigned int, int);
5762 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
5764 static int
5765 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
5766 int offset, int leaf_function, sorr_pred_t save_p,
5767 sorr_act_t action_true, sorr_act_t action_false)
5769 unsigned int i;
5770 rtx mem;
5771 rtx_insn *insn;
5773 if (TARGET_ARCH64 && high <= 32)
5775 int fp_offset = -1;
5777 for (i = low; i < high; i++)
5779 if (save_p (i, leaf_function))
5781 mem = gen_frame_mem (DImode, plus_constant (Pmode,
5782 base, offset));
5783 if (action_true == SORR_SAVE)
5785 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
5786 RTX_FRAME_RELATED_P (insn) = 1;
5788 else /* action_true == SORR_RESTORE */
5790 /* The frame pointer must be restored last since its old
5791 value may be used as the base address for the frame. This
5792 is problematic in 64-bit mode only because of the lack
5793 of a double-word load instruction. */
5794 if (i == HARD_FRAME_POINTER_REGNUM)
5795 fp_offset = offset;
5796 else
5797 emit_move_insn (gen_rtx_REG (DImode, i), mem);
5799 offset += 8;
5801 else if (action_false == SORR_ADVANCE)
5802 offset += 8;
5805 if (fp_offset >= 0)
5807 mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
5808 emit_move_insn (hard_frame_pointer_rtx, mem);
5811 else
5813 for (i = low; i < high; i += 2)
5815 bool reg0 = save_p (i, leaf_function);
5816 bool reg1 = save_p (i + 1, leaf_function);
5817 machine_mode mode;
5818 int regno;
5820 if (reg0 && reg1)
5822 mode = SPARC_INT_REG_P (i) ? E_DImode : E_DFmode;
5823 regno = i;
5825 else if (reg0)
5827 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5828 regno = i;
5830 else if (reg1)
5832 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5833 regno = i + 1;
5834 offset += 4;
5836 else
5838 if (action_false == SORR_ADVANCE)
5839 offset += 8;
5840 continue;
5843 mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
5844 if (action_true == SORR_SAVE)
5846 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
5847 RTX_FRAME_RELATED_P (insn) = 1;
5848 if (mode == DImode)
5850 rtx set1, set2;
5851 mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
5852 offset));
5853 set1 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno));
5854 RTX_FRAME_RELATED_P (set1) = 1;
5855 mem
5856 = gen_frame_mem (SImode, plus_constant (Pmode, base,
5857 offset + 4));
5858 set2 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno + 1));
5859 RTX_FRAME_RELATED_P (set2) = 1;
5860 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5861 gen_rtx_PARALLEL (VOIDmode,
5862 gen_rtvec (2, set1, set2)));
5865 else /* action_true == SORR_RESTORE */
5866 emit_move_insn (gen_rtx_REG (mode, regno), mem);
5868 /* Bump and round down to double word
5869 in case we already bumped by 4. */
5870 offset = ROUND_DOWN (offset + 8, 8);
5874 return offset;
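/* A hedged sketch of a SAVE_P-style predicate as the walker above expects
   it: a pure function of (regno, leaf_function) that answers identically
   during the save and the restore passes, so both walks compute the same
   offsets.  example_save_reg_p is hypothetical; the real predicates are
   save_global_or_fp_reg_p and save_local_or_in_reg_p.  */

static bool
example_save_reg_p (unsigned int regno, int leaf_function ATTRIBUTE_UNUSED)
{
  /* Save a register that is live somewhere but not free for the callee
     to clobber.  */
  return df_regs_ever_live_p (regno) && !call_used_regs[regno];
}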
5877 /* Emit code to adjust BASE to OFFSET. Return the new base. */
5879 static rtx
5880 emit_adjust_base_to_offset (rtx base, int offset)
5882 /* ??? This might be optimized a little as %g1 might already have a
5883 value close enough that a single add insn will do. */
5884 /* ??? Although, all of this is probably only a temporary fix because
5885 if %g1 can hold a function result, then sparc_expand_epilogue will
5886 lose (the result will be clobbered). */
5887 rtx new_base = gen_rtx_REG (Pmode, 1);
5888 emit_move_insn (new_base, GEN_INT (offset));
5889 emit_insn (gen_rtx_SET (new_base, gen_rtx_PLUS (Pmode, base, new_base)));
5890 return new_base;
5893 /* Emit code to save/restore call-saved global and FP registers. */
5895 static void
5896 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
5898 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
5900 base = emit_adjust_base_to_offset (base, offset);
5901 offset = 0;
5904 offset
5905 = emit_save_or_restore_regs (0, 8, base, offset, 0,
5906 save_global_or_fp_reg_p, action, SORR_NONE);
5907 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
5908 save_global_or_fp_reg_p, action, SORR_NONE);
5911 /* Emit code to save/restore call-saved local and in registers. */
5913 static void
5914 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
5916 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
5918 base = emit_adjust_base_to_offset (base, offset);
5919 offset = 0;
5922 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
5923 save_local_or_in_reg_p, action, SORR_ADVANCE);
5926 /* Emit a window_save insn. */
5928 static rtx_insn *
5929 emit_window_save (rtx increment)
5931 rtx_insn *insn = emit_insn (gen_window_save (increment));
5932 RTX_FRAME_RELATED_P (insn) = 1;
5934 /* The incoming return address (%o7) is saved in %i7. */
5935 add_reg_note (insn, REG_CFA_REGISTER,
5936 gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
5937 gen_rtx_REG (Pmode,
5938 INCOMING_RETURN_ADDR_REGNUM)));
5940 /* The window save event. */
5941 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
5943 /* The CFA is %fp, the hard frame pointer. */
5944 add_reg_note (insn, REG_CFA_DEF_CFA,
5945 plus_constant (Pmode, hard_frame_pointer_rtx,
5946 INCOMING_FRAME_SP_OFFSET));
5948 return insn;
5951 /* Generate an increment for the stack pointer. */
5953 static rtx
5954 gen_stack_pointer_inc (rtx increment)
5956 return gen_rtx_SET (stack_pointer_rtx,
5957 gen_rtx_PLUS (Pmode,
5958 stack_pointer_rtx,
5959 increment));
5962 /* Expand the function prologue. The prologue is responsible for reserving
5963 storage for the frame, saving the call-saved registers and loading the
5964 GOT register if needed. */
5966 void
5967 sparc_expand_prologue (void)
5969 HOST_WIDE_INT size;
5970 rtx_insn *insn;
5972 /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
5973 on the final value of the flag means deferring the prologue/epilogue
5974 expansion until just before the second scheduling pass, which is too
5975 late to emit multiple epilogues or return insns.
5977 Of course we are making the assumption that the value of the flag
5978 will not change between now and its final value. Of the three parts
5979 of the formula, only the last one can reasonably vary. Let's take a
5980 closer look, after assuming that the first two are set to true
5981 (otherwise the last value is effectively silenced).
5983 If only_leaf_regs_used returns false, the global predicate will also
5984 be false so the actual frame size calculated below will be positive.
5985 As a consequence, the save_register_window insn will be emitted in
5986 the instruction stream; now this insn explicitly references %fp
5987 which is not a leaf register so only_leaf_regs_used will always
5988 return false subsequently.
5990 If only_leaf_regs_used returns true, we hope that the subsequent
5991 optimization passes won't cause non-leaf registers to pop up. For
5992 example, the regrename pass has special provisions to not rename to
5993 non-leaf registers in a leaf function. */
5994 sparc_leaf_function_p
5995 = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
5997 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5999 if (flag_stack_usage_info)
6000 current_function_static_stack_size = size;
6002 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
6003 || flag_stack_clash_protection)
6005 if (crtl->is_leaf && !cfun->calls_alloca)
6007 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
6008 sparc_emit_probe_stack_range (get_stack_check_protect (),
6009 size - get_stack_check_protect ());
6011 else if (size > 0)
6012 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
6015 if (size == 0)
6016 ; /* do nothing. */
6017 else if (sparc_leaf_function_p)
6019 rtx size_int_rtx = GEN_INT (-size);
6021 if (size <= 4096)
6022 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
6023 else if (size <= 8192)
6025 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6026 RTX_FRAME_RELATED_P (insn) = 1;
6028 /* %sp is still the CFA register. */
6029 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6031 else
6033 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6034 emit_move_insn (size_rtx, size_int_rtx);
6035 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6036 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
6037 gen_stack_pointer_inc (size_int_rtx));
6040 RTX_FRAME_RELATED_P (insn) = 1;
6042 else
6044 rtx size_int_rtx = GEN_INT (-size);
6046 if (size <= 4096)
6047 emit_window_save (size_int_rtx);
6048 else if (size <= 8192)
6050 emit_window_save (GEN_INT (-4096));
6052 /* %sp is not the CFA register anymore. */
6053 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6055 /* Make sure no %fp-based store is issued until after the frame is
6056 established. The offset between the frame pointer and the stack
6057 pointer is calculated relative to the value of the stack pointer
6058 at the end of the function prologue, and moving instructions that
6059 access the stack via the frame pointer between the instructions
6060 that decrement the stack pointer could result in accessing the
6061 register window save area, which is volatile. */
6062 emit_insn (gen_frame_blockage ());
6064 else
6066 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6067 emit_move_insn (size_rtx, size_int_rtx);
6068 emit_window_save (size_rtx);
6072 if (sparc_leaf_function_p)
6074 sparc_frame_base_reg = stack_pointer_rtx;
6075 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6077 else
6079 sparc_frame_base_reg = hard_frame_pointer_rtx;
6080 sparc_frame_base_offset = SPARC_STACK_BIAS;
6083 if (sparc_n_global_fp_regs > 0)
6084 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6085 sparc_frame_base_offset
6086 - sparc_apparent_frame_size,
6087 SORR_SAVE);
6089 /* Advertise that the data calculated just above are now valid. */
6090 sparc_prologue_data_valid_p = true;
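/* For illustration, the three allocation shapes the code above can emit
   for a non-leaf function (a leaf function uses plain adds on %sp instead
   of save):

     size <= 4096:   save  %sp, -size, %sp
     size <= 8192:   save  %sp, -4096, %sp
                     add   %sp, 4096 - size, %sp
     otherwise:      set   -size, %g1
                     save  %sp, %g1, %sp

   The 4096/8192 thresholds come from the 13-bit signed immediate field
   of the save/add instructions, i.e. the range [-4096, 4095].  */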
6093 /* Expand the function prologue in the flat window model. The prologue is
6094 responsible for reserving storage for the frame, saving the call-saved
6095 registers and loading the GOT register if needed. */
6097 void
6098 sparc_flat_expand_prologue (void)
6100 HOST_WIDE_INT size;
6101 rtx_insn *insn;
6103 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
6105 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
6107 if (flag_stack_usage_info)
6108 current_function_static_stack_size = size;
6110 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
6111 || flag_stack_clash_protection)
6113 if (crtl->is_leaf && !cfun->calls_alloca)
6115 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
6116 sparc_emit_probe_stack_range (get_stack_check_protect (),
6117 size - get_stack_check_protect ());
6119 else if (size > 0)
6120 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
6123 if (sparc_save_local_in_regs_p)
6124 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
6125 SORR_SAVE);
6127 if (size == 0)
6128 ; /* do nothing. */
6129 else
6131 rtx size_int_rtx, size_rtx;
6133 size_rtx = size_int_rtx = GEN_INT (-size);
6135 /* We establish the frame (i.e. decrement the stack pointer) first, even
6136 if we use a frame pointer, because we cannot clobber any call-saved
6137 registers, including the frame pointer, if we haven't created a new
6138 register save area, for the sake of compatibility with the ABI. */
6139 if (size <= 4096)
6140 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
6141 else if (size <= 8192 && !frame_pointer_needed)
6143 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6144 RTX_FRAME_RELATED_P (insn) = 1;
6145 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6147 else
6149 size_rtx = gen_rtx_REG (Pmode, 1);
6150 emit_move_insn (size_rtx, size_int_rtx);
6151 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6152 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6153 gen_stack_pointer_inc (size_int_rtx));
6155 RTX_FRAME_RELATED_P (insn) = 1;
6157 /* Ensure nothing is scheduled until after the frame is established. */
6158 emit_insn (gen_blockage ());
6160 if (frame_pointer_needed)
6162 insn = emit_insn (gen_rtx_SET (hard_frame_pointer_rtx,
6163 gen_rtx_MINUS (Pmode,
6164 stack_pointer_rtx,
6165 size_rtx)));
6166 RTX_FRAME_RELATED_P (insn) = 1;
6168 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6169 gen_rtx_SET (hard_frame_pointer_rtx,
6170 plus_constant (Pmode, stack_pointer_rtx,
6171 size)));
6174 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6176 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
6177 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
6179 insn = emit_move_insn (i7, o7);
6180 RTX_FRAME_RELATED_P (insn) = 1;
6182 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (i7, o7));
6184 /* Prevent this instruction from ever being considered dead,
6185 even if this function has no epilogue. */
6186 emit_use (i7);
6190 if (frame_pointer_needed)
6192 sparc_frame_base_reg = hard_frame_pointer_rtx;
6193 sparc_frame_base_offset = SPARC_STACK_BIAS;
6195 else
6197 sparc_frame_base_reg = stack_pointer_rtx;
6198 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6201 if (sparc_n_global_fp_regs > 0)
6202 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6203 sparc_frame_base_offset
6204 - sparc_apparent_frame_size,
6205 SORR_SAVE);
6207 /* Advertise that the data calculated just above are now valid. */
6208 sparc_prologue_data_valid_p = true;
6211 /* This function generates the assembly code for function entry, which boils
6212 down to emitting the necessary .register directives. */
6214 static void
6215 sparc_asm_function_prologue (FILE *file)
6217 /* Check that the assumption we made in sparc_expand_prologue is valid. */
6218 if (!TARGET_FLAT)
6219 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
6221 sparc_output_scratch_registers (file);
6224 /* Expand the function epilogue, either normal or part of a sibcall.
6225 We emit all the instructions except the return or the call. */
6227 void
6228 sparc_expand_epilogue (bool for_eh)
6230 HOST_WIDE_INT size = sparc_frame_size;
6232 if (cfun->calls_alloca)
6233 emit_insn (gen_frame_blockage ());
6235 if (sparc_n_global_fp_regs > 0)
6236 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6237 sparc_frame_base_offset
6238 - sparc_apparent_frame_size,
6239 SORR_RESTORE);
6241 if (size == 0 || for_eh)
6242 ; /* do nothing. */
6243 else if (sparc_leaf_function_p)
6245 if (size <= 4096)
6246 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6247 else if (size <= 8192)
6249 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6250 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6252 else
6254 rtx reg = gen_rtx_REG (Pmode, 1);
6255 emit_move_insn (reg, GEN_INT (size));
6256 emit_insn (gen_stack_pointer_inc (reg));
6261 /* Expand the function epilogue in the flat window model, either normal or
6262 part of a sibcall. We emit all the instructions except the return or the call. */
6264 void
6265 sparc_flat_expand_epilogue (bool for_eh)
6267 HOST_WIDE_INT size = sparc_frame_size;
6269 if (sparc_n_global_fp_regs > 0)
6270 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6271 sparc_frame_base_offset
6272 - sparc_apparent_frame_size,
6273 SORR_RESTORE);
6275 /* If we have a frame pointer, we need both to restore it before the
6276 frame is destroyed and to use its current value in destroying the frame.
6277 Since we don't have an atomic way to do that in the flat window model,
6278 we save the current value into a temporary register (%g1). */
6279 if (frame_pointer_needed && !for_eh)
6280 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
6282 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6283 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
6284 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
6286 if (sparc_save_local_in_regs_p)
6287 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
6288 sparc_frame_base_offset,
6289 SORR_RESTORE);
6291 if (size == 0 || for_eh)
6292 ; /* do nothing. */
6293 else if (frame_pointer_needed)
6295 /* Make sure the frame is destroyed after everything else is done. */
6296 emit_insn (gen_blockage ());
6298 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
6300 else
6302 /* Likewise. */
6303 emit_insn (gen_blockage ());
6305 if (size <= 4096)
6306 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6307 else if (size <= 8192)
6309 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6310 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6312 else
6314 rtx reg = gen_rtx_REG (Pmode, 1);
6315 emit_move_insn (reg, GEN_INT (size));
6316 emit_insn (gen_stack_pointer_inc (reg));
6321 /* Return true if it is appropriate to emit `return' instructions in the
6322 body of a function. */
6324 bool
6325 sparc_can_use_return_insn_p (void)
6327 return sparc_prologue_data_valid_p
6328 && sparc_n_global_fp_regs == 0
6329 && (TARGET_FLAT
6330 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
6331 : (sparc_frame_size == 0 || !sparc_leaf_function_p));
6334 /* This function generates the assembly code for function exit. */
6336 static void
6337 sparc_asm_function_epilogue (FILE *file)
6339 /* If the last two instructions of a function are "call foo; dslot;"
6340 the return address might point to the first instruction in the next
6341 function and we have to output a dummy nop for the sake of sane
6342 backtraces in such cases. This is pointless for sibling calls since
6343 the return address is explicitly adjusted. */
6345 rtx_insn *insn = get_last_insn ();
6347 rtx last_real_insn = prev_real_insn (insn);
6348 if (last_real_insn
6349 && NONJUMP_INSN_P (last_real_insn)
6350 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
6351 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
6353 if (last_real_insn
6354 && CALL_P (last_real_insn)
6355 && !SIBLING_CALL_P (last_real_insn))
6356 fputs("\tnop\n", file);
6358 sparc_output_deferred_case_vectors ();
6361 /* Output a 'restore' instruction. */
6363 static void
6364 output_restore (rtx pat)
6366 rtx operands[3];
6368 if (! pat)
6370 fputs ("\t restore\n", asm_out_file);
6371 return;
6374 gcc_assert (GET_CODE (pat) == SET);
6376 operands[0] = SET_DEST (pat);
6377 pat = SET_SRC (pat);
6379 switch (GET_CODE (pat))
6381 case PLUS:
6382 operands[1] = XEXP (pat, 0);
6383 operands[2] = XEXP (pat, 1);
6384 output_asm_insn (" restore %r1, %2, %Y0", operands);
6385 break;
6386 case LO_SUM:
6387 operands[1] = XEXP (pat, 0);
6388 operands[2] = XEXP (pat, 1);
6389 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
6390 break;
6391 case ASHIFT:
6392 operands[1] = XEXP (pat, 0);
6393 gcc_assert (XEXP (pat, 1) == const1_rtx);
6394 output_asm_insn (" restore %r1, %r1, %Y0", operands);
6395 break;
6396 default:
6397 operands[1] = pat;
6398 output_asm_insn (" restore %%g0, %1, %Y0", operands);
6399 break;
6403 /* Output a return. */
6405 const char *
6406 output_return (rtx_insn *insn)
6408 if (crtl->calls_eh_return)
6410 /* If the function uses __builtin_eh_return, the eh_return
6411 machinery occupies the delay slot. */
6412 gcc_assert (!final_sequence);
6414 if (flag_delayed_branch)
6416 if (!TARGET_FLAT && TARGET_V9)
6417 fputs ("\treturn\t%i7+8\n", asm_out_file);
6418 else
6420 if (!TARGET_FLAT)
6421 fputs ("\trestore\n", asm_out_file);
6423 fputs ("\tjmp\t%o7+8\n", asm_out_file);
6426 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
6428 else
6430 if (!TARGET_FLAT)
6431 fputs ("\trestore\n", asm_out_file);
6433 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
6434 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
6437 else if (sparc_leaf_function_p || TARGET_FLAT)
6439 /* This is a leaf or flat function so we don't have to bother restoring
6440 the register window, which frees us from dealing with the convoluted
6441 semantics of restore/return. We simply output the jump to the
6442 return address and the insn in the delay slot (if any). */
6444 return "jmp\t%%o7+%)%#";
6446 else
6448 /* This is a regular function so we have to restore the register window.
6449 We may have a pending insn for the delay slot, which will be either
6450 combined with the 'restore' instruction or put in the delay slot of
6451 the 'return' instruction. */
6453 if (final_sequence)
6455 rtx_insn *delay;
6456 rtx pat;
6458 delay = NEXT_INSN (insn);
6459 gcc_assert (delay);
6461 pat = PATTERN (delay);
6463 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
6465 epilogue_renumber (&pat, 0);
6466 return "return\t%%i7+%)%#";
6468 else
6470 output_asm_insn ("jmp\t%%i7+%)", NULL);
6472 /* We're going to output the insn in the delay slot manually.
6473 Make sure to output its source location first. */
6474 PATTERN (delay) = gen_blockage ();
6475 INSN_CODE (delay) = -1;
6476 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6477 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6479 output_restore (pat);
6482 else
6484 /* The delay slot is empty. */
6485 if (TARGET_V9)
6486 return "return\t%%i7+%)\n\t nop";
6487 else if (flag_delayed_branch)
6488 return "jmp\t%%i7+%)\n\t restore";
6489 else
6490 return "restore\n\tjmp\t%%o7+%)\n\t nop";
6494 return "";
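/* A sketch of the sequences produced above, assuming register windows
   (!TARGET_FLAT) and an empty delay slot; %) normally expands to 8:

     leaf function:      jmp     %o7+8
                          nop
     non-leaf, V9:       return  %i7+8		! restore is implicit
                          nop
     non-leaf, pre-V9:   jmp     %i7+8
                          restore		! restore in the delay slot  */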
6497 /* Output a sibling call. */
6499 const char *
6500 output_sibcall (rtx_insn *insn, rtx call_operand)
6502 rtx operands[1];
6504 gcc_assert (flag_delayed_branch);
6506 operands[0] = call_operand;
6508 if (sparc_leaf_function_p || TARGET_FLAT)
6510 /* This is a leaf or flat function so we don't have to bother restoring
6511 the register window. We simply output the jump to the function and
6512 the insn in the delay slot (if any). */
6514 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
6516 if (final_sequence)
6517 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
6518 operands);
6519 else
6520 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
6521 it into a branch if possible. */
6522 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
6523 operands);
6525 else
6527 /* This is a regular function so we have to restore the register window.
6528 We may have a pending insn for the delay slot, which will be combined
6529 with the 'restore' instruction. */
6531 output_asm_insn ("call\t%a0, 0", operands);
6533 if (final_sequence)
6535 rtx_insn *delay;
6536 rtx pat;
6538 delay = NEXT_INSN (insn);
6539 gcc_assert (delay);
6541 pat = PATTERN (delay);
6543 /* We're going to output the insn in the delay slot manually.
6544 Make sure to output its source location first. */
6545 PATTERN (delay) = gen_blockage ();
6546 INSN_CODE (delay) = -1;
6547 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6548 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6550 output_restore (pat);
6552 else
6553 output_restore (NULL_RTX);
6556 return "";
6559 /* Functions for handling argument passing.
6561 For 32-bit, the first 6 args are normally in registers and the rest are
6562 pushed. Any arg that starts within the first 6 words is at least
6563 partially passed in a register unless its data type forbids it.
6565 For 64-bit, the argument registers are laid out as an array of 16 elements
6566 and arguments are added sequentially. The first 6 int args and up to the
6567 first 16 fp args (depending on size) are passed in regs.
6569 Slot Stack Integral Float Float in structure Double Long Double
6570 ---- ----- -------- ----- ------------------ ------ -----------
6571 15 [SP+248] %f31 %f30,%f31 %d30
6572 14 [SP+240] %f29 %f28,%f29 %d28 %q28
6573 13 [SP+232] %f27 %f26,%f27 %d26
6574 12 [SP+224] %f25 %f24,%f25 %d24 %q24
6575 11 [SP+216] %f23 %f22,%f23 %d22
6576 10 [SP+208] %f21 %f20,%f21 %d20 %q20
6577 9 [SP+200] %f19 %f18,%f19 %d18
6578 8 [SP+192] %f17 %f16,%f17 %d16 %q16
6579 7 [SP+184] %f15 %f14,%f15 %d14
6580 6 [SP+176] %f13 %f12,%f13 %d12 %q12
6581 5 [SP+168] %o5 %f11 %f10,%f11 %d10
6582 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
6583 3 [SP+152] %o3 %f7 %f6,%f7 %d6
6584 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
6585 1 [SP+136] %o1 %f3 %f2,%f3 %d2
6586 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
6588 Here SP = %sp if -mno-stack-bias, or %sp + stack_bias otherwise.
6590 Integral arguments are always passed as 64-bit quantities appropriately
6591 extended.
6593 Passing of floating point values is handled as follows.
6594 If a prototype is in scope:
6595 If the value is in a named argument (i.e. not a stdarg function or a
6596 value not part of the `...') then the value is passed in the appropriate
6597 fp reg.
6598 If the value is part of the `...' and is passed in one of the first 6
6599 slots then the value is passed in the appropriate int reg.
6600 If the value is part of the `...' and is not passed in one of the first 6
6601 slots then the value is passed in memory.
6602 If a prototype is not in scope:
6603 If the value is one of the first 6 arguments the value is passed in the
6604 appropriate integer reg and the appropriate fp reg.
6605 If the value is not one of the first 6 arguments the value is passed in
6606 the appropriate fp reg and in memory.
6609 Summary of the calling conventions implemented by GCC on the SPARC:
6611 32-bit ABI:
6612 size argument return value
6614 small integer <4 int. reg. int. reg.
6615 word 4 int. reg. int. reg.
6616 double word 8 int. reg. int. reg.
6618 _Complex small integer <8 int. reg. int. reg.
6619 _Complex word 8 int. reg. int. reg.
6620 _Complex double word 16 memory int. reg.
6622 vector integer <=8 int. reg. FP reg.
6623 vector integer >8 memory memory
6625 float 4 int. reg. FP reg.
6626 double 8 int. reg. FP reg.
6627 long double 16 memory memory
6629 _Complex float 8 memory FP reg.
6630 _Complex double 16 memory FP reg.
6631 _Complex long double 32 memory FP reg.
6633 vector float any memory memory
6635 aggregate any memory memory
6639 64-bit ABI:
6640 size argument return value
6642 small integer <8 int. reg. int. reg.
6643 word 8 int. reg. int. reg.
6644 double word 16 int. reg. int. reg.
6646 _Complex small integer <16 int. reg. int. reg.
6647 _Complex word 16 int. reg. int. reg.
6648 _Complex double word 32 memory int. reg.
6650 vector integer <=16 FP reg. FP reg.
6651 vector integer 16<s<=32 memory FP reg.
6652 vector integer >32 memory memory
6654 float 4 FP reg. FP reg.
6655 double 8 FP reg. FP reg.
6656 long double 16 FP reg. FP reg.
6658 _Complex float 8 FP reg. FP reg.
6659 _Complex double 16 FP reg. FP reg.
6660 _Complex long double 32 memory FP reg.
6662 vector float <=16 FP reg. FP reg.
6663 vector float 16<s<=32 memory FP reg.
6664 vector float >32 memory memory
6666 aggregate <=16 reg. reg.
6667 aggregate 16<s<=32 memory reg.
6668 aggregate >32 memory memory
6672 Note #1: complex floating-point types follow the extended SPARC ABIs as
6673 implemented by the Sun compiler.
6675 Note #2: integral vector types follow the scalar floating-point types
6676 conventions to match what is implemented by the Sun VIS SDK.
6678 Note #3: floating-point vector types follow the aggregate types
6679 conventions. */
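/* A minimal sketch of the 64-bit slot-to-register arithmetic described by
   the tables above; sketch_arg_regno and its parameters are hypothetical,
   the real logic lives in function_arg_slotno below.  For a prototyped
   call f (int, double, double), the int takes slot 0 in %o0 and the
   doubles slots 1 and 2 in %d2 and %d4; a fourth, integer argument would
   land in %o3 because the FP arguments keep their slots.  */

static int
sketch_arg_regno (int slotno, bool fp)
{
  /* FP slots advance by two single-precision regs per 8-byte slot.  */
  return fp ? SPARC_FP_ARG_FIRST + slotno * 2
	    : SPARC_OUTGOING_INT_ARG_FIRST + slotno;
}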
6682 /* Maximum number of int regs for args. */
6683 #define SPARC_INT_ARG_MAX 6
6684 /* Maximum number of fp regs for args. */
6685 #define SPARC_FP_ARG_MAX 16
6686 /* Number of words (partially) occupied for a given size in units. */
6687 #define CEIL_NWORDS(SIZE) CEIL((SIZE), UNITS_PER_WORD)
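/* E.g. with 8-byte words, CEIL_NWORDS (12) and CEIL_NWORDS (16) are both
   2: a 12-byte object still occupies two argument slots.  */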
6689 /* Handle the INIT_CUMULATIVE_ARGS macro.
6690 Initialize a variable CUM of type CUMULATIVE_ARGS
6691 for a call to a function whose data type is FNTYPE.
6692 For a library call, FNTYPE is 0. */
6694 void
6695 init_cumulative_args (struct sparc_args *cum, tree fntype, rtx, tree)
6697 cum->words = 0;
6698 cum->prototype_p = fntype && prototype_p (fntype);
6699 cum->libcall_p = !fntype;
6702 /* Handle promotion of pointer and integer arguments. */
6704 static machine_mode
6705 sparc_promote_function_mode (const_tree type, machine_mode mode,
6706 int *punsignedp, const_tree, int)
6708 if (type && POINTER_TYPE_P (type))
6710 *punsignedp = POINTERS_EXTEND_UNSIGNED;
6711 return Pmode;
6714 /* Integral arguments are passed as full words, as per the ABI. */
6715 if (GET_MODE_CLASS (mode) == MODE_INT
6716 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6717 return word_mode;
6719 return mode;
6722 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
6724 static bool
6725 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
6727 return TARGET_ARCH64;
6730 /* Traverse the record TYPE recursively and call FUNC on its fields.
6731 NAMED is true if this is for a named parameter. DATA is passed
6732 to FUNC for each field. OFFSET is the starting position and
6733 PACKED is true if we are inside a packed record. */
6735 template <typename T, void Func (const_tree, HOST_WIDE_INT, bool, T*)>
6736 static void
6737 traverse_record_type (const_tree type, bool named, T *data,
6738 HOST_WIDE_INT offset = 0, bool packed = false)
6740 /* The ABI obviously doesn't specify how packed structures are passed.
6741 These are passed in integer regs if possible, otherwise memory. */
6742 if (!packed)
6743 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6744 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6746 packed = true;
6747 break;
6750 /* Walk the real fields, but skip those with no size or a zero size.
6751 ??? Fields with variable offset are handled as having zero offset. */
6752 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6753 if (TREE_CODE (field) == FIELD_DECL)
6755 if (!DECL_SIZE (field) || integer_zerop (DECL_SIZE (field)))
6756 continue;
6758 HOST_WIDE_INT bitpos = offset;
6759 if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST)
6760 bitpos += int_bit_position (field);
6762 tree field_type = TREE_TYPE (field);
6763 if (TREE_CODE (field_type) == RECORD_TYPE)
6764 traverse_record_type<T, Func> (field_type, named, data, bitpos,
6765 packed);
6766 else
6768 const bool fp_type
6769 = FLOAT_TYPE_P (field_type) || VECTOR_TYPE_P (field_type);
6770 Func (field, bitpos, fp_type && named && !packed && TARGET_FPU,
6771 data);
6776 /* Handle recursive register classifying for structure layout. */
6778 typedef struct
6780 bool fp_regs; /* true if field eligible for FP registers. */
6781 bool fp_regs_in_first_word; /* true if such field in first word. */
6782 } classify_data_t;
6784 /* A subroutine of function_arg_slotno. Classify the field. */
6786 inline void
6787 classify_registers (const_tree, HOST_WIDE_INT bitpos, bool fp,
6788 classify_data_t *data)
6790 if (fp)
6792 data->fp_regs = true;
6793 if (bitpos < BITS_PER_WORD)
6794 data->fp_regs_in_first_word = true;
6798 /* Compute the slot number to pass an argument in.
6799 Return the slot number or -1 if passing on the stack.
6801 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6802 the preceding args and about the function being called.
6803 MODE is the argument's machine mode.
6804 TYPE is the data type of the argument (as a tree).
6805 This is null for libcalls where that information may
6806 not be available.
6807 NAMED is nonzero if this argument is a named parameter
6808 (otherwise it is an extra parameter matching an ellipsis).
6809 INCOMING is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
6810 *PREGNO records the register number to use if scalar type.
6811 *PPADDING records the amount of padding needed in words. */
6813 static int
6814 function_arg_slotno (const struct sparc_args *cum, machine_mode mode,
6815 const_tree type, bool named, bool incoming,
6816 int *pregno, int *ppadding)
6818 int regbase = (incoming
6819 ? SPARC_INCOMING_INT_ARG_FIRST
6820 : SPARC_OUTGOING_INT_ARG_FIRST);
6821 int slotno = cum->words;
6822 enum mode_class mclass;
6823 int regno;
6825 *ppadding = 0;
6827 if (type && TREE_ADDRESSABLE (type))
6828 return -1;
6830 if (TARGET_ARCH32
6831 && mode == BLKmode
6832 && type
6833 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
6834 return -1;
6836 /* For SPARC64, objects requiring 16-byte alignment get it. */
6837 if (TARGET_ARCH64
6838 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
6839 && (slotno & 1) != 0)
6840 slotno++, *ppadding = 1;
6842 mclass = GET_MODE_CLASS (mode);
6843 if (type && TREE_CODE (type) == VECTOR_TYPE)
6845 /* Vector types deserve special treatment because they are
6846 polymorphic wrt their mode, depending upon whether VIS
6847 instructions are enabled. */
6848 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6850 /* The SPARC port defines no floating-point vector modes. */
6851 gcc_assert (mode == BLKmode);
6853 else
6855 /* Integral vector types should either have a vector
6856 mode or an integral mode, because we are guaranteed
6857 by pass_by_reference that their size is not greater
6858 than 16 bytes and TImode is 16-byte wide. */
6859 gcc_assert (mode != BLKmode);
6861 /* Vector integers are handled like floats according to
6862 the Sun VIS SDK. */
6863 mclass = MODE_FLOAT;
6867 switch (mclass)
6869 case MODE_FLOAT:
6870 case MODE_COMPLEX_FLOAT:
6871 case MODE_VECTOR_INT:
6872 if (TARGET_ARCH64 && TARGET_FPU && named)
6874 /* If all arg slots are filled, then must pass on stack. */
6875 if (slotno >= SPARC_FP_ARG_MAX)
6876 return -1;
6878 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6879 /* Arguments filling only a single FP register are
6880 right-justified in the outer double FP register. */
6881 if (GET_MODE_SIZE (mode) <= 4)
6882 regno++;
6883 break;
6885 /* fallthrough */
6887 case MODE_INT:
6888 case MODE_COMPLEX_INT:
6889 /* If all arg slots are filled, then must pass on stack. */
6890 if (slotno >= SPARC_INT_ARG_MAX)
6891 return -1;
6893 regno = regbase + slotno;
6894 break;
6896 case MODE_RANDOM:
6897 if (mode == VOIDmode)
6898 /* MODE is VOIDmode when generating the actual call. */
6899 return -1;
6901 gcc_assert (mode == BLKmode);
6903 if (TARGET_ARCH32
6904 || !type
6905 || (TREE_CODE (type) != RECORD_TYPE
6906 && TREE_CODE (type) != VECTOR_TYPE))
6908 /* If all arg slots are filled, then must pass on stack. */
6909 if (slotno >= SPARC_INT_ARG_MAX)
6910 return -1;
6912 regno = regbase + slotno;
6914 else /* TARGET_ARCH64 && type */
6916 /* If all arg slots are filled, then must pass on stack. */
6917 if (slotno >= SPARC_FP_ARG_MAX)
6918 return -1;
6920 if (TREE_CODE (type) == RECORD_TYPE)
6922 classify_data_t data = { false, false };
6923 traverse_record_type<classify_data_t, classify_registers>
6924 (type, named, &data);
6926 if (data.fp_regs)
6928 /* If all FP slots are filled except for the last one and
6929 there is no FP field in the first word, then must pass
6930 on stack. */
6931 if (slotno >= SPARC_FP_ARG_MAX - 1
6932 && !data.fp_regs_in_first_word)
6933 return -1;
6935 else
6937 /* If all int slots are filled, then must pass on stack. */
6938 if (slotno >= SPARC_INT_ARG_MAX)
6939 return -1;
6943 /* PREGNO isn't set since both int and FP regs can be used. */
6944 return slotno;
6946 break;
6948 default :
6949 gcc_unreachable ();
6952 *pregno = regno;
6953 return slotno;
6956 /* Handle recursive register counting/assigning for structure layout. */
6958 typedef struct
6960 int slotno; /* slot number of the argument. */
6961 int regbase; /* regno of the base register. */
6962 int intoffset; /* offset of the first pending integer field. */
6963 int nregs; /* number of words passed in registers. */
6964 bool stack; /* true if part of the argument is on the stack. */
6965 rtx ret; /* return expression being built. */
6966 } assign_data_t;
6968 /* A subroutine of function_arg_record_value. Compute the number of integer
6969 registers to be assigned between DATA->intoffset and BITPOS. Return
6970 true if at least one integer register is assigned or false otherwise. */
6972 static bool
6973 compute_int_layout (HOST_WIDE_INT bitpos, assign_data_t *data, int *pnregs)
6975 if (data->intoffset < 0)
6976 return false;
6978 const int intoffset = data->intoffset;
6979 data->intoffset = -1;
6981 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
6982 const unsigned int startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
6983 const unsigned int endbit = ROUND_UP (bitpos, BITS_PER_WORD);
6984 int nregs = (endbit - startbit) / BITS_PER_WORD;
6986 if (nregs > 0 && nregs > SPARC_INT_ARG_MAX - this_slotno)
6988 nregs = SPARC_INT_ARG_MAX - this_slotno;
6990 /* We need to pass this field (partly) on the stack. */
6991 data->stack = 1;
6994 if (nregs <= 0)
6995 return false;
6997 *pnregs = nregs;
6998 return true;
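/* Worked example, assuming 64-bit words: with DATA->intoffset = 32 and
   BITPOS = 160, startbit = 0 and endbit = 192, so nregs = 3; the pending
   run of integer fields is widened to whole words at both ends before
   being parceled out to registers.  */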
7001 /* A subroutine of function_arg_record_value. Compute the number and the mode
7002 of the FP registers to be assigned for FIELD. Return true if at least one
7003 FP register is assigned or false otherwise. */
7005 static bool
7006 compute_fp_layout (const_tree field, HOST_WIDE_INT bitpos,
7007 assign_data_t *data,
7008 int *pnregs, machine_mode *pmode)
7010 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
7011 machine_mode mode = DECL_MODE (field);
7012 int nregs, nslots;
7014 /* Slots are counted as words while regs are counted as having the size of
7015 the (inner) mode. */
7016 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE && mode == BLKmode)
7018 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
7019 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
7021 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
7023 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
7024 nregs = 2;
7026 else
7027 nregs = 1;
7029 nslots = CEIL_NWORDS (nregs * GET_MODE_SIZE (mode));
7031 if (nslots > SPARC_FP_ARG_MAX - this_slotno)
7033 nslots = SPARC_FP_ARG_MAX - this_slotno;
7034 nregs = (nslots * UNITS_PER_WORD) / GET_MODE_SIZE (mode);
7036 /* We need to pass this field (partly) on the stack. */
7037 data->stack = 1;
7039 if (nregs <= 0)
7040 return false;
7043 *pnregs = nregs;
7044 *pmode = mode;
7045 return true;
7048 /* A subroutine of function_arg_record_value. Count the number of registers
7049 to be assigned for FIELD and between DATA->intoffset and BITPOS. */
7051 inline void
7052 count_registers (const_tree field, HOST_WIDE_INT bitpos, bool fp,
7053 assign_data_t *data)
7055 if (fp)
7057 int nregs;
7058 machine_mode mode;
7060 if (compute_int_layout (bitpos, data, &nregs))
7061 data->nregs += nregs;
7063 if (compute_fp_layout (field, bitpos, data, &nregs, &mode))
7064 data->nregs += nregs;
7066 else
7068 if (data->intoffset < 0)
7069 data->intoffset = bitpos;
7073 /* A subroutine of function_arg_record_value. Assign the bits of the
7074 structure between DATA->intoffset and BITPOS to integer registers. */
7076 static void
7077 assign_int_registers (HOST_WIDE_INT bitpos, assign_data_t *data)
7079 int intoffset = data->intoffset;
7080 machine_mode mode;
7081 int nregs;
7083 if (!compute_int_layout (bitpos, data, &nregs))
7084 return;
7086 /* If this is the trailing part of a word, only load that much into
7087 the register. Otherwise load the whole register. Note that in
7088 the latter case we may pick up unwanted bits. It's not a problem
7089 at the moment but we may wish to revisit it. */
7090 if (intoffset % BITS_PER_WORD != 0)
7091 mode = smallest_int_mode_for_size (BITS_PER_WORD
7092 - intoffset % BITS_PER_WORD);
7093 else
7094 mode = word_mode;
7096 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
7097 unsigned int regno = data->regbase + this_slotno;
7098 intoffset /= BITS_PER_UNIT;
7100 do
7102 rtx reg = gen_rtx_REG (mode, regno);
7103 XVECEXP (data->ret, 0, data->stack + data->nregs)
7104 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
7105 data->nregs += 1;
7106 mode = word_mode;
7107 regno += 1;
7108 intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
7110 while (--nregs > 0);
7113 /* A subroutine of function_arg_record_value. Assign FIELD at position
7114 BITPOS to FP registers. */
7116 static void
7117 assign_fp_registers (const_tree field, HOST_WIDE_INT bitpos,
7118 assign_data_t *data)
7120 int nregs;
7121 machine_mode mode;
7123 if (!compute_fp_layout (field, bitpos, data, &nregs, &mode))
7124 return;
7126 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
7127 int regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
7128 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
7129 regno++;
7130 int pos = bitpos / BITS_PER_UNIT;
7132 do
7134 rtx reg = gen_rtx_REG (mode, regno);
7135 XVECEXP (data->ret, 0, data->stack + data->nregs)
7136 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
7137 data->nregs += 1;
7138 regno += GET_MODE_SIZE (mode) / 4;
7139 pos += GET_MODE_SIZE (mode);
7141 while (--nregs > 0);
7144 /* A subroutine of function_arg_record_value. Assign FIELD and the bits of
7145 the structure between DATA->intoffset and BITPOS to registers. */
7147 inline void
7148 assign_registers (const_tree field, HOST_WIDE_INT bitpos, bool fp,
7149 assign_data_t *data)
7151 if (fp)
7153 assign_int_registers (bitpos, data);
7155 assign_fp_registers (field, bitpos, data);
7157 else
7159 if (data->intoffset < 0)
7160 data->intoffset = bitpos;
7164 /* Used by function_arg and sparc_function_value_1 to implement the complex
7165 conventions of the 64-bit ABI for passing and returning structures.
7166 Return an expression valid as a return value for the FUNCTION_ARG
7167 and TARGET_FUNCTION_VALUE.
7169 TYPE is the data type of the argument (as a tree).
7170 This is null for libcalls where that information may
7171 not be available.
7172 MODE is the argument's machine mode.
7173 SLOTNO is the index number of the argument's slot in the parameter array.
7174 NAMED is true if this argument is a named parameter
7175 (otherwise it is an extra parameter matching an ellipsis).
7176 REGBASE is the regno of the base register for the parameter array. */
7178 static rtx
7179 function_arg_record_value (const_tree type, machine_mode mode,
7180 int slotno, bool named, int regbase)
7182 HOST_WIDE_INT typesize = int_size_in_bytes (type);
7183 assign_data_t data;
7184 int nregs;
7186 data.slotno = slotno;
7187 data.regbase = regbase;
7189 /* Count how many registers we need. */
7190 data.nregs = 0;
7191 data.intoffset = 0;
7192 data.stack = false;
7193 traverse_record_type<assign_data_t, count_registers> (type, named, &data);
7195 /* Take into account pending integer fields. */
7196 if (compute_int_layout (typesize * BITS_PER_UNIT, &data, &nregs))
7197 data.nregs += nregs;
7199 /* Allocate the vector and handle some annoying special cases. */
7200 nregs = data.nregs;
7202 if (nregs == 0)
7204 /* ??? Empty structure has no value? Duh? */
7205 if (typesize <= 0)
7207 /* Though there's nothing really to store, return a word register
7208 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
7209 leads to breakage due to the fact that there are zero bytes to
7210 load. */
7211 return gen_rtx_REG (mode, regbase);
7214 /* ??? C++ has structures with no fields, and yet a size. Give up
7215 for now and pass everything back in integer registers. */
7216 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7217 if (nregs + slotno > SPARC_INT_ARG_MAX)
7218 nregs = SPARC_INT_ARG_MAX - slotno;
7221 gcc_assert (nregs > 0);
7223 data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));
7225 /* If at least one field must be passed on the stack, generate
7226 (parallel [(expr_list (nil) ...) ...]) so that all fields will
7227 also be passed on the stack. We can't do much better because the
7228 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
7229 of structures for which the fields passed exclusively in registers
7230 are not at the beginning of the structure. */
7231 if (data.stack)
7232 XVECEXP (data.ret, 0, 0)
7233 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7235 /* Assign the registers. */
7236 data.nregs = 0;
7237 data.intoffset = 0;
7238 traverse_record_type<assign_data_t, assign_registers> (type, named, &data);
7240 /* Assign pending integer fields. */
7241 assign_int_registers (typesize * BITS_PER_UNIT, &data);
7243 gcc_assert (data.nregs == nregs);
7245 return data.ret;
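/* For illustration, the PARALLEL built above for a hypothetical
   struct { double d; long l; } passed in slot 0 would be

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
		(expr_list (reg:DI %o1) (const_int 8))])

   with the FP field riding in %f0/%d0 and the integer field in the int
   register of the second slot, each expr_list carrying the byte offset
   of its piece within the structure.  */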
7248 /* Used by function_arg and sparc_function_value_1 to implement the conventions
7249 of the 64-bit ABI for passing and returning unions.
7250 Return an expression valid as a return value for the FUNCTION_ARG
7251 and TARGET_FUNCTION_VALUE.
7253 SIZE is the size in bytes of the union.
7254 MODE is the argument's machine mode.
7255 SLOTNO is the slot index and REGNO the hard register the union will be passed in. */
7257 static rtx
7258 function_arg_union_value (int size, machine_mode mode, int slotno,
7259 int regno)
7261 int nwords = CEIL_NWORDS (size), i;
7262 rtx regs;
7264 /* See comment in previous function for empty structures. */
7265 if (nwords == 0)
7266 return gen_rtx_REG (mode, regno);
7268 if (slotno == SPARC_INT_ARG_MAX - 1)
7269 nwords = 1;
7271 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
7273 for (i = 0; i < nwords; i++)
7275 /* Unions are passed left-justified. */
7276 XVECEXP (regs, 0, i)
7277 = gen_rtx_EXPR_LIST (VOIDmode,
7278 gen_rtx_REG (word_mode, regno),
7279 GEN_INT (UNITS_PER_WORD * i));
7280 regno++;
7283 return regs;
7286 /* Used by function_arg and sparc_function_value_1 to implement the conventions
7287 for passing and returning BLKmode vectors.
7288 Return an expression valid as a return value for the FUNCTION_ARG
7289 and TARGET_FUNCTION_VALUE.
7291 SIZE is the size in bytes of the vector.
7292 REGNO is the FP hard register the vector will be passed in. */
7294 static rtx
7295 function_arg_vector_value (int size, int regno)
7297 const int nregs = MAX (1, size / 8);
7298 rtx regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
7300 if (size < 8)
7301 XVECEXP (regs, 0, 0)
7302 = gen_rtx_EXPR_LIST (VOIDmode,
7303 gen_rtx_REG (SImode, regno),
7304 const0_rtx);
7305 else
7306 for (int i = 0; i < nregs; i++)
7307 XVECEXP (regs, 0, i)
7308 = gen_rtx_EXPR_LIST (VOIDmode,
7309 gen_rtx_REG (DImode, regno + 2*i),
7310 GEN_INT (i*8));
7312 return regs;
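/* E.g. a 16-byte vector handed to FP register REGNO comes out as

     (parallel [(expr_list (reg:DI regno)     (const_int 0))
		(expr_list (reg:DI regno + 2) (const_int 8))])

   where each DImode piece occupies an even-aligned FP register pair,
   hence the 2*i stride above.  */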
7315 /* Determine where to put an argument to a function.
7316 Value is zero to push the argument on the stack,
7317 or a hard register in which to store the argument.
7319 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7320 the preceding args and about the function being called.
7321 MODE is the argument's machine mode.
7322 TYPE is the data type of the argument (as a tree).
7323 This is null for libcalls where that information may
7324 not be available.
7325 NAMED is true if this argument is a named parameter
7326 (otherwise it is an extra parameter matching an ellipsis).
7327 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
7328 TARGET_FUNCTION_INCOMING_ARG. */
7330 static rtx
7331 sparc_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
7332 const_tree type, bool named, bool incoming)
7334 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7336 int regbase = (incoming
7337 ? SPARC_INCOMING_INT_ARG_FIRST
7338 : SPARC_OUTGOING_INT_ARG_FIRST);
7339 int slotno, regno, padding;
7340 enum mode_class mclass = GET_MODE_CLASS (mode);
7342 slotno = function_arg_slotno (cum, mode, type, named, incoming,
7343 &regno, &padding);
7344 if (slotno == -1)
7345 return 0;
7347 /* Vector types deserve special treatment because they are polymorphic wrt
7348 their mode, depending upon whether VIS instructions are enabled. */
7349 if (type && TREE_CODE (type) == VECTOR_TYPE)
7351 HOST_WIDE_INT size = int_size_in_bytes (type);
7352 gcc_assert ((TARGET_ARCH32 && size <= 8)
7353 || (TARGET_ARCH64 && size <= 16));
7355 if (mode == BLKmode)
7356 return function_arg_vector_value (size, SPARC_FP_ARG_FIRST + 2*slotno);
7358 mclass = MODE_FLOAT;
7361 if (TARGET_ARCH32)
7362 return gen_rtx_REG (mode, regno);
7364 /* Structures up to 16 bytes in size are passed in arg slots on the stack
7365 and are promoted to registers if possible. */
7366 if (type && TREE_CODE (type) == RECORD_TYPE)
7368 HOST_WIDE_INT size = int_size_in_bytes (type);
7369 gcc_assert (size <= 16);
7371 return function_arg_record_value (type, mode, slotno, named, regbase);
7374 /* Unions up to 16 bytes in size are passed in integer registers. */
7375 else if (type && TREE_CODE (type) == UNION_TYPE)
7377 HOST_WIDE_INT size = int_size_in_bytes (type);
7378 gcc_assert (size <= 16);
7380 return function_arg_union_value (size, mode, slotno, regno);
7383 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
7384 but also have the slot allocated for them.
7385 If no prototype is in scope, fp values in register slots get passed
7386 in two places, either fp regs and int regs or fp regs and memory. */
7387 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7388 && SPARC_FP_REG_P (regno))
7390 rtx reg = gen_rtx_REG (mode, regno);
7391 if (cum->prototype_p || cum->libcall_p)
7392 return reg;
7393 else
7395 rtx v0, v1;
7397 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
7399 int intreg;
7401 /* On incoming, we don't need to know that the value
7402 is passed in %f0 and %i0, and it confuses other parts
7403 causing needless spillage even in the simplest cases. */
7404 if (incoming)
7405 return reg;
7407 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
7408 + (regno - SPARC_FP_ARG_FIRST) / 2);
7410 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7411 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
7412 const0_rtx);
7413 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7415 else
7417 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7418 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7419 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7424 /* All other aggregate types are passed in an integer register in a mode
7425 corresponding to the size of the type. */
7426 else if (type && AGGREGATE_TYPE_P (type))
7428 HOST_WIDE_INT size = int_size_in_bytes (type);
7429 gcc_assert (size <= 16);
7431 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7434 return gen_rtx_REG (mode, regno);
7437 /* Handle the TARGET_FUNCTION_ARG target hook. */
7439 static rtx
7440 sparc_function_arg (cumulative_args_t cum, machine_mode mode,
7441 const_tree type, bool named)
7443 return sparc_function_arg_1 (cum, mode, type, named, false);
7446 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
7448 static rtx
7449 sparc_function_incoming_arg (cumulative_args_t cum, machine_mode mode,
7450 const_tree type, bool named)
7452 return sparc_function_arg_1 (cum, mode, type, named, true);
7455 /* For sparc64, objects requiring 16-byte alignment are passed that way. */
7457 static unsigned int
7458 sparc_function_arg_boundary (machine_mode mode, const_tree type)
7460 return ((TARGET_ARCH64
7461 && (GET_MODE_ALIGNMENT (mode) == 128
7462 || (type && TYPE_ALIGN (type) == 128)))
7463 ? 128
7464 : PARM_BOUNDARY);
7467 /* For an arg passed partly in registers and partly in memory,
7468 this is the number of bytes of registers used.
7469 For args passed entirely in registers or entirely in memory, zero.
7471 Any arg that starts in the first 6 regs but won't entirely fit in them
7472 needs partial registers on v8. On v9, structures with integer
7473 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
7474 values that begin in the last fp reg [where "last fp reg" varies with the
7475 mode] will be split between that reg and memory. */
7477 static int
7478 sparc_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
7479 tree type, bool named)
7481 int slotno, regno, padding;
7483 /* We pass false for incoming here; it doesn't matter. */
7484 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
7485 false, &regno, &padding);
7487 if (slotno == -1)
7488 return 0;
7490 if (TARGET_ARCH32)
7492 if ((slotno + (mode == BLKmode
7493 ? CEIL_NWORDS (int_size_in_bytes (type))
7494 : CEIL_NWORDS (GET_MODE_SIZE (mode))))
7495 > SPARC_INT_ARG_MAX)
7496 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
7498 else
7500 /* We are guaranteed by pass_by_reference that the size of the
7501 argument is not greater than 16 bytes, so we only need to return
7502 one word if the argument is partially passed in registers. */
7504 if (type && AGGREGATE_TYPE_P (type))
7506 int size = int_size_in_bytes (type);
7508 if (size > UNITS_PER_WORD
7509 && (slotno == SPARC_INT_ARG_MAX - 1
7510 || slotno == SPARC_FP_ARG_MAX - 1))
7511 return UNITS_PER_WORD;
7513 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
7514 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7515 && ! (TARGET_FPU && named)))
7517 /* The complex types are passed as packed types. */
7518 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
7519 && slotno == SPARC_INT_ARG_MAX - 1)
7520 return UNITS_PER_WORD;
7522 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7524 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
7525 > SPARC_FP_ARG_MAX)
7526 return UNITS_PER_WORD;
7530 return 0;
7533 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
7534 Specify whether to pass the argument by reference. */
7536 static bool
7537 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
7538 machine_mode mode, const_tree type,
7539 bool named ATTRIBUTE_UNUSED)
7541 if (TARGET_ARCH32)
7542 /* Original SPARC 32-bit ABI says that structures and unions,
7543 and quad-precision floats are passed by reference.
7544 All base types are passed in registers.
7546 Extended ABI (as implemented by the Sun compiler) says that all
7547 complex floats are passed by reference. Pass complex integers
7548 in registers up to 8 bytes. More generally, enforce the 2-word
7549 cap for passing arguments in registers.
7551 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7552 integers are passed like floats of the same size, that is in
7553 registers up to 8 bytes. Pass all vector floats by reference
7554 like structure and unions. */
7555 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
7556 || mode == SCmode
7557 /* Catch CDImode, TFmode, DCmode and TCmode. */
7558 || GET_MODE_SIZE (mode) > 8
7559 || (type
7560 && TREE_CODE (type) == VECTOR_TYPE
7561 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7562 else
7563 /* Original SPARC 64-bit ABI says that structures and unions
7564 smaller than 16 bytes are passed in registers, as well as
7565 all other base types.
7567 Extended ABI (as implemented by the Sun compiler) says that
7568 complex floats are passed in registers up to 16 bytes. Pass
7569 all complex integers in registers up to 16 bytes. More generally,
7570 enforce the 2-word cap for passing arguments in registers.
7572 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7573 integers are passed like floats of the same size, that is in
7574 registers (up to 16 bytes). Pass all vector floats like structure
7575 and unions. */
7576 return ((type
7577 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
7578 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
7579 /* Catch CTImode and TCmode. */
7580 || GET_MODE_SIZE (mode) > 16);
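/* A few examples of the rules above:

     type			32-bit ABI	64-bit ABI
     double			value		value
     long double (TFmode)	reference	value
     _Complex float (SCmode)	reference	value
     4-byte struct		reference	value
     24-byte struct		reference	reference  */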
7583 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
7584 Update the data in CUM to advance over an argument
7585 of mode MODE and data type TYPE.
7586 TYPE is null for libcalls where that information may not be available. */
7588 static void
7589 sparc_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
7590 const_tree type, bool named)
7592 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7593 int regno, padding;
7595 /* We pass false for incoming here; it doesn't matter. */
7596 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
7598 /* If the argument requires leading padding, add it. */
7599 cum->words += padding;
7601 if (TARGET_ARCH32)
7602 cum->words += (mode == BLKmode
7603 ? CEIL_NWORDS (int_size_in_bytes (type))
7604 : CEIL_NWORDS (GET_MODE_SIZE (mode)));
7605 else
7607 if (type && AGGREGATE_TYPE_P (type))
7609 int size = int_size_in_bytes (type);
7611 if (size <= 8)
7612 ++cum->words;
7613 else if (size <= 16)
7614 cum->words += 2;
7615 else /* passed by reference */
7616 ++cum->words;
7618 else
7619 cum->words += (mode == BLKmode
7620 ? CEIL_NWORDS (int_size_in_bytes (type))
7621 : CEIL_NWORDS (GET_MODE_SIZE (mode)));
7625 /* Implement TARGET_FUNCTION_ARG_PADDING. For the 64-bit ABI, structs
7626 are always stored left-shifted in their argument slot. */
7628 static pad_direction
7629 sparc_function_arg_padding (machine_mode mode, const_tree type)
7631 if (TARGET_ARCH64 && type && AGGREGATE_TYPE_P (type))
7632 return PAD_UPWARD;
7634 /* Fall back to the default. */
7635 return default_function_arg_padding (mode, type);
7638 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
7639 Specify whether to return the return value in memory. */
7641 static bool
7642 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7644 if (TARGET_ARCH32)
7645 /* Original SPARC 32-bit ABI says that structures and unions,
7646 and quad-precision floats are returned in memory. All other
7647 base types are returned in registers.
7649 Extended ABI (as implemented by the Sun compiler) says that
7650 all complex floats are returned in registers (8 FP registers
7651 at most for '_Complex long double'). Return all complex integers
7652 in registers (4 at most for '_Complex long long').
7654 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7655 integers are returned like floats of the same size, that is in
7656 registers up to 8 bytes and in memory otherwise. Return all
7657 vector floats in memory like structures and unions; note that
7658 they always have BLKmode like the latter. */
7659 return (TYPE_MODE (type) == BLKmode
7660 || TYPE_MODE (type) == TFmode
7661 || (TREE_CODE (type) == VECTOR_TYPE
7662 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7663 else
7664 /* Original SPARC 64-bit ABI says that structures and unions
7665 of at most 32 bytes are returned in registers, as well as
7666 all other base types.
7668 Extended ABI (as implemented by the Sun compiler) says that all
7669 complex floats are returned in registers (8 FP registers at most
7670 for '_Complex long double'). Return all complex integers in
7671 registers (4 at most for '_Complex TItype').
7673 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7674 integers are returned like floats of the same size, that is in
7675 registers. Return all vector floats like structures and unions;
7676 note that they always have BLKmode like the latter. */
7677 return (TYPE_MODE (type) == BLKmode
7678 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
7681 /* Handle the TARGET_STRUCT_VALUE target hook.
7682 Return where to find the structure return value address. */
7684 static rtx
7685 sparc_struct_value_rtx (tree fndecl, int incoming)
7687 if (TARGET_ARCH64)
7688 return 0;
7689 else
7691 rtx mem;
7693 if (incoming)
7694 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
7695 STRUCT_VALUE_OFFSET));
7696 else
7697 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
7698 STRUCT_VALUE_OFFSET));
7700 /* Only follow the SPARC ABI for fixed-size structure returns.
7701 Variable size structure returns are handled per the normal
7702 procedures in GCC. This is enabled by -mstd-struct-return. */
7703 if (incoming == 2
7704 && sparc_std_struct_return
7705 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
7706 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
7708 /* We must check and adjust the return address, as it is optional
7709 as to whether the return object is really provided. */
7710 rtx ret_reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
7711 rtx scratch = gen_reg_rtx (SImode);
7712 rtx_code_label *endlab = gen_label_rtx ();
7714 /* Calculate the return object size. */
7715 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
7716 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
7717 /* Construct a temporary return value. */
7718 rtx temp_val
7719 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
7721 /* Implement SPARC 32-bit psABI callee return struct checking:
7723 Fetch the instruction where we will return to and see if
7724 it's an unimp instruction (the most significant 10 bits
7725 will be zero). */
7726 emit_move_insn (scratch, gen_rtx_MEM (SImode,
7727 plus_constant (Pmode,
7728 ret_reg, 8)));
7729 /* Assume the size is valid and pre-adjust. */
7730 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
7731 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
7732 0, endlab);
7733 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
7734 /* Write the address of the memory pointed to by temp_val into
7735 the memory pointed to by mem. */
7736 emit_move_insn (mem, XEXP (temp_val, 0));
7737 emit_label (endlab);
7740 return mem;
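/* In scalar pseudo-C, the check emitted above behaves roughly as
   follows (an editorial sketch; RET is the raw return address and
   SIZE the constant size of the returned type):

     insn = *(unsigned int *) (ret + 8);
     ret += 4;                     assume the unimp insn is present
     if (insn != (size & 0xfff))
       {
         ret -= 4;                 no matching unimp: undo the
         mem = &temp_val;          adjustment and redirect the store
       }                           to a dummy object
*/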
7744 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
7745 For v9, function return values are subject to the same rules as arguments,
7746 except that up to 32 bytes may be returned in registers. */
7748 static rtx
7749 sparc_function_value_1 (const_tree type, machine_mode mode,
7750 bool outgoing)
7752 /* Beware that the two values are swapped here wrt function_arg. */
7753 int regbase = (outgoing
7754 ? SPARC_INCOMING_INT_ARG_FIRST
7755 : SPARC_OUTGOING_INT_ARG_FIRST);
7756 enum mode_class mclass = GET_MODE_CLASS (mode);
7757 int regno;
7759 /* Vector types deserve special treatment because they are polymorphic wrt
7760 their mode, depending upon whether VIS instructions are enabled. */
7761 if (type && TREE_CODE (type) == VECTOR_TYPE)
7763 HOST_WIDE_INT size = int_size_in_bytes (type);
7764 gcc_assert ((TARGET_ARCH32 && size <= 8)
7765 || (TARGET_ARCH64 && size <= 32));
7767 if (mode == BLKmode)
7768 return function_arg_vector_value (size, SPARC_FP_ARG_FIRST);
7770 mclass = MODE_FLOAT;
7773 if (TARGET_ARCH64 && type)
7775 /* Structures up to 32 bytes in size are returned in registers. */
7776 if (TREE_CODE (type) == RECORD_TYPE)
7778 HOST_WIDE_INT size = int_size_in_bytes (type);
7779 gcc_assert (size <= 32);
7781 return function_arg_record_value (type, mode, 0, 1, regbase);
7784 /* Unions up to 32 bytes in size are returned in integer registers. */
7785 else if (TREE_CODE (type) == UNION_TYPE)
7787 HOST_WIDE_INT size = int_size_in_bytes (type);
7788 gcc_assert (size <= 32);
7790 return function_arg_union_value (size, mode, 0, regbase);
7793 /* Objects that require it are returned in FP registers. */
7794 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7795 ;
7797 /* All other aggregate types are returned in an integer register in a
7798 mode corresponding to the size of the type. */
7799 else if (AGGREGATE_TYPE_P (type))
7803 HOST_WIDE_INT size = int_size_in_bytes (type);
7804 gcc_assert (size <= 32);
7806 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7808 /* ??? We probably should have made the same ABI change in
7809 3.4.0 as the one we made for unions. The latter was
7810 required by the SCD though, while the former is not
7811 specified, so we favored compatibility and efficiency.
7813 Now we're stuck for aggregates larger than 16 bytes,
7814 because OImode vanished in the meantime. Let's not
7815 try to be unduly clever, and simply follow the ABI
7816 for unions in that case. */
7817 if (mode == BLKmode)
7818 return function_arg_union_value (size, mode, 0, regbase);
7819 else
7820 mclass = MODE_INT;
7823 /* We should only have pointer and integer types at this point. This
7824 must match sparc_promote_function_mode. */
7825 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7826 mode = word_mode;
7829 /* We should only have pointer and integer types at this point, except with
7830 -freg-struct-return. This must match sparc_promote_function_mode. */
7831 else if (TARGET_ARCH32
7832 && !(type && AGGREGATE_TYPE_P (type))
7833 && mclass == MODE_INT
7834 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7835 mode = word_mode;
7837 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
7838 regno = SPARC_FP_ARG_FIRST;
7839 else
7840 regno = regbase;
7842 return gen_rtx_REG (mode, regno);
7845 /* Handle TARGET_FUNCTION_VALUE.
7846 On the SPARC, the value is found in the first "output" register, but the
7847 called function leaves it in the first "input" register. */
7849 static rtx
7850 sparc_function_value (const_tree valtype,
7851 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
7852 bool outgoing)
7854 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
7857 /* Handle TARGET_LIBCALL_VALUE. */
7859 static rtx
7860 sparc_libcall_value (machine_mode mode,
7861 const_rtx fun ATTRIBUTE_UNUSED)
7863 return sparc_function_value_1 (NULL_TREE, mode, false);
7866 /* Handle FUNCTION_VALUE_REGNO_P.
7867 On the SPARC, the first "output" reg is used for integer values, and the
7868 first floating point register is used for floating point values. */
7870 static bool
7871 sparc_function_value_regno_p (const unsigned int regno)
7873 return (regno == 8 || (TARGET_FPU && regno == 32));
7876 /* Do what is necessary for `va_start'. We look at the current function
7877 to determine if stdarg or varargs is used and return the address of
7878 the first unnamed parameter. */
7880 static rtx
7881 sparc_builtin_saveregs (void)
7883 int first_reg = crtl->args.info.words;
7884 rtx address;
7885 int regno;
7887 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
7888 emit_move_insn (gen_rtx_MEM (word_mode,
7889 gen_rtx_PLUS (Pmode,
7890 frame_pointer_rtx,
7891 GEN_INT (FIRST_PARM_OFFSET (0)
7892 + (UNITS_PER_WORD
7893 * regno)))),
7894 gen_rtx_REG (word_mode,
7895 SPARC_INCOMING_INT_ARG_FIRST + regno));
7897 address = gen_rtx_PLUS (Pmode,
7898 frame_pointer_rtx,
7899 GEN_INT (FIRST_PARM_OFFSET (0)
7900 + UNITS_PER_WORD * first_reg));
7902 return address;
7905 /* Implement `va_start' for stdarg. */
7907 static void
7908 sparc_va_start (tree valist, rtx nextarg)
7910 nextarg = expand_builtin_saveregs ();
7911 std_expand_builtin_va_start (valist, nextarg);
7914 /* Implement `va_arg' for stdarg. */
7916 static tree
7917 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7918 gimple_seq *post_p)
7920 HOST_WIDE_INT size, rsize, align;
7921 tree addr, incr;
7922 bool indirect;
7923 tree ptrtype = build_pointer_type (type);
7925 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
7927 indirect = true;
7928 size = rsize = UNITS_PER_WORD;
7929 align = 0;
7931 else
7933 indirect = false;
7934 size = int_size_in_bytes (type);
7935 rsize = ROUND_UP (size, UNITS_PER_WORD);
7936 align = 0;
7938 if (TARGET_ARCH64)
7940 /* For SPARC64, objects requiring 16-byte alignment get it. */
7941 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7942 align = 2 * UNITS_PER_WORD;
7944 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7945 are left-justified in their slots. */
7946 if (AGGREGATE_TYPE_P (type))
7948 if (size == 0)
7949 size = rsize = UNITS_PER_WORD;
7950 else
7951 size = rsize;
7956 incr = valist;
7957 if (align)
7959 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7960 incr = fold_convert (sizetype, incr);
7961 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7962 size_int (-align));
7963 incr = fold_convert (ptr_type_node, incr);
7966 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7967 addr = incr;
7969 if (BYTES_BIG_ENDIAN && size < rsize)
7970 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7972 if (indirect)
7974 addr = fold_convert (build_pointer_type (ptrtype), addr);
7975 addr = build_va_arg_indirect_ref (addr);
7978 /* If the address isn't aligned properly for the type, we need a temporary.
7979 FIXME: This is inefficient, usually we can do this in registers. */
7980 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7982 tree tmp = create_tmp_var (type, "va_arg_tmp");
7983 tree dest_addr = build_fold_addr_expr (tmp);
7984 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7985 3, dest_addr, addr, size_int (rsize));
7986 TREE_ADDRESSABLE (tmp) = 1;
7987 gimplify_and_add (copy, pre_p);
7988 addr = dest_addr;
7991 else
7992 addr = fold_convert (ptrtype, addr);
7994 incr = fold_build_pointer_plus_hwi (incr, rsize);
7995 gimplify_assign (valist, incr, post_p);
7997 return build_va_arg_indirect_ref (addr);
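/* An editorial sketch of the pointer rounding performed above for
   over-aligned types, in plain C (assuming pointers fit in an
   unsigned long and ALIGN is a power of two):  */
#if 0
static char *
va_round_up (char *p, unsigned long align)
{
  /* (p + align - 1) & -align rounds P up to the next multiple of
     ALIGN, exactly like the BIT_AND_EXPR built above.  */
  return (char *) (((unsigned long) p + align - 1) & -align);
}
#endif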
8000 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
8001 Specify whether the vector mode is supported by the hardware. */
8003 static bool
8004 sparc_vector_mode_supported_p (machine_mode mode)
8006 return TARGET_VIS && VECTOR_MODE_P (mode);
8009 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
8011 static machine_mode
8012 sparc_preferred_simd_mode (scalar_mode mode)
8014 if (TARGET_VIS)
8015 switch (mode)
8017 case E_SImode:
8018 return V2SImode;
8019 case E_HImode:
8020 return V4HImode;
8021 case E_QImode:
8022 return V8QImode;
8024 default:;
8027 return word_mode;
8030 /* Implement TARGET_CAN_FOLLOW_JUMP. */
8032 static bool
8033 sparc_can_follow_jump (const rtx_insn *follower, const rtx_insn *followee)
8035 /* Do not fold unconditional jumps that have been created for crossing
8036 partition boundaries. */
8037 if (CROSSING_JUMP_P (followee) && !CROSSING_JUMP_P (follower))
8038 return false;
8040 return true;
8043 /* Return the string to output an unconditional branch to LABEL, which is
8044 the operand number of the label.
8046 DEST is the destination insn (i.e. the label), INSN is the source. */
8048 const char *
8049 output_ubranch (rtx dest, rtx_insn *insn)
8051 static char string[64];
8052 bool v9_form = false;
8053 int delta;
8054 char *p;
8056 /* Even if we are trying to use cbcond for this, evaluate
8057 whether we can use V9 branches as our backup plan. */
8058 delta = 5000000;
8059 if (!CROSSING_JUMP_P (insn) && INSN_ADDRESSES_SET_P ())
8060 delta = (INSN_ADDRESSES (INSN_UID (dest))
8061 - INSN_ADDRESSES (INSN_UID (insn)));
8063 /* Leave some instructions for "slop". */
8064 if (TARGET_V9 && delta >= -260000 && delta < 260000)
8065 v9_form = true;
8067 if (TARGET_CBCOND)
8069 bool emit_nop = emit_cbcond_nop (insn);
8070 bool far = false;
8071 const char *rval;
8073 if (delta < -500 || delta > 500)
8074 far = true;
8076 if (far)
8078 if (v9_form)
8079 rval = "ba,a,pt\t%%xcc, %l0";
8080 else
8081 rval = "b,a\t%l0";
8083 else
8085 if (emit_nop)
8086 rval = "cwbe\t%%g0, %%g0, %l0\n\tnop";
8087 else
8088 rval = "cwbe\t%%g0, %%g0, %l0";
8090 return rval;
8093 if (v9_form)
8094 strcpy (string, "ba%*,pt\t%%xcc, ");
8095 else
8096 strcpy (string, "b%*\t");
8098 p = strchr (string, '\0');
8099 *p++ = '%';
8100 *p++ = 'l';
8101 *p++ = '0';
8102 *p++ = '%';
8103 *p++ = '(';
8104 *p = '\0';
8106 return string;
8109 /* Return the string to output a conditional branch to LABEL, which is
8110 the operand number of the label. OP is the conditional expression.
8111 XEXP (OP, 0) is assumed to be a condition code register (integer or
8112 floating point) and its mode specifies what kind of comparison we made.
8114 DEST is the destination insn (i.e. the label), INSN is the source.
8116 REVERSED is nonzero if we should reverse the sense of the comparison.
8118 ANNUL is nonzero if we should generate an annulling branch. */
8120 const char *
8121 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
8122 rtx_insn *insn)
8124 static char string[64];
8125 enum rtx_code code = GET_CODE (op);
8126 rtx cc_reg = XEXP (op, 0);
8127 machine_mode mode = GET_MODE (cc_reg);
8128 const char *labelno, *branch;
8129 int spaces = 8, far;
8130 char *p;
8132 /* v9 branches are limited to +-1MB. If it is too far away,
8133 change
8135 bne,pt %xcc, .LC30
8137 to
8139 be,pn %xcc, .+12
8140 nop
8141 ba .LC30
8143 and
8145 fbne,a,pn %fcc2, .LC29
8147 to
8149 fbe,pt %fcc2, .+16
8150 nop
8151 ba .LC29 */
8153 far = TARGET_V9 && (get_attr_length (insn) >= 3);
8154 if (reversed ^ far)
8156 /* Reversal of FP compares takes care -- an ordered compare
8157 becomes an unordered compare and vice versa. */
8158 if (mode == CCFPmode || mode == CCFPEmode)
8159 code = reverse_condition_maybe_unordered (code);
8160 else
8161 code = reverse_condition (code);
8164 /* Start by writing the branch condition. */
8165 if (mode == CCFPmode || mode == CCFPEmode)
8167 switch (code)
8169 case NE:
8170 branch = "fbne";
8171 break;
8172 case EQ:
8173 branch = "fbe";
8174 break;
8175 case GE:
8176 branch = "fbge";
8177 break;
8178 case GT:
8179 branch = "fbg";
8180 break;
8181 case LE:
8182 branch = "fble";
8183 break;
8184 case LT:
8185 branch = "fbl";
8186 break;
8187 case UNORDERED:
8188 branch = "fbu";
8189 break;
8190 case ORDERED:
8191 branch = "fbo";
8192 break;
8193 case UNGT:
8194 branch = "fbug";
8195 break;
8196 case UNLT:
8197 branch = "fbul";
8198 break;
8199 case UNEQ:
8200 branch = "fbue";
8201 break;
8202 case UNGE:
8203 branch = "fbuge";
8204 break;
8205 case UNLE:
8206 branch = "fbule";
8207 break;
8208 case LTGT:
8209 branch = "fblg";
8210 break;
8211 default:
8212 gcc_unreachable ();
8215 /* ??? !v9: FP branches cannot be preceded by another floating point
8216 insn. Because there is currently no concept of pre-delay slots,
8217 we can fix this only by always emitting a nop before a floating
8218 point branch. */
8220 string[0] = '\0';
8221 if (! TARGET_V9)
8222 strcpy (string, "nop\n\t");
8223 strcat (string, branch);
8225 else
8227 switch (code)
8229 case NE:
8230 if (mode == CCVmode || mode == CCXVmode)
8231 branch = "bvs";
8232 else
8233 branch = "bne";
8234 break;
8235 case EQ:
8236 if (mode == CCVmode || mode == CCXVmode)
8237 branch = "bvc";
8238 else
8239 branch = "be";
8240 break;
8241 case GE:
8242 if (mode == CCNZmode || mode == CCXNZmode)
8243 branch = "bpos";
8244 else
8245 branch = "bge";
8246 break;
8247 case GT:
8248 branch = "bg";
8249 break;
8250 case LE:
8251 branch = "ble";
8252 break;
8253 case LT:
8254 if (mode == CCNZmode || mode == CCXNZmode)
8255 branch = "bneg";
8256 else
8257 branch = "bl";
8258 break;
8259 case GEU:
8260 branch = "bgeu";
8261 break;
8262 case GTU:
8263 branch = "bgu";
8264 break;
8265 case LEU:
8266 branch = "bleu";
8267 break;
8268 case LTU:
8269 branch = "blu";
8270 break;
8271 default:
8272 gcc_unreachable ();
8274 strcpy (string, branch);
8276 spaces -= strlen (branch);
8277 p = strchr (string, '\0');
8279 /* Now add the annulling, the label, and a possible noop. */
8280 if (annul && ! far)
8282 strcpy (p, ",a");
8283 p += 2;
8284 spaces -= 2;
8287 if (TARGET_V9)
8289 rtx note;
8290 int v8 = 0;
8292 if (! far && insn && INSN_ADDRESSES_SET_P ())
8294 int delta = (INSN_ADDRESSES (INSN_UID (dest))
8295 - INSN_ADDRESSES (INSN_UID (insn)));
8296 /* Leave some instructions for "slop". */
8297 if (delta < -260000 || delta >= 260000)
8298 v8 = 1;
8301 switch (mode)
8303 case E_CCmode:
8304 case E_CCNZmode:
8305 case E_CCCmode:
8306 case E_CCVmode:
8307 labelno = "%%icc, ";
8308 if (v8)
8309 labelno = "";
8310 break;
8311 case E_CCXmode:
8312 case E_CCXNZmode:
8313 case E_CCXCmode:
8314 case E_CCXVmode:
8315 labelno = "%%xcc, ";
8316 gcc_assert (!v8);
8317 break;
8318 case E_CCFPmode:
8319 case E_CCFPEmode:
8321 static char v9_fcc_labelno[] = "%%fccX, ";
8322 /* Set the char indicating the number of the fcc reg to use. */
8323 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
8324 labelno = v9_fcc_labelno;
8325 if (v8)
8327 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
8328 labelno = "";
8331 break;
8332 default:
8333 gcc_unreachable ();
8336 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8338 strcpy (p,
8339 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8340 >= profile_probability::even ()) ^ far)
8341 ? ",pt" : ",pn");
8342 p += 3;
8343 spaces -= 3;
8346 else
8347 labelno = "";
8349 if (spaces > 0)
8350 *p++ = '\t';
8351 else
8352 *p++ = ' ';
8353 strcpy (p, labelno);
8354 p = strchr (p, '\0');
8355 if (far)
8357 strcpy (p, ".+12\n\t nop\n\tb\t");
8358 /* Skip the next insn if requested or
8359 if we know that it will be a nop. */
8360 if (annul || ! final_sequence)
8361 p[3] = '6';
8362 p += 14;
8364 *p++ = '%';
8365 *p++ = 'l';
8366 *p++ = label + '0';
8367 *p++ = '%';
8368 *p++ = '#';
8369 *p = '\0';
8371 return string;
8374 /* Emit a library call comparison between floating point X and Y.
8375 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
8376 Return the new operator to be used in the comparison sequence.
8378 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
8379 values as arguments instead of the TFmode registers themselves,
8380 that's why we cannot call emit_float_lib_cmp. */
8382 rtx
8383 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
8385 const char *qpfunc;
8386 rtx slot0, slot1, result, tem, tem2, libfunc;
8387 machine_mode mode;
8388 enum rtx_code new_comparison;
8390 switch (comparison)
8392 case EQ:
8393 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
8394 break;
8396 case NE:
8397 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
8398 break;
8400 case GT:
8401 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
8402 break;
8404 case GE:
8405 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
8406 break;
8408 case LT:
8409 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
8410 break;
8412 case LE:
8413 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
8414 break;
8416 case ORDERED:
8417 case UNORDERED:
8418 case UNGT:
8419 case UNLT:
8420 case UNEQ:
8421 case UNGE:
8422 case UNLE:
8423 case LTGT:
8424 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
8425 break;
8427 default:
8428 gcc_unreachable ();
8431 if (TARGET_ARCH64)
8433 if (MEM_P (x))
8435 tree expr = MEM_EXPR (x);
8436 if (expr)
8437 mark_addressable (expr);
8438 slot0 = x;
8440 else
8442 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8443 emit_move_insn (slot0, x);
8446 if (MEM_P (y))
8448 tree expr = MEM_EXPR (y);
8449 if (expr)
8450 mark_addressable (expr);
8451 slot1 = y;
8453 else
8455 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8456 emit_move_insn (slot1, y);
8459 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8460 emit_library_call (libfunc, LCT_NORMAL,
8461 DImode,
8462 XEXP (slot0, 0), Pmode,
8463 XEXP (slot1, 0), Pmode);
8464 mode = DImode;
8466 else
8468 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8469 emit_library_call (libfunc, LCT_NORMAL,
8470 SImode,
8471 x, TFmode, y, TFmode);
8472 mode = SImode;
8476 /* Immediately move the result of the libcall into a pseudo
8477 register so reload doesn't clobber the value if it needs
8478 the return register for a spill reg. */
8479 result = gen_reg_rtx (mode);
8480 emit_move_insn (result, hard_libcall_value (mode, libfunc));
8482 switch (comparison)
8484 default:
8485 return gen_rtx_NE (VOIDmode, result, const0_rtx);
8486 case ORDERED:
8487 case UNORDERED:
8488 new_comparison = (comparison == UNORDERED ? EQ : NE);
8489 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
8490 case UNGT:
8491 case UNGE:
8492 new_comparison = (comparison == UNGT ? GT : NE);
8493 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
8494 case UNLE:
8495 return gen_rtx_NE (VOIDmode, result, const2_rtx);
8496 case UNLT:
8497 tem = gen_reg_rtx (mode);
8498 if (TARGET_ARCH32)
8499 emit_insn (gen_andsi3 (tem, result, const1_rtx));
8500 else
8501 emit_insn (gen_anddi3 (tem, result, const1_rtx));
8502 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
8503 case UNEQ:
8504 case LTGT:
8505 tem = gen_reg_rtx (mode);
8506 if (TARGET_ARCH32)
8507 emit_insn (gen_addsi3 (tem, result, const1_rtx));
8508 else
8509 emit_insn (gen_adddi3 (tem, result, const1_rtx));
8510 tem2 = gen_reg_rtx (mode);
8511 if (TARGET_ARCH32)
8512 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
8513 else
8514 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
8515 new_comparison = (comparison == UNEQ ? EQ : NE);
8516 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
8519 gcc_unreachable ();
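/* For reference (an editorial note): _Q_cmp/_Qp_cmp return the
   fcc-style encoding 0 = equal, 1 = less, 2 = greater, 3 = unordered,
   which is what the decodings above rely on:

     UNORDERED  result == 3
     ORDERED    result != 3
     UNGT       result > 1                 greater or unordered
     UNGE       result != 1                not less
     UNLE       result != 2                not greater
     UNLT       (result & 1) != 0          less or unordered
     UNEQ       ((result + 1) & 2) == 0    equal or unordered
     LTGT       ((result + 1) & 2) != 0    less or greater  */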
8522 /* Generate an unsigned DImode to FP conversion. This is the same code
8523 optabs would emit if we didn't have TFmode patterns. */
8525 void
8526 sparc_emit_floatunsdi (rtx *operands, machine_mode mode)
8528 rtx i0, i1, f0, in, out;
8530 out = operands[0];
8531 in = force_reg (DImode, operands[1]);
8532 rtx_code_label *neglab = gen_label_rtx ();
8533 rtx_code_label *donelab = gen_label_rtx ();
8534 i0 = gen_reg_rtx (DImode);
8535 i1 = gen_reg_rtx (DImode);
8536 f0 = gen_reg_rtx (mode);
8538 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
8540 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
8541 emit_jump_insn (gen_jump (donelab));
8542 emit_barrier ();
8544 emit_label (neglab);
8546 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
8547 emit_insn (gen_anddi3 (i1, in, const1_rtx));
8548 emit_insn (gen_iordi3 (i0, i0, i1));
8549 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
8550 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
8552 emit_label (donelab);
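/* An editorial sketch of the lowering above in plain C (assuming a
   64-bit long long and IEEE arithmetic): when the top bit of X is
   set, halve it while keeping the low bit sticky so the final
   rounding is unaffected, convert, then double the result.  */
#if 0
static double
floatunsdi_sketch (unsigned long long x)
{
  if ((long long) x >= 0)
    return (double) (long long) x;        /* fits in a signed value */
  unsigned long long i0 = (x >> 1) | (x & 1);
  double f0 = (double) (long long) i0;    /* now non-negative */
  return f0 + f0;
}
#endif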
8555 /* Generate an FP to unsigned DImode conversion. This is the same code
8556 optabs would emit if we didn't have TFmode patterns. */
8558 void
8559 sparc_emit_fixunsdi (rtx *operands, machine_mode mode)
8561 rtx i0, i1, f0, in, out, limit;
8563 out = operands[0];
8564 in = force_reg (mode, operands[1]);
8565 rtx_code_label *neglab = gen_label_rtx ();
8566 rtx_code_label *donelab = gen_label_rtx ();
8567 i0 = gen_reg_rtx (DImode);
8568 i1 = gen_reg_rtx (DImode);
8569 limit = gen_reg_rtx (mode);
8570 f0 = gen_reg_rtx (mode);
8572 emit_move_insn (limit,
8573 const_double_from_real_value (
8574 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
8575 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
8577 emit_insn (gen_rtx_SET (out,
8578 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
8579 emit_jump_insn (gen_jump (donelab));
8580 emit_barrier ();
8582 emit_label (neglab);
8584 emit_insn (gen_rtx_SET (f0, gen_rtx_MINUS (mode, in, limit)));
8585 emit_insn (gen_rtx_SET (i0,
8586 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
8587 emit_insn (gen_movdi (i1, const1_rtx));
8588 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
8589 emit_insn (gen_xordi3 (out, i0, i1));
8591 emit_label (donelab);
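/* An editorial sketch of the lowering above in plain C (assuming a
   64-bit long long): values below 2^63 convert directly; larger ones
   are biased down by 2^63 before the signed conversion, and the sign
   bit is restored with an XOR.  */
#if 0
static unsigned long long
fixunsdi_sketch (double x)
{
  const double limit = 9223372036854775808.0;   /* 2^63 */
  if (x < limit)
    return (unsigned long long) (long long) x;
  long long i0 = (long long) (x - limit);
  return (unsigned long long) i0 ^ (1ULL << 63);
}
#endif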
8594 /* Return the string to output a compare and branch instruction to DEST.
8595 DEST is the destination insn (i.e. the label), INSN is the source,
8596 and OP is the conditional expression. */
8598 const char *
8599 output_cbcond (rtx op, rtx dest, rtx_insn *insn)
8601 machine_mode mode = GET_MODE (XEXP (op, 0));
8602 enum rtx_code code = GET_CODE (op);
8603 const char *cond_str, *tmpl;
8604 int far, emit_nop, len;
8605 static char string[64];
8606 char size_char;
8608 /* Compare and Branch is limited to +-2KB. If it is too far away,
8609 change
8611 cxbne X, Y, .LC30
8613 to
8615 cxbe X, Y, .+16
8616 nop
8617 ba,pt xcc, .LC30
8618 nop */
8620 len = get_attr_length (insn);
8622 far = len == 4;
8623 emit_nop = len == 2;
8625 if (far)
8626 code = reverse_condition (code);
8628 size_char = ((mode == SImode) ? 'w' : 'x');
8630 switch (code)
8632 case NE:
8633 cond_str = "ne";
8634 break;
8636 case EQ:
8637 cond_str = "e";
8638 break;
8640 case GE:
8641 cond_str = "ge";
8642 break;
8644 case GT:
8645 cond_str = "g";
8646 break;
8648 case LE:
8649 cond_str = "le";
8650 break;
8652 case LT:
8653 cond_str = "l";
8654 break;
8656 case GEU:
8657 cond_str = "cc";
8658 break;
8660 case GTU:
8661 cond_str = "gu";
8662 break;
8664 case LEU:
8665 cond_str = "leu";
8666 break;
8668 case LTU:
8669 cond_str = "cs";
8670 break;
8672 default:
8673 gcc_unreachable ();
8676 if (far)
8678 int veryfar = 1, delta;
8680 if (INSN_ADDRESSES_SET_P ())
8682 delta = (INSN_ADDRESSES (INSN_UID (dest))
8683 - INSN_ADDRESSES (INSN_UID (insn)));
8684 /* Leave some instructions for "slop". */
8685 if (delta >= -260000 && delta < 260000)
8686 veryfar = 0;
8689 if (veryfar)
8690 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
8691 else
8692 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
8694 else
8696 if (emit_nop)
8697 tmpl = "c%cb%s\t%%1, %%2, %%3\n\tnop";
8698 else
8699 tmpl = "c%cb%s\t%%1, %%2, %%3";
8702 snprintf (string, sizeof(string), tmpl, size_char, cond_str);
8704 return string;
8707 /* Return the string to output a conditional branch to LABEL, testing
8708 register REG. LABEL is the operand number of the label; REG is the
8709 operand number of the reg. OP is the conditional expression. The mode
8710 of REG says what kind of comparison we made.
8712 DEST is the destination insn (i.e. the label), INSN is the source.
8714 REVERSED is nonzero if we should reverse the sense of the comparison.
8716 ANNUL is nonzero if we should generate an annulling branch. */
8718 const char *
8719 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
8720 int annul, rtx_insn *insn)
8722 static char string[64];
8723 enum rtx_code code = GET_CODE (op);
8724 machine_mode mode = GET_MODE (XEXP (op, 0));
8725 rtx note;
8726 int far;
8727 char *p;
8729 /* Branches on registers are limited to +-128KB. If it is too far away,
8730 change
8732 brnz,pt %g1, .LC30
8734 to
8736 brz,pn %g1, .+12
8737 nop
8738 ba,pt %xcc, .LC30
8740 and
8742 brgez,a,pn %o1, .LC29
8744 to
8746 brlz,pt %o1, .+16
8747 nop
8748 ba,pt %xcc, .LC29 */
8750 far = get_attr_length (insn) >= 3;
8752 /* If not floating-point or if EQ or NE, we can just reverse the code. */
8753 if (reversed ^ far)
8754 code = reverse_condition (code);
8756 /* Only 64-bit versions of these instructions exist. */
8757 gcc_assert (mode == DImode);
8759 /* Start by writing the branch condition. */
8761 switch (code)
8763 case NE:
8764 strcpy (string, "brnz");
8765 break;
8767 case EQ:
8768 strcpy (string, "brz");
8769 break;
8771 case GE:
8772 strcpy (string, "brgez");
8773 break;
8775 case LT:
8776 strcpy (string, "brlz");
8777 break;
8779 case LE:
8780 strcpy (string, "brlez");
8781 break;
8783 case GT:
8784 strcpy (string, "brgz");
8785 break;
8787 default:
8788 gcc_unreachable ();
8791 p = strchr (string, '\0');
8793 /* Now add the annulling, reg, label, and nop. */
8794 if (annul && ! far)
8796 strcpy (p, ",a");
8797 p += 2;
8800 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8802 strcpy (p,
8803 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8804 >= profile_probability::even ()) ^ far)
8805 ? ",pt" : ",pn");
8806 p += 3;
8809 *p = p < string + 8 ? '\t' : ' ';
8810 p++;
8811 *p++ = '%';
8812 *p++ = '0' + reg;
8813 *p++ = ',';
8814 *p++ = ' ';
8815 if (far)
8817 int veryfar = 1, delta;
8819 if (INSN_ADDRESSES_SET_P ())
8821 delta = (INSN_ADDRESSES (INSN_UID (dest))
8822 - INSN_ADDRESSES (INSN_UID (insn)));
8823 /* Leave some instructions for "slop". */
8824 if (delta >= -260000 && delta < 260000)
8825 veryfar = 0;
8828 strcpy (p, ".+12\n\t nop\n\t");
8829 /* Skip the next insn if requested or
8830 if we know that it will be a nop. */
8831 if (annul || ! final_sequence)
8832 p[3] = '6';
8833 p += 12;
8834 if (veryfar)
8836 strcpy (p, "b\t");
8837 p += 2;
8839 else
8841 strcpy (p, "ba,pt\t%%xcc, ");
8842 p += 13;
8845 *p++ = '%';
8846 *p++ = 'l';
8847 *p++ = '0' + label;
8848 *p++ = '%';
8849 *p++ = '#';
8850 *p = '\0';
8852 return string;
8855 /* Return 1, if any of the registers of the instruction are %l[0-7] or %o[0-7].
8856 Such instructions cannot be used in the delay slot of a return insn on V9.
8857 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
8858 */
8860 static int
8861 epilogue_renumber (register rtx *where, int test)
8863 register const char *fmt;
8864 register int i;
8865 register enum rtx_code code;
8867 if (*where == 0)
8868 return 0;
8870 code = GET_CODE (*where);
8872 switch (code)
8874 case REG:
8875 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
8876 return 1;
8877 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
8878 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
8879 /* fallthrough */
8880 case SCRATCH:
8881 case CC0:
8882 case PC:
8883 case CONST_INT:
8884 case CONST_WIDE_INT:
8885 case CONST_DOUBLE:
8886 return 0;
8888 /* Do not replace the frame pointer with the stack pointer because
8889 it can cause the delayed instruction to load below the stack.
8890 This occurs when instructions like:
8892 (set (reg/i:SI 24 %i0)
8893 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
8894 (const_int -20 [0xffffffec])) 0))
8896 are in the return delayed slot. */
8897 case PLUS:
8898 if (GET_CODE (XEXP (*where, 0)) == REG
8899 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
8900 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
8901 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
8902 return 1;
8903 break;
8905 case MEM:
8906 if (SPARC_STACK_BIAS
8907 && GET_CODE (XEXP (*where, 0)) == REG
8908 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
8909 return 1;
8910 break;
8912 default:
8913 break;
8916 fmt = GET_RTX_FORMAT (code);
8918 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8920 if (fmt[i] == 'E')
8922 register int j;
8923 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
8924 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
8925 return 1;
8927 else if (fmt[i] == 'e'
8928 && epilogue_renumber (&(XEXP (*where, i)), test))
8929 return 1;
8931 return 0;
8934 /* Leaf functions and non-leaf functions have different needs. */
8936 static const int
8937 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
8939 static const int
8940 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
8942 static const int *const reg_alloc_orders[] = {
8943 reg_leaf_alloc_order,
8944 reg_nonleaf_alloc_order};
8946 void
8947 order_regs_for_local_alloc (void)
8949 static int last_order_nonleaf = 1;
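/* Register 15 is %o7, into which a call instruction writes its return
   address, so it has ever been live exactly when the current function
   makes calls, i.e. is not a leaf function. */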
8951 if (df_regs_ever_live_p (15) != last_order_nonleaf)
8953 last_order_nonleaf = !last_order_nonleaf;
8954 memcpy ((char *) reg_alloc_order,
8955 (const char *) reg_alloc_orders[last_order_nonleaf],
8956 FIRST_PSEUDO_REGISTER * sizeof (int));
8960 /* Return 1 if REG and MEM are legitimate enough to allow the various
8961 MEM<-->REG splits to be run. */
8963 int
8964 sparc_split_reg_mem_legitimate (rtx reg, rtx mem)
8966 /* Punt if we are here by mistake. */
8967 gcc_assert (reload_completed);
8969 /* We must have an offsettable memory reference. */
8970 if (!offsettable_memref_p (mem))
8971 return 0;
8973 /* If we have legitimate args for ldd/std, we do not want
8974 the split to happen. */
8975 if ((REGNO (reg) % 2) == 0 && mem_min_alignment (mem, 8))
8976 return 0;
8978 /* Success. */
8979 return 1;
8982 /* Split a REG <-- MEM move into a pair of moves in MODE. */
8984 void
8985 sparc_split_reg_mem (rtx dest, rtx src, machine_mode mode)
8987 rtx high_part = gen_highpart (mode, dest);
8988 rtx low_part = gen_lowpart (mode, dest);
8989 rtx word0 = adjust_address (src, mode, 0);
8990 rtx word1 = adjust_address (src, mode, 4);
8992 if (reg_overlap_mentioned_p (high_part, word1))
8994 emit_move_insn_1 (low_part, word1);
8995 emit_move_insn_1 (high_part, word0);
8997 else
8999 emit_move_insn_1 (high_part, word0);
9000 emit_move_insn_1 (low_part, word1);
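/* An editorial example of the overlap case handled above: splitting
   the 64-bit load %o0:%o1 <- [%o0] must emit

     ld  [%o0+4], %o1
     ld  [%o0], %o0

   in that order, since loading the high word first would clobber the
   base register still needed for the second address.  */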
9004 /* Split a MEM <-- REG move into a pair of moves in MODE. */
9006 void
9007 sparc_split_mem_reg (rtx dest, rtx src, machine_mode mode)
9009 rtx word0 = adjust_address (dest, mode, 0);
9010 rtx word1 = adjust_address (dest, mode, 4);
9011 rtx high_part = gen_highpart (mode, src);
9012 rtx low_part = gen_lowpart (mode, src);
9014 emit_move_insn_1 (word0, high_part);
9015 emit_move_insn_1 (word1, low_part);
9018 /* Like sparc_split_reg_mem_legitimate but for REG <--> REG moves. */
9020 int
9021 sparc_split_reg_reg_legitimate (rtx reg1, rtx reg2)
9023 /* Punt if we are here by mistake. */
9024 gcc_assert (reload_completed);
9026 if (GET_CODE (reg1) == SUBREG)
9027 reg1 = SUBREG_REG (reg1);
9028 if (GET_CODE (reg1) != REG)
9029 return 0;
9030 const int regno1 = REGNO (reg1);
9032 if (GET_CODE (reg2) == SUBREG)
9033 reg2 = SUBREG_REG (reg2);
9034 if (GET_CODE (reg2) != REG)
9035 return 0;
9036 const int regno2 = REGNO (reg2);
9038 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
9039 return 1;
9041 if (TARGET_VIS3)
9043 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
9044 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
9045 return 1;
9048 return 0;
9051 /* Split a REG <--> REG move into a pair of moves in MODE. */
9053 void
9054 sparc_split_reg_reg (rtx dest, rtx src, machine_mode mode)
9056 rtx dest1 = gen_highpart (mode, dest);
9057 rtx dest2 = gen_lowpart (mode, dest);
9058 rtx src1 = gen_highpart (mode, src);
9059 rtx src2 = gen_lowpart (mode, src);
9061 /* Now emit using the real source and destination we found, swapping
9062 the order if we detect overlap. */
9063 if (reg_overlap_mentioned_p (dest1, src2))
9065 emit_move_insn_1 (dest2, src2);
9066 emit_move_insn_1 (dest1, src1);
9068 else
9070 emit_move_insn_1 (dest1, src1);
9071 emit_move_insn_1 (dest2, src2);
9075 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
9076 This makes them candidates for using ldd and std insns.
9078 Note reg1 and reg2 *must* be hard registers. */
9080 int
9081 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
9083 /* We might have been passed a SUBREG. */
9084 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
9085 return 0;
9087 if (REGNO (reg1) % 2 != 0)
9088 return 0;
9090 /* Integer ldd is deprecated in SPARC V9. */
9091 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
9092 return 0;
9094 return (REGNO (reg1) == REGNO (reg2) - 1);
9097 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
9098 an ldd or std insn.
9100 This can only happen when addr1 and addr2, the addresses in mem1
9101 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
9102 addr1 must also be aligned on a 64-bit boundary.
9104 Also, if dependent_reg_rtx is not null, it should not be used to
9105 compute the address for mem1, i.e. we cannot optimize a sequence
9106 like:
9107 ld [%o0], %o0
9108 ld [%o0 + 4], %o1
9109 to
9110 ldd [%o0], %o0
9111 nor:
9112 ld [%g3 + 4], %g3
9113 ld [%g3], %g2
9114 to
9115 ldd [%g3], %g2
9117 But, note that the transformation from:
9118 ld [%g2 + 4], %g3
9119 ld [%g2], %g2
9120 to
9121 ldd [%g2], %g2
9122 is perfectly fine. Thus, the peephole2 patterns always pass us
9123 the destination register of the first load, never the second one.
9125 For stores we don't have a similar problem, so dependent_reg_rtx is
9126 NULL_RTX. */
9128 int
9129 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
9131 rtx addr1, addr2;
9132 unsigned int reg1;
9133 HOST_WIDE_INT offset1;
9135 /* The mems cannot be volatile. */
9136 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
9137 return 0;
9139 /* MEM1 should be aligned on a 64-bit boundary. */
9140 if (MEM_ALIGN (mem1) < 64)
9141 return 0;
9143 addr1 = XEXP (mem1, 0);
9144 addr2 = XEXP (mem2, 0);
9146 /* Extract a register number and offset (if used) from the first addr. */
9147 if (GET_CODE (addr1) == PLUS)
9149 /* If not a REG, return zero. */
9150 if (GET_CODE (XEXP (addr1, 0)) != REG)
9151 return 0;
9152 else
9154 reg1 = REGNO (XEXP (addr1, 0));
9155 /* The offset must be constant! */
9156 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
9157 return 0;
9158 offset1 = INTVAL (XEXP (addr1, 1));
9161 else if (GET_CODE (addr1) != REG)
9162 return 0;
9163 else
9165 reg1 = REGNO (addr1);
9166 /* This was a simple (mem (reg)) expression. Offset is 0. */
9167 offset1 = 0;
9170 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
9171 if (GET_CODE (addr2) != PLUS)
9172 return 0;
9174 if (GET_CODE (XEXP (addr2, 0)) != REG
9175 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
9176 return 0;
9178 if (reg1 != REGNO (XEXP (addr2, 0)))
9179 return 0;
9181 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
9182 return 0;
9184 /* The first offset must be evenly divisible by 8 to ensure the
9185 address is 64-bit aligned. */
9186 if (offset1 % 8 != 0)
9187 return 0;
9189 /* The offset for the second addr must be 4 more than the first addr. */
9190 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
9191 return 0;
9193 /* All the tests passed. addr1 and addr2 are valid for ldd and std
9194 instructions. */
9195 return 1;
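/* An editorial example of a pair accepted above: with a doubleword
   aligned base,

     ld  [%o2+8], %o0
     ld  [%o2+12], %o1

   satisfies every test (same base register, constant offsets, first
   offset divisible by 8, second offset = first + 4) and so may be
   fused into ldd [%o2+8], %o0.  */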
9198 /* Return the widened memory access made of MEM1 and MEM2 in MODE. */
9200 rtx
9201 widen_mem_for_ldd_peep (rtx mem1, rtx mem2, machine_mode mode)
9203 rtx x = widen_memory_access (mem1, mode, 0);
9204 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
9205 return x;
9208 /* Return 1 if reg is a pseudo, or is the first register in
9209 a hard register pair. This makes it suitable for use in
9210 ldd and std insns. */
9212 int
9213 register_ok_for_ldd (rtx reg)
9215 /* We might have been passed a SUBREG. */
9216 if (!REG_P (reg))
9217 return 0;
9219 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
9220 return (REGNO (reg) % 2 == 0);
9222 return 1;
9225 /* Return 1 if OP, a MEM, has an address which is known to be
9226 aligned to an 8-byte boundary. */
9228 int
9229 memory_ok_for_ldd (rtx op)
9231 /* In 64-bit mode, we assume that the address is word-aligned. */
9232 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
9233 return 0;
9235 if (! can_create_pseudo_p ()
9236 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
9237 return 0;
9239 return 1;
9242 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
9244 static bool
9245 sparc_print_operand_punct_valid_p (unsigned char code)
9247 if (code == '#'
9248 || code == '*'
9249 || code == '('
9250 || code == ')'
9251 || code == '_'
9252 || code == '&')
9253 return true;
9255 return false;
9258 /* Implement TARGET_PRINT_OPERAND.
9259 Print operand X (an rtx) in assembler syntax to file FILE.
9260 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
9261 For `%' followed by punctuation, CODE is the punctuation and X is null. */
9263 static void
9264 sparc_print_operand (FILE *file, rtx x, int code)
9266 const char *s;
9268 switch (code)
9270 case '#':
9271 /* Output an insn in a delay slot. */
9272 if (final_sequence)
9273 sparc_indent_opcode = 1;
9274 else
9275 fputs ("\n\t nop", file);
9276 return;
9277 case '*':
9278 /* Output an annul flag if there's nothing for the delay slot and we
9279 are optimizing. This is always used with '(' below.
9280 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
9281 this is a dbx bug. So, we only do this when optimizing.
9282 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
9283 Always emit a nop in case the next instruction is a branch. */
9284 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
9285 fputs (",a", file);
9286 return;
9287 case '(':
9288 /* Output a 'nop' if there's nothing for the delay slot and we are
9289 not optimizing. This is always used with '*' above. */
9290 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
9291 fputs ("\n\t nop", file);
9292 else if (final_sequence)
9293 sparc_indent_opcode = 1;
9294 return;
9295 case ')':
9296 /* Output the right displacement from the saved PC on function return.
9297 The caller may have placed an "unimp" insn immediately after the call
9298 so we have to account for it. This insn is used in the 32-bit ABI
9299 when calling a function that returns a non zero-sized structure. The
9300 64-bit ABI doesn't have it. Be careful to have this test be the same
9301 as that for the call. The exception is when sparc_std_struct_return
9302 is enabled, the psABI is followed exactly and the adjustment is made
9303 by the code in sparc_struct_value_rtx. The call emitted is the same
9304 when sparc_std_struct_return is enabled. */
9305 if (!TARGET_ARCH64
9306 && cfun->returns_struct
9307 && !sparc_std_struct_return
9308 && DECL_SIZE (DECL_RESULT (current_function_decl))
9309 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
9310 == INTEGER_CST
9311 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
9312 fputs ("12", file);
9313 else
9314 fputc ('8', file);
9315 return;
9316 case '_':
9317 /* Output the Embedded Medium/Anywhere code model base register. */
9318 fputs (EMBMEDANY_BASE_REG, file);
9319 return;
9320 case '&':
9321 /* Print some local dynamic TLS name. */
9322 if (const char *name = get_some_local_dynamic_name ())
9323 assemble_name (file, name);
9324 else
9325 output_operand_lossage ("'%%&' used without any "
9326 "local dynamic TLS references");
9327 return;
9329 case 'Y':
9330 /* Adjust the operand to take into account a RESTORE operation. */
9331 if (GET_CODE (x) == CONST_INT)
9332 break;
9333 else if (GET_CODE (x) != REG)
9334 output_operand_lossage ("invalid %%Y operand");
9335 else if (REGNO (x) < 8)
9336 fputs (reg_names[REGNO (x)], file);
9337 else if (REGNO (x) >= 24 && REGNO (x) < 32)
9338 fputs (reg_names[REGNO (x)-16], file);
9339 else
9340 output_operand_lossage ("invalid %%Y operand");
9341 return;
9342 case 'L':
9343 /* Print out the low order register name of a register pair. */
9344 if (WORDS_BIG_ENDIAN)
9345 fputs (reg_names[REGNO (x)+1], file);
9346 else
9347 fputs (reg_names[REGNO (x)], file);
9348 return;
9349 case 'H':
9350 /* Print out the high order register name of a register pair. */
9351 if (WORDS_BIG_ENDIAN)
9352 fputs (reg_names[REGNO (x)], file);
9353 else
9354 fputs (reg_names[REGNO (x)+1], file);
9355 return;
9356 case 'R':
9357 /* Print out the second register name of a register pair or quad.
9358 I.e., R (%o0) => %o1. */
9359 fputs (reg_names[REGNO (x)+1], file);
9360 return;
9361 case 'S':
9362 /* Print out the third register name of a register quad.
9363 I.e., S (%o0) => %o2. */
9364 fputs (reg_names[REGNO (x)+2], file);
9365 return;
9366 case 'T':
9367 /* Print out the fourth register name of a register quad.
9368 I.e., T (%o0) => %o3. */
9369 fputs (reg_names[REGNO (x)+3], file);
9370 return;
9371 case 'x':
9372 /* Print a condition code register. */
9373 if (REGNO (x) == SPARC_ICC_REG)
9375 switch (GET_MODE (x))
9377 case E_CCmode:
9378 case E_CCNZmode:
9379 case E_CCCmode:
9380 case E_CCVmode:
9381 s = "%icc";
9382 break;
9383 case E_CCXmode:
9384 case E_CCXNZmode:
9385 case E_CCXCmode:
9386 case E_CCXVmode:
9387 s = "%xcc";
9388 break;
9389 default:
9390 gcc_unreachable ();
9392 fputs (s, file);
9394 else
9395 /* %fccN register */
9396 fputs (reg_names[REGNO (x)], file);
9397 return;
9398 case 'm':
9399 /* Print the operand's address only. */
9400 output_address (GET_MODE (x), XEXP (x, 0));
9401 return;
9402 case 'r':
9403 /* In this case we need a register. Use %g0 if the
9404 operand is const0_rtx. */
9405 if (x == const0_rtx
9406 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
9408 fputs ("%g0", file);
9409 return;
9411 else
9412 break;
9414 case 'A':
9415 switch (GET_CODE (x))
9417 case IOR:
9418 s = "or";
9419 break;
9420 case AND:
9421 s = "and";
9422 break;
9423 case XOR:
9424 s = "xor";
9425 break;
9426 default:
9427 output_operand_lossage ("invalid %%A operand");
9428 s = "";
9429 break;
9431 fputs (s, file);
9432 return;
9434 case 'B':
9435 switch (GET_CODE (x))
9437 case IOR:
9438 s = "orn";
9439 break;
9440 case AND:
9441 s = "andn";
9442 break;
9443 case XOR:
9444 s = "xnor";
9445 break;
9446 default:
9447 output_operand_lossage ("invalid %%B operand");
9448 s = "";
9449 break;
9451 fputs (s, file);
9452 return;
9454 /* This is used by the conditional move instructions. */
9455 case 'C':
9457 machine_mode mode = GET_MODE (XEXP (x, 0));
9458 switch (GET_CODE (x))
9460 case NE:
9461 if (mode == CCVmode || mode == CCXVmode)
9462 s = "vs";
9463 else
9464 s = "ne";
9465 break;
9466 case EQ:
9467 if (mode == CCVmode || mode == CCXVmode)
9468 s = "vc";
9469 else
9470 s = "e";
9471 break;
9472 case GE:
9473 if (mode == CCNZmode || mode == CCXNZmode)
9474 s = "pos";
9475 else
9476 s = "ge";
9477 break;
9478 case GT:
9479 s = "g";
9480 break;
9481 case LE:
9482 s = "le";
9483 break;
9484 case LT:
9485 if (mode == CCNZmode || mode == CCXNZmode)
9486 s = "neg";
9487 else
9488 s = "l";
9489 break;
9490 case GEU:
9491 s = "geu";
9492 break;
9493 case GTU:
9494 s = "gu";
9495 break;
9496 case LEU:
9497 s = "leu";
9498 break;
9499 case LTU:
9500 s = "lu";
9501 break;
9502 case LTGT:
9503 s = "lg";
9504 break;
9505 case UNORDERED:
9506 s = "u";
9507 break;
9508 case ORDERED:
9509 s = "o";
9510 break;
9511 case UNLT:
9512 s = "ul";
9513 break;
9514 case UNLE:
9515 s = "ule";
9516 break;
9517 case UNGT:
9518 s = "ug";
9519 break;
9520 case UNGE:
9521 s = "uge";
9522 break;
9523 case UNEQ:
9524 s = "ue";
9525 break;
9526 default:
9527 output_operand_lossage ("invalid %%C operand");
9528 s = "";
9529 break;
9531 fputs (s, file);
9532 return;
9535 /* These are used by the movr instruction pattern. */
9536 case 'D':
9538 switch (GET_CODE (x))
9540 case NE:
9541 s = "ne";
9542 break;
9543 case EQ:
9544 s = "e";
9545 break;
9546 case GE:
9547 s = "gez";
9548 break;
9549 case LT:
9550 s = "lz";
9551 break;
9552 case LE:
9553 s = "lez";
9554 break;
9555 case GT:
9556 s = "gz";
9557 break;
9558 default:
9559 output_operand_lossage ("invalid %%D operand");
9560 s = "";
9561 break;
9563 fputs (s, file);
9564 return;
9567 case 'b':
9569 /* Print a sign-extended character. */
9570 int i = trunc_int_for_mode (INTVAL (x), QImode);
9571 fprintf (file, "%d", i);
9572 return;
9575 case 'f':
9576 /* Operand must be a MEM; write its address. */
9577 if (GET_CODE (x) != MEM)
9578 output_operand_lossage ("invalid %%f operand");
9579 output_address (GET_MODE (x), XEXP (x, 0));
9580 return;
9582 case 's':
9584 /* Print a sign-extended 32-bit value. */
9585 HOST_WIDE_INT i;
9586 if (GET_CODE(x) == CONST_INT)
9587 i = INTVAL (x);
9588 else
9590 output_operand_lossage ("invalid %%s operand");
9591 return;
9593 i = trunc_int_for_mode (i, SImode);
9594 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
9595 return;
9598 case 0:
9599 /* Do nothing special. */
9600 break;
9602 default:
9603 /* Undocumented flag. */
9604 output_operand_lossage ("invalid operand output code");
9607 if (GET_CODE (x) == REG)
9608 fputs (reg_names[REGNO (x)], file);
9609 else if (GET_CODE (x) == MEM)
9611 fputc ('[', file);
9612 /* Poor Sun assembler doesn't understand absolute addressing. */
9613 if (CONSTANT_P (XEXP (x, 0)))
9614 fputs ("%g0+", file);
9615 output_address (GET_MODE (x), XEXP (x, 0));
9616 fputc (']', file);
9618 else if (GET_CODE (x) == HIGH)
9620 fputs ("%hi(", file);
9621 output_addr_const (file, XEXP (x, 0));
9622 fputc (')', file);
9624 else if (GET_CODE (x) == LO_SUM)
9626 sparc_print_operand (file, XEXP (x, 0), 0);
9627 if (TARGET_CM_MEDMID)
9628 fputs ("+%l44(", file);
9629 else
9630 fputs ("+%lo(", file);
9631 output_addr_const (file, XEXP (x, 1));
9632 fputc (')', file);
9634 else if (GET_CODE (x) == CONST_DOUBLE)
9635 output_operand_lossage ("floating-point constant not a valid immediate operand");
9636 else
9637 output_addr_const (file, x);
9640 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
9642 static void
9643 sparc_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
9645 register rtx base, index = 0;
9646 int offset = 0;
9647 register rtx addr = x;
9649 if (REG_P (addr))
9650 fputs (reg_names[REGNO (addr)], file);
9651 else if (GET_CODE (addr) == PLUS)
9653 if (CONST_INT_P (XEXP (addr, 0)))
9654 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
9655 else if (CONST_INT_P (XEXP (addr, 1)))
9656 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
9657 else
9658 base = XEXP (addr, 0), index = XEXP (addr, 1);
9659 if (GET_CODE (base) == LO_SUM)
9661 gcc_assert (USE_AS_OFFSETABLE_LO10
9662 && TARGET_ARCH64
9663 && ! TARGET_CM_MEDMID);
9664 output_operand (XEXP (base, 0), 0);
9665 fputs ("+%lo(", file);
9666 output_address (VOIDmode, XEXP (base, 1));
9667 fprintf (file, ")+%d", offset);
9669 else
9671 fputs (reg_names[REGNO (base)], file);
9672 if (index == 0)
9673 fprintf (file, "%+d", offset);
9674 else if (REG_P (index))
9675 fprintf (file, "+%s", reg_names[REGNO (index)]);
9676 else if (GET_CODE (index) == SYMBOL_REF
9677 || GET_CODE (index) == LABEL_REF
9678 || GET_CODE (index) == CONST)
9679 fputc ('+', file), output_addr_const (file, index);
9680 else gcc_unreachable ();
9683 else if (GET_CODE (addr) == MINUS
9684 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
9686 output_addr_const (file, XEXP (addr, 0));
9687 fputs ("-(", file);
9688 output_addr_const (file, XEXP (addr, 1));
9689 fputs ("-.)", file);
9691 else if (GET_CODE (addr) == LO_SUM)
9693 output_operand (XEXP (addr, 0), 0);
9694 if (TARGET_CM_MEDMID)
9695 fputs ("+%l44(", file);
9696 else
9697 fputs ("+%lo(", file);
9698 output_address (VOIDmode, XEXP (addr, 1));
9699 fputc (')', file);
9701 else if (flag_pic
9702 && GET_CODE (addr) == CONST
9703 && GET_CODE (XEXP (addr, 0)) == MINUS
9704 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
9705 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
9706 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
9708 addr = XEXP (addr, 0);
9709 output_addr_const (file, XEXP (addr, 0));
9710 /* Group the args of the second CONST in parenthesis. */
9711 fputs ("-(", file);
9712 /* Skip past the second CONST--it does nothing for us. */
9713 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
9714 /* Close the parenthesis. */
9715 fputc (')', file);
9717 else
9719 output_addr_const (file, addr);
9723 /* Target hook for assembling integer objects. The sparc version has
9724 special handling for aligned DI-mode objects. */
9726 static bool
9727 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
9729 /* ??? We only output .xword's for symbols and only then in environments
9730 where the assembler can handle them. */
9731 if (aligned_p && size == 8 && GET_CODE (x) != CONST_INT)
9733 if (TARGET_V9)
9735 assemble_integer_with_op ("\t.xword\t", x);
9736 return true;
9738 else
9740 assemble_aligned_integer (4, const0_rtx);
9741 assemble_aligned_integer (4, x);
9742 return true;
9745 return default_assemble_integer (x, size, aligned_p);
9748 /* Return the value of a code used in the .proc pseudo-op that says
9749 what kind of result this function returns. For non-C types, we pick
9750 the closest C type. */
9752 #ifndef SHORT_TYPE_SIZE
9753 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
9754 #endif
9756 #ifndef INT_TYPE_SIZE
9757 #define INT_TYPE_SIZE BITS_PER_WORD
9758 #endif
9760 #ifndef LONG_TYPE_SIZE
9761 #define LONG_TYPE_SIZE BITS_PER_WORD
9762 #endif
9764 #ifndef LONG_LONG_TYPE_SIZE
9765 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
9766 #endif
9768 #ifndef FLOAT_TYPE_SIZE
9769 #define FLOAT_TYPE_SIZE BITS_PER_WORD
9770 #endif
9772 #ifndef DOUBLE_TYPE_SIZE
9773 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9774 #endif
9776 #ifndef LONG_DOUBLE_TYPE_SIZE
9777 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9778 #endif
9780 unsigned long
9781 sparc_type_code (register tree type)
9783 register unsigned long qualifiers = 0;
9784 register unsigned shift;
9786 /* Only the first 30 bits of the qualifier are valid. We must refrain from
9787 setting more, since some assemblers will give an error for this. Also,
9788 we must be careful to avoid shifts of 32 bits or more to avoid getting
9789 unpredictable results. */
9791 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
9793 switch (TREE_CODE (type))
9795 case ERROR_MARK:
9796 return qualifiers;
9798 case ARRAY_TYPE:
9799 qualifiers |= (3 << shift);
9800 break;
9802 case FUNCTION_TYPE:
9803 case METHOD_TYPE:
9804 qualifiers |= (2 << shift);
9805 break;
9807 case POINTER_TYPE:
9808 case REFERENCE_TYPE:
9809 case OFFSET_TYPE:
9810 qualifiers |= (1 << shift);
9811 break;
9813 case RECORD_TYPE:
9814 return (qualifiers | 8);
9816 case UNION_TYPE:
9817 case QUAL_UNION_TYPE:
9818 return (qualifiers | 9);
9820 case ENUMERAL_TYPE:
9821 return (qualifiers | 10);
9823 case VOID_TYPE:
9824 return (qualifiers | 16);
9826 case INTEGER_TYPE:
9827 /* If this is a range type, consider it to be the underlying
9828 type. */
9829 if (TREE_TYPE (type) != 0)
9830 break;
9832 /* Carefully distinguish all the standard types of C,
9833 without messing up if the language is not C. We do this by
9834 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
9835 look at both the names and the above fields, but that's redundant.
9836 Any type whose size is between two C types will be considered
9837 to be the wider of the two types. Also, we do not have a
9838 special code to use for "long long", so anything wider than
9839 long is treated the same. Note that we can't distinguish
9840 between "int" and "long" in this code if they are the same
9841 size, but that's fine, since neither can the assembler. */
9843 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
9844 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
9846 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
9847 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
9849 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
9850 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
9852 else
9853 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
9855 case REAL_TYPE:
9856 /* If this is a range type, consider it to be the underlying
9857 type. */
9858 if (TREE_TYPE (type) != 0)
9859 break;
9861 /* Carefully distinguish all the standard types of C,
9862 without messing up if the language is not C. */
9864 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
9865 return (qualifiers | 6);
9867 else
9868 return (qualifiers | 7);
9870 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
9871 /* ??? We need to distinguish between double and float complex types,
9872 but I don't know how yet because I can't reach this code from
9873 existing front-ends. */
9874 return (qualifiers | 7); /* Who knows? */
9876 case VECTOR_TYPE:
9877 case BOOLEAN_TYPE: /* Boolean truth value type. */
9878 case LANG_TYPE:
9879 case NULLPTR_TYPE:
9880 return qualifiers;
9882 default:
9883 gcc_unreachable (); /* Not a type! */
9887 return qualifiers;
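/* An editorial worked example: for the C type 'unsigned int *', the
   first pass through the loop ORs in the pointer code at shift 6 and
   descends to the pointed-to type; the second pass returns the base
   code for unsigned int, giving

     (1 << 6) | 14 == 0x4e

   (assuming TYPE_PRECISION matches INT_TYPE_SIZE).  */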
9890 /* Nested function support. */
9892 /* Emit RTL insns to initialize the variable parts of a trampoline.
9893 FNADDR is an RTX for the address of the function's pure code.
9894 CXT is an RTX for the static chain value for the function.
9896 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
9897 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
9898 (to store insns). This is a bit excessive. Perhaps a different
9899 mechanism would be better here.
9901 Emit enough FLUSH insns to synchronize the data and instruction caches. */
9903 static void
9904 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9906 /* SPARC 32-bit trampoline:
9908 sethi %hi(fn), %g1
9909 sethi %hi(static), %g2
9910 jmp %g1+%lo(fn)
9911 or %g2, %lo(static), %g2
9913 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
9914 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
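   For illustration, assuming fnaddr == 0x12345678: the first word
   below becomes 0x03000000 | (0x12345678 >> 10) == 0x03048d15,
   i.e. "sethi %hi(0x12345678), %g1", and the third word becomes
   0x81c06000 | (0x12345678 & 0x3ff) == 0x81c06278,
   i.e. "jmp %g1+0x278".  */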
9917 emit_move_insn
9918 (adjust_address (m_tramp, SImode, 0),
9919 expand_binop (SImode, ior_optab,
9920 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
9921 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
9922 NULL_RTX, 1, OPTAB_DIRECT));
9924 emit_move_insn
9925 (adjust_address (m_tramp, SImode, 4),
9926 expand_binop (SImode, ior_optab,
9927 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
9928 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
9929 NULL_RTX, 1, OPTAB_DIRECT));
9931 emit_move_insn
9932 (adjust_address (m_tramp, SImode, 8),
9933 expand_binop (SImode, ior_optab,
9934 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
9935 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
9936 NULL_RTX, 1, OPTAB_DIRECT));
9938 emit_move_insn
9939 (adjust_address (m_tramp, SImode, 12),
9940 expand_binop (SImode, ior_optab,
9941 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
9942 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
9943 NULL_RTX, 1, OPTAB_DIRECT));
9945 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
9946 aligned on a 16 byte boundary so one flush clears it all. */
9947 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 0))));
9948 if (sparc_cpu != PROCESSOR_ULTRASPARC
9949 && sparc_cpu != PROCESSOR_ULTRASPARC3
9950 && sparc_cpu != PROCESSOR_NIAGARA
9951 && sparc_cpu != PROCESSOR_NIAGARA2
9952 && sparc_cpu != PROCESSOR_NIAGARA3
9953 && sparc_cpu != PROCESSOR_NIAGARA4
9954 && sparc_cpu != PROCESSOR_NIAGARA7
9955 && sparc_cpu != PROCESSOR_M8)
9956 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 8))));
9958 /* Call __enable_execute_stack after writing onto the stack to make sure
9959 the stack address is accessible. */
9960 #ifdef HAVE_ENABLE_EXECUTE_STACK
9961 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9962 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
9963 #endif
9967 /* The 64-bit version is simpler because it makes more sense to load the
9968 values as "immediate" data out of the trampoline. It's also easier since
9969 we can read the PC without clobbering a register. */
9971 static void
9972 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9974 /* SPARC 64-bit trampoline:
9976 rd %pc, %g1
9977 ldx [%g1+24], %g5
9978 jmp %g5
9979 ldx [%g1+16], %g5
9980 +16 bytes data
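   The four constant words stored below encode exactly this sequence;
   the "ldx" at %g1+24 fetches FNADDR and the one at %g1+16 fetches
   CXT, so only the data words, never the code words, depend on the
   particular function.  */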
9983 emit_move_insn (adjust_address (m_tramp, SImode, 0),
9984 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
9985 emit_move_insn (adjust_address (m_tramp, SImode, 4),
9986 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
9987 emit_move_insn (adjust_address (m_tramp, SImode, 8),
9988 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
9989 emit_move_insn (adjust_address (m_tramp, SImode, 12),
9990 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
9991 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
9992 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
9993 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
9995 if (sparc_cpu != PROCESSOR_ULTRASPARC
9996 && sparc_cpu != PROCESSOR_ULTRASPARC3
9997 && sparc_cpu != PROCESSOR_NIAGARA
9998 && sparc_cpu != PROCESSOR_NIAGARA2
9999 && sparc_cpu != PROCESSOR_NIAGARA3
10000 && sparc_cpu != PROCESSOR_NIAGARA4
10001 && sparc_cpu != PROCESSOR_NIAGARA7
10002 && sparc_cpu != PROCESSOR_M8)
10003 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
10005 /* Call __enable_execute_stack after writing onto the stack to make sure
10006 the stack address is accessible. */
10007 #ifdef HAVE_ENABLE_EXECUTE_STACK
10008 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10009 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
10010 #endif
10013 /* Worker for TARGET_TRAMPOLINE_INIT. */
10015 static void
10016 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
10018 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
10019 cxt = force_reg (Pmode, cxt);
10020 if (TARGET_ARCH64)
10021 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
10022 else
10023 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
10026 /* Adjust the cost of a scheduling dependency. Return the new cost of
10027 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
10029 static int
10030 supersparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
10031 int cost)
10033 enum attr_type insn_type;
10035 if (recog_memoized (insn) < 0)
10036 return cost;
10038 insn_type = get_attr_type (insn);
10040 if (dep_type == 0)
10042 /* Data dependency; DEP_INSN writes a register that INSN reads some
10043 cycles later. */
10045 /* if a load, then the dependence must be on the memory address;
10046 add an extra "cycle". Note that the cost could be two cycles
10047 if the reg was written late in an instruction group; we can't tell
10048 here. */
10049 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
10050 return cost + 3;
10052 /* Get the delay only if the address of the store is the dependence. */
10053 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
10055 rtx pat = PATTERN (insn);
10056 rtx dep_pat = PATTERN (dep_insn);
10058 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10059 return cost; /* This should not happen! */
10061 /* The dependency between the two instructions was on the data that
10062 is being stored. Assume that this implies that the address of the
10063 store is not dependent. */
10064 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10065 return cost;
10067 return cost + 3; /* An approximation. */
10070 /* A shift instruction cannot receive its data from an instruction
10071 in the same cycle; add a one cycle penalty. */
10072 if (insn_type == TYPE_SHIFT)
10073 return cost + 3; /* Split before cascade into shift. */
10075 else
10077 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
10078 INSN writes some cycles later. */
10080 /* These are only significant for the fpu unit; writing a fp reg before
10081 the fpu has finished with it stalls the processor. */
10083 /* Reusing an integer register causes no problems. */
10084 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10085 return 0;
10088 return cost;
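/* For illustration: in

     add  %o0, 4, %o1
     ld   [%o1], %o2

   the load's address depends on the add, so the TYPE_LOAD case above
   adds 3 to the cost, whereas in

     ld   [%o0], %o1
     st   %o1, [%o2]

   only the stored data depends on the load, so the cost is kept.  */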
10091 static int
10092 hypersparc_adjust_cost (rtx_insn *insn, int dtype, rtx_insn *dep_insn,
10093 int cost)
10095 enum attr_type insn_type, dep_type;
10096 rtx pat = PATTERN (insn);
10097 rtx dep_pat = PATTERN (dep_insn);
10099 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
10100 return cost;
10102 insn_type = get_attr_type (insn);
10103 dep_type = get_attr_type (dep_insn);
10105 switch (dtype)
10107 case 0:
10108 /* Data dependency; DEP_INSN writes a register that INSN reads some
10109 cycles later. */
10111 switch (insn_type)
10113 case TYPE_STORE:
10114 case TYPE_FPSTORE:
10115 /* Get the delay iff the address of the store is the dependence. */
10116 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10117 return cost;
10119 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10120 return cost;
10121 return cost + 3;
10123 case TYPE_LOAD:
10124 case TYPE_SLOAD:
10125 case TYPE_FPLOAD:
10126 /* If a load, then the dependence must be on the memory address. If
10127 the addresses aren't equal, then it might be a false dependency.  */
10128 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
10130 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
10131 || GET_CODE (SET_DEST (dep_pat)) != MEM
10132 || GET_CODE (SET_SRC (pat)) != MEM
10133 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
10134 XEXP (SET_SRC (pat), 0)))
10135 return cost + 2;
10137 return cost + 8;
10139 break;
10141 case TYPE_BRANCH:
10142 /* Compare to branch latency is 0. There is no benefit from
10143 separating compare and branch. */
10144 if (dep_type == TYPE_COMPARE)
10145 return 0;
10146 /* Floating point compare to branch latency is less than
10147 compare to conditional move. */
10148 if (dep_type == TYPE_FPCMP)
10149 return cost - 1;
10150 break;
10151 default:
10152 break;
10154 break;
10156 case REG_DEP_ANTI:
10157 /* Anti-dependencies only penalize the fpu unit. */
10158 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10159 return 0;
10160 break;
10162 default:
10163 break;
10166 return cost;
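/* For illustration: on HyperSPARC a load whose address matches that of
   the dependent store pays cost + 8 above (store buffer forwarding),
   while a load from a different, or not provably equal, address pays
   only cost + 2.  */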
10169 static int
10170 sparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep, int cost,
10171 unsigned int)
10173 switch (sparc_cpu)
10175 case PROCESSOR_SUPERSPARC:
10176 cost = supersparc_adjust_cost (insn, dep_type, dep, cost);
10177 break;
10178 case PROCESSOR_HYPERSPARC:
10179 case PROCESSOR_SPARCLITE86X:
10180 cost = hypersparc_adjust_cost (insn, dep_type, dep, cost);
10181 break;
10182 default:
10183 break;
10185 return cost;
10188 static void
10189 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
10190 int sched_verbose ATTRIBUTE_UNUSED,
10191 int max_ready ATTRIBUTE_UNUSED)
10194 static int
10195 sparc_use_sched_lookahead (void)
10197 switch (sparc_cpu)
10199 case PROCESSOR_ULTRASPARC:
10200 case PROCESSOR_ULTRASPARC3:
10201 return 4;
10202 case PROCESSOR_SUPERSPARC:
10203 case PROCESSOR_HYPERSPARC:
10204 case PROCESSOR_SPARCLITE86X:
10205 return 3;
10206 case PROCESSOR_NIAGARA4:
10207 case PROCESSOR_NIAGARA7:
10208 case PROCESSOR_M8:
10209 return 2;
10210 case PROCESSOR_NIAGARA:
10211 case PROCESSOR_NIAGARA2:
10212 case PROCESSOR_NIAGARA3:
10213 default:
10214 return 0;
10218 static int
10219 sparc_issue_rate (void)
10221 switch (sparc_cpu)
10223 case PROCESSOR_ULTRASPARC:
10224 case PROCESSOR_ULTRASPARC3:
10225 case PROCESSOR_M8:
10226 return 4;
10227 case PROCESSOR_SUPERSPARC:
10228 return 3;
10229 case PROCESSOR_HYPERSPARC:
10230 case PROCESSOR_SPARCLITE86X:
10231 case PROCESSOR_V9:
10232 /* Assume V9 processors are capable of at least dual-issue. */
10233 case PROCESSOR_NIAGARA4:
10234 case PROCESSOR_NIAGARA7:
10235 return 2;
10236 case PROCESSOR_NIAGARA:
10237 case PROCESSOR_NIAGARA2:
10238 case PROCESSOR_NIAGARA3:
10239 default:
10240 return 1;
10244 static int
10245 sparc_branch_cost (bool speed_p, bool predictable_p)
10247 if (!speed_p)
10248 return 2;
10250 /* For pre-V9 processors we use a single value (usually 3) to take into
10251 account the potential annulling of the delay slot (which ends up being
10252 a bubble in the pipeline slot) plus a cycle to take into consideration
10253 the instruction cache effects.
10255 On V9 and later processors, which have branch prediction facilities,
10256 we take into account whether the branch is (easily) predictable. */
10257 const int cost = sparc_costs->branch_cost;
10259 switch (sparc_cpu)
10261 case PROCESSOR_V9:
10262 case PROCESSOR_ULTRASPARC:
10263 case PROCESSOR_ULTRASPARC3:
10264 case PROCESSOR_NIAGARA:
10265 case PROCESSOR_NIAGARA2:
10266 case PROCESSOR_NIAGARA3:
10267 case PROCESSOR_NIAGARA4:
10268 case PROCESSOR_NIAGARA7:
10269 case PROCESSOR_M8:
10270 return cost + (predictable_p ? 0 : 2);
10272 default:
10273 return cost;
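/* For illustration: when optimizing for size (!speed_p) every branch
   is costed at 2; on the V9-class CPUs listed above a predictable
   branch costs just sparc_costs->branch_cost and an unpredictable one
   two units more; the remaining (pre-V9) CPUs always get the flat
   branch_cost value.  */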
10277 static int
10278 set_extends (rtx_insn *insn)
10280 register rtx pat = PATTERN (insn);
10282 switch (GET_CODE (SET_SRC (pat)))
10284 /* Load and some shift instructions zero extend. */
10285 case MEM:
10286 case ZERO_EXTEND:
10287 /* sethi clears the high bits */
10288 case HIGH:
10289 /* LO_SUM is used with sethi. sethi cleared the high
10290 bits and the values used with lo_sum are positive */
10291 case LO_SUM:
10292 /* Store flag stores 0 or 1 */
10293 case LT: case LTU:
10294 case GT: case GTU:
10295 case LE: case LEU:
10296 case GE: case GEU:
10297 case EQ:
10298 case NE:
10299 return 1;
10300 case AND:
10302 rtx op0 = XEXP (SET_SRC (pat), 0);
10303 rtx op1 = XEXP (SET_SRC (pat), 1);
10304 if (GET_CODE (op1) == CONST_INT)
10305 return INTVAL (op1) >= 0;
10306 if (GET_CODE (op0) != REG)
10307 return 0;
10308 if (sparc_check_64 (op0, insn) == 1)
10309 return 1;
10310 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10312 case IOR:
10313 case XOR:
10315 rtx op0 = XEXP (SET_SRC (pat), 0);
10316 rtx op1 = XEXP (SET_SRC (pat), 1);
10317 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
10318 return 0;
10319 if (GET_CODE (op1) == CONST_INT)
10320 return INTVAL (op1) >= 0;
10321 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10323 case LSHIFTRT:
10324 return GET_MODE (SET_SRC (pat)) == SImode;
10325 /* Positive integers leave the high bits zero. */
10326 case CONST_INT:
10327 return !(INTVAL (SET_SRC (pat)) & 0x80000000);
10328 case ASHIFTRT:
10329 case SIGN_EXTEND:
10330 return - (GET_MODE (SET_SRC (pat)) == SImode);
10331 case REG:
10332 return sparc_check_64 (SET_SRC (pat), insn);
10333 default:
10334 return 0;
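/* For illustration: "sethi %hi(sym), %o0" (HIGH) and a load (MEM)
   both yield 1 above, i.e. the high 32 bits are known zero; an SImode
   ASHIFTRT or SIGN_EXTEND yields -1, i.e. sign-extended; anything
   unrecognized conservatively yields 0.  */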
10338 /* We _ought_ to have only one kind per function, but... */
10339 static GTY(()) rtx sparc_addr_diff_list;
10340 static GTY(()) rtx sparc_addr_list;
10342 void
10343 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
10345 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10346 if (diff)
10347 sparc_addr_diff_list
10348 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
10349 else
10350 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
10353 static void
10354 sparc_output_addr_vec (rtx vec)
10356 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10357 int idx, vlen = XVECLEN (body, 0);
10359 #ifdef ASM_OUTPUT_ADDR_VEC_START
10360 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10361 #endif
10363 #ifdef ASM_OUTPUT_CASE_LABEL
10364 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10365 NEXT_INSN (lab));
10366 #else
10367 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10368 #endif
10370 for (idx = 0; idx < vlen; idx++)
10372 ASM_OUTPUT_ADDR_VEC_ELT
10373 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10376 #ifdef ASM_OUTPUT_ADDR_VEC_END
10377 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10378 #endif
10381 static void
10382 sparc_output_addr_diff_vec (rtx vec)
10384 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10385 rtx base = XEXP (XEXP (body, 0), 0);
10386 int idx, vlen = XVECLEN (body, 1);
10388 #ifdef ASM_OUTPUT_ADDR_VEC_START
10389 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10390 #endif
10392 #ifdef ASM_OUTPUT_CASE_LABEL
10393 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10394 NEXT_INSN (lab));
10395 #else
10396 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10397 #endif
10399 for (idx = 0; idx < vlen; idx++)
10401 ASM_OUTPUT_ADDR_DIFF_ELT
10402 (asm_out_file,
10403 body,
10404 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10405 CODE_LABEL_NUMBER (base));
10408 #ifdef ASM_OUTPUT_ADDR_VEC_END
10409 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10410 #endif
10413 static void
10414 sparc_output_deferred_case_vectors (void)
10416 rtx t;
10417 int align;
10419 if (sparc_addr_list == NULL_RTX
10420 && sparc_addr_diff_list == NULL_RTX)
10421 return;
10423 /* Align to cache line in the function's code section. */
10424 switch_to_section (current_function_section ());
10426 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10427 if (align > 0)
10428 ASM_OUTPUT_ALIGN (asm_out_file, align);
10430 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
10431 sparc_output_addr_vec (XEXP (t, 0));
10432 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
10433 sparc_output_addr_diff_vec (XEXP (t, 0));
10435 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
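/* For illustration, in 32-bit mode a deferred absolute vector for
   three cases would typically come out at the end of the function as
   something like:

     .align 4
   .L27:
     .word .L21
     .word .L23
     .word .L25  */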
10438 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
10439 unknown. Return 1 if the high bits are zero, -1 if the register is
10440 sign extended. */
10441 int
10442 sparc_check_64 (rtx x, rtx_insn *insn)
10444 /* If a register is set only once it is safe to ignore insns this
10445 code does not know how to handle. The loop will either recognize
10446 the single set and return the correct value or fail to recognize
10447 it and return 0. */
10448 int set_once = 0;
10449 rtx y = x;
10451 gcc_assert (GET_CODE (x) == REG);
10453 if (GET_MODE (x) == DImode)
10454 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
10456 if (flag_expensive_optimizations
10457 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
10458 set_once = 1;
10460 if (insn == 0)
10462 if (set_once)
10463 insn = get_last_insn_anywhere ();
10464 else
10465 return 0;
10468 while ((insn = PREV_INSN (insn)))
10470 switch (GET_CODE (insn))
10472 case JUMP_INSN:
10473 case NOTE:
10474 break;
10475 case CODE_LABEL:
10476 case CALL_INSN:
10477 default:
10478 if (! set_once)
10479 return 0;
10480 break;
10481 case INSN:
10483 rtx pat = PATTERN (insn);
10484 if (GET_CODE (pat) != SET)
10485 return 0;
10486 if (rtx_equal_p (x, SET_DEST (pat)))
10487 return set_extends (insn);
10488 if (y && rtx_equal_p (y, SET_DEST (pat)))
10489 return set_extends (insn);
10490 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
10491 return 0;
10495 return 0;
10498 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
10499 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
10501 const char *
10502 output_v8plus_shift (rtx_insn *insn, rtx *operands, const char *opcode)
10504 static char asm_code[60];
10506 /* The scratch register is only required when the destination
10507 register is not a 64-bit global or out register. */
10508 if (which_alternative != 2)
10509 operands[3] = operands[0];
10511 /* We can only shift by constants <= 63. */
10512 if (GET_CODE (operands[2]) == CONST_INT)
10513 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
10515 if (GET_CODE (operands[1]) == CONST_INT)
10517 output_asm_insn ("mov\t%1, %3", operands);
10519 else
10521 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10522 if (sparc_check_64 (operands[1], insn) <= 0)
10523 output_asm_insn ("srl\t%L1, 0, %L1", operands);
10524 output_asm_insn ("or\t%L1, %3, %3", operands);
10527 strcpy (asm_code, opcode);
10529 if (which_alternative != 2)
10530 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
10531 else
10532 return
10533 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
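/* For illustration, with a non-constant input the code above first
   assembles the full 64-bit value in the scratch register (high word
   shifted up by 32, or'ed with the zero-extended low word), performs
   the shift there, and then splits the result back into the %H0/%L0
   register pair with "srlx".  */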
10536 /* Output rtl to increment the profiler label LABELNO
10537 for profiling a function entry. */
10539 void
10540 sparc_profile_hook (int labelno)
10542 char buf[32];
10543 rtx lab, fun;
10545 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
10546 if (NO_PROFILE_COUNTERS)
10548 emit_library_call (fun, LCT_NORMAL, VOIDmode);
10550 else
10552 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10553 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
10554 emit_library_call (fun, LCT_NORMAL, VOIDmode, lab, Pmode);
10558 #ifdef TARGET_SOLARIS
10559 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
10561 static void
10562 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
10563 tree decl ATTRIBUTE_UNUSED)
10565 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
10567 solaris_elf_asm_comdat_section (name, flags, decl);
10568 return;
10571 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
10573 if (!(flags & SECTION_DEBUG))
10574 fputs (",#alloc", asm_out_file);
10575 #if HAVE_GAS_SECTION_EXCLUDE
10576 if (flags & SECTION_EXCLUDE)
10577 fputs (",#exclude", asm_out_file);
10578 #endif
10579 if (flags & SECTION_WRITE)
10580 fputs (",#write", asm_out_file);
10581 if (flags & SECTION_TLS)
10582 fputs (",#tls", asm_out_file);
10583 if (flags & SECTION_CODE)
10584 fputs (",#execinstr", asm_out_file);
10586 if (flags & SECTION_NOTYPE)
10587 ;
10588 else if (flags & SECTION_BSS)
10589 fputs (",#nobits", asm_out_file);
10590 else
10591 fputs (",#progbits", asm_out_file);
10593 fputc ('\n', asm_out_file);
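/* For illustration, a writable TLS data section thus comes out as:

     .section ".tdata",#alloc,#write,#tls,#progbits  */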
10595 #endif /* TARGET_SOLARIS */
10597 /* We do not allow indirect calls to be optimized into sibling calls.
10599 We cannot use sibling calls when delayed branches are disabled
10600 because they will likely require the call delay slot to be filled.
10602 Also, on SPARC 32-bit we cannot emit a sibling call when the
10603 current function returns a structure. This is because the "unimp
10604 after call" convention would cause the callee to return to the
10605 wrong place. The generic code already disallows cases where the
10606 function being called returns a structure.
10608 It may seem strange how this last case could occur. Usually there
10609 is code after the call which jumps to epilogue code which dumps the
10610 return value into the struct return area. That ought to invalidate
10611 the sibling call right? Well, in the C++ case we can end up passing
10612 the pointer to the struct return area to a constructor (which returns
10613 void) and then nothing else happens. Such a sibling call would look
10614 valid without the added check here.
10616 VxWorks PIC PLT entries require the global pointer to be initialized
10617 on entry. We therefore can't emit sibling calls to them. */
10618 static bool
10619 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
10621 return (decl
10622 && flag_delayed_branch
10623 && (TARGET_ARCH64 || ! cfun->returns_struct)
10624 && !(TARGET_VXWORKS_RTP
10625 && flag_pic
10626 && !targetm.binds_local_p (decl)));
10629 /* libfunc renaming. */
10631 static void
10632 sparc_init_libfuncs (void)
10634 if (TARGET_ARCH32)
10636 /* Use the subroutines that Sun's library provides for integer
10637 multiply and divide. The `*' prevents an underscore from
10638 being prepended by the compiler. .umul is a little faster
10639 than .mul. */
10640 set_optab_libfunc (smul_optab, SImode, "*.umul");
10641 set_optab_libfunc (sdiv_optab, SImode, "*.div");
10642 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
10643 set_optab_libfunc (smod_optab, SImode, "*.rem");
10644 set_optab_libfunc (umod_optab, SImode, "*.urem");
10646 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
10647 set_optab_libfunc (add_optab, TFmode, "_Q_add");
10648 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
10649 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
10650 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
10651 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
10653 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
10654 is because with soft-float, the SFmode and DFmode sqrt
10655 instructions will be absent, and the compiler will notice and
10656 try to use the TFmode sqrt instruction for calls to the
10657 builtin function sqrt, but this fails. */
10658 if (TARGET_FPU)
10659 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
10661 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
10662 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
10663 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
10664 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
10665 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
10666 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
10668 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
10669 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
10670 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
10671 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
10673 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
10674 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
10675 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
10676 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
10678 if (DITF_CONVERSION_LIBFUNCS)
10680 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
10681 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
10682 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
10683 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
10686 if (SUN_CONVERSION_LIBFUNCS)
10688 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
10689 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
10690 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
10691 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
10694 if (TARGET_ARCH64)
10696 /* In the SPARC 64bit ABI, SImode multiply and divide functions
10697 do not exist in the library. Make sure the compiler does not
10698 emit calls to them by accident. (It should always use the
10699 hardware instructions.) */
10700 set_optab_libfunc (smul_optab, SImode, 0);
10701 set_optab_libfunc (sdiv_optab, SImode, 0);
10702 set_optab_libfunc (udiv_optab, SImode, 0);
10703 set_optab_libfunc (smod_optab, SImode, 0);
10704 set_optab_libfunc (umod_optab, SImode, 0);
10706 if (SUN_INTEGER_MULTIPLY_64)
10708 set_optab_libfunc (smul_optab, DImode, "__mul64");
10709 set_optab_libfunc (sdiv_optab, DImode, "__div64");
10710 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
10711 set_optab_libfunc (smod_optab, DImode, "__rem64");
10712 set_optab_libfunc (umod_optab, DImode, "__urem64");
10715 if (SUN_CONVERSION_LIBFUNCS)
10717 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
10718 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
10719 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
10720 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
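/* The net effect: in 32-bit mode an SImode multiply without a
   hardware instruction calls Sun's ".umul" rather than libgcc's
   default "__mulsi3", while in 64-bit mode no SImode helper is
   called at all.  */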
10725 /* SPARC builtins. */
10726 enum sparc_builtins
10728 /* FPU builtins. */
10729 SPARC_BUILTIN_LDFSR,
10730 SPARC_BUILTIN_STFSR,
10732 /* VIS 1.0 builtins. */
10733 SPARC_BUILTIN_FPACK16,
10734 SPARC_BUILTIN_FPACK32,
10735 SPARC_BUILTIN_FPACKFIX,
10736 SPARC_BUILTIN_FEXPAND,
10737 SPARC_BUILTIN_FPMERGE,
10738 SPARC_BUILTIN_FMUL8X16,
10739 SPARC_BUILTIN_FMUL8X16AU,
10740 SPARC_BUILTIN_FMUL8X16AL,
10741 SPARC_BUILTIN_FMUL8SUX16,
10742 SPARC_BUILTIN_FMUL8ULX16,
10743 SPARC_BUILTIN_FMULD8SUX16,
10744 SPARC_BUILTIN_FMULD8ULX16,
10745 SPARC_BUILTIN_FALIGNDATAV4HI,
10746 SPARC_BUILTIN_FALIGNDATAV8QI,
10747 SPARC_BUILTIN_FALIGNDATAV2SI,
10748 SPARC_BUILTIN_FALIGNDATADI,
10749 SPARC_BUILTIN_WRGSR,
10750 SPARC_BUILTIN_RDGSR,
10751 SPARC_BUILTIN_ALIGNADDR,
10752 SPARC_BUILTIN_ALIGNADDRL,
10753 SPARC_BUILTIN_PDIST,
10754 SPARC_BUILTIN_EDGE8,
10755 SPARC_BUILTIN_EDGE8L,
10756 SPARC_BUILTIN_EDGE16,
10757 SPARC_BUILTIN_EDGE16L,
10758 SPARC_BUILTIN_EDGE32,
10759 SPARC_BUILTIN_EDGE32L,
10760 SPARC_BUILTIN_FCMPLE16,
10761 SPARC_BUILTIN_FCMPLE32,
10762 SPARC_BUILTIN_FCMPNE16,
10763 SPARC_BUILTIN_FCMPNE32,
10764 SPARC_BUILTIN_FCMPGT16,
10765 SPARC_BUILTIN_FCMPGT32,
10766 SPARC_BUILTIN_FCMPEQ16,
10767 SPARC_BUILTIN_FCMPEQ32,
10768 SPARC_BUILTIN_FPADD16,
10769 SPARC_BUILTIN_FPADD16S,
10770 SPARC_BUILTIN_FPADD32,
10771 SPARC_BUILTIN_FPADD32S,
10772 SPARC_BUILTIN_FPSUB16,
10773 SPARC_BUILTIN_FPSUB16S,
10774 SPARC_BUILTIN_FPSUB32,
10775 SPARC_BUILTIN_FPSUB32S,
10776 SPARC_BUILTIN_ARRAY8,
10777 SPARC_BUILTIN_ARRAY16,
10778 SPARC_BUILTIN_ARRAY32,
10780 /* VIS 2.0 builtins. */
10781 SPARC_BUILTIN_EDGE8N,
10782 SPARC_BUILTIN_EDGE8LN,
10783 SPARC_BUILTIN_EDGE16N,
10784 SPARC_BUILTIN_EDGE16LN,
10785 SPARC_BUILTIN_EDGE32N,
10786 SPARC_BUILTIN_EDGE32LN,
10787 SPARC_BUILTIN_BMASK,
10788 SPARC_BUILTIN_BSHUFFLEV4HI,
10789 SPARC_BUILTIN_BSHUFFLEV8QI,
10790 SPARC_BUILTIN_BSHUFFLEV2SI,
10791 SPARC_BUILTIN_BSHUFFLEDI,
10793 /* VIS 3.0 builtins. */
10794 SPARC_BUILTIN_CMASK8,
10795 SPARC_BUILTIN_CMASK16,
10796 SPARC_BUILTIN_CMASK32,
10797 SPARC_BUILTIN_FCHKSM16,
10798 SPARC_BUILTIN_FSLL16,
10799 SPARC_BUILTIN_FSLAS16,
10800 SPARC_BUILTIN_FSRL16,
10801 SPARC_BUILTIN_FSRA16,
10802 SPARC_BUILTIN_FSLL32,
10803 SPARC_BUILTIN_FSLAS32,
10804 SPARC_BUILTIN_FSRL32,
10805 SPARC_BUILTIN_FSRA32,
10806 SPARC_BUILTIN_PDISTN,
10807 SPARC_BUILTIN_FMEAN16,
10808 SPARC_BUILTIN_FPADD64,
10809 SPARC_BUILTIN_FPSUB64,
10810 SPARC_BUILTIN_FPADDS16,
10811 SPARC_BUILTIN_FPADDS16S,
10812 SPARC_BUILTIN_FPSUBS16,
10813 SPARC_BUILTIN_FPSUBS16S,
10814 SPARC_BUILTIN_FPADDS32,
10815 SPARC_BUILTIN_FPADDS32S,
10816 SPARC_BUILTIN_FPSUBS32,
10817 SPARC_BUILTIN_FPSUBS32S,
10818 SPARC_BUILTIN_FUCMPLE8,
10819 SPARC_BUILTIN_FUCMPNE8,
10820 SPARC_BUILTIN_FUCMPGT8,
10821 SPARC_BUILTIN_FUCMPEQ8,
10822 SPARC_BUILTIN_FHADDS,
10823 SPARC_BUILTIN_FHADDD,
10824 SPARC_BUILTIN_FHSUBS,
10825 SPARC_BUILTIN_FHSUBD,
10826 SPARC_BUILTIN_FNHADDS,
10827 SPARC_BUILTIN_FNHADDD,
10828 SPARC_BUILTIN_UMULXHI,
10829 SPARC_BUILTIN_XMULX,
10830 SPARC_BUILTIN_XMULXHI,
10832 /* VIS 4.0 builtins. */
10833 SPARC_BUILTIN_FPADD8,
10834 SPARC_BUILTIN_FPADDS8,
10835 SPARC_BUILTIN_FPADDUS8,
10836 SPARC_BUILTIN_FPADDUS16,
10837 SPARC_BUILTIN_FPCMPLE8,
10838 SPARC_BUILTIN_FPCMPGT8,
10839 SPARC_BUILTIN_FPCMPULE16,
10840 SPARC_BUILTIN_FPCMPUGT16,
10841 SPARC_BUILTIN_FPCMPULE32,
10842 SPARC_BUILTIN_FPCMPUGT32,
10843 SPARC_BUILTIN_FPMAX8,
10844 SPARC_BUILTIN_FPMAX16,
10845 SPARC_BUILTIN_FPMAX32,
10846 SPARC_BUILTIN_FPMAXU8,
10847 SPARC_BUILTIN_FPMAXU16,
10848 SPARC_BUILTIN_FPMAXU32,
10849 SPARC_BUILTIN_FPMIN8,
10850 SPARC_BUILTIN_FPMIN16,
10851 SPARC_BUILTIN_FPMIN32,
10852 SPARC_BUILTIN_FPMINU8,
10853 SPARC_BUILTIN_FPMINU16,
10854 SPARC_BUILTIN_FPMINU32,
10855 SPARC_BUILTIN_FPSUB8,
10856 SPARC_BUILTIN_FPSUBS8,
10857 SPARC_BUILTIN_FPSUBUS8,
10858 SPARC_BUILTIN_FPSUBUS16,
10860 /* VIS 4.0B builtins. */
10862 /* Note that all the DICTUNPACK* entries should be kept
10863 contiguous. */
10864 SPARC_BUILTIN_FIRST_DICTUNPACK,
10865 SPARC_BUILTIN_DICTUNPACK8 = SPARC_BUILTIN_FIRST_DICTUNPACK,
10866 SPARC_BUILTIN_DICTUNPACK16,
10867 SPARC_BUILTIN_DICTUNPACK32,
10868 SPARC_BUILTIN_LAST_DICTUNPACK = SPARC_BUILTIN_DICTUNPACK32,
10870 /* Note that all the FPCMP*SHL entries should be kept
10871 contiguous. */
10872 SPARC_BUILTIN_FIRST_FPCMPSHL,
10873 SPARC_BUILTIN_FPCMPLE8SHL = SPARC_BUILTIN_FIRST_FPCMPSHL,
10874 SPARC_BUILTIN_FPCMPGT8SHL,
10875 SPARC_BUILTIN_FPCMPEQ8SHL,
10876 SPARC_BUILTIN_FPCMPNE8SHL,
10877 SPARC_BUILTIN_FPCMPLE16SHL,
10878 SPARC_BUILTIN_FPCMPGT16SHL,
10879 SPARC_BUILTIN_FPCMPEQ16SHL,
10880 SPARC_BUILTIN_FPCMPNE16SHL,
10881 SPARC_BUILTIN_FPCMPLE32SHL,
10882 SPARC_BUILTIN_FPCMPGT32SHL,
10883 SPARC_BUILTIN_FPCMPEQ32SHL,
10884 SPARC_BUILTIN_FPCMPNE32SHL,
10885 SPARC_BUILTIN_FPCMPULE8SHL,
10886 SPARC_BUILTIN_FPCMPUGT8SHL,
10887 SPARC_BUILTIN_FPCMPULE16SHL,
10888 SPARC_BUILTIN_FPCMPUGT16SHL,
10889 SPARC_BUILTIN_FPCMPULE32SHL,
10890 SPARC_BUILTIN_FPCMPUGT32SHL,
10891 SPARC_BUILTIN_FPCMPDE8SHL,
10892 SPARC_BUILTIN_FPCMPDE16SHL,
10893 SPARC_BUILTIN_FPCMPDE32SHL,
10894 SPARC_BUILTIN_FPCMPUR8SHL,
10895 SPARC_BUILTIN_FPCMPUR16SHL,
10896 SPARC_BUILTIN_FPCMPUR32SHL,
10897 SPARC_BUILTIN_LAST_FPCMPSHL = SPARC_BUILTIN_FPCMPUR32SHL,
10899 SPARC_BUILTIN_MAX
10902 static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
10903 static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
10905 /* Return true if OPVAL can be used for operand OPNUM of instruction ICODE.
10906 The instruction should require a constant operand of some sort. The
10907 function prints an error if OPVAL is not valid. */
10909 static int
10910 check_constant_argument (enum insn_code icode, int opnum, rtx opval)
10912 if (GET_CODE (opval) != CONST_INT)
10914 error ("%qs expects a constant argument", insn_data[icode].name);
10915 return false;
10918 if (!(*insn_data[icode].operand[opnum].predicate) (opval, VOIDmode))
10920 error ("constant argument out of range for %qs", insn_data[icode].name);
10921 return false;
10923 return true;
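/* This is used, for instance, to validate the constant operands of
   the VIS 4.0B DICTUNPACK and FPCMP*SHL builtins defined below before
   they are expanded.  */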
10926 /* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE. Return the
10927 function decl or NULL_TREE if the builtin was not added. */
10929 static tree
10930 def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
10931 tree type)
10933 tree t
10934 = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
10936 if (t)
10938 sparc_builtins[code] = t;
10939 sparc_builtins_icode[code] = icode;
10942 return t;
10945 /* Likewise, but also marks the function as "const". */
10947 static tree
10948 def_builtin_const (const char *name, enum insn_code icode,
10949 enum sparc_builtins code, tree type)
10951 tree t = def_builtin (name, icode, code, type);
10953 if (t)
10954 TREE_READONLY (t) = 1;
10956 return t;
10959 /* Implement the TARGET_INIT_BUILTINS target hook.
10960 Create builtin functions for special SPARC instructions. */
10962 static void
10963 sparc_init_builtins (void)
10965 if (TARGET_FPU)
10966 sparc_fpu_init_builtins ();
10968 if (TARGET_VIS)
10969 sparc_vis_init_builtins ();
10972 /* Create builtin functions for FPU instructions. */
10974 static void
10975 sparc_fpu_init_builtins (void)
10977 tree ftype
10978 = build_function_type_list (void_type_node,
10979 build_pointer_type (unsigned_type_node), 0);
10980 def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
10981 SPARC_BUILTIN_LDFSR, ftype);
10982 def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
10983 SPARC_BUILTIN_STFSR, ftype);
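/* E.g. "unsigned int fsr; __builtin_store_fsr (&fsr);" copies %fsr
   out to FSR, and "__builtin_load_fsr (&fsr);" loads it back from
   memory.  */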
10986 /* Create builtin functions for VIS instructions. */
10988 static void
10989 sparc_vis_init_builtins (void)
10991 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
10992 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
10993 tree v4hi = build_vector_type (intHI_type_node, 4);
10994 tree v2hi = build_vector_type (intHI_type_node, 2);
10995 tree v2si = build_vector_type (intSI_type_node, 2);
10996 tree v1si = build_vector_type (intSI_type_node, 1);
10998 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
10999 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
11000 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
11001 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
11002 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
11003 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
11004 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
11005 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
11006 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
11007 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
11008 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
11009 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
11010 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
11011 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
11012 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
11013 v8qi, v8qi,
11014 intDI_type_node, 0);
11015 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
11016 v8qi, v8qi, 0);
11017 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
11018 v8qi, v8qi, 0);
11019 tree v8qi_ftype_df_si = build_function_type_list (v8qi, double_type_node,
11020 intSI_type_node, 0);
11021 tree v4hi_ftype_df_si = build_function_type_list (v4hi, double_type_node,
11022 intSI_type_node, 0);
11023 tree v2si_ftype_df_si = build_function_type_list (v2si, double_type_node,
11024 intSI_type_node, 0);
11025 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
11026 intDI_type_node,
11027 intDI_type_node, 0);
11028 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
11029 intSI_type_node,
11030 intSI_type_node, 0);
11031 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
11032 ptr_type_node,
11033 intSI_type_node, 0);
11034 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
11035 ptr_type_node,
11036 intDI_type_node, 0);
11037 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
11038 ptr_type_node,
11039 ptr_type_node, 0);
11040 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
11041 ptr_type_node,
11042 ptr_type_node, 0);
11043 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
11044 v4hi, v4hi, 0);
11045 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
11046 v2si, v2si, 0);
11047 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
11048 v4hi, v4hi, 0);
11049 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
11050 v2si, v2si, 0);
11051 tree void_ftype_di = build_function_type_list (void_type_node,
11052 intDI_type_node, 0);
11053 tree di_ftype_void = build_function_type_list (intDI_type_node,
11054 void_type_node, 0);
11055 tree void_ftype_si = build_function_type_list (void_type_node,
11056 intSI_type_node, 0);
11057 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
11058 float_type_node,
11059 float_type_node, 0);
11060 tree df_ftype_df_df = build_function_type_list (double_type_node,
11061 double_type_node,
11062 double_type_node, 0);
11064 /* Packing and expanding vectors. */
11065 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
11066 SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
11067 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
11068 SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
11069 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
11070 SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
11071 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
11072 SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
11073 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
11074 SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
11076 /* Multiplications. */
11077 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
11078 SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
11079 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
11080 SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
11081 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
11082 SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
11083 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
11084 SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
11085 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
11086 SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
11087 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
11088 SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
11089 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
11090 SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
11092 /* Data aligning. */
11093 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
11094 SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
11095 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
11096 SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
11097 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
11098 SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
11099 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
11100 SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
11102 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
11103 SPARC_BUILTIN_WRGSR, void_ftype_di);
11104 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
11105 SPARC_BUILTIN_RDGSR, di_ftype_void);
11107 if (TARGET_ARCH64)
11109 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
11110 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
11111 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
11112 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
11114 else
11116 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
11117 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
11118 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
11119 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
11122 /* Pixel distance. */
11123 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
11124 SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
11126 /* Edge handling. */
11127 if (TARGET_ARCH64)
11129 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
11130 SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
11131 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
11132 SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
11133 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
11134 SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
11135 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
11136 SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
11137 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
11138 SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
11139 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
11140 SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
11142 else
11144 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
11145 SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
11146 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
11147 SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
11148 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
11149 SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
11150 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
11151 SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
11152 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
11153 SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
11154 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
11155 SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
11158 /* Pixel compare. */
11159 if (TARGET_ARCH64)
11161 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
11162 SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
11163 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
11164 SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
11165 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
11166 SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
11167 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
11168 SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
11169 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
11170 SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
11171 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
11172 SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
11173 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
11174 SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
11175 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
11176 SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
11178 else
11180 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
11181 SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
11182 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
11183 SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
11184 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
11185 SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
11186 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
11187 SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
11188 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
11189 SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
11190 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
11191 SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
11192 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
11193 SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
11194 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
11195 SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
11198 /* Addition and subtraction. */
11199 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
11200 SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
11201 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
11202 SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
11203 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
11204 SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
11205 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
11206 SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
11207 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
11208 SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
11209 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
11210 SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
11211 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
11212 SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
11213 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
11214 SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
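/* At user level these become vector builtins, e.g. (assuming -mvis):

     typedef short v4hi __attribute__ ((vector_size (8)));
     v4hi a, b;
     v4hi sum = __builtin_vis_fpadd16 (a, b);

   which compiles to a single "fpadd16" instruction.  */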
11216 /* Three-dimensional array addressing. */
11217 if (TARGET_ARCH64)
11219 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
11220 SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
11221 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
11222 SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
11223 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
11224 SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
11226 else
11228 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
11229 SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
11230 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
11231 SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
11232 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
11233 SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
11236 if (TARGET_VIS2)
11238 /* Edge handling. */
11239 if (TARGET_ARCH64)
11241 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
11242 SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
11243 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
11244 SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
11245 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
11246 SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
11247 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
11248 SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
11249 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
11250 SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
11251 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
11252 SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
11254 else
11256 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
11257 SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
11258 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
11259 SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
11260 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
11261 SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
11262 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
11263 SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
11264 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
11265 SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
11266 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
11267 SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
11270 /* Byte mask and shuffle. */
11271 if (TARGET_ARCH64)
11272 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
11273 SPARC_BUILTIN_BMASK, di_ftype_di_di);
11274 else
11275 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
11276 SPARC_BUILTIN_BMASK, si_ftype_si_si);
11277 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
11278 SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
11279 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
11280 SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
11281 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
11282 SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
11283 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
11284 SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
11287 if (TARGET_VIS3)
11289 if (TARGET_ARCH64)
11291 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
11292 SPARC_BUILTIN_CMASK8, void_ftype_di);
11293 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
11294 SPARC_BUILTIN_CMASK16, void_ftype_di);
11295 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
11296 SPARC_BUILTIN_CMASK32, void_ftype_di);
11298 else
11300 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
11301 SPARC_BUILTIN_CMASK8, void_ftype_si);
11302 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
11303 SPARC_BUILTIN_CMASK16, void_ftype_si);
11304 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
11305 SPARC_BUILTIN_CMASK32, void_ftype_si);
11308 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
11309 SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
11311 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
11312 SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
11313 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
11314 SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
11315 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
11316 SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
11317 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
11318 SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
11319 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
11320 SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
11321 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
11322 SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
11323 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
11324 SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
11325 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
11326 SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
11328 if (TARGET_ARCH64)
11329 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
11330 SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
11331 else
11332 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
11333 SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
11335 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
11336 SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
11337 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
11338 SPARC_BUILTIN_FPADD64, di_ftype_di_di);
11339 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
11340 SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
11342 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
11343 SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
11344 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
11345 SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
11346 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
11347 SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
11348 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
11349 SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
11350 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
11351 SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
11352 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
11353 SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
11354 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
11355 SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
11356 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
11357 SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
11359 if (TARGET_ARCH64)
11361 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
11362 SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
11363 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
11364 SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
11365 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
11366 SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
11367 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
11368 SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
11370 else
11372 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
11373 SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
11374 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
11375 SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
11376 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
11377 SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
11378 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
11379 SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
11382 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
11383 SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
11384 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
11385 SPARC_BUILTIN_FHADDD, df_ftype_df_df);
11386 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
11387 SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
11388 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
11389 SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
11390 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
11391 SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
11392 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
11393 SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
11395 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
11396 SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
11397 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
11398 SPARC_BUILTIN_XMULX, di_ftype_di_di);
11399 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
11400 SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
11403 if (TARGET_VIS4)
11405 def_builtin_const ("__builtin_vis_fpadd8", CODE_FOR_addv8qi3,
11406 SPARC_BUILTIN_FPADD8, v8qi_ftype_v8qi_v8qi);
11407 def_builtin_const ("__builtin_vis_fpadds8", CODE_FOR_ssaddv8qi3,
11408 SPARC_BUILTIN_FPADDS8, v8qi_ftype_v8qi_v8qi);
11409 def_builtin_const ("__builtin_vis_fpaddus8", CODE_FOR_usaddv8qi3,
11410 SPARC_BUILTIN_FPADDUS8, v8qi_ftype_v8qi_v8qi);
11411 def_builtin_const ("__builtin_vis_fpaddus16", CODE_FOR_usaddv4hi3,
11412 SPARC_BUILTIN_FPADDUS16, v4hi_ftype_v4hi_v4hi);
11415 if (TARGET_ARCH64)
11417 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8di_vis,
11418 SPARC_BUILTIN_FPCMPLE8, di_ftype_v8qi_v8qi);
11419 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8di_vis,
11420 SPARC_BUILTIN_FPCMPGT8, di_ftype_v8qi_v8qi);
11421 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16di_vis,
11422 SPARC_BUILTIN_FPCMPULE16, di_ftype_v4hi_v4hi);
11423 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16di_vis,
11424 SPARC_BUILTIN_FPCMPUGT16, di_ftype_v4hi_v4hi);
11425 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32di_vis,
11426 SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
11427 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32di_vis,
11428 SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
11430 else
11432 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8si_vis,
11433 SPARC_BUILTIN_FPCMPLE8, si_ftype_v8qi_v8qi);
11434 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8si_vis,
11435 SPARC_BUILTIN_FPCMPGT8, si_ftype_v8qi_v8qi);
11436 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16si_vis,
11437 SPARC_BUILTIN_FPCMPULE16, si_ftype_v4hi_v4hi);
11438 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16si_vis,
11439 SPARC_BUILTIN_FPCMPUGT16, si_ftype_v4hi_v4hi);
11440 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32si_vis,
11441 SPARC_BUILTIN_FPCMPULE32, si_ftype_v2si_v2si);
11442 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32si_vis,
11443 SPARC_BUILTIN_FPCMPUGT32, si_ftype_v2si_v2si);
11446 def_builtin_const ("__builtin_vis_fpmax8", CODE_FOR_maxv8qi3,
11447 SPARC_BUILTIN_FPMAX8, v8qi_ftype_v8qi_v8qi);
11448 def_builtin_const ("__builtin_vis_fpmax16", CODE_FOR_maxv4hi3,
11449 SPARC_BUILTIN_FPMAX16, v4hi_ftype_v4hi_v4hi);
11450 def_builtin_const ("__builtin_vis_fpmax32", CODE_FOR_maxv2si3,
11451 SPARC_BUILTIN_FPMAX32, v2si_ftype_v2si_v2si);
11452 def_builtin_const ("__builtin_vis_fpmaxu8", CODE_FOR_maxuv8qi3,
11453 SPARC_BUILTIN_FPMAXU8, v8qi_ftype_v8qi_v8qi);
11454 def_builtin_const ("__builtin_vis_fpmaxu16", CODE_FOR_maxuv4hi3,
11455 SPARC_BUILTIN_FPMAXU16, v4hi_ftype_v4hi_v4hi);
11456 def_builtin_const ("__builtin_vis_fpmaxu32", CODE_FOR_maxuv2si3,
11457 SPARC_BUILTIN_FPMAXU32, v2si_ftype_v2si_v2si);
11458 def_builtin_const ("__builtin_vis_fpmin8", CODE_FOR_minv8qi3,
11459 SPARC_BUILTIN_FPMIN8, v8qi_ftype_v8qi_v8qi);
11460 def_builtin_const ("__builtin_vis_fpmin16", CODE_FOR_minv4hi3,
11461 SPARC_BUILTIN_FPMIN16, v4hi_ftype_v4hi_v4hi);
11462 def_builtin_const ("__builtin_vis_fpmin32", CODE_FOR_minv2si3,
11463 SPARC_BUILTIN_FPMIN32, v2si_ftype_v2si_v2si);
11464 def_builtin_const ("__builtin_vis_fpminu8", CODE_FOR_minuv8qi3,
11465 SPARC_BUILTIN_FPMINU8, v8qi_ftype_v8qi_v8qi);
11466 def_builtin_const ("__builtin_vis_fpminu16", CODE_FOR_minuv4hi3,
11467 SPARC_BUILTIN_FPMINU16, v4hi_ftype_v4hi_v4hi);
11468 def_builtin_const ("__builtin_vis_fpminu32", CODE_FOR_minuv2si3,
11469 SPARC_BUILTIN_FPMINU32, v2si_ftype_v2si_v2si);
11470 def_builtin_const ("__builtin_vis_fpsub8", CODE_FOR_subv8qi3,
11471 SPARC_BUILTIN_FPSUB8, v8qi_ftype_v8qi_v8qi);
11472 def_builtin_const ("__builtin_vis_fpsubs8", CODE_FOR_sssubv8qi3,
11473 SPARC_BUILTIN_FPSUBS8, v8qi_ftype_v8qi_v8qi);
11474 def_builtin_const ("__builtin_vis_fpsubus8", CODE_FOR_ussubv8qi3,
11475 SPARC_BUILTIN_FPSUBUS8, v8qi_ftype_v8qi_v8qi);
11476 def_builtin_const ("__builtin_vis_fpsubus16", CODE_FOR_ussubv4hi3,
11477 SPARC_BUILTIN_FPSUBUS16, v4hi_ftype_v4hi_v4hi);
11480 if (TARGET_VIS4B)
11482 def_builtin_const ("__builtin_vis_dictunpack8", CODE_FOR_dictunpack8,
11483 SPARC_BUILTIN_DICTUNPACK8, v8qi_ftype_df_si);
11484 def_builtin_const ("__builtin_vis_dictunpack16", CODE_FOR_dictunpack16,
11485 SPARC_BUILTIN_DICTUNPACK16, v4hi_ftype_df_si);
11486 def_builtin_const ("__builtin_vis_dictunpack32", CODE_FOR_dictunpack32,
11487 SPARC_BUILTIN_DICTUNPACK32, v2si_ftype_df_si);
11489 if (TARGET_ARCH64)
11491 tree di_ftype_v8qi_v8qi_si = build_function_type_list (intDI_type_node,
11492 v8qi, v8qi,
11493 intSI_type_node, 0);
11494 tree di_ftype_v4hi_v4hi_si = build_function_type_list (intDI_type_node,
11495 v4hi, v4hi,
11496 intSI_type_node, 0);
11497 tree di_ftype_v2si_v2si_si = build_function_type_list (intDI_type_node,
11498 v2si, v2si,
11499 intSI_type_node, 0);
11501 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8dishl,
11502 SPARC_BUILTIN_FPCMPLE8SHL, di_ftype_v8qi_v8qi_si);
11503 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8dishl,
11504 SPARC_BUILTIN_FPCMPGT8SHL, di_ftype_v8qi_v8qi_si);
11505 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8dishl,
11506 SPARC_BUILTIN_FPCMPEQ8SHL, di_ftype_v8qi_v8qi_si);
11507 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8dishl,
11508 SPARC_BUILTIN_FPCMPNE8SHL, di_ftype_v8qi_v8qi_si);
11510 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16dishl,
11511 SPARC_BUILTIN_FPCMPLE16SHL, di_ftype_v4hi_v4hi_si);
11512 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16dishl,
11513 SPARC_BUILTIN_FPCMPGT16SHL, di_ftype_v4hi_v4hi_si);
11514 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16dishl,
11515 SPARC_BUILTIN_FPCMPEQ16SHL, di_ftype_v4hi_v4hi_si);
11516 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16dishl,
11517 SPARC_BUILTIN_FPCMPNE16SHL, di_ftype_v4hi_v4hi_si);
11519 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32dishl,
11520 SPARC_BUILTIN_FPCMPLE32SHL, di_ftype_v2si_v2si_si);
11521 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32dishl,
11522 SPARC_BUILTIN_FPCMPGT32SHL, di_ftype_v2si_v2si_si);
11523 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32dishl,
11524 SPARC_BUILTIN_FPCMPEQ32SHL, di_ftype_v2si_v2si_si);
11525 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32dishl,
11526 SPARC_BUILTIN_FPCMPNE32SHL, di_ftype_v2si_v2si_si);
11529 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8dishl,
11530 SPARC_BUILTIN_FPCMPULE8SHL, di_ftype_v8qi_v8qi_si);
11531 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8dishl,
11532 SPARC_BUILTIN_FPCMPUGT8SHL, di_ftype_v8qi_v8qi_si);
11534 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16dishl,
11535 SPARC_BUILTIN_FPCMPULE16SHL, di_ftype_v4hi_v4hi_si);
11536 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16dishl,
11537 SPARC_BUILTIN_FPCMPUGT16SHL, di_ftype_v4hi_v4hi_si);
11539 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32dishl,
11540 SPARC_BUILTIN_FPCMPULE32SHL, di_ftype_v2si_v2si_si);
11541 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32dishl,
11542 SPARC_BUILTIN_FPCMPUGT32SHL, di_ftype_v2si_v2si_si);
11544 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8dishl,
11545 SPARC_BUILTIN_FPCMPDE8SHL, di_ftype_v8qi_v8qi_si);
11546 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16dishl,
11547 SPARC_BUILTIN_FPCMPDE16SHL, di_ftype_v4hi_v4hi_si);
11548 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32dishl,
11549 SPARC_BUILTIN_FPCMPDE32SHL, di_ftype_v2si_v2si_si);
11551 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8dishl,
11552 SPARC_BUILTIN_FPCMPUR8SHL, di_ftype_v8qi_v8qi_si);
11553 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16dishl,
11554 SPARC_BUILTIN_FPCMPUR16SHL, di_ftype_v4hi_v4hi_si);
11555 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32dishl,
11556 SPARC_BUILTIN_FPCMPUR32SHL, di_ftype_v2si_v2si_si);
11559 else
11561 tree si_ftype_v8qi_v8qi_si = build_function_type_list (intSI_type_node,
11562 v8qi, v8qi,
11563 intSI_type_node, 0);
11564 tree si_ftype_v4hi_v4hi_si = build_function_type_list (intSI_type_node,
11565 v4hi, v4hi,
11566 intSI_type_node, 0);
11567 tree si_ftype_v2si_v2si_si = build_function_type_list (intSI_type_node,
11568 v2si, v2si,
11569 intSI_type_node, 0);
11571 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8sishl,
11572 SPARC_BUILTIN_FPCMPLE8SHL, si_ftype_v8qi_v8qi_si);
11573 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8sishl,
11574 SPARC_BUILTIN_FPCMPGT8SHL, si_ftype_v8qi_v8qi_si);
11575 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8sishl,
11576 SPARC_BUILTIN_FPCMPEQ8SHL, si_ftype_v8qi_v8qi_si);
11577 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8sishl,
11578 SPARC_BUILTIN_FPCMPNE8SHL, si_ftype_v8qi_v8qi_si);
11580 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16sishl,
11581 SPARC_BUILTIN_FPCMPLE16SHL, si_ftype_v4hi_v4hi_si);
11582 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16sishl,
11583 SPARC_BUILTIN_FPCMPGT16SHL, si_ftype_v4hi_v4hi_si);
11584 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16sishl,
11585 SPARC_BUILTIN_FPCMPEQ16SHL, si_ftype_v4hi_v4hi_si);
11586 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16sishl,
11587 SPARC_BUILTIN_FPCMPNE16SHL, si_ftype_v4hi_v4hi_si);
11589 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32sishl,
11590 SPARC_BUILTIN_FPCMPLE32SHL, si_ftype_v2si_v2si_si);
11591 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32sishl,
11592 SPARC_BUILTIN_FPCMPGT32SHL, si_ftype_v2si_v2si_si);
11593 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32sishl,
11594 SPARC_BUILTIN_FPCMPEQ32SHL, si_ftype_v2si_v2si_si);
11595 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32sishl,
11596 SPARC_BUILTIN_FPCMPNE32SHL, si_ftype_v2si_v2si_si);
11599 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8sishl,
11600 SPARC_BUILTIN_FPCMPULE8SHL, si_ftype_v8qi_v8qi_si);
11601 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8sishl,
11602 SPARC_BUILTIN_FPCMPUGT8SHL, si_ftype_v8qi_v8qi_si);
11604 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16sishl,
11605 SPARC_BUILTIN_FPCMPULE16SHL, si_ftype_v4hi_v4hi_si);
11606 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16sishl,
11607 SPARC_BUILTIN_FPCMPUGT16SHL, si_ftype_v4hi_v4hi_si);
11609 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32sishl,
11610 SPARC_BUILTIN_FPCMPULE32SHL, si_ftype_v2si_v2si_si);
11611 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32sishl,
11612 SPARC_BUILTIN_FPCMPUGT32SHL, si_ftype_v2si_v2si_si);
11614 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8sishl,
11615 SPARC_BUILTIN_FPCMPDE8SHL, si_ftype_v8qi_v8qi_si);
11616 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16sishl,
11617 SPARC_BUILTIN_FPCMPDE16SHL, si_ftype_v4hi_v4hi_si);
11618 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32sishl,
11619 SPARC_BUILTIN_FPCMPDE32SHL, si_ftype_v2si_v2si_si);
11621 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8sishl,
11622 SPARC_BUILTIN_FPCMPUR8SHL, si_ftype_v8qi_v8qi_si);
11623 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16sishl,
11624 SPARC_BUILTIN_FPCMPUR16SHL, si_ftype_v4hi_v4hi_si);
11625 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32sishl,
11626 SPARC_BUILTIN_FPCMPUR32SHL, si_ftype_v2si_v2si_si);
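/* Illustrative usage sketch, not part of the build: with the declarations
   above in effect, the compare-with-shift builtins can be called from C
   along these lines, assuming the usual GNU vector typedef:

     typedef unsigned char vec8 __attribute__ ((vector_size (8)));
     long mask = __builtin_vis_fpcmpgt8shl (a, b, 2);

   where a and b are vec8 values; the result is DImode under TARGET_ARCH64
   and SImode otherwise, and the third (shift) operand must be a
   compile-time constant, which sparc_expand_builtin below enforces via
   check_constant_argument.  */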
11631 /* Implement TARGET_BUILTIN_DECL hook. */
11633 static tree
11634 sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11636 if (code >= SPARC_BUILTIN_MAX)
11637 return error_mark_node;
11639 return sparc_builtins[code];
11642 /* Implement TARGET_EXPAND_BUILTIN hook. */
11644 static rtx
11645 sparc_expand_builtin (tree exp, rtx target,
11646 rtx subtarget ATTRIBUTE_UNUSED,
11647 machine_mode tmode ATTRIBUTE_UNUSED,
11648 int ignore ATTRIBUTE_UNUSED)
11650 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11651 enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
11652 enum insn_code icode = sparc_builtins_icode[code];
11653 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
11654 call_expr_arg_iterator iter;
11655 int arg_count = 0;
11656 rtx pat, op[4];
11657 tree arg;
11659 if (nonvoid)
11661 machine_mode tmode = insn_data[icode].operand[0].mode;
11662 if (!target
11663 || GET_MODE (target) != tmode
11664 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11665 op[0] = gen_reg_rtx (tmode);
11666 else
11667 op[0] = target;
11669 else
11670 op[0] = NULL_RTX;
11672 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
11674 const struct insn_operand_data *insn_op;
11675 int idx;
11677 if (arg == error_mark_node)
11678 return NULL_RTX;
11680 arg_count++;
11681 idx = arg_count - !nonvoid;
11682 insn_op = &insn_data[icode].operand[idx];
11683 op[arg_count] = expand_normal (arg);
11685 /* Some of the builtins require constant arguments. We check
11686 for this here. */
11687 if ((code >= SPARC_BUILTIN_FIRST_FPCMPSHL
11688 && code <= SPARC_BUILTIN_LAST_FPCMPSHL
11689 && arg_count == 3)
11690 || (code >= SPARC_BUILTIN_FIRST_DICTUNPACK
11691 && code <= SPARC_BUILTIN_LAST_DICTUNPACK
11692 && arg_count == 2))
11694 if (!check_constant_argument (icode, idx, op[arg_count]))
11695 return const0_rtx;
11698 if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
11700 if (!address_operand (op[arg_count], SImode))
11702 op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
11703 op[arg_count] = copy_addr_to_reg (op[arg_count]);
11705 op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
11708 else if (insn_op->mode == V1DImode
11709 && GET_MODE (op[arg_count]) == DImode)
11710 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
11712 else if (insn_op->mode == V1SImode
11713 && GET_MODE (op[arg_count]) == SImode)
11714 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
11716 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
11717 insn_op->mode))
11718 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
11721 switch (arg_count)
11723 case 0:
11724 pat = GEN_FCN (icode) (op[0]);
11725 break;
11726 case 1:
11727 if (nonvoid)
11728 pat = GEN_FCN (icode) (op[0], op[1]);
11729 else
11730 pat = GEN_FCN (icode) (op[1]);
11731 break;
11732 case 2:
11733 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
11734 break;
11735 case 3:
11736 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
11737 break;
11738 default:
11739 gcc_unreachable ();
11742 if (!pat)
11743 return NULL_RTX;
11745 emit_insn (pat);
11747 return (nonvoid ? op[0] : const0_rtx);
11750 /* Return the upper 16 bits of the 8x16 multiplication. */
11752 static int
11753 sparc_vis_mul8x16 (int e8, int e16)
11755 return (e8 * e16 + 128) / 256;
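/* A worked instance of the formula above: with e8 = 100 and e16 = 300 the
   raw product is 30000; adding the rounding constant 128 gives 30128, and
   dividing by 256 yields 117, the rounded high part.  */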
11758 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
11759 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
11761 static void
11762 sparc_handle_vis_mul8x16 (vec<tree> *n_elts, enum sparc_builtins fncode,
11763 tree inner_type, tree cst0, tree cst1)
11765 unsigned i, num = VECTOR_CST_NELTS (cst0);
11766 int scale;
11768 switch (fncode)
11770 case SPARC_BUILTIN_FMUL8X16:
11771 for (i = 0; i < num; ++i)
11773 int val
11774 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11775 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
11776 n_elts->quick_push (build_int_cst (inner_type, val));
11778 break;
11780 case SPARC_BUILTIN_FMUL8X16AU:
11781 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
11783 for (i = 0; i < num; ++i)
11785 int val
11786 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11787 scale);
11788 n_elts->quick_push (build_int_cst (inner_type, val));
11790 break;
11792 case SPARC_BUILTIN_FMUL8X16AL:
11793 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
11795 for (i = 0; i < num; ++i)
11797 int val
11798 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11799 scale);
11800 n_elts->quick_push (build_int_cst (inner_type, val));
11802 break;
11804 default:
11805 gcc_unreachable ();
11809 /* Implement TARGET_FOLD_BUILTIN hook.
11811 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
11812 result of the function call is ignored. NULL_TREE is returned if the
11813 function could not be folded. */
11815 static tree
11816 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
11817 tree *args, bool ignore)
11819 enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
11820 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
11821 tree arg0, arg1, arg2;
11823 if (ignore)
11824 switch (code)
11826 case SPARC_BUILTIN_LDFSR:
11827 case SPARC_BUILTIN_STFSR:
11828 case SPARC_BUILTIN_ALIGNADDR:
11829 case SPARC_BUILTIN_WRGSR:
11830 case SPARC_BUILTIN_BMASK:
11831 case SPARC_BUILTIN_CMASK8:
11832 case SPARC_BUILTIN_CMASK16:
11833 case SPARC_BUILTIN_CMASK32:
11834 break;
11836 default:
11837 return build_zero_cst (rtype);
11840 switch (code)
11842 case SPARC_BUILTIN_FEXPAND:
11843 arg0 = args[0];
11844 STRIP_NOPS (arg0);
11846 if (TREE_CODE (arg0) == VECTOR_CST)
11848 tree inner_type = TREE_TYPE (rtype);
11849 unsigned i;
11851 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11852 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11854 unsigned HOST_WIDE_INT val
11855 = TREE_INT_CST_LOW (VECTOR_CST_ELT (arg0, i));
11856 n_elts.quick_push (build_int_cst (inner_type, val << 4));
11858 return n_elts.build ();
11860 break;
11862 case SPARC_BUILTIN_FMUL8X16:
11863 case SPARC_BUILTIN_FMUL8X16AU:
11864 case SPARC_BUILTIN_FMUL8X16AL:
11865 arg0 = args[0];
11866 arg1 = args[1];
11867 STRIP_NOPS (arg0);
11868 STRIP_NOPS (arg1);
11870 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11872 tree inner_type = TREE_TYPE (rtype);
11873 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11874 sparc_handle_vis_mul8x16 (&n_elts, code, inner_type, arg0, arg1);
11875 return n_elts.build ();
11877 break;
11879 case SPARC_BUILTIN_FPMERGE:
11880 arg0 = args[0];
11881 arg1 = args[1];
11882 STRIP_NOPS (arg0);
11883 STRIP_NOPS (arg1);
11885 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11887 tree_vector_builder n_elts (rtype, 2 * VECTOR_CST_NELTS (arg0), 1);
11888 unsigned i;
11889 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11891 n_elts.quick_push (VECTOR_CST_ELT (arg0, i));
11892 n_elts.quick_push (VECTOR_CST_ELT (arg1, i));
11895 return n_elts.build ();
11897 break;
11899 case SPARC_BUILTIN_PDIST:
11900 case SPARC_BUILTIN_PDISTN:
11901 arg0 = args[0];
11902 arg1 = args[1];
11903 STRIP_NOPS (arg0);
11904 STRIP_NOPS (arg1);
11905 if (code == SPARC_BUILTIN_PDIST)
11907 arg2 = args[2];
11908 STRIP_NOPS (arg2);
11910 else
11911 arg2 = integer_zero_node;
11913 if (TREE_CODE (arg0) == VECTOR_CST
11914 && TREE_CODE (arg1) == VECTOR_CST
11915 && TREE_CODE (arg2) == INTEGER_CST)
11917 bool overflow = false;
11918 widest_int result = wi::to_widest (arg2);
11919 widest_int tmp;
11920 unsigned i;
11922 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11924 tree e0 = VECTOR_CST_ELT (arg0, i);
11925 tree e1 = VECTOR_CST_ELT (arg1, i);
11927 wi::overflow_type neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
11929 tmp = wi::neg (wi::to_widest (e1), &neg1_ovf);
11930 tmp = wi::add (wi::to_widest (e0), tmp, SIGNED, &add1_ovf);
11931 if (wi::neg_p (tmp))
11932 tmp = wi::neg (tmp, &neg2_ovf);
11933 else
11934 neg2_ovf = wi::OVF_NONE;
11935 result = wi::add (result, tmp, SIGNED, &add2_ovf);
11936 overflow |= ((neg1_ovf != wi::OVF_NONE)
11937 | (neg2_ovf != wi::OVF_NONE)
11938 | (add1_ovf != wi::OVF_NONE)
11939 | (add2_ovf != wi::OVF_NONE));
11942 gcc_assert (!overflow);
11944 return wide_int_to_tree (rtype, result);
11947 default:
11948 break;
11951 return NULL_TREE;
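/* A concrete folding example (illustrative): FEXPAND shifts each 8-bit
   element left by 4 into a 16-bit lane, so a constant argument such as
   { 1, 2, 3, 4 } is folded at compile time to { 16, 32, 48, 64 } by the
   VECTOR_CST case above, and no fexpand instruction is emitted.  */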
11954 /* ??? This duplicates information provided to the compiler by the
11955 ??? scheduler description. Some day, teach genautomata to output
11956 ??? the latencies and then CSE will just use that. */
11958 static bool
11959 sparc_rtx_costs (rtx x, machine_mode mode, int outer_code,
11960 int opno ATTRIBUTE_UNUSED,
11961 int *total, bool speed ATTRIBUTE_UNUSED)
11963 int code = GET_CODE (x);
11964 bool float_mode_p = FLOAT_MODE_P (mode);
11966 switch (code)
11968 case CONST_INT:
11969 if (SMALL_INT (x))
11970 *total = 0;
11971 else
11972 *total = 2;
11973 return true;
11975 case CONST_WIDE_INT:
11976 *total = 0;
11977 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 0)))
11978 *total += 2;
11979 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 1)))
11980 *total += 2;
11981 return true;
11983 case HIGH:
11984 *total = 2;
11985 return true;
11987 case CONST:
11988 case LABEL_REF:
11989 case SYMBOL_REF:
11990 *total = 4;
11991 return true;
11993 case CONST_DOUBLE:
11994 *total = 8;
11995 return true;
11997 case MEM:
11998 /* If outer-code was a sign or zero extension, a cost
11999 of COSTS_N_INSNS (1) was already added in. This is
12000 why we are subtracting it back out. */
12001 if (outer_code == ZERO_EXTEND)
12003 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
12005 else if (outer_code == SIGN_EXTEND)
12007 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
12009 else if (float_mode_p)
12011 *total = sparc_costs->float_load;
12013 else
12015 *total = sparc_costs->int_load;
12018 return true;
12020 case PLUS:
12021 case MINUS:
12022 if (float_mode_p)
12023 *total = sparc_costs->float_plusminus;
12024 else
12025 *total = COSTS_N_INSNS (1);
12026 return false;
12028 case FMA:
12030 rtx sub;
12032 gcc_assert (float_mode_p);
12033 *total = sparc_costs->float_mul;
12035 sub = XEXP (x, 0);
12036 if (GET_CODE (sub) == NEG)
12037 sub = XEXP (sub, 0);
12038 *total += rtx_cost (sub, mode, FMA, 0, speed);
12040 sub = XEXP (x, 2);
12041 if (GET_CODE (sub) == NEG)
12042 sub = XEXP (sub, 0);
12043 *total += rtx_cost (sub, mode, FMA, 2, speed);
12044 return true;
12047 case MULT:
12048 if (float_mode_p)
12049 *total = sparc_costs->float_mul;
12050 else if (TARGET_ARCH32 && !TARGET_HARD_MUL)
12051 *total = COSTS_N_INSNS (25);
12052 else
12054 int bit_cost;
12056 bit_cost = 0;
12057 if (sparc_costs->int_mul_bit_factor)
12059 int nbits;
12061 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
12063 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
12064 for (nbits = 0; value != 0; value &= value - 1)
12065 nbits++;
12067 else
12068 nbits = 7;
12070 if (nbits < 3)
12071 nbits = 3;
12072 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
12073 bit_cost = COSTS_N_INSNS (bit_cost);
12076 if (mode == DImode || !TARGET_HARD_MUL)
12077 *total = sparc_costs->int_mulX + bit_cost;
12078 else
12079 *total = sparc_costs->int_mul + bit_cost;
12081 return false;
12083 case ASHIFT:
12084 case ASHIFTRT:
12085 case LSHIFTRT:
12086 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
12087 return false;
12089 case DIV:
12090 case UDIV:
12091 case MOD:
12092 case UMOD:
12093 if (float_mode_p)
12095 if (mode == DFmode)
12096 *total = sparc_costs->float_div_df;
12097 else
12098 *total = sparc_costs->float_div_sf;
12100 else
12102 if (mode == DImode)
12103 *total = sparc_costs->int_divX;
12104 else
12105 *total = sparc_costs->int_div;
12107 return false;
12109 case NEG:
12110 if (! float_mode_p)
12112 *total = COSTS_N_INSNS (1);
12113 return false;
12115 /* FALLTHRU */
12117 case ABS:
12118 case FLOAT:
12119 case UNSIGNED_FLOAT:
12120 case FIX:
12121 case UNSIGNED_FIX:
12122 case FLOAT_EXTEND:
12123 case FLOAT_TRUNCATE:
12124 *total = sparc_costs->float_move;
12125 return false;
12127 case SQRT:
12128 if (mode == DFmode)
12129 *total = sparc_costs->float_sqrt_df;
12130 else
12131 *total = sparc_costs->float_sqrt_sf;
12132 return false;
12134 case COMPARE:
12135 if (float_mode_p)
12136 *total = sparc_costs->float_cmp;
12137 else
12138 *total = COSTS_N_INSNS (1);
12139 return false;
12141 case IF_THEN_ELSE:
12142 if (float_mode_p)
12143 *total = sparc_costs->float_cmove;
12144 else
12145 *total = sparc_costs->int_cmove;
12146 return false;
12148 case IOR:
12149 /* Handle the NAND vector patterns. */
12150 if (sparc_vector_mode_supported_p (mode)
12151 && GET_CODE (XEXP (x, 0)) == NOT
12152 && GET_CODE (XEXP (x, 1)) == NOT)
12154 *total = COSTS_N_INSNS (1);
12155 return true;
12157 else
12158 return false;
12160 default:
12161 return false;
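/* Worked example for the variable multiply cost above: a multiplication by
   the constant 0x15 (three bits set) gets nbits = 3 and thus no extra bit
   cost, while 0xffff (sixteen bits set) adds
   COSTS_N_INSNS ((16 - 3) / int_mul_bit_factor); a non-constant multiplier
   is charged as if 7 bits were set.  */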
12165 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
12167 static inline bool
12168 general_or_i64_p (reg_class_t rclass)
12170 return (rclass == GENERAL_REGS || rclass == I64_REGS);
12173 /* Implement TARGET_REGISTER_MOVE_COST. */
12175 static int
12176 sparc_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
12177 reg_class_t from, reg_class_t to)
12179 bool need_memory = false;
12181 /* This helps postreload CSE to eliminate redundant comparisons. */
12182 if (from == NO_REGS || to == NO_REGS)
12183 return 100;
12185 if (from == FPCC_REGS || to == FPCC_REGS)
12186 need_memory = true;
12187 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
12188 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
12190 if (TARGET_VIS3)
12192 int size = GET_MODE_SIZE (mode);
12193 if (size == 8 || size == 4)
12195 if (! TARGET_ARCH32 || size == 4)
12196 return 4;
12197 else
12198 return 6;
12201 need_memory = true;
12204 if (need_memory)
12206 if (sparc_cpu == PROCESSOR_ULTRASPARC
12207 || sparc_cpu == PROCESSOR_ULTRASPARC3
12208 || sparc_cpu == PROCESSOR_NIAGARA
12209 || sparc_cpu == PROCESSOR_NIAGARA2
12210 || sparc_cpu == PROCESSOR_NIAGARA3
12211 || sparc_cpu == PROCESSOR_NIAGARA4
12212 || sparc_cpu == PROCESSOR_NIAGARA7
12213 || sparc_cpu == PROCESSOR_M8)
12214 return 12;
12216 return 6;
12219 return 2;
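/* For instance, as the code above works out: on a VIS3 64-bit target an
   SImode or DImode move between the integer and FP register files costs 4,
   whereas on UltraSPARC without VIS3 the same move must go through memory
   and costs 12.  */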
12222 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
12223 This is achieved by means of a manual dynamic stack space allocation in
12224 the current frame. We make the assumption that SEQ doesn't contain any
12225 function calls, with the possible exception of calls to the GOT helper. */
12227 static void
12228 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
12230 /* We must preserve the lowest 16 words for the register save area. */
12231 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
12232 /* We really need only 2 words of fresh stack space. */
12233 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
12235 rtx slot
12236 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
12237 SPARC_STACK_BIAS + offset));
12239 emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
12240 emit_insn (gen_rtx_SET (slot, reg));
12241 if (reg2)
12242 emit_insn (gen_rtx_SET (adjust_address (slot, word_mode, UNITS_PER_WORD),
12243 reg2));
12244 emit_insn (seq);
12245 if (reg2)
12246 emit_insn (gen_rtx_SET (reg2,
12247 adjust_address (slot, word_mode, UNITS_PER_WORD)));
12248 emit_insn (gen_rtx_SET (reg, slot));
12249 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
12252 /* Output the assembler code for a thunk function. THUNK_DECL is the
12253 declaration for the thunk function itself, FUNCTION is the decl for
12254 the target function. DELTA is an immediate constant offset to be
12255 added to THIS. If VCALL_OFFSET is nonzero, the word at address
12256 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
12258 static void
12259 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
12260 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12261 tree function)
12263 rtx this_rtx, funexp;
12264 rtx_insn *insn;
12265 unsigned int int_arg_first;
12267 reload_completed = 1;
12268 epilogue_completed = 1;
12270 emit_note (NOTE_INSN_PROLOGUE_END);
12272 if (TARGET_FLAT)
12274 sparc_leaf_function_p = 1;
12276 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12278 else if (flag_delayed_branch)
12280 /* We will emit a regular sibcall below, so we need to instruct
12281 output_sibcall that we are in a leaf function. */
12282 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
12284 /* This will cause final.c to invoke leaf_renumber_regs so we
12285 must behave as if we were in a not-yet-leafified function. */
12286 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
12288 else
12290 /* We will emit the sibcall manually below, so we will need to
12291 manually spill non-leaf registers. */
12292 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
12294 /* We really are in a leaf function. */
12295 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12298 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
12299 returns a structure, the structure return pointer is there instead. */
12300 if (TARGET_ARCH64
12301 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12302 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
12303 else
12304 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
12306 /* Add DELTA. When possible use a plain add, otherwise load it into
12307 a register first. */
12308 if (delta)
12310 rtx delta_rtx = GEN_INT (delta);
12312 if (! SPARC_SIMM13_P (delta))
12314 rtx scratch = gen_rtx_REG (Pmode, 1);
12315 emit_move_insn (scratch, delta_rtx);
12316 delta_rtx = scratch;
12319 /* THIS_RTX += DELTA. */
12320 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
12323 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
12324 if (vcall_offset)
12326 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
12327 rtx scratch = gen_rtx_REG (Pmode, 1);
12329 gcc_assert (vcall_offset < 0);
12331 /* SCRATCH = *THIS_RTX. */
12332 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
12334 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
12335 may not have any available scratch register at this point. */
12336 if (SPARC_SIMM13_P (vcall_offset))
12338 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
12339 else if (! fixed_regs[5]
12340 /* The below sequence is made up of at least 2 insns,
12341 while the default method may need only one. */
12342 && vcall_offset < -8192)
12344 rtx scratch2 = gen_rtx_REG (Pmode, 5);
12345 emit_move_insn (scratch2, vcall_offset_rtx);
12346 vcall_offset_rtx = scratch2;
12348 else
12350 rtx increment = GEN_INT (-4096);
12352 /* VCALL_OFFSET is a negative number whose typical range can be
12353 estimated as -32768..0 in 32-bit mode. In almost all cases
12354 it is therefore cheaper to emit multiple add insns than
12355 spilling and loading the constant into a register (at least
12356 6 insns). */
12357 while (! SPARC_SIMM13_P (vcall_offset))
12359 emit_insn (gen_add2_insn (scratch, increment));
12360 vcall_offset += 4096;
12362 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
12365 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
12366 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
12367 gen_rtx_PLUS (Pmode,
12368 scratch,
12369 vcall_offset_rtx)));
12371 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
12372 emit_insn (gen_add2_insn (this_rtx, scratch));
12375 /* Generate a tail call to the target function. */
12376 if (! TREE_USED (function))
12378 assemble_external (function);
12379 TREE_USED (function) = 1;
12381 funexp = XEXP (DECL_RTL (function), 0);
12383 if (flag_delayed_branch)
12385 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
12386 insn = emit_call_insn (gen_sibcall (funexp));
12387 SIBLING_CALL_P (insn) = 1;
12389 else
12391 /* The hoops we have to jump through in order to generate a sibcall
12392 without using delay slots... */
12393 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
12395 if (flag_pic)
12397 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
12398 start_sequence ();
12399 load_got_register (); /* clobbers %o7 */
12400 if (!TARGET_VXWORKS_RTP)
12401 pic_offset_table_rtx = global_offset_table_rtx;
12402 scratch = sparc_legitimize_pic_address (funexp, scratch);
12403 seq = get_insns ();
12404 end_sequence ();
12405 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
12407 else if (TARGET_ARCH32)
12409 emit_insn (gen_rtx_SET (scratch,
12410 gen_rtx_HIGH (SImode, funexp)));
12411 emit_insn (gen_rtx_SET (scratch,
12412 gen_rtx_LO_SUM (SImode, scratch, funexp)));
12414 else /* TARGET_ARCH64 */
12416 switch (sparc_cmodel)
12418 case CM_MEDLOW:
12419 case CM_MEDMID:
12420 /* The destination can serve as a temporary. */
12421 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
12422 break;
12424 case CM_MEDANY:
12425 case CM_EMBMEDANY:
12426 /* The destination cannot serve as a temporary. */
12427 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
12428 start_sequence ();
12429 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
12430 seq = get_insns ();
12431 end_sequence ();
12432 emit_and_preserve (seq, spill_reg, 0);
12433 break;
12435 default:
12436 gcc_unreachable ();
12440 emit_jump_insn (gen_indirect_jump (scratch));
12443 emit_barrier ();
12445 /* Run just enough of rest_of_compilation to get the insns emitted.
12446 There's not really enough bulk here to make other passes such as
12447 instruction scheduling worth while. Note that use_thunk calls
12448 assemble_start_function and assemble_end_function. */
12449 insn = get_insns ();
12450 shorten_branches (insn);
12451 final_start_function (insn, file, 1);
12452 final (insn, file, 1);
12453 final_end_function ();
12455 reload_completed = 0;
12456 epilogue_completed = 0;
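/* As an illustration, not literal compiler output: for a thunk with
   DELTA = 8, VCALL_OFFSET = 0 and delayed branches enabled, the code
   generated above amounts to bumping the this pointer and tail-calling
   the target, roughly

     add  %o0, 8, %o0    ! this += delta
     <sibcall to the target function>  */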
12459 /* Return true if sparc_output_mi_thunk would be able to output the
12460 assembler code for the thunk function specified by the arguments
12461 it is passed, and false otherwise. */
12462 static bool
12463 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
12464 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
12465 HOST_WIDE_INT vcall_offset,
12466 const_tree function ATTRIBUTE_UNUSED)
12468 /* Bound the loop used in the default method above. */
12469 return (vcall_offset >= -32768 || ! fixed_regs[5]);
12472 /* How to allocate a 'struct machine_function'. */
12474 static struct machine_function *
12475 sparc_init_machine_status (void)
12477 return ggc_cleared_alloc<machine_function> ();
12480 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12481 We need to emit DTP-relative relocations. */
12483 static void
12484 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
12486 switch (size)
12488 case 4:
12489 fputs ("\t.word\t%r_tls_dtpoff32(", file);
12490 break;
12491 case 8:
12492 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
12493 break;
12494 default:
12495 gcc_unreachable ();
12497 output_addr_const (file, x);
12498 fputs (")", file);
12501 /* Do whatever processing is required at the end of a file. */
12503 static void
12504 sparc_file_end (void)
12506 /* If we need to emit the special GOT helper function, do so now. */
12507 if (got_helper_rtx)
12509 const char *name = XSTR (got_helper_rtx, 0);
12510 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
12511 #ifdef DWARF2_UNWIND_INFO
12512 bool do_cfi;
12513 #endif
12515 if (USE_HIDDEN_LINKONCE)
12517 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
12518 get_identifier (name),
12519 build_function_type_list (void_type_node,
12520 NULL_TREE));
12521 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
12522 NULL_TREE, void_type_node);
12523 TREE_PUBLIC (decl) = 1;
12524 TREE_STATIC (decl) = 1;
12525 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
12526 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
12527 DECL_VISIBILITY_SPECIFIED (decl) = 1;
12528 resolve_unique_section (decl, 0, flag_function_sections);
12529 allocate_struct_function (decl, true);
12530 cfun->is_thunk = 1;
12531 current_function_decl = decl;
12532 init_varasm_status ();
12533 assemble_start_function (decl, name);
12535 else
12537 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
12538 switch_to_section (text_section);
12539 if (align > 0)
12540 ASM_OUTPUT_ALIGN (asm_out_file, align);
12541 ASM_OUTPUT_LABEL (asm_out_file, name);
12544 #ifdef DWARF2_UNWIND_INFO
12545 do_cfi = dwarf2out_do_cfi_asm ();
12546 if (do_cfi)
12547 fprintf (asm_out_file, "\t.cfi_startproc\n");
12548 #endif
12549 if (flag_delayed_branch)
12550 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
12551 reg_name, reg_name);
12552 else
12553 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
12554 reg_name, reg_name);
12555 #ifdef DWARF2_UNWIND_INFO
12556 if (do_cfi)
12557 fprintf (asm_out_file, "\t.cfi_endproc\n");
12558 #endif
12561 if (NEED_INDICATE_EXEC_STACK)
12562 file_end_indicate_exec_stack ();
12564 #ifdef TARGET_SOLARIS
12565 solaris_file_end ();
12566 #endif
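/* For reference, typical output of the GOT helper above with delayed
   branches enabled, assuming the GOT register is %l7 and the usual
   helper naming (illustrative):

     __sparc_get_pc_thunk.l7:
             jmp  %o7+8
              add %o7, %l7, %l7

   i.e. the helper adds the return address in %o7 into the GOT register
   on the way back.  */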
12569 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
12570 /* Implement TARGET_MANGLE_TYPE. */
12572 static const char *
12573 sparc_mangle_type (const_tree type)
12575 if (TARGET_ARCH32
12576 && TYPE_MAIN_VARIANT (type) == long_double_type_node
12577 && TARGET_LONG_DOUBLE_128)
12578 return "g";
12580 /* For all other types, use normal C++ mangling. */
12581 return NULL;
12583 #endif
12585 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
12586 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
12587 bit 0 indicates that X is true, and bit 1 indicates Y is true. */
12589 void
12590 sparc_emit_membar_for_model (enum memmodel model,
12591 int load_store, int before_after)
12593 /* Bits for the MEMBAR mmask field. */
12594 const int LoadLoad = 1;
12595 const int StoreLoad = 2;
12596 const int LoadStore = 4;
12597 const int StoreStore = 8;
12599 int mm = 0, implied = 0;
12601 switch (sparc_memory_model)
12603 case SMM_SC:
12604 /* Sequential Consistency. All memory transactions are immediately
12605 visible in sequential execution order. No barriers needed. */
12606 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
12607 break;
12609 case SMM_TSO:
12610 /* Total Store Ordering: all memory transactions with store semantics
12611 are followed by an implied StoreStore. */
12612 implied |= StoreStore;
12614 /* If we're not looking for a raw barrier (before+after), then atomic
12615 operations get the benefit of being both load and store. */
12616 if (load_store == 3 && before_after == 1)
12617 implied |= StoreLoad;
12618 /* FALLTHRU */
12620 case SMM_PSO:
12621 /* Partial Store Ordering: all memory transactions with load semantics
12622 are followed by an implied LoadLoad | LoadStore. */
12623 implied |= LoadLoad | LoadStore;
12625 /* If we're not looking for a raw barrier (before+after), then atomic
12626 operations get the benefit of being both load and store. */
12627 if (load_store == 3 && before_after == 2)
12628 implied |= StoreLoad | StoreStore;
12629 /* FALLTHRU */
12631 case SMM_RMO:
12632 /* Relaxed Memory Ordering: no implicit bits. */
12633 break;
12635 default:
12636 gcc_unreachable ();
12639 if (before_after & 1)
12641 if (is_mm_release (model) || is_mm_acq_rel (model)
12642 || is_mm_seq_cst (model))
12644 if (load_store & 1)
12645 mm |= LoadLoad | StoreLoad;
12646 if (load_store & 2)
12647 mm |= LoadStore | StoreStore;
12650 if (before_after & 2)
12652 if (is_mm_acquire (model) || is_mm_acq_rel (model)
12653 || is_mm_seq_cst (model))
12655 if (load_store & 1)
12656 mm |= LoadLoad | LoadStore;
12657 if (load_store & 2)
12658 mm |= StoreLoad | StoreStore;
12662 /* Remove the bits implied by the system memory model. */
12663 mm &= ~implied;
12665 /* For raw barriers (before+after), always emit a barrier.
12666 This will become a compile-time barrier if needed. */
12667 if (mm || before_after == 3)
12668 emit_insn (gen_membar (GEN_INT (mm)));
12671 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing
12672 a 32-bit compare and swap on the word containing the byte or half-word. */
12674 static void
12675 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
12676 rtx oldval, rtx newval)
12678 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
12679 rtx addr = gen_reg_rtx (Pmode);
12680 rtx off = gen_reg_rtx (SImode);
12681 rtx oldv = gen_reg_rtx (SImode);
12682 rtx newv = gen_reg_rtx (SImode);
12683 rtx oldvalue = gen_reg_rtx (SImode);
12684 rtx newvalue = gen_reg_rtx (SImode);
12685 rtx res = gen_reg_rtx (SImode);
12686 rtx resv = gen_reg_rtx (SImode);
12687 rtx memsi, val, mask, cc;
12689 emit_insn (gen_rtx_SET (addr, gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
12691 if (Pmode != SImode)
12692 addr1 = gen_lowpart (SImode, addr1);
12693 emit_insn (gen_rtx_SET (off, gen_rtx_AND (SImode, addr1, GEN_INT (3))));
12695 memsi = gen_rtx_MEM (SImode, addr);
12696 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
12697 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
12699 val = copy_to_reg (memsi);
12701 emit_insn (gen_rtx_SET (off,
12702 gen_rtx_XOR (SImode, off,
12703 GEN_INT (GET_MODE (mem) == QImode
12704 ? 3 : 2))));
12706 emit_insn (gen_rtx_SET (off, gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
12708 if (GET_MODE (mem) == QImode)
12709 mask = force_reg (SImode, GEN_INT (0xff));
12710 else
12711 mask = force_reg (SImode, GEN_INT (0xffff));
12713 emit_insn (gen_rtx_SET (mask, gen_rtx_ASHIFT (SImode, mask, off)));
12715 emit_insn (gen_rtx_SET (val,
12716 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12717 val)));
12719 oldval = gen_lowpart (SImode, oldval);
12720 emit_insn (gen_rtx_SET (oldv, gen_rtx_ASHIFT (SImode, oldval, off)));
12722 newval = gen_lowpart_common (SImode, newval);
12723 emit_insn (gen_rtx_SET (newv, gen_rtx_ASHIFT (SImode, newval, off)));
12725 emit_insn (gen_rtx_SET (oldv, gen_rtx_AND (SImode, oldv, mask)));
12727 emit_insn (gen_rtx_SET (newv, gen_rtx_AND (SImode, newv, mask)));
12729 rtx_code_label *end_label = gen_label_rtx ();
12730 rtx_code_label *loop_label = gen_label_rtx ();
12731 emit_label (loop_label);
12733 emit_insn (gen_rtx_SET (oldvalue, gen_rtx_IOR (SImode, oldv, val)));
12735 emit_insn (gen_rtx_SET (newvalue, gen_rtx_IOR (SImode, newv, val)));
12737 emit_move_insn (bool_result, const1_rtx);
12739 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
12741 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
12743 emit_insn (gen_rtx_SET (resv,
12744 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12745 res)));
12747 emit_move_insn (bool_result, const0_rtx);
12749 cc = gen_compare_reg_1 (NE, resv, val);
12750 emit_insn (gen_rtx_SET (val, resv));
12752 /* Use cbranchcc4 to separate the compare and branch! */
12753 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
12754 cc, const0_rtx, loop_label));
12756 emit_label (end_label);
12758 emit_insn (gen_rtx_SET (res, gen_rtx_AND (SImode, res, mask)));
12760 emit_insn (gen_rtx_SET (res, gen_rtx_LSHIFTRT (SImode, res, off)));
12762 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
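/* In outline, the expansion above corresponds to this C-level sketch
   (illustrative only; QImode case, big-endian byte numbering):

     word = addr & -4;
     off  = ((addr & 3) ^ 3) * 8;          // bit offset of the byte
     mask = 0xff << off;
     bg   = *word & ~mask;                 // bytes around the target
     for (;;)
       {
         expected = bg | ((oldval << off) & mask);
         desired  = bg | ((newval << off) & mask);
         observed = cas (word, expected, desired);
         if (observed == expected)
           break;                          // swap succeeded
         if ((observed & ~mask) == bg)
           break;                          // target byte mismatched: failure
         bg = observed & ~mask;            // neighbors changed: retry
       }
     result = (observed & mask) >> off;  */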
12765 /* Expand code to perform a compare-and-swap. */
12767 void
12768 sparc_expand_compare_and_swap (rtx operands[])
12770 rtx bval, retval, mem, oldval, newval;
12771 machine_mode mode;
12772 enum memmodel model;
12774 bval = operands[0];
12775 retval = operands[1];
12776 mem = operands[2];
12777 oldval = operands[3];
12778 newval = operands[4];
12779 model = (enum memmodel) INTVAL (operands[6]);
12780 mode = GET_MODE (mem);
12782 sparc_emit_membar_for_model (model, 3, 1);
12784 if (reg_overlap_mentioned_p (retval, oldval))
12785 oldval = copy_to_reg (oldval);
12787 if (mode == QImode || mode == HImode)
12788 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
12789 else
12791 rtx (*gen) (rtx, rtx, rtx, rtx);
12792 rtx x;
12794 if (mode == SImode)
12795 gen = gen_atomic_compare_and_swapsi_1;
12796 else
12797 gen = gen_atomic_compare_and_swapdi_1;
12798 emit_insn (gen (retval, mem, oldval, newval));
12800 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
12801 if (x != bval)
12802 convert_move (bval, x, 1);
12805 sparc_emit_membar_for_model (model, 3, 2);
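/* Usage-wise (illustrative): a call such as

     _Bool ok = __atomic_compare_exchange_n (p, &expected, desired, 0,
                                             __ATOMIC_SEQ_CST,
                                             __ATOMIC_SEQ_CST);

   on a char or short object reaches this expander with QImode/HImode and
   is routed to sparc_expand_compare_and_swap_12 above, while SImode and
   DImode objects use the cas/casx-based patterns directly.  */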
12808 void
12809 sparc_expand_vec_perm_bmask (machine_mode vmode, rtx sel)
12811 rtx t_1, t_2, t_3;
12813 sel = gen_lowpart (DImode, sel);
12814 switch (vmode)
12816 case E_V2SImode:
12817 /* inp = xxxxxxxAxxxxxxxB */
12818 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12819 NULL_RTX, 1, OPTAB_DIRECT);
12820 /* t_1 = ....xxxxxxxAxxx. */
12821 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12822 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
12823 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12824 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
12825 /* sel = .......B */
12826 /* t_1 = ...A.... */
12827 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12828 /* sel = ...A...B */
12829 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
12830 /* sel = AAAABBBB * 4 */
12831 t_1 = force_reg (SImode, GEN_INT (0x01230123));
12832 /* sel = { A*4, A*4+1, A*4+2, ... } */
12833 break;
12835 case E_V4HImode:
12836 /* inp = xxxAxxxBxxxCxxxD */
12837 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12838 NULL_RTX, 1, OPTAB_DIRECT);
12839 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12840 NULL_RTX, 1, OPTAB_DIRECT);
12841 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
12842 NULL_RTX, 1, OPTAB_DIRECT);
12843 /* t_1 = ..xxxAxxxBxxxCxx */
12844 /* t_2 = ....xxxAxxxBxxxC */
12845 /* t_3 = ......xxxAxxxBxx */
12846 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12847 GEN_INT (0x07),
12848 NULL_RTX, 1, OPTAB_DIRECT);
12849 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12850 GEN_INT (0x0700),
12851 NULL_RTX, 1, OPTAB_DIRECT);
12852 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
12853 GEN_INT (0x070000),
12854 NULL_RTX, 1, OPTAB_DIRECT);
12855 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
12856 GEN_INT (0x07000000),
12857 NULL_RTX, 1, OPTAB_DIRECT);
12858 /* sel = .......D */
12859 /* t_1 = .....C.. */
12860 /* t_2 = ...B.... */
12861 /* t_3 = .A...... */
12862 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12863 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
12864 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
12865 /* sel = .A.B.C.D */
12866 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
12867 /* sel = AABBCCDD * 2 */
12868 t_1 = force_reg (SImode, GEN_INT (0x01010101));
12869 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
12870 break;
12872 case E_V8QImode:
12873 /* input = xAxBxCxDxExFxGxH */
12874 sel = expand_simple_binop (DImode, AND, sel,
12875 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
12876 | 0x0f0f0f0f),
12877 NULL_RTX, 1, OPTAB_DIRECT);
12878 /* sel = .A.B.C.D.E.F.G.H */
12879 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
12880 NULL_RTX, 1, OPTAB_DIRECT);
12881 /* t_1 = ..A.B.C.D.E.F.G. */
12882 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12883 NULL_RTX, 1, OPTAB_DIRECT);
12884 /* sel = .AABBCCDDEEFFGGH */
12885 sel = expand_simple_binop (DImode, AND, sel,
12886 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
12887 | 0xff00ff),
12888 NULL_RTX, 1, OPTAB_DIRECT);
12889 /* sel = ..AB..CD..EF..GH */
12890 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12891 NULL_RTX, 1, OPTAB_DIRECT);
12892 /* t_1 = ....AB..CD..EF.. */
12893 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12894 NULL_RTX, 1, OPTAB_DIRECT);
12895 /* sel = ..ABABCDCDEFEFGH */
12896 sel = expand_simple_binop (DImode, AND, sel,
12897 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
12898 NULL_RTX, 1, OPTAB_DIRECT);
12899 /* sel = ....ABCD....EFGH */
12900 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12901 NULL_RTX, 1, OPTAB_DIRECT);
12902 /* t_1 = ........ABCD.... */
12903 sel = gen_lowpart (SImode, sel);
12904 t_1 = gen_lowpart (SImode, t_1);
12905 break;
12907 default:
12908 gcc_unreachable ();
12911 /* Always perform the final addition/merge within the bmask insn. */
12912 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
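/* A worked V4HImode instance of the arithmetic above: if the four 3-bit
   selector values, reading from the most significant lane down, are
   1, 2, 3, 0, then sel reduces to 0x01020300, the multiplication by 0x22
   gives 0x22446600, and the final bmask addition of 0x01010101 produces
   0x23456701, i.e. the byte-index nibbles { 2,3, 4,5, 6,7, 0,1 }.  */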
12915 /* Implement TARGET_VEC_PERM_CONST. */
12917 static bool
12918 sparc_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
12919 rtx op1, const vec_perm_indices &sel)
12921 if (!TARGET_VIS2)
12922 return false;
12924 /* All permutes are supported. */
12925 if (!target)
12926 return true;
12928 /* Force target-independent code to convert constant permutations on other
12929 modes down to V8QI. Rely on this to avoid the complexity of the byte
12930 order of the permutation. */
12931 if (vmode != V8QImode)
12932 return false;
12934 unsigned int i, mask;
12935 for (i = mask = 0; i < 8; ++i)
12936 mask |= (sel[i] & 0xf) << (28 - i*4);
12937 rtx mask_rtx = force_reg (SImode, gen_int_mode (mask, SImode));
12939 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), mask_rtx, const0_rtx));
12940 emit_insn (gen_bshufflev8qi_vis (target, op0, op1));
12941 return true;
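/* Concrete example of the mask computation above: the V8QI interleave-low
   selector { 0, 8, 1, 9, 2, 10, 3, 11 } contributes one nibble per index
   from bit 28 downwards, giving the bmask constant 0x08192a3b before the
   bshuffle is emitted.  */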
12944 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
12946 static bool
12947 sparc_frame_pointer_required (void)
12949 /* If the stack pointer is dynamically modified in the function, it cannot
12950 serve as the frame pointer. */
12951 if (cfun->calls_alloca)
12952 return true;
12954 /* If the function receives nonlocal gotos, it needs to save the frame
12955 pointer in the nonlocal_goto_save_area object. */
12956 if (cfun->has_nonlocal_label)
12957 return true;
12959 /* In flat mode, that's it. */
12960 if (TARGET_FLAT)
12961 return false;
12963 /* Otherwise, the frame pointer is required if the function isn't leaf, but
12964 we cannot use sparc_leaf_function_p since it hasn't been computed yet. */
12965 return !(optimize > 0 && crtl->is_leaf && only_leaf_regs_used ());
12968 /* The way this is structured, we can't eliminate SFP in favor of SP
12969 if the frame pointer is required: we want to use the SFP->HFP elimination
12970 in that case. But the test in update_eliminables doesn't know we are
12971 assuming below that we only do the former elimination. */
12973 static bool
12974 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
12976 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
12979 /* Return the hard frame pointer directly to bypass the stack bias. */
12981 static rtx
12982 sparc_builtin_setjmp_frame_value (void)
12984 return hard_frame_pointer_rtx;
12987 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
12988 they won't be allocated. */
12990 static void
12991 sparc_conditional_register_usage (void)
12993 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
12995 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12996 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12998 /* If the user has passed -f{fixed,call-{used,saved}}-g5,
12999 then honor it. */
13000 if (TARGET_ARCH32 && fixed_regs[5])
13001 fixed_regs[5] = 1;
13002 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
13003 fixed_regs[5] = 0;
13004 if (! TARGET_V9)
13006 int regno;
13007 for (regno = SPARC_FIRST_V9_FP_REG;
13008 regno <= SPARC_LAST_V9_FP_REG;
13009 regno++)
13010 fixed_regs[regno] = 1;
13011 /* %fcc0 is used by v8 and v9. */
13012 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
13013 regno <= SPARC_LAST_V9_FCC_REG;
13014 regno++)
13015 fixed_regs[regno] = 1;
13017 if (! TARGET_FPU)
13019 int regno;
13020 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
13021 fixed_regs[regno] = 1;
13023 /* If the user has passed -f{fixed,call-{used,saved}}-g2,
13024 then honor it. Likewise with g3 and g4. */
13025 if (fixed_regs[2] == 2)
13026 fixed_regs[2] = ! TARGET_APP_REGS;
13027 if (fixed_regs[3] == 2)
13028 fixed_regs[3] = ! TARGET_APP_REGS;
13029 if (TARGET_ARCH32 && fixed_regs[4] == 2)
13030 fixed_regs[4] = ! TARGET_APP_REGS;
13031 else if (TARGET_CM_EMBMEDANY)
13032 fixed_regs[4] = 1;
13033 else if (fixed_regs[4] == 2)
13034 fixed_regs[4] = 0;
13035 if (TARGET_FLAT)
13037 int regno;
13038 /* Disable leaf functions. */
13039 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
13040 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
13041 leaf_reg_remap [regno] = regno;
13043 if (TARGET_VIS)
13044 global_regs[SPARC_GSR_REG] = 1;
13047 /* Implement TARGET_USE_PSEUDO_PIC_REG. */
13049 static bool
13050 sparc_use_pseudo_pic_reg (void)
13052 return !TARGET_VXWORKS_RTP && flag_pic;
13055 /* Implement TARGET_INIT_PIC_REG. */
13057 static void
13058 sparc_init_pic_reg (void)
13060 edge entry_edge;
13061 rtx_insn *seq;
13063 if (!crtl->uses_pic_offset_table)
13064 return;
13066 start_sequence ();
13067 load_got_register ();
13068 if (!TARGET_VXWORKS_RTP)
13069 emit_move_insn (pic_offset_table_rtx, global_offset_table_rtx);
13070 seq = get_insns ();
13071 end_sequence ();
13073 entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
13074 insert_insn_on_edge (seq, entry_edge);
13075 commit_one_edge_insertion (entry_edge);
13078 /* Implement TARGET_PREFERRED_RELOAD_CLASS:
13080 - We can't load constants into FP registers.
13081 - We can't load FP constants into integer registers when soft-float,
13082 because there is no soft-float pattern with a r/F constraint.
13083 - We can't load FP constants into integer registers for TFmode unless
13084 it is 0.0L, because there is no movtf pattern with a r/F constraint.
13085 - Try and reload integer constants (symbolic or otherwise) back into
13086 registers directly, rather than having them dumped to memory. */
13088 static reg_class_t
13089 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
13091 machine_mode mode = GET_MODE (x);
13092 if (CONSTANT_P (x))
13094 if (FP_REG_CLASS_P (rclass)
13095 || rclass == GENERAL_OR_FP_REGS
13096 || rclass == GENERAL_OR_EXTRA_FP_REGS
13097 || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
13098 || (mode == TFmode && ! const_zero_operand (x, mode)))
13099 return NO_REGS;
13101 if (GET_MODE_CLASS (mode) == MODE_INT)
13102 return GENERAL_REGS;
13104 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13106 if (! FP_REG_CLASS_P (rclass)
13107 || !(const_zero_operand (x, mode)
13108 || const_all_ones_operand (x, mode)))
13109 return NO_REGS;
13113 if (TARGET_VIS3
13114 && ! TARGET_ARCH64
13115 && (rclass == EXTRA_FP_REGS
13116 || rclass == GENERAL_OR_EXTRA_FP_REGS))
13118 int regno = true_regnum (x);
13120 if (SPARC_INT_REG_P (regno))
13121 return (rclass == EXTRA_FP_REGS
13122 ? FP_REGS : GENERAL_OR_FP_REGS);
13125 return rclass;
13128 /* Return true if we use LRA instead of the reload pass. */
13130 static bool
13131 sparc_lra_p (void)
13133 return TARGET_LRA;
13136 /* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
13137 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
13139 const char *
13140 output_v8plus_mult (rtx_insn *insn, rtx *operands, const char *opcode)
13142 char mulstr[32];
13144 gcc_assert (! TARGET_ARCH64);
13146 if (sparc_check_64 (operands[1], insn) <= 0)
13147 output_asm_insn ("srl\t%L1, 0, %L1", operands);
13148 if (which_alternative == 1)
13149 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
13150 if (GET_CODE (operands[2]) == CONST_INT)
13152 if (which_alternative == 1)
13154 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13155 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
13156 output_asm_insn (mulstr, operands);
13157 return "srlx\t%L0, 32, %H0";
13159 else
13161 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13162 output_asm_insn ("or\t%L1, %3, %3", operands);
13163 sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
13164 output_asm_insn (mulstr, operands);
13165 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13166 return "mov\t%3, %L0";
13169 else if (rtx_equal_p (operands[1], operands[2]))
13171 if (which_alternative == 1)
13173 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13174 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
13175 output_asm_insn (mulstr, operands);
13176 return "srlx\t%L0, 32, %H0";
13178 else
13180 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13181 output_asm_insn ("or\t%L1, %3, %3", operands);
13182 sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
13183 output_asm_insn (mulstr, operands);
13184 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13185 return "mov\t%3, %L0";
13188 if (sparc_check_64 (operands[2], insn) <= 0)
13189 output_asm_insn ("srl\t%L2, 0, %L2", operands);
13190 if (which_alternative == 1)
13192 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13193 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
13194 output_asm_insn ("or\t%L2, %L1, %L1", operands);
13195 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
13196 output_asm_insn (mulstr, operands);
13197 return "srlx\t%L0, 32, %H0";
13199 else
13201 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13202 output_asm_insn ("sllx\t%H2, 32, %4", operands);
13203 output_asm_insn ("or\t%L1, %3, %3", operands);
13204 output_asm_insn ("or\t%L2, %4, %4", operands);
13205 sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
13206 output_asm_insn (mulstr, operands);
13207 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13208 return "mov\t%3, %L0";
13212 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13213 all fields of TARGET to ELT by means of VIS2 BSHUFFLE insn. MODE
13214 and INNER_MODE are the modes describing TARGET. */
13216 static void
13217 vector_init_bshuffle (rtx target, rtx elt, machine_mode mode,
13218 machine_mode inner_mode)
13220 rtx t1, final_insn, sel;
13221 int bmask;
13223 t1 = gen_reg_rtx (mode);
13225 elt = convert_modes (SImode, inner_mode, elt, true);
13226 emit_move_insn (gen_lowpart(SImode, t1), elt);
13228 switch (mode)
13230 case E_V2SImode:
13231 final_insn = gen_bshufflev2si_vis (target, t1, t1);
13232 bmask = 0x45674567;
13233 break;
13234 case E_V4HImode:
13235 final_insn = gen_bshufflev4hi_vis (target, t1, t1);
13236 bmask = 0x67676767;
13237 break;
13238 case E_V8QImode:
13239 final_insn = gen_bshufflev8qi_vis (target, t1, t1);
13240 bmask = 0x77777777;
13241 break;
13242 default:
13243 gcc_unreachable ();
13246 sel = force_reg (SImode, GEN_INT (bmask));
13247 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
13248 emit_insn (final_insn);
13251 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13252 all fields of TARGET to ELT in V8QI by means of VIS FPMERGE insn. */
13254 static void
13255 vector_init_fpmerge (rtx target, rtx elt)
13257 rtx t1, t2, t2_low, t3, t3_low;
13259 t1 = gen_reg_rtx (V4QImode);
13260 elt = convert_modes (SImode, QImode, elt, true);
13261 emit_move_insn (gen_lowpart (SImode, t1), elt);
13263 t2 = gen_reg_rtx (V8QImode);
13264 t2_low = gen_lowpart (V4QImode, t2);
13265 emit_insn (gen_fpmerge_vis (t2, t1, t1));
13267 t3 = gen_reg_rtx (V8QImode);
13268 t3_low = gen_lowpart (V4QImode, t3);
13269 emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));
13271 emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
13274 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13275 all fields of TARGET to ELT in V4HI by means of VIS FALIGNDATA insn. */
13277 static void
13278 vector_init_faligndata (rtx target, rtx elt)
13280 rtx t1 = gen_reg_rtx (V4HImode);
13281 int i;
13283 elt = convert_modes (SImode, HImode, elt, true);
13284 emit_move_insn (gen_lowpart (SImode, t1), elt);
13286 emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
13287 force_reg (SImode, GEN_INT (6)),
13288 const0_rtx));
13290 for (i = 0; i < 4; i++)
13291 emit_insn (gen_faligndatav4hi_vis (target, t1, target));
/* Emit code to initialize TARGET to values for individual fields VALS.  */

void
sparc_expand_vector_init (rtx target, rtx vals)
{
  const machine_mode mode = GET_MODE (target);
  const machine_mode inner_mode = GET_MODE_INNER (mode);
  const int n_elts = GET_MODE_NUNITS (mode);
  int i, n_var = 0;
  bool all_same = true;
  rtx mem;

  for (i = 0; i < n_elts; i++)
    {
      rtx x = XVECEXP (vals, 0, i);
      if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
        n_var++;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
    {
      if (GET_MODE_SIZE (inner_mode) == 4)
        {
          emit_move_insn (gen_lowpart (SImode, target),
                          gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
          return;
        }
      else if (GET_MODE_SIZE (inner_mode) == 8)
        {
          emit_move_insn (gen_lowpart (DImode, target),
                          gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
          return;
        }
    }
  else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
           && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
    {
      emit_move_insn (gen_highpart (word_mode, target),
                      gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
      emit_move_insn (gen_lowpart (word_mode, target),
                      gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
      return;
    }

  if (all_same && GET_MODE_SIZE (mode) == 8)
    {
      if (TARGET_VIS2)
        {
          vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
          return;
        }
      if (mode == V8QImode)
        {
          vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
          return;
        }
      if (mode == V4HImode)
        {
          vector_init_faligndata (target, XVECEXP (vals, 0, 0));
          return;
        }
    }
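  /* No special-case strategy applied: store the elements into a stack
     temporary one by one and load the whole vector back.  */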
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
                                       i * GET_MODE_SIZE (inner_mode)),
                    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  sri->icode = CODE_FOR_nothing;
  sri->extra_cost = 0;

  /* We need a temporary when loading/storing a HImode/QImode value
     between memory and the FPU registers.  This can happen when combine puts
     a paradoxical subreg in a float/fix conversion insn.  */
  if (FP_REG_CLASS_P (rclass)
      && (mode == HImode || mode == QImode)
      && (GET_CODE (x) == MEM
          || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
              && true_regnum (x) == -1)))
    return GENERAL_REGS;

  /* On 32-bit we need a temporary when loading/storing a DFmode value
     between unaligned memory and the upper FPU registers.  */
  if (TARGET_ARCH32
      && rclass == EXTRA_FP_REGS
      && mode == DFmode
      && GET_CODE (x) == MEM
      && ! mem_min_alignment (x, 8))
    return FP_REGS;

  if (((TARGET_CM_MEDANY
        && symbolic_operand (x, mode))
       || (TARGET_CM_EMBMEDANY
           && text_segment_operand (x, mode)))
      && ! flag_pic)
    {
      if (in_p)
        sri->icode = direct_optab_handler (reload_in_optab, mode);
      else
        sri->icode = direct_optab_handler (reload_out_optab, mode);
      return NO_REGS;
    }

  if (TARGET_VIS3 && TARGET_ARCH32)
    {
      int regno = true_regnum (x);

      /* When using VIS3 fp<-->int register moves, on 32-bit we have
         to move 8-byte values in 4-byte pieces.  This only works via
         FP_REGS, and not via EXTRA_FP_REGS.  Therefore if we try to
         move between EXTRA_FP_REGS and GENERAL_REGS, we will need
         an FP_REGS intermediate move.  */
      if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
          || ((general_or_i64_p (rclass)
               || rclass == GENERAL_OR_FP_REGS)
              && SPARC_FP_REG_P (regno)))
        {
          sri->extra_cost = 2;
          return FP_REGS;
        }
    }

  return NO_REGS;
}

/* Implement TARGET_SECONDARY_MEMORY_NEEDED.

   On SPARC when not VIS3 it is not possible to directly move data
   between GENERAL_REGS and FP_REGS.  */
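/* With VIS3, 4-byte and 8-byte values can be moved directly between the
   register files (e.g. via movwtos/movstouw and movdtox/movxtod), so a
   memory intermediate is only required for smaller or larger modes.  */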
static bool
sparc_secondary_memory_needed (machine_mode mode, reg_class_t class1,
                               reg_class_t class2)
{
  return ((FP_REG_CLASS_P (class1) != FP_REG_CLASS_P (class2))
          && (! TARGET_VIS3
              || GET_MODE_SIZE (mode) > 8
              || GET_MODE_SIZE (mode) < 4));
}
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.

   get_secondary_mem widens its argument to BITS_PER_WORD which loses on v9
   because the movsi and movsf patterns don't handle r/f moves.
   For v8 we copy the default definition.  */

static machine_mode
sparc_secondary_memory_needed_mode (machine_mode mode)
{
  if (TARGET_ARCH64)
    {
      if (GET_MODE_BITSIZE (mode) < 32)
        return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
      return mode;
    }
  else
    {
      if (GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
        return mode_for_size (BITS_PER_WORD,
                              GET_MODE_CLASS (mode), 0).require ();
      return mode;
    }
}
/* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
   OPERANDS[0] in MODE.  OPERANDS[1] is the operator of the condition.  */

bool
sparc_expand_conditional_move (machine_mode mode, rtx *operands)
{
  enum rtx_code rc = GET_CODE (operands[1]);
  machine_mode cmp_mode;
  rtx cc_reg, dst, cmp;

  cmp = operands[1];
  if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
    return false;

  if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
    cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);

  cmp_mode = GET_MODE (XEXP (cmp, 0));
  rc = GET_CODE (cmp);
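  /* A conditional move only overwrites DST when the condition holds, so
     arrange for DST to hold the "false" value up front: either copy
     OPERANDS[3] into it, or, when DST is already the "true" value,
     reverse the condition and conditionally store OPERANDS[3] instead.  */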
  dst = operands[0];
  if (! rtx_equal_p (operands[2], dst)
      && ! rtx_equal_p (operands[3], dst))
    {
      if (reg_overlap_mentioned_p (dst, cmp))
        dst = gen_reg_rtx (mode);

      emit_move_insn (dst, operands[3]);
    }
  else if (operands[2] == dst)
    {
      operands[2] = operands[3];

      if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
        rc = reverse_condition_maybe_unordered (rc);
      else
        rc = reverse_condition (rc);
    }

  if (XEXP (cmp, 1) == const0_rtx
      && GET_CODE (XEXP (cmp, 0)) == REG
      && cmp_mode == DImode
      && v9_regcmp_p (rc))
    cc_reg = XEXP (cmp, 0);
  else
    cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));

  cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);

  emit_insn (gen_rtx_SET (dst,
                          gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));

  if (dst != operands[0])
    emit_move_insn (operands[0], dst);

  return true;
}
/* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
   into OPERANDS[0] in MODE, depending on the outcome of the comparison of
   OPERANDS[4] and OPERANDS[5].  OPERANDS[3] is the operator of the condition.
   FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
   code to be used for the condition mask.  */

void
sparc_expand_vcond (machine_mode mode, rtx *operands, int ccode, int fcode)
{
  rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
  enum rtx_code code = GET_CODE (operands[3]);

  mask = gen_reg_rtx (Pmode);
  cop0 = operands[4];
  cop1 = operands[5];
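  /* The VIS fcmp instructions only implement GT, LE, EQ and NE, so
     canonicalize LT and GE by swapping the comparison operands.  */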
  if (code == LT || code == GE)
    {
      rtx t;

      code = swap_condition (code);
      t = cop0; cop0 = cop1; cop1 = t;
    }

  gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);

  fcmp = gen_rtx_UNSPEC (Pmode,
                         gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
                         fcode);

  cmask = gen_rtx_UNSPEC (DImode,
                          gen_rtvec (2, mask, gsr),
                          ccode);

  bshuf = gen_rtx_UNSPEC (mode,
                          gen_rtvec (3, operands[1], operands[2], gsr),
                          UNSPEC_BSHUFFLE);

  emit_insn (gen_rtx_SET (mask, fcmp));
  emit_insn (gen_rtx_SET (gsr, cmask));

  emit_insn (gen_rtx_SET (operands[0], bshuf));
}
/* On SPARC, any mode which naturally allocates into the float
   registers should return 4 here.  */
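/* For example, DImode yields 8 on 64-bit because it occupies a single
   integer register, whereas DFmode yields 4 because the float registers
   are still allocated in 32-bit units.  */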
unsigned int
sparc_regmode_natural_size (machine_mode mode)
{
  int size = UNITS_PER_WORD;

  if (TARGET_ARCH64)
    {
      enum mode_class mclass = GET_MODE_CLASS (mode);

      if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
        size = 4;
    }

  return size;
}
/* Implement TARGET_HARD_REGNO_NREGS.

   On SPARC, ordinary registers hold 32 bits worth; this means both
   integer and floating point registers.  On v9, integer regs hold 64
   bits worth; floating point regs hold 32 bits worth (this includes the
   new fp regs as even the odd ones are included in the hard register
   count).  */
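/* Hence on v9 a DFmode value needs one integer register but two fp
   registers, and a TFmode value two integer or four fp registers.  */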
static unsigned int
sparc_hard_regno_nregs (unsigned int regno, machine_mode mode)
{
  if (regno == SPARC_GSR_REG)
    return 1;
  if (TARGET_ARCH64)
    {
      if (SPARC_INT_REG_P (regno) || regno == FRAME_POINTER_REGNUM)
        return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
      return CEIL (GET_MODE_SIZE (mode), 4);
    }
  return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
}
/* Implement TARGET_HARD_REGNO_MODE_OK.

   ??? Because of the funny way we pass parameters we should allow certain
   ??? types of float/complex values to be in integer registers during
   ??? RTL generation.  This only matters on arch32.  */

static bool
sparc_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0;
}
/* Implement TARGET_MODES_TIEABLE_P.

   For V9 we have to deal with the fact that only the lower 32 floating
   point registers are 32-bit addressable.  */

static bool
sparc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  enum mode_class mclass1, mclass2;
  unsigned short size1, size2;

  if (mode1 == mode2)
    return true;

  mclass1 = GET_MODE_CLASS (mode1);
  mclass2 = GET_MODE_CLASS (mode2);
  if (mclass1 != mclass2)
    return false;

  if (! TARGET_V9)
    return true;

  /* Classes are the same and we are V9 so we have to deal with upper
     vs. lower floating point registers.  If one of the modes is a
     4-byte mode, and the other is not, we have to mark them as not
     tieable because only the lower 32 floating point registers are
     addressable 32 bits at a time.

     We can't just test explicitly for SFmode, otherwise we won't
     cover the vector mode cases properly.  */
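  /* For instance, SFmode must not be tied to DFmode, and likewise
     V4QImode (4 bytes) must not be tied to V8QImode (8 bytes).  */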
  if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
    return true;

  size1 = GET_MODE_SIZE (mode1);
  size2 = GET_MODE_SIZE (mode2);
  if ((size1 > 4 && size2 == 4)
      || (size2 > 4 && size1 == 4))
    return false;

  return true;
}
/* Implement TARGET_CSTORE_MODE.  */

static scalar_int_mode
sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
{
  return (TARGET_ARCH64 ? DImode : SImode);
}
/* Return the compound expression made of T1 and T2.  */

static inline tree
compound_expr (tree t1, tree t2)
{
  return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
}
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_FPU)
    return;

  const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
  const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;
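  /* These masks cover the FSR's accrued exception field (aexc, bits
     9:5) and trap enable mask (TEM, bits 27:23) respectively.  */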
  /* We generate the equivalent of feholdexcept (&fenv_var):

       unsigned int fenv_var;
       __builtin_store_fsr (&fenv_var);

       unsigned int tmp1_var;
       tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);

       __builtin_load_fsr (&tmp1_var);  */

  tree fenv_var = create_tmp_var_raw (unsigned_type_node);
  TREE_ADDRESSABLE (fenv_var) = 1;
  tree fenv_addr = build_fold_addr_expr (fenv_var);
  tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
  tree hold_stfsr
    = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
              build_call_expr (stfsr, 1, fenv_addr), NULL_TREE, NULL_TREE);

  tree tmp1_var = create_tmp_var_raw (unsigned_type_node);
  TREE_ADDRESSABLE (tmp1_var) = 1;
  tree masked_fenv_var
    = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
              build_int_cst (unsigned_type_node,
                             ~(accrued_exception_mask | trap_enable_mask)));
  tree hold_mask
    = build4 (TARGET_EXPR, unsigned_type_node, tmp1_var, masked_fenv_var,
              NULL_TREE, NULL_TREE);

  tree tmp1_addr = build_fold_addr_expr (tmp1_var);
  tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
  tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);

  *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);

  /* We reload the value of tmp1_var to clear the exceptions:

       __builtin_load_fsr (&tmp1_var);  */

  *clear = build_call_expr (ldfsr, 1, tmp1_addr);

  /* We generate the equivalent of feupdateenv (&fenv_var):

       unsigned int tmp2_var;
       __builtin_store_fsr (&tmp2_var);

       __builtin_load_fsr (&fenv_var);

       if (SPARC_LOW_FE_EXCEPT_VALUES)
         tmp2_var >>= 5;
       __atomic_feraiseexcept ((int) tmp2_var);  */

  tree tmp2_var = create_tmp_var_raw (unsigned_type_node);
  TREE_ADDRESSABLE (tmp2_var) = 1;
  tree tmp2_addr = build_fold_addr_expr (tmp2_var);
  tree update_stfsr
    = build4 (TARGET_EXPR, unsigned_type_node, tmp2_var,
              build_call_expr (stfsr, 1, tmp2_addr), NULL_TREE, NULL_TREE);

  tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);

  tree atomic_feraiseexcept
    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  tree update_call
    = build_call_expr (atomic_feraiseexcept, 1,
                       fold_convert (integer_type_node, tmp2_var));

  if (SPARC_LOW_FE_EXCEPT_VALUES)
    {
      tree shifted_tmp2_var
        = build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
                  build_int_cst (unsigned_type_node, 5));
      tree update_shift
        = build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
      update_call = compound_expr (update_shift, update_call);
    }

  *update
    = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
}
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  Borrowed from the PA port.

   SImode loads to floating-point registers are not zero-extended.
   The definition for LOAD_EXTEND_OP specifies that integer loads
   narrower than BITS_PER_WORD will be zero-extended.  As a result,
   we inhibit changes from SImode unless they are to a mode that is
   identical in size.

   Likewise for SFmode, since word-mode paradoxical subregs are
   problematic on big-endian architectures.  */

static bool
sparc_can_change_mode_class (machine_mode from, machine_mode to,
                             reg_class_t rclass)
{
  if (TARGET_ARCH64
      && GET_MODE_SIZE (from) == 4
      && GET_MODE_SIZE (to) != 4)
    return !reg_classes_intersect_p (rclass, FP_REGS);
  return true;
}
/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
sparc_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST)
    return MAX (align, FASTEST_ALIGNMENT);
  return align;
}

#include "gt-sparc.h"